diff --git a/.style.yapf b/.style.yapf
index 24681e21f7..4741fb4f3b 100644
--- a/.style.yapf
+++ b/.style.yapf
@@ -1,4 +1,3 @@
[style]
based_on_style = pep8
-indent_width = 2
column_limit = 80
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 0d164419c0..63d3cdaf5a 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -1,7 +1,6 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Top-level presubmit script for depot tools.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
@@ -31,149 +30,164 @@ TEST_TIMEOUT_S = 330 # 5m 30s
def CheckPylint(input_api, output_api):
- """Gather all the pylint logic into one place to make it self-contained."""
- files_to_check = [
- r'^[^/]*\.py$',
- r'^testing_support/[^/]*\.py$',
- r'^tests/[^/]*\.py$',
- r'^recipe_modules/.*\.py$', # Allow recursive search in recipe modules.
- ]
- files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP)
- if os.path.exists('.gitignore'):
- with open('.gitignore', encoding='utf-8') as fh:
- lines = [l.strip() for l in fh.readlines()]
- files_to_skip.extend([fnmatch.translate(l) for l in lines if
- l and not l.startswith('#')])
- if os.path.exists('.git/info/exclude'):
- with open('.git/info/exclude', encoding='utf-8') as fh:
- lines = [l.strip() for l in fh.readlines()]
- files_to_skip.extend([fnmatch.translate(l) for l in lines if
- l and not l.startswith('#')])
- disabled_warnings = [
- 'R0401', # Cyclic import
- 'W0613', # Unused argument
- 'C0415', # import-outside-toplevel
- 'R1710', # inconsistent-return-statements
- 'E1101', # no-member
- 'E1120', # no-value-for-parameter
- 'R1708', # stop-iteration-return
- 'W1510', # subprocess-run-check
- # Checks which should be re-enabled after Python 2 support is removed.
- 'R0205', # useless-object-inheritance
- 'R1725', # super-with-arguments
- 'W0707', # raise-missing-from
- 'W1113', # keyword-arg-before-vararg
- ]
- return input_api.RunTests(input_api.canned_checks.GetPylint(
- input_api,
- output_api,
- files_to_check=files_to_check,
- files_to_skip=files_to_skip,
- disabled_warnings=disabled_warnings,
- version='2.7'), parallel=False)
+ """Gather all the pylint logic into one place to make it self-contained."""
+ files_to_check = [
+ r'^[^/]*\.py$',
+ r'^testing_support/[^/]*\.py$',
+ r'^tests/[^/]*\.py$',
+ r'^recipe_modules/.*\.py$', # Allow recursive search in recipe modules.
+ ]
+ files_to_skip = list(input_api.DEFAULT_FILES_TO_SKIP)
+ if os.path.exists('.gitignore'):
+ with open('.gitignore', encoding='utf-8') as fh:
+ lines = [l.strip() for l in fh.readlines()]
+ files_to_skip.extend([
+ fnmatch.translate(l) for l in lines
+ if l and not l.startswith('#')
+ ])
+ if os.path.exists('.git/info/exclude'):
+ with open('.git/info/exclude', encoding='utf-8') as fh:
+ lines = [l.strip() for l in fh.readlines()]
+ files_to_skip.extend([
+ fnmatch.translate(l) for l in lines
+ if l and not l.startswith('#')
+ ])
+ disabled_warnings = [
+ 'R0401', # Cyclic import
+ 'W0613', # Unused argument
+ 'C0415', # import-outside-toplevel
+ 'R1710', # inconsistent-return-statements
+ 'E1101', # no-member
+ 'E1120', # no-value-for-parameter
+ 'R1708', # stop-iteration-return
+ 'W1510', # subprocess-run-check
+ # Checks which should be re-enabled after Python 2 support is removed.
+ 'R0205', # useless-object-inheritance
+ 'R1725', # super-with-arguments
+ 'W0707', # raise-missing-from
+ 'W1113', # keyword-arg-before-vararg
+ ]
+ return input_api.RunTests(input_api.canned_checks.GetPylint(
+ input_api,
+ output_api,
+ files_to_check=files_to_check,
+ files_to_skip=files_to_skip,
+ disabled_warnings=disabled_warnings,
+ version='2.7'),
+ parallel=False)
def CheckRecipes(input_api, output_api):
- file_filter = lambda x: x.LocalPath() == 'infra/config/recipes.cfg'
- return input_api.canned_checks.CheckJsonParses(input_api, output_api,
- file_filter=file_filter)
+ file_filter = lambda x: x.LocalPath() == 'infra/config/recipes.cfg'
+ return input_api.canned_checks.CheckJsonParses(input_api,
+ output_api,
+ file_filter=file_filter)
def CheckUsePython3(input_api, output_api):
- results = []
+ results = []
- if sys.version_info.major != 3:
- results.append(
- output_api.PresubmitError(
- 'Did not use Python3 for //tests/PRESUBMIT.py.'))
+ if sys.version_info.major != 3:
+ results.append(
+ output_api.PresubmitError(
+ 'Did not use Python3 for //tests/PRESUBMIT.py.'))
- return results
+ return results
def CheckJsonFiles(input_api, output_api):
- return input_api.canned_checks.CheckJsonParses(
- input_api, output_api)
+ return input_api.canned_checks.CheckJsonParses(input_api, output_api)
def CheckUnitTestsOnCommit(input_api, output_api):
- """ Do not run integration tests on upload since they are way too slow."""
+ """ Do not run integration tests on upload since they are way too slow."""
- input_api.SetTimeout(TEST_TIMEOUT_S)
+ input_api.SetTimeout(TEST_TIMEOUT_S)
- # Run only selected tests on Windows.
- test_to_run_list = [r'.*test\.py$']
- tests_to_skip_list = []
- if input_api.platform.startswith(('cygwin', 'win32')):
- print('Warning: skipping most unit tests on Windows')
- tests_to_skip_list.extend([
- r'.*auth_test\.py$',
- r'.*git_common_test\.py$',
- r'.*git_hyper_blame_test\.py$',
- r'.*git_map_test\.py$',
- r'.*ninjalog_uploader_test\.py$',
- r'.*recipes_test\.py$',
- ])
+ # Run only selected tests on Windows.
+ test_to_run_list = [r'.*test\.py$']
+ tests_to_skip_list = []
+ if input_api.platform.startswith(('cygwin', 'win32')):
+ print('Warning: skipping most unit tests on Windows')
+ tests_to_skip_list.extend([
+ r'.*auth_test\.py$',
+ r'.*git_common_test\.py$',
+ r'.*git_hyper_blame_test\.py$',
+ r'.*git_map_test\.py$',
+ r'.*ninjalog_uploader_test\.py$',
+ r'.*recipes_test\.py$',
+ ])
- tests = input_api.canned_checks.GetUnitTestsInDirectory(
- input_api,
- output_api,
- 'tests',
- files_to_check=test_to_run_list,
- files_to_skip=tests_to_skip_list)
+ tests = input_api.canned_checks.GetUnitTestsInDirectory(
+ input_api,
+ output_api,
+ 'tests',
+ files_to_check=test_to_run_list,
+ files_to_skip=tests_to_skip_list)
- return input_api.RunTests(tests)
+ return input_api.RunTests(tests)
def CheckCIPDManifest(input_api, output_api):
- # Validate CIPD manifests.
- root = input_api.os_path.normpath(
- input_api.os_path.abspath(input_api.PresubmitLocalPath()))
- rel_file = lambda rel: input_api.os_path.join(root, rel)
- cipd_manifests = set(rel_file(input_api.os_path.join(*x)) for x in (
- ('cipd_manifest.txt',),
- ('bootstrap', 'manifest.txt'),
- ('bootstrap', 'manifest_bleeding_edge.txt'),
+ # Validate CIPD manifests.
+ root = input_api.os_path.normpath(
+ input_api.os_path.abspath(input_api.PresubmitLocalPath()))
+ rel_file = lambda rel: input_api.os_path.join(root, rel)
+ cipd_manifests = set(
+ rel_file(input_api.os_path.join(*x)) for x in (
+ ('cipd_manifest.txt', ),
+ ('bootstrap', 'manifest.txt'),
+ ('bootstrap', 'manifest_bleeding_edge.txt'),
- # Also generate a file for the cipd client itself.
- ('cipd_client_version',),
- ))
- affected_manifests = input_api.AffectedFiles(
- include_deletes=False,
- file_filter=lambda x:
- input_api.os_path.normpath(x.AbsoluteLocalPath()) in cipd_manifests)
- tests = []
- for path in affected_manifests:
- path = path.AbsoluteLocalPath()
- if path.endswith('.txt'):
- tests.append(input_api.canned_checks.CheckCIPDManifest(
- input_api, output_api, path=path))
- else:
- pkg = 'infra/tools/cipd/${platform}'
- ver = input_api.ReadFile(path)
- tests.append(input_api.canned_checks.CheckCIPDManifest(
- input_api, output_api,
- content=CIPD_CLIENT_ENSURE_FILE_TEMPLATE % (pkg, ver)))
- tests.append(input_api.canned_checks.CheckCIPDClientDigests(
- input_api, output_api, client_version_file=path))
+ # Also generate a file for the cipd client itself.
+ (
+ 'cipd_client_version', ),
+ ))
+ affected_manifests = input_api.AffectedFiles(
+ include_deletes=False,
+ file_filter=lambda x: input_api.os_path.normpath(x.AbsoluteLocalPath()
+ ) in cipd_manifests)
+ tests = []
+ for path in affected_manifests:
+ path = path.AbsoluteLocalPath()
+ if path.endswith('.txt'):
+ tests.append(
+ input_api.canned_checks.CheckCIPDManifest(input_api,
+ output_api,
+ path=path))
+ else:
+ pkg = 'infra/tools/cipd/${platform}'
+ ver = input_api.ReadFile(path)
+ tests.append(
+ input_api.canned_checks.CheckCIPDManifest(
+ input_api,
+ output_api,
+ content=CIPD_CLIENT_ENSURE_FILE_TEMPLATE % (pkg, ver)))
+ tests.append(
+ input_api.canned_checks.CheckCIPDClientDigests(
+ input_api, output_api, client_version_file=path))
- return input_api.RunTests(tests)
+ return input_api.RunTests(tests)
def CheckOwnersFormat(input_api, output_api):
- return input_api.canned_checks.CheckOwnersFormat(input_api, output_api)
+ return input_api.canned_checks.CheckOwnersFormat(input_api, output_api)
def CheckOwnersOnUpload(input_api, output_api):
- return input_api.canned_checks.CheckOwners(input_api, output_api,
- allow_tbr=False)
+ return input_api.canned_checks.CheckOwners(input_api,
+ output_api,
+ allow_tbr=False)
+
def CheckDoNotSubmitOnCommit(input_api, output_api):
- return input_api.canned_checks.CheckDoNotSubmit(input_api, output_api)
+ return input_api.canned_checks.CheckDoNotSubmit(input_api, output_api)
def CheckPatchFormatted(input_api, output_api):
- # TODO(https://crbug.com/979330) If clang-format is fixed for non-chromium
- # repos, remove check_clang_format=False so that proto files can be formatted
- return input_api.canned_checks.CheckPatchFormatted(input_api,
- output_api,
- check_clang_format=False)
+ # TODO(https://crbug.com/979330) If clang-format is fixed for non-chromium
+ # repos, remove check_clang_format=False so that proto files can be
+ # formatted
+ return input_api.canned_checks.CheckPatchFormatted(input_api,
+ output_api,
+ check_clang_format=False)
diff --git a/auth.py b/auth.py
index b01664b56c..5696a7ac5c 100644
--- a/auth.py
+++ b/auth.py
@@ -1,7 +1,6 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Google OAuth2 related functions."""
from __future__ import print_function
@@ -16,6 +15,8 @@ import os
import subprocess2
+# TODO: Should fix these warnings.
+# pylint: disable=line-too-long
# This is what most GAE apps require for authentication.
OAUTH_SCOPE_EMAIL = 'https://www.googleapis.com/auth/userinfo.email'
@@ -27,89 +28,87 @@ OAUTH_SCOPES = OAUTH_SCOPE_EMAIL
# Mockable datetime.datetime.utcnow for testing.
def datetime_now():
- return datetime.datetime.utcnow()
+ return datetime.datetime.utcnow()
# OAuth access token with its expiration time (UTC datetime or None if unknown).
-class AccessToken(collections.namedtuple('AccessToken', [
- 'token',
- 'expires_at',
- ])):
-
- def needs_refresh(self):
- """True if this AccessToken should be refreshed."""
- if self.expires_at is not None:
- # Allow 30s of clock skew between client and backend.
- return datetime_now() + datetime.timedelta(seconds=30) >= self.expires_at
- # Token without expiration time never expires.
- return False
+class AccessToken(
+ collections.namedtuple('AccessToken', [
+ 'token',
+ 'expires_at',
+ ])):
+ def needs_refresh(self):
+ """True if this AccessToken should be refreshed."""
+ if self.expires_at is not None:
+ # Allow 30s of clock skew between client and backend.
+ return datetime_now() + datetime.timedelta(
+ seconds=30) >= self.expires_at
+ # Token without expiration time never expires.
+ return False
class LoginRequiredError(Exception):
- """Interaction with the user is required to authenticate."""
-
- def __init__(self, scopes=OAUTH_SCOPE_EMAIL):
- msg = (
- 'You are not logged in. Please login first by running:\n'
- ' luci-auth login -scopes %s' % scopes)
- super(LoginRequiredError, self).__init__(msg)
+ """Interaction with the user is required to authenticate."""
+ def __init__(self, scopes=OAUTH_SCOPE_EMAIL):
+ msg = ('You are not logged in. Please login first by running:\n'
+ ' luci-auth login -scopes %s' % scopes)
+ super(LoginRequiredError, self).__init__(msg)
def has_luci_context_local_auth():
- """Returns whether LUCI_CONTEXT should be used for ambient authentication."""
- ctx_path = os.environ.get('LUCI_CONTEXT')
- if not ctx_path:
- return False
- try:
- with open(ctx_path) as f:
- loaded = json.load(f)
- except (OSError, IOError, ValueError):
- return False
- return loaded.get('local_auth', {}).get('default_account_id') is not None
+ """Returns whether LUCI_CONTEXT should be used for ambient authentication."""
+ ctx_path = os.environ.get('LUCI_CONTEXT')
+ if not ctx_path:
+ return False
+ try:
+ with open(ctx_path) as f:
+ loaded = json.load(f)
+ except (OSError, IOError, ValueError):
+ return False
+ return loaded.get('local_auth', {}).get('default_account_id') is not None
class Authenticator(object):
- """Object that knows how to refresh access tokens when needed.
+ """Object that knows how to refresh access tokens when needed.
Args:
scopes: space separated oauth scopes. Defaults to OAUTH_SCOPE_EMAIL.
"""
+ def __init__(self, scopes=OAUTH_SCOPE_EMAIL):
+ self._access_token = None
+ self._scopes = scopes
- def __init__(self, scopes=OAUTH_SCOPE_EMAIL):
- self._access_token = None
- self._scopes = scopes
-
- def has_cached_credentials(self):
- """Returns True if credentials can be obtained.
+ def has_cached_credentials(self):
+ """Returns True if credentials can be obtained.
If returns False, get_access_token() later will probably ask for interactive
login by raising LoginRequiredError.
If returns True, get_access_token() won't ask for interactive login.
"""
- return bool(self._get_luci_auth_token())
+ return bool(self._get_luci_auth_token())
- def get_access_token(self):
- """Returns AccessToken, refreshing it if necessary.
+ def get_access_token(self):
+ """Returns AccessToken, refreshing it if necessary.
Raises:
LoginRequiredError if user interaction is required.
"""
- if self._access_token and not self._access_token.needs_refresh():
- return self._access_token
+ if self._access_token and not self._access_token.needs_refresh():
+ return self._access_token
- # Token expired or missing. Maybe some other process already updated it,
- # reload from the cache.
- self._access_token = self._get_luci_auth_token()
- if self._access_token and not self._access_token.needs_refresh():
- return self._access_token
+ # Token expired or missing. Maybe some other process already updated it,
+ # reload from the cache.
+ self._access_token = self._get_luci_auth_token()
+ if self._access_token and not self._access_token.needs_refresh():
+ return self._access_token
- # Nope, still expired. Needs user interaction.
- logging.error('Failed to create access token')
- raise LoginRequiredError(self._scopes)
+ # Nope, still expired. Needs user interaction.
+ logging.error('Failed to create access token')
+ raise LoginRequiredError(self._scopes)
- def authorize(self, http):
- """Monkey patches authentication logic of httplib2.Http instance.
+ def authorize(self, http):
+ """Monkey patches authentication logic of httplib2.Http instance.
The modified http.request method will add authentication headers to each
request.
@@ -120,46 +119,53 @@ class Authenticator(object):
Returns:
A modified instance of http that was passed in.
"""
- # Adapted from oauth2client.OAuth2Credentials.authorize.
- request_orig = http.request
+ # Adapted from oauth2client.OAuth2Credentials.authorize.
+ request_orig = http.request
- @functools.wraps(request_orig)
- def new_request(
- uri, method='GET', body=None, headers=None,
- redirections=httplib2.DEFAULT_MAX_REDIRECTS,
- connection_type=None):
- headers = (headers or {}).copy()
- headers['Authorization'] = 'Bearer %s' % self.get_access_token().token
- return request_orig(
- uri, method, body, headers, redirections, connection_type)
+ @functools.wraps(request_orig)
+ def new_request(uri,
+ method='GET',
+ body=None,
+ headers=None,
+ redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+ connection_type=None):
+ headers = (headers or {}).copy()
+ headers['Authorization'] = 'Bearer %s' % self.get_access_token(
+ ).token
+ return request_orig(uri, method, body, headers, redirections,
+ connection_type)
- http.request = new_request
- return http
+ http.request = new_request
+ return http
- ## Private methods.
+ ## Private methods.
- def _run_luci_auth_login(self):
- """Run luci-auth login.
+ def _run_luci_auth_login(self):
+ """Run luci-auth login.
Returns:
AccessToken with credentials.
"""
- logging.debug('Running luci-auth login')
- subprocess2.check_call(['luci-auth', 'login', '-scopes', self._scopes])
- return self._get_luci_auth_token()
+ logging.debug('Running luci-auth login')
+ subprocess2.check_call(['luci-auth', 'login', '-scopes', self._scopes])
+ return self._get_luci_auth_token()
- def _get_luci_auth_token(self):
- logging.debug('Running luci-auth token')
- try:
- out, err = subprocess2.check_call_out(
- ['luci-auth', 'token', '-scopes', self._scopes, '-json-output', '-'],
- stdout=subprocess2.PIPE, stderr=subprocess2.PIPE)
- logging.debug('luci-auth token stderr:\n%s', err)
- token_info = json.loads(out)
- return AccessToken(
- token_info['token'],
- datetime.datetime.utcfromtimestamp(token_info['expiry']))
- except subprocess2.CalledProcessError as e:
- # subprocess2.CalledProcessError.__str__ nicely formats stdout/stderr.
- logging.error('luci-auth token failed: %s', e)
- return None
+ def _get_luci_auth_token(self):
+ logging.debug('Running luci-auth token')
+ try:
+ out, err = subprocess2.check_call_out([
+ 'luci-auth', 'token', '-scopes', self._scopes, '-json-output',
+ '-'
+ ],
+ stdout=subprocess2.PIPE,
+ stderr=subprocess2.PIPE)
+ logging.debug('luci-auth token stderr:\n%s', err)
+ token_info = json.loads(out)
+ return AccessToken(
+ token_info['token'],
+ datetime.datetime.utcfromtimestamp(token_info['expiry']))
+ except subprocess2.CalledProcessError as e:
+ # subprocess2.CalledProcessError.__str__ nicely formats
+ # stdout/stderr.
+ logging.error('luci-auth token failed: %s', e)
+ return None
diff --git a/autoninja.py b/autoninja.py
index d4f8c0c0f2..202c5740ab 100755
--- a/autoninja.py
+++ b/autoninja.py
@@ -2,7 +2,6 @@
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""
This script (intended to be invoked by autoninja or autoninja.bat) detects
whether a build is accelerated using a service like goma. If so, it runs with a
@@ -19,256 +18,273 @@ import subprocess
import sys
if sys.platform == 'darwin':
- import resource
+ import resource
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
- # The -t tools are incompatible with -j
- t_specified = False
- j_specified = False
- offline = False
- output_dir = '.'
- input_args = args
- # On Windows the autoninja.bat script passes along the arguments enclosed in
- # double quotes. This prevents multiple levels of parsing of the special '^'
- # characters needed when compiling a single file but means that this script
- # gets called with a single argument containing all of the actual arguments,
- # separated by spaces. When this case is detected we need to do argument
- # splitting ourselves. This means that arguments containing actual spaces are
- # not supported by autoninja, but that is not a real limitation.
- if (sys.platform.startswith('win') and len(args) == 2
- and input_args[1].count(' ') > 0):
- input_args = args[:1] + args[1].split()
+ # The -t tools are incompatible with -j
+ t_specified = False
+ j_specified = False
+ offline = False
+ output_dir = '.'
+ input_args = args
+ # On Windows the autoninja.bat script passes along the arguments enclosed in
+ # double quotes. This prevents multiple levels of parsing of the special '^'
+ # characters needed when compiling a single file but means that this script
+ # gets called with a single argument containing all of the actual arguments,
+ # separated by spaces. When this case is detected we need to do argument
+ # splitting ourselves. This means that arguments containing actual spaces
+ # are not supported by autoninja, but that is not a real limitation.
+ if (sys.platform.startswith('win') and len(args) == 2
+ and input_args[1].count(' ') > 0):
+ input_args = args[:1] + args[1].split()
- # Ninja uses getopt_long, which allow to intermix non-option arguments.
- # To leave non supported parameters untouched, we do not use getopt.
- for index, arg in enumerate(input_args[1:]):
- if arg.startswith('-j'):
- j_specified = True
- if arg.startswith('-t'):
- t_specified = True
- if arg == '-C':
- # + 1 to get the next argument and +1 because we trimmed off input_args[0]
- output_dir = input_args[index + 2]
- elif arg.startswith('-C'):
- # Support -Cout/Default
- output_dir = arg[2:]
- elif arg in ('-o', '--offline'):
- offline = True
- elif arg == '-h':
- print('autoninja: Use -o/--offline to temporary disable goma.',
- file=sys.stderr)
- print(file=sys.stderr)
+    # Ninja uses getopt_long, which allows intermixing non-option arguments.
+    # To leave unsupported parameters untouched, we do not use getopt.
+ for index, arg in enumerate(input_args[1:]):
+ if arg.startswith('-j'):
+ j_specified = True
+ if arg.startswith('-t'):
+ t_specified = True
+ if arg == '-C':
+ # + 1 to get the next argument and +1 because we trimmed off
+ # input_args[0]
+ output_dir = input_args[index + 2]
+ elif arg.startswith('-C'):
+ # Support -Cout/Default
+ output_dir = arg[2:]
+ elif arg in ('-o', '--offline'):
+ offline = True
+ elif arg == '-h':
+            print('autoninja: Use -o/--offline to temporarily disable goma.',
+ file=sys.stderr)
+ print(file=sys.stderr)
- use_goma = False
- use_remoteexec = False
- use_rbe = False
- use_siso = False
-
- # Attempt to auto-detect remote build acceleration. We support gn-based
- # builds, where we look for args.gn in the build tree, and cmake-based builds
- # where we look for rules.ninja.
- if os.path.exists(os.path.join(output_dir, 'args.gn')):
- with open(os.path.join(output_dir, 'args.gn')) as file_handle:
- for line in file_handle:
- # use_goma, use_remoteexec, or use_rbe will activate build acceleration.
- #
- # This test can match multi-argument lines. Examples of this are:
- # is_debug=false use_goma=true is_official_build=false
- # use_goma=false# use_goma=true This comment is ignored
- #
- # Anything after a comment is not consider a valid argument.
- line_without_comment = line.split('#')[0]
- if re.search(r'(^|\s)(use_goma)\s*=\s*true($|\s)',
- line_without_comment):
- use_goma = True
- continue
- if re.search(r'(^|\s)(use_remoteexec)\s*=\s*true($|\s)',
- line_without_comment):
- use_remoteexec = True
- continue
- if re.search(r'(^|\s)(use_rbe)\s*=\s*true($|\s)', line_without_comment):
- use_rbe = True
- continue
- if re.search(r'(^|\s)(use_siso)\s*=\s*true($|\s)',
- line_without_comment):
- use_siso = True
- continue
-
- siso_marker = os.path.join(output_dir, '.siso_deps')
- if use_siso:
- ninja_marker = os.path.join(output_dir, '.ninja_log')
- # autosiso generates a .ninja_log file so the mere existence of a
- # .ninja_log file doesn't imply that a ninja build was done. However if
- # there is a .ninja_log but no .siso_deps then that implies a ninja build.
- if os.path.exists(ninja_marker) and not os.path.exists(siso_marker):
- return ('echo Run gn clean before switching from ninja to siso in %s' %
- output_dir)
- siso = ['autosiso'] if use_remoteexec else ['siso', 'ninja']
- if sys.platform.startswith('win'):
- # An explicit 'call' is needed to make sure the invocation of autosiso
- # returns to autoninja.bat, and the command prompt title gets reset.
- siso = ['call'] + siso
- return ' '.join(siso + input_args[1:])
-
- if os.path.exists(siso_marker):
- return ('echo Run gn clean before switching from siso to ninja in %s' %
- output_dir)
-
- else:
- for relative_path in [
- '', # GN keeps them in the root of output_dir
- 'CMakeFiles'
- ]:
- path = os.path.join(output_dir, relative_path, 'rules.ninja')
- if os.path.exists(path):
- with open(path) as file_handle:
- for line in file_handle:
- if re.match(r'^\s*command\s*=\s*\S+gomacc', line):
- use_goma = True
- break
-
- # Strip -o/--offline so ninja doesn't see them.
- input_args = [arg for arg in input_args if arg not in ('-o', '--offline')]
-
- # If GOMA_DISABLED is set to "true", "t", "yes", "y", or "1"
- # (case-insensitive) then gomacc will use the local compiler instead of doing
- # a goma compile. This is convenient if you want to briefly disable goma. It
- # avoids having to rebuild the world when transitioning between goma/non-goma
- # builds. However, it is not as fast as doing a "normal" non-goma build
- # because an extra process is created for each compile step. Checking this
- # environment variable ensures that autoninja uses an appropriate -j value in
- # this situation.
- goma_disabled_env = os.environ.get('GOMA_DISABLED', '0').lower()
- if offline or goma_disabled_env in ['true', 't', 'yes', 'y', '1']:
use_goma = False
+ use_remoteexec = False
+ use_rbe = False
+ use_siso = False
- if use_goma:
- gomacc_file = 'gomacc.exe' if sys.platform.startswith('win') else 'gomacc'
- goma_dir = os.environ.get('GOMA_DIR', os.path.join(SCRIPT_DIR, '.cipd_bin'))
- gomacc_path = os.path.join(goma_dir, gomacc_file)
- # Don't invoke gomacc if it doesn't exist.
- if os.path.exists(gomacc_path):
- # Check to make sure that goma is running. If not, don't start the build.
- status = subprocess.call([gomacc_path, 'port'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=False)
- if status == 1:
- print('Goma is not running. Use "goma_ctl ensure_start" to start it.',
- file=sys.stderr)
- if sys.platform.startswith('win'):
- # Set an exit code of 1 in the batch file.
- print('cmd "/c exit 1"')
- else:
- # Set an exit code of 1 by executing 'false' in the bash script.
- print('false')
- sys.exit(1)
+ # Attempt to auto-detect remote build acceleration. We support gn-based
+ # builds, where we look for args.gn in the build tree, and cmake-based
+ # builds where we look for rules.ninja.
+ if os.path.exists(os.path.join(output_dir, 'args.gn')):
+ with open(os.path.join(output_dir, 'args.gn')) as file_handle:
+ for line in file_handle:
+ # use_goma, use_remoteexec, or use_rbe will activate build
+ # acceleration.
+ #
+ # This test can match multi-argument lines. Examples of this
+ # are: is_debug=false use_goma=true is_official_build=false
+ # use_goma=false# use_goma=true This comment is ignored
+ #
+                # Anything after a comment is not considered a valid argument.
+ line_without_comment = line.split('#')[0]
+ if re.search(r'(^|\s)(use_goma)\s*=\s*true($|\s)',
+ line_without_comment):
+ use_goma = True
+ continue
+ if re.search(r'(^|\s)(use_remoteexec)\s*=\s*true($|\s)',
+ line_without_comment):
+ use_remoteexec = True
+ continue
+ if re.search(r'(^|\s)(use_rbe)\s*=\s*true($|\s)',
+ line_without_comment):
+ use_rbe = True
+ continue
+ if re.search(r'(^|\s)(use_siso)\s*=\s*true($|\s)',
+ line_without_comment):
+ use_siso = True
+ continue
- # A large build (with or without goma) tends to hog all system resources.
- # Launching the ninja process with 'nice' priorities improves this situation.
- prefix_args = []
- if (sys.platform.startswith('linux')
- and os.environ.get('NINJA_BUILD_IN_BACKGROUND', '0') == '1'):
- # nice -10 is process priority 10 lower than default 0
- # ionice -c 3 is IO priority IDLE
- prefix_args = ['nice'] + ['-10']
+ siso_marker = os.path.join(output_dir, '.siso_deps')
+ if use_siso:
+ ninja_marker = os.path.join(output_dir, '.ninja_log')
+ # autosiso generates a .ninja_log file so the mere existence of a
+ # .ninja_log file doesn't imply that a ninja build was done. However
+ # if there is a .ninja_log but no .siso_deps then that implies a
+ # ninja build.
+ if os.path.exists(ninja_marker) and not os.path.exists(siso_marker):
+ return (
+ 'echo Run gn clean before switching from ninja to siso in '
+ '%s' % output_dir)
+ siso = ['autosiso'] if use_remoteexec else ['siso', 'ninja']
+ if sys.platform.startswith('win'):
+ # An explicit 'call' is needed to make sure the invocation of
+ # autosiso returns to autoninja.bat, and the command prompt
+ # title gets reset.
+ siso = ['call'] + siso
+ return ' '.join(siso + input_args[1:])
- # Tell goma or reclient to do local compiles. On Windows these environment
- # variables are set by the wrapper batch file.
- offline_env = ['RBE_remote_disabled=1', 'GOMA_DISABLED=1'
- ] if offline and not sys.platform.startswith('win') else []
+ if os.path.exists(siso_marker):
+ return (
+ 'echo Run gn clean before switching from siso to ninja in %s' %
+ output_dir)
- # On macOS, the default limit of open file descriptors is too low (256).
- # This causes a large j value to result in 'Too many open files' errors.
- # Check whether the limit can be raised to a large enough value. If yes,
- # use `ulimit -n .... &&` as a prefix to increase the limit when running
- # ninja.
- if sys.platform == 'darwin':
- wanted_limit = 200000 # Large enough to avoid any risk of exhaustion.
- fileno_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
- if fileno_limit <= wanted_limit:
- try:
- resource.setrlimit(resource.RLIMIT_NOFILE, (wanted_limit, hard_limit))
- except Exception as _:
- pass
- fileno_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
- if fileno_limit >= wanted_limit:
- prefix_args = ['ulimit', '-n', f'{wanted_limit}', '&&'] + offline_env
- offline_env = []
-
-
- # Call ninja.py so that it can find ninja binary installed by DEPS or one in
- # PATH.
- ninja_path = os.path.join(SCRIPT_DIR, 'ninja.py')
- # If using remoteexec, use ninja_reclient.py which wraps ninja.py with
- # starting and stopping reproxy.
- if use_remoteexec:
- ninja_path = os.path.join(SCRIPT_DIR, 'ninja_reclient.py')
-
- args = offline_env + prefix_args + [sys.executable, ninja_path
- ] + input_args[1:]
-
- num_cores = multiprocessing.cpu_count()
- if not j_specified and not t_specified:
- if not offline and (use_goma or use_remoteexec or use_rbe):
- args.append('-j')
- default_core_multiplier = 80
- if platform.machine() in ('x86_64', 'AMD64'):
- # Assume simultaneous multithreading and therefore half as many cores as
- # logical processors.
- num_cores //= 2
-
- core_multiplier = int(
- os.environ.get('NINJA_CORE_MULTIPLIER', default_core_multiplier))
- j_value = num_cores * core_multiplier
-
- core_limit = int(os.environ.get('NINJA_CORE_LIMIT', j_value))
- j_value = min(j_value, core_limit)
-
- if sys.platform.startswith('win'):
- # On windows, j value higher than 1000 does not improve build
- # performance.
- j_value = min(j_value, 1000)
- elif sys.platform == 'darwin':
- # If the number of open file descriptors is large enough (or it can be
- # raised to a large enough value), then set j value to 1000. This limit
- # comes from ninja which is limited to at most FD_SETSIZE (1024) open
- # file descriptors (using 1000 leave a bit of head room).
- #
- # If the number of open file descriptors cannot be raised, then use a
- # j value of 200 which is the maximum value that reliably work with
- # the default limit of 256.
- if fileno_limit >= wanted_limit:
- j_value = min(j_value, 1000)
- else:
- j_value = min(j_value, 200)
-
- args.append('%d' % j_value)
else:
- j_value = num_cores
- # Ninja defaults to |num_cores + 2|
- j_value += int(os.environ.get('NINJA_CORE_ADDITION', '2'))
- args.append('-j')
- args.append('%d' % j_value)
+ for relative_path in [
+ '', # GN keeps them in the root of output_dir
+ 'CMakeFiles'
+ ]:
+ path = os.path.join(output_dir, relative_path, 'rules.ninja')
+ if os.path.exists(path):
+ with open(path) as file_handle:
+ for line in file_handle:
+ if re.match(r'^\s*command\s*=\s*\S+gomacc', line):
+ use_goma = True
+ break
- # On Windows, fully quote the path so that the command processor doesn't think
- # the whole output is the command.
- # On Linux and Mac, if people put depot_tools in directories with ' ',
- # shell would misunderstand ' ' as a path separation.
- # TODO(yyanagisawa): provide proper quoting for Windows.
- # see https://cs.chromium.org/chromium/src/tools/mb/mb.py
- for i in range(len(args)):
- if (i == 0 and sys.platform.startswith('win')) or ' ' in args[i]:
- args[i] = '"%s"' % args[i].replace('"', '\\"')
+ # Strip -o/--offline so ninja doesn't see them.
+ input_args = [arg for arg in input_args if arg not in ('-o', '--offline')]
- if os.environ.get('NINJA_SUMMARIZE_BUILD', '0') == '1':
- args += ['-d', 'stats']
+ # If GOMA_DISABLED is set to "true", "t", "yes", "y", or "1"
+ # (case-insensitive) then gomacc will use the local compiler instead of
+ # doing a goma compile. This is convenient if you want to briefly disable
+ # goma. It avoids having to rebuild the world when transitioning between
+ # goma/non-goma builds. However, it is not as fast as doing a "normal"
+ # non-goma build because an extra process is created for each compile step.
+ # Checking this environment variable ensures that autoninja uses an
+ # appropriate -j value in this situation.
+ goma_disabled_env = os.environ.get('GOMA_DISABLED', '0').lower()
+ if offline or goma_disabled_env in ['true', 't', 'yes', 'y', '1']:
+ use_goma = False
- return ' '.join(args)
+ if use_goma:
+ gomacc_file = 'gomacc.exe' if sys.platform.startswith(
+ 'win') else 'gomacc'
+ goma_dir = os.environ.get('GOMA_DIR',
+ os.path.join(SCRIPT_DIR, '.cipd_bin'))
+ gomacc_path = os.path.join(goma_dir, gomacc_file)
+ # Don't invoke gomacc if it doesn't exist.
+ if os.path.exists(gomacc_path):
+ # Check to make sure that goma is running. If not, don't start the
+ # build.
+ status = subprocess.call([gomacc_path, 'port'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False)
+ if status == 1:
+ print(
+ 'Goma is not running. Use "goma_ctl ensure_start" to start '
+ 'it.',
+ file=sys.stderr)
+ if sys.platform.startswith('win'):
+ # Set an exit code of 1 in the batch file.
+ print('cmd "/c exit 1"')
+ else:
+ # Set an exit code of 1 by executing 'false' in the bash
+ # script.
+ print('false')
+ sys.exit(1)
+
+ # A large build (with or without goma) tends to hog all system resources.
+ # Launching the ninja process with 'nice' priorities improves this
+ # situation.
+ prefix_args = []
+ if (sys.platform.startswith('linux')
+ and os.environ.get('NINJA_BUILD_IN_BACKGROUND', '0') == '1'):
+ # nice -10 is process priority 10 lower than default 0
+ # ionice -c 3 is IO priority IDLE
+ prefix_args = ['nice'] + ['-10']
+
+ # Tell goma or reclient to do local compiles. On Windows these environment
+ # variables are set by the wrapper batch file.
+ offline_env = ['RBE_remote_disabled=1', 'GOMA_DISABLED=1'
+ ] if offline and not sys.platform.startswith('win') else []
+
+ # On macOS, the default limit of open file descriptors is too low (256).
+ # This causes a large j value to result in 'Too many open files' errors.
+ # Check whether the limit can be raised to a large enough value. If yes,
+ # use `ulimit -n .... &&` as a prefix to increase the limit when running
+ # ninja.
+ if sys.platform == 'darwin':
+ wanted_limit = 200000 # Large enough to avoid any risk of exhaustion.
+ fileno_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
+ if fileno_limit <= wanted_limit:
+ try:
+ resource.setrlimit(resource.RLIMIT_NOFILE,
+ (wanted_limit, hard_limit))
+ except Exception as _:
+ pass
+ fileno_limit, hard_limit = resource.getrlimit(
+ resource.RLIMIT_NOFILE)
+ if fileno_limit >= wanted_limit:
+ prefix_args = ['ulimit', '-n', f'{wanted_limit}', '&&'
+ ] + offline_env
+ offline_env = []
+
+ # Call ninja.py so that it can find ninja binary installed by DEPS or one in
+ # PATH.
+ ninja_path = os.path.join(SCRIPT_DIR, 'ninja.py')
+ # If using remoteexec, use ninja_reclient.py which wraps ninja.py with
+ # starting and stopping reproxy.
+ if use_remoteexec:
+ ninja_path = os.path.join(SCRIPT_DIR, 'ninja_reclient.py')
+
+ args = offline_env + prefix_args + [sys.executable, ninja_path
+ ] + input_args[1:]
+
+ num_cores = multiprocessing.cpu_count()
+ if not j_specified and not t_specified:
+ if not offline and (use_goma or use_remoteexec or use_rbe):
+ args.append('-j')
+ default_core_multiplier = 80
+ if platform.machine() in ('x86_64', 'AMD64'):
+ # Assume simultaneous multithreading and therefore half as many
+ # cores as logical processors.
+ num_cores //= 2
+
+ core_multiplier = int(
+ os.environ.get('NINJA_CORE_MULTIPLIER',
+ default_core_multiplier))
+ j_value = num_cores * core_multiplier
+
+ core_limit = int(os.environ.get('NINJA_CORE_LIMIT', j_value))
+ j_value = min(j_value, core_limit)
+
+ if sys.platform.startswith('win'):
+                # On Windows, a j value higher than 1000 does not improve build
+ # performance.
+ j_value = min(j_value, 1000)
+ elif sys.platform == 'darwin':
+ # If the number of open file descriptors is large enough (or it
+ # can be raised to a large enough value), then set j value to
+ # 1000. This limit comes from ninja which is limited to at most
+                # FD_SETSIZE (1024) open file descriptors (using 1000 leaves a
+                # bit of headroom).
+ #
+ # If the number of open file descriptors cannot be raised, then
+ # use a j value of 200 which is the maximum value that reliably
+                # works with the default limit of 256.
+ if fileno_limit >= wanted_limit:
+ j_value = min(j_value, 1000)
+ else:
+ j_value = min(j_value, 200)
+
+ args.append('%d' % j_value)
+ else:
+ j_value = num_cores
+ # Ninja defaults to |num_cores + 2|
+ j_value += int(os.environ.get('NINJA_CORE_ADDITION', '2'))
+ args.append('-j')
+ args.append('%d' % j_value)
+
+ # On Windows, fully quote the path so that the command processor doesn't
+ # think the whole output is the command. On Linux and Mac, if people put
+    # depot_tools in directories with ' ', the shell would misinterpret
+    # ' ' as a path separator. TODO(yyanagisawa): provide proper quoting
+    # for Windows. see https://cs.chromium.org/chromium/src/tools/mb/mb.py
+ for i in range(len(args)):
+ if (i == 0 and sys.platform.startswith('win')) or ' ' in args[i]:
+ args[i] = '"%s"' % args[i].replace('"', '\\"')
+
+ if os.environ.get('NINJA_SUMMARIZE_BUILD', '0') == '1':
+ args += ['-d', 'stats']
+
+ return ' '.join(args)
if __name__ == '__main__':
- print(main(sys.argv))
+ print(main(sys.argv))
diff --git a/autosiso.py b/autosiso.py
index 3088aa23eb..d73ef36a77 100755
--- a/autosiso.py
+++ b/autosiso.py
@@ -18,53 +18,53 @@ import siso
def _use_remoteexec(argv):
- out_dir = reclient_helper.find_ninja_out_dir(argv)
- gn_args_path = os.path.join(out_dir, 'args.gn')
- if not os.path.exists(gn_args_path):
+ out_dir = reclient_helper.find_ninja_out_dir(argv)
+ gn_args_path = os.path.join(out_dir, 'args.gn')
+ if not os.path.exists(gn_args_path):
+ return False
+ with open(gn_args_path) as f:
+ for line in f:
+ line_without_comment = line.split('#')[0]
+ if re.search(r'(^|\s)use_remoteexec\s*=\s*true($|\s)',
+ line_without_comment):
+ return True
return False
- with open(gn_args_path) as f:
- for line in f:
- line_without_comment = line.split('#')[0]
- if re.search(r'(^|\s)use_remoteexec\s*=\s*true($|\s)',
- line_without_comment):
- return True
- return False
def main(argv):
- # On Windows the autosiso.bat script passes along the arguments enclosed in
- # double quotes. This prevents multiple levels of parsing of the special '^'
- # characters needed when compiling a single file but means that this script
- # gets called with a single argument containing all of the actual arguments,
- # separated by spaces. When this case is detected we need to do argument
- # splitting ourselves. This means that arguments containing actual spaces are
- # not supported by autoninja, but that is not a real limitation.
- if (sys.platform.startswith('win') and len(argv) == 2
- and argv[1].count(' ') > 0):
- argv = argv[:1] + argv[1].split()
+ # On Windows the autosiso.bat script passes along the arguments enclosed in
+ # double quotes. This prevents multiple levels of parsing of the special '^'
+ # characters needed when compiling a single file but means that this script
+ # gets called with a single argument containing all of the actual arguments,
+ # separated by spaces. When this case is detected we need to do argument
+ # splitting ourselves. This means that arguments containing actual spaces
+ # are not supported by autoninja, but that is not a real limitation.
+ if (sys.platform.startswith('win') and len(argv) == 2
+ and argv[1].count(' ') > 0):
+ argv = argv[:1] + argv[1].split()
- if not _use_remoteexec(argv):
- print(
- "`use_remoteexec=true` is not detected.\n"
- "Please run `siso` command directly.",
- file=sys.stderr)
- return 1
+ if not _use_remoteexec(argv):
+ print(
+ "`use_remoteexec=true` is not detected.\n"
+ "Please run `siso` command directly.",
+ file=sys.stderr)
+ return 1
- with reclient_helper.build_context(argv, 'autosiso') as ret_code:
- if ret_code:
- return ret_code
- argv = [
- argv[0],
- 'ninja',
- # Do not authenticate when using Reproxy.
- '-project=',
- '-reapi_instance=',
- ] + argv[1:]
- return siso.main(argv)
+ with reclient_helper.build_context(argv, 'autosiso') as ret_code:
+ if ret_code:
+ return ret_code
+ argv = [
+ argv[0],
+ 'ninja',
+ # Do not authenticate when using Reproxy.
+ '-project=',
+ '-reapi_instance=',
+ ] + argv[1:]
+ return siso.main(argv)
if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv))
- except KeyboardInterrupt:
- sys.exit(1)
+ try:
+ sys.exit(main(sys.argv))
+ except KeyboardInterrupt:
+ sys.exit(1)
diff --git a/bazel.py b/bazel.py
index 8fbd77854e..e3c9eb3150 100755
--- a/bazel.py
+++ b/bazel.py
@@ -25,29 +25,29 @@ from typing import List, Optional
def _find_bazel_cros() -> Optional[Path]:
- """Find the bazel launcher for ChromiumOS."""
- cwd = Path.cwd()
- for parent in itertools.chain([cwd], cwd.parents):
- bazel_launcher = parent / "chromite" / "bin" / "bazel"
- if bazel_launcher.exists():
- return bazel_launcher
- return None
+ """Find the bazel launcher for ChromiumOS."""
+ cwd = Path.cwd()
+ for parent in itertools.chain([cwd], cwd.parents):
+ bazel_launcher = parent / "chromite" / "bin" / "bazel"
+ if bazel_launcher.exists():
+ return bazel_launcher
+ return None
def _find_next_bazel_in_path() -> Optional[Path]:
- """The fallback method: search the remainder of PATH for bazel."""
- # Remove depot_tools from PATH if present.
- depot_tools = Path(__file__).resolve().parent
- path_env = os.environ.get("PATH", os.defpath)
- search_paths = []
- for path in path_env.split(os.pathsep):
- if Path(path).resolve() != depot_tools:
- search_paths.append(path)
- new_path_env = os.pathsep.join(search_paths)
- bazel = shutil.which("bazel", path=new_path_env)
- if bazel:
- return Path(bazel)
- return None
+ """The fallback method: search the remainder of PATH for bazel."""
+ # Remove depot_tools from PATH if present.
+ depot_tools = Path(__file__).resolve().parent
+ path_env = os.environ.get("PATH", os.defpath)
+ search_paths = []
+ for path in path_env.split(os.pathsep):
+ if Path(path).resolve() != depot_tools:
+ search_paths.append(path)
+ new_path_env = os.pathsep.join(search_paths)
+ bazel = shutil.which("bazel", path=new_path_env)
+ if bazel:
+ return Path(bazel)
+ return None
# All functions used to search for Bazel (in order of search).
@@ -71,15 +71,15 @@ it's actually installed."""
def main(argv: List[str]) -> int:
- """Main."""
- for search_func in _SEARCH_FUNCTIONS:
- bazel = search_func()
- if bazel:
- os.execv(bazel, [str(bazel), *argv])
+ """Main."""
+ for search_func in _SEARCH_FUNCTIONS:
+ bazel = search_func()
+ if bazel:
+ os.execv(bazel, [str(bazel), *argv])
- print(_FIND_FAILURE_MSG, file=sys.stderr)
- return 1
+ print(_FIND_FAILURE_MSG, file=sys.stderr)
+ return 1
if __name__ == "__main__":
- sys.exit(main(sys.argv[1:]))
+ sys.exit(main(sys.argv[1:]))
diff --git a/bootstrap/bootstrap.py b/bootstrap/bootstrap.py
index 39665881a2..7afa2b237c 100644
--- a/bootstrap/bootstrap.py
+++ b/bootstrap/bootstrap.py
@@ -17,7 +17,6 @@ import subprocess
import sys
import tempfile
-
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, '..'))
@@ -29,26 +28,31 @@ BAT_EXT = '.bat' if IS_WIN else ''
# Top-level stubs to generate that fall through to executables within the Git
# directory.
WIN_GIT_STUBS = {
- 'git.bat': 'cmd\\git.exe',
- 'gitk.bat': 'cmd\\gitk.exe',
- 'ssh.bat': 'usr\\bin\\ssh.exe',
- 'ssh-keygen.bat': 'usr\\bin\\ssh-keygen.exe',
+ 'git.bat': 'cmd\\git.exe',
+ 'gitk.bat': 'cmd\\gitk.exe',
+ 'ssh.bat': 'usr\\bin\\ssh.exe',
+ 'ssh-keygen.bat': 'usr\\bin\\ssh-keygen.exe',
}
# Accumulated template parameters for generated stubs.
-class Template(collections.namedtuple('Template', (
- 'PYTHON_RELDIR', 'PYTHON_BIN_RELDIR', 'PYTHON_BIN_RELDIR_UNIX',
- 'PYTHON3_BIN_RELDIR', 'PYTHON3_BIN_RELDIR_UNIX', 'GIT_BIN_RELDIR',
- 'GIT_BIN_RELDIR_UNIX', 'GIT_PROGRAM',
- ))):
+class Template(
+ collections.namedtuple('Template', (
+ 'PYTHON_RELDIR',
+ 'PYTHON_BIN_RELDIR',
+ 'PYTHON_BIN_RELDIR_UNIX',
+ 'PYTHON3_BIN_RELDIR',
+ 'PYTHON3_BIN_RELDIR_UNIX',
+ 'GIT_BIN_RELDIR',
+ 'GIT_BIN_RELDIR_UNIX',
+ 'GIT_PROGRAM',
+ ))):
+ @classmethod
+ def empty(cls):
+ return cls(**{k: None for k in cls._fields})
- @classmethod
- def empty(cls):
- return cls(**{k: None for k in cls._fields})
-
- def maybe_install(self, name, dst_path):
- """Installs template |name| to |dst_path| if it has changed.
+ def maybe_install(self, name, dst_path):
+ """Installs template |name| to |dst_path| if it has changed.
This loads the template |name| from THIS_DIR, resolves template parameters,
and installs it to |dst_path|. See `maybe_update` for more information.
@@ -59,14 +63,14 @@ class Template(collections.namedtuple('Template', (
Returns (bool): True if |dst_path| was updated, False otherwise.
"""
- template_path = os.path.join(THIS_DIR, name)
- with open(template_path, 'r', encoding='utf8') as fd:
- t = string.Template(fd.read())
- return maybe_update(t.safe_substitute(self._asdict()), dst_path)
+ template_path = os.path.join(THIS_DIR, name)
+ with open(template_path, 'r', encoding='utf8') as fd:
+ t = string.Template(fd.read())
+ return maybe_update(t.safe_substitute(self._asdict()), dst_path)
def maybe_update(content, dst_path):
- """Writes |content| to |dst_path| if |dst_path| does not already match.
+ """Writes |content| to |dst_path| if |dst_path| does not already match.
This function will ensure that there is a file at |dst_path| containing
|content|. If |dst_path| already exists and contains |content|, no operation
@@ -79,22 +83,22 @@ def maybe_update(content, dst_path):
Returns (bool): True if |dst_path| was updated, False otherwise.
"""
- # If the path already exists and matches the new content, refrain from writing
- # a new one.
- if os.path.exists(dst_path):
- with open(dst_path, 'r', encoding='utf-8') as fd:
- if fd.read() == content:
- return False
+ # If the path already exists and matches the new content, refrain from
+ # writing a new one.
+ if os.path.exists(dst_path):
+ with open(dst_path, 'r', encoding='utf-8') as fd:
+ if fd.read() == content:
+ return False
- logging.debug('Updating %r', dst_path)
- with open(dst_path, 'w', encoding='utf-8') as fd:
- fd.write(content)
- os.chmod(dst_path, 0o755)
- return True
+ logging.debug('Updating %r', dst_path)
+ with open(dst_path, 'w', encoding='utf-8') as fd:
+ fd.write(content)
+ os.chmod(dst_path, 0o755)
+ return True
def maybe_copy(src_path, dst_path):
- """Writes the content of |src_path| to |dst_path| if needed.
+ """Writes the content of |src_path| to |dst_path| if needed.
See `maybe_update` for more information.
@@ -104,13 +108,13 @@ def maybe_copy(src_path, dst_path):
Returns (bool): True if |dst_path| was updated, False otherwise.
"""
- with open(src_path, 'r', encoding='utf-8') as fd:
- content = fd.read()
- return maybe_update(content, dst_path)
+ with open(src_path, 'r', encoding='utf-8') as fd:
+ content = fd.read()
+ return maybe_update(content, dst_path)
def call_if_outdated(stamp_path, stamp_version, fn):
- """Invokes |fn| if the stamp at |stamp_path| doesn't match |stamp_version|.
+ """Invokes |fn| if the stamp at |stamp_path| doesn't match |stamp_version|.
This can be used to keep a filesystem record of whether an operation has been
performed. The record is stored at |stamp_path|. To invalidate a record,
@@ -128,22 +132,22 @@ def call_if_outdated(stamp_path, stamp_version, fn):
Returns (bool): True if an update occurred.
"""
- stamp_version = stamp_version.strip()
- if os.path.isfile(stamp_path):
- with open(stamp_path, 'r', encoding='utf-8') as fd:
- current_version = fd.read().strip()
- if current_version == stamp_version:
- return False
+ stamp_version = stamp_version.strip()
+ if os.path.isfile(stamp_path):
+ with open(stamp_path, 'r', encoding='utf-8') as fd:
+ current_version = fd.read().strip()
+ if current_version == stamp_version:
+ return False
- fn()
+ fn()
- with open(stamp_path, 'w', encoding='utf-8') as fd:
- fd.write(stamp_version)
- return True
+ with open(stamp_path, 'w', encoding='utf-8') as fd:
+ fd.write(stamp_version)
+ return True
def _in_use(path):
- """Checks if a Windows file is in use.
+ """Checks if a Windows file is in use.
When Windows is using an executable, it prevents other writers from
modifying or deleting that executable. We can safely test for an in-use
@@ -152,93 +156,93 @@ def _in_use(path):
Returns (bool): True if the file was in use, False if not.
"""
- try:
- with open(path, 'r+'):
- return False
- except IOError:
- return True
+ try:
+ with open(path, 'r+'):
+ return False
+ except IOError:
+ return True
def _toolchain_in_use(toolchain_path):
- """Returns (bool): True if a toolchain rooted at |path| is in use.
+ """Returns (bool): True if a toolchain rooted at |path| is in use.
"""
- # Look for Python files that may be in use.
- for python_dir in (
- os.path.join(toolchain_path, 'python', 'bin'), # CIPD
- toolchain_path, # Legacy ZIP distributions.
- ):
- for component in (
- os.path.join(python_dir, 'python.exe'),
- os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
+ # Look for Python files that may be in use.
+ for python_dir in (
+ os.path.join(toolchain_path, 'python', 'bin'), # CIPD
+ toolchain_path, # Legacy ZIP distributions.
+ ):
+ for component in (
+ os.path.join(python_dir, 'python.exe'),
+ os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
):
- if os.path.isfile(component) and _in_use(component):
- return True
-    # Look for Python 3 files that may be in use.
- python_dir = os.path.join(toolchain_path, 'python3', 'bin')
- for component in (
- os.path.join(python_dir, 'python3.exe'),
- os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
- ):
- if os.path.isfile(component) and _in_use(component):
- return True
- return False
-
+ if os.path.isfile(component) and _in_use(component):
+ return True
+        # Look for Python 3 files that may be in use.
+ python_dir = os.path.join(toolchain_path, 'python3', 'bin')
+ for component in (
+ os.path.join(python_dir, 'python3.exe'),
+ os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
+ ):
+ if os.path.isfile(component) and _in_use(component):
+ return True
+ return False
def _check_call(argv, stdin_input=None, **kwargs):
- """Wrapper for subprocess.check_call that adds logging."""
- logging.info('running %r', argv)
- if stdin_input is not None:
- kwargs['stdin'] = subprocess.PIPE
- proc = subprocess.Popen(argv, **kwargs)
- proc.communicate(input=stdin_input)
- if proc.returncode:
- raise subprocess.CalledProcessError(proc.returncode, argv, None)
+ """Wrapper for subprocess.check_call that adds logging."""
+ logging.info('running %r', argv)
+ if stdin_input is not None:
+ kwargs['stdin'] = subprocess.PIPE
+ proc = subprocess.Popen(argv, **kwargs)
+ proc.communicate(input=stdin_input)
+ if proc.returncode:
+ raise subprocess.CalledProcessError(proc.returncode, argv, None)
def _safe_rmtree(path):
- if not os.path.exists(path):
- return
+ if not os.path.exists(path):
+ return
- def _make_writable_and_remove(path):
- st = os.stat(path)
- new_mode = st.st_mode | 0o200
- if st.st_mode == new_mode:
- return False
- try:
- os.chmod(path, new_mode)
- os.remove(path)
- return True
- except Exception:
- return False
+ def _make_writable_and_remove(path):
+ st = os.stat(path)
+ new_mode = st.st_mode | 0o200
+ if st.st_mode == new_mode:
+ return False
+ try:
+ os.chmod(path, new_mode)
+ os.remove(path)
+ return True
+ except Exception:
+ return False
- def _on_error(function, path, excinfo):
- if not _make_writable_and_remove(path):
- logging.warning('Failed to %s: %s (%s)', function, path, excinfo)
+ def _on_error(function, path, excinfo):
+ if not _make_writable_and_remove(path):
+ logging.warning('Failed to %s: %s (%s)', function, path, excinfo)
- shutil.rmtree(path, onerror=_on_error)
+ shutil.rmtree(path, onerror=_on_error)
def clean_up_old_installations(skip_dir):
- """Removes Python installations other than |skip_dir|.
+ """Removes Python installations other than |skip_dir|.
This includes an "in-use" check against the "python.exe" in a given directory
to avoid removing Python executables that are currently running. We need
this because our Python bootstrap may be run after (and by) other software
that is using the bootstrapped Python!
"""
- root_contents = os.listdir(ROOT_DIR)
- for f in ('win_tools-*_bin', 'python27*_bin', 'git-*_bin', 'bootstrap-*_bin'):
- for entry in fnmatch.filter(root_contents, f):
- full_entry = os.path.join(ROOT_DIR, entry)
- if full_entry == skip_dir or not os.path.isdir(full_entry):
- continue
+ root_contents = os.listdir(ROOT_DIR)
+ for f in ('win_tools-*_bin', 'python27*_bin', 'git-*_bin',
+ 'bootstrap-*_bin'):
+ for entry in fnmatch.filter(root_contents, f):
+ full_entry = os.path.join(ROOT_DIR, entry)
+ if full_entry == skip_dir or not os.path.isdir(full_entry):
+ continue
- logging.info('Cleaning up old installation %r', entry)
- if not _toolchain_in_use(full_entry):
- _safe_rmtree(full_entry)
- else:
- logging.info('Toolchain at %r is in-use; skipping', full_entry)
+ logging.info('Cleaning up old installation %r', entry)
+ if not _toolchain_in_use(full_entry):
+ _safe_rmtree(full_entry)
+ else:
+ logging.info('Toolchain at %r is in-use; skipping', full_entry)
# Version of "git_postprocess" system configuration (see |git_postprocess|).
@@ -246,111 +250,110 @@ GIT_POSTPROCESS_VERSION = '2'
def git_get_mingw_dir(git_directory):
- """Returns (str) The "mingw" directory in a Git installation, or None."""
- for candidate in ('mingw64', 'mingw32'):
- mingw_dir = os.path.join(git_directory, candidate)
- if os.path.isdir(mingw_dir):
- return mingw_dir
- return None
+ """Returns (str) The "mingw" directory in a Git installation, or None."""
+ for candidate in ('mingw64', 'mingw32'):
+ mingw_dir = os.path.join(git_directory, candidate)
+ if os.path.isdir(mingw_dir):
+ return mingw_dir
+ return None
def git_postprocess(template, git_directory):
-  # Update depot_tools files for "git help <command>"
- mingw_dir = git_get_mingw_dir(git_directory)
- if mingw_dir:
- docsrc = os.path.join(ROOT_DIR, 'man', 'html')
- git_docs_dir = os.path.join(mingw_dir, 'share', 'doc', 'git-doc')
- for name in os.listdir(docsrc):
- maybe_copy(
- os.path.join(docsrc, name),
- os.path.join(git_docs_dir, name))
- else:
- logging.info('Could not find mingw directory for %r.', git_directory)
+    # Update depot_tools files for "git help <command>"
+ mingw_dir = git_get_mingw_dir(git_directory)
+ if mingw_dir:
+ docsrc = os.path.join(ROOT_DIR, 'man', 'html')
+ git_docs_dir = os.path.join(mingw_dir, 'share', 'doc', 'git-doc')
+ for name in os.listdir(docsrc):
+ maybe_copy(os.path.join(docsrc, name),
+ os.path.join(git_docs_dir, name))
+ else:
+ logging.info('Could not find mingw directory for %r.', git_directory)
- # Create Git templates and configure its base layout.
- for stub_name, relpath in WIN_GIT_STUBS.items():
- stub_template = template._replace(GIT_PROGRAM=relpath)
- stub_template.maybe_install(
- 'git.template.bat',
- os.path.join(ROOT_DIR, stub_name))
+ # Create Git templates and configure its base layout.
+ for stub_name, relpath in WIN_GIT_STUBS.items():
+ stub_template = template._replace(GIT_PROGRAM=relpath)
+ stub_template.maybe_install('git.template.bat',
+ os.path.join(ROOT_DIR, stub_name))
- # Set-up our system configuration environment. The following set of
- # parameters is versioned by "GIT_POSTPROCESS_VERSION". If they change,
- # update "GIT_POSTPROCESS_VERSION" accordingly.
- def configure_git_system():
- git_bat_path = os.path.join(ROOT_DIR, 'git.bat')
- _check_call([git_bat_path, 'config', '--system', 'core.autocrlf', 'false'])
- _check_call([git_bat_path, 'config', '--system', 'core.filemode', 'false'])
- _check_call([git_bat_path, 'config', '--system', 'core.preloadindex',
- 'true'])
- _check_call([git_bat_path, 'config', '--system', 'core.fscache', 'true'])
- _check_call([git_bat_path, 'config', '--system', 'protocol.version', '2'])
+ # Set-up our system configuration environment. The following set of
+ # parameters is versioned by "GIT_POSTPROCESS_VERSION". If they change,
+ # update "GIT_POSTPROCESS_VERSION" accordingly.
+ def configure_git_system():
+ git_bat_path = os.path.join(ROOT_DIR, 'git.bat')
+ _check_call(
+ [git_bat_path, 'config', '--system', 'core.autocrlf', 'false'])
+ _check_call(
+ [git_bat_path, 'config', '--system', 'core.filemode', 'false'])
+ _check_call(
+ [git_bat_path, 'config', '--system', 'core.preloadindex', 'true'])
+ _check_call(
+ [git_bat_path, 'config', '--system', 'core.fscache', 'true'])
+ _check_call(
+ [git_bat_path, 'config', '--system', 'protocol.version', '2'])
- call_if_outdated(
- os.path.join(git_directory, '.git_postprocess'),
- GIT_POSTPROCESS_VERSION,
- configure_git_system)
+ call_if_outdated(os.path.join(git_directory, '.git_postprocess'),
+ GIT_POSTPROCESS_VERSION, configure_git_system)
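
configure_git_system() is intentionally cheap to skip: call_if_outdated (defined earlier in this file, outside this hunk) only invokes it when the version recorded in the .git_postprocess stamp file differs from GIT_POSTPROCESS_VERSION. A minimal sketch of that stamp-file pattern, assuming only the behavior described by the comment above (call_if_outdated_sketch is a hypothetical stand-in, not the real helper):

import os

def call_if_outdated_sketch(stamp_path, version, fn):
    # Read the previously recorded version, if any.
    recorded = None
    if os.path.exists(stamp_path):
        with open(stamp_path, encoding='utf-8') as fh:
            recorded = fh.read().strip()
    # Run the callback only when the recorded version is missing or stale,
    # then write the new version so the next run becomes a no-op.
    if recorded != version:
        fn()
        with open(stamp_path, 'w', encoding='utf-8') as fh:
            fh.write(version)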
def main(argv):
- parser = argparse.ArgumentParser()
- parser.add_argument('--verbose', action='store_true')
- parser.add_argument('--bootstrap-name', required=True,
- help='The directory of the Python installation.')
- args = parser.parse_args(argv)
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--verbose', action='store_true')
+ parser.add_argument('--bootstrap-name',
+ required=True,
+ help='The directory of the Python installation.')
+ args = parser.parse_args(argv)
- logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
- template = Template.empty()._replace(
- PYTHON_RELDIR=os.path.join(args.bootstrap_name, 'python'),
- PYTHON_BIN_RELDIR=os.path.join(args.bootstrap_name, 'python', 'bin'),
- PYTHON_BIN_RELDIR_UNIX=posixpath.join(
- args.bootstrap_name, 'python', 'bin'),
- PYTHON3_BIN_RELDIR=os.path.join(args.bootstrap_name, 'python3', 'bin'),
- PYTHON3_BIN_RELDIR_UNIX=posixpath.join(
- args.bootstrap_name, 'python3', 'bin'),
- GIT_BIN_RELDIR=os.path.join(args.bootstrap_name, 'git'),
- GIT_BIN_RELDIR_UNIX=posixpath.join(args.bootstrap_name, 'git'))
+ template = Template.empty()._replace(
+ PYTHON_RELDIR=os.path.join(args.bootstrap_name, 'python'),
+ PYTHON_BIN_RELDIR=os.path.join(args.bootstrap_name, 'python', 'bin'),
+ PYTHON_BIN_RELDIR_UNIX=posixpath.join(args.bootstrap_name, 'python',
+ 'bin'),
+ PYTHON3_BIN_RELDIR=os.path.join(args.bootstrap_name, 'python3', 'bin'),
+ PYTHON3_BIN_RELDIR_UNIX=posixpath.join(args.bootstrap_name, 'python3',
+ 'bin'),
+ GIT_BIN_RELDIR=os.path.join(args.bootstrap_name, 'git'),
+ GIT_BIN_RELDIR_UNIX=posixpath.join(args.bootstrap_name, 'git'))
- bootstrap_dir = os.path.join(ROOT_DIR, args.bootstrap_name)
+ bootstrap_dir = os.path.join(ROOT_DIR, args.bootstrap_name)
- # Clean up any old Python and Git installations.
- clean_up_old_installations(bootstrap_dir)
+ # Clean up any old Python and Git installations.
+ clean_up_old_installations(bootstrap_dir)
- if IS_WIN:
- git_postprocess(template, os.path.join(bootstrap_dir, 'git'))
- templates = [
- ('git-bash.template.sh', 'git-bash', ROOT_DIR),
- ('python27.bat', 'python.bat', ROOT_DIR),
- ('python3.bat', 'python3.bat', ROOT_DIR),
- ]
- for src_name, dst_name, dst_dir in templates:
- # Re-evaluate and regenerate our root templated files.
- template.maybe_install(src_name, os.path.join(dst_dir, dst_name))
+ if IS_WIN:
+ git_postprocess(template, os.path.join(bootstrap_dir, 'git'))
+ templates = [
+ ('git-bash.template.sh', 'git-bash', ROOT_DIR),
+ ('python27.bat', 'python.bat', ROOT_DIR),
+ ('python3.bat', 'python3.bat', ROOT_DIR),
+ ]
+ for src_name, dst_name, dst_dir in templates:
+ # Re-evaluate and regenerate our root templated files.
+ template.maybe_install(src_name, os.path.join(dst_dir, dst_name))
- # Emit our Python bin depot-tools-relative directory. This is read by
- # python.bat, python3.bat, vpython[.bat] and vpython3[.bat] to identify the
- # path of the current Python installation.
- #
- # We use this indirection so that upgrades can change this pointer to
- # redirect "python.bat" to a new Python installation. We can't just update
- # "python.bat" because batch file executions reload the batch file and seek
- # to the previous cursor in between every command, so changing the batch
- # file contents could invalidate any existing executions.
- #
- # The intention is that the batch file itself never needs to change when
- # switching Python versions.
+ # Emit our Python bin depot-tools-relative directory. This is read by
+ # python.bat, python3.bat, vpython[.bat] and vpython3[.bat] to identify the
+ # path of the current Python installation.
+ #
+ # We use this indirection so that upgrades can change this pointer to
+ # redirect "python.bat" to a new Python installation. We can't just update
+ # "python.bat" because batch file executions reload the batch file and seek
+ # to the previous cursor in between every command, so changing the batch
+ # file contents could invalidate any existing executions.
+ #
+ # The intention is that the batch file itself never needs to change when
+ # switching Python versions.
- maybe_update(
- template.PYTHON_BIN_RELDIR,
- os.path.join(ROOT_DIR, 'python_bin_reldir.txt'))
+ maybe_update(template.PYTHON_BIN_RELDIR,
+ os.path.join(ROOT_DIR, 'python_bin_reldir.txt'))
- maybe_update(
- template.PYTHON3_BIN_RELDIR,
- os.path.join(ROOT_DIR, 'python3_bin_reldir.txt'))
+ maybe_update(template.PYTHON3_BIN_RELDIR,
+ os.path.join(ROOT_DIR, 'python3_bin_reldir.txt'))
- return 0
+ return 0
if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+ sys.exit(main(sys.argv[1:]))
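
The pointer-file indirection described in main()'s comments means the batch wrappers never embed a Python path of their own; they just read python_bin_reldir.txt or python3_bin_reldir.txt at run time. A minimal sketch of a consumer, assuming only that the file holds a directory relative to the depot_tools root (resolve_python3_bin is illustrative; the real consumers are python.bat, python3.bat and the vpython wrappers, whose contents are not part of this diff):

import os

def resolve_python3_bin(depot_tools_dir):
    # python3_bin_reldir.txt is rewritten by maybe_update() above; repointing
    # that one file switches every wrapper to a new installation at once.
    pointer = os.path.join(depot_tools_dir, 'python3_bin_reldir.txt')
    with open(pointer, encoding='utf-8') as fh:
        reldir = fh.read().strip()
    return os.path.join(depot_tools_dir, reldir)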
diff --git a/breakpad.py b/breakpad.py
index 6d4dd1626d..cef1b3ee54 100644
--- a/breakpad.py
+++ b/breakpad.py
@@ -1,7 +1,6 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""This file remains here because of multiple find_depot_tools.py scripts
that attempt to import it as a way to find depot_tools.
diff --git a/clang_format.py b/clang_format.py
index 4e7b1baadd..bfe3b4b47e 100755
--- a/clang_format.py
+++ b/clang_format.py
@@ -2,7 +2,6 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Redirects to the version of clang-format checked into the Chrome tree.
clang-format binaries are pulled down from Google Cloud Storage whenever you
@@ -18,77 +17,81 @@ import sys
class NotFoundError(Exception):
- """A file could not be found."""
- def __init__(self, e):
- Exception.__init__(self,
- 'Problem while looking for clang-format in Chromium source tree:\n'
- '%s' % e)
+ """A file could not be found."""
+ def __init__(self, e):
+ Exception.__init__(
+ self,
+ 'Problem while looking for clang-format in Chromium source tree:\n'
+ '%s' % e)
def FindClangFormatToolInChromiumTree():
- """Return a path to the clang-format executable, or die trying."""
- primary_solution_path = gclient_paths.GetPrimarySolutionPath()
- if primary_solution_path:
- bin_path = os.path.join(primary_solution_path, 'third_party',
- 'clang-format',
- 'clang-format' + gclient_paths.GetExeSuffix())
- if os.path.exists(bin_path):
- return bin_path
+ """Return a path to the clang-format executable, or die trying."""
+ primary_solution_path = gclient_paths.GetPrimarySolutionPath()
+ if primary_solution_path:
+ bin_path = os.path.join(primary_solution_path, 'third_party',
+ 'clang-format',
+ 'clang-format' + gclient_paths.GetExeSuffix())
+ if os.path.exists(bin_path):
+ return bin_path
- bin_path = gclient_paths.GetBuildtoolsPlatformBinaryPath()
- if not bin_path:
- raise NotFoundError(
- 'Could not find checkout in any parent of the current path.\n'
- 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium checkout.')
+ bin_path = gclient_paths.GetBuildtoolsPlatformBinaryPath()
+ if not bin_path:
+ raise NotFoundError(
+ 'Could not find checkout in any parent of the current path.\n'
+ 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium '
+ 'checkout.')
- tool_path = os.path.join(bin_path,
- 'clang-format' + gclient_paths.GetExeSuffix())
- if not os.path.exists(tool_path):
- raise NotFoundError('File does not exist: %s' % tool_path)
- return tool_path
+ tool_path = os.path.join(bin_path,
+ 'clang-format' + gclient_paths.GetExeSuffix())
+ if not os.path.exists(tool_path):
+ raise NotFoundError('File does not exist: %s' % tool_path)
+ return tool_path
def FindClangFormatScriptInChromiumTree(script_name):
- """Return a path to a clang-format helper script, or die trying."""
- primary_solution_path = gclient_paths.GetPrimarySolutionPath()
- if primary_solution_path:
- script_path = os.path.join(primary_solution_path, 'third_party',
- 'clang-format', 'script', script_name)
- if os.path.exists(script_path):
- return script_path
+ """Return a path to a clang-format helper script, or die trying."""
+ primary_solution_path = gclient_paths.GetPrimarySolutionPath()
+ if primary_solution_path:
+ script_path = os.path.join(primary_solution_path, 'third_party',
+ 'clang-format', 'script', script_name)
+ if os.path.exists(script_path):
+ return script_path
- tools_path = gclient_paths.GetBuildtoolsPath()
- if not tools_path:
- raise NotFoundError(
- 'Could not find checkout in any parent of the current path.\n',
- 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium checkout.')
+ tools_path = gclient_paths.GetBuildtoolsPath()
+ if not tools_path:
+ raise NotFoundError(
+            'Could not find checkout in any parent of the current path.\n'
+ 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium '
+ 'checkout.')
- script_path = os.path.join(tools_path, 'clang_format', 'script', script_name)
- if not os.path.exists(script_path):
- raise NotFoundError('File does not exist: %s' % script_path)
- return script_path
+ script_path = os.path.join(tools_path, 'clang_format', 'script',
+ script_name)
+ if not os.path.exists(script_path):
+ raise NotFoundError('File does not exist: %s' % script_path)
+ return script_path
def main(args):
- try:
- tool = FindClangFormatToolInChromiumTree()
- except NotFoundError as e:
- sys.stderr.write("%s\n" % str(e))
- return 1
+ try:
+ tool = FindClangFormatToolInChromiumTree()
+ except NotFoundError as e:
+ sys.stderr.write("%s\n" % str(e))
+ return 1
- # Add some visibility to --help showing where the tool lives, since this
- # redirection can be a little opaque.
- help_syntax = ('-h', '--help', '-help', '-help-list', '--help-list')
- if any(match in args for match in help_syntax):
- print(
- '\nDepot tools redirects you to the clang-format at:\n %s\n' % tool)
+ # Add some visibility to --help showing where the tool lives, since this
+ # redirection can be a little opaque.
+ help_syntax = ('-h', '--help', '-help', '-help-list', '--help-list')
+ if any(match in args for match in help_syntax):
+ print('\nDepot tools redirects you to the clang-format at:\n %s\n' %
+ tool)
- return subprocess.call([tool] + args)
+ return subprocess.call([tool] + args)
if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv[1:]))
- except KeyboardInterrupt:
- sys.stderr.write('interrupted\n')
- sys.exit(1)
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except KeyboardInterrupt:
+ sys.stderr.write('interrupted\n')
+ sys.exit(1)
diff --git a/compile_single_file.py b/compile_single_file.py
index c143fed55d..b766e0073b 100644
--- a/compile_single_file.py
+++ b/compile_single_file.py
@@ -10,64 +10,64 @@ import os
import subprocess
import sys
-
DEPOT_TOOLS_DIR = os.path.dirname(os.path.realpath(__file__))
+
# This function is inspired from the one in src/tools/vim/ninja-build.vim in the
# Chromium repository.
def path_to_source_root(path):
- """Returns the absolute path to the chromium source root."""
- candidate = os.path.dirname(path)
- # This is a list of directories that need to identify the src directory. The
- # shorter it is, the more likely it's wrong (checking for just
- # "build/common.gypi" would find "src/v8" for files below "src/v8", as
- # "src/v8/build/common.gypi" exists). The longer it is, the more likely it is
- # to break when we rename directories.
- fingerprints = ['chrome', 'net', 'v8', 'build', 'skia']
- while candidate and not all(
- os.path.isdir(os.path.join(candidate, fp)) for fp in fingerprints):
- new_candidate = os.path.dirname(candidate)
- if new_candidate == candidate:
- raise Exception("Couldn't find source-dir from %s" % path)
- candidate = os.path.dirname(candidate)
- return candidate
+ """Returns the absolute path to the chromium source root."""
+ candidate = os.path.dirname(path)
+ # This is a list of directories that need to identify the src directory. The
+ # shorter it is, the more likely it's wrong (checking for just
+ # "build/common.gypi" would find "src/v8" for files below "src/v8", as
+ # "src/v8/build/common.gypi" exists). The longer it is, the more likely it
+ # is to break when we rename directories.
+ fingerprints = ['chrome', 'net', 'v8', 'build', 'skia']
+ while candidate and not all(
+ os.path.isdir(os.path.join(candidate, fp)) for fp in fingerprints):
+ new_candidate = os.path.dirname(candidate)
+ if new_candidate == candidate:
+ raise Exception("Couldn't find source-dir from %s" % path)
+ candidate = os.path.dirname(candidate)
+ return candidate
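
The fingerprint-directory walk in path_to_source_root() can be exercised without a Chromium checkout; a self-contained sketch of the same technique (directory names below are made up for the example):

import os
import tempfile

def find_root(path, fingerprints):
    # Walk upwards until a directory containing every fingerprint
    # subdirectory is found, mirroring path_to_source_root above.
    candidate = os.path.dirname(path)
    while candidate and not all(
            os.path.isdir(os.path.join(candidate, fp)) for fp in fingerprints):
        parent = os.path.dirname(candidate)
        if parent == candidate:
            raise Exception("Couldn't find source-dir from %s" % path)
        candidate = parent
    return candidate

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, 'chrome'))
    os.makedirs(os.path.join(root, 'net'))
    deep = os.path.join(root, 'chrome', 'browser', 'ui')
    os.makedirs(deep)
    assert find_root(os.path.join(deep, 'browser.cc'), ['chrome', 'net']) == root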
def main():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--file-path',
- help='The file path, could be absolute or relative to the current '
- 'directory.',
- required=True)
- parser.add_argument(
- '--build-dir',
- help='The build directory, relative to the source directory.',
- required=True)
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--file-path',
+ help='The file path, could be absolute or relative to the current '
+ 'directory.',
+ required=True)
+ parser.add_argument(
+ '--build-dir',
+ help='The build directory, relative to the source directory.',
+ required=True)
- options = parser.parse_args()
+ options = parser.parse_args()
- src_dir = path_to_source_root(os.path.abspath(options.file_path))
- abs_build_dir = os.path.join(src_dir, options.build_dir)
- src_relpath = os.path.relpath(options.file_path, abs_build_dir)
+ src_dir = path_to_source_root(os.path.abspath(options.file_path))
+ abs_build_dir = os.path.join(src_dir, options.build_dir)
+ src_relpath = os.path.relpath(options.file_path, abs_build_dir)
- print('Building %s' % options.file_path)
+ print('Building %s' % options.file_path)
- carets = '^'
- if sys.platform == 'win32':
- # The caret character has to be escaped on Windows as it's an escape
- # character.
- carets = '^^'
+ carets = '^'
+ if sys.platform == 'win32':
+ # The caret character has to be escaped on Windows as it's an escape
+ # character.
+ carets = '^^'
- command = [
- 'python3',
- os.path.join(DEPOT_TOOLS_DIR, 'ninja.py'), '-C', abs_build_dir,
- '%s%s' % (src_relpath, carets)
- ]
- # |shell| should be set to True on Windows otherwise the carets characters
- # get dropped from the command line.
- return subprocess.call(command, shell=sys.platform=='win32')
+ command = [
+ 'python3',
+ os.path.join(DEPOT_TOOLS_DIR, 'ninja.py'), '-C', abs_build_dir,
+ '%s%s' % (src_relpath, carets)
+ ]
+ # |shell| should be set to True on Windows otherwise the carets characters
+ # get dropped from the command line.
+ return subprocess.call(command, shell=sys.platform == 'win32')
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
diff --git a/cpplint.py b/cpplint.py
index 62662bf5ad..a8a8761243 100755
--- a/cpplint.py
+++ b/cpplint.py
@@ -29,7 +29,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: skip-file
-
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
@@ -54,7 +53,6 @@ import string
import sys
import unicodedata
-
_USAGE = r"""
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
@@ -244,14 +242,14 @@ _ERROR_CATEGORIES = [
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
- ]
+]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
'readability/function',
- ]
+]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
@@ -262,12 +260,12 @@ _DEFAULT_FILTERS = ['-build/include_alpha']
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
- ]
+]
# The default list of categories suppressed for Linux Kernel files.
_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
'whitespace/tab',
- ]
+]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
@@ -416,7 +414,7 @@ _CPP_HEADERS = frozenset([
'cuchar',
'cwchar',
'cwctype',
- ])
+])
# List of functions from <type_traits>. See [meta.type.synop]
_TYPE_TRAITS = [
@@ -644,18 +642,16 @@ _TYPE_TRAITS = [
_TYPE_TRAITS_RE = re.compile(r'\b::(?:' + ('|'.join(_TYPE_TRAITS)) + ')<')
# Type names
-_TYPES = re.compile(
- r'^(?:'
- # [dcl.type.simple]
- r'(char(16_t|32_t)?)|wchar_t|'
- r'bool|short|int|long|signed|unsigned|float|double|'
- # [support.types]
- r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
- # [cstdint.syn]
- r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
- r'(u?int(max|ptr)_t)|'
- r')$')
-
+_TYPES = re.compile(r'^(?:'
+ # [dcl.type.simple]
+ r'(char(16_t|32_t)?)|wchar_t|'
+ r'bool|short|int|long|signed|unsigned|float|double|'
+ # [support.types]
+ r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
+ # [cstdint.syn]
+ r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
+ r'(u?int(max|ptr)_t)|'
+ r')$')
# These headers are excluded from [build/include], [build/include_directory],
# and [build/include_order] checks:
@@ -674,27 +670,28 @@ _EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
# Assertion macros. These are defined in base/logging.h and
# testing/base/public/gunit.h.
_CHECK_MACROS = [
- 'DCHECK', 'CHECK',
- 'EXPECT_TRUE', 'ASSERT_TRUE',
- 'EXPECT_FALSE', 'ASSERT_FALSE',
- ]
+ 'DCHECK',
+ 'CHECK',
+ 'EXPECT_TRUE',
+ 'ASSERT_TRUE',
+ 'EXPECT_FALSE',
+ 'ASSERT_FALSE',
+]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
-for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
- ('>=', 'GE'), ('>', 'GT'),
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
- _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
- _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
- _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
- _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+ _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
+ _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
-for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
- ('>=', 'LT'), ('>', 'LE'),
- ('<=', 'GT'), ('<', 'GE')]:
- _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
- _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'),
+ ('>', 'LE'), ('<=', 'GT'), ('<', 'GE')]:
+ _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
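
The two loops above fill in a macro/operator lookup table that lets cpplint suggest the typed assertion (e.g. CHECK_EQ) in place of a bare comparison inside CHECK(); the FALSE variants invert the comparison. A standalone restatement with local names and a subset of the macros, to make the inversion explicit:

table = {m: {} for m in ('CHECK', 'EXPECT_TRUE', 'EXPECT_FALSE')}
for op, rep in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'),
                ('<=', 'LE'), ('<', 'LT')]:
    table['CHECK'][op] = 'CHECK_' + rep
    table['EXPECT_TRUE'][op] = 'EXPECT_' + rep
for op, inv in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'), ('>', 'LE'),
                ('<=', 'GT'), ('<', 'GE')]:
    table['EXPECT_FALSE'][op] = 'EXPECT_' + inv

# CHECK(a == b) maps to CHECK_EQ(a, b); EXPECT_FALSE(a < b) inverts the
# comparison and maps to EXPECT_GE(a, b).
assert table['CHECK']['=='] == 'CHECK_EQ'
assert table['EXPECT_FALSE']['<'] == 'EXPECT_GE'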
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
@@ -713,7 +710,7 @@ _ALT_TOKEN_REPLACEMENT = {
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
- }
+}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
@@ -723,7 +720,6 @@ _ALT_TOKEN_REPLACEMENT = {
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
-
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
@@ -733,10 +729,10 @@ _POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
-_NO_ASM = 0 # Outside of inline assembly block
-_INSIDE_ASM = 1 # Inside inline assembly block
-_END_ASM = 2 # Last line of inline assembly block
-_BLOCK_ASM = 3 # The whole block is an inline assembly block
+_NO_ASM = 0 # Outside of inline assembly block
+_INSIDE_ASM = 1 # Inside inline assembly block
+_END_ASM = 2 # Last line of inline assembly block
+_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
@@ -779,7 +775,7 @@ _global_error_suppressions = {}
def ParseNolintSuppressions(filename, raw_line, linenum, error):
- """Updates the global list of line error-suppressions.
+ """Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
@@ -791,27 +787,28 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error):
linenum: int, the number of the current line.
error: function, an error handler.
"""
- matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
- if matched:
- if matched.group(1):
- suppressed_line = linenum + 1
- else:
- suppressed_line = linenum
- category = matched.group(2)
- if category in (None, '(*)'): # => "suppress all"
- _error_suppressions.setdefault(None, set()).add(suppressed_line)
- else:
- if category.startswith('(') and category.endswith(')'):
- category = category[1:-1]
- if category in _ERROR_CATEGORIES:
- _error_suppressions.setdefault(category, set()).add(suppressed_line)
- elif category not in _LEGACY_ERROR_CATEGORIES:
- error(filename, linenum, 'readability/nolint', 5,
- 'Unknown NOLINT error category: %s' % category)
+ matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
+ if matched:
+ if matched.group(1):
+ suppressed_line = linenum + 1
+ else:
+ suppressed_line = linenum
+ category = matched.group(2)
+ if category in (None, '(*)'): # => "suppress all"
+ _error_suppressions.setdefault(None, set()).add(suppressed_line)
+ else:
+ if category.startswith('(') and category.endswith(')'):
+ category = category[1:-1]
+ if category in _ERROR_CATEGORIES:
+ _error_suppressions.setdefault(category,
+ set()).add(suppressed_line)
+ elif category not in _LEGACY_ERROR_CATEGORIES:
+ error(filename, linenum, 'readability/nolint', 5,
+ 'Unknown NOLINT error category: %s' % category)
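
The NOLINT forms recognized by the regexp above can be exercised in isolation; a quick standalone check, using re.search where cpplint would call its cached Search helper:

import re

nolint_re = r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?'

assert re.search(nolint_re, 'int x = (int)y;  // NOLINT')
m = re.search(nolint_re, 'x = (int)y;  // NOLINTNEXTLINE(readability/casting)')
assert m.group(1) == 'NEXTLINE'
assert m.group(2) == '(readability/casting)'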
def ProcessGlobalSuppresions(lines):
- """Updates the list of global error suppressions.
+ """Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
@@ -819,23 +816,23 @@ def ProcessGlobalSuppresions(lines):
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
"""
- for line in lines:
- if _SEARCH_C_FILE.search(line):
- for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
- _global_error_suppressions[category] = True
- if _SEARCH_KERNEL_FILE.search(line):
- for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
- _global_error_suppressions[category] = True
+ for line in lines:
+ if _SEARCH_C_FILE.search(line):
+ for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
+ _global_error_suppressions[category] = True
+ if _SEARCH_KERNEL_FILE.search(line):
+ for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
+ _global_error_suppressions[category] = True
def ResetNolintSuppressions():
- """Resets the set of NOLINT suppressions to empty."""
- _error_suppressions.clear()
- _global_error_suppressions.clear()
+ """Resets the set of NOLINT suppressions to empty."""
+ _error_suppressions.clear()
+ _global_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
- """Returns true if the specified error category is suppressed on this line.
+ """Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
@@ -847,23 +844,23 @@ def IsErrorSuppressedByNolint(category, linenum):
bool, True iff the error should be suppressed due to a NOLINT comment or
global suppression.
"""
- return (_global_error_suppressions.get(category, False) or
- linenum in _error_suppressions.get(category, set()) or
- linenum in _error_suppressions.get(None, set()))
+ return (_global_error_suppressions.get(category, False)
+ or linenum in _error_suppressions.get(category, set())
+ or linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
- """Matches the string with the pattern, caching the compiled regexp."""
- # The regexp compilation caching is inlined in both Match and Search for
- # performance reasons; factoring it out into a separate function turns out
- # to be noticeably expensive.
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].match(s)
+ """Matches the string with the pattern, caching the compiled regexp."""
+ # The regexp compilation caching is inlined in both Match and Search for
+ # performance reasons; factoring it out into a separate function turns out
+ # to be noticeably expensive.
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
- """Replaces instances of pattern in a string with a replacement.
+ """Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
@@ -875,25 +872,25 @@ def ReplaceAll(pattern, rep, s):
Returns:
string with replacements made (or original string if no replacements)
"""
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].sub(rep, s)
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
- """Searches the string for the pattern, caching the compiled regexp."""
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].search(s)
+ """Searches the string for the pattern, caching the compiled regexp."""
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].search(s)
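
As the comment in Match() notes, the compile cache is kept inline in Match, ReplaceAll and Search because factoring it into a shared helper was measurably slower. A minimal self-contained sketch of the same pattern, using re.compile in place of sre_compile.compile:

import re

_cache = {}

def cached_match(pattern, s):
    # The dict check stays inline on the hot path, mirroring Match above.
    if pattern not in _cache:
        _cache[pattern] = re.compile(pattern)
    return _cache[pattern].match(s)

assert cached_match(r'\d+', '42abc')
assert cached_match(r'\d+', 'abc42') is None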
def _IsSourceExtension(s):
- """File extension (excluding dot) matches a source file extension."""
- return s in ('c', 'cc', 'cpp', 'cxx')
+ """File extension (excluding dot) matches a source file extension."""
+ return s in ('c', 'cc', 'cpp', 'cxx')
class _IncludeState(object):
- """Tracks line numbers for includes, and the order in which includes appear.
+ """Tracks line numbers for includes, and the order in which includes appear.
include_list contains list of lists of (header, line number) pairs.
It's a lists of lists rather than just one flat list to make it
@@ -904,35 +901,35 @@ class _IncludeState(object):
raise an _IncludeError with an appropriate error message.
"""
- # self._section will move monotonically through this set. If it ever
- # needs to move backwards, CheckNextIncludeOrder will raise an error.
- _INITIAL_SECTION = 0
- _MY_H_SECTION = 1
- _C_SECTION = 2
- _CPP_SECTION = 3
- _OTHER_H_SECTION = 4
+ # self._section will move monotonically through this set. If it ever
+ # needs to move backwards, CheckNextIncludeOrder will raise an error.
+ _INITIAL_SECTION = 0
+ _MY_H_SECTION = 1
+ _C_SECTION = 2
+ _CPP_SECTION = 3
+ _OTHER_H_SECTION = 4
- _TYPE_NAMES = {
- _C_SYS_HEADER: 'C system header',
- _CPP_SYS_HEADER: 'C++ system header',
- _LIKELY_MY_HEADER: 'header this file implements',
- _POSSIBLE_MY_HEADER: 'header this file may implement',
- _OTHER_HEADER: 'other header',
- }
- _SECTION_NAMES = {
- _INITIAL_SECTION: "... nothing. (This can't be an error.)",
- _MY_H_SECTION: 'a header this file implements',
- _C_SECTION: 'C system header',
- _CPP_SECTION: 'C++ system header',
- _OTHER_H_SECTION: 'other header',
- }
+ _TYPE_NAMES = {
+ _C_SYS_HEADER: 'C system header',
+ _CPP_SYS_HEADER: 'C++ system header',
+ _LIKELY_MY_HEADER: 'header this file implements',
+ _POSSIBLE_MY_HEADER: 'header this file may implement',
+ _OTHER_HEADER: 'other header',
+ }
+ _SECTION_NAMES = {
+ _INITIAL_SECTION: "... nothing. (This can't be an error.)",
+ _MY_H_SECTION: 'a header this file implements',
+ _C_SECTION: 'C system header',
+ _CPP_SECTION: 'C++ system header',
+ _OTHER_H_SECTION: 'other header',
+ }
- def __init__(self):
- self.include_list = [[]]
- self.ResetSection('')
+ def __init__(self):
+ self.include_list = [[]]
+ self.ResetSection('')
- def FindHeader(self, header):
- """Check if a header has already been included.
+ def FindHeader(self, header):
+ """Check if a header has already been included.
Args:
header: header to check.
@@ -940,35 +937,35 @@ class _IncludeState(object):
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
- for section_list in self.include_list:
- for f in section_list:
- if f[0] == header:
- return f[1]
- return -1
+ for section_list in self.include_list:
+ for f in section_list:
+ if f[0] == header:
+ return f[1]
+ return -1
- def ResetSection(self, directive):
- """Reset section checking for preprocessor directive.
+ def ResetSection(self, directive):
+ """Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
- # The name of the current section.
- self._section = self._INITIAL_SECTION
- # The path of last found header.
- self._last_header = ''
+ # The name of the current section.
+ self._section = self._INITIAL_SECTION
+ # The path of last found header.
+ self._last_header = ''
- # Update list of includes. Note that we never pop from the
- # include list.
- if directive in ('if', 'ifdef', 'ifndef'):
- self.include_list.append([])
- elif directive in ('else', 'elif'):
- self.include_list[-1] = []
+ # Update list of includes. Note that we never pop from the
+ # include list.
+ if directive in ('if', 'ifdef', 'ifndef'):
+ self.include_list.append([])
+ elif directive in ('else', 'elif'):
+ self.include_list[-1] = []
- def SetLastHeader(self, header_path):
- self._last_header = header_path
+ def SetLastHeader(self, header_path):
+ self._last_header = header_path
- def CanonicalizeAlphabeticalOrder(self, header_path):
- """Returns a path canonicalized for alphabetical comparison.
+ def CanonicalizeAlphabeticalOrder(self, header_path):
+ """Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
@@ -980,10 +977,10 @@ class _IncludeState(object):
Returns:
Canonicalized path.
"""
- return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
+ return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
- def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
- """Check if a header is in alphabetical order with the previous header.
+ def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
+ """Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -993,18 +990,19 @@ class _IncludeState(object):
Returns:
Returns true if the header is in alphabetical order.
"""
- # If previous section is different from current section, _last_header will
- # be reset to empty string, so it's always less than current header.
- #
- # If previous line was a blank line, assume that the headers are
- # intentionally sorted the way they are.
- if (self._last_header > header_path and
- Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
- return False
- return True
+ # If previous section is different from current section, _last_header
+ # will be reset to empty string, so it's always less than current
+ # header.
+ #
+ # If previous line was a blank line, assume that the headers are
+ # intentionally sorted the way they are.
+ if (self._last_header > header_path and Match(
+ r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
+ return False
+ return True
- def CheckNextIncludeOrder(self, header_type):
- """Returns a non-empty error message if the next header is out of order.
+ def CheckNextIncludeOrder(self, header_type):
+ """Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
@@ -1017,80 +1015,79 @@ class _IncludeState(object):
error message describing what's wrong.
"""
- error_message = ('Found %s after %s' %
- (self._TYPE_NAMES[header_type],
- self._SECTION_NAMES[self._section]))
+ error_message = (
+ 'Found %s after %s' %
+ (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section]))
- last_section = self._section
+ last_section = self._section
- if header_type == _C_SYS_HEADER:
- if self._section <= self._C_SECTION:
- self._section = self._C_SECTION
- else:
- self._last_header = ''
- return error_message
- elif header_type == _CPP_SYS_HEADER:
- if self._section <= self._CPP_SECTION:
- self._section = self._CPP_SECTION
- else:
- self._last_header = ''
- return error_message
- elif header_type == _LIKELY_MY_HEADER:
- if self._section <= self._MY_H_SECTION:
- self._section = self._MY_H_SECTION
- else:
- self._section = self._OTHER_H_SECTION
- elif header_type == _POSSIBLE_MY_HEADER:
- if self._section <= self._MY_H_SECTION:
- self._section = self._MY_H_SECTION
- else:
- # This will always be the fallback because we're not sure
- # enough that the header is associated with this file.
- self._section = self._OTHER_H_SECTION
- else:
- assert header_type == _OTHER_HEADER
- self._section = self._OTHER_H_SECTION
+ if header_type == _C_SYS_HEADER:
+ if self._section <= self._C_SECTION:
+ self._section = self._C_SECTION
+ else:
+ self._last_header = ''
+ return error_message
+ elif header_type == _CPP_SYS_HEADER:
+ if self._section <= self._CPP_SECTION:
+ self._section = self._CPP_SECTION
+ else:
+ self._last_header = ''
+ return error_message
+ elif header_type == _LIKELY_MY_HEADER:
+ if self._section <= self._MY_H_SECTION:
+ self._section = self._MY_H_SECTION
+ else:
+ self._section = self._OTHER_H_SECTION
+ elif header_type == _POSSIBLE_MY_HEADER:
+ if self._section <= self._MY_H_SECTION:
+ self._section = self._MY_H_SECTION
+ else:
+ # This will always be the fallback because we're not sure
+ # enough that the header is associated with this file.
+ self._section = self._OTHER_H_SECTION
+ else:
+ assert header_type == _OTHER_HEADER
+ self._section = self._OTHER_H_SECTION
- if last_section != self._section:
- self._last_header = ''
+ if last_section != self._section:
+ self._last_header = ''
- return ''
+ return ''
class _CppLintState(object):
- """Maintains module-wide state.."""
+ """Maintains module-wide state.."""
+ def __init__(self):
+ self.verbose_level = 1 # global setting.
+ self.error_count = 0 # global count of reported errors
+ # filters to apply when emitting error messages
+ self.filters = _DEFAULT_FILTERS[:]
+ # backup of filter list. Used to restore the state after each file.
+ self._filters_backup = self.filters[:]
+ self.counting = 'total' # In what way are we counting errors?
+ self.errors_by_category = {} # string to int dict storing error counts
- def __init__(self):
- self.verbose_level = 1 # global setting.
- self.error_count = 0 # global count of reported errors
- # filters to apply when emitting error messages
- self.filters = _DEFAULT_FILTERS[:]
- # backup of filter list. Used to restore the state after each file.
- self._filters_backup = self.filters[:]
- self.counting = 'total' # In what way are we counting errors?
- self.errors_by_category = {} # string to int dict storing error counts
+ # output format:
+ # "emacs" - format that emacs can parse (default)
+ # "vs7" - format that Microsoft Visual Studio 7 can parse
+ self.output_format = 'emacs'
- # output format:
- # "emacs" - format that emacs can parse (default)
- # "vs7" - format that Microsoft Visual Studio 7 can parse
- self.output_format = 'emacs'
+ def SetOutputFormat(self, output_format):
+ """Sets the output format for errors."""
+ self.output_format = output_format
- def SetOutputFormat(self, output_format):
- """Sets the output format for errors."""
- self.output_format = output_format
+ def SetVerboseLevel(self, level):
+ """Sets the module's verbosity, and returns the previous setting."""
+ last_verbose_level = self.verbose_level
+ self.verbose_level = level
+ return last_verbose_level
- def SetVerboseLevel(self, level):
- """Sets the module's verbosity, and returns the previous setting."""
- last_verbose_level = self.verbose_level
- self.verbose_level = level
- return last_verbose_level
+ def SetCountingStyle(self, counting_style):
+ """Sets the module's counting options."""
+ self.counting = counting_style
- def SetCountingStyle(self, counting_style):
- """Sets the module's counting options."""
- self.counting = counting_style
-
- def SetFilters(self, filters):
- """Sets the error-message filters.
+ def SetFilters(self, filters):
+ """Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
@@ -1103,86 +1100,88 @@ class _CppLintState(object):
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
- # Default filters always have less priority than the flag ones.
- self.filters = _DEFAULT_FILTERS[:]
- self.AddFilters(filters)
+ # Default filters always have less priority than the flag ones.
+ self.filters = _DEFAULT_FILTERS[:]
+ self.AddFilters(filters)
- def AddFilters(self, filters):
- """ Adds more filters to the existing list of error-message filters. """
- for filt in filters.split(','):
- clean_filt = filt.strip()
- if clean_filt:
- self.filters.append(clean_filt)
- for filt in self.filters:
- if not (filt.startswith('+') or filt.startswith('-')):
- raise ValueError('Every filter in --filters must start with + or -'
- ' (%s does not)' % filt)
+ def AddFilters(self, filters):
+ """ Adds more filters to the existing list of error-message filters. """
+ for filt in filters.split(','):
+ clean_filt = filt.strip()
+ if clean_filt:
+ self.filters.append(clean_filt)
+ for filt in self.filters:
+ if not (filt.startswith('+') or filt.startswith('-')):
+ raise ValueError(
+ 'Every filter in --filters must start with + or -'
+ ' (%s does not)' % filt)
- def BackupFilters(self):
- """ Saves the current filter list to backup storage."""
- self._filters_backup = self.filters[:]
+ def BackupFilters(self):
+ """ Saves the current filter list to backup storage."""
+ self._filters_backup = self.filters[:]
- def RestoreFilters(self):
- """ Restores filters previously backed up."""
- self.filters = self._filters_backup[:]
+ def RestoreFilters(self):
+ """ Restores filters previously backed up."""
+ self.filters = self._filters_backup[:]
- def ResetErrorCounts(self):
- """Sets the module's error statistic back to zero."""
- self.error_count = 0
- self.errors_by_category = {}
+ def ResetErrorCounts(self):
+ """Sets the module's error statistic back to zero."""
+ self.error_count = 0
+ self.errors_by_category = {}
- def IncrementErrorCount(self, category):
- """Bumps the module's error statistic."""
- self.error_count += 1
- if self.counting in ('toplevel', 'detailed'):
- if self.counting != 'detailed':
- category = category.split('/')[0]
- if category not in self.errors_by_category:
- self.errors_by_category[category] = 0
- self.errors_by_category[category] += 1
+ def IncrementErrorCount(self, category):
+ """Bumps the module's error statistic."""
+ self.error_count += 1
+ if self.counting in ('toplevel', 'detailed'):
+ if self.counting != 'detailed':
+ category = category.split('/')[0]
+ if category not in self.errors_by_category:
+ self.errors_by_category[category] = 0
+ self.errors_by_category[category] += 1
+
+ def PrintErrorCounts(self):
+ """Print a summary of errors by category, and the total."""
+ for category, count in self.errors_by_category.items():
+ sys.stderr.write('Category \'%s\' errors found: %d\n' %
+ (category, count))
+ sys.stderr.write('Total errors found: %d\n' % self.error_count)
- def PrintErrorCounts(self):
- """Print a summary of errors by category, and the total."""
- for category, count in self.errors_by_category.items():
- sys.stderr.write('Category \'%s\' errors found: %d\n' %
- (category, count))
- sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
- """Gets the module's output format."""
- return _cpplint_state.output_format
+ """Gets the module's output format."""
+ return _cpplint_state.output_format
def _SetOutputFormat(output_format):
- """Sets the module's output format."""
- _cpplint_state.SetOutputFormat(output_format)
+ """Sets the module's output format."""
+ _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
- """Returns the module's verbosity setting."""
- return _cpplint_state.verbose_level
+ """Returns the module's verbosity setting."""
+ return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
- """Sets the module's verbosity, and returns the previous setting."""
- return _cpplint_state.SetVerboseLevel(level)
+ """Sets the module's verbosity, and returns the previous setting."""
+ return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
- """Sets the module's counting options."""
- _cpplint_state.SetCountingStyle(level)
+ """Sets the module's counting options."""
+ _cpplint_state.SetCountingStyle(level)
def _Filters():
- """Returns the module's list of output filters, as a list."""
- return _cpplint_state.filters
+ """Returns the module's list of output filters, as a list."""
+ return _cpplint_state.filters
def _SetFilters(filters):
- """Sets the module's error-message filters.
+ """Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
@@ -1191,10 +1190,11 @@ def _SetFilters(filters):
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
- _cpplint_state.SetFilters(filters)
+ _cpplint_state.SetFilters(filters)
+
def _AddFilters(filters):
- """Adds more filter overrides.
+ """Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
@@ -1203,96 +1203,100 @@ def _AddFilters(filters):
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
- _cpplint_state.AddFilters(filters)
+ _cpplint_state.AddFilters(filters)
+
def _BackupFilters():
- """ Saves the current filter list to backup storage."""
- _cpplint_state.BackupFilters()
+ """ Saves the current filter list to backup storage."""
+ _cpplint_state.BackupFilters()
+
def _RestoreFilters():
- """ Restores filters previously backed up."""
- _cpplint_state.RestoreFilters()
+ """ Restores filters previously backed up."""
+ _cpplint_state.RestoreFilters()
+
class _FunctionState(object):
- """Tracks current function name and the number of lines in its body."""
+ """Tracks current function name and the number of lines in its body."""
- _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
- _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
+ _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
+ _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
- def __init__(self):
- self.in_a_function = False
- self.lines_in_function = 0
- self.current_function = ''
+ def __init__(self):
+ self.in_a_function = False
+ self.lines_in_function = 0
+ self.current_function = ''
- def Begin(self, function_name):
- """Start analyzing function body.
+ def Begin(self, function_name):
+ """Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
- self.in_a_function = True
- self.lines_in_function = 0
- self.current_function = function_name
+ self.in_a_function = True
+ self.lines_in_function = 0
+ self.current_function = function_name
- def Count(self):
- """Count line in current function body."""
- if self.in_a_function:
- self.lines_in_function += 1
+ def Count(self):
+ """Count line in current function body."""
+ if self.in_a_function:
+ self.lines_in_function += 1
- def Check(self, error, filename, linenum):
- """Report if too many lines in function body.
+ def Check(self, error, filename, linenum):
+ """Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
- if not self.in_a_function:
- return
+ if not self.in_a_function:
+ return
- if Match(r'T(EST|est)', self.current_function):
- base_trigger = self._TEST_TRIGGER
- else:
- base_trigger = self._NORMAL_TRIGGER
- trigger = base_trigger * 2**_VerboseLevel()
+ if Match(r'T(EST|est)', self.current_function):
+ base_trigger = self._TEST_TRIGGER
+ else:
+ base_trigger = self._NORMAL_TRIGGER
+ trigger = base_trigger * 2**_VerboseLevel()
- if self.lines_in_function > trigger:
- error_level = int(math.log(self.lines_in_function / base_trigger, 2))
- # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
- if error_level > 5:
- error_level = 5
- error(filename, linenum, 'readability/fn_size', error_level,
- 'Small and focused functions are preferred:'
- ' %s has %d non-comment lines'
- ' (error triggered by exceeding %d lines).' % (
- self.current_function, self.lines_in_function, trigger))
+ if self.lines_in_function > trigger:
+ error_level = int(math.log(self.lines_in_function / base_trigger,
+ 2))
+ # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
+ if error_level > 5:
+ error_level = 5
+ error(
+ filename, linenum, 'readability/fn_size', error_level,
+ 'Small and focused functions are preferred:'
+ ' %s has %d non-comment lines'
+ ' (error triggered by exceeding %d lines).' %
+ (self.current_function, self.lines_in_function, trigger))
- def End(self):
- """Stop analyzing function body."""
- self.in_a_function = False
+ def End(self):
+ """Stop analyzing function body."""
+ self.in_a_function = False
class _IncludeError(Exception):
- """Indicates a problem with the include order in a file."""
- pass
+ """Indicates a problem with the include order in a file."""
+ pass
class FileInfo(object):
- """Provides utility functions for filenames.
+ """Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
+ def __init__(self, filename):
+ self._filename = filename
- def __init__(self, filename):
- self._filename = filename
+ def FullName(self):
+ """Make Windows paths like Unix."""
+ return os.path.abspath(self._filename).replace('\\', '/')
- def FullName(self):
- """Make Windows paths like Unix."""
- return os.path.abspath(self._filename).replace('\\', '/')
-
- def RepositoryName(self):
- r"""FullName after removing the local path to the repository.
+ def RepositoryName(self):
+ r"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
@@ -1301,47 +1305,48 @@ class FileInfo(object):
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
- fullname = self.FullName()
+ fullname = self.FullName()
- if os.path.exists(fullname):
- project_dir = os.path.dirname(fullname)
+ if os.path.exists(fullname):
+ project_dir = os.path.dirname(fullname)
- if _project_root:
- prefix = os.path.commonprefix([_project_root, project_dir])
- return fullname[len(prefix) + 1:]
+ if _project_root:
+ prefix = os.path.commonprefix([_project_root, project_dir])
+ return fullname[len(prefix) + 1:]
- if os.path.exists(os.path.join(project_dir, ".svn")):
- # If there's a .svn file in the current directory, we recursively look
- # up the directory tree for the top of the SVN checkout
- root_dir = project_dir
- one_up_dir = os.path.dirname(root_dir)
- while os.path.exists(os.path.join(one_up_dir, ".svn")):
- root_dir = os.path.dirname(root_dir)
- one_up_dir = os.path.dirname(one_up_dir)
+ if os.path.exists(os.path.join(project_dir, ".svn")):
+ # If there's a .svn file in the current directory, we
+ # recursively look up the directory tree for the top of the SVN
+ # checkout
+ root_dir = project_dir
+ one_up_dir = os.path.dirname(root_dir)
+ while os.path.exists(os.path.join(one_up_dir, ".svn")):
+ root_dir = os.path.dirname(root_dir)
+ one_up_dir = os.path.dirname(one_up_dir)
- prefix = os.path.commonprefix([root_dir, project_dir])
- return fullname[len(prefix) + 1:]
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
- # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
- # searching up from the current path.
- root_dir = os.path.dirname(fullname)
- while (root_dir != os.path.dirname(root_dir) and
- not os.path.exists(os.path.join(root_dir, ".git")) and
- not os.path.exists(os.path.join(root_dir, ".hg")) and
- not os.path.exists(os.path.join(root_dir, ".svn"))):
- root_dir = os.path.dirname(root_dir)
+ # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory
+ # by searching up from the current path.
+ root_dir = os.path.dirname(fullname)
+ while (root_dir != os.path.dirname(root_dir)
+ and not os.path.exists(os.path.join(root_dir, ".git"))
+ and not os.path.exists(os.path.join(root_dir, ".hg"))
+ and not os.path.exists(os.path.join(root_dir, ".svn"))):
+ root_dir = os.path.dirname(root_dir)
- if (os.path.exists(os.path.join(root_dir, ".git")) or
- os.path.exists(os.path.join(root_dir, ".hg")) or
- os.path.exists(os.path.join(root_dir, ".svn"))):
- prefix = os.path.commonprefix([root_dir, project_dir])
- return fullname[len(prefix) + 1:]
+ if (os.path.exists(os.path.join(root_dir, ".git"))
+ or os.path.exists(os.path.join(root_dir, ".hg"))
+ or os.path.exists(os.path.join(root_dir, ".svn"))):
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
- # Don't know what to do; header guard warnings may be wrong...
- return fullname
+ # Don't know what to do; header guard warnings may be wrong...
+ return fullname
- def Split(self):
- """Splits the file into the directory, basename, and extension.
+ def Split(self):
+ """Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
@@ -1350,57 +1355,57 @@ class FileInfo(object):
A tuple of (directory, basename, extension).
"""
- googlename = self.RepositoryName()
- project, rest = os.path.split(googlename)
- return (project,) + os.path.splitext(rest)
+ googlename = self.RepositoryName()
+ project, rest = os.path.split(googlename)
+ return (project, ) + os.path.splitext(rest)
- def BaseName(self):
- """File base name - text after the final slash, before the final period."""
- return self.Split()[1]
+ def BaseName(self):
+ """File base name - text after the final slash, before the final period."""
+ return self.Split()[1]
- def Extension(self):
- """File extension - text following the final period."""
- return self.Split()[2]
+ def Extension(self):
+ """File extension - text following the final period."""
+ return self.Split()[2]
- def NoExtension(self):
- """File has no source file extension."""
- return '/'.join(self.Split()[0:2])
+ def NoExtension(self):
+ """File has no source file extension."""
+ return '/'.join(self.Split()[0:2])
- def IsSource(self):
- """File has a source file extension."""
- return _IsSourceExtension(self.Extension()[1:])
+ def IsSource(self):
+ """File has a source file extension."""
+ return _IsSourceExtension(self.Extension()[1:])
def _ShouldPrintError(category, confidence, linenum):
- """If confidence >= verbose, category passes filter and is not suppressed."""
+ """If confidence >= verbose, category passes filter and is not suppressed."""
- # There are three ways we might decide not to print an error message:
- # a "NOLINT(category)" comment appears in the source,
- # the verbosity level isn't high enough, or the filters filter it out.
- if IsErrorSuppressedByNolint(category, linenum):
- return False
+ # There are three ways we might decide not to print an error message:
+ # a "NOLINT(category)" comment appears in the source,
+ # the verbosity level isn't high enough, or the filters filter it out.
+ if IsErrorSuppressedByNolint(category, linenum):
+ return False
- if confidence < _cpplint_state.verbose_level:
- return False
+ if confidence < _cpplint_state.verbose_level:
+ return False
- is_filtered = False
- for one_filter in _Filters():
- if one_filter.startswith('-'):
- if category.startswith(one_filter[1:]):
- is_filtered = True
- elif one_filter.startswith('+'):
- if category.startswith(one_filter[1:]):
- is_filtered = False
- else:
- assert False # should have been checked for in SetFilter.
- if is_filtered:
- return False
+ is_filtered = False
+ for one_filter in _Filters():
+ if one_filter.startswith('-'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = True
+ elif one_filter.startswith('+'):
+ if category.startswith(one_filter[1:]):
+ is_filtered = False
+ else:
+ assert False # should have been checked for in SetFilter.
+ if is_filtered:
+ return False
- return True
+ return True
def Error(filename, linenum, category, confidence, message):
- """Logs the fact we've found a lint error.
+ """Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
@@ -1421,17 +1426,17 @@ def Error(filename, linenum, category, confidence, message):
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
- if _ShouldPrintError(category, confidence, linenum):
- _cpplint_state.IncrementErrorCount(category)
- if _cpplint_state.output_format == 'vs7':
- sys.stderr.write('%s(%s): (cpplint) %s [%s] [%d]\n' %
- (filename, linenum, message, category, confidence))
- elif _cpplint_state.output_format == 'eclipse':
- sys.stderr.write('%s:%s: (cpplint) warning: %s [%s] [%d]\n' %
- (filename, linenum, message, category, confidence))
- else:
- sys.stderr.write('%s:%s: (cpplint) %s [%s] [%d]\n' %
- (filename, linenum, message, category, confidence))
+ if _ShouldPrintError(category, confidence, linenum):
+ _cpplint_state.IncrementErrorCount(category)
+ if _cpplint_state.output_format == 'vs7':
+ sys.stderr.write('%s(%s): (cpplint) %s [%s] [%d]\n' %
+ (filename, linenum, message, category, confidence))
+ elif _cpplint_state.output_format == 'eclipse':
+ sys.stderr.write('%s:%s: (cpplint) warning: %s [%s] [%d]\n' %
+ (filename, linenum, message, category, confidence))
+ else:
+ sys.stderr.write('%s:%s: (cpplint) %s [%s] [%d]\n' %
+ (filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
@@ -1447,15 +1452,18 @@ _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
-_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
- r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
- _RE_PATTERN_C_COMMENTS + r'\s+|' +
- r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
- _RE_PATTERN_C_COMMENTS + r')')
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(r'(\s*' +
+ _RE_PATTERN_C_COMMENTS +
+ r'\s*$|' +
+ _RE_PATTERN_C_COMMENTS +
+ r'\s+|' + r'\s+' +
+ _RE_PATTERN_C_COMMENTS +
+ r'(?=\W)|' +
+ _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
- """Does line terminate so, that the next symbol is in string constant.
+ """Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
@@ -1467,12 +1475,12 @@ def IsCppString(line):
string constant.
"""
- line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
- return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
+ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
+ return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def CleanseRawStrings(raw_lines):
- """Removes C++11 raw strings from lines.
+ """Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
@@ -1491,108 +1499,110 @@ def CleanseRawStrings(raw_lines):
list of lines with C++11 raw strings replaced by empty strings.
"""
- delimiter = None
- lines_without_raw_strings = []
- for line in raw_lines:
- if delimiter:
- # Inside a raw string, look for the end
- end = line.find(delimiter)
- if end >= 0:
- # Found the end of the string, match leading space for this
- # line and resume copying the original lines, and also insert
- # a "" on the last line.
- leading_space = Match(r'^(\s*)\S', line)
- line = leading_space.group(1) + '""' + line[end + len(delimiter):]
- delimiter = None
- else:
- # Haven't found the end yet, append a blank line.
- line = '""'
+ delimiter = None
+ lines_without_raw_strings = []
+ for line in raw_lines:
+ if delimiter:
+ # Inside a raw string, look for the end
+ end = line.find(delimiter)
+ if end >= 0:
+ # Found the end of the string, match leading space for this
+ # line and resume copying the original lines, and also insert
+ # a "" on the last line.
+ leading_space = Match(r'^(\s*)\S', line)
+ line = leading_space.group(1) + '""' + line[end +
+ len(delimiter):]
+ delimiter = None
+ else:
+ # Haven't found the end yet, append a blank line.
+ line = '""'
- # Look for beginning of a raw string, and replace them with
- # empty strings. This is done in a loop to handle multiple raw
- # strings on the same line.
- while delimiter is None:
- # Look for beginning of a raw string.
- # See 2.14.15 [lex.string] for syntax.
- #
- # Once we have matched a raw string, we check the prefix of the
- # line to make sure that the line is not part of a single line
- # comment. It's done this way because we remove raw strings
- # before removing comments as opposed to removing comments
- # before removing raw strings. This is because there are some
- # cpplint checks that requires the comments to be preserved, but
- # we don't want to check comments that are inside raw strings.
- matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
- if (matched and
- not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
- matched.group(1))):
- delimiter = ')' + matched.group(2) + '"'
+ # Look for beginning of a raw string, and replace them with
+ # empty strings. This is done in a loop to handle multiple raw
+ # strings on the same line.
+ while delimiter is None:
+ # Look for beginning of a raw string.
+ # See 2.14.15 [lex.string] for syntax.
+ #
+ # Once we have matched a raw string, we check the prefix of the
+ # line to make sure that the line is not part of a single line
+ # comment. It's done this way because we remove raw strings
+ # before removing comments as opposed to removing comments
+ # before removing raw strings. This is because there are some
+ # cpplint checks that require the comments to be preserved, but
+ # we don't want to check comments that are inside raw strings.
+ matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$',
+ line)
+ if (matched and
+ not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
+ matched.group(1))):
+ delimiter = ')' + matched.group(2) + '"'
- end = matched.group(3).find(delimiter)
- if end >= 0:
- # Raw string ended on same line
- line = (matched.group(1) + '""' +
- matched.group(3)[end + len(delimiter):])
- delimiter = None
- else:
- # Start of a multi-line raw string
- line = matched.group(1) + '""'
- else:
- break
+ end = matched.group(3).find(delimiter)
+ if end >= 0:
+ # Raw string ended on same line
+ line = (matched.group(1) + '""' +
+ matched.group(3)[end + len(delimiter):])
+ delimiter = None
+ else:
+ # Start of a multi-line raw string
+ line = matched.group(1) + '""'
+ else:
+ break
- lines_without_raw_strings.append(line)
+ lines_without_raw_strings.append(line)
- # TODO(unknown): if delimiter is not None here, we might want to
- # emit a warning for unterminated string.
- return lines_without_raw_strings
+ # TODO(unknown): if delimiter is not None here, we might want to
+ # emit a warning for unterminated string.
+ return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
- """Find the beginning marker for a multiline comment."""
- while lineix < len(lines):
- if lines[lineix].strip().startswith('/*'):
- # Only return this marker if the comment goes beyond this line
- if lines[lineix].strip().find('*/', 2) < 0:
- return lineix
- lineix += 1
- return len(lines)
+ """Find the beginning marker for a multiline comment."""
+ while lineix < len(lines):
+ if lines[lineix].strip().startswith('/*'):
+ # Only return this marker if the comment goes beyond this line
+ if lines[lineix].strip().find('*/', 2) < 0:
+ return lineix
+ lineix += 1
+ return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
- """We are inside a comment, find the end marker."""
- while lineix < len(lines):
- if lines[lineix].strip().endswith('*/'):
- return lineix
- lineix += 1
- return len(lines)
+ """We are inside a comment, find the end marker."""
+ while lineix < len(lines):
+ if lines[lineix].strip().endswith('*/'):
+ return lineix
+ lineix += 1
+ return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
- """Clears a range of lines for multi-line comments."""
- # Having // dummy comments makes the lines non-empty, so we will not get
- # unnecessary blank line warnings later in the code.
- for i in range(begin, end):
- lines[i] = '/**/'
+ """Clears a range of lines for multi-line comments."""
+ # Having /**/ filler comments makes the lines non-empty, so we will not get
+ # unnecessary blank line warnings later in the code.
+ for i in range(begin, end):
+ lines[i] = '/**/'
def RemoveMultiLineComments(filename, lines, error):
- """Removes multiline (c-style) comments from lines."""
- lineix = 0
- while lineix < len(lines):
- lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
- if lineix_begin >= len(lines):
- return
- lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
- if lineix_end >= len(lines):
- error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
- 'Could not find end of multi-line comment')
- return
- RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
- lineix = lineix_end + 1
+ """Removes multiline (c-style) comments from lines."""
+ lineix = 0
+ while lineix < len(lines):
+ lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
+ if lineix_begin >= len(lines):
+ return
+ lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
+ if lineix_end >= len(lines):
+ error(filename, lineix_begin + 1, 'readability/multiline_comment',
+ 5, 'Could not find end of multi-line comment')
+ return
+ RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
+ lineix = lineix_end + 1
def CleanseComments(line):
- """Removes //-comments and single-line C-style /* */ comments.
+ """Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
@@ -1600,15 +1610,15 @@ def CleanseComments(line):
Returns:
The line with single-line comments removed.
"""
- commentpos = line.find('//')
- if commentpos != -1 and not IsCppString(line[:commentpos]):
- line = line[:commentpos].rstrip()
- # get rid of /* ... */
- return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+ commentpos = line.find('//')
+ if commentpos != -1 and not IsCppString(line[:commentpos]):
+ line = line[:commentpos].rstrip()
+ # get rid of /* ... */
+ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
- """Holds 4 copies of all lines with different preprocessing applied to them.
+ """Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
@@ -1617,26 +1627,26 @@ class CleansedLines(object):
strings removed.
All these members are of <type string>, and of the same length.
"""
+ def __init__(self, lines):
+ self.elided = []
+ self.lines = []
+ self.raw_lines = lines
+ self.num_lines = len(lines)
+ self.lines_without_raw_strings = CleanseRawStrings(lines)
+ for linenum in range(len(self.lines_without_raw_strings)):
+ self.lines.append(
+ CleanseComments(self.lines_without_raw_strings[linenum]))
+ elided = self._CollapseStrings(
+ self.lines_without_raw_strings[linenum])
+ self.elided.append(CleanseComments(elided))
- def __init__(self, lines):
- self.elided = []
- self.lines = []
- self.raw_lines = lines
- self.num_lines = len(lines)
- self.lines_without_raw_strings = CleanseRawStrings(lines)
- for linenum in range(len(self.lines_without_raw_strings)):
- self.lines.append(CleanseComments(
- self.lines_without_raw_strings[linenum]))
- elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
- self.elided.append(CleanseComments(elided))
+ def NumLines(self):
+ """Returns the number of lines represented."""
+ return self.num_lines
- def NumLines(self):
- """Returns the number of lines represented."""
- return self.num_lines
-
- @staticmethod
- def _CollapseStrings(elided):
- """Collapses strings and chars on a line to simple "" or '' blocks.
+ @staticmethod
+ def _CollapseStrings(elided):
+ """Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
@@ -1646,64 +1656,66 @@ class CleansedLines(object):
Returns:
The line with collapsed strings.
"""
- if _RE_PATTERN_INCLUDE.match(elided):
- return elided
+ if _RE_PATTERN_INCLUDE.match(elided):
+ return elided
- # Remove escaped characters first to make quote/single quote collapsing
- # basic. Things that look like escaped characters shouldn't occur
- # outside of strings and chars.
- elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
+ # Remove escaped characters first to make quote/single quote collapsing
+ # basic. Things that look like escaped characters shouldn't occur
+ # outside of strings and chars.
+ elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
- # Replace quoted strings and digit separators. Both single quotes
- # and double quotes are processed in the same loop, otherwise
- # nested quotes wouldn't work.
- collapsed = ''
- while True:
- # Find the first quote character
- match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
- if not match:
- collapsed += elided
- break
- head, quote, tail = match.groups()
+ # Replace quoted strings and digit separators. Both single quotes
+ # and double quotes are processed in the same loop, otherwise
+ # nested quotes wouldn't work.
+ collapsed = ''
+ while True:
+ # Find the first quote character
+ match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
+ if not match:
+ collapsed += elided
+ break
+ head, quote, tail = match.groups()
- if quote == '"':
- # Collapse double quoted strings
- second_quote = tail.find('"')
- if second_quote >= 0:
- collapsed += head + '""'
- elided = tail[second_quote + 1:]
- else:
- # Unmatched double quote, don't bother processing the rest
- # of the line since this is probably a multiline string.
- collapsed += elided
- break
- else:
- # Found single quote, check nearby text to eliminate digit separators.
- #
- # There is no special handling for floating point here, because
- # the integer/fractional/exponent parts would all be parsed
- # correctly as long as there are digits on both sides of the
- # separator. So we are fine as long as we don't see something
- # like "0.'3" (gcc 4.9.0 will not allow this literal).
- if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
- match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
- collapsed += head + match_literal.group(1).replace("'", '')
- elided = match_literal.group(2)
- else:
- second_quote = tail.find('\'')
- if second_quote >= 0:
- collapsed += head + "''"
- elided = tail[second_quote + 1:]
- else:
- # Unmatched single quote
- collapsed += elided
- break
+ if quote == '"':
+ # Collapse double quoted strings
+ second_quote = tail.find('"')
+ if second_quote >= 0:
+ collapsed += head + '""'
+ elided = tail[second_quote + 1:]
+ else:
+ # Unmatched double quote, don't bother processing the rest
+ # of the line since this is probably a multiline string.
+ collapsed += elided
+ break
+ else:
+ # Found single quote, check nearby text to eliminate digit
+ # separators.
+ #
+ # There is no special handling for floating point here, because
+ # the integer/fractional/exponent parts would all be parsed
+ # correctly as long as there are digits on both sides of the
+ # separator. So we are fine as long as we don't see something
+ # like "0.'3" (gcc 4.9.0 will not allow this literal).
+ if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
+ match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$',
+ "'" + tail)
+ collapsed += head + match_literal.group(1).replace("'", '')
+ elided = match_literal.group(2)
+ else:
+ second_quote = tail.find('\'')
+ if second_quote >= 0:
+ collapsed += head + "''"
+ elided = tail[second_quote + 1:]
+ else:
+ # Unmatched single quote
+ collapsed += elided
+ break
- return collapsed
+ return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
- """Find the position just after the end of current parenthesized expression.
+ """Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
@@ -1715,73 +1727,74 @@ def FindEndOfExpressionInLine(line, startpos, stack):
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
- for i in range(startpos, len(line)):
- char = line[i]
- if char in '([{':
- # Found start of parenthesized expression, push to expression stack
- stack.append(char)
- elif char == '<':
- # Found potential start of template argument list
- if i > 0 and line[i - 1] == '<':
- # Left shift operator
- if stack and stack[-1] == '<':
- stack.pop()
- if not stack:
- return (-1, None)
- elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
- # operator<, don't add to stack
- continue
- else:
- # Tentative start of template argument list
- stack.append('<')
- elif char in ')]}':
- # Found end of parenthesized expression.
- #
- # If we are currently expecting a matching '>', the pending '<'
- # must have been an operator. Remove them from expression stack.
- while stack and stack[-1] == '<':
- stack.pop()
- if not stack:
- return (-1, None)
- if ((stack[-1] == '(' and char == ')') or
- (stack[-1] == '[' and char == ']') or
- (stack[-1] == '{' and char == '}')):
- stack.pop()
- if not stack:
- return (i + 1, None)
- else:
- # Mismatched parentheses
- return (-1, None)
- elif char == '>':
- # Found potential end of template argument list.
+ for i in range(startpos, len(line)):
+ char = line[i]
+ if char in '([{':
+ # Found start of parenthesized expression, push to expression stack
+ stack.append(char)
+ elif char == '<':
+ # Found potential start of template argument list
+ if i > 0 and line[i - 1] == '<':
+ # Left shift operator
+ if stack and stack[-1] == '<':
+ stack.pop()
+ if not stack:
+ return (-1, None)
+ elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
+ # operator<, don't add to stack
+ continue
+ else:
+ # Tentative start of template argument list
+ stack.append('<')
+ elif char in ')]}':
+ # Found end of parenthesized expression.
+ #
+ # If we are currently expecting a matching '>', the pending '<'
+ # must have been an operator. Remove them from expression stack.
+ while stack and stack[-1] == '<':
+ stack.pop()
+ if not stack:
+ return (-1, None)
+ if ((stack[-1] == '(' and char == ')')
+ or (stack[-1] == '[' and char == ']')
+ or (stack[-1] == '{' and char == '}')):
+ stack.pop()
+ if not stack:
+ return (i + 1, None)
+ else:
+ # Mismatched parentheses
+ return (-1, None)
+ elif char == '>':
+ # Found potential end of template argument list.
- # Ignore "->" and operator functions
- if (i > 0 and
- (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
- continue
+ # Ignore "->" and operator functions
+ if (i > 0 and (line[i - 1] == '-'
+ or Search(r'\boperator\s*$', line[0:i - 1]))):
+ continue
- # Pop the stack if there is a matching '<'. Otherwise, ignore
- # this '>' since it must be an operator.
- if stack:
- if stack[-1] == '<':
- stack.pop()
- if not stack:
- return (i + 1, None)
- elif char == ';':
- # Found something that look like end of statements. If we are currently
- # expecting a '>', the matching '<' must have been an operator, since
- # template argument list should not contain statements.
- while stack and stack[-1] == '<':
- stack.pop()
- if not stack:
- return (-1, None)
+ # Pop the stack if there is a matching '<'. Otherwise, ignore
+ # this '>' since it must be an operator.
+ if stack:
+ if stack[-1] == '<':
+ stack.pop()
+ if not stack:
+ return (i + 1, None)
+ elif char == ';':
+ # Found something that looks like the end of a statement. If we are
+ # currently expecting a '>', the matching '<' must have been an
+ # operator, since a template argument list should not contain
+ # statements.
+ while stack and stack[-1] == '<':
+ stack.pop()
+ if not stack:
+ return (-1, None)
- # Did not find end of expression or unbalanced parentheses on this line
- return (-1, stack)
+ # Did not find end of expression or unbalanced parentheses on this line
+ return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
- """If input points to ( or { or [ or <, finds the position that closes it.
+ """If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
@@ -1803,29 +1816,29 @@ def CloseExpression(clean_lines, linenum, pos):
'cleansed' line at linenum.
"""
- line = clean_lines.elided[linenum]
- if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
- return (line, clean_lines.NumLines(), -1)
-
- # Check first line
- (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
- if end_pos > -1:
- return (line, linenum, end_pos)
-
- # Continue scanning forward
- while stack and linenum < clean_lines.NumLines() - 1:
- linenum += 1
line = clean_lines.elided[linenum]
- (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
- if end_pos > -1:
- return (line, linenum, end_pos)
+ if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
+ return (line, clean_lines.NumLines(), -1)
- # Did not find end of expression before end of file, give up
- return (line, clean_lines.NumLines(), -1)
+ # Check first line
+ (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+
+ # Continue scanning forward
+ while stack and linenum < clean_lines.NumLines() - 1:
+ linenum += 1
+ line = clean_lines.elided[linenum]
+ (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+
+ # Did not find end of expression before end of file, give up
+ return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
- """Find position at the matching start of current expression.
+ """Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
@@ -1840,69 +1853,68 @@ def FindStartOfExpressionInLine(line, endpos, stack):
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
- i = endpos
- while i >= 0:
- char = line[i]
- if char in ')]}':
- # Found end of expression, push to expression stack
- stack.append(char)
- elif char == '>':
- # Found potential end of template argument list.
- #
- # Ignore it if it's a "->" or ">=" or "operator>"
- if (i > 0 and
- (line[i - 1] == '-' or
- Match(r'\s>=\s', line[i - 1:]) or
- Search(r'\boperator\s*$', line[0:i]))):
- i -= 1
- else:
- stack.append('>')
- elif char == '<':
- # Found potential start of template argument list
- if i > 0 and line[i - 1] == '<':
- # Left shift operator
- i -= 1
- else:
- # If there is a matching '>', we can pop the expression stack.
- # Otherwise, ignore this '<' since it must be an operator.
- if stack and stack[-1] == '>':
- stack.pop()
- if not stack:
- return (i, None)
- elif char in '([{':
- # Found start of expression.
- #
- # If there are any unmatched '>' on the stack, they must be
- # operators. Remove those.
- while stack and stack[-1] == '>':
- stack.pop()
- if not stack:
- return (-1, None)
- if ((char == '(' and stack[-1] == ')') or
- (char == '[' and stack[-1] == ']') or
- (char == '{' and stack[-1] == '}')):
- stack.pop()
- if not stack:
- return (i, None)
- else:
- # Mismatched parentheses
- return (-1, None)
- elif char == ';':
- # Found something that look like end of statements. If we are currently
- # expecting a '<', the matching '>' must have been an operator, since
- # template argument list should not contain statements.
- while stack and stack[-1] == '>':
- stack.pop()
- if not stack:
- return (-1, None)
+ i = endpos
+ while i >= 0:
+ char = line[i]
+ if char in ')]}':
+ # Found end of expression, push to expression stack
+ stack.append(char)
+ elif char == '>':
+ # Found potential end of template argument list.
+ #
+ # Ignore it if it's a "->" or ">=" or "operator>"
+ if (i > 0 and (line[i - 1] == '-' or Match(r'\s>=\s', line[i - 1:])
+ or Search(r'\boperator\s*$', line[0:i]))):
+ i -= 1
+ else:
+ stack.append('>')
+ elif char == '<':
+ # Found potential start of template argument list
+ if i > 0 and line[i - 1] == '<':
+ # Left shift operator
+ i -= 1
+ else:
+ # If there is a matching '>', we can pop the expression stack.
+ # Otherwise, ignore this '<' since it must be an operator.
+ if stack and stack[-1] == '>':
+ stack.pop()
+ if not stack:
+ return (i, None)
+ elif char in '([{':
+ # Found start of expression.
+ #
+ # If there are any unmatched '>' on the stack, they must be
+ # operators. Remove those.
+ while stack and stack[-1] == '>':
+ stack.pop()
+ if not stack:
+ return (-1, None)
+ if ((char == '(' and stack[-1] == ')')
+ or (char == '[' and stack[-1] == ']')
+ or (char == '{' and stack[-1] == '}')):
+ stack.pop()
+ if not stack:
+ return (i, None)
+ else:
+ # Mismatched parentheses
+ return (-1, None)
+ elif char == ';':
+ # Found something that looks like the end of a statement. If we are
+ # currently expecting a '<', the matching '>' must have been an
+ # operator, since a template argument list should not contain
+ # statements.
+ while stack and stack[-1] == '>':
+ stack.pop()
+ if not stack:
+ return (-1, None)
- i -= 1
+ i -= 1
- return (-1, stack)
+ return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
- """If input points to ) or } or ] or >, finds the position that opens it.
+ """If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
@@ -1918,42 +1930,44 @@ def ReverseCloseExpression(clean_lines, linenum, pos):
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
- line = clean_lines.elided[linenum]
- if line[pos] not in ')}]>':
- return (line, 0, -1)
-
- # Check last line
- (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
- if start_pos > -1:
- return (line, linenum, start_pos)
-
- # Continue scanning backward
- while stack and linenum > 0:
- linenum -= 1
line = clean_lines.elided[linenum]
- (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
- if start_pos > -1:
- return (line, linenum, start_pos)
+ if line[pos] not in ')}]>':
+ return (line, 0, -1)
- # Did not find start of expression before beginning of file, give up
- return (line, 0, -1)
+ # Check last line
+ (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
+ if start_pos > -1:
+ return (line, linenum, start_pos)
+
+ # Continue scanning backward
+ while stack and linenum > 0:
+ linenum -= 1
+ line = clean_lines.elided[linenum]
+ (start_pos,
+ stack) = FindStartOfExpressionInLine(line,
+ len(line) - 1, stack)
+ if start_pos > -1:
+ return (line, linenum, start_pos)
+
+ # Did not find start of expression before beginning of file, give up
+ return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
- """Logs an error if no Copyright message appears at the top of the file."""
+ """Logs an error if no Copyright message appears at the top of the file."""
- # We'll say it should occur by line 10. Don't forget there's a
- # dummy line at the front.
- for line in range(1, min(len(lines), 11)):
- if re.search(r'Copyright', lines[line], re.I): break
- else: # means no copyright line was found
- error(filename, 0, 'legal/copyright', 5,
- 'No copyright message found. '
- 'You should have a line: "Copyright [year] <Copyright Owner>"')
+ # We'll say it should occur by line 10. Don't forget there's a
+ # dummy line at the front.
+ for line in range(1, min(len(lines), 11)):
+ if re.search(r'Copyright', lines[line], re.I): break
+ else: # means no copyright line was found
+ error(
+ filename, 0, 'legal/copyright', 5, 'No copyright message found. '
+ 'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
- """Return the number of leading spaces in line.
+ """Return the number of leading spaces in line.
Args:
line: A string to check.
@@ -1961,15 +1975,15 @@ def GetIndentLevel(line):
Returns:
An integer count of leading spaces, possibly zero.
"""
- indent = Match(r'^( *)\S', line)
- if indent:
- return len(indent.group(1))
- else:
- return 0
+ indent = Match(r'^( *)\S', line)
+ if indent:
+ return len(indent.group(1))
+ else:
+ return 0
def PathSplitToList(path):
- """Returns the path split into a list by the separator.
+ """Returns the path split into a list by the separator.
Args:
path: An absolute or relative path (e.g. '/a/b/c/' or '../a')
@@ -1977,25 +1991,25 @@ def PathSplitToList(path):
Returns:
A list of path components (e.g. ['a', 'b', 'c']).
"""
- lst = []
- while True:
- (head, tail) = os.path.split(path)
- if head == path: # absolute paths end
- lst.append(head)
- break
- if tail == path: # relative paths end
- lst.append(tail)
- break
+ lst = []
+ while True:
+ (head, tail) = os.path.split(path)
+ if head == path: # absolute paths end
+ lst.append(head)
+ break
+ if tail == path: # relative paths end
+ lst.append(tail)
+ break
- path = head
- lst.append(tail)
+ path = head
+ lst.append(tail)
- lst.reverse()
- return lst
+ lst.reverse()
+ return lst
def GetHeaderGuardCPPVariable(filename):
- """Returns the CPP variable that should be used as a header guard.
+ """Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
@@ -2006,73 +2020,77 @@ def GetHeaderGuardCPPVariable(filename):
"""
- # Restores original filename in case that cpplint is invoked from Emacs's
- # flymake.
- filename = re.sub(r'_flymake\.h$', '.h', filename)
- filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
- # Replace 'c++' with 'cpp'.
- filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
+ # Restores original filename in case that cpplint is invoked from Emacs's
+ # flymake.
+ filename = re.sub(r'_flymake\.h$', '.h', filename)
+ filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
+ # Replace 'c++' with 'cpp'.
+ filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
- fileinfo = FileInfo(filename)
- file_path_from_root = fileinfo.RepositoryName()
+ fileinfo = FileInfo(filename)
+ file_path_from_root = fileinfo.RepositoryName()
- def FixupPathFromRoot():
- if _root_debug:
- sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
- % (_root, fileinfo.RepositoryName()))
+ def FixupPathFromRoot():
+ if _root_debug:
+ sys.stderr.write(
+ "\n_root fixup, _root = '%s', repository name = '%s'\n" %
+ (_root, fileinfo.RepositoryName()))
- # Process the file path with the --root flag if it was set.
- if not _root:
- if _root_debug:
- sys.stderr.write("_root unspecified\n")
- return file_path_from_root
+ # Process the file path with the --root flag if it was set.
+ if not _root:
+ if _root_debug:
+ sys.stderr.write("_root unspecified\n")
+ return file_path_from_root
- def StripListPrefix(lst, prefix):
- # f(['x', 'y'], ['w, z']) -> None (not a valid prefix)
- if lst[:len(prefix)] != prefix:
- return None
- # f(['a, 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
- return lst[(len(prefix)):]
+ def StripListPrefix(lst, prefix):
+ # f(['x', 'y'], ['w', 'z']) -> None (not a valid prefix)
+ if lst[:len(prefix)] != prefix:
+ return None
+ # f(['a', 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
+ return lst[(len(prefix)):]
- # root behavior:
- # --root=subdir , lstrips subdir from the header guard
- maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
- PathSplitToList(_root))
+ # root behavior:
+ # --root=subdir , lstrips subdir from the header guard
+ maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
+ PathSplitToList(_root))
- if _root_debug:
- sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
- " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
+ if _root_debug:
+ sys.stderr.write(
+ ("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
+ " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
- if maybe_path:
- return os.path.join(*maybe_path)
+ if maybe_path:
+ return os.path.join(*maybe_path)
- # --root=.. , will prepend the outer directory to the header guard
- full_path = fileinfo.FullName()
- # adapt slashes for windows
- root_abspath = os.path.abspath(_root).replace('\\', '/')
+ # --root=.. , will prepend the outer directory to the header guard
+ full_path = fileinfo.FullName()
+ # adapt slashes for windows
+ root_abspath = os.path.abspath(_root).replace('\\', '/')
- maybe_path = StripListPrefix(PathSplitToList(full_path),
- PathSplitToList(root_abspath))
+ maybe_path = StripListPrefix(PathSplitToList(full_path),
+ PathSplitToList(root_abspath))
- if _root_debug:
- sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
- "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
+ if _root_debug:
+ sys.stderr.write(
+ ("_root prepend (maybe_path=%s, full_path=%s, " +
+ "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
- if maybe_path:
- return os.path.join(*maybe_path)
+ if maybe_path:
+ return os.path.join(*maybe_path)
- if _root_debug:
- sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
+ if _root_debug:
+ sys.stderr.write("_root ignore, returning %s\n" %
+ (file_path_from_root))
- # --root=FAKE_DIR is ignored
- return file_path_from_root
+ # --root=FAKE_DIR is ignored
+ return file_path_from_root
- file_path_from_root = FixupPathFromRoot()
- return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
+ file_path_from_root = FixupPathFromRoot()
+ return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
def CheckForHeaderGuard(filename, clean_lines, error):
- """Checks that the file contains a header guard.
+ """Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
@@ -2083,119 +2101,123 @@ def CheckForHeaderGuard(filename, clean_lines, error):
error: The function to call with any errors found.
"""
- # Don't check for header guards if there are error suppression
- # comments somewhere in this file.
- #
- # Because this is silencing a warning for a nonexistent line, we
- # only support the very specific NOLINT(build/header_guard) syntax,
- # and not the general NOLINT or NOLINT(*) syntax.
- raw_lines = clean_lines.lines_without_raw_strings
- for i in raw_lines:
- if Search(r'//\s*NOLINT\(build/header_guard\)', i):
- return
+ # Don't check for header guards if there are error suppression
+ # comments somewhere in this file.
+ #
+ # Because this is silencing a warning for a nonexistent line, we
+ # only support the very specific NOLINT(build/header_guard) syntax,
+ # and not the general NOLINT or NOLINT(*) syntax.
+ raw_lines = clean_lines.lines_without_raw_strings
+ for i in raw_lines:
+ if Search(r'//\s*NOLINT\(build/header_guard\)', i):
+ return
- cppvar = GetHeaderGuardCPPVariable(filename)
+ cppvar = GetHeaderGuardCPPVariable(filename)
- ifndef = ''
- ifndef_linenum = 0
- define = ''
- endif = ''
- endif_linenum = 0
- for linenum, line in enumerate(raw_lines):
- linesplit = line.split()
- if len(linesplit) >= 2:
- # find the first occurrence of #ifndef and #define, save arg
- if not ifndef and linesplit[0] == '#ifndef':
- # set ifndef to the header guard presented on the #ifndef line.
- ifndef = linesplit[1]
- ifndef_linenum = linenum
- if not define and linesplit[0] == '#define':
- define = linesplit[1]
- # find the last occurrence of #endif, save entire line
- if line.startswith('#endif'):
- endif = line
- endif_linenum = linenum
+ ifndef = ''
+ ifndef_linenum = 0
+ define = ''
+ endif = ''
+ endif_linenum = 0
+ for linenum, line in enumerate(raw_lines):
+ linesplit = line.split()
+ if len(linesplit) >= 2:
+ # find the first occurrence of #ifndef and #define, save arg
+ if not ifndef and linesplit[0] == '#ifndef':
+ # set ifndef to the header guard presented on the #ifndef line.
+ ifndef = linesplit[1]
+ ifndef_linenum = linenum
+ if not define and linesplit[0] == '#define':
+ define = linesplit[1]
+ # find the last occurrence of #endif, save entire line
+ if line.startswith('#endif'):
+ endif = line
+ endif_linenum = linenum
- if not ifndef or not define or ifndef != define:
- error(filename, 0, 'build/header_guard', 5,
- 'No #ifndef header guard found, suggested CPP variable is: %s' %
- cppvar)
- return
+ if not ifndef or not define or ifndef != define:
+ error(
+ filename, 0, 'build/header_guard', 5,
+ 'No #ifndef header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
- # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
- # for backward compatibility.
- if ifndef != cppvar:
- error_level = 0
- if ifndef != cppvar + '_':
- error_level = 5
+ # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
+ # for backward compatibility.
+ if ifndef != cppvar:
+ error_level = 0
+ if ifndef != cppvar + '_':
+ error_level = 5
- ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
+ ParseNolintSuppressions(filename, raw_lines[ifndef_linenum],
+ ifndef_linenum, error)
+ error(filename, ifndef_linenum, 'build/header_guard', error_level,
+ '#ifndef header guard has wrong style, please use: %s' % cppvar)
+
+ # Check for "//" comments on endif line.
+ ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
- error(filename, ifndef_linenum, 'build/header_guard', error_level,
- '#ifndef header guard has wrong style, please use: %s' % cppvar)
-
- # Check for "//" comments on endif line.
- ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
- error)
- match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
- if match:
- if match.group(1) == '_':
- # Issue low severity warning for deprecated double trailing underscore
- error(filename, endif_linenum, 'build/header_guard', 0,
- '#endif line should be "#endif // %s"' % cppvar)
- return
-
- # Didn't find the corresponding "//" comment. If this file does not
- # contain any "//" comments at all, it could be that the compiler
- # only wants "/**/" comments, look for those instead.
- no_single_line_comments = True
- for i in range(1, len(raw_lines) - 1):
- line = raw_lines[i]
- if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
- no_single_line_comments = False
- break
-
- if no_single_line_comments:
- match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
+ match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
- if match.group(1) == '_':
- # Low severity warning for double trailing underscore
- error(filename, endif_linenum, 'build/header_guard', 0,
- '#endif line should be "#endif /* %s */"' % cppvar)
- return
+ if match.group(1) == '_':
+ # Issue low severity warning for deprecated double trailing
+ # underscore
+ error(filename, endif_linenum, 'build/header_guard', 0,
+ '#endif line should be "#endif // %s"' % cppvar)
+ return
- # Didn't find anything
- error(filename, endif_linenum, 'build/header_guard', 5,
- '#endif line should be "#endif // %s"' % cppvar)
+ # Didn't find the corresponding "//" comment. If this file does not
+ # contain any "//" comments at all, it could be that the compiler
+ # only wants "/**/" comments, look for those instead.
+ no_single_line_comments = True
+ for i in range(1, len(raw_lines) - 1):
+ line = raw_lines[i]
+ if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//',
+ line):
+ no_single_line_comments = False
+ break
+
+ if no_single_line_comments:
+ match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
+ if match:
+ if match.group(1) == '_':
+ # Low severity warning for double trailing underscore
+ error(filename, endif_linenum, 'build/header_guard', 0,
+ '#endif line should be "#endif /* %s */"' % cppvar)
+ return
+
+ # Didn't find anything
+ error(filename, endif_linenum, 'build/header_guard', 5,
+ '#endif line should be "#endif // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
- """Logs an error if a .cc file does not include its header."""
+ """Logs an error if a .cc file does not include its header."""
- # Do not check test files
- fileinfo = FileInfo(filename)
- if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
- return
-
- headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'
- if not os.path.exists(headerfile):
- return
- headername = FileInfo(headerfile).RepositoryName()
- first_include = 0
- for section_list in include_state.include_list:
- for f in section_list:
- if headername in f[0] or f[0] in headername:
+ # Do not check test files
+ fileinfo = FileInfo(filename)
+ if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
- if not first_include:
- first_include = f[1]
- error(filename, first_include, 'build/include', 5,
- '%s should include its header file %s' % (fileinfo.RepositoryName(),
- headername))
+ headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'
+ if not os.path.exists(headerfile):
+ return
+ headername = FileInfo(headerfile).RepositoryName()
+ first_include = 0
+ for section_list in include_state.include_list:
+ for f in section_list:
+ if headername in f[0] or f[0] in headername:
+ return
+ if not first_include:
+ first_include = f[1]
+
+ error(
+ filename, first_include, 'build/include', 5,
+ '%s should include its header file %s' %
+ (fileinfo.RepositoryName(), headername))
def CheckForBadCharacters(filename, lines, error):
- """Logs an error for each line containing bad characters.
+ """Logs an error for each line containing bad characters.
Two kinds of bad characters:
@@ -2211,16 +2233,19 @@ def CheckForBadCharacters(filename, lines, error):
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
- for linenum, line in enumerate(lines):
- if u'\ufffd' in line:
- error(filename, linenum, 'readability/utf8', 5,
- 'Line contains invalid UTF-8 (or Unicode replacement character).')
- if '\0' in line:
- error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
+ for linenum, line in enumerate(lines):
+ if u'\ufffd' in line:
+ error(
+ filename, linenum, 'readability/utf8', 5,
+ 'Line contains invalid UTF-8 (or Unicode replacement character).'
+ )
+ if '\0' in line:
+ error(filename, linenum, 'readability/nul', 5,
+ 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
- """Logs an error if there is no newline char at the end of the file.
+ """Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
@@ -2228,17 +2253,18 @@ def CheckForNewlineAtEOF(filename, lines, error):
error: The function to call with any errors found.
"""
- # The array lines() was created by adding two newlines to the
- # original file (go figure), then splitting on \n.
- # To verify that the file ends in \n, we just have to make sure the
- # last-but-two element of lines() exists and is empty.
- if len(lines) < 3 or lines[-2]:
- error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
- 'Could not find a newline character at the end of the file.')
+ # The array lines() was created by adding two newlines to the
+ # original file (go figure), then splitting on \n.
+ # To verify that the file ends in \n, we just have to make sure the
+ # last-but-two element of lines() exists and is empty.
+ if len(lines) < 3 or lines[-2]:
+ error(filename,
+ len(lines) - 2, 'whitespace/ending_newline', 5,
+ 'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
- """Logs an error if we see /* ... */ or "..." that extend past one line.
+ """Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
@@ -2254,25 +2280,27 @@ def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Remove all \\ (escaped backslashes) from the line. They are OK, and the
- # second (escaped) slash may trigger later \" detection erroneously.
- line = line.replace('\\\\', '')
+ # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+ # second (escaped) slash may trigger later \" detection erroneously.
+ line = line.replace('\\\\', '')
- if line.count('/*') > line.count('*/'):
- error(filename, linenum, 'readability/multiline_comment', 5,
- 'Complex multi-line /*...*/-style comment found. '
- 'Lint may give bogus warnings. '
- 'Consider replacing these with //-style comments, '
- 'with #if 0...#endif, '
- 'or with more clearly structured multi-line comments.')
+ if line.count('/*') > line.count('*/'):
+ error(
+ filename, linenum, 'readability/multiline_comment', 5,
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. '
+ 'Consider replacing these with //-style comments, '
+ 'with #if 0...#endif, '
+ 'or with more clearly structured multi-line comments.')
- if (line.count('"') - line.count('\\"')) % 2:
- error(filename, linenum, 'readability/multiline_string', 5,
- 'Multi-line string ("...") found. This lint script doesn\'t '
- 'do well with such strings, and may give bogus warnings. '
- 'Use C++11 raw strings or concatenation instead.')
+ if (line.count('"') - line.count('\\"')) % 2:
+ error(
+ filename, linenum, 'readability/multiline_string', 5,
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. '
+ 'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
@@ -2299,14 +2327,13 @@ _THREADING_LIST = (
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
- ('strtok(', 'strtok_r(',
- _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
+ ('strtok(', 'strtok_r(', _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
- )
+)
def CheckPosixThreading(filename, clean_lines, linenum, error):
- """Checks for calls to thread-unsafe functions.
+ """Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
@@ -2320,19 +2347,19 @@ def CheckPosixThreading(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
- for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
- # Additional pattern matching check to confirm that this is the
- # function we are looking for
- if Search(pattern, line):
- error(filename, linenum, 'runtime/threadsafe_fn', 2,
- 'Consider using ' + multithread_safe_func +
- '...) instead of ' + single_thread_func +
- '...) for improved thread safety.')
+ line = clean_lines.elided[linenum]
+ for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
+ # Additional pattern matching check to confirm that this is the
+ # function we are looking for
+ if Search(pattern, line):
+ error(
+ filename, linenum, 'runtime/threadsafe_fn', 2,
+ 'Consider using ' + multithread_safe_func + '...) instead of ' +
+ single_thread_func + '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
- """Checks that VLOG() is only used for defining a logging level.
+ """Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
@@ -2343,20 +2370,21 @@ def CheckVlogArguments(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
- if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
- error(filename, linenum, 'runtime/vlog', 5,
- 'VLOG() should be used with numeric verbosity level. '
- 'Use LOG() if you want symbolic severity levels.')
+ line = clean_lines.elided[linenum]
+ if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
+ error(
+ filename, linenum, 'runtime/vlog', 5,
+ 'VLOG() should be used with numeric verbosity level. '
+ 'Use LOG() if you want symbolic severity levels.')
+
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
-_RE_PATTERN_INVALID_INCREMENT = re.compile(
- r'^\s*\*\w+(\+\+|--);')
+_RE_PATTERN_INVALID_INCREMENT = re.compile(r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
- """Checks for invalid increment *count++.
+ """Checks for invalid increment *count++.
For example, the following function:
void increment_counter(int* count) {
@@ -2371,38 +2399,38 @@ def CheckInvalidIncrement(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
- if _RE_PATTERN_INVALID_INCREMENT.match(line):
- error(filename, linenum, 'runtime/invalid_increment', 5,
- 'Changing pointer instead of value (or unused value of operator*).')
+ line = clean_lines.elided[linenum]
+ if _RE_PATTERN_INVALID_INCREMENT.match(line):
+ error(
+ filename, linenum, 'runtime/invalid_increment', 5,
+ 'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
- if Search(r'^#define', clean_lines[linenum]):
- return True
+ if Search(r'^#define', clean_lines[linenum]):
+ return True
- if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
- return True
+ if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
+ return True
- return False
+ return False
def IsForwardClassDeclaration(clean_lines, linenum):
- return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
+ return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
- """Stores information about a generic block of code."""
+ """Stores information about a generic block of code."""
+ def __init__(self, linenum, seen_open_brace):
+ self.starting_linenum = linenum
+ self.seen_open_brace = seen_open_brace
+ self.open_parentheses = 0
+ self.inline_asm = _NO_ASM
+ self.check_namespace_indentation = False
- def __init__(self, linenum, seen_open_brace):
- self.starting_linenum = linenum
- self.seen_open_brace = seen_open_brace
- self.open_parentheses = 0
- self.inline_asm = _NO_ASM
- self.check_namespace_indentation = False
-
- def CheckBegin(self, filename, clean_lines, linenum, error):
- """Run checks that applies to text up to the opening brace.
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ """Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
@@ -2414,10 +2442,10 @@ class _BlockInfo(object):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- pass
+ pass
- def CheckEnd(self, filename, clean_lines, linenum, error):
- """Run checks that applies to text after the closing brace.
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
@@ -2427,10 +2455,10 @@ class _BlockInfo(object):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- pass
+ pass
- def IsBlockInfo(self):
- """Returns true if this block is a _BlockInfo.
+ def IsBlockInfo(self):
+ """Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
@@ -2438,229 +2466,230 @@ class _BlockInfo(object):
Returns:
True for this class, False for derived classes.
"""
- return self.__class__ == _BlockInfo
+ return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
- """Stores information about an 'extern "C"' block."""
-
- def __init__(self, linenum):
- _BlockInfo.__init__(self, linenum, True)
+ """Stores information about an 'extern "C"' block."""
+ def __init__(self, linenum):
+ _BlockInfo.__init__(self, linenum, True)
class _ClassInfo(_BlockInfo):
- """Stores information about a class."""
+ """Stores information about a class."""
+ def __init__(self, name, class_or_struct, clean_lines, linenum):
+ _BlockInfo.__init__(self, linenum, False)
+ self.name = name
+ self.is_derived = False
+ self.check_namespace_indentation = True
+ if class_or_struct == 'struct':
+ self.access = 'public'
+ self.is_struct = True
+ else:
+ self.access = 'private'
+ self.is_struct = False
- def __init__(self, name, class_or_struct, clean_lines, linenum):
- _BlockInfo.__init__(self, linenum, False)
- self.name = name
- self.is_derived = False
- self.check_namespace_indentation = True
- if class_or_struct == 'struct':
- self.access = 'public'
- self.is_struct = True
- else:
- self.access = 'private'
- self.is_struct = False
+ # Remember initial indentation level for this class. Using raw_lines
+ # here instead of elided to account for leading comments.
+ self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
- # Remember initial indentation level for this class. Using raw_lines here
- # instead of elided to account for leading comments.
- self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
+ # Try to find the end of the class. This will be confused by things
+ # like: class A { } *x = { ...
+ #
+ # But it's still good enough for CheckSectionSpacing.
+ self.last_line = 0
+ depth = 0
+ for i in range(linenum, clean_lines.NumLines()):
+ line = clean_lines.elided[i]
+ depth += line.count('{') - line.count('}')
+ if not depth:
+ self.last_line = i
+ break
- # Try to find the end of the class. This will be confused by things like:
- # class A {
- # } *x = { ...
- #
- # But it's still good enough for CheckSectionSpacing.
- self.last_line = 0
- depth = 0
- for i in range(linenum, clean_lines.NumLines()):
- line = clean_lines.elided[i]
- depth += line.count('{') - line.count('}')
- if not depth:
- self.last_line = i
- break
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ # Look for a bare ':'
+ if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
+ self.is_derived = True
- def CheckBegin(self, filename, clean_lines, linenum, error):
- # Look for a bare ':'
- if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
- self.is_derived = True
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ # If there is a DISALLOW macro, it should appear near the end of
+ # the class.
+ seen_last_thing_in_class = False
+ for i in range(linenum - 1, self.starting_linenum, -1):
+ match = Search(
+ r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\('
+ + self.name + r'\)', clean_lines.elided[i])
+ if match:
+ if seen_last_thing_in_class:
+ error(
+ filename, i, 'readability/constructors', 3,
+ match.group(1) +
+ ' should be the last thing in the class')
+ break
- def CheckEnd(self, filename, clean_lines, linenum, error):
- # If there is a DISALLOW macro, it should appear near the end of
- # the class.
- seen_last_thing_in_class = False
- for i in range(linenum - 1, self.starting_linenum, -1):
- match = Search(
- r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
- self.name + r'\)',
- clean_lines.elided[i])
- if match:
- if seen_last_thing_in_class:
- error(filename, i, 'readability/constructors', 3,
- match.group(1) + ' should be the last thing in the class')
- break
+ if not Match(r'^\s*$', clean_lines.elided[i]):
+ seen_last_thing_in_class = True
- if not Match(r'^\s*$', clean_lines.elided[i]):
- seen_last_thing_in_class = True
-
- # Check that closing brace is aligned with beginning of the class.
- # Only do this if the closing brace is indented by only whitespaces.
- # This means we will not check single-line class definitions.
- indent = Match(r'^( *)\}', clean_lines.elided[linenum])
- if indent and len(indent.group(1)) != self.class_indent:
- if self.is_struct:
- parent = 'struct ' + self.name
- else:
- parent = 'class ' + self.name
- error(filename, linenum, 'whitespace/indent', 3,
- 'Closing brace should be aligned with beginning of %s' % parent)
+ # Check that closing brace is aligned with beginning of the class.
+ # Only do this if the closing brace is indented by only whitespaces.
+ # This means we will not check single-line class definitions.
+ indent = Match(r'^( *)\}', clean_lines.elided[linenum])
+ if indent and len(indent.group(1)) != self.class_indent:
+ if self.is_struct:
+ parent = 'struct ' + self.name
+ else:
+ parent = 'class ' + self.name
+ error(
+ filename, linenum, 'whitespace/indent', 3,
+ 'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
- """Stores information about a namespace."""
+ """Stores information about a namespace."""
+ def __init__(self, name, linenum):
+ _BlockInfo.__init__(self, linenum, False)
+ self.name = name or ''
+ self.check_namespace_indentation = True
- def __init__(self, name, linenum):
- _BlockInfo.__init__(self, linenum, False)
- self.name = name or ''
- self.check_namespace_indentation = True
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Check end of namespace comments."""
+ line = clean_lines.raw_lines[linenum]
- def CheckEnd(self, filename, clean_lines, linenum, error):
- """Check end of namespace comments."""
- line = clean_lines.raw_lines[linenum]
+ # Check how many lines are enclosed in this namespace. Don't issue
+ # a warning for missing namespace comments if there aren't enough
+ # lines. However, do apply checks if there is already an end of
+ # namespace comment and it's incorrect.
+ #
+ # TODO(unknown): We always want to check end of namespace comments
+ # if a namespace is large, but sometimes we also want to apply the
+ # check if a short namespace contained nontrivial things (something
+ # other than forward declarations). There is currently no logic on
+ # deciding what these nontrivial things are, so this check is
+ # triggered by namespace size only, which works most of the time.
+ if (linenum - self.starting_linenum < 10
+ and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
+ return
- # Check how many lines is enclosed in this namespace. Don't issue
- # warning for missing namespace comments if there aren't enough
- # lines. However, do apply checks if there is already an end of
- # namespace comment and it's incorrect.
- #
- # TODO(unknown): We always want to check end of namespace comments
- # if a namespace is large, but sometimes we also want to apply the
- # check if a short namespace contained nontrivial things (something
- # other than forward declarations). There is currently no logic on
- # deciding what these nontrivial things are, so this check is
- # triggered by namespace size only, which works most of the time.
- if (linenum - self.starting_linenum < 10
- and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
- return
-
- # Look for matching comment at end of namespace.
- #
- # Note that we accept C style "/* */" comments for terminating
- # namespaces, so that code that terminate namespaces inside
- # preprocessor macros can be cpplint clean.
- #
- # We also accept stuff like "// end of namespace ." with the
- # period at the end.
- #
- # Besides these, we don't accept anything else, otherwise we might
- # get false negatives when existing comment is a substring of the
- # expected namespace.
- if self.name:
- # Named namespace
- if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
- re.escape(self.name) + r'[\*/\.\\\s]*$'),
- line):
- error(filename, linenum, 'readability/namespace', 5,
- 'Namespace should be terminated with "// namespace %s"' %
- self.name)
- else:
- # Anonymous namespace
- if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
- # If "// namespace anonymous" or "// anonymous namespace (more text)",
- # mention "// anonymous namespace" as an acceptable form
- if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
- error(filename, linenum, 'readability/namespace', 5,
- 'Anonymous namespace should be terminated with "// namespace"'
- ' or "// anonymous namespace"')
+ # Look for matching comment at end of namespace.
+ #
+ # Note that we accept C style "/* */" comments for terminating
+    # namespaces, so that code that terminates namespaces inside
+ # preprocessor macros can be cpplint clean.
+ #
+ # We also accept stuff like "// end of namespace ." with the
+ # period at the end.
+ #
+ # Besides these, we don't accept anything else, otherwise we might
+ # get false negatives when existing comment is a substring of the
+ # expected namespace.
+ if self.name:
+ # Named namespace
+ if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
+ re.escape(self.name) + r'[\*/\.\\\s]*$'), line):
+ error(
+ filename, linenum, 'readability/namespace', 5,
+ 'Namespace should be terminated with "// namespace %s"' %
+ self.name)
else:
- error(filename, linenum, 'readability/namespace', 5,
- 'Anonymous namespace should be terminated with "// namespace"')
+ # Anonymous namespace
+ if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
+ # If "// namespace anonymous" or "// anonymous namespace (more
+ # text)", mention "// anonymous namespace" as an acceptable form
+ if Match(
+ r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b',
+ line):
+ error(
+ filename, linenum, 'readability/namespace', 5,
+ 'Anonymous namespace should be terminated with "// namespace"'
+ ' or "// anonymous namespace"')
+ else:
+ error(
+ filename, linenum, 'readability/namespace', 5,
+ 'Anonymous namespace should be terminated with "// namespace"'
+ )
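
As a rough illustration of which terminators the namespace-end check above accepts, the sketch below applies the same kind of patterns with Python's re; namespace_end_ok is a hypothetical helper and the sample lines are made up:

import re

def namespace_end_ok(line, name):
    """True if 'line' closes namespace 'name' with an acceptable comment."""
    if name:
        # Named namespace: "// namespace foo", "/* namespace foo */" and
        # trailing "." variants are accepted.
        return bool(re.match(
            r'^\s*};*\s*(//|/\*).*\bnamespace\s+' + re.escape(name) +
            r'[\*/\.\\\s]*$', line))
    # Anonymous namespace: any "// namespace"-style comment without a name.
    return bool(re.match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line))

for sample in ('}  // namespace mylib',
               '}  /* namespace mylib */',
               '}  // namespace',   # wrong terminator for a named namespace
               '}'):                # no comment at all
    print(sample, '->', namespace_end_ok(sample, 'mylib'))
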
class _PreprocessorInfo(object):
- """Stores checkpoints of nesting stacks when #if/#else is seen."""
+ """Stores checkpoints of nesting stacks when #if/#else is seen."""
+ def __init__(self, stack_before_if):
+ # The entire nesting stack before #if
+ self.stack_before_if = stack_before_if
- def __init__(self, stack_before_if):
- # The entire nesting stack before #if
- self.stack_before_if = stack_before_if
+ # The entire nesting stack up to #else
+ self.stack_before_else = []
- # The entire nesting stack up to #else
- self.stack_before_else = []
-
- # Whether we have already seen #else or #elif
- self.seen_else = False
+ # Whether we have already seen #else or #elif
+ self.seen_else = False
class NestingState(object):
- """Holds states related to parsing braces."""
+ """Holds states related to parsing braces."""
+ def __init__(self):
+ # Stack for tracking all braces. An object is pushed whenever we
+ # see a "{", and popped when we see a "}". Only 3 types of
+ # objects are possible:
+ # - _ClassInfo: a class or struct.
+ # - _NamespaceInfo: a namespace.
+ # - _BlockInfo: some other type of block.
+ self.stack = []
- def __init__(self):
- # Stack for tracking all braces. An object is pushed whenever we
- # see a "{", and popped when we see a "}". Only 3 types of
- # objects are possible:
- # - _ClassInfo: a class or struct.
- # - _NamespaceInfo: a namespace.
- # - _BlockInfo: some other type of block.
- self.stack = []
+ # Top of the previous stack before each Update().
+ #
+ # Because the nesting_stack is updated at the end of each line, we
+ # had to do some convoluted checks to find out what is the current
+ # scope at the beginning of the line. This check is simplified by
+ # saving the previous top of nesting stack.
+ #
+ # We could save the full stack, but we only need the top. Copying
+ # the full nesting stack would slow down cpplint by ~10%.
+ self.previous_stack_top = []
- # Top of the previous stack before each Update().
- #
- # Because the nesting_stack is updated at the end of each line, we
- # had to do some convoluted checks to find out what is the current
- # scope at the beginning of the line. This check is simplified by
- # saving the previous top of nesting stack.
- #
- # We could save the full stack, but we only need the top. Copying
- # the full nesting stack would slow down cpplint by ~10%.
- self.previous_stack_top = []
+ # Stack of _PreprocessorInfo objects.
+ self.pp_stack = []
- # Stack of _PreprocessorInfo objects.
- self.pp_stack = []
-
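
The shallow-copy note above can be seen with a toy stack: because the checker only pushes and pops block objects and never rewrites them in place, remembering a reference to the old top is enough. A small sketch with a hypothetical _Block class:

class _Block(object):
    def __init__(self, label):
        self.label = label

stack = [_Block('namespace'), _Block('class')]

# Cheap: remember only the current top by reference, not a deepcopy.
previous_stack_top = stack[-1] if stack else None

# Later pushes and pops never mutate the remembered object, so the
# reference stays valid without copying the whole stack.
stack.append(_Block('block'))
stack.pop()
print(previous_stack_top.label)  # still 'class'
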
- def SeenOpenBrace(self):
- """Check if we have seen the opening brace for the innermost block.
+ def SeenOpenBrace(self):
+ """Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
- return (not self.stack) or self.stack[-1].seen_open_brace
+ return (not self.stack) or self.stack[-1].seen_open_brace
- def InNamespaceBody(self):
- """Check if we are currently one level inside a namespace body.
+ def InNamespaceBody(self):
+ """Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
- return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
+ return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
- def InExternC(self):
- """Check if we are currently one level inside an 'extern "C"' block.
+ def InExternC(self):
+ """Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
- return self.stack and isinstance(self.stack[-1], _ExternCInfo)
+ return self.stack and isinstance(self.stack[-1], _ExternCInfo)
- def InClassDeclaration(self):
- """Check if we are currently one level inside a class or struct declaration.
+ def InClassDeclaration(self):
+ """Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
- return self.stack and isinstance(self.stack[-1], _ClassInfo)
+ return self.stack and isinstance(self.stack[-1], _ClassInfo)
- def InAsmBlock(self):
- """Check if we are currently one level inside an inline ASM block.
+ def InAsmBlock(self):
+ """Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
- return self.stack and self.stack[-1].inline_asm != _NO_ASM
+ return self.stack and self.stack[-1].inline_asm != _NO_ASM
- def InTemplateArgumentList(self, clean_lines, linenum, pos):
- """Check if current position is inside template argument list.
+ def InTemplateArgumentList(self, clean_lines, linenum, pos):
+ """Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -2669,50 +2698,52 @@ class NestingState(object):
Returns:
True if (linenum, pos) is inside template arguments.
"""
- while linenum < clean_lines.NumLines():
- # Find the earliest character that might indicate a template argument
- line = clean_lines.elided[linenum]
- match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
- if not match:
- linenum += 1
- pos = 0
- continue
- token = match.group(1)
- pos += len(match.group(0))
+ while linenum < clean_lines.NumLines():
+ # Find the earliest character that might indicate a template
+ # argument
+ line = clean_lines.elided[linenum]
+ match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
+ if not match:
+ linenum += 1
+ pos = 0
+ continue
+ token = match.group(1)
+ pos += len(match.group(0))
- # These things do not look like template argument list:
- # class Suspect {
- # class Suspect x; }
- if token in ('{', '}', ';'): return False
+ # These things do not look like template argument list:
+ # class Suspect {
+ # class Suspect x; }
+ if token in ('{', '}', ';'): return False
- # These things look like template argument list:
-    #   template <class Suspect>
-    #   template <class Suspect = default_value>
-    #   template <class Suspect[]>
-    #   template <class Suspect...>
- if token in ('>', '=', '[', ']', '.'): return True
+ # These things look like template argument list:
+            #   template <class Suspect>
+            #   template <class Suspect = default_value>
+            #   template <class Suspect[]>
+            #   template <class Suspect...>
+ if token in ('>', '=', '[', ']', '.'): return True
- # Check if token is an unmatched '<'.
- # If not, move on to the next character.
- if token != '<':
- pos += 1
- if pos >= len(line):
- linenum += 1
- pos = 0
- continue
+ # Check if token is an unmatched '<'.
+ # If not, move on to the next character.
+ if token != '<':
+ pos += 1
+ if pos >= len(line):
+ linenum += 1
+ pos = 0
+ continue
- # We can't be sure if we just find a single '<', and need to
- # find the matching '>'.
- (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
- if end_pos < 0:
- # Not sure if template argument list or syntax error in file
+ # We can't be sure if we just find a single '<', and need to
+ # find the matching '>'.
+ (_, end_line, end_pos) = CloseExpression(clean_lines, linenum,
+ pos - 1)
+ if end_pos < 0:
+ # Not sure if template argument list or syntax error in file
+ return False
+ linenum = end_line
+ pos = end_pos
return False
- linenum = end_line
- pos = end_pos
- return False
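
A rough single-line approximation of the scan above: walk the remainder of the line, stop at the first structurally interesting character, and treat template-ish punctuation as evidence of a template argument list. looks_like_template_args is a hypothetical helper; the real method also pairs '<' with its matching '>' via CloseExpression and can continue onto later lines:

import re

def looks_like_template_args(line, pos):
    """Very rough single-line version: True if (line, pos) appears to sit
    inside a template argument list."""
    while pos < len(line):
        match = re.match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
        if not match:
            return False
        token = match.group(1)
        pos += len(match.group(0))
        if token in ('{', '}', ';'):            # statement-ish: not template args
            return False
        if token in ('>', '=', '[', ']', '.'):  # template-ish punctuation
            return True
        # Otherwise (including a lone '<') keep scanning; the real code
        # pairs the '<' with its closing '>' before deciding.
    return False

print(looks_like_template_args('template <class T, class U = int> class Foo', 10))
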
- def UpdatePreprocessor(self, line):
- """Update preprocessor stack.
+ def UpdatePreprocessor(self, line):
+ """Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
@@ -2732,44 +2763,46 @@ class NestingState(object):
Args:
line: current line to check.
"""
- if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
- # Beginning of #if block, save the nesting stack here. The saved
- # stack will allow us to restore the parsing state in the #else case.
- self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
- elif Match(r'^\s*#\s*(else|elif)\b', line):
- # Beginning of #else block
- if self.pp_stack:
- if not self.pp_stack[-1].seen_else:
- # This is the first #else or #elif block. Remember the
- # whole nesting stack up to this point. This is what we
- # keep after the #endif.
- self.pp_stack[-1].seen_else = True
- self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
+ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
+ # Beginning of #if block, save the nesting stack here. The saved
+ # stack will allow us to restore the parsing state in the #else
+ # case.
+ self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
+ elif Match(r'^\s*#\s*(else|elif)\b', line):
+ # Beginning of #else block
+ if self.pp_stack:
+ if not self.pp_stack[-1].seen_else:
+ # This is the first #else or #elif block. Remember the
+ # whole nesting stack up to this point. This is what we
+ # keep after the #endif.
+ self.pp_stack[-1].seen_else = True
+ self.pp_stack[-1].stack_before_else = copy.deepcopy(
+ self.stack)
- # Restore the stack to how it was before the #if
- self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
- else:
- # TODO(unknown): unexpected #else, issue warning?
- pass
- elif Match(r'^\s*#\s*endif\b', line):
- # End of #if or #else blocks.
- if self.pp_stack:
- # If we saw an #else, we will need to restore the nesting
- # stack to its former state before the #else, otherwise we
- # will just continue from where we left off.
- if self.pp_stack[-1].seen_else:
- # Here we can just use a shallow copy since we are the last
- # reference to it.
- self.stack = self.pp_stack[-1].stack_before_else
- # Drop the corresponding #if
- self.pp_stack.pop()
- else:
- # TODO(unknown): unexpected #endif, issue warning?
- pass
+ # Restore the stack to how it was before the #if
+ self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
+ else:
+ # TODO(unknown): unexpected #else, issue warning?
+ pass
+ elif Match(r'^\s*#\s*endif\b', line):
+ # End of #if or #else blocks.
+ if self.pp_stack:
+ # If we saw an #else, we will need to restore the nesting
+ # stack to its former state before the #else, otherwise we
+ # will just continue from where we left off.
+ if self.pp_stack[-1].seen_else:
+ # Here we can just use a shallow copy since we are the last
+ # reference to it.
+ self.stack = self.pp_stack[-1].stack_before_else
+ # Drop the corresponding #if
+ self.pp_stack.pop()
+ else:
+ # TODO(unknown): unexpected #endif, issue warning?
+ pass
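
The #if/#else/#endif bookkeeping above amounts to: snapshot the stack at #if, restore that snapshot when #else starts, and after #endif keep whatever the #if arm produced. A hedged sketch with hypothetical names (PPFrame, update_preprocessor) and a toy stack of strings:

import copy
import re

class PPFrame(object):
    def __init__(self, stack_before_if):
        self.stack_before_if = stack_before_if
        self.stack_before_else = []
        self.seen_else = False

def update_preprocessor(line, stack, pp_stack):
    """Mimic the save/restore dance for one preprocessor line."""
    if re.match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
        pp_stack.append(PPFrame(copy.deepcopy(stack)))
    elif re.match(r'^\s*#\s*(else|elif)\b', line) and pp_stack:
        if not pp_stack[-1].seen_else:
            pp_stack[-1].seen_else = True
            pp_stack[-1].stack_before_else = copy.deepcopy(stack)
        # Parse the #else branch starting from the pre-#if state.
        stack[:] = copy.deepcopy(pp_stack[-1].stack_before_if)
    elif re.match(r'^\s*#\s*endif\b', line) and pp_stack:
        frame = pp_stack.pop()
        if frame.seen_else:
            # Keep whatever the #if branch produced, not the #else branch.
            stack[:] = frame.stack_before_else

stack, pp = ['namespace'], []
for src in ('#ifdef SWIG', '{', '#else', '#endif'):
    if src == '{':
        stack.append('block')   # pretend the #if arm opened a block
    else:
        update_preprocessor(src, stack, pp)
print(stack)   # ['namespace', 'block'] -- the #if arm's nesting is kept
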
- # TODO(unknown): Update() is too long, but we will refactor later.
- def Update(self, filename, clean_lines, linenum, error):
- """Update nesting state with current line.
+ # TODO(unknown): Update() is too long, but we will refactor later.
+ def Update(self, filename, clean_lines, linenum, error):
+ """Update nesting state with current line.
Args:
filename: The name of the current file.
@@ -2777,198 +2810,201 @@ class NestingState(object):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Remember top of the previous nesting stack.
- #
- # The stack is always pushed/popped and not modified in place, so
- # we can just do a shallow copy instead of copy.deepcopy. Using
- # deepcopy would slow down cpplint by ~28%.
- if self.stack:
- self.previous_stack_top = self.stack[-1]
- else:
- self.previous_stack_top = None
-
- # Update pp_stack
- self.UpdatePreprocessor(line)
-
- # Count parentheses. This is to avoid adding struct arguments to
- # the nesting stack.
- if self.stack:
- inner_block = self.stack[-1]
- depth_change = line.count('(') - line.count(')')
- inner_block.open_parentheses += depth_change
-
- # Also check if we are starting or ending an inline assembly block.
- if inner_block.inline_asm in (_NO_ASM, _END_ASM):
- if (depth_change != 0 and
- inner_block.open_parentheses == 1 and
- _MATCH_ASM.match(line)):
- # Enter assembly block
- inner_block.inline_asm = _INSIDE_ASM
- else:
- # Not entering assembly block. If previous line was _END_ASM,
- # we will now shift to _NO_ASM state.
- inner_block.inline_asm = _NO_ASM
- elif (inner_block.inline_asm == _INSIDE_ASM and
- inner_block.open_parentheses == 0):
- # Exit assembly block
- inner_block.inline_asm = _END_ASM
-
- # Consume namespace declaration at the beginning of the line. Do
- # this in a loop so that we catch same line declarations like this:
- # namespace proto2 { namespace bridge { class MessageSet; } }
- while True:
- # Match start of namespace. The "\b\s*" below catches namespace
- # declarations even if it weren't followed by a whitespace, this
- # is so that we don't confuse our namespace checker. The
- # missing spaces will be flagged by CheckSpacing.
- namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
- if not namespace_decl_match:
- break
-
- new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
- self.stack.append(new_namespace)
-
- line = namespace_decl_match.group(2)
- if line.find('{') != -1:
- new_namespace.seen_open_brace = True
- line = line[line.find('{') + 1:]
-
- # Look for a class declaration in whatever is left of the line
- # after parsing namespaces. The regexp accounts for decorated classes
- # such as in:
- # class LOCKABLE API Object {
- # };
- class_decl_match = Match(
- r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
- r'(class|struct)\s+(?:[A-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
- r'(.*)$', line)
- if (class_decl_match and
- (not self.stack or self.stack[-1].open_parentheses == 0)):
- # We do not want to accept classes that are actually template arguments:
-      #   template <class Ignore1,
-      #             class Ignore2 = Default<args>,
-      #             template <typename> class Ignore3>
- # void Function() {};
- #
- # To avoid template argument cases, we scan forward and look for
- # an unmatched '>'. If we see one, assume we are inside a
- # template argument list.
- end_declaration = len(class_decl_match.group(1))
- if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
- self.stack.append(_ClassInfo(
- class_decl_match.group(3), class_decl_match.group(2),
- clean_lines, linenum))
- line = class_decl_match.group(4)
-
- # If we have not yet seen the opening brace for the innermost block,
- # run checks here.
- if not self.SeenOpenBrace():
- self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
-
- # Update access control if we are inside a class/struct
- if self.stack and isinstance(self.stack[-1], _ClassInfo):
- classinfo = self.stack[-1]
- access_match = Match(
- r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
- r':(?:[^:]|$)',
- line)
- if access_match:
- classinfo.access = access_match.group(2)
-
- # Check that access keywords are indented +1 space. Skip this
- # check if the keywords are not preceded by whitespaces.
- indent = access_match.group(1)
- if (len(indent) != classinfo.class_indent + 1 and
- Match(r'^\s*$', indent)):
- if classinfo.is_struct:
- parent = 'struct ' + classinfo.name
- else:
- parent = 'class ' + classinfo.name
- slots = ''
- if access_match.group(3):
- slots = access_match.group(3)
- error(filename, linenum, 'whitespace/indent', 3,
- '%s%s: should be indented +1 space inside %s' % (
- access_match.group(2), slots, parent))
-
- # Consume braces or semicolons from what's left of the line
- while True:
- # Match first brace, semicolon, or closed parenthesis.
- matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
- if not matched:
- break
-
- token = matched.group(1)
- if token == '{':
- # If namespace or class hasn't seen a opening brace yet, mark
- # namespace/class head as complete. Push a new block onto the
- # stack otherwise.
- if not self.SeenOpenBrace():
- self.stack[-1].seen_open_brace = True
- elif Match(r'^extern\s*"[^"]*"\s*\{', line):
- self.stack.append(_ExternCInfo(linenum))
- else:
- self.stack.append(_BlockInfo(linenum, True))
- if _MATCH_ASM.match(line):
- self.stack[-1].inline_asm = _BLOCK_ASM
-
- elif token == ';' or token == ')':
- # If we haven't seen an opening brace yet, but we already saw
- # a semicolon, this is probably a forward declaration. Pop
- # the stack for these.
+ # Remember top of the previous nesting stack.
#
- # Similarly, if we haven't seen an opening brace yet, but we
- # already saw a closing parenthesis, then these are probably
- # function arguments with extra "class" or "struct" keywords.
- # Also pop these stack for these.
- if not self.SeenOpenBrace():
- self.stack.pop()
- else: # token == '}'
- # Perform end of block checks and pop the stack.
+ # The stack is always pushed/popped and not modified in place, so
+ # we can just do a shallow copy instead of copy.deepcopy. Using
+ # deepcopy would slow down cpplint by ~28%.
if self.stack:
- self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
- self.stack.pop()
- line = matched.group(2)
+ self.previous_stack_top = self.stack[-1]
+ else:
+ self.previous_stack_top = None
- def InnermostClass(self):
- """Get class info on the top of the stack.
+ # Update pp_stack
+ self.UpdatePreprocessor(line)
+
+ # Count parentheses. This is to avoid adding struct arguments to
+ # the nesting stack.
+ if self.stack:
+ inner_block = self.stack[-1]
+ depth_change = line.count('(') - line.count(')')
+ inner_block.open_parentheses += depth_change
+
+ # Also check if we are starting or ending an inline assembly block.
+ if inner_block.inline_asm in (_NO_ASM, _END_ASM):
+ if (depth_change != 0 and inner_block.open_parentheses == 1
+ and _MATCH_ASM.match(line)):
+ # Enter assembly block
+ inner_block.inline_asm = _INSIDE_ASM
+ else:
+ # Not entering assembly block. If previous line was
+ # _END_ASM, we will now shift to _NO_ASM state.
+ inner_block.inline_asm = _NO_ASM
+ elif (inner_block.inline_asm == _INSIDE_ASM
+ and inner_block.open_parentheses == 0):
+ # Exit assembly block
+ inner_block.inline_asm = _END_ASM
+
+ # Consume namespace declaration at the beginning of the line. Do
+ # this in a loop so that we catch same line declarations like this:
+ # namespace proto2 { namespace bridge { class MessageSet; } }
+ while True:
+ # Match start of namespace. The "\b\s*" below catches namespace
+            # declarations even if they aren't followed by whitespace, so that
+            # we don't confuse our namespace checker. The
+ # missing spaces will be flagged by CheckSpacing.
+ namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$',
+ line)
+ if not namespace_decl_match:
+ break
+
+ new_namespace = _NamespaceInfo(namespace_decl_match.group(1),
+ linenum)
+ self.stack.append(new_namespace)
+
+ line = namespace_decl_match.group(2)
+ if line.find('{') != -1:
+ new_namespace.seen_open_brace = True
+ line = line[line.find('{') + 1:]
+
+ # Look for a class declaration in whatever is left of the line
+ # after parsing namespaces. The regexp accounts for decorated classes
+ # such as in:
+ # class LOCKABLE API Object {
+ # };
+ class_decl_match = Match(
+ r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
+ r'(class|struct)\s+(?:[A-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
+ r'(.*)$', line)
+ if (class_decl_match
+ and (not self.stack or self.stack[-1].open_parentheses == 0)):
+ # We do not want to accept classes that are actually template
+            # arguments: template <class Ignore1, class Ignore2 = Default<args>,
+            # template <typename> class Ignore3> void Function() {};
+ #
+ # To avoid template argument cases, we scan forward and look for
+ # an unmatched '>'. If we see one, assume we are inside a
+ # template argument list.
+ end_declaration = len(class_decl_match.group(1))
+ if not self.InTemplateArgumentList(clean_lines, linenum,
+ end_declaration):
+ self.stack.append(
+ _ClassInfo(class_decl_match.group(3),
+ class_decl_match.group(2), clean_lines, linenum))
+ line = class_decl_match.group(4)
+
+ # If we have not yet seen the opening brace for the innermost block,
+ # run checks here.
+ if not self.SeenOpenBrace():
+ self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
+
+ # Update access control if we are inside a class/struct
+ if self.stack and isinstance(self.stack[-1], _ClassInfo):
+ classinfo = self.stack[-1]
+ access_match = Match(
+ r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
+ r':(?:[^:]|$)', line)
+ if access_match:
+ classinfo.access = access_match.group(2)
+
+ # Check that access keywords are indented +1 space. Skip this
+ # check if the keywords are not preceded by whitespaces.
+ indent = access_match.group(1)
+ if (len(indent) != classinfo.class_indent + 1
+ and Match(r'^\s*$', indent)):
+ if classinfo.is_struct:
+ parent = 'struct ' + classinfo.name
+ else:
+ parent = 'class ' + classinfo.name
+ slots = ''
+ if access_match.group(3):
+ slots = access_match.group(3)
+ error(
+ filename, linenum, 'whitespace/indent', 3,
+ '%s%s: should be indented +1 space inside %s' %
+ (access_match.group(2), slots, parent))
+
+ # Consume braces or semicolons from what's left of the line
+ while True:
+ # Match first brace, semicolon, or closed parenthesis.
+ matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
+ if not matched:
+ break
+
+ token = matched.group(1)
+ if token == '{':
+                # If namespace or class hasn't seen an opening brace yet, mark
+ # namespace/class head as complete. Push a new block onto the
+ # stack otherwise.
+ if not self.SeenOpenBrace():
+ self.stack[-1].seen_open_brace = True
+ elif Match(r'^extern\s*"[^"]*"\s*\{', line):
+ self.stack.append(_ExternCInfo(linenum))
+ else:
+ self.stack.append(_BlockInfo(linenum, True))
+ if _MATCH_ASM.match(line):
+ self.stack[-1].inline_asm = _BLOCK_ASM
+
+ elif token == ';' or token == ')':
+ # If we haven't seen an opening brace yet, but we already saw
+ # a semicolon, this is probably a forward declaration. Pop
+ # the stack for these.
+ #
+ # Similarly, if we haven't seen an opening brace yet, but we
+ # already saw a closing parenthesis, then these are probably
+ # function arguments with extra "class" or "struct" keywords.
+                # Also pop the stack for these.
+ if not self.SeenOpenBrace():
+ self.stack.pop()
+ else: # token == '}'
+ # Perform end of block checks and pop the stack.
+ if self.stack:
+ self.stack[-1].CheckEnd(filename, clean_lines, linenum,
+ error)
+ self.stack.pop()
+ line = matched.group(2)
+
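
The namespace-consuming loop near the top of Update() can be viewed in isolation: repeatedly peel a leading "namespace NAME" off the line so that one-line declarations yield one entry per namespace. consume_namespaces below is a hypothetical standalone helper using Python's re, not the real _NamespaceInfo machinery:

import re

def consume_namespaces(line):
    """Return the namespace names declared at the start of 'line', in order."""
    names = []
    while True:
        m = re.match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
        if not m:
            break
        names.append(m.group(1) or '<anonymous>')
        line = m.group(2)
        # Step past the opening brace so a nested declaration can match next.
        if '{' in line:
            line = line[line.find('{') + 1:]
    return names

print(consume_namespaces('namespace proto2 { namespace bridge { class MessageSet; } }'))
# ['proto2', 'bridge']
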
+ def InnermostClass(self):
+ """Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
- for i in range(len(self.stack), 0, -1):
- classinfo = self.stack[i - 1]
- if isinstance(classinfo, _ClassInfo):
- return classinfo
- return None
+ for i in range(len(self.stack), 0, -1):
+ classinfo = self.stack[i - 1]
+ if isinstance(classinfo, _ClassInfo):
+ return classinfo
+ return None
- def CheckCompletedBlocks(self, filename, error):
- """Checks that all classes and namespaces have been completely parsed.
+ def CheckCompletedBlocks(self, filename, error):
+ """Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
- # Note: This test can result in false positives if #ifdef constructs
- # get in the way of brace matching. See the testBuildClass test in
- # cpplint_unittest.py for an example of this.
- for obj in self.stack:
- if isinstance(obj, _ClassInfo):
- error(filename, obj.starting_linenum, 'build/class', 5,
- 'Failed to find complete declaration of class %s' %
- obj.name)
- elif isinstance(obj, _NamespaceInfo):
- error(filename, obj.starting_linenum, 'build/namespaces', 5,
- 'Failed to find complete declaration of namespace %s' %
- obj.name)
+ # Note: This test can result in false positives if #ifdef constructs
+ # get in the way of brace matching. See the testBuildClass test in
+ # cpplint_unittest.py for an example of this.
+ for obj in self.stack:
+ if isinstance(obj, _ClassInfo):
+ error(
+ filename, obj.starting_linenum, 'build/class', 5,
+ 'Failed to find complete declaration of class %s' %
+ obj.name)
+ elif isinstance(obj, _NamespaceInfo):
+ error(
+ filename, obj.starting_linenum, 'build/namespaces', 5,
+ 'Failed to find complete declaration of namespace %s' %
+ obj.name)
-def CheckForNonStandardConstructs(filename, clean_lines, linenum,
- nesting_state, error):
- r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state,
+ error):
+ r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
@@ -2995,139 +3031,145 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
filename, line number, error level, and message
"""
- # Remove comments from the line, but leave in strings for now.
- line = clean_lines.lines[linenum]
+ # Remove comments from the line, but leave in strings for now.
+ line = clean_lines.lines[linenum]
- if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
- error(filename, linenum, 'runtime/printf_format', 3,
- '%q in format strings is deprecated. Use %ll instead.')
+ if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ error(filename, linenum, 'runtime/printf_format', 3,
+ '%q in format strings is deprecated. Use %ll instead.')
- if Search(r'printf\s*\(.*".*%\d+\$', line):
- error(filename, linenum, 'runtime/printf_format', 2,
- '%N$ formats are unconventional. Try rewriting to avoid them.')
+ if Search(r'printf\s*\(.*".*%\d+\$', line):
+ error(filename, linenum, 'runtime/printf_format', 2,
+ '%N$ formats are unconventional. Try rewriting to avoid them.')
- # Remove escaped backslashes before looking for undefined escapes.
- line = line.replace('\\\\', '')
+ # Remove escaped backslashes before looking for undefined escapes.
+ line = line.replace('\\\\', '')
- if Search(r'("|\').*\\(%|\[|\(|{)', line):
- error(filename, linenum, 'build/printf_format', 3,
- '%, [, (, and { are undefined character escapes. Unescape them.')
+ if Search(r'("|\').*\\(%|\[|\(|{)', line):
+ error(
+ filename, linenum, 'build/printf_format', 3,
+ '%, [, (, and { are undefined character escapes. Unescape them.')
- # For the rest, work with both comments and strings removed.
- line = clean_lines.elided[linenum]
+ # For the rest, work with both comments and strings removed.
+ line = clean_lines.elided[linenum]
- if Search(r'\b(const|volatile|void|char|short|int|long'
+ if Search(
+ r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
- r'\s+(register|static|extern|typedef)\b',
- line):
- error(filename, linenum, 'build/storage_class', 5,
- 'Storage-class specifier (static, extern, typedef, etc) should be '
- 'at the beginning of the declaration.')
+ r'\s+(register|static|extern|typedef)\b', line):
+ error(
+ filename, linenum, 'build/storage_class', 5,
+ 'Storage-class specifier (static, extern, typedef, etc) should be '
+ 'at the beginning of the declaration.')
- if Match(r'\s*#\s*endif\s*[^/\s]+', line):
- error(filename, linenum, 'build/endif_comment', 5,
- 'Uncommented text after #endif is non-standard. Use a comment.')
+ if Match(r'\s*#\s*endif\s*[^/\s]+', line):
+ error(filename, linenum, 'build/endif_comment', 5,
+ 'Uncommented text after #endif is non-standard. Use a comment.')
- if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
- error(filename, linenum, 'build/forward_decl', 5,
- 'Inner-style forward declarations are invalid. Remove this line.')
+ if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+ error(
+ filename, linenum, 'build/forward_decl', 5,
+ 'Inner-style forward declarations are invalid. Remove this line.')
- if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
- line):
- error(filename, linenum, 'build/deprecated', 3,
-          '>? and <? (max and min) operators are non-standard and deprecated.')
+ if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
+ line):
+ error(
+ filename, linenum, 'build/deprecated', 3,
+        '>? and <? (max and min) operators are non-standard and deprecated.'
+ )
- if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
- # TODO(unknown): Could it be expanded safely to arbitrary references,
- # without triggering too many false positives? The first
- # attempt triggered 5 warnings for mostly benign code in the regtest, hence
- # the restriction.
- # Here's the original regexp, for the reference:
- # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
- # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
- error(filename, linenum, 'runtime/member_string_references', 2,
- 'const string& members are dangerous. It is much better to use '
- 'alternatives, such as pointers or simple constants.')
+ if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
+ # TODO(unknown): Could it be expanded safely to arbitrary references,
+ # without triggering too many false positives? The first
+ # attempt triggered 5 warnings for mostly benign code in the regtest,
+ # hence the restriction. Here's the original regexp, for the reference:
+ # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' r'\s*const\s*' +
+ # type_name + '\s*&\s*\w+\s*;'
+ error(
+ filename, linenum, 'runtime/member_string_references', 2,
+ 'const string& members are dangerous. It is much better to use '
+ 'alternatives, such as pointers or simple constants.')
- # Everything else in this function operates on class declarations.
- # Return early if the top of the nesting stack is not a class, or if
- # the class head is not completed yet.
- classinfo = nesting_state.InnermostClass()
- if not classinfo or not classinfo.seen_open_brace:
- return
+ # Everything else in this function operates on class declarations.
+ # Return early if the top of the nesting stack is not a class, or if
+ # the class head is not completed yet.
+ classinfo = nesting_state.InnermostClass()
+ if not classinfo or not classinfo.seen_open_brace:
+ return
- # The class may have been declared with namespace or classname qualifiers.
- # The constructor and destructor will not have those qualifiers.
- base_classname = classinfo.name.split('::')[-1]
+ # The class may have been declared with namespace or classname qualifiers.
+ # The constructor and destructor will not have those qualifiers.
+ base_classname = classinfo.name.split('::')[-1]
- # Look for single-argument constructors that aren't marked explicit.
- # Technically a valid construct, but against style.
- explicit_constructor_match = Match(
- r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
- r'(?:(?:inline|constexpr)\s+)*%s\s*'
- r'\(((?:[^()]|\([^()]*\))*)\)'
- % re.escape(base_classname),
- line)
+ # Look for single-argument constructors that aren't marked explicit.
+ # Technically a valid construct, but against style.
+ explicit_constructor_match = Match(
+ r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
+ r'(?:(?:inline|constexpr)\s+)*%s\s*'
+ r'\(((?:[^()]|\([^()]*\))*)\)' % re.escape(base_classname), line)
- if explicit_constructor_match:
- is_marked_explicit = explicit_constructor_match.group(1)
+ if explicit_constructor_match:
+ is_marked_explicit = explicit_constructor_match.group(1)
- if not explicit_constructor_match.group(2):
- constructor_args = []
- else:
- constructor_args = explicit_constructor_match.group(2).split(',')
+ if not explicit_constructor_match.group(2):
+ constructor_args = []
+ else:
+ constructor_args = explicit_constructor_match.group(2).split(',')
- # collapse arguments so that commas in template parameter lists and function
- # argument parameter lists don't split arguments in two
- i = 0
- while i < len(constructor_args):
- constructor_arg = constructor_args[i]
- while (constructor_arg.count('<') > constructor_arg.count('>') or
- constructor_arg.count('(') > constructor_arg.count(')')):
- constructor_arg += ',' + constructor_args[i + 1]
- del constructor_args[i + 1]
- constructor_args[i] = constructor_arg
- i += 1
+ # collapse arguments so that commas in template parameter lists and
+ # function argument parameter lists don't split arguments in two
+ i = 0
+ while i < len(constructor_args):
+ constructor_arg = constructor_args[i]
+ while (constructor_arg.count('<') > constructor_arg.count('>')
+ or constructor_arg.count('(') > constructor_arg.count(')')):
+ constructor_arg += ',' + constructor_args[i + 1]
+ del constructor_args[i + 1]
+ constructor_args[i] = constructor_arg
+ i += 1
- defaulted_args = [arg for arg in constructor_args if '=' in arg]
- noarg_constructor = (not constructor_args or # empty arg list
- # 'void' arg specifier
- (len(constructor_args) == 1 and
- constructor_args[0].strip() == 'void'))
- onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
- not noarg_constructor) or
- # all but at most one arg defaulted
- (len(constructor_args) >= 1 and
- not noarg_constructor and
- len(defaulted_args) >= len(constructor_args) - 1))
- initializer_list_constructor = bool(
- onearg_constructor and
- Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
- copy_constructor = bool(
- onearg_constructor and
- Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
- % re.escape(base_classname), constructor_args[0].strip()))
+ defaulted_args = [arg for arg in constructor_args if '=' in arg]
+ noarg_constructor = (
+ not constructor_args or # empty arg list
+ # 'void' arg specifier
+ (len(constructor_args) == 1
+ and constructor_args[0].strip() == 'void'))
+ onearg_constructor = (
+ (
+ len(constructor_args) == 1 and # exactly one arg
+ not noarg_constructor) or
+ # all but at most one arg defaulted
+ (len(constructor_args) >= 1 and not noarg_constructor
+ and len(defaulted_args) >= len(constructor_args) - 1))
+ initializer_list_constructor = bool(
+ onearg_constructor
+ and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
+ copy_constructor = bool(onearg_constructor and Match(
+ r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' %
+ re.escape(base_classname), constructor_args[0].strip()))
- if (not is_marked_explicit and
- onearg_constructor and
- not initializer_list_constructor and
- not copy_constructor):
- if defaulted_args:
- error(filename, linenum, 'runtime/explicit', 5,
- 'Constructors callable with one argument '
- 'should be marked explicit.')
- else:
- error(filename, linenum, 'runtime/explicit', 5,
- 'Single-parameter constructors should be marked explicit.')
- elif is_marked_explicit and not onearg_constructor:
- if noarg_constructor:
- error(filename, linenum, 'runtime/explicit', 5,
- 'Zero-parameter constructors should not be marked explicit.')
+ if (not is_marked_explicit and onearg_constructor
+ and not initializer_list_constructor and not copy_constructor):
+ if defaulted_args:
+ error(
+ filename, linenum, 'runtime/explicit', 5,
+ 'Constructors callable with one argument '
+ 'should be marked explicit.')
+ else:
+ error(
+ filename, linenum, 'runtime/explicit', 5,
+ 'Single-parameter constructors should be marked explicit.')
+ elif is_marked_explicit and not onearg_constructor:
+ if noarg_constructor:
+ error(
+ filename, linenum, 'runtime/explicit', 5,
+ 'Zero-parameter constructors should not be marked explicit.'
+ )
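
The explicit-constructor classification above first re-joins argument fragments whose commas sit inside unbalanced '<...>' or '(...)', then counts arguments and defaults. A sketch of just that collapsing step, with a hypothetical collapse_ctor_args helper:

def collapse_ctor_args(arglist):
    """Split a constructor argument string on commas, then re-join pieces whose
    '<'/'(' counts show the comma sat inside template args or a nested call."""
    args = arglist.split(',') if arglist else []
    i = 0
    while i < len(args):
        arg = args[i]
        while (arg.count('<') > arg.count('>')
               or arg.count('(') > arg.count(')')):
            arg += ',' + args[i + 1]
            del args[i + 1]
        args[i] = arg
        i += 1
    return [a.strip() for a in args]

args = collapse_ctor_args('const std::map<int, string>& m, int x = 0')
print(args)                                # ['const std::map<int, string>& m', 'int x = 0']
print(len([a for a in args if '=' in a]))  # one defaulted argument
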
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
- """Checks for the correctness of various spacing around function calls.
+ """Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
@@ -3135,76 +3177,78 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Since function calls often occur inside if/for/while/switch
- # expressions - which have their own, more liberal conventions - we
- # first see if we should be looking inside such an expression for a
- # function call, to which we can apply more strict standards.
- fncall = line # if there's no control flow construct, look at whole line
- for pattern in (r'\bif\s*(?:constexpr\s*)?\((.*)\)\s*{',
- r'\bfor\s*\((.*)\)\s*{',
- r'\bwhile\s*\((.*)\)\s*[{;]',
- r'\bswitch\s*\((.*)\)\s*{'):
- match = Search(pattern, line)
- if match:
- fncall = match.group(1) # look inside the parens for function calls
- break
+ # Since function calls often occur inside if/for/while/switch
+ # expressions - which have their own, more liberal conventions - we
+ # first see if we should be looking inside such an expression for a
+ # function call, to which we can apply more strict standards.
+ fncall = line # if there's no control flow construct, look at whole line
+ for pattern in (r'\bif\s*(?:constexpr\s*)?\((.*)\)\s*{',
+ r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]',
+ r'\bswitch\s*\((.*)\)\s*{'):
+ match = Search(pattern, line)
+ if match:
+ fncall = match.group(1) # look inside the parens for function calls
+ break
- # Except in if/for/while/switch, there should never be space
- # immediately inside parens (eg "f( 3, 4 )"). We make an exception
- # for nested parens ( (a+b) + c ). Likewise, there should never be
- # a space before a ( when it's a function argument. I assume it's a
- # function argument when the char before the whitespace is legal in
- # a function name (alnum + _) and we're not starting a macro. Also ignore
- # pointers and references to arrays and functions coz they're too tricky:
- # we use a very simple way to recognize these:
- # " (something)(maybe-something)" or
- # " (something)(maybe-something," or
- # " (something)[something]"
- # Note that we assume the contents of [] to be short enough that
- # they'll never need to wrap.
- if ( # Ignore control structures.
- not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
- fncall) and
- # Ignore pointers/references to functions.
- not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
- # Ignore pointers/references to arrays.
- not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
- if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
- error(filename, linenum, 'whitespace/parens', 4,
- 'Extra space after ( in function call')
- elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
- error(filename, linenum, 'whitespace/parens', 2,
- 'Extra space after (')
- if (Search(r'\w\s+\(', fncall) and
- not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
- not Search(r'#\s*define|typedef|__except|using\s+\w+\s*=', fncall) and
- not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
- not Search(r'\bcase\s+\(', fncall)):
- # TODO(unknown): Space after an operator function seem to be a common
- # error, silence those for now by restricting them to highest verbosity.
- if Search(r'\boperator_*\b', line):
- error(filename, linenum, 'whitespace/parens', 0,
- 'Extra space before ( in function call')
- else:
- error(filename, linenum, 'whitespace/parens', 4,
- 'Extra space before ( in function call')
- # If the ) is followed only by a newline or a { + newline, assume it's
- # part of a control statement (if/while/etc), and don't complain
- if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
- # If the closing parenthesis is preceded by only whitespaces,
- # try to give a more descriptive error message.
- if Search(r'^\s+\)', fncall):
- error(filename, linenum, 'whitespace/parens', 2,
- 'Closing ) should be moved to the previous line')
- else:
- error(filename, linenum, 'whitespace/parens', 2,
- 'Extra space before )')
+ # Except in if/for/while/switch, there should never be space
+ # immediately inside parens (eg "f( 3, 4 )"). We make an exception
+ # for nested parens ( (a+b) + c ). Likewise, there should never be
+ # a space before a ( when it's a function argument. I assume it's a
+ # function argument when the char before the whitespace is legal in
+ # a function name (alnum + _) and we're not starting a macro. Also ignore
+    # pointers and references to arrays and functions because they're too tricky:
+ # we use a very simple way to recognize these:
+ # " (something)(maybe-something)" or
+ # " (something)(maybe-something," or
+ # " (something)[something]"
+ # Note that we assume the contents of [] to be short enough that
+ # they'll never need to wrap.
+ if ( # Ignore control structures.
+ not Search(
+ r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
+ fncall) and
+ # Ignore pointers/references to functions.
+ not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
+ # Ignore pointers/references to arrays.
+ not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
+ if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space after ( in function call')
+ elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space after (')
+ if (Search(r'\w\s+\(', fncall) and not Search(
+ r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall)
+ and not Search(r'#\s*define|typedef|__except|using\s+\w+\s*=',
+ fncall)
+ and not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)
+ and not Search(r'\bcase\s+\(', fncall)):
+            # TODO(unknown): Space after an operator function seems to be a
+            # common error; silence those for now by restricting them to the
+            # highest verbosity.
+ if Search(r'\boperator_*\b', line):
+ error(filename, linenum, 'whitespace/parens', 0,
+ 'Extra space before ( in function call')
+ else:
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space before ( in function call')
+ # If the ) is followed only by a newline or a { + newline, assume it's
+ # part of a control statement (if/while/etc), and don't complain
+ if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
+ # If the closing parenthesis is preceded by only whitespaces,
+ # try to give a more descriptive error message.
+ if Search(r'^\s+\)', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Closing ) should be moved to the previous line')
+ else:
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space before )')
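
The paren-spacing complaints above come from a handful of Search patterns applied to the (possibly narrowed) call expression. A small demonstration covering only the "extra space after (" and "extra space before )" cases; paren_spacing_issues is a hypothetical helper using Python's re:

import re

def paren_spacing_issues(fncall):
    """Return the spacing complaints these patterns would raise for 'fncall'."""
    issues = []
    if re.search(r'\w\s*\(\s(?!\s*\\$)', fncall):
        issues.append('Extra space after ( in function call')
    elif re.search(r'\(\s+(?!(\s*\\)|\()', fncall):
        issues.append('Extra space after (')
    if re.search(r'[^)]\s+\)\s*[^{\s]', fncall):
        issues.append('Extra space before )')
    return issues

print(paren_spacing_issues('Foo( 3, 4 );'))   # both opening and closing space
print(paren_spacing_issues('Foo(3, 4);'))     # clean call, no complaints
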
def IsBlankLine(line):
- """Returns true if the given line is blank.
+ """Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
@@ -3215,26 +3259,26 @@ def IsBlankLine(line):
Returns:
True, if the given line is blank.
"""
- return not line or line.isspace()
+ return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
- is_namespace_indent_item = (
- len(nesting_state.stack) > 1 and
- nesting_state.stack[-1].check_namespace_indentation and
- isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
- nesting_state.previous_stack_top == nesting_state.stack[-2])
+ is_namespace_indent_item = (
+ len(nesting_state.stack) > 1
+ and nesting_state.stack[-1].check_namespace_indentation
+ and isinstance(nesting_state.previous_stack_top, _NamespaceInfo)
+ and nesting_state.previous_stack_top == nesting_state.stack[-2])
- if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
- clean_lines.elided, line):
- CheckItemIndentationInNamespace(filename, clean_lines.elided,
- line, error)
+ if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
+ clean_lines.elided, line):
+ CheckItemIndentationInNamespace(filename, clean_lines.elided, line,
+ error)
-def CheckForFunctionLengths(filename, clean_lines, linenum,
- function_state, error):
- """Reports for long function bodies.
+def CheckForFunctionLengths(filename, clean_lines, linenum, function_state,
+ error):
+ """Reports for long function bodies.
For an overview why this is done, see:
https://google.github.io/styleguide/cppguide.html#Write_Short_Functions
@@ -3255,49 +3299,51 @@ def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
- lines = clean_lines.lines
- line = lines[linenum]
- joined_line = ''
+ lines = clean_lines.lines
+ line = lines[linenum]
+ joined_line = ''
- starting_func = False
- regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
- match_result = Match(regexp, line)
- if match_result:
- # If the name is all caps and underscores, figure it's a macro and
- # ignore it, unless it's TEST or TEST_F.
- function_name = match_result.group(1).split()[-1]
- if function_name == 'TEST' or function_name == 'TEST_F' or (
- not Match(r'[A-Z_0-9]+$', function_name)):
- starting_func = True
+ starting_func = False
+ regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
+ match_result = Match(regexp, line)
+ if match_result:
+ # If the name is all caps and underscores, figure it's a macro and
+ # ignore it, unless it's TEST or TEST_F.
+ function_name = match_result.group(1).split()[-1]
+ if function_name == 'TEST' or function_name == 'TEST_F' or (not Match(
+ r'[A-Z_0-9]+$', function_name)):
+ starting_func = True
- if starting_func:
- body_found = False
- for start_linenum in range(linenum, clean_lines.NumLines()):
- start_line = lines[start_linenum]
- joined_line += ' ' + start_line.lstrip()
- if Search(r'(;|})', start_line): # Declarations and trivial functions
- body_found = True
- break # ... ignore
- elif Search(r'{', start_line):
- body_found = True
- function = Search(r'((\w|:)*)\(', line).group(1)
- if Match(r'TEST', function): # Handle TEST... macros
- parameter_regexp = Search(r'(\(.*\))', joined_line)
- if parameter_regexp: # Ignore bad syntax
- function += parameter_regexp.group(1)
- else:
- function += '()'
- function_state.Begin(function)
- break
- if not body_found:
- # No body for the function (or evidence of a non-function) was found.
- error(filename, linenum, 'readability/fn_size', 5,
- 'Lint failed to find start of function body.')
- elif Match(r'^\}\s*$', line): # function end
- function_state.Check(error, filename, linenum)
- function_state.End()
- elif not Match(r'^\s*$', line):
- function_state.Count() # Count non-blank/non-comment lines.
+ if starting_func:
+ body_found = False
+ for start_linenum in range(linenum, clean_lines.NumLines()):
+ start_line = lines[start_linenum]
+ joined_line += ' ' + start_line.lstrip()
+ if Search(r'(;|})',
+ start_line): # Declarations and trivial functions
+ body_found = True
+ break # ... ignore
+ elif Search(r'{', start_line):
+ body_found = True
+ function = Search(r'((\w|:)*)\(', line).group(1)
+ if Match(r'TEST', function): # Handle TEST... macros
+ parameter_regexp = Search(r'(\(.*\))', joined_line)
+ if parameter_regexp: # Ignore bad syntax
+ function += parameter_regexp.group(1)
+ else:
+ function += '()'
+ function_state.Begin(function)
+ break
+ if not body_found:
+ # No body for the function (or evidence of a non-function) was
+ # found.
+ error(filename, linenum, 'readability/fn_size', 5,
+ 'Lint failed to find start of function body.')
+ elif Match(r'^\}\s*$', line): # function end
+ function_state.Check(error, filename, linenum)
+ function_state.End()
+ elif not Match(r'^\s*$', line):
+ function_state.Count() # Count non-blank/non-comment lines.
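
The function-length check only starts counting when a line looks like the beginning of a measurable function: a permissive "name(" pattern, with all-caps names treated as macros unless they are TEST or TEST_F. A sketch of that decision with a hypothetical starts_measurable_function helper:

import re

def starts_measurable_function(line):
    """True if 'line' looks like the start of a function whose length the
    check would measure (TEST/TEST_F are allowed despite being all-caps)."""
    match = re.match(r'(\w(\w|::|\*|\&|\s)*)\(', line)
    if not match:
        return False
    function_name = match.group(1).split()[-1]
    # All-caps-and-underscores names are assumed to be macros and skipped,
    # except the googletest TEST / TEST_F macros.
    return (function_name in ('TEST', 'TEST_F')
            or not re.match(r'[A-Z_0-9]+$', function_name))

for sample in ('void MyClass::DoThing(int x) {',
               'TEST_F(MyFixture, DoesThing) {',
               'MY_MACRO(arg1, arg2)'):
    print(sample, '->', starts_measurable_function(sample))
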
_RE_PATTERN_TODO = re.compile(
@@ -3305,7 +3351,7 @@ _RE_PATTERN_TODO = re.compile(
def CheckComment(line, filename, linenum, next_line_start, error):
- """Checks for common mistakes in comments.
+ """Checks for common mistakes in comments.
Args:
line: The line in question.
@@ -3314,52 +3360,55 @@ def CheckComment(line, filename, linenum, next_line_start, error):
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
- commentpos = line.find('//')
- if commentpos != -1:
- # Check if the // may be in quotes. If so, ignore it
- if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
- # Allow one space for new scopes, two spaces otherwise:
- if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
- ((commentpos >= 1 and
- line[commentpos-1] not in string.whitespace) or
- (commentpos >= 2 and
- line[commentpos-2] not in string.whitespace))):
- error(filename, linenum, 'whitespace/comments', 2,
- 'At least two spaces is best between code and comments')
+ commentpos = line.find('//')
+ if commentpos != -1:
+ # Check if the // may be in quotes. If so, ignore it
+ if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
+ # Allow one space for new scopes, two spaces otherwise:
+ if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos)
+ and ((commentpos >= 1
+ and line[commentpos - 1] not in string.whitespace) or
+ (commentpos >= 2
+ and line[commentpos - 2] not in string.whitespace))):
+ error(filename, linenum, 'whitespace/comments', 2,
+ 'At least two spaces is best between code and comments')
- # Checks for common mistakes in TODO comments.
- comment = line[commentpos:]
- match = _RE_PATTERN_TODO.match(comment)
- if match:
- # One whitespace is correct; zero whitespace is handled elsewhere.
- leading_whitespace = match.group(1)
- if len(leading_whitespace) > 1:
- error(filename, linenum, 'whitespace/todo', 2,
- 'Too many spaces before TODO')
+ # Checks for common mistakes in TODO comments.
+ comment = line[commentpos:]
+ match = _RE_PATTERN_TODO.match(comment)
+ if match:
+ # One whitespace is correct; zero whitespace is handled
+ # elsewhere.
+ leading_whitespace = match.group(1)
+ if len(leading_whitespace) > 1:
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'Too many spaces before TODO')
- username = match.group(2)
- if not username:
- error(filename, linenum, 'readability/todo', 2,
- 'Missing username in TODO; it should look like '
- '"// TODO(my_username): Stuff."')
+ username = match.group(2)
+ if not username:
+ error(
+ filename, linenum, 'readability/todo', 2,
+ 'Missing username in TODO; it should look like '
+ '"// TODO(my_username): Stuff."')
- middle_whitespace = match.group(3)
- # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
- if middle_whitespace != ' ' and middle_whitespace != '':
- error(filename, linenum, 'whitespace/todo', 2,
- 'TODO(my_username) should be followed by a space')
+ middle_whitespace = match.group(3)
+ # Comparisons made explicit for correctness -- pylint:
+ # disable=g-explicit-bool-comparison
+ if middle_whitespace != ' ' and middle_whitespace != '':
+ error(filename, linenum, 'whitespace/todo', 2,
+ 'TODO(my_username) should be followed by a space')
- # If the comment contains an alphanumeric character, there
- # should be a space somewhere between it and the // unless
- # it's a /// or //! Doxygen comment.
- if (Match(r'//[^ ]*\w', comment) and
- not Match(r'(///|//\!)(\s+|$)', comment)):
- error(filename, linenum, 'whitespace/comments', 4,
- 'Should have a space between // and comment')
+ # If the comment contains an alphanumeric character, there
+ # should be a space somewhere between it and the // unless
+ # it's a /// or //! Doxygen comment.
+ if (Match(r'//[^ ]*\w', comment)
+ and not Match(r'(///|//\!)(\s+|$)', comment)):
+ error(filename, linenum, 'whitespace/comments', 4,
+ 'Should have a space between // and comment')
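
The TODO checks above all key off one compiled pattern; the sketch below uses a pattern assumed to be close to cpplint's _RE_PATTERN_TODO (defined just after this function) and reproduces the three complaints, with a hypothetical todo_complaints helper:

import re

# Assumed to be close to cpplint's _RE_PATTERN_TODO; treat as illustrative.
TODO_PATTERN = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')

def todo_complaints(comment):
    """Return the TODO-format complaints for a '//...' comment string."""
    complaints = []
    match = TODO_PATTERN.match(comment)
    if not match:
        return complaints
    if len(match.group(1)) > 1:
        complaints.append('Too many spaces before TODO')
    if not match.group(2):
        complaints.append('Missing username in TODO')
    if match.group(3) not in (' ', ''):
        complaints.append('TODO(my_username) should be followed by a space')
    return complaints

print(todo_complaints('//   TODO: fix this'))        # spacing + username complaints
print(todo_complaints('// TODO(alice): fix this'))   # clean, no complaints
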
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
- """Checks for the correctness of various spacing issues in the code.
+ """Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
@@ -3376,121 +3425,121 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
error: The function to call with any errors found.
"""
- # Don't use "elided" lines here, otherwise we can't check commented lines.
- # Don't want to use "raw" either, because we don't want to check inside C++11
- # raw strings,
- raw = clean_lines.lines_without_raw_strings
- line = raw[linenum]
+ # Don't use "elided" lines here, otherwise we can't check commented lines.
+ # Don't want to use "raw" either, because we don't want to check inside
+ # C++11 raw strings,
+ raw = clean_lines.lines_without_raw_strings
+ line = raw[linenum]
- # Before nixing comments, check if the line is blank for no good
- # reason. This includes the first line after a block is opened, and
- # blank lines at the end of a function (ie, right before a line like '}'
- #
- # Skip all the blank line checks if we are immediately inside a
- # namespace body. In other words, don't issue blank line warnings
- # for this block:
- # namespace {
- #
- # }
- #
- # A warning about missing end of namespace comments will be issued instead.
- #
- # Also skip blank line checks for 'extern "C"' blocks, which are formatted
- # like namespaces.
- if (IsBlankLine(line) and
- not nesting_state.InNamespaceBody() and
- not nesting_state.InExternC()):
- elided = clean_lines.elided
- prev_line = elided[linenum - 1]
- prevbrace = prev_line.rfind('{')
- # TODO(unknown): Don't complain if line before blank line, and line after,
- # both start with alnums and are indented the same amount.
- # This ignores whitespace at the start of a namespace block
- # because those are not usually indented.
- if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
- # OK, we have a blank line at the start of a code block. Before we
- # complain, we check if it is an exception to the rule: The previous
- # non-empty line has the parameters of a function header that are indented
- # 4 spaces (because they did not fit in a 80 column line when placed on
- # the same line as the function name). We also check for the case where
- # the previous line is indented 6 spaces, which may happen when the
- # initializers of a constructor do not fit into a 80 column line.
- exception = False
- if Match(r' {6}\w', prev_line): # Initializer list?
- # We are looking for the opening column of initializer list, which
- # should be indented 4 spaces to cause 6 space indentation afterwards.
- search_position = linenum-2
- while (search_position >= 0
- and Match(r' {6}\w', elided[search_position])):
- search_position -= 1
- exception = (search_position >= 0
- and elided[search_position][:5] == ' :')
- else:
- # Search for the function arguments or an initializer list. We use a
- # simple heuristic here: If the line is indented 4 spaces; and we have a
- # closing paren, without the opening paren, followed by an opening brace
- # or colon (for initializer lists) we assume that it is the last line of
- # a function header. If we have a colon indented 4 spaces, it is an
- # initializer list.
- exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
- prev_line)
- or Match(r' {4}:', prev_line))
-
- if not exception:
- error(filename, linenum, 'whitespace/blank_line', 2,
- 'Redundant blank line at the start of a code block '
- 'should be deleted.')
- # Ignore blank lines at the end of a block in a long if-else
- # chain, like this:
- # if (condition1) {
- # // Something followed by a blank line
+ # Before nixing comments, check if the line is blank for no good
+ # reason. This includes the first line after a block is opened, and
+    # blank lines at the end of a function (i.e. right before a line like '}').
+ #
+ # Skip all the blank line checks if we are immediately inside a
+ # namespace body. In other words, don't issue blank line warnings
+ # for this block:
+ # namespace {
#
- # } else if (condition2) {
- # // Something else
# }
+ #
+ # A warning about missing end of namespace comments will be issued instead.
+ #
+ # Also skip blank line checks for 'extern "C"' blocks, which are formatted
+ # like namespaces.
+ if (IsBlankLine(line) and not nesting_state.InNamespaceBody()
+ and not nesting_state.InExternC()):
+ elided = clean_lines.elided
+ prev_line = elided[linenum - 1]
+ prevbrace = prev_line.rfind('{')
+ # TODO(unknown): Don't complain if line before blank line, and line
+ # after, both start with alnums and are indented the same amount. This
+ # ignores whitespace at the start of a namespace block because those are
+ # not usually indented.
+ if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
+ # OK, we have a blank line at the start of a code block. Before we
+ # complain, we check if it is an exception to the rule: The previous
+ # non-empty line has the parameters of a function header that are
+            # indented 4 spaces (because they did not fit in an 80-column line
+ # when placed on the same line as the function name). We also check
+ # for the case where the previous line is indented 6 spaces, which
+ # may happen when the initializers of a constructor do not fit into
+            # an 80-column line.
+ exception = False
+ if Match(r' {6}\w', prev_line): # Initializer list?
+ # We are looking for the opening column of initializer list,
+ # which should be indented 4 spaces to cause 6 space indentation
+ # afterwards.
+ search_position = linenum - 2
+ while (search_position >= 0
+ and Match(r' {6}\w', elided[search_position])):
+ search_position -= 1
+ exception = (search_position >= 0
+ and elided[search_position][:5] == ' :')
+ else:
+ # Search for the function arguments or an initializer list. We
+ # use a simple heuristic here: If the line is indented 4 spaces;
+ # and we have a closing paren, without the opening paren,
+ # followed by an opening brace or colon (for initializer lists)
+ # we assume that it is the last line of a function header. If
+ # we have a colon indented 4 spaces, it is an initializer list.
+ exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ prev_line) or Match(r' {4}:', prev_line))
+
+ if not exception:
+ error(
+ filename, linenum, 'whitespace/blank_line', 2,
+ 'Redundant blank line at the start of a code block '
+ 'should be deleted.')
+ # Ignore blank lines at the end of a block in a long if-else
+ # chain, like this:
+ # if (condition1) {
+ # // Something followed by a blank line
+ #
+ # } else if (condition2) {
+ # // Something else
+ # }
+ if linenum + 1 < clean_lines.NumLines():
+ next_line = raw[linenum + 1]
+ if (next_line and Match(r'\s*}', next_line)
+ and next_line.find('} else ') == -1):
+ error(
+ filename, linenum, 'whitespace/blank_line', 3,
+ 'Redundant blank line at the end of a code block '
+ 'should be deleted.')
+
+ matched = Match(r'\s*(public|protected|private):', prev_line)
+ if matched:
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Do not leave a blank line after "%s:"' % matched.group(1))
+
+ # Next, check comments
+ next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
- next_line = raw[linenum + 1]
- if (next_line
- and Match(r'\s*}', next_line)
- and next_line.find('} else ') == -1):
- error(filename, linenum, 'whitespace/blank_line', 3,
- 'Redundant blank line at the end of a code block '
- 'should be deleted.')
+ next_line = raw[linenum + 1]
+ next_line_start = len(next_line) - len(next_line.lstrip())
+ CheckComment(line, filename, linenum, next_line_start, error)
- matched = Match(r'\s*(public|protected|private):', prev_line)
- if matched:
- error(filename, linenum, 'whitespace/blank_line', 3,
- 'Do not leave a blank line after "%s:"' % matched.group(1))
+ # get rid of comments and strings
+ line = clean_lines.elided[linenum]
- # Next, check comments
- next_line_start = 0
- if linenum + 1 < clean_lines.NumLines():
- next_line = raw[linenum + 1]
- next_line_start = len(next_line) - len(next_line.lstrip())
- CheckComment(line, filename, linenum, next_line_start, error)
+ # You shouldn't have spaces before your brackets, except maybe after
+ # 'delete []', 'return []() {};', 'auto [abc, ...] = ...;' or in the case of
+    # C++ attributes like 'class [[clang::lto_visibility_public]] MyClass'.
+ if (Search(r'\w\s+\[', line)
+ and not Search(r'(?:auto&?|delete|return)\s+\[', line)
+ and not Search(r'\s+\[\[', line)):
+ error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [')
- # get rid of comments and strings
- line = clean_lines.elided[linenum]
-
- # You shouldn't have spaces before your brackets, except maybe after
- # 'delete []', 'return []() {};', 'auto [abc, ...] = ...;' or in the case of
- # c++ attributes like 'class [[clang::lto_visibility_public]] MyClass'.
- if (Search(r'\w\s+\[', line)
- and not Search(r'(?:auto&?|delete|return)\s+\[', line)
- and not Search(r'\s+\[\[', line)):
- error(filename, linenum, 'whitespace/braces', 5,
- 'Extra space before [')
-
- # In range-based for, we wanted spaces before and after the colon, but
- # not around "::" tokens that might appear.
- if (Search(r'for *\(.*[^:]:[^: ]', line) or
- Search(r'for *\(.*[^: ]:[^:]', line)):
- error(filename, linenum, 'whitespace/forcolon', 2,
- 'Missing space around colon in range-based for loop')
+ # In range-based for, we wanted spaces before and after the colon, but
+ # not around "::" tokens that might appear.
+ if (Search(r'for *\(.*[^:]:[^: ]', line)
+ or Search(r'for *\(.*[^: ]:[^:]', line)):
+ error(filename, linenum, 'whitespace/forcolon', 2,
+ 'Missing space around colon in range-based for loop')
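As a rough standalone sketch (not part of the patch) of how the range-based-for colon heuristic above behaves: the two patterns are copied from the check, plain re.search stands in for cpplint's Search helper, and the _missing_for_colon_space name is made up for illustration.

import re

def _missing_for_colon_space(line):
    # A colon inside 'for (...)' with a space missing on at least one side;
    # '::' tokens are skipped because both patterns exclude adjacent colons.
    return bool(re.search(r'for *\(.*[^:]:[^: ]', line)
                or re.search(r'for *\(.*[^: ]:[^:]', line))

assert _missing_for_colon_space('for (auto x: values) {')   # no space before ':'
assert _missing_for_colon_space('for (auto x :values) {')   # no space after ':'
assert not _missing_for_colon_space('for (auto x : values) {')
assert not _missing_for_colon_space('for (std::string s : v) {')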
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
- """Checks for horizontal spacing around operators.
+ """Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
@@ -3498,114 +3547,114 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Don't try to do spacing checks for operator methods. Do this by
- # replacing the troublesome characters with something else,
- # preserving column position for all other characters.
- #
- # The replacement is done repeatedly to avoid false positives from
- # operators that call operators.
- while True:
- match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
+ # Don't try to do spacing checks for operator methods. Do this by
+ # replacing the troublesome characters with something else,
+ # preserving column position for all other characters.
+ #
+ # The replacement is done repeatedly to avoid false positives from
+ # operators that call operators.
+ while True:
+ match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
+ if match:
+ line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
+ else:
+ break
+
+ # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
+ # Otherwise not. Note we only check for non-spaces on *both* sides;
+ # sometimes people put non-spaces on one side when aligning ='s among
+ # many lines (not that this is behavior that I approve of...)
+ if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line))
+ and not Search(r'\b(if|while|for) ', line)
+ # Operators taken from [lex.operators] in C++11 standard.
+ and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
+ and not Search(r'operator=', line)):
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Missing spaces around =')
+
+ # It's ok not to have spaces around binary operators like + - * /, but if
+ # there's too little whitespace, we get concerned. It's hard to tell,
+ # though, so we punt on this one for now. TODO.
+
+ # You should always have whitespace around binary operators.
+ #
+ # Check <= and >= first to avoid false positives with < and >, then
+ # check non-include lines for spacing around < and >.
+ #
+    # If the operator is followed by a comma, assume it's being used in a
+ # macro context and don't do any checks. This avoids false
+ # positives.
+ #
+ # Note that && is not included here. This is because there are too
+ # many false positives due to RValue references.
+ match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
- line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
- else:
- break
-
- # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
- # Otherwise not. Note we only check for non-spaces on *both* sides;
- # sometimes people put non-spaces on one side when aligning ='s among
- # many lines (not that this is behavior that I approve of...)
- if ((Search(r'[\w.]=', line) or
- Search(r'=[\w.]', line))
- and not Search(r'\b(if|while|for) ', line)
- # Operators taken from [lex.operators] in C++11 standard.
- and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
- and not Search(r'operator=', line)):
- error(filename, linenum, 'whitespace/operators', 4,
- 'Missing spaces around =')
-
- # It's ok not to have spaces around binary operators like + - * /, but if
- # there's too little whitespace, we get concerned. It's hard to tell,
- # though, so we punt on this one for now. TODO.
-
- # You should always have whitespace around binary operators.
- #
- # Check <= and >= first to avoid false positives with < and >, then
- # check non-include lines for spacing around < and >.
- #
- # If the operator is followed by a comma, assume it's be used in a
- # macro context and don't do any checks. This avoids false
- # positives.
- #
- # Note that && is not included here. This is because there are too
- # many false positives due to RValue references.
- match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
- if match:
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around %s' % match.group(1))
- elif not Match(r'#.*include', line):
- # Look for < that is not surrounded by spaces. This is only
- # triggered if both sides are missing spaces, even though
- # technically should should flag if at least one side is missing a
- # space. This is done to avoid some false positives with shifts.
- match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
- if match:
- (_, _, end_pos) = CloseExpression(
- clean_lines, linenum, len(match.group(1)))
- if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around <')
+ 'Missing spaces around %s' % match.group(1))
+ elif not Match(r'#.*include', line):
+ # Look for < that is not surrounded by spaces. This is only
+ # triggered if both sides are missing spaces, even though
+        # technically we should flag if at least one side is missing a
+ # space. This is done to avoid some false positives with shifts.
+ match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
+ if match:
+ (_, _, end_pos) = CloseExpression(clean_lines, linenum,
+ len(match.group(1)))
+ if end_pos <= -1:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <')
- # Look for > that is not surrounded by spaces. Similar to the
- # above, we only trigger if both sides are missing spaces to avoid
- # false positives with shifts.
- match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
- if match:
- (_, _, start_pos) = ReverseCloseExpression(
- clean_lines, linenum, len(match.group(1)))
- if start_pos <= -1:
+ # Look for > that is not surrounded by spaces. Similar to the
+ # above, we only trigger if both sides are missing spaces to avoid
+ # false positives with shifts.
+ match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
+ if match:
+ (_, _, start_pos) = ReverseCloseExpression(clean_lines, linenum,
+ len(match.group(1)))
+ if start_pos <= -1:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >')
+
+ # We allow no-spaces around << when used like this: 10<<20, but
+ # not otherwise (particularly, not when used as streams)
+ #
+ # We also allow operators following an opening parenthesis, since
+ # those tend to be macros that deal with operators.
+ match = Search(
+ r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
+ if (match and not (match.group(1).isdigit() and match.group(2).isdigit())
+ and not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around >')
+ 'Missing spaces around <<')
- # We allow no-spaces around << when used like this: 10<<20, but
- # not otherwise (particularly, not when used as streams)
- #
- # We also allow operators following an opening parenthesis, since
- # those tend to be macros that deal with operators.
- match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
- if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
- not (match.group(1) == 'operator' and match.group(2) == ';')):
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around <<')
+ # We allow no-spaces around >> for almost anything. This is because
+ # C++11 allows ">>" to close nested templates, which accounts for
+ # most cases when ">>" is not followed by a space.
+ #
+ # We still warn on ">>" followed by alpha character, because that is
+ # likely due to ">>" being used for right shifts, e.g.:
+ # value >> alpha
+ #
+ # When ">>" is used to close templates, the alphanumeric letter that
+ # follows would be part of an identifier, and there should still be
+ # a space separating the template type and the identifier.
+ # type> alpha
+ match = Search(r'>>[a-zA-Z_]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >>')
- # We allow no-spaces around >> for almost anything. This is because
- # C++11 allows ">>" to close nested templates, which accounts for
- # most cases when ">>" is not followed by a space.
- #
- # We still warn on ">>" followed by alpha character, because that is
- # likely due to ">>" being used for right shifts, e.g.:
- # value >> alpha
- #
- # When ">>" is used to close templates, the alphanumeric letter that
- # follows would be part of an identifier, and there should still be
- # a space separating the template type and the identifier.
- # type> alpha
- match = Search(r'>>[a-zA-Z_]', line)
- if match:
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around >>')
-
- # There shouldn't be space around unary operators
- match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
- if match:
- error(filename, linenum, 'whitespace/operators', 4,
- 'Extra space for operator %s' % match.group(1))
+ # There shouldn't be space around unary operators
+ match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Extra space for operator %s' % match.group(1))
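A similar standalone sketch (not part of the patch) for the missing-spaces-around-= heuristic above; the exclusion patterns are copied verbatim from the check, and _missing_spaces_around_assign is an illustrative name only.

import re

def _missing_spaces_around_assign(line):
    # Same exclusions as the check: an if/while/for condition, compound and
    # comparison operators, and 'operator='.
    return bool((re.search(r'[\w.]=', line) or re.search(r'=[\w.]', line))
                and not re.search(r'\b(if|while|for) ', line)
                and not re.search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
                and not re.search(r'operator=', line))

assert _missing_spaces_around_assign('int x=1;')
assert not _missing_spaces_around_assign('int x = 1;')
assert not _missing_spaces_around_assign('x+=1;')              # compound operator
assert not _missing_spaces_around_assign('if (a=Foo()) {}')    # allowed inside if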
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
- """Checks for horizontal spacing around parentheses.
+ """Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
@@ -3613,37 +3662,38 @@ def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # No spaces after an if, while, switch, or for
- match = Search(r' (if\(|for\(|while\(|switch\()', line)
- if match:
- error(filename, linenum, 'whitespace/parens', 5,
- 'Missing space before ( in %s' % match.group(1))
-
- # For if/for/while/switch, the left and right parens should be
- # consistent about how many spaces are inside the parens, and
- # there should either be zero or one spaces inside the parens.
- # We don't want: "if ( foo)" or "if ( foo )".
- # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
- match = Search(r'\b(if|for|while|switch)\s*'
- r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
- line)
- if match:
- if len(match.group(2)) != len(match.group(4)):
- if not (match.group(3) == ';' and
- len(match.group(2)) == 1 + len(match.group(4)) or
- not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
+ # No spaces after an if, while, switch, or for
+ match = Search(r' (if\(|for\(|while\(|switch\()', line)
+ if match:
error(filename, linenum, 'whitespace/parens', 5,
- 'Mismatching spaces inside () in %s' % match.group(1))
- if len(match.group(2)) not in [0, 1]:
- error(filename, linenum, 'whitespace/parens', 5,
- 'Should have zero or one spaces inside ( and ) in %s' %
- match.group(1))
+ 'Missing space before ( in %s' % match.group(1))
+
+ # For if/for/while/switch, the left and right parens should be
+ # consistent about how many spaces are inside the parens, and
+ # there should either be zero or one spaces inside the parens.
+ # We don't want: "if ( foo)" or "if ( foo )".
+ # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+ match = Search(
+ r'\b(if|for|while|switch)\s*'
+ r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', line)
+ if match:
+ if len(match.group(2)) != len(match.group(4)):
+ if not (match.group(3) == ';'
+ and len(match.group(2)) == 1 + len(match.group(4)) or
+ not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Mismatching spaces inside () in %s' % match.group(1))
+ if len(match.group(2)) not in [0, 1]:
+ error(
+ filename, linenum, 'whitespace/parens', 5,
+ 'Should have zero or one spaces inside ( and ) in %s' %
+ match.group(1))
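For illustration only (not part of the patch), the first check above reduces to a single search; the leading space in the pattern means a keyword at column 0 is never caught, which mirrors the regex as written. _missing_space_before_paren is a made-up helper name.

import re

def _missing_space_before_paren(line):
    # Flags 'if(', 'for(', 'while(' and 'switch(' when the '(' directly
    # follows the keyword and the keyword itself is preceded by a space.
    match = re.search(r' (if\(|for\(|while\(|switch\()', line)
    return match.group(1) if match else None

assert _missing_space_before_paren('  if(ready) {') == 'if('
assert _missing_space_before_paren('  while(!done) {') == 'while('
assert _missing_space_before_paren('  if (ready) {') is None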
def CheckCommaSpacing(filename, clean_lines, linenum, error):
- """Checks for horizontal spacing near commas and semicolons.
+ """Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
@@ -3651,35 +3701,35 @@ def CheckCommaSpacing(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- raw = clean_lines.lines_without_raw_strings
- line = clean_lines.elided[linenum]
+ raw = clean_lines.lines_without_raw_strings
+ line = clean_lines.elided[linenum]
- # You should always have a space after a comma (either as fn arg or operator)
- #
- # This does not apply when the non-space character following the
- # comma is another comma, since the only time when that happens is
- # for empty macro arguments.
- #
- # We run this check in two passes: first pass on elided lines to
- # verify that lines contain missing whitespaces, second pass on raw
- # lines to confirm that those missing whitespaces are not due to
- # elided comments.
- if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
- Search(r',[^,\s]', raw[linenum])):
- error(filename, linenum, 'whitespace/comma', 3,
- 'Missing space after ,')
+ # You should always have a space after a comma (either as fn arg or
+ # operator)
+ #
+ # This does not apply when the non-space character following the
+ # comma is another comma, since the only time when that happens is
+ # for empty macro arguments.
+ #
+ # We run this check in two passes: first pass on elided lines to
+ # verify that lines contain missing whitespaces, second pass on raw
+ # lines to confirm that those missing whitespaces are not due to
+ # elided comments.
+ if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line))
+ and Search(r',[^,\s]', raw[linenum])):
+ error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,')
- # You should always have a space after a semicolon
- # except for few corner cases
- # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
- # space after ;
- if Search(r';[^\s};\\)/]', line):
- error(filename, linenum, 'whitespace/semicolon', 3,
- 'Missing space after ;')
+ # You should always have a space after a semicolon
+    # except for a few corner cases.
+    # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
+ # space after ;
+ if Search(r';[^\s};\\)/]', line):
+ error(filename, linenum, 'whitespace/semicolon', 3,
+ 'Missing space after ;')
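A standalone sketch (not part of the patch) of the comma check above; the real code also re-runs the search against the raw line so that elided comments cannot trigger it, which this simplified version skips. _missing_space_after_comma is an illustrative name.

import re

def _missing_space_after_comma(elided_line):
    # First rewrite 'operator,(' so an overloaded comma operator is not
    # flagged, then look for a comma followed by something other than a
    # space or another comma (the empty-macro-argument case).
    collapsed = re.sub(r'\boperator\s*,\s*\(', 'F(', elided_line)
    return bool(re.search(r',[^,\s]', collapsed))

assert _missing_space_after_comma('f(a,b);')
assert not _missing_space_after_comma('f(a, b);')
assert not _missing_space_after_comma('void operator,(int rhs);')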
def _IsType(clean_lines, nesting_state, expr):
- """Check if expression looks like a type name, returns true if so.
+ """Check if expression looks like a type name, returns true if so.
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -3689,60 +3739,61 @@ def _IsType(clean_lines, nesting_state, expr):
Returns:
True, if token looks like a type.
"""
- # Keep only the last token in the expression
- last_word = Match(r'^.*(\b\S+)$', expr)
- if last_word:
- token = last_word.group(1)
- else:
- token = expr
+ # Keep only the last token in the expression
+ last_word = Match(r'^.*(\b\S+)$', expr)
+ if last_word:
+ token = last_word.group(1)
+ else:
+ token = expr
- # Match native types and stdint types
- if _TYPES.match(token):
- return True
-
- # Try a bit harder to match templated types. Walk up the nesting
- # stack until we find something that resembles a typename
- # declaration for what we are looking for.
- typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
- r'\b')
- block_index = len(nesting_state.stack) - 1
- while block_index >= 0:
- if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
- return False
-
- # Found where the opening brace is. We want to scan from this
- # line up to the beginning of the function, minus a few lines.
- # template
- # class C
- # : public ... { // start scanning here
- last_line = nesting_state.stack[block_index].starting_linenum
-
- next_block_start = 0
- if block_index > 0:
- next_block_start = nesting_state.stack[block_index - 1].starting_linenum
- first_line = last_line
- while first_line >= next_block_start:
- if clean_lines.elided[first_line].find('template') >= 0:
- break
- first_line -= 1
- if first_line < next_block_start:
- # Didn't find any "template" keyword before reaching the next block,
- # there are probably no template things to check for this block
- block_index -= 1
- continue
-
- # Look for typename in the specified range
- for i in range(first_line, last_line + 1, 1):
- if Search(typename_pattern, clean_lines.elided[i]):
+ # Match native types and stdint types
+ if _TYPES.match(token):
return True
- block_index -= 1
- return False
+ # Try a bit harder to match templated types. Walk up the nesting
+ # stack until we find something that resembles a typename
+ # declaration for what we are looking for.
+ typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
+ r'\b')
+ block_index = len(nesting_state.stack) - 1
+ while block_index >= 0:
+ if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
+ return False
+
+ # Found where the opening brace is. We want to scan from this
+ # line up to the beginning of the function, minus a few lines.
+ # template
+ # class C
+ # : public ... { // start scanning here
+ last_line = nesting_state.stack[block_index].starting_linenum
+
+ next_block_start = 0
+ if block_index > 0:
+ next_block_start = nesting_state.stack[block_index -
+ 1].starting_linenum
+ first_line = last_line
+ while first_line >= next_block_start:
+ if clean_lines.elided[first_line].find('template') >= 0:
+ break
+ first_line -= 1
+ if first_line < next_block_start:
+            # Didn't find any "template" keyword before reaching the next block,
+            # so there are probably no template things to check for this block.
+ block_index -= 1
+ continue
+
+ # Look for typename in the specified range
+ for i in range(first_line, last_line + 1, 1):
+ if Search(typename_pattern, clean_lines.elided[i]):
+ return True
+ block_index -= 1
+
+ return False
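For illustration (not part of the patch), the token extraction at the top of _IsType keeps only the trailing word of the expression before matching it against the type patterns; _last_token is a made-up name wrapping the same regex.

import re

def _last_token(expr):
    # The group anchors at the last word boundary, so only the final token
    # of a multi-word type expression survives.
    last_word = re.match(r'^.*(\b\S+)$', expr)
    return last_word.group(1) if last_word else expr

assert _last_token('unsigned long long') == 'long'
assert _last_token('uint64_t') == 'uint64_t'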
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
- """Checks for horizontal spacing near commas.
+ """Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
@@ -3752,86 +3803,88 @@ def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Except after an opening paren, or after another opening brace (in case of
- # an initializer list, for instance), you should have spaces before your
- # braces when they are delimiting blocks, classes, namespaces etc.
- # And since you should never have braces at the beginning of a line,
- # this is an easy test. Except that braces used for initialization don't
- # follow the same rule; we often don't want spaces before those.
- match = Match(r'^(.*[^ ({>]){', line)
+ # Except after an opening paren, or after another opening brace (in case of
+ # an initializer list, for instance), you should have spaces before your
+ # braces when they are delimiting blocks, classes, namespaces etc.
+ # And since you should never have braces at the beginning of a line,
+ # this is an easy test. Except that braces used for initialization don't
+ # follow the same rule; we often don't want spaces before those.
+ match = Match(r'^(.*[^ ({>]){', line)
- if match:
- # Try a bit harder to check for brace initialization. This
- # happens in one of the following forms:
- # Constructor() : initializer_list_{} { ... }
- # Constructor{}.MemberFunction()
- # Type variable{};
- # FunctionCall(type{}, ...);
- # LastArgument(..., type{});
- # LOG(INFO) << type{} << " ...";
- # map_of_type[{...}] = ...;
- # ternary = expr ? new type{} : nullptr;
- # OuterTemplate{}>
- #
- # We check for the character following the closing brace, and
- # silence the warning if it's one of those listed above, i.e.
- # "{.;,)<>]:".
- #
- # To account for nested initializer list, we allow any number of
- # closing braces up to "{;,)<". We can't simply silence the
- # warning on first sight of closing brace, because that would
- # cause false negatives for things that are not initializer lists.
- # Silence this: But not this:
- # Outer{ if (...) {
- # Inner{...} if (...){ // Missing space before {
- # }; }
- #
- # There is a false negative with this approach if people inserted
- # spurious semicolons, e.g. "if (cond){};", but we will catch the
- # spurious semicolon with a separate check.
- leading_text = match.group(1)
- (endline, endlinenum, endpos) = CloseExpression(
- clean_lines, linenum, len(match.group(1)))
- trailing_text = ''
- if endpos > -1:
- trailing_text = endline[endpos:]
- for offset in range(endlinenum + 1,
- min(endlinenum + 3, clean_lines.NumLines() - 1)):
- trailing_text += clean_lines.elided[offset]
- # We also suppress warnings for `uint64_t{expression}` etc., as the style
- # guide recommends brace initialization for integral types to avoid
- # overflow/truncation.
- if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
- and not _IsType(clean_lines, nesting_state, leading_text)):
- error(filename, linenum, 'whitespace/braces', 5,
- 'Missing space before {')
+ if match:
+ # Try a bit harder to check for brace initialization. This
+ # happens in one of the following forms:
+ # Constructor() : initializer_list_{} { ... }
+ # Constructor{}.MemberFunction()
+ # Type variable{};
+ # FunctionCall(type{}, ...);
+ # LastArgument(..., type{});
+ # LOG(INFO) << type{} << " ...";
+ # map_of_type[{...}] = ...;
+ # ternary = expr ? new type{} : nullptr;
+ # OuterTemplate{}>
+ #
+ # We check for the character following the closing brace, and
+ # silence the warning if it's one of those listed above, i.e.
+ # "{.;,)<>]:".
+ #
+        # To account for nested initializer lists, we allow any number of
+ # closing braces up to "{;,)<". We can't simply silence the
+ # warning on first sight of closing brace, because that would
+ # cause false negatives for things that are not initializer lists.
+ # Silence this: But not this:
+ # Outer{ if (...) {
+ # Inner{...} if (...){ // Missing space before {
+ # }; }
+ #
+ # There is a false negative with this approach if people inserted
+ # spurious semicolons, e.g. "if (cond){};", but we will catch the
+ # spurious semicolon with a separate check.
+ leading_text = match.group(1)
+ (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum,
+ len(match.group(1)))
+ trailing_text = ''
+ if endpos > -1:
+ trailing_text = endline[endpos:]
+ for offset in range(endlinenum + 1,
+ min(endlinenum + 3,
+ clean_lines.NumLines() - 1)):
+ trailing_text += clean_lines.elided[offset]
+ # We also suppress warnings for `uint64_t{expression}` etc., as the
+ # style guide recommends brace initialization for integral types to
+ # avoid overflow/truncation.
+ if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
+ and not _IsType(clean_lines, nesting_state, leading_text)):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before {')
- # Make sure '} else {' has spaces.
- if Search(r'}else', line):
- error(filename, linenum, 'whitespace/braces', 5,
- 'Missing space before else')
+ # Make sure '} else {' has spaces.
+ if Search(r'}else', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before else')
- # You shouldn't have a space before a semicolon at the end of the line.
- # There's a special case for "for" since the style guide allows space before
- # the semicolon there.
- if Search(r':\s*;\s*$', line):
- error(filename, linenum, 'whitespace/semicolon', 5,
- 'Semicolon defining empty statement. Use {} instead.')
- elif Search(r'^\s*;\s*$', line):
- error(filename, linenum, 'whitespace/semicolon', 5,
- 'Line contains only semicolon. If this should be an empty statement, '
- 'use {} instead.')
- elif (Search(r'\s+;\s*$', line) and
- not Search(r'\bfor\b', line)):
- error(filename, linenum, 'whitespace/semicolon', 5,
- 'Extra space before last semicolon. If this should be an empty '
- 'statement, use {} instead.')
+ # You shouldn't have a space before a semicolon at the end of the line.
+ # There's a special case for "for" since the style guide allows space before
+ # the semicolon there.
+ if Search(r':\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement. Use {} instead.')
+ elif Search(r'^\s*;\s*$', line):
+ error(
+ filename, linenum, 'whitespace/semicolon', 5,
+ 'Line contains only semicolon. If this should be an empty statement, '
+ 'use {} instead.')
+ elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)):
+ error(
+ filename, linenum, 'whitespace/semicolon', 5,
+ 'Extra space before last semicolon. If this should be an empty '
+ 'statement, use {} instead.')
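A standalone sketch (not part of the patch) of the three semicolon cases at the end of CheckBracesSpacing, returning a short tag instead of calling error(); the patterns are copied from the checks above and _semicolon_warning is an illustrative name.

import re

def _semicolon_warning(line):
    # Checked in the same order as above; only the first matching case wins.
    if re.search(r':\s*;\s*$', line):
        return 'empty statement'
    if re.search(r'^\s*;\s*$', line):
        return 'only semicolon'
    if re.search(r'\s+;\s*$', line) and not re.search(r'\bfor\b', line):
        return 'extra space before ;'
    return None

assert _semicolon_warning('default: ;') == 'empty statement'
assert _semicolon_warning('    ;') == 'only semicolon'
assert _semicolon_warning('DoWork() ;') == 'extra space before ;'
assert _semicolon_warning('DoWork();') is None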
def IsDecltype(clean_lines, linenum, column):
- """Check if the token ending on (linenum, column) is decltype().
+ """Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -3840,16 +3893,16 @@ def IsDecltype(clean_lines, linenum, column):
Returns:
True if this token is decltype() expression, False otherwise.
"""
- (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
- if start_col < 0:
+ (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
+ if start_col < 0:
+ return False
+ if Search(r'\bdecltype\s*$', text[0:start_col]):
+ return True
return False
- if Search(r'\bdecltype\s*$', text[0:start_col]):
- return True
- return False
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
- """Checks for additional blank line issues related to sections.
+ """Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
@@ -3860,51 +3913,54 @@ def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- # Skip checks if the class is small, where small means 25 lines or less.
- # 25 lines seems like a good cutoff since that's the usual height of
- # terminals, and any class that can't fit in one screen can't really
- # be considered "small".
- #
- # Also skip checks if we are on the first line. This accounts for
- # classes that look like
- # class Foo { public: ... };
- #
- # If we didn't find the end of the class, last_line would be zero,
- # and the check will be skipped by the first condition.
- if (class_info.last_line - class_info.starting_linenum <= 24 or
- linenum <= class_info.starting_linenum):
- return
+ # Skip checks if the class is small, where small means 25 lines or less.
+ # 25 lines seems like a good cutoff since that's the usual height of
+ # terminals, and any class that can't fit in one screen can't really
+ # be considered "small".
+ #
+ # Also skip checks if we are on the first line. This accounts for
+ # classes that look like
+ # class Foo { public: ... };
+ #
+ # If we didn't find the end of the class, last_line would be zero,
+ # and the check will be skipped by the first condition.
+ if (class_info.last_line - class_info.starting_linenum <= 24
+ or linenum <= class_info.starting_linenum):
+ return
- matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
- if matched:
- # Issue warning if the line before public/protected/private was
- # not a blank line, but don't do this if the previous line contains
- # "class" or "struct". This can happen two ways:
- # - We are at the beginning of the class.
- # - We are forward-declaring an inner class that is semantically
- # private, but needed to be public for implementation reasons.
- # Also ignores cases where the previous line ends with a backslash as can be
- # common when defining classes in C macros.
- prev_line = clean_lines.lines[linenum - 1]
- if (not IsBlankLine(prev_line) and
- not Search(r'\b(class|struct)\b', prev_line) and
- not Search(r'\\$', prev_line)):
- # Try a bit harder to find the beginning of the class. This is to
- # account for multi-line base-specifier lists, e.g.:
- # class Derived
- # : public Base {
- end_class_head = class_info.starting_linenum
- for i in range(class_info.starting_linenum, linenum):
- if Search(r'\{\s*$', clean_lines.lines[i]):
- end_class_head = i
- break
- if end_class_head < linenum - 1:
- error(filename, linenum, 'whitespace/blank_line', 3,
- '"%s:" should be preceded by a blank line' % matched.group(1))
+ matched = Match(r'\s*(public|protected|private):',
+ clean_lines.lines[linenum])
+ if matched:
+ # Issue warning if the line before public/protected/private was
+ # not a blank line, but don't do this if the previous line contains
+ # "class" or "struct". This can happen two ways:
+ # - We are at the beginning of the class.
+ # - We are forward-declaring an inner class that is semantically
+ # private, but needed to be public for implementation reasons.
+ # Also ignores cases where the previous line ends with a backslash as
+ # can be common when defining classes in C macros.
+ prev_line = clean_lines.lines[linenum - 1]
+ if (not IsBlankLine(prev_line)
+ and not Search(r'\b(class|struct)\b', prev_line)
+ and not Search(r'\\$', prev_line)):
+ # Try a bit harder to find the beginning of the class. This is to
+ # account for multi-line base-specifier lists, e.g.:
+ # class Derived
+ # : public Base {
+ end_class_head = class_info.starting_linenum
+ for i in range(class_info.starting_linenum, linenum):
+ if Search(r'\{\s*$', clean_lines.lines[i]):
+ end_class_head = i
+ break
+ if end_class_head < linenum - 1:
+ error(
+ filename, linenum, 'whitespace/blank_line', 3,
+ '"%s:" should be preceded by a blank line' %
+ matched.group(1))
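As a simplified sketch (not part of the patch) of the rule above: it ignores the small-class, first-line and multi-line base-specifier exemptions that CheckSectionSpacing handles, and _needs_blank_line_before is an illustrative name.

import re

def _needs_blank_line_before(line, prev_line):
    # Warn only for an access specifier whose previous line is non-blank,
    # does not declare the class, and does not end with a macro-continuation
    # backslash.
    matched = re.match(r'\s*(public|protected|private):', line)
    if not matched:
        return False
    return (bool(prev_line.strip())
            and not re.search(r'\b(class|struct)\b', prev_line)
            and not re.search(r'\\$', prev_line))

assert _needs_blank_line_before(' private:', '  int count_;')
assert not _needs_blank_line_before(' private:', '')
assert not _needs_blank_line_before(' public:', 'class Foo {')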
def GetPreviousNonBlankLine(clean_lines, linenum):
- """Return the most recent non-blank line and its line number.
+ """Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
@@ -3917,17 +3973,17 @@ def GetPreviousNonBlankLine(clean_lines, linenum):
if this is the first non-blank line.
"""
- prevlinenum = linenum - 1
- while prevlinenum >= 0:
- prevline = clean_lines.elided[prevlinenum]
- if not IsBlankLine(prevline): # if not a blank line...
- return (prevline, prevlinenum)
- prevlinenum -= 1
- return ('', -1)
+ prevlinenum = linenum - 1
+ while prevlinenum >= 0:
+ prevline = clean_lines.elided[prevlinenum]
+ if not IsBlankLine(prevline): # if not a blank line...
+ return (prevline, prevlinenum)
+ prevlinenum -= 1
+ return ('', -1)
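A self-contained equivalent (not part of the patch) that operates on a plain list of elided lines instead of a CleansedLines instance; line.strip() stands in for IsBlankLine, which treats whitespace-only lines as blank. _previous_non_blank is a made-up name.

def _previous_non_blank(elided_lines, linenum):
    # Walk backwards from the previous line until a non-blank line is found,
    # returning ('', -1) if every earlier line is blank.
    prevlinenum = linenum - 1
    while prevlinenum >= 0:
        prevline = elided_lines[prevlinenum]
        if prevline.strip():
            return (prevline, prevlinenum)
        prevlinenum -= 1
    return ('', -1)

assert _previous_non_blank(['int a;', '', '   ', '}'], 3) == ('int a;', 0)
assert _previous_non_blank(['int a;'], 0) == ('', -1)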
def CheckBraces(filename, clean_lines, linenum, error):
- """Looks for misplaced braces (e.g. at the end of line).
+ """Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
@@ -3936,116 +3992,128 @@ def CheckBraces(filename, clean_lines, linenum, error):
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum] # get rid of comments and strings
+ line = clean_lines.elided[linenum] # get rid of comments and strings
- if Match(r'\s*{\s*$', line):
- # We allow an open brace to start a line in the case where someone is using
- # braces in a block to explicitly create a new scope, which is commonly used
- # to control the lifetime of stack-allocated variables. Braces are also
- # used for brace initializers inside function calls. We don't detect this
- # perfectly: we just don't complain if the last non-whitespace character on
- # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
- # previous line starts a preprocessor block. We also allow a brace on the
- # following line if it is part of an array initialization and would not fit
- # within the 80 character limit of the preceding line.
- prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if (not Search(r'[,;:}{(]\s*$', prevline) and not Match(r'\s*#', prevline)
- and not (len(prevline) > _line_length - 2 and '[]' in prevline)):
- error(filename, linenum, 'whitespace/braces', 4,
- '{ should almost always be at the end of the previous line')
+ if Match(r'\s*{\s*$', line):
+ # We allow an open brace to start a line in the case where someone is
+ # using braces in a block to explicitly create a new scope, which is
+ # commonly used to control the lifetime of stack-allocated variables.
+ # Braces are also used for brace initializers inside function calls. We
+ # don't detect this perfectly: we just don't complain if the last
+ # non-whitespace character on the previous non-blank line is ',', ';',
+ # ':', '(', '{', or '}', or if the previous line starts a preprocessor
+ # block. We also allow a brace on the following line if it is part of an
+ # array initialization and would not fit within the 80 character limit
+ # of the preceding line.
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if (not Search(r'[,;:}{(]\s*$', prevline)
+ and not Match(r'\s*#', prevline) and
+ not (len(prevline) > _line_length - 2 and '[]' in prevline)):
+ error(filename, linenum, 'whitespace/braces', 4,
+ '{ should almost always be at the end of the previous line')
- # An else clause should be on the same line as the preceding closing brace.
- if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
- prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if Match(r'\s*}\s*$', prevline):
- error(filename, linenum, 'whitespace/newline', 4,
- 'An else should appear on the same line as the preceding }')
+ # An else clause should be on the same line as the preceding closing brace.
+ if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if Match(r'\s*}\s*$', prevline):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'An else should appear on the same line as the preceding }')
- # If braces come on one side of an else, they should be on both.
- # However, we have to worry about "else if" that spans multiple lines!
- if Search(r'else if\s*(?:constexpr\s*)?\(', line): # could be multi-line if
- brace_on_left = bool(Search(r'}\s*else if\s*(?:constexpr\s*)?\(', line))
- # find the ( after the if
- pos = line.find('else if')
- pos = line.find('(', pos)
- if pos > 0:
- (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
- brace_on_right = endline[endpos:].find('{') != -1
- if brace_on_left != brace_on_right: # must be brace after if
+ # If braces come on one side of an else, they should be on both.
+ # However, we have to worry about "else if" that spans multiple lines!
+ if Search(r'else if\s*(?:constexpr\s*)?\(', line): # could be multi-line if
+ brace_on_left = bool(Search(r'}\s*else if\s*(?:constexpr\s*)?\(', line))
+ # find the ( after the if
+ pos = line.find('else if')
+ pos = line.find('(', pos)
+ if pos > 0:
+ (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
+ brace_on_right = endline[endpos:].find('{') != -1
+ if brace_on_left != brace_on_right: # must be brace after if
+ error(
+ filename, linenum, 'readability/braces', 5,
+ 'If an else has a brace on one side, it should have it on both'
+ )
+ elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
- elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
- error(filename, linenum, 'readability/braces', 5,
- 'If an else has a brace on one side, it should have it on both')
- # Likewise, an else should never have the else clause on the same line
- if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
- error(filename, linenum, 'whitespace/newline', 4,
- 'Else clause should never be on same line as else (use 2 lines)')
+ # Likewise, an else should never have the else clause on the same line
+ if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'Else clause should never be on same line as else (use 2 lines)')
- # In the same way, a do/while should never be on one line
- if Match(r'\s*do [^\s{]', line):
- error(filename, linenum, 'whitespace/newline', 4,
- 'do/while clauses should not be on a single line')
+ # In the same way, a do/while should never be on one line
+ if Match(r'\s*do [^\s{]', line):
+ error(filename, linenum, 'whitespace/newline', 4,
+ 'do/while clauses should not be on a single line')
- # Check single-line if/else bodies. The style guide says 'curly braces are not
- # required for single-line statements'. We additionally allow multi-line,
- # single statements, but we reject anything with more than one semicolon in
- # it. This means that the first semicolon after the if should be at the end of
- # its line, and the line after that should have an indent level equal to or
- # lower than the if. We also check for ambiguous if/else nesting without
- # braces.
- if_else_match = Search(r'\b(if\s*(?:constexpr\s*)?\(|else\b)', line)
- if if_else_match and not Match(r'\s*#', line):
- if_indent = GetIndentLevel(line)
- endline, endlinenum, endpos = line, linenum, if_else_match.end()
- if_match = Search(r'\bif\s*(?:constexpr\s*)?\(', line)
- if if_match:
- # This could be a multiline if condition, so find the end first.
- pos = if_match.end() - 1
- (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
- # Check for an opening brace, either directly after the if or on the next
- # line. If found, this isn't a single-statement conditional.
- if (not Match(r'\s*{', endline[endpos:])
- and not (Match(r'\s*$', endline[endpos:])
- and endlinenum < (len(clean_lines.elided) - 1)
- and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
- while (endlinenum < len(clean_lines.elided)
- and ';' not in clean_lines.elided[endlinenum][endpos:]):
- endlinenum += 1
- endpos = 0
- if endlinenum < len(clean_lines.elided):
- endline = clean_lines.elided[endlinenum]
- # We allow a mix of whitespace and closing braces (e.g. for one-liner
- # methods) and a single \ after the semicolon (for macros)
- endpos = endline.find(';')
- if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
- # Semicolon isn't the last character, there's something trailing.
- # Output a warning if the semicolon is not contained inside
- # a lambda expression.
- if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
- endline):
- error(filename, linenum, 'readability/braces', 4,
- 'If/else bodies with multiple statements require braces')
- elif endlinenum < len(clean_lines.elided) - 1:
- # Make sure the next line is dedented
- next_line = clean_lines.elided[endlinenum + 1]
- next_indent = GetIndentLevel(next_line)
- # With ambiguous nested if statements, this will error out on the
- # if that *doesn't* match the else, regardless of whether it's the
- # inner one or outer one.
- if (if_match and Match(r'\s*else\b', next_line)
- and next_indent != if_indent):
- error(filename, linenum, 'readability/braces', 4,
- 'Else clause should be indented at the same level as if. '
- 'Ambiguous nested if/else chains require braces.')
- elif next_indent > if_indent:
- error(filename, linenum, 'readability/braces', 4,
- 'If/else bodies with multiple statements require braces')
+ # Check single-line if/else bodies. The style guide says 'curly braces are
+ # not required for single-line statements'. We additionally allow
+ # multi-line, single statements, but we reject anything with more than one
+ # semicolon in it. This means that the first semicolon after the if should
+ # be at the end of its line, and the line after that should have an indent
+ # level equal to or lower than the if. We also check for ambiguous if/else
+ # nesting without braces.
+ if_else_match = Search(r'\b(if\s*(?:constexpr\s*)?\(|else\b)', line)
+ if if_else_match and not Match(r'\s*#', line):
+ if_indent = GetIndentLevel(line)
+ endline, endlinenum, endpos = line, linenum, if_else_match.end()
+ if_match = Search(r'\bif\s*(?:constexpr\s*)?\(', line)
+ if if_match:
+ # This could be a multiline if condition, so find the end first.
+ pos = if_match.end() - 1
+ (endline, endlinenum,
+ endpos) = CloseExpression(clean_lines, linenum, pos)
+ # Check for an opening brace, either directly after the if or on the
+ # next line. If found, this isn't a single-statement conditional.
+ if (not Match(r'\s*{', endline[endpos:])
+ and not (Match(r'\s*$', endline[endpos:]) and endlinenum <
+ (len(clean_lines.elided) - 1) and Match(
+ r'\s*{', clean_lines.elided[endlinenum + 1]))):
+ while (endlinenum < len(clean_lines.elided)
+ and ';' not in clean_lines.elided[endlinenum][endpos:]):
+ endlinenum += 1
+ endpos = 0
+ if endlinenum < len(clean_lines.elided):
+ endline = clean_lines.elided[endlinenum]
+ # We allow a mix of whitespace and closing braces (e.g. for
+ # one-liner methods) and a single \ after the semicolon (for
+ # macros)
+ endpos = endline.find(';')
+ if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
+ # Semicolon isn't the last character, there's something
+ # trailing. Output a warning if the semicolon is not
+ # contained inside a lambda expression.
+ if not Match(
+ r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
+ endline):
+ error(
+ filename, linenum, 'readability/braces', 4,
+ 'If/else bodies with multiple statements require braces'
+ )
+ elif endlinenum < len(clean_lines.elided) - 1:
+ # Make sure the next line is dedented
+ next_line = clean_lines.elided[endlinenum + 1]
+ next_indent = GetIndentLevel(next_line)
+ # With ambiguous nested if statements, this will error out
+ # on the if that *doesn't* match the else, regardless of
+ # whether it's the inner one or outer one.
+ if (if_match and Match(r'\s*else\b', next_line)
+ and next_indent != if_indent):
+ error(
+ filename, linenum, 'readability/braces', 4,
+ 'Else clause should be indented at the same level as if. '
+ 'Ambiguous nested if/else chains require braces.')
+ elif next_indent > if_indent:
+ error(
+ filename, linenum, 'readability/braces', 4,
+ 'If/else bodies with multiple statements require braces'
+ )
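For illustration (not part of the patch), the else-placement rule near the top of CheckBraces boils down to two anchored matches on the current line and the previous non-blank line; _else_should_follow_brace is a made-up helper name.

import re

def _else_should_follow_brace(line, prevline):
    # An 'else' that begins its own line while the previous non-blank line is
    # nothing but a closing brace should be joined onto that brace.
    return bool(re.match(r'\s*else\b\s*(?:if\b|\{|$)', line)
                and re.match(r'\s*}\s*$', prevline))

assert _else_should_follow_brace('else {', '  }')
assert _else_should_follow_brace('else if (x) {', '}')
assert not _else_should_follow_brace('} else {', '}')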
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
- """Looks for redundant trailing semicolon.
+ """Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
@@ -4054,143 +4122,143 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Block bodies should not be followed by a semicolon. Due to C++11
- # brace initialization and C++20 concepts, there are more places
- # where semicolons are required than not. Places that are
- # recognized as true positives are listed below.
- #
- # 1. Some flavor of block following closing parenthesis:
- # for (;;) {};
- # while (...) {};
- # switch (...) {};
- # Function(...) {};
- # if (...) {};
- # if (...) else if (...) {};
- #
- # 2. else block:
- # if (...) else {};
- #
- # 3. const member function:
- # Function(...) const {};
- #
- # 4. Block following some statement:
- # x = 42;
- # {};
- #
- # 5. Block at the beginning of a function:
- # Function(...) {
- # {};
- # }
- #
- # Note that naively checking for the preceding "{" will also match
- # braces inside multi-dimensional arrays, but this is fine since
- # that expression will not contain semicolons.
- #
- # 6. Block following another block:
- # while (true) {}
- # {};
- #
- # 7. End of namespaces:
- # namespace {};
- #
- # These semicolons seems far more common than other kinds of
- # redundant semicolons, possibly due to people converting classes
- # to namespaces. For now we do not warn for this case.
- #
- # Try matching case 1 first.
- match = Match(r'^(.*\)\s*)\{', line)
- if match:
- # Matched closing parenthesis (case 1). Check the token before the
- # matching opening parenthesis, and don't warn if it looks like a
- # macro. This avoids these false positives:
- # - macro that defines a base class
- # - multi-line macro that defines a base class
- # - macro that defines the whole class-head
+ # Block bodies should not be followed by a semicolon. Due to C++11
+ # brace initialization and C++20 concepts, there are more places
+ # where semicolons are required than not. Places that are
+ # recognized as true positives are listed below.
#
- # But we still issue warnings for macros that we know are safe to
- # warn, specifically:
- # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
- # - TYPED_TEST
- # - INTERFACE_DEF
- # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
+ # 1. Some flavor of block following closing parenthesis:
+ # for (;;) {};
+ # while (...) {};
+ # switch (...) {};
+ # Function(...) {};
+ # if (...) {};
+ # if (...) else if (...) {};
#
- # We implement an allowlist of safe macros instead of a blocklist of
- # unsafe macros, even though the latter appears less frequently in
- # google code and would have been easier to implement. This is because
- # the downside for getting the allowlist wrong means some extra
- # semicolons, while the downside for getting the blocklist wrong
- # would result in compile errors.
+ # 2. else block:
+ # if (...) else {};
#
- # In addition to macros, we also don't want to warn on
- # - Compound literals
- # - Lambdas
- # - alignas specifier with anonymous structs
- # - decltype
- # - Type casts with parentheses, e.g.: var = (Type){value};
- # - Return type casts with parentheses, e.g.: return (Type){value};
- # - Function pointers with initializer list, e.g.: int (*f)(){};
- # - Requires expression, e.g. C = requires(){};
- closing_brace_pos = match.group(1).rfind(')')
- opening_parenthesis = ReverseCloseExpression(
- clean_lines, linenum, closing_brace_pos)
- if opening_parenthesis[2] > -1:
- line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
- macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
- func = Match(r'^(.*\])\s*$', line_prefix)
- if ((macro and macro.group(1) not in
- ('TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
- 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
- 'LOCKS_EXCLUDED', 'INTERFACE_DEF'))
- or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1)))
- or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix)
- or Search(r'\b(decltype|requires)$', line_prefix)
- or Search(r'(?:\s+=|\breturn)\s*$', line_prefix)
- or (Match(r'^\s*$', line_prefix) and Search(
- r'(?:\s+=|\breturn)\s*$', clean_lines.elided[linenum - 1]))
- or Search(r'\(\*\w+\)$', line_prefix)):
- match = None
- if (match and
- opening_parenthesis[1] > 1 and
- Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
- # Multi-line lambda-expression
- match = None
+ # 3. const member function:
+ # Function(...) const {};
+ #
+ # 4. Block following some statement:
+ # x = 42;
+ # {};
+ #
+ # 5. Block at the beginning of a function:
+ # Function(...) {
+ # {};
+ # }
+ #
+ # Note that naively checking for the preceding "{" will also match
+ # braces inside multi-dimensional arrays, but this is fine since
+ # that expression will not contain semicolons.
+ #
+ # 6. Block following another block:
+ # while (true) {}
+ # {};
+ #
+ # 7. End of namespaces:
+ # namespace {};
+ #
+    # These semicolons seem far more common than other kinds of
+ # redundant semicolons, possibly due to people converting classes
+ # to namespaces. For now we do not warn for this case.
+ #
+ # Try matching case 1 first.
+ match = Match(r'^(.*\)\s*)\{', line)
+ if match:
+ # Matched closing parenthesis (case 1). Check the token before the
+ # matching opening parenthesis, and don't warn if it looks like a
+ # macro. This avoids these false positives:
+ # - macro that defines a base class
+ # - multi-line macro that defines a base class
+ # - macro that defines the whole class-head
+ #
+ # But we still issue warnings for macros that we know are safe to
+ # warn, specifically:
+ # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
+ # - TYPED_TEST
+ # - INTERFACE_DEF
+ # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
+ #
+ # We implement an allowlist of safe macros instead of a blocklist of
+ # unsafe macros, even though the latter appears less frequently in
+ # google code and would have been easier to implement. This is because
+ # the downside for getting the allowlist wrong means some extra
+ # semicolons, while the downside for getting the blocklist wrong
+ # would result in compile errors.
+ #
+ # In addition to macros, we also don't want to warn on
+ # - Compound literals
+ # - Lambdas
+ # - alignas specifier with anonymous structs
+ # - decltype
+ # - Type casts with parentheses, e.g.: var = (Type){value};
+ # - Return type casts with parentheses, e.g.: return (Type){value};
+ # - Function pointers with initializer list, e.g.: int (*f)(){};
+ # - Requires expression, e.g. C = requires(){};
+ closing_brace_pos = match.group(1).rfind(')')
+ opening_parenthesis = ReverseCloseExpression(clean_lines, linenum,
+ closing_brace_pos)
+ if opening_parenthesis[2] > -1:
+ line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
+ macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
+ func = Match(r'^(.*\])\s*$', line_prefix)
+ if ((macro and macro.group(1)
+ not in ('TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
+ 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
+ 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
+ (func and not Search(r'\boperator\s*\[\s*\]', func.group(1)))
+ or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix)
+ or Search(r'\b(decltype|requires)$', line_prefix)
+ or Search(r'(?:\s+=|\breturn)\s*$', line_prefix) or
+ (Match(r'^\s*$', line_prefix) and Search(
+ r'(?:\s+=|\breturn)\s*$', clean_lines.elided[linenum - 1]))
+ or Search(r'\(\*\w+\)$', line_prefix)):
+ match = None
+ if (match and opening_parenthesis[1] > 1 and Search(
+ r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
+ # Multi-line lambda-expression
+ match = None
- else:
- # Try matching cases 2-3.
- match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
- if not match:
- # Try matching cases 4-6. These are always matched on separate lines.
- #
- # Note that we can't simply concatenate the previous line to the
- # current line and do a single match, otherwise we may output
- # duplicate warnings for the blank line case:
- # if (cond) {
- # // blank line
- # }
- prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if prevline and Search(r'[;{}]\s*$', prevline):
- match = Match(r'^(\s*)\{', line)
+ else:
+ # Try matching cases 2-3.
+ match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
+ if not match:
+ # Try matching cases 4-6. These are always matched on separate
+ # lines.
+ #
+ # Note that we can't simply concatenate the previous line to the
+ # current line and do a single match, otherwise we may output
+ # duplicate warnings for the blank line case:
+ # if (cond) {
+ # // blank line
+ # }
+ prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+ if prevline and Search(r'[;{}]\s*$', prevline):
+ match = Match(r'^(\s*)\{', line)
- # Check matching closing brace
- if match:
- (endline, endlinenum, endpos) = CloseExpression(
- clean_lines, linenum, len(match.group(1)))
- if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
- # Current {} pair is eligible for semicolon check, and we have found
- # the redundant semicolon, output warning here.
- #
- # Note: because we are scanning forward for opening braces, and
- # outputting warnings for the matching closing brace, if there are
- # nested blocks with trailing semicolons, we will get the error
- # messages in reversed order.
- error(filename, endlinenum, 'readability/braces', 4,
- "You don't need a ; after a }")
+ # Check matching closing brace
+ if match:
+ (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum,
+ len(match.group(1)))
+ if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
+ # Current {} pair is eligible for semicolon check, and we have found
+ # the redundant semicolon, output warning here.
+ #
+ # Note: because we are scanning forward for opening braces, and
+ # outputting warnings for the matching closing brace, if there are
+ # nested blocks with trailing semicolons, we will get the error
+ # messages in reversed order.
+ error(filename, endlinenum, 'readability/braces', 4,
+ "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
- """Look for empty loop/conditional body with only a single semicolon.
+ """Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
@@ -4199,102 +4267,115 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
error: The function to call with any errors found.
"""
- # Search for loop keywords at the beginning of the line. Because only
- # whitespaces are allowed before the keywords, this will also ignore most
- # do-while-loops, since those lines should start with closing brace.
- #
- # We also check "if" blocks here, since an empty conditional block
- # is likely an error.
- line = clean_lines.elided[linenum]
- matched = Match(r'\s*(for|while|if)\s*\(', line)
- if matched:
- # Find the end of the conditional expression.
- (end_line, end_linenum, end_pos) = CloseExpression(
- clean_lines, linenum, line.find('('))
+ # Search for loop keywords at the beginning of the line. Because only
+ # whitespace is allowed before the keywords, this will also ignore most
+ # do-while loops, since those lines should start with a closing brace.
+ #
+ # We also check "if" blocks here, since an empty conditional block
+ # is likely an error.
+ line = clean_lines.elided[linenum]
+ matched = Match(r'\s*(for|while|if)\s*\(', line)
+ if matched:
+ # Find the end of the conditional expression.
+ (end_line, end_linenum,
+ end_pos) = CloseExpression(clean_lines, linenum, line.find('('))
- # Output warning if what follows the condition expression is a semicolon.
- # No warning for all other cases, including whitespace or newline, since we
- # have a separate check for semicolons preceded by whitespace.
- if end_pos >= 0 and Match(r';', end_line[end_pos:]):
- if matched.group(1) == 'if':
- error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
- 'Empty conditional bodies should use {}')
- else:
- error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
- 'Empty loop bodies should use {} or continue')
+ # Output warning if what follows the condition expression is a
+ # semicolon. No warning for all other cases, including whitespace or
+ # newline, since we have a separate check for semicolons preceded by
+ # whitespace.
+ if end_pos >= 0 and Match(r';', end_line[end_pos:]):
+ if matched.group(1) == 'if':
+ error(filename, end_linenum,
+ 'whitespace/empty_conditional_body', 5,
+ 'Empty conditional bodies should use {}')
+ else:
+ error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
+ 'Empty loop bodies should use {} or continue')
- # Check for if statements that have completely empty bodies (no comments)
- # and no else clauses.
- if end_pos >= 0 and matched.group(1) == 'if':
- # Find the position of the opening { for the if statement.
- # Return without logging an error if it has no brackets.
- opening_linenum = end_linenum
- opening_line_fragment = end_line[end_pos:]
- # Loop until EOF or find anything that's not whitespace or opening {.
- while not Search(r'^\s*\{', opening_line_fragment):
- if Search(r'^(?!\s*$)', opening_line_fragment):
- # Conditional has no brackets.
- return
- opening_linenum += 1
- if opening_linenum == len(clean_lines.elided):
- # Couldn't find conditional's opening { or any code before EOF.
- return
- opening_line_fragment = clean_lines.elided[opening_linenum]
- # Set opening_line (opening_line_fragment may not be entire opening line).
- opening_line = clean_lines.elided[opening_linenum]
+ # Check for if statements that have completely empty bodies (no
+ # comments) and no else clauses.
+ if end_pos >= 0 and matched.group(1) == 'if':
+ # Find the position of the opening { for the if statement.
+ # Return without logging an error if it has no brackets.
+ opening_linenum = end_linenum
+ opening_line_fragment = end_line[end_pos:]
+ # Loop until EOF or find anything that's not whitespace or opening
+ # {.
+ while not Search(r'^\s*\{', opening_line_fragment):
+ if Search(r'^(?!\s*$)', opening_line_fragment):
+ # Conditional has no brackets.
+ return
+ opening_linenum += 1
+ if opening_linenum == len(clean_lines.elided):
+ # Couldn't find conditional's opening { or any code before
+ # EOF.
+ return
+ opening_line_fragment = clean_lines.elided[opening_linenum]
+ # Set opening_line (opening_line_fragment may not be entire opening
+ # line).
+ opening_line = clean_lines.elided[opening_linenum]
- # Find the position of the closing }.
- opening_pos = opening_line_fragment.find('{')
- if opening_linenum == end_linenum:
- # We need to make opening_pos relative to the start of the entire line.
- opening_pos += end_pos
- (closing_line, closing_linenum, closing_pos) = CloseExpression(
- clean_lines, opening_linenum, opening_pos)
- if closing_pos < 0:
- return
+ # Find the position of the closing }.
+ opening_pos = opening_line_fragment.find('{')
+ if opening_linenum == end_linenum:
+ # We need to make opening_pos relative to the start of the
+ # entire line.
+ opening_pos += end_pos
+ (closing_line, closing_linenum,
+ closing_pos) = CloseExpression(clean_lines, opening_linenum,
+ opening_pos)
+ if closing_pos < 0:
+ return
- # Now construct the body of the conditional. This consists of the portion
- # of the opening line after the {, all lines until the closing line,
- # and the portion of the closing line before the }.
- if (clean_lines.raw_lines[opening_linenum] !=
- CleanseComments(clean_lines.raw_lines[opening_linenum])):
- # Opening line ends with a comment, so conditional isn't empty.
- return
- if closing_linenum > opening_linenum:
- # Opening line after the {. Ignore comments here since we checked above.
- body = list(opening_line[opening_pos+1:])
- # All lines until closing line, excluding closing line, with comments.
- body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
- # Closing line before the }. Won't (and can't) have comments.
- body.append(clean_lines.elided[closing_linenum][:closing_pos-1])
- body = '\n'.join(body)
- else:
- # If statement has brackets and fits on a single line.
- body = opening_line[opening_pos+1:closing_pos-1]
+ # Now construct the body of the conditional. This consists of the
+ # portion of the opening line after the {, all lines until the
+ # closing line, and the portion of the closing line before the }.
+ if (clean_lines.raw_lines[opening_linenum] != CleanseComments(
+ clean_lines.raw_lines[opening_linenum])):
+ # Opening line ends with a comment, so conditional isn't empty.
+ return
+ if closing_linenum > opening_linenum:
+ # Opening line after the {. Ignore comments here since we
+ # checked above.
+ body = list(opening_line[opening_pos + 1:])
+ # All lines until closing line, excluding closing line, with
+ # comments.
+ body.extend(clean_lines.raw_lines[opening_linenum +
+ 1:closing_linenum])
+ # Closing line before the }. Won't (and can't) have comments.
+ body.append(clean_lines.elided[closing_linenum][:closing_pos -
+ 1])
+ body = '\n'.join(body)
+ else:
+ # If statement has brackets and fits on a single line.
+ body = opening_line[opening_pos + 1:closing_pos - 1]
- # Check if the body is empty
- if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
- return
- # The body is empty. Now make sure there's not an else clause.
- current_linenum = closing_linenum
- current_line_fragment = closing_line[closing_pos:]
- # Loop until EOF or find anything that's not whitespace or else clause.
- while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
- if Search(r'^(?=\s*else)', current_line_fragment):
- # Found an else clause, so don't log an error.
- return
- current_linenum += 1
- if current_linenum == len(clean_lines.elided):
- break
- current_line_fragment = clean_lines.elided[current_linenum]
+ # Check if the body is empty
+ if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
+ return
+ # The body is empty. Now make sure there's not an else clause.
+ current_linenum = closing_linenum
+ current_line_fragment = closing_line[closing_pos:]
+ # Loop until EOF or find anything that's not whitespace or else
+ # clause.
+ while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
+ if Search(r'^(?=\s*else)', current_line_fragment):
+ # Found an else clause, so don't log an error.
+ return
+ current_linenum += 1
+ if current_linenum == len(clean_lines.elided):
+ break
+ current_line_fragment = clean_lines.elided[current_linenum]
- # The body is empty and there's no else clause until EOF or other code.
- error(filename, end_linenum, 'whitespace/empty_if_body', 4,
- ('If statement had no body and no else clause'))
+ # The body is empty and there's no else clause until EOF or other
+ # code.
+ error(filename, end_linenum, 'whitespace/empty_if_body', 4,
+ ('If statement had no body and no else clause'))
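
A minimal single-line sketch of the empty-body test above: the real code uses CloseExpression to find the end of a condition that may span several lines, while this toy helper (the name is invented here) only handles a condition and its semicolon on one line.

    import re

    def empty_body_on_one_line(line):
        """Simplified single-line version of the check: does a for/while/if
        condition end in a bare semicolon?"""
        m = re.match(r'\s*(for|while|if)\s*\(', line)
        if not m:
            return None
        # Find the parenthesis that closes the condition (single-line only).
        depth = 0
        for i, ch in enumerate(line[m.end() - 1:], start=m.end() - 1):
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
                if depth == 0:
                    return bool(re.match(r';', line[i + 1:]))
        return None

    print(empty_body_on_one_line('while (Poll());'))        # True -> warn
    print(empty_body_on_one_line('for (;;) { Work(); }'))   # False
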
def FindCheckMacro(line):
- """Find a replaceable CHECK-like macro.
+ """Find a replaceable CHECK-like macro.
Args:
line: line to search on.
@@ -4302,22 +4383,22 @@ def FindCheckMacro(line):
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
- for macro in _CHECK_MACROS:
- i = line.find(macro)
- if i >= 0:
- # Find opening parenthesis. Do a regular expression match here
- # to make sure that we are matching the expected CHECK macro, as
- # opposed to some other macro that happens to contain the CHECK
- # substring.
- matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
- if not matched:
- continue
- return (macro, len(matched.group(1)))
- return (None, -1)
+ for macro in _CHECK_MACROS:
+ i = line.find(macro)
+ if i >= 0:
+ # Find opening parenthesis. Do a regular expression match here
+ # to make sure that we are matching the expected CHECK macro, as
+ # opposed to some other macro that happens to contain the CHECK
+ # substring.
+ matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
+ if not matched:
+ continue
+ return (macro, len(matched.group(1)))
+ return (None, -1)
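
The word-boundary match above is what keeps a plain substring hit from being treated as a CHECK macro; a small sketch with hypothetical lines:

    import re

    # A plain substring search would treat MY_CHECK_HELPER( as a CHECK, but
    # the word-boundary regex used above rejects it.
    for line in ['CHECK(ptr != nullptr);', 'MY_CHECK_HELPER(ptr);']:
        found = line.find('CHECK') >= 0
        matched = re.match(r'^(.*\bCHECK\s*)\(', line)
        print(line, '-> substring:', found, ' real macro:', bool(matched))
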
def CheckCheck(filename, clean_lines, linenum, error):
- """Checks the use of CHECK and EXPECT macros.
+ """Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
@@ -4326,116 +4407,117 @@ def CheckCheck(filename, clean_lines, linenum, error):
error: The function to call with any errors found.
"""
- # Decide the set of replacement macros that should be suggested
- lines = clean_lines.elided
- (check_macro, start_pos) = FindCheckMacro(lines[linenum])
- if not check_macro:
- return
-
- # Find end of the boolean expression by matching parentheses
- (last_line, end_line, end_pos) = CloseExpression(
- clean_lines, linenum, start_pos)
- if end_pos < 0:
- return
-
- # If the check macro is followed by something other than a
- # semicolon, assume users will log their own custom error messages
- # and don't suggest any replacements.
- if not Match(r'\s*;', last_line[end_pos:]):
- return
-
- if linenum == end_line:
- expression = lines[linenum][start_pos + 1:end_pos - 1]
- else:
- expression = lines[linenum][start_pos + 1:]
- for i in range(linenum + 1, end_line):
- expression += lines[i]
- expression += last_line[0:end_pos - 1]
-
- # Parse expression so that we can take parentheses into account.
- # This avoids false positives for inputs like "CHECK((a < 4) == b)",
- # which is not replaceable by CHECK_LE.
- lhs = ''
- rhs = ''
- operator = None
- while expression:
- matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
- r'==|!=|>=|>|<=|<|\()(.*)$', expression)
- if matched:
- token = matched.group(1)
- if token == '(':
- # Parenthesized operand
- expression = matched.group(2)
- (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
- if end < 0:
- return # Unmatched parenthesis
- lhs += '(' + expression[0:end]
- expression = expression[end:]
- elif token in ('&&', '||'):
- # Logical and/or operators. This means the expression
- # contains more than one term, for example:
- # CHECK(42 < a && a < b);
- #
- # These are not replaceable with CHECK_LE, so bail out early.
+ # Decide the set of replacement macros that should be suggested
+ lines = clean_lines.elided
+ (check_macro, start_pos) = FindCheckMacro(lines[linenum])
+ if not check_macro:
return
- elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
- # Non-relational operator
- lhs += token
- expression = matched.group(2)
- else:
- # Relational operator
- operator = token
- rhs = matched.group(2)
- break
+
+ # Find end of the boolean expression by matching parentheses
+ (last_line, end_line, end_pos) = CloseExpression(clean_lines, linenum,
+ start_pos)
+ if end_pos < 0:
+ return
+
+ # If the check macro is followed by something other than a
+ # semicolon, assume users will log their own custom error messages
+ # and don't suggest any replacements.
+ if not Match(r'\s*;', last_line[end_pos:]):
+ return
+
+ if linenum == end_line:
+ expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
- # Unparenthesized operand. Instead of appending to lhs one character
- # at a time, we do another regular expression match to consume several
- # characters at once if possible. Trivial benchmark shows that this
- # is more efficient when the operands are longer than a single
- # character, which is generally the case.
- matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
- if not matched:
- matched = Match(r'^(\s*\S)(.*)$', expression)
- if not matched:
- break
- lhs += matched.group(1)
- expression = matched.group(2)
+ expression = lines[linenum][start_pos + 1:]
+ for i in range(linenum + 1, end_line):
+ expression += lines[i]
+ expression += last_line[0:end_pos - 1]
- # Only apply checks if we got all parts of the boolean expression
- if not (lhs and operator and rhs):
- return
+ # Parse expression so that we can take parentheses into account.
+ # This avoids false positives for inputs like "CHECK((a < 4) == b)",
+ # which is not replaceable by CHECK_LE.
+ lhs = ''
+ rhs = ''
+ operator = None
+ while expression:
+ matched = Match(
+ r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
+ r'==|!=|>=|>|<=|<|\()(.*)$', expression)
+ if matched:
+ token = matched.group(1)
+ if token == '(':
+ # Parenthesized operand
+ expression = matched.group(2)
+ (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
+ if end < 0:
+ return # Unmatched parenthesis
+ lhs += '(' + expression[0:end]
+ expression = expression[end:]
+ elif token in ('&&', '||'):
+ # Logical and/or operators. This means the expression
+ # contains more than one term, for example:
+ # CHECK(42 < a && a < b);
+ #
+ # These are not replaceable with CHECK_LE, so bail out early.
+ return
+ elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
+ # Non-relational operator
+ lhs += token
+ expression = matched.group(2)
+ else:
+ # Relational operator
+ operator = token
+ rhs = matched.group(2)
+ break
+ else:
+ # Unparenthesized operand. Instead of appending to lhs one
+ # character at a time, we do another regular expression match to
+ # consume several characters at once if possible. A trivial benchmark
+ # shows that this is more efficient when the operands are longer
+ # than a single character, which is generally the case.
+ matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
+ if not matched:
+ matched = Match(r'^(\s*\S)(.*)$', expression)
+ if not matched:
+ break
+ lhs += matched.group(1)
+ expression = matched.group(2)
- # Check that rhs do not contain logical operators. We already know
- # that lhs is fine since the loop above parses out && and ||.
- if rhs.find('&&') > -1 or rhs.find('||') > -1:
- return
+ # Only apply checks if we got all parts of the boolean expression
+ if not (lhs and operator and rhs):
+ return
- # At least one of the operands must be a constant literal. This is
- # to avoid suggesting replacements for unprintable things like
- # CHECK(variable != iterator)
- #
- # The following pattern matches decimal, hex integers, strings, and
- # characters (in that order).
- lhs = lhs.strip()
- rhs = rhs.strip()
- match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
- if Match(match_constant, lhs) or Match(match_constant, rhs):
- # Note: since we know both lhs and rhs, we can provide a more
- # descriptive error message like:
- # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
- # Instead of:
- # Consider using CHECK_EQ instead of CHECK(a == b)
+ # Check that rhs does not contain logical operators. We already know
+ # that lhs is fine since the loop above parses out && and ||.
+ if rhs.find('&&') > -1 or rhs.find('||') > -1:
+ return
+
+ # At least one of the operands must be a constant literal. This is
+ # to avoid suggesting replacements for unprintable things like
+ # CHECK(variable != iterator)
#
- # We are still keeping the less descriptive message because if lhs
- # or rhs gets long, the error message might become unreadable.
- error(filename, linenum, 'readability/check', 2,
- 'Consider using %s instead of %s(a %s b)' % (
- _CHECK_REPLACEMENT[check_macro][operator],
- check_macro, operator))
+ # The following pattern matches decimal and hex integers, strings, and
+ # characters (in that order).
+ lhs = lhs.strip()
+ rhs = rhs.strip()
+ match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
+ if Match(match_constant, lhs) or Match(match_constant, rhs):
+ # Note: since we know both lhs and rhs, we can provide a more
+ # descriptive error message like:
+ # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
+ # Instead of:
+ # Consider using CHECK_EQ instead of CHECK(a == b)
+ #
+ # We are still keeping the less descriptive message because if lhs
+ # or rhs gets long, the error message might become unreadable.
+ error(
+ filename, linenum, 'readability/check', 2,
+ 'Consider using %s instead of %s(a %s b)' %
+ (_CHECK_REPLACEMENT[check_macro][operator], check_macro, operator))
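
The constant-literal pattern near the end of CheckCheck is what gates the suggestion; a short sketch applying the same regex to a few hypothetical operands:

    import re

    # The same constant-literal pattern as above, applied to a few operands.
    MATCH_CONSTANT = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
    for operand in ['42', '0xFFul', '"foo"', 'x', 'some_iterator']:
        print(operand, '->', bool(re.match(MATCH_CONSTANT, operand)))
    # Only when one side is a literal does CheckCheck suggest e.g.
    # "Consider using CHECK_EQ instead of CHECK(a == b)".
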
def CheckAltTokens(filename, clean_lines, linenum, error):
- """Check alternative keywords being used in boolean expressions.
+ """Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
@@ -4443,32 +4525,33 @@ def CheckAltTokens(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Avoid preprocessor lines
- if Match(r'^\s*#', line):
- return
+ # Avoid preprocessor lines
+ if Match(r'^\s*#', line):
+ return
- # Last ditch effort to avoid multi-line comments. This will not help
- # if the comment started before the current line or ended after the
- # current line, but it catches most of the false positives. At least,
- # it provides a way to workaround this warning for people who use
- # multi-line comments in preprocessor macros.
- #
- # TODO(unknown): remove this once cpplint has better support for
- # multi-line comments.
- if line.find('/*') >= 0 or line.find('*/') >= 0:
- return
+ # Last ditch effort to avoid multi-line comments. This will not help
+ # if the comment started before the current line or ended after the
+ # current line, but it catches most of the false positives. At least,
+ # it provides a way to work around this warning for people who use
+ # multi-line comments in preprocessor macros.
+ #
+ # TODO(unknown): remove this once cpplint has better support for
+ # multi-line comments.
+ if line.find('/*') >= 0 or line.find('*/') >= 0:
+ return
- for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
- error(filename, linenum, 'readability/alt_tokens', 2,
- 'Use operator %s instead of %s' % (
- _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
+ for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
+ error(
+ filename, linenum, 'readability/alt_tokens', 2,
+ 'Use operator %s instead of %s' %
+ (_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
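
For illustration, a sketch of the replacement idea with a few of the alternative-token mappings; the real _ALT_TOKEN_REPLACEMENT table and _ALT_TOKEN_REPLACEMENT_PATTERN are defined elsewhere in cpplint.py and use a stricter context pattern, so this simplified word-boundary version is only an approximation.

    import re

    # A few of the alternative-token mappings; the simplified pattern only
    # looks for whole words, while the real pattern also requires particular
    # surrounding characters to reduce false positives.
    ALT_TOKENS = {'and': '&&', 'or': '||', 'not': '!', 'not_eq': '!='}
    pattern = re.compile(
        r'\b(' + '|'.join(sorted(ALT_TOKENS, key=len, reverse=True)) + r')\b')
    line = 'if (a and not b) return;'
    for m in pattern.finditer(line):
        print('Use operator %s instead of %s' %
              (ALT_TOKENS[m.group(1)], m.group(1)))
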
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
- """Checks rules from the 'C++ style rules' section of cppguide.html.
+ """Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
@@ -4484,104 +4567,105 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error: The function to call with any errors found.
"""
- # Don't use "elided" lines here, otherwise we can't check commented lines.
- # Don't want to use "raw" either, because we don't want to check inside C++11
- # raw strings,
- raw_lines = clean_lines.lines_without_raw_strings
- line = raw_lines[linenum]
- prev = raw_lines[linenum - 1] if linenum > 0 else ''
+ # Don't use "elided" lines here, otherwise we can't check commented lines.
+ # Don't want to use "raw" either, because we don't want to check inside
+ # C++11 raw strings.
+ raw_lines = clean_lines.lines_without_raw_strings
+ line = raw_lines[linenum]
+ prev = raw_lines[linenum - 1] if linenum > 0 else ''
- if line.find('\t') != -1:
- error(filename, linenum, 'whitespace/tab', 1,
- 'Tab found; better to use spaces')
+ if line.find('\t') != -1:
+ error(filename, linenum, 'whitespace/tab', 1,
+ 'Tab found; better to use spaces')
- # One or three blank spaces at the beginning of the line is weird; it's
- # hard to reconcile that with 2-space indents.
- # NOTE: here are the conditions rob pike used for his tests. Mine aren't
- # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
- # if(RLENGTH > 20) complain = 0;
- # if(match($0, " +(error|private|public|protected):")) complain = 0;
- # if(match(prev, "&& *$")) complain = 0;
- # if(match(prev, "\\|\\| *$")) complain = 0;
- # if(match(prev, "[\",=><] *$")) complain = 0;
- # if(match($0, " <<")) complain = 0;
- # if(match(prev, " +for \\(")) complain = 0;
- # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
- scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
- classinfo = nesting_state.InnermostClass()
- initial_spaces = 0
- cleansed_line = clean_lines.elided[linenum]
- while initial_spaces < len(line) and line[initial_spaces] == ' ':
- initial_spaces += 1
- # There are certain situations we allow one space, notably for
- # section labels, and also lines containing multi-line raw strings.
- # We also don't check for lines that look like continuation lines
- # (of lines ending in double quotes, commas, equals, or angle brackets)
- # because the rules for how to indent those are non-trivial.
- if (not Search(r'[",=><] *$', prev) and
- (initial_spaces == 1 or initial_spaces == 3) and
- not Match(scope_or_label_pattern, cleansed_line) and
- not (clean_lines.raw_lines[linenum] != line and
- Match(r'^\s*""', line))):
- error(filename, linenum, 'whitespace/indent', 3,
- 'Weird number of spaces at line-start. '
- 'Are you using a 2-space indent?')
+ # One or three blank spaces at the beginning of the line is weird; it's
+ # hard to reconcile that with 2-space indents.
+ # NOTE: here are the conditions rob pike used for his tests. Mine aren't
+ # as sophisticated, but it may be worth becoming so:
+ # RLENGTH==initial_spaces
+ # if(RLENGTH > 20) complain = 0;
+ # if(match($0, " +(error|private|public|protected):")) complain = 0;
+ # if(match(prev, "&& *$")) complain = 0;
+ # if(match(prev, "\\|\\| *$")) complain = 0;
+ # if(match(prev, "[\",=><] *$")) complain = 0;
+ # if(match($0, " <<")) complain = 0;
+ # if(match(prev, " +for \\(")) complain = 0;
+ # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
+ scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
+ classinfo = nesting_state.InnermostClass()
+ initial_spaces = 0
+ cleansed_line = clean_lines.elided[linenum]
+ while initial_spaces < len(line) and line[initial_spaces] == ' ':
+ initial_spaces += 1
+ # There are certain situations in which we allow one space, notably for
+ # section labels, and also lines containing multi-line raw strings.
+ # We also don't check for lines that look like continuation lines
+ # (of lines ending in double quotes, commas, equals, or angle brackets)
+ # because the rules for how to indent those are non-trivial.
+ if (not Search(r'[",=><] *$', prev)
+ and (initial_spaces == 1 or initial_spaces == 3)
+ and not Match(scope_or_label_pattern, cleansed_line)
+ and not (clean_lines.raw_lines[linenum] != line
+ and Match(r'^\s*""', line))):
+ error(
+ filename, linenum, 'whitespace/indent', 3,
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 2-space indent?')
- if line and line[-1].isspace():
- error(filename, linenum, 'whitespace/end_of_line', 4,
- 'Line ends in whitespace. Consider deleting these extra spaces.')
+ if line and line[-1].isspace():
+ error(
+ filename, linenum, 'whitespace/end_of_line', 4,
+ 'Line ends in whitespace. Consider deleting these extra spaces.')
- # Check if the line is a header guard.
- is_header_guard = False
- if file_extension == 'h':
- cppvar = GetHeaderGuardCPPVariable(filename)
- if (line.startswith('#ifndef %s' % cppvar) or
- line.startswith('#define %s' % cppvar) or
- line.startswith('#endif // %s' % cppvar)):
- is_header_guard = True
- # #include lines and header guards can be long, since there's no clean way to
- # split them.
- #
- # URLs can be long too. It's possible to split these, but it makes them
- # harder to cut&paste.
- #
- # The "$Id:...$" comment may also get very long without it being the
- # developers fault.
- if (not line.startswith('#include') and not is_header_guard and
- not Match(r'^\s*//.*http(s?)://\S*$', line) and
- not Match(r'^\s*//\s*[^\s]*$', line) and
- not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
- if len(line) > _line_length:
- error(filename, linenum, 'whitespace/line_length', 2,
- 'Lines should be <= %i characters long' % _line_length)
+ # Check if the line is a header guard.
+ is_header_guard = False
+ if file_extension == 'h':
+ cppvar = GetHeaderGuardCPPVariable(filename)
+ if (line.startswith('#ifndef %s' % cppvar)
+ or line.startswith('#define %s' % cppvar)
+ or line.startswith('#endif // %s' % cppvar)):
+ is_header_guard = True
+ # #include lines and header guards can be long, since there's no clean way
+ # to split them.
+ #
+ # URLs can be long too. It's possible to split these, but it makes them
+ # harder to cut&paste.
+ #
+ # The "$Id:...$" comment may also get very long without it being the
+ # developer's fault.
+ if (not line.startswith('#include') and not is_header_guard
+ and not Match(r'^\s*//.*http(s?)://\S*$', line)
+ and not Match(r'^\s*//\s*[^\s]*$', line)
+ and not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
+ if len(line) > _line_length:
+ error(filename, linenum, 'whitespace/line_length', 2,
+ 'Lines should be <= %i characters long' % _line_length)
- if (cleansed_line.count(';') > 1 and
- # for loops are allowed two ;'s (and may run over two lines).
- cleansed_line.find('for') == -1 and
- (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
- GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
- # It's ok to have many commands in a switch case that fits in 1 line
- not ((cleansed_line.find('case ') != -1 or
- cleansed_line.find('default:') != -1) and
- cleansed_line.find('break;') != -1)):
- error(filename, linenum, 'whitespace/newline', 0,
- 'More than one command on the same line')
+ if (cleansed_line.count(';') > 1 and
+ # for loops are allowed two ;'s (and may run over two lines).
+ cleansed_line.find('for') == -1 and
+ (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1
+ or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1)
+ and
+ # It's ok to have many commands in a switch case that fits in 1 line
+ not ((cleansed_line.find('case ') != -1
+ or cleansed_line.find('default:') != -1)
+ and cleansed_line.find('break;') != -1)):
+ error(filename, linenum, 'whitespace/newline', 0,
+ 'More than one command on the same line')
- # Some more style checks
- CheckBraces(filename, clean_lines, linenum, error)
- CheckTrailingSemicolon(filename, clean_lines, linenum, error)
- CheckEmptyBlockBody(filename, clean_lines, linenum, error)
- CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
- CheckOperatorSpacing(filename, clean_lines, linenum, error)
- CheckParenthesisSpacing(filename, clean_lines, linenum, error)
- CheckCommaSpacing(filename, clean_lines, linenum, error)
- CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
- CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
- CheckCheck(filename, clean_lines, linenum, error)
- CheckAltTokens(filename, clean_lines, linenum, error)
- classinfo = nesting_state.InnermostClass()
- if classinfo:
- CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
+ # Some more style checks
+ CheckBraces(filename, clean_lines, linenum, error)
+ CheckTrailingSemicolon(filename, clean_lines, linenum, error)
+ CheckEmptyBlockBody(filename, clean_lines, linenum, error)
+ CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
+ CheckOperatorSpacing(filename, clean_lines, linenum, error)
+ CheckParenthesisSpacing(filename, clean_lines, linenum, error)
+ CheckCommaSpacing(filename, clean_lines, linenum, error)
+ CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
+ CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
+ CheckCheck(filename, clean_lines, linenum, error)
+ CheckAltTokens(filename, clean_lines, linenum, error)
+ classinfo = nesting_state.InnermostClass()
+ if classinfo:
+ CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
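
A tiny sketch of the leading-space count used by the "weird indent" heuristic above, run on invented lines:

    # Counting leading spaces the same way CheckStyle does; with a 2-space
    # indent, 1 or 3 leading spaces usually means a misaligned line.
    for line in ['  ok();', '   suspicious();', ' also_suspicious();',
                 '    ok_too();']:
        initial_spaces = 0
        while initial_spaces < len(line) and line[initial_spaces] == ' ':
            initial_spaces += 1
        print(repr(line), '->', initial_spaces,
              'weird' if initial_spaces in (1, 3) else 'ok')
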
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
@@ -4594,7 +4678,7 @@ _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
- """Drops common suffixes like _test.cc or -inl.h from filename.
+ """Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
@@ -4612,16 +4696,16 @@ def _DropCommonSuffixes(filename):
Returns:
The filename with the common suffix removed.
"""
- for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
- 'inl.h', 'impl.h', 'internal.h'):
- if (filename.endswith(suffix) and len(filename) > len(suffix) and
- filename[-len(suffix) - 1] in ('-', '_')):
- return filename[:-len(suffix) - 1]
- return os.path.splitext(filename)[0]
+ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h',
+ 'internal.h'):
+ if (filename.endswith(suffix) and len(filename) > len(suffix)
+ and filename[-len(suffix) - 1] in ('-', '_')):
+ return filename[:-len(suffix) - 1]
+ return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, is_system):
- """Figures out what kind of header 'include' is.
+ """Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
@@ -4644,44 +4728,43 @@ def _ClassifyInclude(fileinfo, include, is_system):
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
- # This is a list of all standard c++ header files, except
- # those already checked for above.
- is_cpp_h = include in _CPP_HEADERS
+ # This is a list of all standard c++ header files, except
+ # those already checked for above.
+ is_cpp_h = include in _CPP_HEADERS
- if is_system:
- if is_cpp_h:
- return _CPP_SYS_HEADER
- else:
- return _C_SYS_HEADER
+ if is_system:
+ if is_cpp_h:
+ return _CPP_SYS_HEADER
+ else:
+ return _C_SYS_HEADER
- # If the target file and the include we're checking share a
- # basename when we drop common extensions, and the include
- # lives in . , then it's likely to be owned by the target file.
- target_dir, target_base = (
- os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
- include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
- if target_base == include_base and (
- include_dir == target_dir or
- include_dir == os.path.normpath(target_dir + '/../public')):
- return _LIKELY_MY_HEADER
+ # If the target file and the include we're checking share a
+ # basename when we drop common extensions, and the include
+ # lives in . , then it's likely to be owned by the target file.
+ target_dir, target_base = (os.path.split(
+ _DropCommonSuffixes(fileinfo.RepositoryName())))
+ include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
+ if target_base == include_base and (
+ include_dir == target_dir
+ or include_dir == os.path.normpath(target_dir + '/../public')):
+ return _LIKELY_MY_HEADER
- # If the target and include share some initial basename
- # component, it's possible the target is implementing the
- # include, so it's allowed to be first, but we'll never
- # complain if it's not there.
- target_first_component = _RE_FIRST_COMPONENT.match(target_base)
- include_first_component = _RE_FIRST_COMPONENT.match(include_base)
- if (target_first_component and include_first_component and
- target_first_component.group(0) ==
- include_first_component.group(0)):
- return _POSSIBLE_MY_HEADER
-
- return _OTHER_HEADER
+ # If the target and include share some initial basename
+ # component, it's possible the target is implementing the
+ # include, so it's allowed to be first, but we'll never
+ # complain if it's not there.
+ target_first_component = _RE_FIRST_COMPONENT.match(target_base)
+ include_first_component = _RE_FIRST_COMPONENT.match(include_base)
+ if (target_first_component and include_first_component
+ and target_first_component.group(0)
+ == include_first_component.group(0)):
+ return _POSSIBLE_MY_HEADER
+ return _OTHER_HEADER
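
To see how _DropCommonSuffixes feeds the _LIKELY_MY_HEADER decision, here is a sketch with a simplified local stand-in (named _drop_common_suffixes to avoid claiming it is the real function) and hypothetical paths:

    import os

    def _drop_common_suffixes(filename):
        # Simplified stand-in for _DropCommonSuffixes above.
        for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h',
                       'impl.h', 'internal.h'):
            if (filename.endswith(suffix) and len(filename) > len(suffix)
                    and filename[-len(suffix) - 1] in ('-', '_')):
                return filename[:-len(suffix) - 1]
        return os.path.splitext(filename)[0]

    # foo/foo_test.cc including foo/foo.h shares the basename "foo" once the
    # common suffixes are dropped, so the include is "likely my header".
    target = _drop_common_suffixes('foo/foo_test.cc')   # -> 'foo/foo'
    include = _drop_common_suffixes('foo/foo.h')        # -> 'foo/foo'
    print(os.path.split(target)[1] == os.path.split(include)[1])  # True
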
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
- """Check rules that are applicable to #include lines.
+ """Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
@@ -4694,68 +4777,70 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
- fileinfo = FileInfo(filename)
- line = clean_lines.lines[linenum]
+ fileinfo = FileInfo(filename)
+ line = clean_lines.lines[linenum]
- # "include" should use the new style "foo/bar.h" instead of just "bar.h"
- # Only do this check if the included header follows google naming
- # conventions. If not, assume that it's a 3rd party API that
- # requires special include conventions.
- #
- # We also make an exception for Lua headers, which follow google
- # naming convention but not the include convention.
- match = Match(r'#include\s*"([^/]+\.h)"', line)
- if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
- error(filename, linenum, 'build/include_directory', 4,
- 'Include the directory when naming .h files')
+ # "include" should use the new style "foo/bar.h" instead of just "bar.h"
+ # Only do this check if the included header follows google naming
+ # conventions. If not, assume that it's a 3rd party API that
+ # requires special include conventions.
+ #
+ # We also make an exception for Lua headers, which follow google
+ # naming convention but not the include convention.
+ match = Match(r'#include\s*"([^/]+\.h)"', line)
+ if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
+ error(filename, linenum, 'build/include_directory', 4,
+ 'Include the directory when naming .h files')
- # we shouldn't include a file more than once. actually, there are a
- # handful of instances where doing so is okay, but in general it's
- # not.
- match = _RE_PATTERN_INCLUDE.search(line)
- if match:
- include = match.group(2)
- is_system = (match.group(1) == '<')
- duplicate_line = include_state.FindHeader(include)
- if duplicate_line >= 0:
- error(filename, linenum, 'build/include', 4,
- '"%s" already included at %s:%s' %
- (include, filename, duplicate_line))
- elif (include.endswith('.cc') and
- os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
- error(filename, linenum, 'build/include', 4,
- 'Do not include .cc files from other packages')
- elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
- include_state.include_list[-1].append((include, linenum))
-
- # We want to ensure that headers appear in the right order:
- # 1) for foo.cc, foo.h (preferred location)
- # 2) c system files
- # 3) cpp system files
- # 4) for foo.cc, foo.h (deprecated location)
- # 5) other google headers
- #
- # We classify each include statement as one of those 5 types
- # using a number of techniques. The include_state object keeps
- # track of the highest type seen, and complains if we see a
- # lower type after that.
- error_message = include_state.CheckNextIncludeOrder(
- _ClassifyInclude(fileinfo, include, is_system))
- if error_message:
- error(filename, linenum, 'build/include_order', 4,
- '%s. Should be: %s.h, c system, c++ system, other.' %
- (error_message, fileinfo.BaseName()))
- canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
- if not include_state.IsInAlphabeticalOrder(
- clean_lines, linenum, canonical_include):
- error(filename, linenum, 'build/include_alpha', 4,
- 'Include "%s" not in alphabetical order' % include)
- include_state.SetLastHeader(canonical_include)
+ # We shouldn't include a file more than once. Actually, there are a
+ # handful of instances where doing so is okay, but in general it's
+ # not.
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ include = match.group(2)
+ is_system = (match.group(1) == '<')
+ duplicate_line = include_state.FindHeader(include)
+ if duplicate_line >= 0:
+ error(
+ filename, linenum, 'build/include', 4,
+ '"%s" already included at %s:%s' %
+ (include, filename, duplicate_line))
+ elif (include.endswith('.cc') and os.path.dirname(
+ fileinfo.RepositoryName()) != os.path.dirname(include)):
+ error(filename, linenum, 'build/include', 4,
+ 'Do not include .cc files from other packages')
+ elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
+ include_state.include_list[-1].append((include, linenum))
+ # We want to ensure that headers appear in the right order:
+ # 1) for foo.cc, foo.h (preferred location)
+ # 2) c system files
+ # 3) cpp system files
+ # 4) for foo.cc, foo.h (deprecated location)
+ # 5) other google headers
+ #
+ # We classify each include statement as one of those 5 types
+ # using a number of techniques. The include_state object keeps
+ # track of the highest type seen, and complains if we see a
+ # lower type after that.
+ error_message = include_state.CheckNextIncludeOrder(
+ _ClassifyInclude(fileinfo, include, is_system))
+ if error_message:
+ error(
+ filename, linenum, 'build/include_order', 4,
+ '%s. Should be: %s.h, c system, c++ system, other.' %
+ (error_message, fileinfo.BaseName()))
+ canonical_include = include_state.CanonicalizeAlphabeticalOrder(
+ include)
+ if not include_state.IsInAlphabeticalOrder(clean_lines, linenum,
+ canonical_include):
+ error(filename, linenum, 'build/include_alpha', 4,
+ 'Include "%s" not in alphabetical order' % include)
+ include_state.SetLastHeader(canonical_include)
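
The _RE_PATTERN_INCLUDE regular expression defined above does the parsing this function relies on; a small usage sketch with made-up include lines:

    import re

    # group(1) distinguishes <system> from "project" includes, and group(2)
    # is the header path used for the duplicate/order/alphabetical checks.
    _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
    for line in ['#include <vector>', '#include "foo/bar.h"  // comment']:
        m = _RE_PATTERN_INCLUDE.search(line)
        print('system' if m.group(1) == '<' else 'project', m.group(2))
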
def _GetTextInside(text, start_pattern):
- r"""Retrieves all the text between matching open and close parentheses.
+ r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
@@ -4774,40 +4859,40 @@ def _GetTextInside(text, start_pattern):
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
- # TODO(unknown): Audit cpplint.py to see what places could be profitably
- # rewritten to use _GetTextInside (and use inferior regexp matching today).
+ # TODO(unknown): Audit cpplint.py to see what places could be profitably
+ # rewritten to use _GetTextInside (such places currently use inferior
+ # regexp matching).
- # Give opening punctuations to get the matching close-punctuations.
- matching_punctuation = {'(': ')', '{': '}', '[': ']'}
- closing_punctuation = set(matching_punctuation.values())
+ # Map each opening punctuation mark to its matching closing mark.
+ matching_punctuation = {'(': ')', '{': '}', '[': ']'}
+ closing_punctuation = set(matching_punctuation.values())
- # Find the position to start extracting text.
- match = re.search(start_pattern, text, re.M)
- if not match: # start_pattern not found in text.
- return None
- start_position = match.end(0)
+ # Find the position to start extracting text.
+ match = re.search(start_pattern, text, re.M)
+ if not match: # start_pattern not found in text.
+ return None
+ start_position = match.end(0)
- assert start_position > 0, (
- 'start_pattern must ends with an opening punctuation.')
- assert text[start_position - 1] in matching_punctuation, (
- 'start_pattern must ends with an opening punctuation.')
- # Stack of closing punctuations we expect to have in text after position.
- punctuation_stack = [matching_punctuation[text[start_position - 1]]]
- position = start_position
- while punctuation_stack and position < len(text):
- if text[position] == punctuation_stack[-1]:
- punctuation_stack.pop()
- elif text[position] in closing_punctuation:
- # A closing punctuation without matching opening punctuations.
- return None
- elif text[position] in matching_punctuation:
- punctuation_stack.append(matching_punctuation[text[position]])
- position += 1
- if punctuation_stack:
- # Opening punctuations left without matching close-punctuations.
- return None
- # punctuations match.
- return text[start_position:position - 1]
+ assert start_position > 0, (
+ 'start_pattern must end with an opening punctuation.')
+ assert text[start_position - 1] in matching_punctuation, (
+ 'start_pattern must end with an opening punctuation.')
+ # Stack of closing punctuations we expect to have in text after position.
+ punctuation_stack = [matching_punctuation[text[start_position - 1]]]
+ position = start_position
+ while punctuation_stack and position < len(text):
+ if text[position] == punctuation_stack[-1]:
+ punctuation_stack.pop()
+ elif text[position] in closing_punctuation:
+ # A closing punctuation without matching opening punctuations.
+ return None
+ elif text[position] in matching_punctuation:
+ punctuation_stack.append(matching_punctuation[text[position]])
+ position += 1
+ if punctuation_stack:
+ # Opening punctuations left without matching close-punctuations.
+ return None
+ # punctuations match.
+ return text[start_position:position - 1]
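
A compact restatement of the matching loop above, plus the kind of call CheckLanguage later makes to extract printf arguments; the function name and the sample line are illustrative only.

    import re

    def get_text_inside(text, start_pattern):
        """Simplified re-statement of _GetTextInside, for illustration."""
        matching = {'(': ')', '{': '}', '[': ']'}
        closing = set(matching.values())
        m = re.search(start_pattern, text, re.M)
        if not m:
            return None
        pos = start = m.end(0)
        stack = [matching[text[start - 1]]]
        while stack and pos < len(text):
            ch = text[pos]
            if ch == stack[-1]:
                stack.pop()
            elif ch in closing:
                return None          # closing mark with no matching opener
            elif ch in matching:
                stack.append(matching[ch])
            pos += 1
        return None if stack else text[start:pos - 1]

    # CheckLanguage uses this kind of call to pull out printf arguments:
    print(get_text_inside('printf(foo->c_str(), bar);', r'\bprintf\s*\('))
    # -> foo->c_str(), bar
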
# Patterns for matching call-by-reference parameters.
@@ -4826,22 +4911,23 @@ _RE_PATTERN_TYPE = (
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
-_RE_PATTERN_REF_PARAM = re.compile(
- r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
- r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
+_RE_PATTERN_REF_PARAM = re.compile(r'(' + _RE_PATTERN_TYPE +
+ r'(?:\s*(?:\bconst\b|[*]))*\s*'
+ r'&\s*' + _RE_PATTERN_IDENT +
+ r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
-_RE_PATTERN_CONST_REF_PARAM = (
- r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
- r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
+_RE_PATTERN_CONST_REF_PARAM = (r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
+ r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' +
+ _RE_PATTERN_IDENT + r')')
# Stream types.
-_RE_PATTERN_REF_STREAM_PARAM = (
- r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
+_RE_PATTERN_REF_STREAM_PARAM = (r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT +
+ r')')
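
A much-simplified version of the const-reference parameter idea; the real patterns are assembled from _RE_PATTERN_TYPE and _RE_PATTERN_IDENT and handle templates and default arguments, so this sketch only shows the shape of parameter text the reference checks look for.

    import re

    simple_const_ref = re.compile(r'const\s+[\w:<>]+\s*&\s*\w+')
    for param in ['const std::string& name', 'std::string& out', 'int count']:
        print(param, '->', bool(simple_const_ref.search(param)))
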
-def CheckLanguage(filename, clean_lines, linenum, file_extension,
- include_state, nesting_state, error):
- """Checks rules from the 'C++ language rules' section of cppguide.html.
+def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
+ nesting_state, error):
+ """Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
@@ -4856,152 +4942,160 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
- # If the line is empty or consists of entirely a comment, no need to
- # check it.
- line = clean_lines.elided[linenum]
- if not line:
- return
+ # If the line is empty or consists entirely of a comment, no need to
+ # check it.
+ line = clean_lines.elided[linenum]
+ if not line:
+ return
- match = _RE_PATTERN_INCLUDE.search(line)
- if match:
- CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
- return
-
- # Reset include state across preprocessor directives. This is meant
- # to silence warnings for conditional includes.
- match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
- if match:
- include_state.ResetSection(match.group(1))
-
- # Make Windows paths like Unix.
- fullname = os.path.abspath(filename).replace('\\', '/')
-
- # Perform other checks now that we are sure that this is not an include line
- CheckCasts(filename, clean_lines, linenum, error)
- CheckGlobalStatic(filename, clean_lines, linenum, error)
- CheckPrintf(filename, clean_lines, linenum, error)
-
- if file_extension == 'h':
- # TODO(unknown): check that 1-arg constructors are explicit.
- # How to tell it's a constructor?
- # (handled in CheckForNonStandardConstructs for now)
- # TODO(unknown): check that classes declare or disable copy/assign
- # (level 1 error)
- pass
-
- # Check if people are using the verboten C basic types. The only exception
- # we regularly allow is "unsigned short port" for port.
- if Search(r'\bshort port\b', line):
- if not Search(r'\bunsigned short port\b', line):
- error(filename, linenum, 'runtime/int', 4,
- 'Use "unsigned short" for ports, not "short"')
- else:
- match = Search(r'\b(short|long(?! +double)|long long)\b', line)
+ match = _RE_PATTERN_INCLUDE.search(line)
if match:
- error(filename, linenum, 'runtime/int', 4,
- 'Use int16/int64/etc, rather than the C type %s' % match.group(1))
+ CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
+ return
- # Check if some verboten operator overloading is going on
- # TODO(unknown): catch out-of-line unary operator&:
- # class X {};
- # int operator&(const X& x) { return 42; } // unary operator&
- # The trick is it's hard to tell apart from binary operator&:
- # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
- if Search(r'\boperator\s*&\s*\(\s*\)', line):
- error(filename, linenum, 'runtime/operator', 4,
- 'Unary operator& is dangerous. Do not use it.')
+ # Reset include state across preprocessor directives. This is meant
+ # to silence warnings for conditional includes.
+ match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
+ if match:
+ include_state.ResetSection(match.group(1))
- # Check for suspicious usage of "if" like
- # } if (a == b) {
- if Search(r'\}\s*if\s*(?:constexpr\s*)?\(', line):
- error(filename, linenum, 'readability/braces', 4,
- 'Did you mean "else if"? If not, start a new line for "if".')
+ # Make Windows paths like Unix.
+ fullname = os.path.abspath(filename).replace('\\', '/')
- # Check for potential format string bugs like printf(foo).
- # We constrain the pattern not to pick things like DocidForPrintf(foo).
- # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
- # TODO(unknown): Catch the following case. Need to change the calling
- # convention of the whole function to process multiple line to handle it.
- # printf(
- # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
- printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
- if printf_args:
- match = Match(r'([\w.\->()]+)$', printf_args)
- if match and match.group(1) != '__VA_ARGS__':
- function_name = re.search(r'\b((?:string)?printf)\s*\(',
- line, re.I).group(1)
- error(filename, linenum, 'runtime/printf', 4,
- 'Potential format string bug. Do %s("%%s", %s) instead.'
- % (function_name, match.group(1)))
+ # Perform other checks now that we are sure that this is not an include line
+ CheckCasts(filename, clean_lines, linenum, error)
+ CheckGlobalStatic(filename, clean_lines, linenum, error)
+ CheckPrintf(filename, clean_lines, linenum, error)
- # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
- match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
- if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
- error(filename, linenum, 'runtime/memset', 4,
- 'Did you mean "memset(%s, 0, %s)"?'
- % (match.group(1), match.group(2)))
+ if file_extension == 'h':
+ # TODO(unknown): check that 1-arg constructors are explicit.
+ # How to tell it's a constructor?
+ # (handled in CheckForNonStandardConstructs for now)
+ # TODO(unknown): check that classes declare or disable copy/assign
+ # (level 1 error)
+ pass
- if Search(r'\busing namespace\b', line):
- error(filename, linenum, 'build/namespaces', 5,
- 'Do not use namespace using-directives. '
- 'Use using-declarations instead.')
+ # Check if people are using the verboten C basic types. The only exception
+ # we regularly allow is "unsigned short port" for port.
+ if Search(r'\bshort port\b', line):
+ if not Search(r'\bunsigned short port\b', line):
+ error(filename, linenum, 'runtime/int', 4,
+ 'Use "unsigned short" for ports, not "short"')
+ else:
+ match = Search(r'\b(short|long(?! +double)|long long)\b', line)
+ if match:
+ error(
+ filename, linenum, 'runtime/int', 4,
+ 'Use int16/int64/etc, rather than the C type %s' %
+ match.group(1))
- # Detect variable-length arrays.
- match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
- if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
- match.group(3).find(']') == -1):
- # Split the size using space and arithmetic operators as delimiters.
- # If any of the resulting tokens are not compile time constants then
- # report the error.
- tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
- is_const = True
- skip_next = False
- for tok in tokens:
- if skip_next:
+ # Check if some verboten operator overloading is going on
+ # TODO(unknown): catch out-of-line unary operator&:
+ # class X {};
+ # int operator&(const X& x) { return 42; } // unary operator&
+ # The trick is it's hard to tell apart from binary operator&:
+ # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
+ if Search(r'\boperator\s*&\s*\(\s*\)', line):
+ error(filename, linenum, 'runtime/operator', 4,
+ 'Unary operator& is dangerous. Do not use it.')
+
+ # Check for suspicious usage of "if" like
+ # } if (a == b) {
+ if Search(r'\}\s*if\s*(?:constexpr\s*)?\(', line):
+ error(filename, linenum, 'readability/braces', 4,
+ 'Did you mean "else if"? If not, start a new line for "if".')
+
+ # Check for potential format string bugs like printf(foo).
+ # We constrain the pattern not to pick things like DocidForPrintf(foo).
+ # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+ # TODO(unknown): Catch the following case. Need to change the calling
+ # convention of the whole function to process multiple line to handle it.
+ # printf(
+ # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
+ printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
+ if printf_args:
+ match = Match(r'([\w.\->()]+)$', printf_args)
+ if match and match.group(1) != '__VA_ARGS__':
+ function_name = re.search(r'\b((?:string)?printf)\s*\(', line,
+ re.I).group(1)
+ error(
+ filename, linenum, 'runtime/printf', 4,
+ 'Potential format string bug. Do %s("%%s", %s) instead.' %
+ (function_name, match.group(1)))
+
+ # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
+ match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+ if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
+ error(
+ filename, linenum, 'runtime/memset', 4,
+ 'Did you mean "memset(%s, 0, %s)"?' %
+ (match.group(1), match.group(2)))
+
+ if Search(r'\busing namespace\b', line):
+ error(
+ filename, linenum, 'build/namespaces', 5,
+ 'Do not use namespace using-directives. '
+ 'Use using-declarations instead.')
+
+ # Detect variable-length arrays.
+ match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+ if (match and match.group(2) != 'return' and match.group(2) != 'delete'
+ and match.group(3).find(']') == -1):
+ # Split the size using space and arithmetic operators as delimiters.
+ # If any of the resulting tokens are not compile time constants then
+ # report the error.
+ tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
+ is_const = True
skip_next = False
- continue
+ for tok in tokens:
+ if skip_next:
+ skip_next = False
+ continue
- if Search(r'sizeof\(.+\)', tok): continue
- if Search(r'arraysize\(\w+\)', tok): continue
- if Search(r'base::size\(.+\)', tok): continue
- if Search(r'std::size\(.+\)', tok): continue
- if Search(r'std::extent<.+>', tok): continue
+ if Search(r'sizeof\(.+\)', tok): continue
+ if Search(r'arraysize\(\w+\)', tok): continue
+ if Search(r'base::size\(.+\)', tok): continue
+ if Search(r'std::size\(.+\)', tok): continue
+ if Search(r'std::extent<.+>', tok): continue
- tok = tok.lstrip('(')
- tok = tok.rstrip(')')
- if not tok: continue
- if Match(r'\d+', tok): continue
- if Match(r'0[xX][0-9a-fA-F]+', tok): continue
- if Match(r'k[A-Z0-9]\w*', tok): continue
- if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
- if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
- # A catch all for tricky sizeof cases, including 'sizeof expression',
- # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
- # requires skipping the next token because we split on ' ' and '*'.
- if tok.startswith('sizeof'):
- skip_next = True
- continue
- is_const = False
- break
- if not is_const:
- error(filename, linenum, 'runtime/arrays', 1,
- 'Do not use variable-length arrays. Use an appropriately named '
- "('k' followed by CamelCase) compile-time constant for the size.")
+ tok = tok.lstrip('(')
+ tok = tok.rstrip(')')
+ if not tok: continue
+ if Match(r'\d+', tok): continue
+ if Match(r'0[xX][0-9a-fA-F]+', tok): continue
+ if Match(r'k[A-Z0-9]\w*', tok): continue
+ if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
+ if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
+ # A catch-all for tricky sizeof cases ('sizeof expression',
+ # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)');
+ # these require skipping the next token because we split on
+ # ' ' and '*'.
+ if tok.startswith('sizeof'):
+ skip_next = True
+ continue
+ is_const = False
+ break
+ if not is_const:
+ error(
+ filename, linenum, 'runtime/arrays', 1,
+ 'Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size."
+ )
- # Check for use of unnamed namespaces in header files. Registration
- # macros are typically OK, so we allow use of "namespace {" on lines
- # that end with backslashes.
- if (file_extension == 'h'
- and Search(r'\bnamespace\s*{', line)
- and line[-1] != '\\'):
- error(filename, linenum, 'build/namespaces', 4,
- 'Do not use unnamed namespaces in header files. See '
- 'https://google.github.io/styleguide/cppguide.html#Namespaces'
- ' for more information.')
+ # Check for use of unnamed namespaces in header files. Registration
+ # macros are typically OK, so we allow use of "namespace {" on lines
+ # that end with backslashes.
+ if (file_extension == 'h' and Search(r'\bnamespace\s*{', line)
+ and line[-1] != '\\'):
+ error(
+ filename, linenum, 'build/namespaces', 4,
+ 'Do not use unnamed namespaces in header files. See '
+ 'https://google.github.io/styleguide/cppguide.html#Namespaces'
+ ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
- """Check for unsafe global or static objects.
+ """Check for unsafe global or static objects.
Args:
filename: The name of the current file.
@@ -5009,60 +5103,60 @@ def CheckGlobalStatic(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Match two lines at a time to support multiline declarations
- if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
- line += clean_lines.elided[linenum + 1].strip()
+ # Match two lines at a time to support multiline declarations
+ if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
+ line += clean_lines.elided[linenum + 1].strip()
- # Check for people declaring static/global STL strings at the top level.
- # This is dangerous because the C++ language does not guarantee that
- # globals with constructors are initialized before the first access, and
- # also because globals can be destroyed when some threads are still running.
- # TODO(unknown): Generalize this to also find static unique_ptr instances.
- # TODO(unknown): File bugs for clang-tidy to find these.
- match = Match(
- r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
- r'([a-zA-Z0-9_:]+)\b(.*)',
- line)
+ # Check for people declaring static/global STL strings at the top level.
+ # This is dangerous because the C++ language does not guarantee that
+ # globals with constructors are initialized before the first access, and
+ # also because globals can be destroyed when some threads are still running.
+ # TODO(unknown): Generalize this to also find static unique_ptr instances.
+ # TODO(unknown): File bugs for clang-tidy to find these.
+ match = Match(
+ r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
+ r'([a-zA-Z0-9_:]+)\b(.*)', line)
- # Remove false positives:
- # - String pointers (as opposed to values).
- # string *pointer
- # const string *pointer
- # string const *pointer
- # string *const pointer
- #
- # - Functions and template specializations.
- # string Function(...
- # string Class::Method(...
- #
- # - Operators. These are matched separately because operator names
- # cross non-word boundaries, and trying to match both operators
- # and functions at the same time would decrease accuracy of
- # matching identifiers.
- # string Class::operator*()
- if (match and
- not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
- not Search(r'\boperator\W', line) and
- not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
- if Search(r'\bconst\b', line):
- error(filename, linenum, 'runtime/string', 4,
- 'For a static/global string constant, use a C style string '
- 'instead: "%schar%s %s[]".' %
- (match.group(1), match.group(2) or '', match.group(3)))
- else:
- error(filename, linenum, 'runtime/string', 4,
- 'Static/global string variables are not permitted.')
+ # Remove false positives:
+ # - String pointers (as opposed to values).
+ # string *pointer
+ # const string *pointer
+ # string const *pointer
+ # string *const pointer
+ #
+ # - Functions and template specializations.
+ # string Function(...
+ # string Class::Method(...
+ #
+ # - Operators. These are matched separately because operator names
+ # cross non-word boundaries, and trying to match both operators
+ # and functions at the same time would decrease accuracy of
+ # matching identifiers.
+ # string Class::operator*()
+ if (match and
+ not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line)
+ and not Search(r'\boperator\W', line) and not Match(
+ r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
+ if Search(r'\bconst\b', line):
+ error(
+ filename, linenum, 'runtime/string', 4,
+ 'For a static/global string constant, use a C style string '
+ 'instead: "%schar%s %s[]".' %
+ (match.group(1), match.group(2) or '', match.group(3)))
+ else:
+ error(filename, linenum, 'runtime/string', 4,
+ 'Static/global string variables are not permitted.')
- if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
- Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
- error(filename, linenum, 'runtime/init', 4,
- 'You seem to be initializing a member variable with itself.')
+ if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line)
+ or Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
+ error(filename, linenum, 'runtime/init', 4,
+ 'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
- """Check for printf related issues.
+ """Check for printf related issues.
Args:
filename: The name of the current file.
@@ -5070,28 +5164,29 @@ def CheckPrintf(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # When snprintf is used, the second argument shouldn't be a literal.
- match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
- if match and match.group(2) != '0':
- # If 2nd arg is zero, snprintf is used to calculate size.
- error(filename, linenum, 'runtime/printf', 3,
- 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
- 'to snprintf.' % (match.group(1), match.group(2)))
+ # When snprintf is used, the second argument shouldn't be a literal.
+ match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+ if match and match.group(2) != '0':
+ # If 2nd arg is zero, snprintf is used to calculate size.
+ error(
+ filename, linenum, 'runtime/printf', 3,
+ 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+ 'to snprintf.' % (match.group(1), match.group(2)))
- # Check if some verboten C functions are being used.
- if Search(r'\bsprintf\s*\(', line):
- error(filename, linenum, 'runtime/printf', 5,
- 'Never use sprintf. Use snprintf instead.')
- match = Search(r'\b(strcpy|strcat)\s*\(', line)
- if match:
- error(filename, linenum, 'runtime/printf', 4,
- 'Almost always, snprintf is better than %s' % match.group(1))
+ # Check if some verboten C functions are being used.
+ if Search(r'\bsprintf\s*\(', line):
+ error(filename, linenum, 'runtime/printf', 5,
+ 'Never use sprintf. Use snprintf instead.')
+ match = Search(r'\b(strcpy|strcat)\s*\(', line)
+ if match:
+ error(filename, linenum, 'runtime/printf', 4,
+ 'Almost always, snprintf is better than %s' % match.group(1))
def IsDerivedFunction(clean_lines, linenum):
- """Check if current line contains an inherited function.
+ """Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -5100,20 +5195,20 @@ def IsDerivedFunction(clean_lines, linenum):
True if current line contains a function with "override"
virt-specifier.
"""
- # Scan back a few lines for start of current function
- for i in range(linenum, max(-1, linenum - 10), -1):
- match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
- if match:
- # Look for "override" after the matching closing parenthesis
- line, _, closing_paren = CloseExpression(
- clean_lines, i, len(match.group(1)))
- return (closing_paren >= 0 and
- Search(r'\boverride\b', line[closing_paren:]))
- return False
+ # Scan back a few lines for start of current function
+ for i in range(linenum, max(-1, linenum - 10), -1):
+ match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
+ if match:
+ # Look for "override" after the matching closing parenthesis
+ line, _, closing_paren = CloseExpression(clean_lines, i,
+ len(match.group(1)))
+ return (closing_paren >= 0
+ and Search(r'\boverride\b', line[closing_paren:]))
+ return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
- """Check if current line contains an out-of-line method definition.
+ """Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -5121,15 +5216,16 @@ def IsOutOfLineMethodDefinition(clean_lines, linenum):
Returns:
True if current line contains an out-of-line method definition.
"""
- # Scan back a few lines for start of current function
- for i in range(linenum, max(-1, linenum - 10), -1):
- if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
- return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
- return False
+ # Scan back a few lines for start of current function
+ for i in range(linenum, max(-1, linenum - 10), -1):
+ if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
+ return Match(r'^[^()]*\w+::\w+\(',
+ clean_lines.elided[i]) is not None
+ return False
def IsInitializerList(clean_lines, linenum):
- """Check if current line is inside constructor initializer list.
+ """Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -5138,41 +5234,42 @@ def IsInitializerList(clean_lines, linenum):
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
- for i in range(linenum, 1, -1):
- line = clean_lines.elided[i]
- if i == linenum:
- remove_function_body = Match(r'^(.*)\{\s*$', line)
- if remove_function_body:
- line = remove_function_body.group(1)
+ for i in range(linenum, 1, -1):
+ line = clean_lines.elided[i]
+ if i == linenum:
+ remove_function_body = Match(r'^(.*)\{\s*$', line)
+ if remove_function_body:
+ line = remove_function_body.group(1)
- if Search(r'\s:\s*\w+[({]', line):
- # A lone colon tend to indicate the start of a constructor
- # initializer list. It could also be a ternary operator, which
- # also tend to appear in constructor initializer lists as
- # opposed to parameter lists.
- return True
- if Search(r'\}\s*,\s*$', line):
- # A closing brace followed by a comma is probably the end of a
- # brace-initialized member in constructor initializer list.
- return True
- if Search(r'[{};]\s*$', line):
- # Found one of the following:
- # - A closing brace or semicolon, probably the end of the previous
- # function.
- # - An opening brace, probably the start of current class or namespace.
- #
- # Current line is probably not inside an initializer list since
- # we saw one of those things without seeing the starting colon.
- return False
+ if Search(r'\s:\s*\w+[({]', line):
+            # A lone colon tends to indicate the start of a constructor
+            # initializer list. It could also be a ternary operator, which
+            # also tends to appear in constructor initializer lists as
+            # opposed to parameter lists.
+ return True
+ if Search(r'\}\s*,\s*$', line):
+ # A closing brace followed by a comma is probably the end of a
+ # brace-initialized member in constructor initializer list.
+ return True
+ if Search(r'[{};]\s*$', line):
+ # Found one of the following:
+ # - A closing brace or semicolon, probably the end of the previous
+ # function.
+ # - An opening brace, probably the start of current class or
+ # namespace.
+ #
+ # Current line is probably not inside an initializer list since
+ # we saw one of those things without seeing the starting colon.
+ return False
- # Got to the beginning of the file without seeing the start of
- # constructor initializer list.
- return False
+ # Got to the beginning of the file without seeing the start of
+ # constructor initializer list.
+ return False
-def CheckForNonConstReference(filename, clean_lines, linenum,
- nesting_state, error):
- """Check for non-const references.
+def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state,
+ error):
+ """Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
@@ -5185,132 +5282,134 @@ def CheckForNonConstReference(filename, clean_lines, linenum,
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
- # Do nothing if there is no '&' on current line.
- line = clean_lines.elided[linenum]
- if '&' not in line:
- return
-
- # If a function is inherited, current function doesn't have much of
- # a choice, so any non-const references should not be blamed on
- # derived function.
- if IsDerivedFunction(clean_lines, linenum):
- return
-
- # Don't warn on out-of-line method definitions, as we would warn on the
- # in-line declaration, if it isn't marked with 'override'.
- if IsOutOfLineMethodDefinition(clean_lines, linenum):
- return
-
- # Long type names may be broken across multiple lines, usually in one
- # of these forms:
- # LongType
- # ::LongTypeContinued &identifier
- # LongType::
- # LongTypeContinued &identifier
- # LongType<
- # ...>::LongTypeContinued &identifier
- #
- # If we detected a type split across two lines, join the previous
- # line to current line so that we can match const references
- # accordingly.
- #
- # Note that this only scans back one line, since scanning back
- # arbitrary number of lines would be expensive. If you have a type
- # that spans more than 2 lines, please use a typedef.
- if linenum > 1:
- previous = None
- if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
- # previous_line\n + ::current_line
- previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
- clean_lines.elided[linenum - 1])
- elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
- # previous_line::\n + current_line
- previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
- clean_lines.elided[linenum - 1])
- if previous:
- line = previous.group(1) + line.lstrip()
- else:
- # Check for templated parameter that is split across multiple lines
- endpos = line.rfind('>')
- if endpos > -1:
- (_, startline, startpos) = ReverseCloseExpression(
- clean_lines, linenum, endpos)
- if startpos > -1 and startline < linenum:
- # Found the matching < on an earlier line, collect all
- # pieces up to current line.
- line = ''
- for i in range(startline, linenum + 1):
- line += clean_lines.elided[i].strip()
-
- # Check for non-const references in function parameters. A single '&' may
- # found in the following places:
- # inside expression: binary & for bitwise AND
- # inside expression: unary & for taking the address of something
- # inside declarators: reference parameter
- # We will exclude the first two cases by checking that we are not inside a
- # function body, including one that was just introduced by a trailing '{'.
- # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
- if (nesting_state.previous_stack_top and
- not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
- isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
- # Not at toplevel, not within a class, and not within a namespace
- return
-
- # Avoid initializer lists. We only need to scan back from the
- # current line for something that starts with ':'.
- #
- # We don't need to check the current line, since the '&' would
- # appear inside the second set of parentheses on the current line as
- # opposed to the first set.
- if linenum > 0:
- for i in range(linenum - 1, max(0, linenum - 10), -1):
- previous_line = clean_lines.elided[i]
- if not Search(r'[),]\s*$', previous_line):
- break
- if Match(r'^\s*:\s+\S', previous_line):
+ # Do nothing if there is no '&' on current line.
+ line = clean_lines.elided[linenum]
+ if '&' not in line:
return
- # Avoid preprocessors
- if Search(r'\\\s*$', line):
- return
-
- # Avoid constructor initializer lists
- if IsInitializerList(clean_lines, linenum):
- return
-
- # We allow non-const references in a few standard places, like functions
- # called "swap()" or iostream operators like "<<" or ">>". Do not check
- # those function parameters.
- #
- # We also accept & in static_assert, which looks like a function but
- # it's actually a declaration expression.
- allowlisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
- r'operator\s*[<>][<>]|'
- r'static_assert|COMPILE_ASSERT'
- r')\s*\(')
- if Search(allowlisted_functions, line):
- return
- elif not Search(r'\S+\([^)]*$', line):
- # Don't see an allowlisted function on this line. Actually we
- # didn't see any function name on this line, so this is likely a
- # multi-line parameter list. Try a bit harder to catch this case.
- for i in range(2):
- if (linenum > i and
- Search(allowlisted_functions, clean_lines.elided[linenum - i - 1])):
+ # If a function is inherited, current function doesn't have much of
+ # a choice, so any non-const references should not be blamed on
+ # derived function.
+ if IsDerivedFunction(clean_lines, linenum):
return
- decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
- for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
- if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
- not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
- error(filename, linenum, 'runtime/references', 2,
- 'Is this a non-const reference? '
- 'If so, make const or use a pointer: ' +
- ReplaceAll(' *<', '<', parameter))
+ # Don't warn on out-of-line method definitions, as we would warn on the
+ # in-line declaration, if it isn't marked with 'override'.
+ if IsOutOfLineMethodDefinition(clean_lines, linenum):
+ return
+
+ # Long type names may be broken across multiple lines, usually in one
+ # of these forms:
+ # LongType
+ # ::LongTypeContinued &identifier
+ # LongType::
+ # LongTypeContinued &identifier
+ # LongType<
+ # ...>::LongTypeContinued &identifier
+ #
+ # If we detected a type split across two lines, join the previous
+ # line to current line so that we can match const references
+ # accordingly.
+ #
+ # Note that this only scans back one line, since scanning back
+ # arbitrary number of lines would be expensive. If you have a type
+ # that spans more than 2 lines, please use a typedef.
+ if linenum > 1:
+ previous = None
+ if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
+ # previous_line\n + ::current_line
+ previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
+ clean_lines.elided[linenum - 1])
+ elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
+ # previous_line::\n + current_line
+ previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
+ clean_lines.elided[linenum - 1])
+ if previous:
+ line = previous.group(1) + line.lstrip()
+ else:
+ # Check for templated parameter that is split across multiple lines
+ endpos = line.rfind('>')
+ if endpos > -1:
+ (_, startline,
+ startpos) = ReverseCloseExpression(clean_lines, linenum,
+ endpos)
+ if startpos > -1 and startline < linenum:
+ # Found the matching < on an earlier line, collect all
+ # pieces up to current line.
+ line = ''
+ for i in range(startline, linenum + 1):
+ line += clean_lines.elided[i].strip()
+
+    # Check for non-const references in function parameters. A single '&' may
+    # be found in the following places:
+ # inside expression: binary & for bitwise AND
+ # inside expression: unary & for taking the address of something
+ # inside declarators: reference parameter
+ # We will exclude the first two cases by checking that we are not inside a
+ # function body, including one that was just introduced by a trailing '{'.
+ # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
+ if (nesting_state.previous_stack_top and
+ not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
+ isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
+ # Not at toplevel, not within a class, and not within a namespace
+ return
+
+ # Avoid initializer lists. We only need to scan back from the
+ # current line for something that starts with ':'.
+ #
+ # We don't need to check the current line, since the '&' would
+ # appear inside the second set of parentheses on the current line as
+ # opposed to the first set.
+ if linenum > 0:
+ for i in range(linenum - 1, max(0, linenum - 10), -1):
+ previous_line = clean_lines.elided[i]
+ if not Search(r'[),]\s*$', previous_line):
+ break
+ if Match(r'^\s*:\s+\S', previous_line):
+ return
+
+ # Avoid preprocessors
+ if Search(r'\\\s*$', line):
+ return
+
+ # Avoid constructor initializer lists
+ if IsInitializerList(clean_lines, linenum):
+ return
+
+ # We allow non-const references in a few standard places, like functions
+ # called "swap()" or iostream operators like "<<" or ">>". Do not check
+ # those function parameters.
+ #
+ # We also accept & in static_assert, which looks like a function but
+ # it's actually a declaration expression.
+ allowlisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
+ r'operator\s*[<>][<>]|'
+ r'static_assert|COMPILE_ASSERT'
+ r')\s*\(')
+ if Search(allowlisted_functions, line):
+ return
+ elif not Search(r'\S+\([^)]*$', line):
+ # Don't see an allowlisted function on this line. Actually we
+ # didn't see any function name on this line, so this is likely a
+ # multi-line parameter list. Try a bit harder to catch this case.
+ for i in range(2):
+ if (linenum > i and Search(allowlisted_functions,
+ clean_lines.elided[linenum - i - 1])):
+ return
+
+ decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
+ for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
+ if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter)
+ and not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
+ error(
+ filename, linenum, 'runtime/references', 2,
+ 'Is this a non-const reference? '
+ 'If so, make const or use a pointer: ' +
+ ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
- """Various cast related checks.
+ """Various cast related checks.
Args:
filename: The name of the current file.
@@ -5318,118 +5417,120 @@ def CheckCasts(filename, clean_lines, linenum, error):
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
- line = clean_lines.elided[linenum]
+ line = clean_lines.elided[linenum]
- # Check to see if they're using an conversion function cast.
- # I just try to capture the most common basic types, though there are more.
- # Parameterless conversion functions, such as bool(), are allowed as they are
- # probably a member operator declaration or default constructor.
- match = Search(
- r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
- r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
- r'(\([^)].*)', line)
- expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
- if match and not expecting_function:
- matched_type = match.group(2)
+    # Check to see if they're using a conversion function cast.
+ # I just try to capture the most common basic types, though there are more.
+ # Parameterless conversion functions, such as bool(), are allowed as they
+ # are probably a member operator declaration or default constructor.
+ match = Search(
+ r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
+ r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
+ r'(\([^)].*)', line)
+ expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
+ if match and not expecting_function:
+ matched_type = match.group(2)
- # matched_new_or_template is used to silence two false positives:
- # - New operators
- # - Template arguments with function types
+ # matched_new_or_template is used to silence two false positives:
+ # - New operators
+ # - Template arguments with function types
+ #
+ # For template arguments, we match on types immediately following
+ # an opening bracket without any spaces. This is a fast way to
+ # silence the common case where the function type is the first
+ # template argument. False negative with less-than comparison is
+ # avoided because those operators are usually followed by a space.
+ #
+        #   function<double(double)>   // bracket + no space = false positive
+ # value < double(42) // bracket + space = true positive
+ matched_new_or_template = match.group(1)
+
+ # Avoid arrays by looking for brackets that come after the closing
+ # parenthesis.
+ if Match(r'\([^()]+\)\s*\[', match.group(3)):
+ return
+
+ # Other things to ignore:
+ # - Function pointers
+ # - Casts to pointer types
+ # - Placement new
+ # - Alias declarations
+ matched_funcptr = match.group(3)
+ if (matched_new_or_template is None and not (
+ matched_funcptr and
+ (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr)
+ or matched_funcptr.startswith('(*)')))
+ and not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line)
+ and not Search(r'new\(\S+\)\s*' + matched_type, line)):
+ error(
+ filename, linenum, 'readability/casting', 4,
+ 'Using deprecated casting style. '
+ 'Use static_cast<%s>(...) instead' % matched_type)
+
+ if not expecting_function:
+ CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
+ r'\((int|float|double|bool|char|u?int(16|32|64))\)',
+ error)
+
+ # This doesn't catch all cases. Consider (const char * const)"hello".
#
- # For template arguments, we match on types immediately following
- # an opening bracket without any spaces. This is a fast way to
- # silence the common case where the function type is the first
- # template argument. False negative with less-than comparison is
- # avoided because those operators are usually followed by a space.
- #
-    #   function<double(double)>   // bracket + no space = false positive
- # value < double(42) // bracket + space = true positive
- matched_new_or_template = match.group(1)
-
- # Avoid arrays by looking for brackets that come after the closing
- # parenthesis.
- if Match(r'\([^()]+\)\s*\[', match.group(3)):
- return
-
- # Other things to ignore:
- # - Function pointers
- # - Casts to pointer types
- # - Placement new
- # - Alias declarations
- matched_funcptr = match.group(3)
- if (matched_new_or_template is None and
- not (matched_funcptr and
- (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
- matched_funcptr) or
- matched_funcptr.startswith('(*)'))) and
- not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
- not Search(r'new\(\S+\)\s*' + matched_type, line)):
- error(filename, linenum, 'readability/casting', 4,
- 'Using deprecated casting style. '
- 'Use static_cast<%s>(...) instead' %
- matched_type)
-
- if not expecting_function:
- CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
- r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
-
- # This doesn't catch all cases. Consider (const char * const)"hello".
- #
- # (char *) "foo" should always be a const_cast (reinterpret_cast won't
- # compile).
- if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
- r'\((char\s?\*+\s?)\)\s*"', error):
- pass
- else:
- # Check pointer casts for other than string constants
- CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
- r'\((\w+\s?\*+\s?)\)', error)
-
- # In addition, we look for people taking the address of a cast. This
- # is dangerous -- casts can assign to temporaries, so the pointer doesn't
- # point where you think.
- #
- # Some non-identifier character is required before the '&' for the
- # expression to be recognized as a cast. These are casts:
- # expression = &static_cast(temporary());
- # function(&(int*)(temporary()));
- #
- # This is not a cast:
- # reference_type&(int* function_param);
- match = Search(
- r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
- r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
- if match:
- # Try a better error message when the & is bound to something
- # dereferenced by the casted pointer, as opposed to the casted
- # pointer itself.
- parenthesis_error = False
- match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
- if match:
- _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
- if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
- _, y2, x2 = CloseExpression(clean_lines, y1, x1)
- if x2 >= 0:
- extended_line = clean_lines.elided[y2][x2:]
- if y2 < clean_lines.NumLines() - 1:
- extended_line += clean_lines.elided[y2 + 1]
- if Match(r'\s*(?:->|\[)', extended_line):
- parenthesis_error = True
-
- if parenthesis_error:
- error(filename, linenum, 'readability/casting', 4,
- ('Are you taking an address of something dereferenced '
- 'from a cast? Wrapping the dereferenced expression in '
- 'parentheses will make the binding more obvious'))
+ # (char *) "foo" should always be a const_cast (reinterpret_cast won't
+ # compile).
+ if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
+ r'\((char\s?\*+\s?)\)\s*"', error):
+ pass
else:
- error(filename, linenum, 'runtime/casting', 4,
- ('Are you taking an address of a cast? '
- 'This is dangerous: could be a temp var. '
- 'Take the address before doing the cast, rather than after'))
+ # Check pointer casts for other than string constants
+ CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
+ r'\((\w+\s?\*+\s?)\)', error)
+
+ # In addition, we look for people taking the address of a cast. This
+ # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+ # point where you think.
+ #
+ # Some non-identifier character is required before the '&' for the
+ # expression to be recognized as a cast. These are casts:
+ # expression = &static_cast(temporary());
+ # function(&(int*)(temporary()));
+ #
+ # This is not a cast:
+ # reference_type&(int* function_param);
+ match = Search(
+ r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
+ r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
+ if match:
+ # Try a better error message when the & is bound to something
+ # dereferenced by the casted pointer, as opposed to the casted
+ # pointer itself.
+ parenthesis_error = False
+ match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<',
+ line)
+ if match:
+ _, y1, x1 = CloseExpression(clean_lines, linenum,
+ len(match.group(1)))
+ if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
+ _, y2, x2 = CloseExpression(clean_lines, y1, x1)
+ if x2 >= 0:
+ extended_line = clean_lines.elided[y2][x2:]
+ if y2 < clean_lines.NumLines() - 1:
+ extended_line += clean_lines.elided[y2 + 1]
+ if Match(r'\s*(?:->|\[)', extended_line):
+ parenthesis_error = True
+
+ if parenthesis_error:
+ error(filename, linenum, 'readability/casting', 4,
+ ('Are you taking an address of something dereferenced '
+ 'from a cast? Wrapping the dereferenced expression in '
+ 'parentheses will make the binding more obvious'))
+ else:
+ error(filename, linenum, 'runtime/casting', 4,
+ ('Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
- """Checks for a C-style cast by looking for the pattern.
+ """Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
@@ -5444,45 +5545,46 @@ def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
True if an error was emitted.
False otherwise.
"""
- line = clean_lines.elided[linenum]
- match = Search(pattern, line)
- if not match:
- return False
+ line = clean_lines.elided[linenum]
+ match = Search(pattern, line)
+ if not match:
+ return False
- # Exclude lines with keywords that tend to look like casts
- context = line[0:match.start(1) - 1]
- if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
- return False
+ # Exclude lines with keywords that tend to look like casts
+ context = line[0:match.start(1) - 1]
+ if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
+ return False
- # Try expanding current context to see if we one level of
- # parentheses inside a macro.
- if linenum > 0:
- for i in range(linenum - 1, max(0, linenum - 5), -1):
- context = clean_lines.elided[i] + context
- if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
- return False
+    # Try expanding current context to see if we are one level of
+    # parentheses inside a macro.
+ if linenum > 0:
+ for i in range(linenum - 1, max(0, linenum - 5), -1):
+ context = clean_lines.elided[i] + context
+ if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
+ return False
- # operator++(int) and operator--(int)
- if context.endswith(' operator++') or context.endswith(' operator--'):
- return False
+ # operator++(int) and operator--(int)
+ if context.endswith(' operator++') or context.endswith(' operator--'):
+ return False
- # A single unnamed argument for a function tends to look like old style cast.
- # If we see those, don't issue warnings for deprecated casts.
- remainder = line[match.end(0):]
- if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
- remainder):
- return False
+    # A single unnamed argument for a function tends to look like an
+    # old-style cast. If we see those, don't issue warnings for deprecated
+    # casts.
+ remainder = line[match.end(0):]
+ if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
+ remainder):
+ return False
- # At this point, all that should be left is actual casts.
- error(filename, linenum, 'readability/casting', 4,
+ # At this point, all that should be left is actual casts.
+ error(
+ filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
- return True
+ return True
def ExpectingFunctionArgs(clean_lines, linenum):
- """Checks whether where function type arguments are expected.
+    """Checks whether we are where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
@@ -5492,88 +5594,131 @@ def ExpectingFunctionArgs(clean_lines, linenum):
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
- line = clean_lines.elided[linenum]
- return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line)
- or _TYPE_TRAITS_RE.search(line)
- or (linenum >= 2 and
- (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
- clean_lines.elided[linenum - 1])
- or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
- clean_lines.elided[linenum - 2])
- or Search(r'\b(::function|base::FunctionRef)\s*\<\s*$',
- clean_lines.elided[linenum - 1]))))
+ line = clean_lines.elided[linenum]
+ return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line)
+ or _TYPE_TRAITS_RE.search(line)
+ or (linenum >= 2 and
+ (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
+ clean_lines.elided[linenum - 1])
+ or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
+ clean_lines.elided[linenum - 2])
+ or Search(r'\b(::function|base::FunctionRef)\s*\<\s*$',
+ clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
-    ('<deque>', ('deque',)),
-    ('<functional>', ('unary_function', 'binary_function',
- 'plus', 'minus', 'multiplies', 'divides', 'modulus',
- 'negate',
- 'equal_to', 'not_equal_to', 'greater', 'less',
- 'greater_equal', 'less_equal',
- 'logical_and', 'logical_or', 'logical_not',
- 'unary_negate', 'not1', 'binary_negate', 'not2',
- 'bind1st', 'bind2nd',
- 'pointer_to_unary_function',
- 'pointer_to_binary_function',
- 'ptr_fun',
- 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
- 'mem_fun_ref_t',
- 'const_mem_fun_t', 'const_mem_fun1_t',
- 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
- 'mem_fun_ref',
- )),
-    ('<limits>', ('numeric_limits',)),
-    ('<list>', ('list',)),
- ('
'
- c = ansi2html.Ansi2HTMLConverter(inline=True, scheme='dracula')
+ callout_counter = 1
+ if backend == 'xhtml11':
+ preamble = (
+ '
'
+ c = ansi2html.Ansi2HTMLConverter(inline=True, scheme='dracula')
- in_code = False
- body = c.convert(output, full=False)
- for i, line in enumerate(body.splitlines()):
- if line.startswith(comment_marker):
+ in_code = False
+ body = c.convert(output, full=False)
+ for i, line in enumerate(body.splitlines()):
+ if line.startswith(comment_marker):
+ if in_code:
+ w(postamble)
+ in_code = False
+ w(line[len(comment_marker):])
+ else:
+ if not in_code:
+ w(preamble)
+ in_code = True
+ ext = ''
+ for _ in range(callouts[i]):
+ if not ext:
+ ext += ''
+ ext += '
<%d>' % callout_counter
+ callout_counter += 1
+ if ext:
+ ext += '
'
+ w(line + ext + '\n')
if in_code:
- w(postamble)
- in_code = False
- w(line[len(comment_marker):])
- else:
- if not in_code:
- w(preamble)
- in_code = True
- ext = ''
- for _ in range(callouts[i]):
- if not ext:
- ext += ''
- ext += '
<%d>' % callout_counter
- callout_counter += 1
- if ext:
- ext += '
'
- w(line + ext + '\n')
- if in_code:
- w(postamble)
- else:
- preamble = ''
- postamble = ''
+ w(postamble)
+ else:
+ preamble = ''
+ postamble = ''
- in_code = False
- body = simpleXML(output)
- for i, line in enumerate(body.splitlines()):
- if line.startswith(comment_marker):
+ in_code = False
+ body = simpleXML(output)
+ for i, line in enumerate(body.splitlines()):
+ if line.startswith(comment_marker):
+ if in_code:
+ w(postamble)
+ in_code = False
+ w(line[len(comment_marker):])
+ else:
+ if not in_code:
+ w(preamble)
+ in_code = True
+ ext = ''
+ for _ in range(callouts[i]):
+ ext += ' (%d)' % callout_counter
+ callout_counter += 1
+ w(line + ext + '\n')
if in_code:
- w(postamble)
- in_code = False
- w(line[len(comment_marker):])
- else:
- if not in_code:
- w(preamble)
- in_code = True
- ext = ''
- for _ in range(callouts[i]):
- ext += ' (%d)' % callout_counter
- callout_counter += 1
- w(line + ext + '\n')
- if in_code:
- w(postamble)
+ w(postamble)
if __name__ == '__main__':
- main()
+ main()
diff --git a/metadata/.style.yapf b/metadata/.style.yapf
deleted file mode 100644
index 557fa7bf84..0000000000
--- a/metadata/.style.yapf
+++ /dev/null
@@ -1,2 +0,0 @@
-[style]
-based_on_style = pep8
diff --git a/metadata/fields/field_types.py b/metadata/fields/field_types.py
index b1667f6010..609bc0edc1 100644
--- a/metadata/fields/field_types.py
+++ b/metadata/fields/field_types.py
@@ -58,8 +58,7 @@ class MetadataField:
Raises: NotImplementedError if called. This method must be
overridden with the actual validation of the field.
"""
- raise NotImplementedError(
- f"{self._name} field validation not defined.")
+ raise NotImplementedError(f"{self._name} field validation not defined.")
class FreeformTextField(MetadataField):
diff --git a/metadata/tests/dependency_metadata_test.py b/metadata/tests/dependency_metadata_test.py
index 22a0a9564a..a0ec1a20ed 100644
--- a/metadata/tests/dependency_metadata_test.py
+++ b/metadata/tests/dependency_metadata_test.py
@@ -52,8 +52,7 @@ class DependencyValidationTest(unittest.TestCase):
dependency.add_entry(known_fields.LICENSE_FILE.get_name(), "LICENSE")
dependency.add_entry(known_fields.LICENSE.get_name(), "Public Domain")
dependency.add_entry(known_fields.VERSION.get_name(), "1.0.0")
- dependency.add_entry(known_fields.NAME.get_name(),
- "Test missing field")
+ dependency.add_entry(known_fields.NAME.get_name(), "Test missing field")
# Leave URL field unspecified.
results = dependency.validate(
@@ -70,8 +69,7 @@ class DependencyValidationTest(unittest.TestCase):
dependency = dm.DependencyMetadata()
dependency.add_entry(known_fields.URL.get_name(),
"https://www.example.com")
- dependency.add_entry(known_fields.NAME.get_name(),
- "Test invalid field")
+ dependency.add_entry(known_fields.NAME.get_name(), "Test invalid field")
dependency.add_entry(known_fields.VERSION.get_name(), "1.0.0")
dependency.add_entry(known_fields.LICENSE_FILE.get_name(), "LICENSE")
dependency.add_entry(known_fields.LICENSE.get_name(), "Public domain")
diff --git a/metadata/tests/validate_test.py b/metadata/tests/validate_test.py
index 42afad08f0..657f2f5ffe 100644
--- a/metadata/tests/validate_test.py
+++ b/metadata/tests/validate_test.py
@@ -21,8 +21,8 @@ import metadata.validate
_SOURCE_FILE_DIR = os.path.join(_THIS_DIR, "data")
_VALID_METADATA_FILEPATH = os.path.join(_THIS_DIR, "data",
"README.chromium.test.multi-valid")
-_INVALID_METADATA_FILEPATH = os.path.join(
- _THIS_DIR, "data", "README.chromium.test.multi-invalid")
+_INVALID_METADATA_FILEPATH = os.path.join(_THIS_DIR, "data",
+ "README.chromium.test.multi-invalid")
class ValidateContentTest(unittest.TestCase):
diff --git a/metadata/validate.py b/metadata/validate.py
index 9c21d16172..72a5b431a3 100644
--- a/metadata/validate.py
+++ b/metadata/validate.py
@@ -47,8 +47,7 @@ def validate_content(content: str, source_file_dir: str,
return results
-def _construct_file_read_error(filepath: str,
- cause: str) -> vr.ValidationError:
+def _construct_file_read_error(filepath: str, cause: str) -> vr.ValidationError:
"""Helper function to create a validation error for a
file reading issue.
"""
diff --git a/metadata/validation_result.py b/metadata/validation_result.py
index 6c171d3867..59ce5f5c77 100644
--- a/metadata/validation_result.py
+++ b/metadata/validation_result.py
@@ -7,9 +7,8 @@ import textwrap
from typing import Dict, List, Union
_CHROMIUM_METADATA_PRESCRIPT = "Third party metadata issue:"
-_CHROMIUM_METADATA_POSTSCRIPT = (
- "Check //third_party/README.chromium.template "
- "for details.")
+_CHROMIUM_METADATA_POSTSCRIPT = ("Check //third_party/README.chromium.template "
+ "for details.")
class ValidationResult:
diff --git a/metrics.py b/metrics.py
index 4641bf2d5f..3ccbfc9a11 100644
--- a/metrics.py
+++ b/metrics.py
@@ -20,7 +20,6 @@ import gclient_utils
import metrics_utils
import subprocess2
-
DEPOT_TOOLS = os.path.dirname(os.path.abspath(__file__))
CONFIG_FILE = os.path.join(DEPOT_TOOLS, 'metrics.cfg')
UPLOAD_SCRIPT = os.path.join(DEPOT_TOOLS, 'upload_metrics.py')
@@ -32,294 +31,297 @@ DEPOT_TOOLS_ENV = ['DOGFOOD_STACKED_CHANGES']
INVALID_CONFIG_WARNING = (
'WARNING: Your metrics.cfg file was invalid or nonexistent. A new one will '
- 'be created.'
-)
+ 'be created.')
PERMISSION_DENIED_WARNING = (
'Could not write the metrics collection config:\n\t%s\n'
- 'Metrics collection will be disabled.'
-)
+ 'Metrics collection will be disabled.')
class _Config(object):
- def __init__(self):
- self._initialized = False
- self._config = {}
+ def __init__(self):
+ self._initialized = False
+ self._config = {}
- def _ensure_initialized(self):
- if self._initialized:
- return
+ def _ensure_initialized(self):
+ if self._initialized:
+ return
- # Metrics collection is disabled, so don't collect any metrics.
- if not metrics_utils.COLLECT_METRICS:
- self._config = {
- 'is-googler': False,
- 'countdown': 0,
- 'opt-in': False,
- 'version': metrics_utils.CURRENT_VERSION,
- }
- self._initialized = True
- return
+ # Metrics collection is disabled, so don't collect any metrics.
+ if not metrics_utils.COLLECT_METRICS:
+ self._config = {
+ 'is-googler': False,
+ 'countdown': 0,
+ 'opt-in': False,
+ 'version': metrics_utils.CURRENT_VERSION,
+ }
+ self._initialized = True
+ return
- # We are running on a bot. Ignore config and collect metrics.
- if metrics_utils.REPORT_BUILD:
- self._config = {
- 'is-googler': True,
- 'countdown': 0,
- 'opt-in': True,
- 'version': metrics_utils.CURRENT_VERSION,
- }
- self._initialized = True
- return
+ # We are running on a bot. Ignore config and collect metrics.
+ if metrics_utils.REPORT_BUILD:
+ self._config = {
+ 'is-googler': True,
+ 'countdown': 0,
+ 'opt-in': True,
+ 'version': metrics_utils.CURRENT_VERSION,
+ }
+ self._initialized = True
+ return
- try:
- config = json.loads(gclient_utils.FileRead(CONFIG_FILE))
- except (IOError, ValueError):
- config = {}
+ try:
+ config = json.loads(gclient_utils.FileRead(CONFIG_FILE))
+ except (IOError, ValueError):
+ config = {}
- self._config = config.copy()
+ self._config = config.copy()
- if 'is-googler' not in self._config:
- # /should-upload is only accessible from Google IPs, so we only need to
- # check if we can reach the page. An external developer would get access
- # denied.
- try:
- req = urllib.request.urlopen(metrics_utils.APP_URL + '/should-upload')
- self._config['is-googler'] = req.getcode() == 200
- except (urllib.request.URLError, urllib.request.HTTPError):
- self._config['is-googler'] = False
+ if 'is-googler' not in self._config:
+ # /should-upload is only accessible from Google IPs, so we only need
+ # to check if we can reach the page. An external developer would get
+ # access denied.
+ try:
+ req = urllib.request.urlopen(metrics_utils.APP_URL +
+ '/should-upload')
+ self._config['is-googler'] = req.getcode() == 200
+ except (urllib.request.URLError, urllib.request.HTTPError):
+ self._config['is-googler'] = False
- # Make sure the config variables we need are present, and initialize them to
- # safe values otherwise.
- self._config.setdefault('countdown', DEFAULT_COUNTDOWN)
- self._config.setdefault('opt-in', None)
- self._config.setdefault('version', metrics_utils.CURRENT_VERSION)
+ # Make sure the config variables we need are present, and initialize
+ # them to safe values otherwise.
+ self._config.setdefault('countdown', DEFAULT_COUNTDOWN)
+ self._config.setdefault('opt-in', None)
+ self._config.setdefault('version', metrics_utils.CURRENT_VERSION)
- if config != self._config:
- print(INVALID_CONFIG_WARNING, file=sys.stderr)
- self._write_config()
+ if config != self._config:
+ print(INVALID_CONFIG_WARNING, file=sys.stderr)
+ self._write_config()
- self._initialized = True
+ self._initialized = True
- def _write_config(self):
- try:
- gclient_utils.FileWrite(CONFIG_FILE, json.dumps(self._config))
- except IOError as e:
- print(PERMISSION_DENIED_WARNING % e, file=sys.stderr)
- self._config['opt-in'] = False
+ def _write_config(self):
+ try:
+ gclient_utils.FileWrite(CONFIG_FILE, json.dumps(self._config))
+ except IOError as e:
+ print(PERMISSION_DENIED_WARNING % e, file=sys.stderr)
+ self._config['opt-in'] = False
- @property
- def version(self):
- self._ensure_initialized()
- return self._config['version']
+ @property
+ def version(self):
+ self._ensure_initialized()
+ return self._config['version']
- @property
- def is_googler(self):
- self._ensure_initialized()
- return self._config['is-googler']
+ @property
+ def is_googler(self):
+ self._ensure_initialized()
+ return self._config['is-googler']
- @property
- def opted_in(self):
- self._ensure_initialized()
- return self._config['opt-in']
+ @property
+ def opted_in(self):
+ self._ensure_initialized()
+ return self._config['opt-in']
- @opted_in.setter
- def opted_in(self, value):
- self._ensure_initialized()
- self._config['opt-in'] = value
- self._config['version'] = metrics_utils.CURRENT_VERSION
- self._write_config()
+ @opted_in.setter
+ def opted_in(self, value):
+ self._ensure_initialized()
+ self._config['opt-in'] = value
+ self._config['version'] = metrics_utils.CURRENT_VERSION
+ self._write_config()
- @property
- def countdown(self):
- self._ensure_initialized()
- return self._config['countdown']
+ @property
+ def countdown(self):
+ self._ensure_initialized()
+ return self._config['countdown']
- @property
- def should_collect_metrics(self):
- # Don't report metrics if user is not a Googler.
- if not self.is_googler:
- return False
- # Don't report metrics if user has opted out.
- if self.opted_in is False:
- return False
- # Don't report metrics if countdown hasn't reached 0.
- if self.opted_in is None and self.countdown > 0:
- return False
- return True
+ @property
+ def should_collect_metrics(self):
+ # Don't report metrics if user is not a Googler.
+ if not self.is_googler:
+ return False
+ # Don't report metrics if user has opted out.
+ if self.opted_in is False:
+ return False
+ # Don't report metrics if countdown hasn't reached 0.
+ if self.opted_in is None and self.countdown > 0:
+ return False
+ return True
- def decrease_countdown(self):
- self._ensure_initialized()
- if self.countdown == 0:
- return
- self._config['countdown'] -= 1
- if self.countdown == 0:
- self._config['version'] = metrics_utils.CURRENT_VERSION
- self._write_config()
+ def decrease_countdown(self):
+ self._ensure_initialized()
+ if self.countdown == 0:
+ return
+ self._config['countdown'] -= 1
+ if self.countdown == 0:
+ self._config['version'] = metrics_utils.CURRENT_VERSION
+ self._write_config()
- def reset_config(self):
- # Only reset countdown if we're already collecting metrics.
- if self.should_collect_metrics:
- self._ensure_initialized()
- self._config['countdown'] = DEFAULT_COUNTDOWN
- self._config['opt-in'] = None
+ def reset_config(self):
+ # Only reset countdown if we're already collecting metrics.
+ if self.should_collect_metrics:
+ self._ensure_initialized()
+ self._config['countdown'] = DEFAULT_COUNTDOWN
+ self._config['opt-in'] = None
class MetricsCollector(object):
- def __init__(self):
- self._metrics_lock = threading.Lock()
- self._reported_metrics = {}
- self._config = _Config()
- self._collecting_metrics = False
- self._collect_custom_metrics = True
+ def __init__(self):
+ self._metrics_lock = threading.Lock()
+ self._reported_metrics = {}
+ self._config = _Config()
+ self._collecting_metrics = False
+ self._collect_custom_metrics = True
- @property
- def config(self):
- return self._config
+ @property
+ def config(self):
+ return self._config
- @property
- def collecting_metrics(self):
- return self._collecting_metrics
+ @property
+ def collecting_metrics(self):
+ return self._collecting_metrics
- def add(self, name, value):
- if self._collect_custom_metrics:
- with self._metrics_lock:
- self._reported_metrics[name] = value
+ def add(self, name, value):
+ if self._collect_custom_metrics:
+ with self._metrics_lock:
+ self._reported_metrics[name] = value
- def add_repeated(self, name, value):
- if self._collect_custom_metrics:
- with self._metrics_lock:
- self._reported_metrics.setdefault(name, []).append(value)
+ def add_repeated(self, name, value):
+ if self._collect_custom_metrics:
+ with self._metrics_lock:
+ self._reported_metrics.setdefault(name, []).append(value)
- @contextlib.contextmanager
- def pause_metrics_collection(self):
- collect_custom_metrics = self._collect_custom_metrics
- self._collect_custom_metrics = False
- try:
- yield
- finally:
- self._collect_custom_metrics = collect_custom_metrics
+ @contextlib.contextmanager
+ def pause_metrics_collection(self):
+ collect_custom_metrics = self._collect_custom_metrics
+ self._collect_custom_metrics = False
+ try:
+ yield
+ finally:
+ self._collect_custom_metrics = collect_custom_metrics
- def _upload_metrics_data(self):
- """Upload the metrics data to the AppEngine app."""
- p = subprocess2.Popen(['vpython3', UPLOAD_SCRIPT], stdin=subprocess2.PIPE)
- # We invoke a subprocess, and use stdin.write instead of communicate(),
- # so that we are able to return immediately, leaving the upload running in
- # the background.
- p.stdin.write(json.dumps(self._reported_metrics).encode('utf-8'))
- # ... but if we're running on a bot, wait until upload has completed.
- if metrics_utils.REPORT_BUILD:
- p.communicate()
+ def _upload_metrics_data(self):
+ """Upload the metrics data to the AppEngine app."""
+ p = subprocess2.Popen(['vpython3', UPLOAD_SCRIPT],
+ stdin=subprocess2.PIPE)
+ # We invoke a subprocess, and use stdin.write instead of communicate(),
+ # so that we are able to return immediately, leaving the upload running
+ # in the background.
+ p.stdin.write(json.dumps(self._reported_metrics).encode('utf-8'))
+ # ... but if we're running on a bot, wait until upload has completed.
+ if metrics_utils.REPORT_BUILD:
+ p.communicate()
- def _collect_metrics(self, func, command_name, *args, **kwargs):
- # If we're already collecting metrics, just execute the function.
- # e.g. git-cl split invokes git-cl upload several times to upload each
- # split CL.
- if self.collecting_metrics:
- # Don't collect metrics for this function.
- # e.g. Don't record the arguments git-cl split passes to git-cl upload.
- with self.pause_metrics_collection():
- return func(*args, **kwargs)
+ def _collect_metrics(self, func, command_name, *args, **kwargs):
+ # If we're already collecting metrics, just execute the function.
+ # e.g. git-cl split invokes git-cl upload several times to upload each
+ # split CL.
+ if self.collecting_metrics:
+ # Don't collect metrics for this function.
+ # e.g. Don't record the arguments git-cl split passes to git-cl
+ # upload.
+ with self.pause_metrics_collection():
+ return func(*args, **kwargs)
- self._collecting_metrics = True
- self.add('metrics_version', metrics_utils.CURRENT_VERSION)
- self.add('command', command_name)
- for env in DEPOT_TOOLS_ENV:
- if env in os.environ:
- self.add_repeated('env_vars', {
- 'name': env,
- 'value': os.environ.get(env)
- })
+ self._collecting_metrics = True
+ self.add('metrics_version', metrics_utils.CURRENT_VERSION)
+ self.add('command', command_name)
+ for env in DEPOT_TOOLS_ENV:
+ if env in os.environ:
+ self.add_repeated('env_vars', {
+ 'name': env,
+ 'value': os.environ.get(env)
+ })
- try:
- start = time.time()
- result = func(*args, **kwargs)
- exception = None
- # pylint: disable=bare-except
- except:
- exception = sys.exc_info()
- finally:
- self.add('execution_time', time.time() - start)
+ try:
+ start = time.time()
+ result = func(*args, **kwargs)
+ exception = None
+ # pylint: disable=bare-except
+ except:
+ exception = sys.exc_info()
+ finally:
+ self.add('execution_time', time.time() - start)
- exit_code = metrics_utils.return_code_from_exception(exception)
- self.add('exit_code', exit_code)
+ exit_code = metrics_utils.return_code_from_exception(exception)
+ self.add('exit_code', exit_code)
- # Add metrics regarding environment information.
- self.add('timestamp', int(time.time()))
- self.add('python_version', metrics_utils.get_python_version())
- self.add('host_os', gclient_utils.GetOperatingSystem())
- self.add('host_arch', detect_host_arch.HostArch())
+ # Add metrics regarding environment information.
+ self.add('timestamp', int(time.time()))
+ self.add('python_version', metrics_utils.get_python_version())
+ self.add('host_os', gclient_utils.GetOperatingSystem())
+ self.add('host_arch', detect_host_arch.HostArch())
- depot_tools_age = metrics_utils.get_repo_timestamp(DEPOT_TOOLS)
- if depot_tools_age is not None:
- self.add('depot_tools_age', int(depot_tools_age))
+ depot_tools_age = metrics_utils.get_repo_timestamp(DEPOT_TOOLS)
+ if depot_tools_age is not None:
+ self.add('depot_tools_age', int(depot_tools_age))
- git_version = metrics_utils.get_git_version()
- if git_version:
- self.add('git_version', git_version)
+ git_version = metrics_utils.get_git_version()
+ if git_version:
+ self.add('git_version', git_version)
- bot_metrics = metrics_utils.get_bot_metrics()
- if bot_metrics:
- self.add('bot_metrics', bot_metrics)
+ bot_metrics = metrics_utils.get_bot_metrics()
+ if bot_metrics:
+ self.add('bot_metrics', bot_metrics)
- self._upload_metrics_data()
- if exception:
- gclient_utils.reraise(exception[0], exception[1], exception[2])
- return result
+ self._upload_metrics_data()
+ if exception:
+ gclient_utils.reraise(exception[0], exception[1], exception[2])
+ return result
- def collect_metrics(self, command_name):
- """A decorator used to collect metrics over the life of a function.
+ def collect_metrics(self, command_name):
+ """A decorator used to collect metrics over the life of a function.
This decorator executes the function and collects metrics about the system
environment and the function performance.
"""
- def _decorator(func):
- if not self.config.should_collect_metrics:
- return func
- # Needed to preserve the __name__ and __doc__ attributes of func.
- @functools.wraps(func)
- def _inner(*args, **kwargs):
- return self._collect_metrics(func, command_name, *args, **kwargs)
- return _inner
- return _decorator
+ def _decorator(func):
+ if not self.config.should_collect_metrics:
+ return func
+ # Needed to preserve the __name__ and __doc__ attributes of func.
+ @functools.wraps(func)
+ def _inner(*args, **kwargs):
+ return self._collect_metrics(func, command_name, *args,
+ **kwargs)
- @contextlib.contextmanager
- def print_notice_and_exit(self):
- """A context manager used to print the notice and terminate execution.
+ return _inner
+
+ return _decorator
+
+ @contextlib.contextmanager
+ def print_notice_and_exit(self):
+ """A context manager used to print the notice and terminate execution.
This decorator executes the function and prints the monitoring notice if
necessary. If an exception is raised, we will catch it, and print it before
printing the metrics collection notice.
This will call sys.exit() with an appropriate exit code to ensure the notice
is the last thing printed."""
- # Needed to preserve the __name__ and __doc__ attributes of func.
- try:
- yield
- exception = None
- # pylint: disable=bare-except
- except:
- exception = sys.exc_info()
+ # Needed to preserve the __name__ and __doc__ attributes of func.
+ try:
+ yield
+ exception = None
+ # pylint: disable=bare-except
+ except:
+ exception = sys.exc_info()
- # Print the exception before the metrics notice, so that the notice is
- # clearly visible even if gclient fails.
- if exception:
- if isinstance(exception[1], KeyboardInterrupt):
- sys.stderr.write('Interrupted\n')
- elif not isinstance(exception[1], SystemExit):
- traceback.print_exception(*exception)
+ # Print the exception before the metrics notice, so that the notice is
+ # clearly visible even if gclient fails.
+ if exception:
+ if isinstance(exception[1], KeyboardInterrupt):
+ sys.stderr.write('Interrupted\n')
+ elif not isinstance(exception[1], SystemExit):
+ traceback.print_exception(*exception)
- # Check if the version has changed
- if (self.config.is_googler
- and self.config.opted_in is not False
- and self.config.version != metrics_utils.CURRENT_VERSION):
- metrics_utils.print_version_change(self.config.version)
- self.config.reset_config()
+ # Check if the version has changed
+ if (self.config.is_googler and self.config.opted_in is not False
+ and self.config.version != metrics_utils.CURRENT_VERSION):
+ metrics_utils.print_version_change(self.config.version)
+ self.config.reset_config()
- # Print the notice
- if self.config.is_googler and self.config.opted_in is None:
- metrics_utils.print_notice(self.config.countdown)
- self.config.decrease_countdown()
+ # Print the notice
+ if self.config.is_googler and self.config.opted_in is None:
+ metrics_utils.print_notice(self.config.countdown)
+ self.config.decrease_countdown()
- sys.exit(metrics_utils.return_code_from_exception(exception))
+ sys.exit(metrics_utils.return_code_from_exception(exception))
collector = MetricsCollector()
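For orientation, a minimal sketch of how this module-level collector is wired into a depot_tools entry point; the command name and function below are hypothetical and not taken from the patch:

import sys

import metrics


@metrics.collector.collect_metrics('git example-command')
def main(argv):
    # Command implementation goes here; the decorator records runtime,
    # exit code and environment information when collection is enabled.
    return 0


if __name__ == '__main__':
    # print_notice_and_exit() prints the opt-in notice (or countdown) last
    # and converts any raised exception into the matching exit code.
    with metrics.collector.print_notice_and_exit():
        sys.exit(main(sys.argv[1:]))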
diff --git a/metrics_utils.py b/metrics_utils.py
index 931cb38e03..0664e39698 100644
--- a/metrics_utils.py
+++ b/metrics_utils.py
@@ -12,7 +12,6 @@ import subprocess2
import sys
import urllib.parse
-
# Current version of metrics recording.
# When we add new metrics, the version number will be increased, we show the
# user what has changed, and ask the user to agree again.
@@ -21,223 +20,198 @@ CURRENT_VERSION = 2
APP_URL = 'https://cit-cli-metrics.appspot.com'
REPORT_BUILD = os.getenv('DEPOT_TOOLS_REPORT_BUILD')
-COLLECT_METRICS = (
- os.getenv('DEPOT_TOOLS_COLLECT_METRICS') != '0'
- and os.getenv('DEPOT_TOOLS_METRICS') != '0')
+COLLECT_METRICS = (os.getenv('DEPOT_TOOLS_COLLECT_METRICS') != '0'
+ and os.getenv('DEPOT_TOOLS_METRICS') != '0')
SYNC_STATUS_SUCCESS = 'SYNC_STATUS_SUCCESS'
SYNC_STATUS_FAILURE = 'SYNC_STATUS_FAILURE'
def get_notice_countdown_header(countdown):
- if countdown == 0:
- yield ' METRICS COLLECTION IS TAKING PLACE'
- else:
- yield ' METRICS COLLECTION WILL START IN %d EXECUTIONS' % countdown
+ if countdown == 0:
+ yield ' METRICS COLLECTION IS TAKING PLACE'
+ else:
+ yield ' METRICS COLLECTION WILL START IN %d EXECUTIONS' % countdown
+
def get_notice_version_change_header():
- yield ' WE ARE COLLECTING ADDITIONAL METRICS'
- yield ''
- yield ' Please review the changes and opt-in again.'
+ yield ' WE ARE COLLECTING ADDITIONAL METRICS'
+ yield ''
+ yield ' Please review the changes and opt-in again.'
+
def get_notice_footer():
- yield 'To suppress this message opt in or out using:'
- yield '$ gclient metrics [--opt-in] [--opt-out]'
- yield 'For more information please see metrics.README.md'
- yield 'in your depot_tools checkout or visit'
- yield 'https://bit.ly/3MpLAYM.'
+ yield 'To suppress this message opt in or out using:'
+ yield '$ gclient metrics [--opt-in] [--opt-out]'
+ yield 'For more information please see metrics.README.md'
+ yield 'in your depot_tools checkout or visit'
+ yield 'https://bit.ly/3MpLAYM.'
+
def get_change_notice(version):
- if version == 0:
- return [] # No changes for version 0
+ if version == 0:
+ return [] # No changes for version 0
- if version == 1:
- return [
- 'We want to collect the Git version.',
- 'We want to collect information about the HTTP',
- 'requests that depot_tools makes, and the git and',
- 'cipd commands it executes.',
- '',
- 'We only collect known strings to make sure we',
- 'don\'t record PII.',
- ]
+ if version == 1:
+ return [
+ 'We want to collect the Git version.',
+ 'We want to collect information about the HTTP',
+ 'requests that depot_tools makes, and the git and',
+ 'cipd commands it executes.',
+ '',
+ 'We only collect known strings to make sure we',
+ 'don\'t record PII.',
+ ]
- if version == 2:
- return [
- 'We will start collecting metrics from bots.',
- 'There are no changes for developers.',
- 'If the DEPOT_TOOLS_REPORT_BUILD environment variable is set,',
- 'we will report information about the current build',
- '(e.g. buildbucket project, bucket, builder and build id),',
- 'and authenticate to the metrics collection server.',
- 'This information will only be recorded for requests',
- 'authenticated as bot service accounts.',
- ]
+ if version == 2:
+ return [
+ 'We will start collecting metrics from bots.',
+ 'There are no changes for developers.',
+ 'If the DEPOT_TOOLS_REPORT_BUILD environment variable is set,',
+ 'we will report information about the current build',
+ '(e.g. buildbucket project, bucket, builder and build id),',
+ 'and authenticate to the metrics collection server.',
+ 'This information will only be recorded for requests',
+ 'authenticated as bot service accounts.',
+ ]
KNOWN_PROJECT_URLS = {
- 'https://chrome-internal.googlesource.com/chrome/ios_internal',
- 'https://chrome-internal.googlesource.com/infra/infra_internal',
- 'https://chromium.googlesource.com/breakpad/breakpad',
- 'https://chromium.googlesource.com/chromium/src',
- 'https://chromium.googlesource.com/chromium/tools/depot_tools',
- 'https://chromium.googlesource.com/crashpad/crashpad',
- 'https://chromium.googlesource.com/external/gyp',
- 'https://chromium.googlesource.com/external/naclports',
- 'https://chromium.googlesource.com/infra/goma/client',
- 'https://chromium.googlesource.com/infra/infra',
- 'https://chromium.googlesource.com/native_client/',
- 'https://chromium.googlesource.com/syzygy',
- 'https://chromium.googlesource.com/v8/v8',
- 'https://dart.googlesource.com/sdk',
- 'https://pdfium.googlesource.com/pdfium',
- 'https://skia.googlesource.com/buildbot',
- 'https://skia.googlesource.com/skia',
- 'https://webrtc.googlesource.com/src',
+ 'https://chrome-internal.googlesource.com/chrome/ios_internal',
+ 'https://chrome-internal.googlesource.com/infra/infra_internal',
+ 'https://chromium.googlesource.com/breakpad/breakpad',
+ 'https://chromium.googlesource.com/chromium/src',
+ 'https://chromium.googlesource.com/chromium/tools/depot_tools',
+ 'https://chromium.googlesource.com/crashpad/crashpad',
+ 'https://chromium.googlesource.com/external/gyp',
+ 'https://chromium.googlesource.com/external/naclports',
+ 'https://chromium.googlesource.com/infra/goma/client',
+ 'https://chromium.googlesource.com/infra/infra',
+ 'https://chromium.googlesource.com/native_client/',
+ 'https://chromium.googlesource.com/syzygy',
+ 'https://chromium.googlesource.com/v8/v8',
+ 'https://dart.googlesource.com/sdk',
+ 'https://pdfium.googlesource.com/pdfium',
+ 'https://skia.googlesource.com/buildbot',
+ 'https://skia.googlesource.com/skia',
+ 'https://webrtc.googlesource.com/src',
}
KNOWN_HTTP_HOSTS = {
- 'chrome-internal-review.googlesource.com',
- 'chromium-review.googlesource.com',
- 'dart-review.googlesource.com',
- 'eu1-mirror-chromium-review.googlesource.com',
- 'pdfium-review.googlesource.com',
- 'skia-review.googlesource.com',
- 'us1-mirror-chromium-review.googlesource.com',
- 'us2-mirror-chromium-review.googlesource.com',
- 'us3-mirror-chromium-review.googlesource.com',
- 'webrtc-review.googlesource.com',
+ 'chrome-internal-review.googlesource.com',
+ 'chromium-review.googlesource.com',
+ 'dart-review.googlesource.com',
+ 'eu1-mirror-chromium-review.googlesource.com',
+ 'pdfium-review.googlesource.com',
+ 'skia-review.googlesource.com',
+ 'us1-mirror-chromium-review.googlesource.com',
+ 'us2-mirror-chromium-review.googlesource.com',
+ 'us3-mirror-chromium-review.googlesource.com',
+ 'webrtc-review.googlesource.com',
}
KNOWN_HTTP_METHODS = {
- 'DELETE',
- 'GET',
- 'PATCH',
- 'POST',
- 'PUT',
+ 'DELETE',
+ 'GET',
+ 'PATCH',
+ 'POST',
+ 'PUT',
}
KNOWN_HTTP_PATHS = {
- 'accounts':
- re.compile(r'(/a)?/accounts/.*'),
- 'changes':
- re.compile(r'(/a)?/changes/([^/]+)?$'),
- 'changes/abandon':
- re.compile(r'(/a)?/changes/.*/abandon'),
- 'changes/comments':
- re.compile(r'(/a)?/changes/.*/comments'),
- 'changes/detail':
- re.compile(r'(/a)?/changes/.*/detail'),
- 'changes/edit':
- re.compile(r'(/a)?/changes/.*/edit'),
- 'changes/message':
- re.compile(r'(/a)?/changes/.*/message'),
- 'changes/restore':
- re.compile(r'(/a)?/changes/.*/restore'),
- 'changes/reviewers':
- re.compile(r'(/a)?/changes/.*/reviewers/.*'),
- 'changes/revisions/commit':
- re.compile(r'(/a)?/changes/.*/revisions/.*/commit'),
- 'changes/revisions/review':
- re.compile(r'(/a)?/changes/.*/revisions/.*/review'),
- 'changes/submit':
- re.compile(r'(/a)?/changes/.*/submit'),
- 'projects/branches':
- re.compile(r'(/a)?/projects/.*/branches/.*'),
+ 'accounts': re.compile(r'(/a)?/accounts/.*'),
+ 'changes': re.compile(r'(/a)?/changes/([^/]+)?$'),
+ 'changes/abandon': re.compile(r'(/a)?/changes/.*/abandon'),
+ 'changes/comments': re.compile(r'(/a)?/changes/.*/comments'),
+ 'changes/detail': re.compile(r'(/a)?/changes/.*/detail'),
+ 'changes/edit': re.compile(r'(/a)?/changes/.*/edit'),
+ 'changes/message': re.compile(r'(/a)?/changes/.*/message'),
+ 'changes/restore': re.compile(r'(/a)?/changes/.*/restore'),
+ 'changes/reviewers': re.compile(r'(/a)?/changes/.*/reviewers/.*'),
+ 'changes/revisions/commit':
+ re.compile(r'(/a)?/changes/.*/revisions/.*/commit'),
+ 'changes/revisions/review':
+ re.compile(r'(/a)?/changes/.*/revisions/.*/review'),
+ 'changes/submit': re.compile(r'(/a)?/changes/.*/submit'),
+ 'projects/branches': re.compile(r'(/a)?/projects/.*/branches/.*'),
}
KNOWN_HTTP_ARGS = {
- 'ALL_REVISIONS',
- 'CURRENT_COMMIT',
- 'CURRENT_REVISION',
- 'DETAILED_ACCOUNTS',
- 'LABELS',
+ 'ALL_REVISIONS',
+ 'CURRENT_COMMIT',
+ 'CURRENT_REVISION',
+ 'DETAILED_ACCOUNTS',
+ 'LABELS',
}
-GIT_VERSION_RE = re.compile(
- r'git version (\d)\.(\d{0,2})\.(\d{0,2})'
-)
+GIT_VERSION_RE = re.compile(r'git version (\d)\.(\d{0,2})\.(\d{0,2})')
KNOWN_SUBCOMMAND_ARGS = {
- 'cc',
- 'hashtag',
- 'l=Auto-Submit+1',
- 'l=Code-Review+1',
- 'l=Code-Review+2',
- 'l=Commit-Queue+1',
- 'l=Commit-Queue+2',
- 'label',
- 'm',
- 'notify=ALL',
- 'notify=NONE',
- 'private',
- 'r',
- 'ready',
- 'topic',
- 'wip'
+ 'cc', 'hashtag', 'l=Auto-Submit+1', 'l=Code-Review+1', 'l=Code-Review+2',
+ 'l=Commit-Queue+1', 'l=Commit-Queue+2', 'label', 'm', 'notify=ALL',
+ 'notify=NONE', 'private', 'r', 'ready', 'topic', 'wip'
}
def get_python_version():
- """Return the python version in the major.minor.micro format."""
- return '{v.major}.{v.minor}.{v.micro}'.format(v=sys.version_info)
+ """Return the python version in the major.minor.micro format."""
+ return '{v.major}.{v.minor}.{v.micro}'.format(v=sys.version_info)
def get_git_version():
- """Return the Git version in the major.minor.micro format."""
- p = subprocess2.Popen(
- ['git', '--version'],
- stdout=subprocess2.PIPE, stderr=subprocess2.PIPE)
- stdout, _ = p.communicate()
- match = GIT_VERSION_RE.match(stdout.decode('utf-8'))
- if not match:
- return None
- return '%s.%s.%s' % match.groups()
+ """Return the Git version in the major.minor.micro format."""
+ p = subprocess2.Popen(['git', '--version'],
+ stdout=subprocess2.PIPE,
+ stderr=subprocess2.PIPE)
+ stdout, _ = p.communicate()
+ match = GIT_VERSION_RE.match(stdout.decode('utf-8'))
+ if not match:
+ return None
+ return '%s.%s.%s' % match.groups()
def get_bot_metrics():
- try:
- project, bucket, builder, build = REPORT_BUILD.split('/')
- return {
- 'build_id': int(build),
- 'builder': {
- 'project': project,
- 'bucket': bucket,
- 'builder': builder,
- },
- }
- except (AttributeError, ValueError):
- return None
-
+ try:
+ project, bucket, builder, build = REPORT_BUILD.split('/')
+ return {
+ 'build_id': int(build),
+ 'builder': {
+ 'project': project,
+ 'bucket': bucket,
+ 'builder': builder,
+ },
+ }
+ except (AttributeError, ValueError):
+ return None
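For context, DEPOT_TOOLS_REPORT_BUILD is a slash-separated 'project/bucket/builder/build' tuple; a small, self-contained illustration of the parse performed by get_bot_metrics (the example value is hypothetical):

# Hypothetical value of the DEPOT_TOOLS_REPORT_BUILD environment variable.
report_build = 'chromium/ci/linux-rel/8765432100000000001'
project, bucket, builder, build = report_build.split('/')
bot_metrics = {
    'build_id': int(build),
    'builder': {
        'project': project,
        'bucket': bucket,
        'builder': builder,
    },
}
# bot_metrics now matches what get_bot_metrics() reports when the variable is set.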
def return_code_from_exception(exception):
- """Returns the exit code that would result from raising the exception."""
- if exception is None:
- return 0
- e = exception[1]
- if isinstance(e, KeyboardInterrupt):
- return 130
- if isinstance(e, SystemExit):
- return e.code
- return 1
+ """Returns the exit code that would result from raising the exception."""
+ if exception is None:
+ return 0
+ e = exception[1]
+ if isinstance(e, KeyboardInterrupt):
+ return 130
+ if isinstance(e, SystemExit):
+ return e.code
+ return 1
def extract_known_subcommand_args(args):
- """Extract the known arguments from the passed list of args."""
- known_args = []
- for arg in args:
- if arg in KNOWN_SUBCOMMAND_ARGS:
- known_args.append(arg)
- else:
- arg = arg.split('=')[0]
- if arg in KNOWN_SUBCOMMAND_ARGS:
- known_args.append(arg)
- return sorted(known_args)
+ """Extract the known arguments from the passed list of args."""
+ known_args = []
+ for arg in args:
+ if arg in KNOWN_SUBCOMMAND_ARGS:
+ known_args.append(arg)
+ else:
+ arg = arg.split('=')[0]
+ if arg in KNOWN_SUBCOMMAND_ARGS:
+ known_args.append(arg)
+ return sorted(known_args)
def extract_http_metrics(request_uri, method, status, response_time):
- """Extract metrics from the request URI.
+ """Extract metrics from the request URI.
Extracts the host, path, and arguments from the request URI, and returns them
along with the method, status and response time.
@@ -253,81 +227,82 @@ def extract_http_metrics(request_uri, method, status, response_time):
The regexes defined in KNOWN_HTTP_PATHS are checked against the path, and
those that match will be returned.
"""
- http_metrics = {
- 'status': status,
- 'response_time': response_time,
- }
+ http_metrics = {
+ 'status': status,
+ 'response_time': response_time,
+ }
- if method in KNOWN_HTTP_METHODS:
- http_metrics['method'] = method
+ if method in KNOWN_HTTP_METHODS:
+ http_metrics['method'] = method
- parsed_url = urllib.parse.urlparse(request_uri)
+ parsed_url = urllib.parse.urlparse(request_uri)
- if parsed_url.netloc in KNOWN_HTTP_HOSTS:
- http_metrics['host'] = parsed_url.netloc
+ if parsed_url.netloc in KNOWN_HTTP_HOSTS:
+ http_metrics['host'] = parsed_url.netloc
- for name, path_re in KNOWN_HTTP_PATHS.items():
- if path_re.match(parsed_url.path):
- http_metrics['path'] = name
- break
+ for name, path_re in KNOWN_HTTP_PATHS.items():
+ if path_re.match(parsed_url.path):
+ http_metrics['path'] = name
+ break
- parsed_query = urllib.parse.parse_qs(parsed_url.query)
+ parsed_query = urllib.parse.parse_qs(parsed_url.query)
- # Collect o-parameters from the request.
- args = [
- arg for arg in parsed_query.get('o', [])
- if arg in KNOWN_HTTP_ARGS
- ]
- if args:
- http_metrics['arguments'] = args
+ # Collect o-parameters from the request.
+ args = [arg for arg in parsed_query.get('o', []) if arg in KNOWN_HTTP_ARGS]
+ if args:
+ http_metrics['arguments'] = args
- return http_metrics
+ return http_metrics
def get_repo_timestamp(path_to_repo):
- """Get an approximate timestamp for the upstream of |path_to_repo|.
+ """Get an approximate timestamp for the upstream of |path_to_repo|.
Returns the top two bits of the timestamp of the HEAD for the upstream of the
branch path_to_repo is checked out at.
"""
- # Get the upstream for the current branch. If we're not in a branch, fallback
- # to HEAD.
- try:
- upstream = scm.GIT.GetUpstreamBranch(path_to_repo) or 'HEAD'
- except subprocess2.CalledProcessError:
- upstream = 'HEAD'
+ # Get the upstream for the current branch. If we're not in a branch,
+ # fallback to HEAD.
+ try:
+ upstream = scm.GIT.GetUpstreamBranch(path_to_repo) or 'HEAD'
+ except subprocess2.CalledProcessError:
+ upstream = 'HEAD'
- # Get the timestamp of the HEAD for the upstream of the current branch.
- p = subprocess2.Popen(
- ['git', '-C', path_to_repo, 'log', '-n1', upstream, '--format=%at'],
- stdout=subprocess2.PIPE, stderr=subprocess2.PIPE)
- stdout, _ = p.communicate()
+ # Get the timestamp of the HEAD for the upstream of the current branch.
+ p = subprocess2.Popen(
+ ['git', '-C', path_to_repo, 'log', '-n1', upstream, '--format=%at'],
+ stdout=subprocess2.PIPE,
+ stderr=subprocess2.PIPE)
+ stdout, _ = p.communicate()
- # If there was an error, give up.
- if p.returncode != 0:
- return None
+ # If there was an error, give up.
+ if p.returncode != 0:
+ return None
+
+ return stdout.strip()
- return stdout.strip()
def print_boxed_text(out, min_width, lines):
- [EW, NS, SE, SW, NE, NW] = list('=|++++')
- width = max(min_width, max(len(line) for line in lines))
- out(SE + EW * (width + 2) + SW + '\n')
- for line in lines:
- out('%s %-*s %s\n' % (NS, width, line, NS))
- out(NE + EW * (width + 2) + NW + '\n')
+ [EW, NS, SE, SW, NE, NW] = list('=|++++')
+ width = max(min_width, max(len(line) for line in lines))
+ out(SE + EW * (width + 2) + SW + '\n')
+ for line in lines:
+ out('%s %-*s %s\n' % (NS, width, line, NS))
+ out(NE + EW * (width + 2) + NW + '\n')
+
def print_notice(countdown):
- """Print a notice to let the user know the status of metrics collection."""
- lines = list(get_notice_countdown_header(countdown))
- lines.append('')
- lines += list(get_notice_footer())
- print_boxed_text(sys.stderr.write, 49, lines)
+ """Print a notice to let the user know the status of metrics collection."""
+ lines = list(get_notice_countdown_header(countdown))
+ lines.append('')
+ lines += list(get_notice_footer())
+ print_boxed_text(sys.stderr.write, 49, lines)
+
def print_version_change(config_version):
- """Print a notice to let the user know we are collecting more metrics."""
- lines = list(get_notice_version_change_header())
- for version in range(config_version + 1, CURRENT_VERSION + 1):
- lines.append('')
- lines += get_change_notice(version)
- print_boxed_text(sys.stderr.write, 49, lines)
+ """Print a notice to let the user know we are collecting more metrics."""
+ lines = list(get_notice_version_change_header())
+ for version in range(config_version + 1, CURRENT_VERSION + 1):
+ lines.append('')
+ lines += get_change_notice(version)
+ print_boxed_text(sys.stderr.write, 49, lines)
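As a reference point, a short sketch of what extract_http_metrics produces for a Gerrit REST request; the URI and timing are illustrative, and the import assumes depot_tools is on sys.path:

from metrics_utils import extract_http_metrics

http_metrics = extract_http_metrics(
    request_uri='https://chromium-review.googlesource.com/changes/?o=LABELS',
    method='GET',
    status=200,
    response_time=0.25)
# Only known hosts, methods, path patterns and o-parameters are recorded:
# {'status': 200, 'response_time': 0.25, 'method': 'GET',
#  'host': 'chromium-review.googlesource.com', 'path': 'changes',
#  'arguments': ['LABELS']}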
diff --git a/my_activity.py b/my_activity.py
index 8db0b13f69..b1824dba67 100755
--- a/my_activity.py
+++ b/my_activity.py
@@ -2,7 +2,6 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Get stats about your activity.
Example:
@@ -57,259 +56,271 @@ import gclient_utils
import gerrit_util
if sys.version_info.major == 2:
- logging.critical(
- 'Python 2 is not supported. Run my_activity.py using vpython3.')
-
+ logging.critical(
+ 'Python 2 is not supported. Run my_activity.py using vpython3.')
try:
- import dateutil # pylint: disable=import-error
- import dateutil.parser
- from dateutil.relativedelta import relativedelta
+ import dateutil # pylint: disable=import-error
+ import dateutil.parser
+ from dateutil.relativedelta import relativedelta
except ImportError:
- logging.error('python-dateutil package required')
- sys.exit(1)
+ logging.error('python-dateutil package required')
+ sys.exit(1)
class DefaultFormatter(Formatter):
- def __init__(self, default = ''):
- super(DefaultFormatter, self).__init__()
- self.default = default
+ def __init__(self, default=''):
+ super(DefaultFormatter, self).__init__()
+ self.default = default
- def get_value(self, key, args, kwargs):
- if isinstance(key, str) and key not in kwargs:
- return self.default
- return Formatter.get_value(self, key, args, kwargs)
+ def get_value(self, key, args, kwargs):
+ if isinstance(key, str) and key not in kwargs:
+ return self.default
+ return Formatter.get_value(self, key, args, kwargs)
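A quick, self-contained sketch of the DefaultFormatter behaviour defined above: any format field missing from the keyword arguments is rendered as the default (an empty string), which is why the --deltas and custom output formats tolerate absent fields:

from string import Formatter


class DefaultFormatter(Formatter):
    # Restated from my_activity.py above for a standalone demo.
    def __init__(self, default=''):
        super(DefaultFormatter, self).__init__()
        self.default = default

    def get_value(self, key, args, kwargs):
        if isinstance(key, str) and key not in kwargs:
            return self.default
        return Formatter.get_value(self, key, args, kwargs)


print(DefaultFormatter().format('+{insertions},-{deletions}', insertions=12))
# -> "+12,-"  (the missing 'deletions' field falls back to the default '')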
gerrit_instances = [
- {
- 'url': 'android-review.googlesource.com',
- 'shorturl': 'r.android.com',
- 'short_url_protocol': 'https',
- },
- {
- 'url': 'gerrit-review.googlesource.com',
- },
- {
- 'url': 'chrome-internal-review.googlesource.com',
- 'shorturl': 'crrev.com/i',
- 'short_url_protocol': 'https',
- },
- {
- 'url': 'chromium-review.googlesource.com',
- 'shorturl': 'crrev.com/c',
- 'short_url_protocol': 'https',
- },
- {
- 'url': 'dawn-review.googlesource.com',
- },
- {
- 'url': 'pdfium-review.googlesource.com',
- },
- {
- 'url': 'skia-review.googlesource.com',
- },
- {
- 'url': 'review.coreboot.org',
- },
+ {
+ 'url': 'android-review.googlesource.com',
+ 'shorturl': 'r.android.com',
+ 'short_url_protocol': 'https',
+ },
+ {
+ 'url': 'gerrit-review.googlesource.com',
+ },
+ {
+ 'url': 'chrome-internal-review.googlesource.com',
+ 'shorturl': 'crrev.com/i',
+ 'short_url_protocol': 'https',
+ },
+ {
+ 'url': 'chromium-review.googlesource.com',
+ 'shorturl': 'crrev.com/c',
+ 'short_url_protocol': 'https',
+ },
+ {
+ 'url': 'dawn-review.googlesource.com',
+ },
+ {
+ 'url': 'pdfium-review.googlesource.com',
+ },
+ {
+ 'url': 'skia-review.googlesource.com',
+ },
+ {
+ 'url': 'review.coreboot.org',
+ },
]
monorail_projects = {
- 'angleproject': {
- 'shorturl': 'anglebug.com',
- 'short_url_protocol': 'http',
- },
- 'chromium': {
- 'shorturl': 'crbug.com',
- 'short_url_protocol': 'https',
- },
- 'dawn': {},
- 'google-breakpad': {},
- 'gyp': {},
- 'pdfium': {
- 'shorturl': 'crbug.com/pdfium',
- 'short_url_protocol': 'https',
- },
- 'skia': {},
- 'tint': {},
- 'v8': {
- 'shorturl': 'crbug.com/v8',
- 'short_url_protocol': 'https',
- },
+ 'angleproject': {
+ 'shorturl': 'anglebug.com',
+ 'short_url_protocol': 'http',
+ },
+ 'chromium': {
+ 'shorturl': 'crbug.com',
+ 'short_url_protocol': 'https',
+ },
+ 'dawn': {},
+ 'google-breakpad': {},
+ 'gyp': {},
+ 'pdfium': {
+ 'shorturl': 'crbug.com/pdfium',
+ 'short_url_protocol': 'https',
+ },
+ 'skia': {},
+ 'tint': {},
+ 'v8': {
+ 'shorturl': 'crbug.com/v8',
+ 'short_url_protocol': 'https',
+ },
}
+
def username(email):
- """Keeps the username of an email address."""
- return email and email.split('@', 1)[0]
+ """Keeps the username of an email address."""
+ return email and email.split('@', 1)[0]
def datetime_to_midnight(date):
- return date - timedelta(hours=date.hour, minutes=date.minute,
- seconds=date.second, microseconds=date.microsecond)
+ return date - timedelta(hours=date.hour,
+ minutes=date.minute,
+ seconds=date.second,
+ microseconds=date.microsecond)
def get_quarter_of(date):
- begin = (datetime_to_midnight(date) -
- relativedelta(months=(date.month - 1) % 3, days=(date.day - 1)))
- return begin, begin + relativedelta(months=3)
+ begin = (datetime_to_midnight(date) -
+ relativedelta(months=(date.month - 1) % 3, days=(date.day - 1)))
+ return begin, begin + relativedelta(months=3)
def get_year_of(date):
- begin = (datetime_to_midnight(date) -
- relativedelta(months=(date.month - 1), days=(date.day - 1)))
- return begin, begin + relativedelta(years=1)
+ begin = (datetime_to_midnight(date) -
+ relativedelta(months=(date.month - 1), days=(date.day - 1)))
+ return begin, begin + relativedelta(years=1)
def get_week_of(date):
- begin = (datetime_to_midnight(date) - timedelta(days=date.weekday()))
- return begin, begin + timedelta(days=7)
+ begin = (datetime_to_midnight(date) - timedelta(days=date.weekday()))
+ return begin, begin + timedelta(days=7)
def get_yes_or_no(msg):
- while True:
- response = gclient_utils.AskForData(msg + ' yes/no [no] ')
- if response in ('y', 'yes'):
- return True
+ while True:
+ response = gclient_utils.AskForData(msg + ' yes/no [no] ')
+ if response in ('y', 'yes'):
+ return True
- if not response or response in ('n', 'no'):
- return False
+ if not response or response in ('n', 'no'):
+ return False
def datetime_from_gerrit(date_string):
- return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f000')
+ return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f000')
def datetime_from_monorail(date_string):
- return datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S')
+ return datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S')
+
def extract_bug_numbers_from_description(issue):
- # Getting the description for REST Gerrit
- revision = issue['revisions'][issue['current_revision']]
- description = revision['commit']['message']
+ # Getting the description for REST Gerrit
+ revision = issue['revisions'][issue['current_revision']]
+ description = revision['commit']['message']
- bugs = []
- # Handle both "Bug: 99999" and "BUG=99999" bug notations
- # Multiple bugs can be noted on a single line or in multiple ones.
- matches = re.findall(
- r'^(BUG=|(Bug|Fixed):\s*)((((?:[a-zA-Z0-9-]+:)?\d+)(,\s?)?)+)',
- description, flags=re.IGNORECASE | re.MULTILINE)
- if matches:
- for match in matches:
- bugs.extend(match[2].replace(' ', '').split(','))
- # Add default chromium: prefix if none specified.
- bugs = [bug if ':' in bug else 'chromium:%s' % bug for bug in bugs]
+ bugs = []
+ # Handle both "Bug: 99999" and "BUG=99999" bug notations
+ # Multiple bugs can be noted on a single line or in multiple ones.
+ matches = re.findall(
+ r'^(BUG=|(Bug|Fixed):\s*)((((?:[a-zA-Z0-9-]+:)?\d+)(,\s?)?)+)',
+ description,
+ flags=re.IGNORECASE | re.MULTILINE)
+ if matches:
+ for match in matches:
+ bugs.extend(match[2].replace(' ', '').split(','))
+ # Add default chromium: prefix if none specified.
+ bugs = [bug if ':' in bug else 'chromium:%s' % bug for bug in bugs]
+
+ return sorted(set(bugs))
- return sorted(set(bugs))
class MyActivity(object):
- def __init__(self, options):
- self.options = options
- self.modified_after = options.begin
- self.modified_before = options.end
- self.user = options.user
- self.changes = []
- self.reviews = []
- self.issues = []
- self.referenced_issues = []
- self.google_code_auth_token = None
- self.access_errors = set()
- self.skip_servers = (options.skip_servers.split(','))
+ def __init__(self, options):
+ self.options = options
+ self.modified_after = options.begin
+ self.modified_before = options.end
+ self.user = options.user
+ self.changes = []
+ self.reviews = []
+ self.issues = []
+ self.referenced_issues = []
+ self.google_code_auth_token = None
+ self.access_errors = set()
+ self.skip_servers = (options.skip_servers.split(','))
- def show_progress(self, how='.'):
- if sys.stdout.isatty():
- sys.stdout.write(how)
- sys.stdout.flush()
+ def show_progress(self, how='.'):
+ if sys.stdout.isatty():
+ sys.stdout.write(how)
+ sys.stdout.flush()
- def gerrit_changes_over_rest(self, instance, filters):
- # Convert the "key:value" filter to a list of (key, value) pairs.
- req = list(f.split(':', 1) for f in filters)
- try:
- # Instantiate the generator to force all the requests now and catch the
- # errors here.
- return list(gerrit_util.GenerateAllChanges(instance['url'], req,
- o_params=['MESSAGES', 'LABELS', 'DETAILED_ACCOUNTS',
- 'CURRENT_REVISION', 'CURRENT_COMMIT']))
- except gerrit_util.GerritError as e:
- error_message = 'Looking up %r: %s' % (instance['url'], e)
- if error_message not in self.access_errors:
- self.access_errors.add(error_message)
- return []
+ def gerrit_changes_over_rest(self, instance, filters):
+ # Convert the "key:value" filter to a list of (key, value) pairs.
+ req = list(f.split(':', 1) for f in filters)
+ try:
+ # Instantiate the generator to force all the requests now and catch
+ # the errors here.
+ return list(
+ gerrit_util.GenerateAllChanges(instance['url'],
+ req,
+ o_params=[
+ 'MESSAGES', 'LABELS',
+ 'DETAILED_ACCOUNTS',
+ 'CURRENT_REVISION',
+ 'CURRENT_COMMIT'
+ ]))
+ except gerrit_util.GerritError as e:
+ error_message = 'Looking up %r: %s' % (instance['url'], e)
+ if error_message not in self.access_errors:
+ self.access_errors.add(error_message)
+ return []
- def gerrit_search(self, instance, owner=None, reviewer=None):
- if instance['url'] in self.skip_servers:
- return []
- max_age = datetime.today() - self.modified_after
- filters = ['-age:%ss' % (max_age.days * 24 * 3600 + max_age.seconds)]
- if owner:
- assert not reviewer
- filters.append('owner:%s' % owner)
- else:
- filters.extend(('-owner:%s' % reviewer, 'reviewer:%s' % reviewer))
- # TODO(cjhopman): Should abandoned changes be filtered out when
- # merged_only is not enabled?
- if self.options.merged_only:
- filters.append('status:merged')
+ def gerrit_search(self, instance, owner=None, reviewer=None):
+ if instance['url'] in self.skip_servers:
+ return []
+ max_age = datetime.today() - self.modified_after
+ filters = ['-age:%ss' % (max_age.days * 24 * 3600 + max_age.seconds)]
+ if owner:
+ assert not reviewer
+ filters.append('owner:%s' % owner)
+ else:
+ filters.extend(('-owner:%s' % reviewer, 'reviewer:%s' % reviewer))
+ # TODO(cjhopman): Should abandoned changes be filtered out when
+ # merged_only is not enabled?
+ if self.options.merged_only:
+ filters.append('status:merged')
- issues = self.gerrit_changes_over_rest(instance, filters)
- self.show_progress()
- issues = [self.process_gerrit_issue(instance, issue)
- for issue in issues]
+ issues = self.gerrit_changes_over_rest(instance, filters)
+ self.show_progress()
+ issues = [
+ self.process_gerrit_issue(instance, issue) for issue in issues
+ ]
- issues = filter(self.filter_issue, issues)
- issues = sorted(issues, key=lambda i: i['modified'], reverse=True)
+ issues = filter(self.filter_issue, issues)
+ issues = sorted(issues, key=lambda i: i['modified'], reverse=True)
- return issues
+ return issues
- def process_gerrit_issue(self, instance, issue):
- ret = {}
- if self.options.deltas:
- ret['delta'] = DefaultFormatter().format(
- '+{insertions},-{deletions}',
- **issue)
- ret['status'] = issue['status']
- if 'shorturl' in instance:
- protocol = instance.get('short_url_protocol', 'http')
- url = instance['shorturl']
- else:
- protocol = 'https'
- url = instance['url']
- ret['review_url'] = '%s://%s/%s' % (protocol, url, issue['_number'])
+ def process_gerrit_issue(self, instance, issue):
+ ret = {}
+ if self.options.deltas:
+ ret['delta'] = DefaultFormatter().format(
+ '+{insertions},-{deletions}', **issue)
+ ret['status'] = issue['status']
+ if 'shorturl' in instance:
+ protocol = instance.get('short_url_protocol', 'http')
+ url = instance['shorturl']
+ else:
+ protocol = 'https'
+ url = instance['url']
+ ret['review_url'] = '%s://%s/%s' % (protocol, url, issue['_number'])
- ret['header'] = issue['subject']
- ret['owner'] = issue['owner'].get('email', '')
- ret['author'] = ret['owner']
- ret['created'] = datetime_from_gerrit(issue['created'])
- ret['modified'] = datetime_from_gerrit(issue['updated'])
- if 'messages' in issue:
- ret['replies'] = self.process_gerrit_issue_replies(issue['messages'])
- else:
- ret['replies'] = []
- ret['reviewers'] = set(r['author'] for r in ret['replies'])
- ret['reviewers'].discard(ret['author'])
- ret['bugs'] = extract_bug_numbers_from_description(issue)
- return ret
+ ret['header'] = issue['subject']
+ ret['owner'] = issue['owner'].get('email', '')
+ ret['author'] = ret['owner']
+ ret['created'] = datetime_from_gerrit(issue['created'])
+ ret['modified'] = datetime_from_gerrit(issue['updated'])
+ if 'messages' in issue:
+ ret['replies'] = self.process_gerrit_issue_replies(
+ issue['messages'])
+ else:
+ ret['replies'] = []
+ ret['reviewers'] = set(r['author'] for r in ret['replies'])
+ ret['reviewers'].discard(ret['author'])
+ ret['bugs'] = extract_bug_numbers_from_description(issue)
+ return ret
- @staticmethod
- def process_gerrit_issue_replies(replies):
- ret = []
- replies = filter(lambda r: 'author' in r and 'email' in r['author'],
- replies)
- for reply in replies:
- ret.append({
- 'author': reply['author']['email'],
- 'created': datetime_from_gerrit(reply['date']),
- 'content': reply['message'],
- })
- return ret
+ @staticmethod
+ def process_gerrit_issue_replies(replies):
+ ret = []
+ replies = filter(lambda r: 'author' in r and 'email' in r['author'],
+ replies)
+ for reply in replies:
+ ret.append({
+ 'author': reply['author']['email'],
+ 'created': datetime_from_gerrit(reply['date']),
+ 'content': reply['message'],
+ })
+ return ret
- def monorail_get_auth_http(self):
- # Manually use a long timeout (10m); for some users who have a
- # long history on the issue tracker, whatever the default timeout
- # is is reached.
- return auth.Authenticator().authorize(httplib2.Http(timeout=600))
+ def monorail_get_auth_http(self):
+ # Manually use a long timeout (10m); for some users who have a
+ # long history on the issue tracker, whatever the default timeout
+ # is is reached.
+ return auth.Authenticator().authorize(httplib2.Http(timeout=600))
- def filter_modified_monorail_issue(self, issue):
- """Precisely checks if an issue has been modified in the time range.
+ def filter_modified_monorail_issue(self, issue):
+ """Precisely checks if an issue has been modified in the time range.
This fetches all issue comments to check if the issue has been modified in
the time range specified by the user. This is needed because monorail only
@@ -324,682 +335,719 @@ class MyActivity(object):
Returns:
Passed issue if modified, None otherwise.
"""
- http = self.monorail_get_auth_http()
- project, issue_id = issue['uid'].split(':')
- url = ('https://monorail-prod.appspot.com/_ah/api/monorail/v1/projects'
- '/%s/issues/%s/comments?maxResults=10000') % (project, issue_id)
- _, body = http.request(url)
- self.show_progress()
- content = json.loads(body)
- if not content:
- logging.error('Unable to parse %s response from monorail.', project)
- return issue
+ http = self.monorail_get_auth_http()
+ project, issue_id = issue['uid'].split(':')
+ url = ('https://monorail-prod.appspot.com/_ah/api/monorail/v1/projects'
+ '/%s/issues/%s/comments?maxResults=10000') % (project, issue_id)
+ _, body = http.request(url)
+ self.show_progress()
+ content = json.loads(body)
+ if not content:
+ logging.error('Unable to parse %s response from monorail.', project)
+ return issue
- for item in content.get('items', []):
- comment_published = datetime_from_monorail(item['published'])
- if self.filter_modified(comment_published):
- return issue
+ for item in content.get('items', []):
+ comment_published = datetime_from_monorail(item['published'])
+ if self.filter_modified(comment_published):
+ return issue
- return None
+ return None
- def monorail_query_issues(self, project, query):
- http = self.monorail_get_auth_http()
- url = ('https://monorail-prod.appspot.com/_ah/api/monorail/v1/projects'
- '/%s/issues') % project
- query_data = urllib.parse.urlencode(query)
- url = url + '?' + query_data
- _, body = http.request(url)
- self.show_progress()
- content = json.loads(body)
- if not content:
- logging.error('Unable to parse %s response from monorail.', project)
- return []
+ def monorail_query_issues(self, project, query):
+ http = self.monorail_get_auth_http()
+ url = ('https://monorail-prod.appspot.com/_ah/api/monorail/v1/projects'
+ '/%s/issues') % project
+ query_data = urllib.parse.urlencode(query)
+ url = url + '?' + query_data
+ _, body = http.request(url)
+ self.show_progress()
+ content = json.loads(body)
+ if not content:
+ logging.error('Unable to parse %s response from monorail.', project)
+ return []
- issues = []
- project_config = monorail_projects.get(project, {})
- for item in content.get('items', []):
- if project_config.get('shorturl'):
- protocol = project_config.get('short_url_protocol', 'http')
- item_url = '%s://%s/%d' % (
- protocol, project_config['shorturl'], item['id'])
- else:
- item_url = 'https://bugs.chromium.org/p/%s/issues/detail?id=%d' % (
- project, item['id'])
- issue = {
- 'uid': '%s:%s' % (project, item['id']),
- 'header': item['title'],
- 'created': datetime_from_monorail(item['published']),
- 'modified': datetime_from_monorail(item['updated']),
- 'author': item['author']['name'],
- 'url': item_url,
- 'comments': [],
- 'status': item['status'],
- 'labels': [],
- 'components': []
- }
- if 'owner' in item:
- issue['owner'] = item['owner']['name']
- else:
- issue['owner'] = 'None'
- if 'labels' in item:
- issue['labels'] = item['labels']
- if 'components' in item:
- issue['components'] = item['components']
- issues.append(issue)
+ issues = []
+ project_config = monorail_projects.get(project, {})
+ for item in content.get('items', []):
+ if project_config.get('shorturl'):
+ protocol = project_config.get('short_url_protocol', 'http')
+ item_url = '%s://%s/%d' % (protocol, project_config['shorturl'],
+ item['id'])
+ else:
+ item_url = (
+ 'https://bugs.chromium.org/p/%s/issues/detail?id=%d' %
+ (project, item['id']))
+ issue = {
+ 'uid': '%s:%s' % (project, item['id']),
+ 'header': item['title'],
+ 'created': datetime_from_monorail(item['published']),
+ 'modified': datetime_from_monorail(item['updated']),
+ 'author': item['author']['name'],
+ 'url': item_url,
+ 'comments': [],
+ 'status': item['status'],
+ 'labels': [],
+ 'components': []
+ }
+ if 'owner' in item:
+ issue['owner'] = item['owner']['name']
+ else:
+ issue['owner'] = 'None'
+ if 'labels' in item:
+ issue['labels'] = item['labels']
+ if 'components' in item:
+ issue['components'] = item['components']
+ issues.append(issue)
- return issues
+ return issues
- def monorail_issue_search(self, project):
- epoch = datetime.utcfromtimestamp(0)
- # Defaults to @chromium.org email if one wasn't provided on -u option.
- user_str = (self.options.email if self.options.email.find('@') >= 0
- else '%s@chromium.org' % self.user)
+ def monorail_issue_search(self, project):
+ epoch = datetime.utcfromtimestamp(0)
+ # Defaults to @chromium.org email if one wasn't provided on -u option.
+ user_str = (self.options.email if self.options.email.find('@') >= 0 else
+ '%s@chromium.org' % self.user)
- issues = self.monorail_query_issues(project, {
- 'maxResults': 10000,
- 'q': user_str,
- 'publishedMax': '%d' % (self.modified_before - epoch).total_seconds(),
- 'updatedMin': '%d' % (self.modified_after - epoch).total_seconds(),
- })
+ issues = self.monorail_query_issues(
+ project, {
+ 'maxResults':
+ 10000,
+ 'q':
+ user_str,
+ 'publishedMax':
+ '%d' % (self.modified_before - epoch).total_seconds(),
+ 'updatedMin':
+ '%d' % (self.modified_after - epoch).total_seconds(),
+ })
- if self.options.completed_issues:
- return [
- issue for issue in issues
- if (self.match(issue['owner']) and
- issue['status'].lower() in ('verified', 'fixed'))
- ]
+ if self.options.completed_issues:
+ return [
+ issue for issue in issues
+ if (self.match(issue['owner']) and issue['status'].lower() in (
+ 'verified', 'fixed'))
+ ]
- return [
- issue for issue in issues
- if user_str in (issue['author'], issue['owner'])]
+ return [
+ issue for issue in issues
+ if user_str in (issue['author'], issue['owner'])
+ ]
- def monorail_get_issues(self, project, issue_ids):
- return self.monorail_query_issues(project, {
- 'maxResults': 10000,
- 'q': 'id:%s' % ','.join(issue_ids)
- })
+ def monorail_get_issues(self, project, issue_ids):
+ return self.monorail_query_issues(project, {
+ 'maxResults': 10000,
+ 'q': 'id:%s' % ','.join(issue_ids)
+ })
- def print_heading(self, heading):
- print()
- print(self.options.output_format_heading.format(heading=heading))
-
- def match(self, author):
- if '@' in self.user:
- return author == self.user
- return author.startswith(self.user + '@')
-
- def print_change(self, change):
- activity = len([
- reply
- for reply in change['replies']
- if self.match(reply['author'])
- ])
- optional_values = {
- 'created': change['created'].date().isoformat(),
- 'modified': change['modified'].date().isoformat(),
- 'reviewers': ', '.join(change['reviewers']),
- 'status': change['status'],
- 'activity': activity,
- }
- if self.options.deltas:
- optional_values['delta'] = change['delta']
-
- self.print_generic(self.options.output_format,
- self.options.output_format_changes,
- change['header'],
- change['review_url'],
- change['author'],
- change['created'],
- change['modified'],
- optional_values)
-
- def print_issue(self, issue):
- optional_values = {
- 'created': issue['created'].date().isoformat(),
- 'modified': issue['modified'].date().isoformat(),
- 'owner': issue['owner'],
- 'status': issue['status'],
- }
- self.print_generic(self.options.output_format,
- self.options.output_format_issues,
- issue['header'],
- issue['url'],
- issue['author'],
- issue['created'],
- issue['modified'],
- optional_values)
-
- def print_review(self, review):
- activity = len([
- reply
- for reply in review['replies']
- if self.match(reply['author'])
- ])
- optional_values = {
- 'created': review['created'].date().isoformat(),
- 'modified': review['modified'].date().isoformat(),
- 'status': review['status'],
- 'activity': activity,
- }
- if self.options.deltas:
- optional_values['delta'] = review['delta']
-
- self.print_generic(self.options.output_format,
- self.options.output_format_reviews,
- review['header'],
- review['review_url'],
- review['author'],
- review['created'],
- review['modified'],
- optional_values)
-
- @staticmethod
- def print_generic(default_fmt, specific_fmt,
- title, url, author, created, modified,
- optional_values=None):
- output_format = specific_fmt if specific_fmt is not None else default_fmt
- values = {
- 'title': title,
- 'url': url,
- 'author': author,
- 'created': created,
- 'modified': modified,
- }
- if optional_values is not None:
- values.update(optional_values)
- print(DefaultFormatter().format(output_format, **values))
-
-
- def filter_issue(self, issue, should_filter_by_user=True):
- def maybe_filter_username(email):
- return not should_filter_by_user or username(email) == self.user
- if (maybe_filter_username(issue['author']) and
- self.filter_modified(issue['created'])):
- return True
- if (maybe_filter_username(issue['owner']) and
- (self.filter_modified(issue['created']) or
- self.filter_modified(issue['modified']))):
- return True
- for reply in issue['replies']:
- if self.filter_modified(reply['created']):
- if not should_filter_by_user:
- break
- if (username(reply['author']) == self.user
- or (self.user + '@') in reply['content']):
- break
- else:
- return False
- return True
-
- def filter_modified(self, modified):
- return self.modified_after < modified < self.modified_before
-
- def auth_for_changes(self):
- #TODO(cjhopman): Move authentication check for getting changes here.
- pass
-
- def auth_for_reviews(self):
- # Reviews use all the same instances as changes so no authentication is
- # required.
- pass
-
- def get_changes(self):
- num_instances = len(gerrit_instances)
- with contextlib.closing(ThreadPool(num_instances)) as pool:
- gerrit_changes = pool.map_async(
- lambda instance: self.gerrit_search(instance, owner=self.user),
- gerrit_instances)
- gerrit_changes = itertools.chain.from_iterable(gerrit_changes.get())
- self.changes = list(gerrit_changes)
-
- def print_changes(self):
- if self.changes:
- self.print_heading('Changes')
- for change in self.changes:
- self.print_change(change)
-
- def print_access_errors(self):
- if self.access_errors:
- logging.error('Access Errors:')
- for error in self.access_errors:
- logging.error(error.rstrip())
-
- def get_reviews(self):
- num_instances = len(gerrit_instances)
- with contextlib.closing(ThreadPool(num_instances)) as pool:
- gerrit_reviews = pool.map_async(
- lambda instance: self.gerrit_search(instance, reviewer=self.user),
- gerrit_instances)
- gerrit_reviews = itertools.chain.from_iterable(gerrit_reviews.get())
- self.reviews = list(gerrit_reviews)
-
- def print_reviews(self):
- if self.reviews:
- self.print_heading('Reviews')
- for review in self.reviews:
- self.print_review(review)
-
- def get_issues(self):
- with contextlib.closing(ThreadPool(len(monorail_projects))) as pool:
- monorail_issues = pool.map(
- self.monorail_issue_search, monorail_projects.keys())
- monorail_issues = list(itertools.chain.from_iterable(monorail_issues))
-
- if not monorail_issues:
- return
-
- with contextlib.closing(ThreadPool(len(monorail_issues))) as pool:
- filtered_issues = pool.map(
- self.filter_modified_monorail_issue, monorail_issues)
- self.issues = [issue for issue in filtered_issues if issue]
-
- def get_referenced_issues(self):
- if not self.issues:
- self.get_issues()
-
- if not self.changes:
- self.get_changes()
-
- referenced_issue_uids = set(itertools.chain.from_iterable(
- change['bugs'] for change in self.changes))
- fetched_issue_uids = set(issue['uid'] for issue in self.issues)
- missing_issue_uids = referenced_issue_uids - fetched_issue_uids
-
- missing_issues_by_project = collections.defaultdict(list)
- for issue_uid in missing_issue_uids:
- project, issue_id = issue_uid.split(':')
- missing_issues_by_project[project].append(issue_id)
-
- for project, issue_ids in missing_issues_by_project.items():
- self.referenced_issues += self.monorail_get_issues(project, issue_ids)
-
- def print_issues(self):
- if self.issues:
- self.print_heading('Issues')
- for issue in self.issues:
- self.print_issue(issue)
-
- def print_changes_by_issue(self, skip_empty_own):
- if not self.issues or not self.changes:
- return
-
- self.print_heading('Changes by referenced issue(s)')
- issues = {issue['uid']: issue for issue in self.issues}
- ref_issues = {issue['uid']: issue for issue in self.referenced_issues}
- changes_by_issue_uid = collections.defaultdict(list)
- changes_by_ref_issue_uid = collections.defaultdict(list)
- changes_without_issue = []
- for change in self.changes:
- added = False
- for issue_uid in change['bugs']:
- if issue_uid in issues:
- changes_by_issue_uid[issue_uid].append(change)
- added = True
- if issue_uid in ref_issues:
- changes_by_ref_issue_uid[issue_uid].append(change)
- added = True
- if not added:
- changes_without_issue.append(change)
-
- # Changes referencing own issues.
- for issue_uid in issues:
- if changes_by_issue_uid[issue_uid] or not skip_empty_own:
- self.print_issue(issues[issue_uid])
- if changes_by_issue_uid[issue_uid]:
- print()
- for change in changes_by_issue_uid[issue_uid]:
- print(' ', end='') # this prints no newline
- self.print_change(change)
+ def print_heading(self, heading):
print()
+ print(self.options.output_format_heading.format(heading=heading))
- # Changes referencing others' issues.
- for issue_uid in ref_issues:
- assert changes_by_ref_issue_uid[issue_uid]
- self.print_issue(ref_issues[issue_uid])
- for change in changes_by_ref_issue_uid[issue_uid]:
- print('', end=' ') # this prints one space due to comma, but no newline
- self.print_change(change)
+ def match(self, author):
+ if '@' in self.user:
+ return author == self.user
+ return author.startswith(self.user + '@')
- # Changes referencing no issues.
- if changes_without_issue:
- print(self.options.output_format_no_url.format(title='Other changes'))
- for change in changes_without_issue:
- print('', end=' ') # this prints one space due to comma, but no newline
- self.print_change(change)
+ def print_change(self, change):
+ activity = len([
+ reply for reply in change['replies'] if self.match(reply['author'])
+ ])
+ optional_values = {
+ 'created': change['created'].date().isoformat(),
+ 'modified': change['modified'].date().isoformat(),
+ 'reviewers': ', '.join(change['reviewers']),
+ 'status': change['status'],
+ 'activity': activity,
+ }
+ if self.options.deltas:
+ optional_values['delta'] = change['delta']
- def print_activity(self):
- self.print_changes()
- self.print_reviews()
- self.print_issues()
+ self.print_generic(self.options.output_format,
+ self.options.output_format_changes, change['header'],
+ change['review_url'], change['author'],
+ change['created'], change['modified'],
+ optional_values)
- def dump_json(self, ignore_keys=None):
- if ignore_keys is None:
- ignore_keys = ['replies']
+ def print_issue(self, issue):
+ optional_values = {
+ 'created': issue['created'].date().isoformat(),
+ 'modified': issue['modified'].date().isoformat(),
+ 'owner': issue['owner'],
+ 'status': issue['status'],
+ }
+ self.print_generic(self.options.output_format,
+ self.options.output_format_issues, issue['header'],
+ issue['url'], issue['author'], issue['created'],
+ issue['modified'], optional_values)
- def format_for_json_dump(in_array):
- output = {}
- for item in in_array:
- url = item.get('url') or item.get('review_url')
- if not url:
- raise Exception('Dumped item %s does not specify url' % item)
- output[url] = dict(
- (k, v) for k,v in item.items() if k not in ignore_keys)
- return output
+ def print_review(self, review):
+ activity = len([
+ reply for reply in review['replies'] if self.match(reply['author'])
+ ])
+ optional_values = {
+ 'created': review['created'].date().isoformat(),
+ 'modified': review['modified'].date().isoformat(),
+ 'status': review['status'],
+ 'activity': activity,
+ }
+ if self.options.deltas:
+ optional_values['delta'] = review['delta']
- class PythonObjectEncoder(json.JSONEncoder):
- def default(self, o): # pylint: disable=method-hidden
- if isinstance(o, datetime):
- return o.isoformat()
- if isinstance(o, set):
- return list(o)
- return json.JSONEncoder.default(self, o)
+ self.print_generic(self.options.output_format,
+ self.options.output_format_reviews, review['header'],
+ review['review_url'], review['author'],
+ review['created'], review['modified'],
+ optional_values)
- output = {
- 'reviews': format_for_json_dump(self.reviews),
- 'changes': format_for_json_dump(self.changes),
- 'issues': format_for_json_dump(self.issues)
- }
- print(json.dumps(output, indent=2, cls=PythonObjectEncoder))
+ @staticmethod
+ def print_generic(default_fmt,
+ specific_fmt,
+ title,
+ url,
+ author,
+ created,
+ modified,
+ optional_values=None):
+ output_format = (specific_fmt
+ if specific_fmt is not None else default_fmt)
+ values = {
+ 'title': title,
+ 'url': url,
+ 'author': author,
+ 'created': created,
+ 'modified': modified,
+ }
+ if optional_values is not None:
+ values.update(optional_values)
+ print(DefaultFormatter().format(output_format, **values))
+
+ def filter_issue(self, issue, should_filter_by_user=True):
+ def maybe_filter_username(email):
+ return not should_filter_by_user or username(email) == self.user
+
+ if (maybe_filter_username(issue['author'])
+ and self.filter_modified(issue['created'])):
+ return True
+ if (maybe_filter_username(issue['owner'])
+ and (self.filter_modified(issue['created'])
+ or self.filter_modified(issue['modified']))):
+ return True
+ for reply in issue['replies']:
+ if self.filter_modified(reply['created']):
+ if not should_filter_by_user:
+ break
+ if (username(reply['author']) == self.user
+ or (self.user + '@') in reply['content']):
+ break
+ else:
+ return False
+ return True
+
+ def filter_modified(self, modified):
+ return self.modified_after < modified < self.modified_before
+
+ def auth_for_changes(self):
+ #TODO(cjhopman): Move authentication check for getting changes here.
+ pass
+
+ def auth_for_reviews(self):
+ # Reviews use all the same instances as changes so no authentication is
+ # required.
+ pass
+
+ def get_changes(self):
+ num_instances = len(gerrit_instances)
+ with contextlib.closing(ThreadPool(num_instances)) as pool:
+ gerrit_changes = pool.map_async(
+ lambda instance: self.gerrit_search(instance, owner=self.user),
+ gerrit_instances)
+ gerrit_changes = itertools.chain.from_iterable(gerrit_changes.get())
+ self.changes = list(gerrit_changes)
+
+ def print_changes(self):
+ if self.changes:
+ self.print_heading('Changes')
+ for change in self.changes:
+ self.print_change(change)
+
+ def print_access_errors(self):
+ if self.access_errors:
+ logging.error('Access Errors:')
+ for error in self.access_errors:
+ logging.error(error.rstrip())
+
+ def get_reviews(self):
+ num_instances = len(gerrit_instances)
+ with contextlib.closing(ThreadPool(num_instances)) as pool:
+ gerrit_reviews = pool.map_async(
+ lambda instance: self.gerrit_search(instance,
+ reviewer=self.user),
+ gerrit_instances)
+ gerrit_reviews = itertools.chain.from_iterable(gerrit_reviews.get())
+ self.reviews = list(gerrit_reviews)
+
+ def print_reviews(self):
+ if self.reviews:
+ self.print_heading('Reviews')
+ for review in self.reviews:
+ self.print_review(review)
+
+ def get_issues(self):
+ with contextlib.closing(ThreadPool(len(monorail_projects))) as pool:
+ monorail_issues = pool.map(self.monorail_issue_search,
+ monorail_projects.keys())
+ monorail_issues = list(
+ itertools.chain.from_iterable(monorail_issues))
+
+ if not monorail_issues:
+ return
+
+ with contextlib.closing(ThreadPool(len(monorail_issues))) as pool:
+ filtered_issues = pool.map(self.filter_modified_monorail_issue,
+ monorail_issues)
+ self.issues = [issue for issue in filtered_issues if issue]
+
+ def get_referenced_issues(self):
+ if not self.issues:
+ self.get_issues()
+
+ if not self.changes:
+ self.get_changes()
+
+ referenced_issue_uids = set(
+ itertools.chain.from_iterable(change['bugs']
+ for change in self.changes))
+ fetched_issue_uids = set(issue['uid'] for issue in self.issues)
+ missing_issue_uids = referenced_issue_uids - fetched_issue_uids
+
+ missing_issues_by_project = collections.defaultdict(list)
+ for issue_uid in missing_issue_uids:
+ project, issue_id = issue_uid.split(':')
+ missing_issues_by_project[project].append(issue_id)
+
+ for project, issue_ids in missing_issues_by_project.items():
+ self.referenced_issues += self.monorail_get_issues(
+ project, issue_ids)
+
+ def print_issues(self):
+ if self.issues:
+ self.print_heading('Issues')
+ for issue in self.issues:
+ self.print_issue(issue)
+
+ def print_changes_by_issue(self, skip_empty_own):
+ if not self.issues or not self.changes:
+ return
+
+ self.print_heading('Changes by referenced issue(s)')
+ issues = {issue['uid']: issue for issue in self.issues}
+ ref_issues = {issue['uid']: issue for issue in self.referenced_issues}
+ changes_by_issue_uid = collections.defaultdict(list)
+ changes_by_ref_issue_uid = collections.defaultdict(list)
+ changes_without_issue = []
+ for change in self.changes:
+ added = False
+ for issue_uid in change['bugs']:
+ if issue_uid in issues:
+ changes_by_issue_uid[issue_uid].append(change)
+ added = True
+ if issue_uid in ref_issues:
+ changes_by_ref_issue_uid[issue_uid].append(change)
+ added = True
+ if not added:
+ changes_without_issue.append(change)
+
+ # Changes referencing own issues.
+ for issue_uid in issues:
+ if changes_by_issue_uid[issue_uid] or not skip_empty_own:
+ self.print_issue(issues[issue_uid])
+ if changes_by_issue_uid[issue_uid]:
+ print()
+ for change in changes_by_issue_uid[issue_uid]:
+ print(' ', end='') # this prints no newline
+ self.print_change(change)
+ print()
+
+ # Changes referencing others' issues.
+ for issue_uid in ref_issues:
+ assert changes_by_ref_issue_uid[issue_uid]
+ self.print_issue(ref_issues[issue_uid])
+ for change in changes_by_ref_issue_uid[issue_uid]:
+ print('', end=' '
+ ) # this prints one space due to comma, but no newline
+ self.print_change(change)
+
+ # Changes referencing no issues.
+ if changes_without_issue:
+ print(
+ self.options.output_format_no_url.format(title='Other changes'))
+ for change in changes_without_issue:
+ print('', end=' '
+ ) # this prints one space due to comma, but no newline
+ self.print_change(change)
+
+ def print_activity(self):
+ self.print_changes()
+ self.print_reviews()
+ self.print_issues()
+
+ def dump_json(self, ignore_keys=None):
+ if ignore_keys is None:
+ ignore_keys = ['replies']
+
+ def format_for_json_dump(in_array):
+ output = {}
+ for item in in_array:
+ url = item.get('url') or item.get('review_url')
+ if not url:
+ raise Exception('Dumped item %s does not specify url' %
+ item)
+ output[url] = dict(
+ (k, v) for k, v in item.items() if k not in ignore_keys)
+ return output
+
+ class PythonObjectEncoder(json.JSONEncoder):
+ def default(self, o): # pylint: disable=method-hidden
+ if isinstance(o, datetime):
+ return o.isoformat()
+ if isinstance(o, set):
+ return list(o)
+ return json.JSONEncoder.default(self, o)
+
+ output = {
+ 'reviews': format_for_json_dump(self.reviews),
+ 'changes': format_for_json_dump(self.changes),
+ 'issues': format_for_json_dump(self.issues)
+ }
+ print(json.dumps(output, indent=2, cls=PythonObjectEncoder))
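For reference, a self-contained sketch of the JSON encoding dump_json relies on above: datetime values become ISO strings and sets become lists, so the collected activity can be serialized directly (sample data is hypothetical):

import json
from datetime import datetime


class PythonObjectEncoder(json.JSONEncoder):
    # Mirrors the encoder defined inside dump_json above.
    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()
        if isinstance(o, set):
            return list(o)
        return json.JSONEncoder.default(self, o)


print(json.dumps(
    {'modified': datetime(2023, 1, 2, 3, 4, 5), 'reviewers': {'reviewer@example.com'}},
    indent=2,
    cls=PythonObjectEncoder))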
def main():
- parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
- parser.add_option(
- '-u', '--user', metavar='',
- # Look for USER and USERNAME (Windows) environment variables.
- default=os.environ.get('USER', os.environ.get('USERNAME')),
- help='Filter on user, default=%default')
- parser.add_option(
- '-b', '--begin', metavar='',
- help='Filter issues created after the date (mm/dd/yy)')
- parser.add_option(
- '-e', '--end', metavar='',
- help='Filter issues created before the date (mm/dd/yy)')
- quarter_begin, quarter_end = get_quarter_of(datetime.today() -
- relativedelta(months=2))
- parser.add_option(
- '-Q', '--last_quarter', action='store_true',
- help='Use last quarter\'s dates, i.e. %s to %s' % (
- quarter_begin.strftime('%Y-%m-%d'), quarter_end.strftime('%Y-%m-%d')))
- parser.add_option(
- '-Y', '--this_year', action='store_true',
- help='Use this year\'s dates')
- parser.add_option(
- '-w', '--week_of', metavar='',
- help='Show issues for week of the date (mm/dd/yy)')
- parser.add_option(
- '-W', '--last_week', action='count',
- help='Show last week\'s issues. Use more times for more weeks.')
- parser.add_option(
- '-a', '--auth',
- action='store_true',
- help='Ask to authenticate for instances with no auth cookie')
- parser.add_option(
- '-d', '--deltas',
- action='store_true',
- help='Fetch deltas for changes.')
- parser.add_option(
- '--no-referenced-issues',
- action='store_true',
- help='Do not fetch issues referenced by owned changes. Useful in '
- 'combination with --changes-by-issue when you only want to list '
- 'issues that have also been modified in the same time period.')
- parser.add_option(
- '--skip_servers',
- action='store',
- default='',
- help='A comma separated list of gerrit and rietveld servers to ignore')
- parser.add_option(
- '--skip-own-issues-without-changes',
- action='store_true',
- help='Skips listing own issues without changes when showing changes '
- 'grouped by referenced issue(s). See --changes-by-issue for more '
- 'details.')
- parser.add_option(
- '-F', '--config_file', metavar='',
- help='Configuration file in JSON format, used to add additional gerrit '
- 'instances (see source code for an example).')
+ parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
+ parser.add_option(
+ '-u',
+ '--user',
+ metavar='',
+ # Look for USER and USERNAME (Windows) environment variables.
+ default=os.environ.get('USER', os.environ.get('USERNAME')),
+ help='Filter on user, default=%default')
+ parser.add_option('-b',
+ '--begin',
+ metavar='',
+ help='Filter issues created after the date (mm/dd/yy)')
+ parser.add_option('-e',
+ '--end',
+ metavar='',
+ help='Filter issues created before the date (mm/dd/yy)')
+ quarter_begin, quarter_end = get_quarter_of(datetime.today() -
+ relativedelta(months=2))
+ parser.add_option(
+ '-Q',
+ '--last_quarter',
+ action='store_true',
+ help='Use last quarter\'s dates, i.e. %s to %s' %
+ (quarter_begin.strftime('%Y-%m-%d'), quarter_end.strftime('%Y-%m-%d')))
+ parser.add_option('-Y',
+ '--this_year',
+ action='store_true',
+ help='Use this year\'s dates')
+ parser.add_option('-w',
+ '--week_of',
+ metavar='',
+ help='Show issues for week of the date (mm/dd/yy)')
+ parser.add_option(
+ '-W',
+ '--last_week',
+ action='count',
+ help='Show last week\'s issues. Use more times for more weeks.')
+ parser.add_option(
+ '-a',
+ '--auth',
+ action='store_true',
+ help='Ask to authenticate for instances with no auth cookie')
+ parser.add_option('-d',
+ '--deltas',
+ action='store_true',
+ help='Fetch deltas for changes.')
+ parser.add_option(
+ '--no-referenced-issues',
+ action='store_true',
+ help='Do not fetch issues referenced by owned changes. Useful in '
+ 'combination with --changes-by-issue when you only want to list '
+ 'issues that have also been modified in the same time period.')
+ parser.add_option(
+ '--skip_servers',
+ action='store',
+ default='',
+ help='A comma separated list of gerrit and rietveld servers to ignore')
+ parser.add_option(
+ '--skip-own-issues-without-changes',
+ action='store_true',
+ help='Skips listing own issues without changes when showing changes '
+ 'grouped by referenced issue(s). See --changes-by-issue for more '
+ 'details.')
+ parser.add_option(
+ '-F',
+ '--config_file',
+ metavar='',
+ help='Configuration file in JSON format, used to add additional gerrit '
+ 'instances (see source code for an example).')
- activity_types_group = optparse.OptionGroup(parser, 'Activity Types',
- 'By default, all activity will be looked up and '
- 'printed. If any of these are specified, only '
- 'those specified will be searched.')
- activity_types_group.add_option(
- '-c', '--changes',
- action='store_true',
- help='Show changes.')
- activity_types_group.add_option(
- '-i', '--issues',
- action='store_true',
- help='Show issues.')
- activity_types_group.add_option(
- '-r', '--reviews',
- action='store_true',
- help='Show reviews.')
- activity_types_group.add_option(
- '--changes-by-issue', action='store_true',
- help='Show changes grouped by referenced issue(s).')
- parser.add_option_group(activity_types_group)
+ activity_types_group = optparse.OptionGroup(
+ parser, 'Activity Types',
+ 'By default, all activity will be looked up and '
+ 'printed. If any of these are specified, only '
+ 'those specified will be searched.')
+ activity_types_group.add_option('-c',
+ '--changes',
+ action='store_true',
+ help='Show changes.')
+ activity_types_group.add_option('-i',
+ '--issues',
+ action='store_true',
+ help='Show issues.')
+ activity_types_group.add_option('-r',
+ '--reviews',
+ action='store_true',
+ help='Show reviews.')
+ activity_types_group.add_option(
+ '--changes-by-issue',
+ action='store_true',
+ help='Show changes grouped by referenced issue(s).')
+ parser.add_option_group(activity_types_group)
- output_format_group = optparse.OptionGroup(parser, 'Output Format',
- 'By default, all activity will be printed in the '
- 'following format: {url} {title}. This can be '
- 'changed for either all activity types or '
- 'individually for each activity type. The format '
- 'is defined as documented for '
- 'string.format(...). The variables available for '
- 'all activity types are url, title, author, '
- 'created and modified. Format options for '
- 'specific activity types will override the '
- 'generic format.')
- output_format_group.add_option(
- '-f', '--output-format', metavar='',
- default=u'{url} {title}',
- help='Specifies the format to use when printing all your activity.')
- output_format_group.add_option(
- '--output-format-changes', metavar='',
- default=None,
- help='Specifies the format to use when printing changes. Supports the '
- 'additional variable {reviewers}')
- output_format_group.add_option(
- '--output-format-issues', metavar='',
- default=None,
- help='Specifies the format to use when printing issues. Supports the '
- 'additional variable {owner}.')
- output_format_group.add_option(
- '--output-format-reviews', metavar='',
- default=None,
- help='Specifies the format to use when printing reviews.')
- output_format_group.add_option(
- '--output-format-heading', metavar='',
- default=u'{heading}:',
- help='Specifies the format to use when printing headings. '
- 'Supports the variable {heading}.')
- output_format_group.add_option(
- '--output-format-no-url', default='{title}',
- help='Specifies the format to use when printing activity without url.')
- output_format_group.add_option(
- '-m', '--markdown', action='store_true',
- help='Use markdown-friendly output (overrides --output-format '
- 'and --output-format-heading)')
- output_format_group.add_option(
- '-j', '--json', action='store_true',
- help='Output json data (overrides other format options)')
- parser.add_option_group(output_format_group)
+ output_format_group = optparse.OptionGroup(
+ parser, 'Output Format',
+ 'By default, all activity will be printed in the '
+ 'following format: {url} {title}. This can be '
+ 'changed for either all activity types or '
+ 'individually for each activity type. The format '
+ 'is defined as documented for '
+ 'string.format(...). The variables available for '
+ 'all activity types are url, title, author, '
+ 'created and modified. Format options for '
+ 'specific activity types will override the '
+ 'generic format.')
+ output_format_group.add_option(
+ '-f',
+ '--output-format',
+ metavar='',
+ default=u'{url} {title}',
+ help='Specifies the format to use when printing all your activity.')
+ output_format_group.add_option(
+ '--output-format-changes',
+ metavar='',
+ default=None,
+ help='Specifies the format to use when printing changes. Supports the '
+ 'additional variable {reviewers}')
+ output_format_group.add_option(
+ '--output-format-issues',
+ metavar='',
+ default=None,
+ help='Specifies the format to use when printing issues. Supports the '
+ 'additional variable {owner}.')
+ output_format_group.add_option(
+ '--output-format-reviews',
+ metavar='',
+ default=None,
+ help='Specifies the format to use when printing reviews.')
+ output_format_group.add_option(
+ '--output-format-heading',
+ metavar='',
+ default=u'{heading}:',
+ help='Specifies the format to use when printing headings. '
+ 'Supports the variable {heading}.')
+ output_format_group.add_option(
+ '--output-format-no-url',
+ default='{title}',
+ help='Specifies the format to use when printing activity without url.')
+ output_format_group.add_option(
+ '-m',
+ '--markdown',
+ action='store_true',
+ help='Use markdown-friendly output (overrides --output-format '
+ 'and --output-format-heading)')
+ output_format_group.add_option(
+ '-j',
+ '--json',
+ action='store_true',
+ help='Output json data (overrides other format options)')
+ parser.add_option_group(output_format_group)
- parser.add_option(
- '-v', '--verbose',
- action='store_const',
- dest='verbosity',
- default=logging.WARN,
- const=logging.INFO,
- help='Output extra informational messages.'
- )
- parser.add_option(
- '-q', '--quiet',
- action='store_const',
- dest='verbosity',
- const=logging.ERROR,
- help='Suppress non-error messages.'
- )
- parser.add_option(
- '-M', '--merged-only',
- action='store_true',
- dest='merged_only',
- default=False,
- help='Shows only changes that have been merged.')
- parser.add_option(
- '-C', '--completed-issues',
- action='store_true',
- dest='completed_issues',
- default=False,
- help='Shows only monorail issues that have completed (Fixed|Verified) '
- 'by the user.')
- parser.add_option(
- '-o', '--output', metavar='',
- help='Where to output the results. By default prints to stdout.')
+ parser.add_option('-v',
+ '--verbose',
+ action='store_const',
+ dest='verbosity',
+ default=logging.WARN,
+ const=logging.INFO,
+ help='Output extra informational messages.')
+ parser.add_option('-q',
+ '--quiet',
+ action='store_const',
+ dest='verbosity',
+ const=logging.ERROR,
+ help='Suppress non-error messages.')
+ parser.add_option('-M',
+ '--merged-only',
+ action='store_true',
+ dest='merged_only',
+ default=False,
+ help='Shows only changes that have been merged.')
+ parser.add_option(
+ '-C',
+ '--completed-issues',
+ action='store_true',
+ dest='completed_issues',
+ default=False,
+        help='Shows only monorail issues that have been completed '
+        '(Fixed|Verified) by the user.')
+ parser.add_option(
+ '-o',
+ '--output',
+ metavar='',
+ help='Where to output the results. By default prints to stdout.')
- # Remove description formatting
- parser.format_description = (
- lambda _: parser.description) # pylint: disable=no-member
+ # Remove description formatting
+ parser.format_description = (lambda _: parser.description) # pylint: disable=no-member
- options, args = parser.parse_args()
- options.local_user = os.environ.get('USER')
- if args:
- parser.error('Args unsupported')
- if not options.user:
- parser.error('USER/USERNAME is not set, please use -u')
- # Retains the original -u option as the email address.
- options.email = options.user
- options.user = username(options.email)
+ options, args = parser.parse_args()
+ options.local_user = os.environ.get('USER')
+ if args:
+ parser.error('Args unsupported')
+ if not options.user:
+ parser.error('USER/USERNAME is not set, please use -u')
+ # Retains the original -u option as the email address.
+ options.email = options.user
+ options.user = username(options.email)
- logging.basicConfig(level=options.verbosity)
+ logging.basicConfig(level=options.verbosity)
- # python-keyring provides easy access to the system keyring.
- try:
- import keyring # pylint: disable=unused-import,unused-variable,F0401
- except ImportError:
- logging.warning('Consider installing python-keyring')
+ # python-keyring provides easy access to the system keyring.
+ try:
+ import keyring # pylint: disable=unused-import,unused-variable,F0401
+ except ImportError:
+ logging.warning('Consider installing python-keyring')
- if not options.begin:
- if options.last_quarter:
- begin, end = quarter_begin, quarter_end
- elif options.this_year:
- begin, end = get_year_of(datetime.today())
- elif options.week_of:
- begin, end = (get_week_of(datetime.strptime(options.week_of, '%m/%d/%y')))
- elif options.last_week:
- begin, end = (get_week_of(datetime.today() -
- timedelta(days=1 + 7 * options.last_week)))
- else:
- begin, end = (get_week_of(datetime.today() - timedelta(days=1)))
- else:
- begin = dateutil.parser.parse(options.begin)
- if options.end:
- end = dateutil.parser.parse(options.end)
- else:
- end = datetime.today()
- options.begin, options.end = begin, end
- if begin >= end:
- # The queries fail in peculiar ways when the begin date is in the future.
- # Give a descriptive error message instead.
- logging.error('Start date (%s) is the same or later than end date (%s)' %
- (begin, end))
- return 1
-
- if options.markdown:
- options.output_format_heading = '### {heading}\n'
- options.output_format = ' * [{title}]({url})'
- options.output_format_no_url = ' * {title}'
- logging.info('Searching for activity by %s', options.user)
- logging.info('Using range %s to %s', options.begin, options.end)
-
- if options.config_file:
- with open(options.config_file) as f:
- config = json.load(f)
-
- for item, entries in config.items():
- if item == 'gerrit_instances':
- for repo, dic in entries.items():
- # Use property name as URL
- dic['url'] = repo
- gerrit_instances.append(dic)
- elif item == 'monorail_projects':
- monorail_projects.append(entries)
+ if not options.begin:
+ if options.last_quarter:
+ begin, end = quarter_begin, quarter_end
+ elif options.this_year:
+ begin, end = get_year_of(datetime.today())
+ elif options.week_of:
+ begin, end = (get_week_of(
+ datetime.strptime(options.week_of, '%m/%d/%y')))
+ elif options.last_week:
+ begin, end = (
+ get_week_of(datetime.today() -
+ timedelta(days=1 + 7 * options.last_week)))
else:
- logging.error('Invalid entry in config file.')
- return 1
-
- my_activity = MyActivity(options)
- my_activity.show_progress('Loading data')
-
- if not (options.changes or options.reviews or options.issues or
- options.changes_by_issue):
- options.changes = True
- options.issues = True
- options.reviews = True
-
- # First do any required authentication so none of the user interaction has to
- # wait for actual work.
- if options.changes or options.changes_by_issue:
- my_activity.auth_for_changes()
- if options.reviews:
- my_activity.auth_for_reviews()
-
- logging.info('Looking up activity.....')
-
- try:
- if options.changes or options.changes_by_issue:
- my_activity.get_changes()
- if options.reviews:
- my_activity.get_reviews()
- if options.issues or options.changes_by_issue:
- my_activity.get_issues()
- if not options.no_referenced_issues:
- my_activity.get_referenced_issues()
- except auth.LoginRequiredError as e:
- logging.error('auth.LoginRequiredError: %s', e)
-
- my_activity.show_progress('\n')
-
- my_activity.print_access_errors()
-
- output_file = None
- try:
- if options.output:
- output_file = open(options.output, 'w')
- logging.info('Printing output to "%s"', options.output)
- sys.stdout = output_file
- except (IOError, OSError) as e:
- logging.error('Unable to write output: %s', e)
- else:
- if options.json:
- my_activity.dump_json()
+ begin, end = (get_week_of(datetime.today() - timedelta(days=1)))
else:
- if options.changes:
- my_activity.print_changes()
- if options.reviews:
- my_activity.print_reviews()
- if options.issues:
- my_activity.print_issues()
- if options.changes_by_issue:
- my_activity.print_changes_by_issue(
- options.skip_own_issues_without_changes)
- finally:
- if output_file:
- logging.info('Done printing to file.')
- sys.stdout = sys.__stdout__
- output_file.close()
+ begin = dateutil.parser.parse(options.begin)
+ if options.end:
+ end = dateutil.parser.parse(options.end)
+ else:
+ end = datetime.today()
+ options.begin, options.end = begin, end
+ if begin >= end:
+ # The queries fail in peculiar ways when the begin date is in the
+ # future. Give a descriptive error message instead.
+ logging.error(
+            'Start date (%s) is the same as or later than end date (%s)' %
+ (begin, end))
+ return 1
- return 0
+ if options.markdown:
+ options.output_format_heading = '### {heading}\n'
+ options.output_format = ' * [{title}]({url})'
+ options.output_format_no_url = ' * {title}'
+ logging.info('Searching for activity by %s', options.user)
+ logging.info('Using range %s to %s', options.begin, options.end)
+
+ if options.config_file:
+ with open(options.config_file) as f:
+ config = json.load(f)
+
+ for item, entries in config.items():
+ if item == 'gerrit_instances':
+ for repo, dic in entries.items():
+ # Use property name as URL
+ dic['url'] = repo
+ gerrit_instances.append(dic)
+ elif item == 'monorail_projects':
+ monorail_projects.append(entries)
+ else:
+ logging.error('Invalid entry in config file.')
+ return 1
+
+ my_activity = MyActivity(options)
+ my_activity.show_progress('Loading data')
+
+ if not (options.changes or options.reviews or options.issues
+ or options.changes_by_issue):
+ options.changes = True
+ options.issues = True
+ options.reviews = True
+
+ # First do any required authentication so none of the user interaction has
+ # to wait for actual work.
+ if options.changes or options.changes_by_issue:
+ my_activity.auth_for_changes()
+ if options.reviews:
+ my_activity.auth_for_reviews()
+
+ logging.info('Looking up activity.....')
+
+ try:
+ if options.changes or options.changes_by_issue:
+ my_activity.get_changes()
+ if options.reviews:
+ my_activity.get_reviews()
+ if options.issues or options.changes_by_issue:
+ my_activity.get_issues()
+ if not options.no_referenced_issues:
+ my_activity.get_referenced_issues()
+ except auth.LoginRequiredError as e:
+ logging.error('auth.LoginRequiredError: %s', e)
+
+ my_activity.show_progress('\n')
+
+ my_activity.print_access_errors()
+
+ output_file = None
+ try:
+ if options.output:
+ output_file = open(options.output, 'w')
+ logging.info('Printing output to "%s"', options.output)
+ sys.stdout = output_file
+ except (IOError, OSError) as e:
+ logging.error('Unable to write output: %s', e)
+ else:
+ if options.json:
+ my_activity.dump_json()
+ else:
+ if options.changes:
+ my_activity.print_changes()
+ if options.reviews:
+ my_activity.print_reviews()
+ if options.issues:
+ my_activity.print_issues()
+ if options.changes_by_issue:
+ my_activity.print_changes_by_issue(
+ options.skip_own_issues_without_changes)
+ finally:
+ if output_file:
+ logging.info('Done printing to file.')
+ sys.stdout = sys.__stdout__
+ output_file.close()
+
+ return 0
if __name__ == '__main__':
- # Fix encoding to support non-ascii issue titles.
- fix_encoding.fix_encoding()
+ # Fix encoding to support non-ascii issue titles.
+ fix_encoding.fix_encoding()
- try:
- sys.exit(main())
- except KeyboardInterrupt:
- sys.stderr.write('interrupted\n')
- sys.exit(1)
+ try:
+ sys.exit(main())
+ except KeyboardInterrupt:
+ sys.stderr.write('interrupted\n')
+ sys.exit(1)
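
For reference, the JSON consumed by the --config_file option above only needs a
'gerrit_instances' object whose keys are host URLs; the parsing loop copies each
key into the entry's 'url' field, and a 'monorail_projects' entry is optional.
A minimal, hypothetical example with a made-up hostname:

    import json

    example_config = {
        'gerrit_instances': {
            # The key itself is used as the instance URL; any other fields are
            # passed through to the gerrit_instances entry unchanged.
            'gerrit-review.example.com': {},
        },
    }
    print(json.dumps(example_config, indent=2))
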
diff --git a/ninja.py b/ninja.py
index 63b0a6af68..a54065f80c 100755
--- a/ninja.py
+++ b/ninja.py
@@ -15,76 +15,77 @@ import gclient_paths
def findNinjaInPath():
- env_path = os.getenv('PATH')
- if not env_path:
- return
- exe = 'ninja'
- if sys.platform in ['win32', 'cygwin']:
- exe += '.exe'
- for bin_dir in env_path.split(os.pathsep):
- if bin_dir.rstrip(os.sep).endswith('depot_tools'):
- # skip depot_tools to avoid calling ninja.py infitely.
- continue
- ninja_path = os.path.join(bin_dir, exe)
- if os.path.isfile(ninja_path):
- return ninja_path
+ env_path = os.getenv('PATH')
+ if not env_path:
+ return
+ exe = 'ninja'
+ if sys.platform in ['win32', 'cygwin']:
+ exe += '.exe'
+ for bin_dir in env_path.split(os.pathsep):
+ if bin_dir.rstrip(os.sep).endswith('depot_tools'):
+            # skip depot_tools to avoid calling ninja.py infinitely.
+ continue
+ ninja_path = os.path.join(bin_dir, exe)
+ if os.path.isfile(ninja_path):
+ return ninja_path
def fallback(ninja_args):
- # Try to find ninja in PATH.
- ninja_path = findNinjaInPath()
- if ninja_path:
- return subprocess.call([ninja_path] + ninja_args)
+ # Try to find ninja in PATH.
+ ninja_path = findNinjaInPath()
+ if ninja_path:
+ return subprocess.call([ninja_path] + ninja_args)
- print(
- 'depot_tools/ninja.py: Could not find Ninja in the third_party of '
- 'the current project, nor in your PATH.\n'
- 'Please take one of the following actions to install Ninja:\n'
- '- If your project has DEPS, add a CIPD Ninja dependency to DEPS.\n'
- '- Otherwise, add Ninja to your PATH *after* depot_tools.',
- file=sys.stderr)
- return 1
+ print(
+ 'depot_tools/ninja.py: Could not find Ninja in the third_party of '
+ 'the current project, nor in your PATH.\n'
+ 'Please take one of the following actions to install Ninja:\n'
+ '- If your project has DEPS, add a CIPD Ninja dependency to DEPS.\n'
+ '- Otherwise, add Ninja to your PATH *after* depot_tools.',
+ file=sys.stderr)
+ return 1
def main(args):
- # On Windows the ninja.bat script passes along the arguments enclosed in
- # double quotes. This prevents multiple levels of parsing of the special '^'
- # characters needed when compiling a single file. When this case is detected,
- # we need to split the argument. This means that arguments containing actual
- # spaces are not supported by ninja.bat, but that is not a real limitation.
- if (sys.platform.startswith('win') and len(args) == 2):
- args = args[:1] + args[1].split()
+ # On Windows the ninja.bat script passes along the arguments enclosed in
+ # double quotes. This prevents multiple levels of parsing of the special '^'
+ # characters needed when compiling a single file. When this case is
+ # detected, we need to split the argument. This means that arguments
+ # containing actual spaces are not supported by ninja.bat, but that is not a
+ # real limitation.
+ if (sys.platform.startswith('win') and len(args) == 2):
+ args = args[:1] + args[1].split()
- # macOS's python sets CPATH, LIBRARY_PATH, SDKROOT implicitly.
- # https://openradar.appspot.com/radar?id=5608755232243712
- #
- # Removing those environment variables to avoid affecting clang's behaviors.
- if sys.platform == 'darwin':
- os.environ.pop("CPATH", None)
- os.environ.pop("LIBRARY_PATH", None)
- os.environ.pop("SDKROOT", None)
+ # macOS's python sets CPATH, LIBRARY_PATH, SDKROOT implicitly.
+ # https://openradar.appspot.com/radar?id=5608755232243712
+ #
+ # Removing those environment variables to avoid affecting clang's behaviors.
+ if sys.platform == 'darwin':
+ os.environ.pop("CPATH", None)
+ os.environ.pop("LIBRARY_PATH", None)
+ os.environ.pop("SDKROOT", None)
- # Get gclient root + src.
- primary_solution_path = gclient_paths.GetPrimarySolutionPath()
- gclient_root_path = gclient_paths.FindGclientRoot(os.getcwd())
- gclient_src_root_path = None
- if gclient_root_path:
- gclient_src_root_path = os.path.join(gclient_root_path, 'src')
+ # Get gclient root + src.
+ primary_solution_path = gclient_paths.GetPrimarySolutionPath()
+ gclient_root_path = gclient_paths.FindGclientRoot(os.getcwd())
+ gclient_src_root_path = None
+ if gclient_root_path:
+ gclient_src_root_path = os.path.join(gclient_root_path, 'src')
- for base_path in set(
- [primary_solution_path, gclient_root_path, gclient_src_root_path]):
- if not base_path:
- continue
- ninja_path = os.path.join(base_path, 'third_party', 'ninja',
- 'ninja' + gclient_paths.GetExeSuffix())
- if os.path.isfile(ninja_path):
- return subprocess.call([ninja_path] + args[1:])
+ for base_path in set(
+ [primary_solution_path, gclient_root_path, gclient_src_root_path]):
+ if not base_path:
+ continue
+ ninja_path = os.path.join(base_path, 'third_party', 'ninja',
+ 'ninja' + gclient_paths.GetExeSuffix())
+ if os.path.isfile(ninja_path):
+ return subprocess.call([ninja_path] + args[1:])
- return fallback(args[1:])
+ return fallback(args[1:])
if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv))
- except KeyboardInterrupt:
- sys.exit(1)
+ try:
+ sys.exit(main(sys.argv))
+ except KeyboardInterrupt:
+ sys.exit(1)
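
A minimal standalone sketch of the lookup order implemented above, simplified
to a single checkout root; the paths involved are hypothetical:

    import os

    def pick_ninja(checkout_root, path_dirs, exe='ninja'):
        # Prefer the checkout's bundled binary under third_party/ninja.
        bundled = os.path.join(checkout_root, 'third_party', 'ninja', exe)
        if os.path.isfile(bundled):
            return bundled
        # Otherwise fall back to PATH, skipping depot_tools itself so the
        # ninja.py wrapper never re-invokes itself.
        for bin_dir in path_dirs:
            if bin_dir.rstrip(os.sep).endswith('depot_tools'):
                continue
            candidate = os.path.join(bin_dir, exe)
            if os.path.isfile(candidate):
                return candidate
        return None
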
diff --git a/ninja_reclient.py b/ninja_reclient.py
index bc4e79bab8..7ceaf7abdd 100755
--- a/ninja_reclient.py
+++ b/ninja_reclient.py
@@ -14,14 +14,14 @@ import reclient_helper
def main(argv):
- with reclient_helper.build_context(argv, 'ninja_reclient') as ret_code:
- if ret_code:
- return ret_code
- try:
- return ninja.main(argv)
- except KeyboardInterrupt:
- return 1
+ with reclient_helper.build_context(argv, 'ninja_reclient') as ret_code:
+ if ret_code:
+ return ret_code
+ try:
+ return ninja.main(argv)
+ except KeyboardInterrupt:
+ return 1
if __name__ == '__main__':
- sys.exit(main(sys.argv))
+ sys.exit(main(sys.argv))
diff --git a/ninjalog_uploader.py b/ninjalog_uploader.py
index d8566bfc67..e99bd86d64 100755
--- a/ninjalog_uploader.py
+++ b/ninjalog_uploader.py
@@ -39,92 +39,92 @@ ALLOWLISTED_CONFIGS = ('symbol_level', 'use_goma', 'is_debug',
def IsGoogler():
- """Check whether this user is Googler or not."""
- p = subprocess.run('goma_auth info',
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- universal_newlines=True,
- shell=True)
- if p.returncode != 0:
- return False
- lines = p.stdout.splitlines()
- if len(lines) == 0:
- return False
- l = lines[0]
- # |l| will be like 'Login as @google.com' for googler using goma.
- return l.startswith('Login as ') and l.endswith('@google.com')
+ """Check whether this user is Googler or not."""
+ p = subprocess.run('goma_auth info',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ shell=True)
+ if p.returncode != 0:
+ return False
+ lines = p.stdout.splitlines()
+ if len(lines) == 0:
+ return False
+ l = lines[0]
+ # |l| will be like 'Login as @google.com' for googler using goma.
+ return l.startswith('Login as ') and l.endswith('@google.com')
def ParseGNArgs(gn_args):
- """Parse gn_args as json and return config dictionary."""
- configs = json.loads(gn_args)
- build_configs = {}
+ """Parse gn_args as json and return config dictionary."""
+ configs = json.loads(gn_args)
+ build_configs = {}
- for config in configs:
- key = config["name"]
- if key not in ALLOWLISTED_CONFIGS:
- continue
- if 'current' in config:
- build_configs[key] = config['current']['value']
- else:
- build_configs[key] = config['default']['value']
+ for config in configs:
+ key = config["name"]
+ if key not in ALLOWLISTED_CONFIGS:
+ continue
+ if 'current' in config:
+ build_configs[key] = config['current']['value']
+ else:
+ build_configs[key] = config['default']['value']
- return build_configs
+ return build_configs
def GetBuildTargetFromCommandLine(cmdline):
- """Get build targets from commandline."""
+ """Get build targets from commandline."""
- # Skip argv0, argv1: ['/path/to/python3', '/path/to/depot_tools/ninja.py']
- idx = 2
+ # Skip argv0, argv1: ['/path/to/python3', '/path/to/depot_tools/ninja.py']
+ idx = 2
- # Skipping all args that involve these flags, and taking all remaining args
- # as targets.
- onearg_flags = ('-C', '-d', '-f', '-j', '-k', '-l', '-p', '-t', '-w')
- zeroarg_flags = ('--version', '-n', '-v')
+ # Skipping all args that involve these flags, and taking all remaining args
+ # as targets.
+ onearg_flags = ('-C', '-d', '-f', '-j', '-k', '-l', '-p', '-t', '-w')
+ zeroarg_flags = ('--version', '-n', '-v')
- targets = []
+ targets = []
- while idx < len(cmdline):
- arg = cmdline[idx]
- if arg in onearg_flags:
- idx += 2
- continue
+ while idx < len(cmdline):
+ arg = cmdline[idx]
+ if arg in onearg_flags:
+ idx += 2
+ continue
- if (arg[:2] in onearg_flags or arg in zeroarg_flags):
- idx += 1
- continue
+ if (arg[:2] in onearg_flags or arg in zeroarg_flags):
+ idx += 1
+ continue
- # A target doesn't start with '-'.
- if arg.startswith('-'):
- idx += 1
- continue
+ # A target doesn't start with '-'.
+ if arg.startswith('-'):
+ idx += 1
+ continue
- # Avoid uploading absolute paths accidentally. e.g. b/270907050
- if os.path.isabs(arg):
- idx += 1
- continue
+ # Avoid uploading absolute paths accidentally. e.g. b/270907050
+ if os.path.isabs(arg):
+ idx += 1
+ continue
- targets.append(arg)
- idx += 1
+ targets.append(arg)
+ idx += 1
- return targets
+ return targets
def GetJflag(cmdline):
- """Parse cmdline to get flag value for -j"""
+ """Parse cmdline to get flag value for -j"""
- for i in range(len(cmdline)):
- if (cmdline[i] == '-j' and i + 1 < len(cmdline)
- and cmdline[i + 1].isdigit()):
- return int(cmdline[i + 1])
+ for i in range(len(cmdline)):
+ if (cmdline[i] == '-j' and i + 1 < len(cmdline)
+ and cmdline[i + 1].isdigit()):
+ return int(cmdline[i + 1])
- if (cmdline[i].startswith('-j') and cmdline[i][len('-j'):].isdigit()):
- return int(cmdline[i][len('-j'):])
+ if (cmdline[i].startswith('-j') and cmdline[i][len('-j'):].isdigit()):
+ return int(cmdline[i][len('-j'):])
def GetMetadata(cmdline, ninjalog):
- """Get metadata for uploaded ninjalog.
+ """Get metadata for uploaded ninjalog.
Returned metadata has schema defined in
https://cs.chromium.org?q="type+Metadata+struct+%7B"+file:%5Einfra/go/src/infra/appengine/chromium_build_stats/ninjalog/
@@ -132,120 +132,120 @@ def GetMetadata(cmdline, ninjalog):
TODO(tikuta): Collect GOMA_* env var.
"""
- build_dir = os.path.dirname(ninjalog)
+ build_dir = os.path.dirname(ninjalog)
- build_configs = {}
-
- try:
- args = ['gn', 'args', build_dir, '--list', '--short', '--json']
- if sys.platform == 'win32':
- # gn in PATH is bat file in windows environment (except cygwin).
- args = ['cmd', '/c'] + args
-
- gn_args = subprocess.check_output(args)
- build_configs = ParseGNArgs(gn_args)
- except subprocess.CalledProcessError as e:
- logging.error("Failed to call gn %s", e)
build_configs = {}
- # Stringify config.
- for k in build_configs:
- build_configs[k] = str(build_configs[k])
+ try:
+ args = ['gn', 'args', build_dir, '--list', '--short', '--json']
+ if sys.platform == 'win32':
+            # gn in PATH is a .bat file on Windows (except Cygwin).
+ args = ['cmd', '/c'] + args
- metadata = {
- 'platform': platform.system(),
- 'cpu_core': multiprocessing.cpu_count(),
- 'build_configs': build_configs,
- 'targets': GetBuildTargetFromCommandLine(cmdline),
- }
+ gn_args = subprocess.check_output(args)
+ build_configs = ParseGNArgs(gn_args)
+ except subprocess.CalledProcessError as e:
+ logging.error("Failed to call gn %s", e)
+ build_configs = {}
- jflag = GetJflag(cmdline)
- if jflag is not None:
- metadata['jobs'] = jflag
+ # Stringify config.
+ for k in build_configs:
+ build_configs[k] = str(build_configs[k])
- return metadata
+ metadata = {
+ 'platform': platform.system(),
+ 'cpu_core': multiprocessing.cpu_count(),
+ 'build_configs': build_configs,
+ 'targets': GetBuildTargetFromCommandLine(cmdline),
+ }
+
+ jflag = GetJflag(cmdline)
+ if jflag is not None:
+ metadata['jobs'] = jflag
+
+ return metadata
def GetNinjalog(cmdline):
- """GetNinjalog returns the path to ninjalog from cmdline."""
- # ninjalog is in current working directory by default.
- ninjalog_dir = '.'
+ """GetNinjalog returns the path to ninjalog from cmdline."""
+ # ninjalog is in current working directory by default.
+ ninjalog_dir = '.'
- i = 0
- while i < len(cmdline):
- cmd = cmdline[i]
- i += 1
- if cmd == '-C' and i < len(cmdline):
- ninjalog_dir = cmdline[i]
- i += 1
- continue
+ i = 0
+ while i < len(cmdline):
+ cmd = cmdline[i]
+ i += 1
+ if cmd == '-C' and i < len(cmdline):
+ ninjalog_dir = cmdline[i]
+ i += 1
+ continue
- if cmd.startswith('-C') and len(cmd) > len('-C'):
- ninjalog_dir = cmd[len('-C'):]
+ if cmd.startswith('-C') and len(cmd) > len('-C'):
+ ninjalog_dir = cmd[len('-C'):]
- return os.path.join(ninjalog_dir, '.ninja_log')
+ return os.path.join(ninjalog_dir, '.ninja_log')
def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--server',
- default='chromium-build-stats.appspot.com',
- help='server to upload ninjalog file.')
- parser.add_argument('--ninjalog', help='ninjalog file to upload.')
- parser.add_argument('--verbose',
- action='store_true',
- help='Enable verbose logging.')
- parser.add_argument('--cmdline',
- required=True,
- nargs=argparse.REMAINDER,
- help='command line args passed to ninja.')
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--server',
+ default='chromium-build-stats.appspot.com',
+ help='server to upload ninjalog file.')
+ parser.add_argument('--ninjalog', help='ninjalog file to upload.')
+ parser.add_argument('--verbose',
+ action='store_true',
+ help='Enable verbose logging.')
+ parser.add_argument('--cmdline',
+ required=True,
+ nargs=argparse.REMAINDER,
+ help='command line args passed to ninja.')
- args = parser.parse_args()
+ args = parser.parse_args()
- if args.verbose:
- logging.basicConfig(level=logging.INFO)
- else:
- # Disable logging.
- logging.disable(logging.CRITICAL)
+ if args.verbose:
+ logging.basicConfig(level=logging.INFO)
+ else:
+ # Disable logging.
+ logging.disable(logging.CRITICAL)
- if not IsGoogler():
+ if not IsGoogler():
+ return 0
+
+ ninjalog = args.ninjalog or GetNinjalog(args.cmdline)
+ if not os.path.isfile(ninjalog):
+ logging.warning("ninjalog is not found in %s", ninjalog)
+ return 1
+
+    # We assume each ninja invocation is at least 2 seconds apart. This avoids
+    # duplicate entries on the server when the current build is a no-op.
+ if os.stat(ninjalog).st_mtime < time.time() - 2:
+ logging.info("ninjalog is not updated recently %s", ninjalog)
+ return 0
+
+ output = io.BytesIO()
+
+ with open(ninjalog) as f:
+ with gzip.GzipFile(fileobj=output, mode='wb') as g:
+ g.write(f.read().encode())
+ g.write(b'# end of ninja log\n')
+
+ metadata = GetMetadata(args.cmdline, ninjalog)
+ logging.info('send metadata: %s', json.dumps(metadata))
+ g.write(json.dumps(metadata).encode())
+
+ resp = request.urlopen(
+ request.Request('https://' + args.server + '/upload_ninja_log/',
+ data=output.getvalue(),
+ headers={'Content-Encoding': 'gzip'}))
+
+ if resp.status != 200:
+ logging.warning("unexpected status code for response: %s", resp.status)
+ return 1
+
+ logging.info('response header: %s', resp.headers)
+ logging.info('response content: %s', resp.read())
return 0
- ninjalog = args.ninjalog or GetNinjalog(args.cmdline)
- if not os.path.isfile(ninjalog):
- logging.warning("ninjalog is not found in %s", ninjalog)
- return 1
-
- # We assume that each ninja invocation interval takes at least 2 seconds.
- # This is not to have duplicate entry in server when current build is no-op.
- if os.stat(ninjalog).st_mtime < time.time() - 2:
- logging.info("ninjalog is not updated recently %s", ninjalog)
- return 0
-
- output = io.BytesIO()
-
- with open(ninjalog) as f:
- with gzip.GzipFile(fileobj=output, mode='wb') as g:
- g.write(f.read().encode())
- g.write(b'# end of ninja log\n')
-
- metadata = GetMetadata(args.cmdline, ninjalog)
- logging.info('send metadata: %s', json.dumps(metadata))
- g.write(json.dumps(metadata).encode())
-
- resp = request.urlopen(
- request.Request('https://' + args.server + '/upload_ninja_log/',
- data=output.getvalue(),
- headers={'Content-Encoding': 'gzip'}))
-
- if resp.status != 200:
- logging.warning("unexpected status code for response: %s", resp.status)
- return 1
-
- logging.info('response header: %s', resp.headers)
- logging.info('response content: %s', resp.read())
- return 0
-
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
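
To make the command-line parsing above concrete, a small usage sketch of the
helpers defined in this file; the cmdline is made up, and the commented results
assume a POSIX path separator:

    import ninjalog_uploader

    cmdline = ['python3', 'ninja.py', '-C', 'out/Default', '-j120', 'chrome']
    print(ninjalog_uploader.GetNinjalog(cmdline))  # out/Default/.ninja_log
    print(ninjalog_uploader.GetJflag(cmdline))  # 120
    print(ninjalog_uploader.GetBuildTargetFromCommandLine(cmdline))  # ['chrome']
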
diff --git a/ninjalog_uploader_wrapper.py b/ninjalog_uploader_wrapper.py
index 701978ce26..6a730699be 100755
--- a/ninjalog_uploader_wrapper.py
+++ b/ninjalog_uploader_wrapper.py
@@ -21,38 +21,38 @@ VERSION = 3
def LoadConfig():
- if os.path.isfile(CONFIG):
- with open(CONFIG, 'r') as f:
- try:
- config = json.load(f)
- except Exception:
- # Set default value when failed to load config.
- config = {
- 'is-googler': ninjalog_uploader.IsGoogler(),
- 'countdown': 10,
- 'version': VERSION,
- }
+ if os.path.isfile(CONFIG):
+ with open(CONFIG, 'r') as f:
+ try:
+ config = json.load(f)
+ except Exception:
+ # Set default value when failed to load config.
+ config = {
+ 'is-googler': ninjalog_uploader.IsGoogler(),
+ 'countdown': 10,
+ 'version': VERSION,
+ }
- if config['version'] == VERSION:
- config['countdown'] = max(0, config['countdown'] - 1)
- return config
+ if config['version'] == VERSION:
+ config['countdown'] = max(0, config['countdown'] - 1)
+ return config
- return {
- 'is-googler': ninjalog_uploader.IsGoogler(),
- 'countdown': 10,
- 'version': VERSION,
- }
+ return {
+ 'is-googler': ninjalog_uploader.IsGoogler(),
+ 'countdown': 10,
+ 'version': VERSION,
+ }
def SaveConfig(config):
- with open(CONFIG, 'w') as f:
- json.dump(config, f)
+ with open(CONFIG, 'w') as f:
+ json.dump(config, f)
def ShowMessage(countdown):
- whitelisted = '\n'.join(
- [' * %s' % config for config in ninjalog_uploader.ALLOWLISTED_CONFIGS])
- print("""
+ whitelisted = '\n'.join(
+ [' * %s' % config for config in ninjalog_uploader.ALLOWLISTED_CONFIGS])
+ print("""
Your ninjalog will be uploaded to build stats server. The uploaded log will be
used to analyze user side build performance.
@@ -85,51 +85,51 @@ https://chromium.googlesource.com/chromium/tools/depot_tools/+/main/ninjalog.REA
def main():
- config = LoadConfig()
+ config = LoadConfig()
- if len(sys.argv) == 2 and sys.argv[1] == 'opt-in':
- config['opt-in'] = True
- config['countdown'] = 0
- SaveConfig(config)
- print('ninjalog upload is opted in.')
- return 0
+ if len(sys.argv) == 2 and sys.argv[1] == 'opt-in':
+ config['opt-in'] = True
+ config['countdown'] = 0
+ SaveConfig(config)
+ print('ninjalog upload is opted in.')
+ return 0
- if len(sys.argv) == 2 and sys.argv[1] == 'opt-out':
- config['opt-in'] = False
- SaveConfig(config)
- print('ninjalog upload is opted out.')
- return 0
+ if len(sys.argv) == 2 and sys.argv[1] == 'opt-out':
+ config['opt-in'] = False
+ SaveConfig(config)
+ print('ninjalog upload is opted out.')
+ return 0
- if 'opt-in' in config and not config['opt-in']:
- # Upload is opted out.
- return 0
+ if 'opt-in' in config and not config['opt-in']:
+ # Upload is opted out.
+ return 0
- if not config.get("is-googler", False):
- # Not googler.
- return 0
+ if not config.get("is-googler", False):
+ # Not googler.
+ return 0
- if config.get("countdown", 0) > 0:
- # Need to show message.
- ShowMessage(config["countdown"])
- # Only save config if something has meaningfully changed.
- SaveConfig(config)
- return 0
+ if config.get("countdown", 0) > 0:
+ # Need to show message.
+ ShowMessage(config["countdown"])
+ # Only save config if something has meaningfully changed.
+ SaveConfig(config)
+ return 0
- if len(sys.argv) == 1:
- # dry-run for debugging.
- print("upload ninjalog dry-run")
- return 0
+ if len(sys.argv) == 1:
+ # dry-run for debugging.
+ print("upload ninjalog dry-run")
+ return 0
- # Run upload script without wait.
- devnull = open(os.devnull, "w")
- creationnflags = 0
- if platform.system() == 'Windows':
- creationnflags = subprocess.CREATE_NEW_PROCESS_GROUP
- subprocess2.Popen([sys.executable, UPLOADER] + sys.argv[1:],
- stdout=devnull,
- stderr=devnull,
- creationflags=creationnflags)
+ # Run upload script without wait.
+ devnull = open(os.devnull, "w")
+ creationnflags = 0
+ if platform.system() == 'Windows':
+ creationnflags = subprocess.CREATE_NEW_PROCESS_GROUP
+ subprocess2.Popen([sys.executable, UPLOADER] + sys.argv[1:],
+ stdout=devnull,
+ stderr=devnull,
+ creationflags=creationnflags)
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
diff --git a/owners_client.py b/owners_client.py
index cf5fb3c199..5f948e87eb 100644
--- a/owners_client.py
+++ b/owners_client.py
@@ -10,7 +10,7 @@ import git_common
class OwnersClient(object):
- """Interact with OWNERS files in a repository.
+ """Interact with OWNERS files in a repository.
This class allows you to interact with OWNERS files in a repository both the
Gerrit Code-Owners plugin REST API, and the owners database implemented by
@@ -23,164 +23,167 @@ class OwnersClient(object):
All code should use this class to interact with OWNERS files instead of the
owners database in owners.py
"""
- # '*' means that everyone can approve.
- EVERYONE = '*'
+ # '*' means that everyone can approve.
+ EVERYONE = '*'
- # Possible status of a file.
- # - INSUFFICIENT_REVIEWERS: The path needs owners approval, but none of its
- # owners is currently a reviewer of the change.
- # - PENDING: An owner of this path has been added as reviewer, but approval
- # has not been given yet.
- # - APPROVED: The path has been approved by an owner.
- APPROVED = 'APPROVED'
- PENDING = 'PENDING'
- INSUFFICIENT_REVIEWERS = 'INSUFFICIENT_REVIEWERS'
+ # Possible status of a file.
+ # - INSUFFICIENT_REVIEWERS: The path needs owners approval, but none of its
+ # owners is currently a reviewer of the change.
+ # - PENDING: An owner of this path has been added as reviewer, but approval
+ # has not been given yet.
+ # - APPROVED: The path has been approved by an owner.
+ APPROVED = 'APPROVED'
+ PENDING = 'PENDING'
+ INSUFFICIENT_REVIEWERS = 'INSUFFICIENT_REVIEWERS'
- def ListOwners(self, path):
- """List all owners for a file.
+ def ListOwners(self, path):
+ """List all owners for a file.
The returned list is sorted so that better owners appear first.
"""
- raise Exception('Not implemented')
+ raise Exception('Not implemented')
- def BatchListOwners(self, paths):
- """List all owners for a group of files.
+ def BatchListOwners(self, paths):
+ """List all owners for a group of files.
Returns a dictionary {path: [owners]}.
"""
- with git_common.ScopedPool(kind='threads') as pool:
- return dict(pool.imap_unordered(
- lambda p: (p, self.ListOwners(p)), paths))
+ with git_common.ScopedPool(kind='threads') as pool:
+ return dict(
+ pool.imap_unordered(lambda p: (p, self.ListOwners(p)), paths))
- def GetFilesApprovalStatus(self, paths, approvers, reviewers):
- """Check the approval status for the given paths.
+ def GetFilesApprovalStatus(self, paths, approvers, reviewers):
+ """Check the approval status for the given paths.
Utility method to check for approval status when a change has not yet been
created, given reviewers and approvers.
See GetChangeApprovalStatus for description of the returned value.
"""
- approvers = set(approvers)
- if approvers:
- approvers.add(self.EVERYONE)
- reviewers = set(reviewers)
- if reviewers:
- reviewers.add(self.EVERYONE)
- status = {}
- owners_by_path = self.BatchListOwners(paths)
- for path, owners in owners_by_path.items():
- owners = set(owners)
- if owners.intersection(approvers):
- status[path] = self.APPROVED
- elif owners.intersection(reviewers):
- status[path] = self.PENDING
- else:
- status[path] = self.INSUFFICIENT_REVIEWERS
- return status
+ approvers = set(approvers)
+ if approvers:
+ approvers.add(self.EVERYONE)
+ reviewers = set(reviewers)
+ if reviewers:
+ reviewers.add(self.EVERYONE)
+ status = {}
+ owners_by_path = self.BatchListOwners(paths)
+ for path, owners in owners_by_path.items():
+ owners = set(owners)
+ if owners.intersection(approvers):
+ status[path] = self.APPROVED
+ elif owners.intersection(reviewers):
+ status[path] = self.PENDING
+ else:
+ status[path] = self.INSUFFICIENT_REVIEWERS
+ return status
- def ScoreOwners(self, paths, exclude=None):
- """Get sorted list of owners for the given paths."""
- if not paths:
- return []
- exclude = exclude or []
- owners = []
- queues = self.BatchListOwners(paths).values()
- for i in range(max(len(q) for q in queues)):
- for q in queues:
- if i < len(q) and q[i] not in owners and q[i] not in exclude:
- owners.append(q[i])
- return owners
+ def ScoreOwners(self, paths, exclude=None):
+ """Get sorted list of owners for the given paths."""
+ if not paths:
+ return []
+ exclude = exclude or []
+ owners = []
+ queues = self.BatchListOwners(paths).values()
+ for i in range(max(len(q) for q in queues)):
+ for q in queues:
+ if i < len(q) and q[i] not in owners and q[i] not in exclude:
+ owners.append(q[i])
+ return owners
- def SuggestOwners(self, paths, exclude=None):
- """Suggest a set of owners for the given paths."""
- exclude = exclude or []
+ def SuggestOwners(self, paths, exclude=None):
+ """Suggest a set of owners for the given paths."""
+ exclude = exclude or []
- paths_by_owner = {}
- owners_by_path = self.BatchListOwners(paths)
- for path, owners in owners_by_path.items():
- for owner in owners:
- paths_by_owner.setdefault(owner, set()).add(path)
+ paths_by_owner = {}
+ owners_by_path = self.BatchListOwners(paths)
+ for path, owners in owners_by_path.items():
+ for owner in owners:
+ paths_by_owner.setdefault(owner, set()).add(path)
- selected = []
- missing = set(paths)
- for owner in self.ScoreOwners(paths, exclude=exclude):
- missing_len = len(missing)
- missing.difference_update(paths_by_owner[owner])
- if missing_len > len(missing):
- selected.append(owner)
- if not missing:
- break
+ selected = []
+ missing = set(paths)
+ for owner in self.ScoreOwners(paths, exclude=exclude):
+ missing_len = len(missing)
+ missing.difference_update(paths_by_owner[owner])
+ if missing_len > len(missing):
+ selected.append(owner)
+ if not missing:
+ break
+
+ return selected
- return selected
class GerritClient(OwnersClient):
- """Implement OwnersClient using OWNERS REST API."""
- def __init__(self, host, project, branch):
- super(GerritClient, self).__init__()
+ """Implement OwnersClient using OWNERS REST API."""
+ def __init__(self, host, project, branch):
+ super(GerritClient, self).__init__()
- self._host = host
- self._project = project
- self._branch = branch
- self._owners_cache = {}
- self._best_owners_cache = {}
+ self._host = host
+ self._project = project
+ self._branch = branch
+ self._owners_cache = {}
+ self._best_owners_cache = {}
- # Seed used by Gerrit to shuffle code owners that have the same score. Can
- # be used to make the sort order stable across several requests, e.g. to get
- # the same set of random code owners for different file paths that have the
- # same code owners.
- self._seed = random.getrandbits(30)
+ # Seed used by Gerrit to shuffle code owners that have the same score.
+ # Can be used to make the sort order stable across several requests,
+ # e.g. to get the same set of random code owners for different file
+ # paths that have the same code owners.
+ self._seed = random.getrandbits(30)
- def _FetchOwners(self, path, cache, highest_score_only=False):
- # Always use slashes as separators.
- path = path.replace(os.sep, '/')
- if path not in cache:
- # GetOwnersForFile returns a list of account details sorted by order of
- # best reviewer for path. If owners have the same score, the order is
- # random, seeded by `self._seed`.
- data = gerrit_util.GetOwnersForFile(self._host,
- self._project,
- self._branch,
- path,
- resolve_all_users=False,
- highest_score_only=highest_score_only,
- seed=self._seed)
- cache[path] = [
- d['account']['email'] for d in data['code_owners']
- if 'account' in d and 'email' in d['account']
- ]
- # If owned_by_all_users is true, add everyone as an owner at the end of
- # the owners list.
- if data.get('owned_by_all_users', False):
- cache[path].append(self.EVERYONE)
- return cache[path]
+ def _FetchOwners(self, path, cache, highest_score_only=False):
+ # Always use slashes as separators.
+ path = path.replace(os.sep, '/')
+ if path not in cache:
+ # GetOwnersForFile returns a list of account details sorted by order
+ # of best reviewer for path. If owners have the same score, the
+ # order is random, seeded by `self._seed`.
+ data = gerrit_util.GetOwnersForFile(
+ self._host,
+ self._project,
+ self._branch,
+ path,
+ resolve_all_users=False,
+ highest_score_only=highest_score_only,
+ seed=self._seed)
+ cache[path] = [
+ d['account']['email'] for d in data['code_owners']
+ if 'account' in d and 'email' in d['account']
+ ]
+ # If owned_by_all_users is true, add everyone as an owner at the end
+ # of the owners list.
+ if data.get('owned_by_all_users', False):
+ cache[path].append(self.EVERYONE)
+ return cache[path]
- def ListOwners(self, path):
- return self._FetchOwners(path, self._owners_cache)
+ def ListOwners(self, path):
+ return self._FetchOwners(path, self._owners_cache)
- def ListBestOwners(self, path):
- return self._FetchOwners(path,
- self._best_owners_cache,
- highest_score_only=True)
+ def ListBestOwners(self, path):
+ return self._FetchOwners(path,
+ self._best_owners_cache,
+ highest_score_only=True)
- def BatchListBestOwners(self, paths):
- """List only the higest-scoring owners for a group of files.
+ def BatchListBestOwners(self, paths):
+        """List only the highest-scoring owners for a group of files.
Returns a dictionary {path: [owners]}.
"""
- with git_common.ScopedPool(kind='threads') as pool:
- return dict(
- pool.imap_unordered(lambda p: (p, self.ListBestOwners(p)), paths))
+ with git_common.ScopedPool(kind='threads') as pool:
+ return dict(
+ pool.imap_unordered(lambda p: (p, self.ListBestOwners(p)),
+ paths))
def GetCodeOwnersClient(host, project, branch):
- """Get a new OwnersClient.
+ """Get a new OwnersClient.
Uses GerritClient and raises an exception if code-owners plugin is not
available."""
- if gerrit_util.IsCodeOwnersEnabledOnHost(host):
- return GerritClient(host, project, branch)
- raise Exception(
- 'code-owners plugin is not enabled. Ask your host admin to enable it '
- 'on %s. Read more about code-owners at '
- 'https://chromium-review.googlesource.com/'
- 'plugins/code-owners/Documentation/index.html.' % host)
+ if gerrit_util.IsCodeOwnersEnabledOnHost(host):
+ return GerritClient(host, project, branch)
+ raise Exception(
+ 'code-owners plugin is not enabled. Ask your host admin to enable it '
+ 'on %s. Read more about code-owners at '
+ 'https://chromium-review.googlesource.com/'
+ 'plugins/code-owners/Documentation/index.html.' % host)
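
To illustrate the ordering that ScoreOwners produces above, a toy round-robin
merge over hypothetical per-file owner lists; in the real method the lists come
from BatchListOwners:

    queues = [
        ['alice', 'bob'],    # owners of one path, best reviewer first
        ['carol', 'alice'],  # owners of another path, best reviewer first
    ]
    exclude, owners = set(), []
    for i in range(max(len(q) for q in queues)):
        for q in queues:
            if i < len(q) and q[i] not in owners and q[i] not in exclude:
                owners.append(q[i])
    print(owners)  # ['alice', 'carol', 'bob']
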
diff --git a/owners_finder.py b/owners_finder.py
index d43145ae95..3d6096f43d 100644
--- a/owners_finder.py
+++ b/owners_finder.py
@@ -1,7 +1,6 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Interactive tool for finding reviewers/owners for a change."""
from __future__ import print_function
@@ -9,336 +8,344 @@ from __future__ import print_function
import os
import copy
-
import gclient_utils
def first(iterable):
- for element in iterable:
- return element
+ for element in iterable:
+ return element
class OwnersFinder(object):
- COLOR_LINK = '\033[4m'
- COLOR_BOLD = '\033[1;32m'
- COLOR_GREY = '\033[0;37m'
- COLOR_RESET = '\033[0m'
+ COLOR_LINK = '\033[4m'
+ COLOR_BOLD = '\033[1;32m'
+ COLOR_GREY = '\033[0;37m'
+ COLOR_RESET = '\033[0m'
- indentation = 0
+ indentation = 0
- def __init__(self, files, author, reviewers, owners_client,
- email_postfix='@chromium.org',
- disable_color=False,
- ignore_author=False):
- self.email_postfix = email_postfix
+ def __init__(self,
+ files,
+ author,
+ reviewers,
+ owners_client,
+ email_postfix='@chromium.org',
+ disable_color=False,
+ ignore_author=False):
+ self.email_postfix = email_postfix
- if os.name == 'nt' or disable_color:
- self.COLOR_LINK = ''
- self.COLOR_BOLD = ''
- self.COLOR_GREY = ''
- self.COLOR_RESET = ''
+ if os.name == 'nt' or disable_color:
+ self.COLOR_LINK = ''
+ self.COLOR_BOLD = ''
+ self.COLOR_GREY = ''
+ self.COLOR_RESET = ''
- self.author = author
+ self.author = author
- filtered_files = files
+ filtered_files = files
- reviewers = list(reviewers)
- if author and not ignore_author:
- reviewers.append(author)
+ reviewers = list(reviewers)
+ if author and not ignore_author:
+ reviewers.append(author)
- # Eliminate files that existing reviewers can review.
- self.owners_client = owners_client
- approval_status = self.owners_client.GetFilesApprovalStatus(
- filtered_files, reviewers, [])
- filtered_files = [
- f for f in filtered_files
- if approval_status[f] != self.owners_client.APPROVED]
+ # Eliminate files that existing reviewers can review.
+ self.owners_client = owners_client
+ approval_status = self.owners_client.GetFilesApprovalStatus(
+ filtered_files, reviewers, [])
+ filtered_files = [
+ f for f in filtered_files
+ if approval_status[f] != self.owners_client.APPROVED
+ ]
- # If some files are eliminated.
- if len(filtered_files) != len(files):
- files = filtered_files
+ # If some files are eliminated.
+ if len(filtered_files) != len(files):
+ files = filtered_files
- self.files_to_owners = self.owners_client.BatchListOwners(files)
+ self.files_to_owners = self.owners_client.BatchListOwners(files)
- self.owners_to_files = {}
- self._map_owners_to_files()
+ self.owners_to_files = {}
+ self._map_owners_to_files()
- self.original_files_to_owners = copy.deepcopy(self.files_to_owners)
+ self.original_files_to_owners = copy.deepcopy(self.files_to_owners)
- # This is the queue that will be shown in the interactive questions.
- # It is initially sorted by the score in descending order. In the
- # interactive questions a user can choose to "defer" its decision, then the
- # owner will be put to the end of the queue and shown later.
- self.owners_queue = []
+ # This is the queue that will be shown in the interactive questions.
+ # It is initially sorted by the score in descending order. In the
+ # interactive questions a user can choose to "defer" its decision, then
+ # the owner will be put to the end of the queue and shown later.
+ self.owners_queue = []
- self.unreviewed_files = set()
- self.reviewed_by = {}
- self.selected_owners = set()
- self.deselected_owners = set()
- self.reset()
+ self.unreviewed_files = set()
+ self.reviewed_by = {}
+ self.selected_owners = set()
+ self.deselected_owners = set()
+ self.reset()
- def run(self):
- self.reset()
- while self.owners_queue and self.unreviewed_files:
- owner = self.owners_queue[0]
+ def run(self):
+ self.reset()
+ while self.owners_queue and self.unreviewed_files:
+ owner = self.owners_queue[0]
- if (owner in self.selected_owners) or (owner in self.deselected_owners):
- continue
+ if (owner in self.selected_owners) or (owner
+ in self.deselected_owners):
+ continue
- if not any((file_name in self.unreviewed_files)
- for file_name in self.owners_to_files[owner]):
- self.deselect_owner(owner)
- continue
+ if not any((file_name in self.unreviewed_files)
+ for file_name in self.owners_to_files[owner]):
+ self.deselect_owner(owner)
+ continue
- self.print_info(owner)
+ self.print_info(owner)
- while True:
- inp = self.input_command(owner)
- if inp in ('y', 'yes'):
- self.select_owner(owner)
- break
+ while True:
+ inp = self.input_command(owner)
+ if inp in ('y', 'yes'):
+ self.select_owner(owner)
+ break
- if inp in ('n', 'no'):
- self.deselect_owner(owner)
- break
+ if inp in ('n', 'no'):
+ self.deselect_owner(owner)
+ break
- if inp in ('', 'd', 'defer'):
- self.owners_queue.append(self.owners_queue.pop(0))
- break
+ if inp in ('', 'd', 'defer'):
+ self.owners_queue.append(self.owners_queue.pop(0))
+ break
- if inp in ('f', 'files'):
- self.list_files()
- break
+ if inp in ('f', 'files'):
+ self.list_files()
+ break
- if inp in ('o', 'owners'):
- self.list_owners(self.owners_queue)
- break
+ if inp in ('o', 'owners'):
+ self.list_owners(self.owners_queue)
+ break
- if inp in ('p', 'pick'):
- self.pick_owner(gclient_utils.AskForData('Pick an owner: '))
- break
+ if inp in ('p', 'pick'):
+ self.pick_owner(gclient_utils.AskForData('Pick an owner: '))
+ break
- if inp.startswith('p ') or inp.startswith('pick '):
- self.pick_owner(inp.split(' ', 2)[1].strip())
- break
+ if inp.startswith('p ') or inp.startswith('pick '):
+ self.pick_owner(inp.split(' ', 2)[1].strip())
+ break
- if inp in ('r', 'restart'):
- self.reset()
- break
+ if inp in ('r', 'restart'):
+ self.reset()
+ break
- if inp in ('q', 'quit'):
- # Exit with error
- return 1
+ if inp in ('q', 'quit'):
+ # Exit with error
+ return 1
- self.print_result()
- return 0
+ self.print_result()
+ return 0
- def _map_owners_to_files(self):
- for file_name in self.files_to_owners:
- for owner in self.files_to_owners[file_name]:
- self.owners_to_files.setdefault(owner, set())
- self.owners_to_files[owner].add(file_name)
+ def _map_owners_to_files(self):
+ for file_name in self.files_to_owners:
+ for owner in self.files_to_owners[file_name]:
+ self.owners_to_files.setdefault(owner, set())
+ self.owners_to_files[owner].add(file_name)
- def reset(self):
- self.files_to_owners = copy.deepcopy(self.original_files_to_owners)
- self.unreviewed_files = set(self.files_to_owners.keys())
- self.reviewed_by = {}
- self.selected_owners = set()
- self.deselected_owners = set()
+ def reset(self):
+ self.files_to_owners = copy.deepcopy(self.original_files_to_owners)
+ self.unreviewed_files = set(self.files_to_owners.keys())
+ self.reviewed_by = {}
+ self.selected_owners = set()
+ self.deselected_owners = set()
- # Randomize owners' names so that if many reviewers have identical scores
- # they will be randomly ordered to avoid bias.
- owners = list(self.owners_client.ScoreOwners(self.files_to_owners.keys()))
- if self.author and self.author in owners:
- owners.remove(self.author)
- self.owners_queue = owners
- self.find_mandatory_owners()
+ # Randomize owners' names so that if many reviewers have identical
+ # scores they will be randomly ordered to avoid bias.
+ owners = list(
+ self.owners_client.ScoreOwners(self.files_to_owners.keys()))
+ if self.author and self.author in owners:
+ owners.remove(self.author)
+ self.owners_queue = owners
+ self.find_mandatory_owners()
- def select_owner(self, owner, findMandatoryOwners=True):
- if owner in self.selected_owners or owner in self.deselected_owners\
- or not (owner in self.owners_queue):
- return
- self.writeln('Selected: ' + owner)
- self.owners_queue.remove(owner)
- self.selected_owners.add(owner)
- for file_name in filter(
- lambda file_name: file_name in self.unreviewed_files,
- self.owners_to_files[owner]):
- self.unreviewed_files.remove(file_name)
- self.reviewed_by[file_name] = owner
- if findMandatoryOwners:
- self.find_mandatory_owners()
+ def select_owner(self, owner, findMandatoryOwners=True):
+ if owner in self.selected_owners or owner in self.deselected_owners\
+ or not (owner in self.owners_queue):
+ return
+ self.writeln('Selected: ' + owner)
+ self.owners_queue.remove(owner)
+ self.selected_owners.add(owner)
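+        # Mark the owner's still-unreviewed files as reviewed by them.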
+ for file_name in filter(
+ lambda file_name: file_name in self.unreviewed_files,
+ self.owners_to_files[owner]):
+ self.unreviewed_files.remove(file_name)
+ self.reviewed_by[file_name] = owner
+ if findMandatoryOwners:
+ self.find_mandatory_owners()
- def deselect_owner(self, owner, findMandatoryOwners=True):
- if owner in self.selected_owners or owner in self.deselected_owners\
- or not (owner in self.owners_queue):
- return
- self.writeln('Deselected: ' + owner)
- self.owners_queue.remove(owner)
- self.deselected_owners.add(owner)
- for file_name in self.owners_to_files[owner] & self.unreviewed_files:
- self.files_to_owners[file_name].remove(owner)
- if findMandatoryOwners:
- self.find_mandatory_owners()
+ def deselect_owner(self, owner, findMandatoryOwners=True):
+ if owner in self.selected_owners or owner in self.deselected_owners\
+ or not (owner in self.owners_queue):
+ return
+ self.writeln('Deselected: ' + owner)
+ self.owners_queue.remove(owner)
+ self.deselected_owners.add(owner)
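+        # Remove the owner as a candidate for their still-unreviewed files.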
+ for file_name in self.owners_to_files[owner] & self.unreviewed_files:
+ self.files_to_owners[file_name].remove(owner)
+ if findMandatoryOwners:
+ self.find_mandatory_owners()
- def find_mandatory_owners(self):
- continues = True
- for owner in self.owners_queue:
- if owner in self.selected_owners:
- continue
- if owner in self.deselected_owners:
- continue
- if len(self.owners_to_files[owner] & self.unreviewed_files) == 0:
- self.deselect_owner(owner, False)
-
- while continues:
- continues = False
- for file_name in filter(
- lambda file_name: len(self.files_to_owners[file_name]) == 1,
- self.unreviewed_files):
- owner = first(self.files_to_owners[file_name])
- self.select_owner(owner, False)
+ def find_mandatory_owners(self):
continues = True
- break
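+        # Deselect queued owners whose files are already fully covered.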
+ for owner in self.owners_queue:
+ if owner in self.selected_owners:
+ continue
+ if owner in self.deselected_owners:
+ continue
+ if len(self.owners_to_files[owner] & self.unreviewed_files) == 0:
+ self.deselect_owner(owner, False)
- def print_file_info(self, file_name, except_owner=''):
- if file_name not in self.unreviewed_files:
- self.writeln(self.greyed(file_name +
- ' (by ' +
- self.bold_name(self.reviewed_by[file_name]) +
- ')'))
- else:
- if len(self.files_to_owners[file_name]) <= 3:
- other_owners = []
- for ow in self.files_to_owners[file_name]:
- if ow != except_owner:
- other_owners.append(self.bold_name(ow))
- self.writeln(file_name +
- ' [' + (', '.join(other_owners)) + ']')
- else:
- self.writeln(file_name + ' [' +
- self.bold(str(len(self.files_to_owners[file_name]))) +
- ']')
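+        # Auto-select owners who are the sole remaining reviewer for a file.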
+ while continues:
+ continues = False
+ for file_name in filter(
+ lambda file_name: len(self.files_to_owners[file_name]) == 1,
+ self.unreviewed_files):
+ owner = first(self.files_to_owners[file_name])
+ self.select_owner(owner, False)
+ continues = True
+ break
- def print_file_info_detailed(self, file_name):
- self.writeln(file_name)
- self.indent()
- for ow in sorted(self.files_to_owners[file_name]):
- if ow in self.deselected_owners:
- self.writeln(self.bold_name(self.greyed(ow)))
- elif ow in self.selected_owners:
- self.writeln(self.bold_name(self.greyed(ow)))
- else:
- self.writeln(self.bold_name(ow))
- self.unindent()
+ def print_file_info(self, file_name, except_owner=''):
+ if file_name not in self.unreviewed_files:
+ self.writeln(
+ self.greyed(file_name + ' (by ' +
+ self.bold_name(self.reviewed_by[file_name]) + ')'))
+ else:
+ if len(self.files_to_owners[file_name]) <= 3:
+ other_owners = []
+ for ow in self.files_to_owners[file_name]:
+ if ow != except_owner:
+ other_owners.append(self.bold_name(ow))
+ self.writeln(file_name + ' [' + (', '.join(other_owners)) + ']')
+ else:
+ self.writeln(
+ file_name + ' [' +
+ self.bold(str(len(self.files_to_owners[file_name]))) + ']')
- def print_owned_files_for(self, owner):
- # Print owned files
- self.writeln(self.bold_name(owner))
- self.writeln(self.bold_name(owner) + ' owns ' +
- str(len(self.owners_to_files[owner])) + ' file(s):')
- self.indent()
- for file_name in sorted(self.owners_to_files[owner]):
- self.print_file_info(file_name, owner)
- self.unindent()
- self.writeln()
-
- def list_owners(self, owners_queue):
- if (len(self.owners_to_files) - len(self.deselected_owners) -
- len(self.selected_owners)) > 3:
- for ow in owners_queue:
- if ow not in self.deselected_owners and ow not in self.selected_owners:
- self.writeln(self.bold_name(ow))
- else:
- for ow in owners_queue:
- if ow not in self.deselected_owners and ow not in self.selected_owners:
- self.writeln()
- self.print_owned_files_for(ow)
-
- def list_files(self):
- self.indent()
- if len(self.unreviewed_files) > 5:
- for file_name in sorted(self.unreviewed_files):
- self.print_file_info(file_name)
- else:
- for file_name in self.unreviewed_files:
- self.print_file_info_detailed(file_name)
- self.unindent()
-
- def pick_owner(self, ow):
- # Allowing to omit domain suffixes
- if ow not in self.owners_to_files:
- if ow + self.email_postfix in self.owners_to_files:
- ow += self.email_postfix
-
- if ow not in self.owners_to_files:
- self.writeln('You cannot pick ' + self.bold_name(ow) + ' manually. ' +
- 'It\'s an invalid name or not related to the change list.')
- return False
-
- if ow in self.selected_owners:
- self.writeln('You cannot pick ' + self.bold_name(ow) + ' manually. ' +
- 'It\'s already selected.')
- return False
-
- if ow in self.deselected_owners:
- self.writeln('You cannot pick ' + self.bold_name(ow) + ' manually.' +
- 'It\'s already unselected.')
- return False
-
- self.select_owner(ow)
- return True
-
- def print_result(self):
- # Print results
- self.writeln()
- self.writeln()
- if len(self.selected_owners) == 0:
- self.writeln('This change list already has owner-reviewers for all '
- 'files.')
- self.writeln('Use --ignore-current if you want to ignore them.')
- else:
- self.writeln('** You selected these owners **')
- self.writeln()
- for owner in self.selected_owners:
- self.writeln(self.bold_name(owner) + ':')
+ def print_file_info_detailed(self, file_name):
+ self.writeln(file_name)
self.indent()
- for file_name in sorted(self.owners_to_files[owner]):
- self.writeln(file_name)
+ for ow in sorted(self.files_to_owners[file_name]):
+ if ow in self.deselected_owners:
+ self.writeln(self.bold_name(self.greyed(ow)))
+ elif ow in self.selected_owners:
+ self.writeln(self.bold_name(self.greyed(ow)))
+ else:
+ self.writeln(self.bold_name(ow))
self.unindent()
- def bold(self, text):
- return self.COLOR_BOLD + text + self.COLOR_RESET
+ def print_owned_files_for(self, owner):
+ # Print owned files
+ self.writeln(self.bold_name(owner))
+ self.writeln(
+ self.bold_name(owner) + ' owns ' +
+ str(len(self.owners_to_files[owner])) + ' file(s):')
+ self.indent()
+ for file_name in sorted(self.owners_to_files[owner]):
+ self.print_file_info(file_name, owner)
+ self.unindent()
+ self.writeln()
- def bold_name(self, name):
- return (self.COLOR_BOLD +
- name.replace(self.email_postfix, '') + self.COLOR_RESET)
+ def list_owners(self, owners_queue):
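+        # With many owners left, list only names; otherwise show their files.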
+ if (len(self.owners_to_files) - len(self.deselected_owners) -
+ len(self.selected_owners)) > 3:
+ for ow in owners_queue:
+ if (ow not in self.deselected_owners
+ and ow not in self.selected_owners):
+ self.writeln(self.bold_name(ow))
+ else:
+ for ow in owners_queue:
+ if (ow not in self.deselected_owners
+ and ow not in self.selected_owners):
+ self.writeln()
+ self.print_owned_files_for(ow)
- def greyed(self, text):
- return self.COLOR_GREY + text + self.COLOR_RESET
+ def list_files(self):
+ self.indent()
+ if len(self.unreviewed_files) > 5:
+ for file_name in sorted(self.unreviewed_files):
+ self.print_file_info(file_name)
+ else:
+ for file_name in self.unreviewed_files:
+ self.print_file_info_detailed(file_name)
+ self.unindent()
- def indent(self):
- self.indentation += 1
+ def pick_owner(self, ow):
+ # Allowing to omit domain suffixes
+ if ow not in self.owners_to_files:
+ if ow + self.email_postfix in self.owners_to_files:
+ ow += self.email_postfix
- def unindent(self):
- self.indentation -= 1
+ if ow not in self.owners_to_files:
+ self.writeln(
+ 'You cannot pick ' + self.bold_name(ow) + ' manually. ' +
+ 'It\'s an invalid name or not related to the change list.')
+ return False
- def print_indent(self):
- return ' ' * self.indentation
+ if ow in self.selected_owners:
+ self.writeln('You cannot pick ' + self.bold_name(ow) +
+ ' manually. ' + 'It\'s already selected.')
+ return False
- def writeln(self, text=''):
- print(self.print_indent() + text)
+ if ow in self.deselected_owners:
+ self.writeln('You cannot pick ' + self.bold_name(ow) +
+                         ' manually. ' + 'It\'s already unselected.')
+ return False
- def hr(self):
- self.writeln('=====================')
+ self.select_owner(ow)
+ return True
- def print_info(self, owner):
- self.hr()
- self.writeln(
- self.bold(str(len(self.unreviewed_files))) + ' file(s) left.')
- self.print_owned_files_for(owner)
+ def print_result(self):
+ # Print results
+ self.writeln()
+ self.writeln()
+ if len(self.selected_owners) == 0:
+ self.writeln('This change list already has owner-reviewers for all '
+ 'files.')
+ self.writeln('Use --ignore-current if you want to ignore them.')
+ else:
+ self.writeln('** You selected these owners **')
+ self.writeln()
+ for owner in self.selected_owners:
+ self.writeln(self.bold_name(owner) + ':')
+ self.indent()
+ for file_name in sorted(self.owners_to_files[owner]):
+ self.writeln(file_name)
+ self.unindent()
- def input_command(self, owner):
- self.writeln('Add ' + self.bold_name(owner) + ' as your reviewer? ')
- return gclient_utils.AskForData(
- '[yes/no/Defer/pick/files/owners/quit/restart]: ').lower()
+ def bold(self, text):
+ return self.COLOR_BOLD + text + self.COLOR_RESET
+
+ def bold_name(self, name):
+ return (self.COLOR_BOLD + name.replace(self.email_postfix, '') +
+ self.COLOR_RESET)
+
+ def greyed(self, text):
+ return self.COLOR_GREY + text + self.COLOR_RESET
+
+ def indent(self):
+ self.indentation += 1
+
+ def unindent(self):
+ self.indentation -= 1
+
+ def print_indent(self):
+ return ' ' * self.indentation
+
+ def writeln(self, text=''):
+ print(self.print_indent() + text)
+
+ def hr(self):
+ self.writeln('=====================')
+
+ def print_info(self, owner):
+ self.hr()
+ self.writeln(
+ self.bold(str(len(self.unreviewed_files))) + ' file(s) left.')
+ self.print_owned_files_for(owner)
+
+ def input_command(self, owner):
+ self.writeln('Add ' + self.bold_name(owner) + ' as your reviewer? ')
+ return gclient_utils.AskForData(
+ '[yes/no/Defer/pick/files/owners/quit/restart]: ').lower()
diff --git a/post_build_ninja_summary.py b/post_build_ninja_summary.py
index 8405639598..a36cb12beb 100755
--- a/post_build_ninja_summary.py
+++ b/post_build_ninja_summary.py
@@ -2,7 +2,6 @@
# Copyright (c) 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Summarize the last ninja build, invoked with ninja's -C syntax.
This script is designed to be automatically run after each ninja build in
@@ -57,7 +56,6 @@ import fnmatch
import os
import sys
-
# The number of long build times to report:
long_count = 10
# The number of long times by extension to report
@@ -96,8 +94,8 @@ class Target:
# Allow for modest floating-point errors
epsilon = 0.000002
if (self.weighted_duration > self.Duration() + epsilon):
- print('%s > %s?' % (self.weighted_duration, self.Duration()))
- assert(self.weighted_duration <= self.Duration() + epsilon)
+ print('%s > %s?' % (self.weighted_duration, self.Duration()))
+ assert (self.weighted_duration <= self.Duration() + epsilon)
return self.weighted_duration
def DescribeTargets(self):
@@ -108,7 +106,7 @@ class Target:
result = ', '.join(self.targets)
max_length = 65
if len(result) > max_length:
- result = result[:max_length] + '...'
+ result = result[:max_length] + '...'
return result
@@ -125,10 +123,10 @@ def ReadTargets(log, show_all):
for line in log:
parts = line.strip().split('\t')
if len(parts) != 5:
- # If ninja.exe is rudely halted then the .ninja_log file may be
- # corrupt. Silently continue.
- continue
- start, end, _, name, cmdhash = parts # Ignore restat.
+ # If ninja.exe is rudely halted then the .ninja_log file may be
+ # corrupt. Silently continue.
+ continue
+ start, end, _, name, cmdhash = parts # Ignore restat.
# Convert from integral milliseconds to float seconds.
start = int(start) / 1000.0
end = int(end) / 1000.0
@@ -142,68 +140,68 @@ def ReadTargets(log, show_all):
targets_dict = {}
target = None
if cmdhash in targets_dict:
- target = targets_dict[cmdhash]
- if not show_all and (target.start != start or target.end != end):
- # If several builds in a row just run one or two build steps then
- # the end times may not go backwards so the last build may not be
- # detected as such. However in many cases there will be a build step
- # repeated in the two builds and the changed start/stop points for
- # that command, identified by the hash, can be used to detect and
- # reset the target dictionary.
- targets_dict = {}
- target = None
+ target = targets_dict[cmdhash]
+ if not show_all and (target.start != start or target.end != end):
+ # If several builds in a row just run one or two build steps
+ # then the end times may not go backwards so the last build may
+ # not be detected as such. However in many cases there will be a
+ # build step repeated in the two builds and the changed
+ # start/stop points for that command, identified by the hash,
+ # can be used to detect and reset the target dictionary.
+ targets_dict = {}
+ target = None
if not target:
- targets_dict[cmdhash] = target = Target(start, end)
+ targets_dict[cmdhash] = target = Target(start, end)
last_end_seen = end
target.targets.append(name)
return list(targets_dict.values())
def GetExtension(target, extra_patterns):
- """Return the file extension that best represents a target.
+ """Return the file extension that best represents a target.
For targets that generate multiple outputs it is important to return a
consistent 'canonical' extension. Ultimately the goal is to group build steps
by type."""
- for output in target.targets:
- if extra_patterns:
- for fn_pattern in extra_patterns.split(';'):
- if fnmatch.fnmatch(output, '*' + fn_pattern + '*'):
- return fn_pattern
- # Not a true extension, but a good grouping.
- if output.endswith('type_mappings'):
- extension = 'type_mappings'
- break
+ for output in target.targets:
+ if extra_patterns:
+ for fn_pattern in extra_patterns.split(';'):
+ if fnmatch.fnmatch(output, '*' + fn_pattern + '*'):
+ return fn_pattern
+ # Not a true extension, but a good grouping.
+ if output.endswith('type_mappings'):
+ extension = 'type_mappings'
+ break
- # Capture two extensions if present. For example: file.javac.jar should be
- # distinguished from file.interface.jar.
- root, ext1 = os.path.splitext(output)
- _, ext2 = os.path.splitext(root)
- extension = ext2 + ext1 # Preserve the order in the file name.
+ # Capture two extensions if present. For example: file.javac.jar should
+ # be distinguished from file.interface.jar.
+ root, ext1 = os.path.splitext(output)
+ _, ext2 = os.path.splitext(root)
+ extension = ext2 + ext1 # Preserve the order in the file name.
- if len(extension) == 0:
- extension = '(no extension found)'
+ if len(extension) == 0:
+ extension = '(no extension found)'
- if ext1 in ['.pdb', '.dll', '.exe']:
- extension = 'PEFile (linking)'
- # Make sure that .dll and .exe are grouped together and that the
- # .dll.lib files don't cause these to be listed as libraries
- break
- if ext1 in ['.so', '.TOC']:
- extension = '.so (linking)'
- # Attempt to identify linking, avoid identifying as '.TOC'
- break
- # Make sure .obj files don't get categorized as mojo files
- if ext1 in ['.obj', '.o']:
- break
- # Jars are the canonical output of java targets.
- if ext1 == '.jar':
- break
- # Normalize all mojo related outputs to 'mojo'.
- if output.count('.mojom') > 0:
- extension = 'mojo'
- break
- return extension
+ if ext1 in ['.pdb', '.dll', '.exe']:
+ extension = 'PEFile (linking)'
+ # Make sure that .dll and .exe are grouped together and that the
+ # .dll.lib files don't cause these to be listed as libraries
+ break
+ if ext1 in ['.so', '.TOC']:
+ extension = '.so (linking)'
+ # Attempt to identify linking, avoid identifying as '.TOC'
+ break
+ # Make sure .obj files don't get categorized as mojo files
+ if ext1 in ['.obj', '.o']:
+ break
+ # Jars are the canonical output of java targets.
+ if ext1 == '.jar':
+ break
+ # Normalize all mojo related outputs to 'mojo'.
+ if output.count('.mojom') > 0:
+ extension = 'mojo'
+ break
+ return extension
def SummarizeEntries(entries, extra_step_types, elapsed_time_sorting):
@@ -221,13 +219,13 @@ def SummarizeEntries(entries, extra_step_types, elapsed_time_sorting):
latest = 0
total_cpu_time = 0
for target in entries:
- if earliest < 0 or target.start < earliest:
- earliest = target.start
- if target.end > latest:
- latest = target.end
- total_cpu_time += target.Duration()
- task_start_stop_times.append((target.start, 'start', target))
- task_start_stop_times.append((target.end, 'stop', target))
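+        # Track the build window, total CPU time, and start/stop events.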
+ if earliest < 0 or target.start < earliest:
+ earliest = target.start
+ if target.end > latest:
+ latest = target.end
+ total_cpu_time += target.Duration()
+ task_start_stop_times.append((target.start, 'start', target))
+ task_start_stop_times.append((target.end, 'stop', target))
length = latest - earliest
weighted_total = 0.0
@@ -247,40 +245,41 @@ def SummarizeEntries(entries, extra_step_types, elapsed_time_sorting):
last_weighted_time = 0.0
# Scan all start/stop events.
for event in task_start_stop_times:
- time, action_name, target = event
- # Accumulate weighted time up to now.
- num_running = len(running_tasks)
- if num_running > 0:
- # Update the total weighted time up to this moment.
- last_weighted_time += (time - last_time) / float(num_running)
- if action_name == 'start':
- # Record the total weighted task time when this task starts.
- running_tasks[target] = last_weighted_time
- if action_name == 'stop':
- # Record the change in the total weighted task time while this task ran.
- weighted_duration = last_weighted_time - running_tasks[target]
- target.SetWeightedDuration(weighted_duration)
- weighted_total += weighted_duration
- del running_tasks[target]
- last_time = time
- assert(len(running_tasks) == 0)
+ time, action_name, target = event
+ # Accumulate weighted time up to now.
+ num_running = len(running_tasks)
+ if num_running > 0:
+ # Update the total weighted time up to this moment.
+ last_weighted_time += (time - last_time) / float(num_running)
+ if action_name == 'start':
+ # Record the total weighted task time when this task starts.
+ running_tasks[target] = last_weighted_time
+ if action_name == 'stop':
+ # Record the change in the total weighted task time while this task
+ # ran.
+ weighted_duration = last_weighted_time - running_tasks[target]
+ target.SetWeightedDuration(weighted_duration)
+ weighted_total += weighted_duration
+ del running_tasks[target]
+ last_time = time
+ assert (len(running_tasks) == 0)
# Warn if the sum of weighted times is off by more than half a second.
if abs(length - weighted_total) > 500:
- print('Warning: Possible corrupt ninja log, results may be '
- 'untrustworthy. Length = %.3f, weighted total = %.3f' % (
- length, weighted_total))
+ print('Warning: Possible corrupt ninja log, results may be '
+ 'untrustworthy. Length = %.3f, weighted total = %.3f' %
+ (length, weighted_total))
# Print the slowest build steps:
print(' Longest build steps:')
if elapsed_time_sorting:
- entries.sort(key=lambda x: x.Duration())
+ entries.sort(key=lambda x: x.Duration())
else:
- entries.sort(key=lambda x: x.WeightedDuration())
+ entries.sort(key=lambda x: x.WeightedDuration())
for target in entries[-long_count:]:
- print(' %8.1f weighted s to build %s (%.1f s elapsed time)' % (
- target.WeightedDuration(),
- target.DescribeTargets(), target.Duration()))
+ print(' %8.1f weighted s to build %s (%.1f s elapsed time)' %
+ (target.WeightedDuration(), target.DescribeTargets(),
+ target.Duration()))
# Sum up the time by file extension/type of the output file
count_by_ext = {}
@@ -288,38 +287,39 @@ def SummarizeEntries(entries, extra_step_types, elapsed_time_sorting):
weighted_time_by_ext = {}
# Scan through all of the targets to build up per-extension statistics.
for target in entries:
- extension = GetExtension(target, extra_step_types)
- time_by_ext[extension] = time_by_ext.get(extension, 0) + target.Duration()
- weighted_time_by_ext[extension] = weighted_time_by_ext.get(extension,
- 0) + target.WeightedDuration()
- count_by_ext[extension] = count_by_ext.get(extension, 0) + 1
+ extension = GetExtension(target, extra_step_types)
+ time_by_ext[extension] = time_by_ext.get(extension,
+ 0) + target.Duration()
+ weighted_time_by_ext[extension] = weighted_time_by_ext.get(
+ extension, 0) + target.WeightedDuration()
+ count_by_ext[extension] = count_by_ext.get(extension, 0) + 1
print(' Time by build-step type:')
# Copy to a list with extension name and total time swapped, to (time, ext)
if elapsed_time_sorting:
- weighted_time_by_ext_sorted = sorted((y, x) for (x, y) in
- time_by_ext.items())
+ weighted_time_by_ext_sorted = sorted(
+ (y, x) for (x, y) in time_by_ext.items())
else:
- weighted_time_by_ext_sorted = sorted((y, x) for (x, y) in
- weighted_time_by_ext.items())
+ weighted_time_by_ext_sorted = sorted(
+ (y, x) for (x, y) in weighted_time_by_ext.items())
# Print the slowest build target types:
for time, extension in weighted_time_by_ext_sorted[-long_ext_count:]:
- print(' %8.1f s weighted time to generate %d %s files '
- '(%1.1f s elapsed time sum)' % (time, count_by_ext[extension],
- extension, time_by_ext[extension]))
+ print(
+ ' %8.1f s weighted time to generate %d %s files '
+ '(%1.1f s elapsed time sum)' %
+ (time, count_by_ext[extension], extension, time_by_ext[extension]))
print(' %.1f s weighted time (%.1f s elapsed time sum, %1.1fx '
- 'parallelism)' % (length, total_cpu_time,
- total_cpu_time * 1.0 / length))
- print(' %d build steps completed, average of %1.2f/s' % (
- len(entries), len(entries) / (length)))
+ 'parallelism)' %
+ (length, total_cpu_time, total_cpu_time * 1.0 / length))
+ print(' %d build steps completed, average of %1.2f/s' %
+ (len(entries), len(entries) / (length)))
def main():
log_file = '.ninja_log'
parser = argparse.ArgumentParser()
- parser.add_argument('-C', dest='build_directory',
- help='Build directory.')
+ parser.add_argument('-C', dest='build_directory', help='Build directory.')
parser.add_argument(
'-s',
'--step-types',
@@ -338,22 +338,23 @@ def main():
if args.log_file:
log_file = args.log_file
if not args.step_types:
- # Offer a convenient way to add extra step types automatically, including
- # when this script is run by autoninja. get() returns None if the variable
- # isn't set.
- args.step_types = os.environ.get('chromium_step_types')
+ # Offer a convenient way to add extra step types automatically,
+ # including when this script is run by autoninja. get() returns None if
+ # the variable isn't set.
+ args.step_types = os.environ.get('chromium_step_types')
if args.step_types:
- # Make room for the extra build types.
- global long_ext_count
- long_ext_count += len(args.step_types.split(';'))
+ # Make room for the extra build types.
+ global long_ext_count
+ long_ext_count += len(args.step_types.split(';'))
try:
- with open(log_file, 'r') as log:
- entries = ReadTargets(log, False)
- SummarizeEntries(entries, args.step_types, args.elapsed_time_sorting)
+ with open(log_file, 'r') as log:
+ entries = ReadTargets(log, False)
+ SummarizeEntries(entries, args.step_types,
+ args.elapsed_time_sorting)
except IOError:
- print('Log file %r not found, no build summary created.' % log_file)
- return errno.ENOENT
+ print('Log file %r not found, no build summary created.' % log_file)
+ return errno.ENOENT
if __name__ == '__main__':
diff --git a/presubmit_canned_checks.py b/presubmit_canned_checks.py
index 2d116ad65a..7ccbe61cc0 100644
--- a/presubmit_canned_checks.py
+++ b/presubmit_canned_checks.py
@@ -1,7 +1,6 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Generic presubmit checks that can be reused by other presubmit checks."""
from __future__ import print_function
@@ -13,6 +12,9 @@ import zlib
import metadata.discover
import metadata.validate
+# TODO: Should fix these warnings.
+# pylint: disable=line-too-long
+
_HERE = _os.path.dirname(_os.path.abspath(__file__))
# These filters will be disabled if callers do not explicitly supply a
@@ -30,12 +32,12 @@ _HERE = _os.path.dirname(_os.path.abspath(__file__))
# - runtime/int : Can be fixed long term; volume of errors too high
# - whitespace/braces : We have a lot of explicit scoping in chrome code
OFF_BY_DEFAULT_LINT_FILTERS = [
- '-build/include',
- '-build/include_order',
- '-build/namespaces',
- '-readability/casting',
- '-runtime/int',
- '-whitespace/braces',
+ '-build/include',
+ '-build/include_order',
+ '-build/namespaces',
+ '-readability/casting',
+ '-runtime/int',
+ '-whitespace/braces',
]
# These filters will be disabled unless callers explicitly enable them, because
@@ -72,343 +74,367 @@ _CORP_LINK_KEYWORD = '.corp.google'
def CheckChangeHasBugFieldFromChange(change, output_api, show_suggestions=True):
- """Requires that the changelist have a Bug: field. If show_suggestions is
+ """Requires that the changelist have a Bug: field. If show_suggestions is
False then only report on incorrect tags, not missing tags."""
- bugs = change.BugsFromDescription()
- results = []
- if bugs:
- if any(b.startswith('b/') for b in bugs):
- results.append(
- output_api.PresubmitNotifyResult(
- 'Buganizer bugs should be prefixed with b:, not b/.'))
- elif show_suggestions:
- results.append(
- output_api.PresubmitNotifyResult(
- 'If this change has an associated bug, add Bug: [bug number] or '
- 'Fixed: [bug number].'))
+ bugs = change.BugsFromDescription()
+ results = []
+ if bugs:
+ if any(b.startswith('b/') for b in bugs):
+ results.append(
+ output_api.PresubmitNotifyResult(
+ 'Buganizer bugs should be prefixed with b:, not b/.'))
+ elif show_suggestions:
+ results.append(
+ output_api.PresubmitNotifyResult(
+ 'If this change has an associated bug, add Bug: [bug number] '
+ 'or Fixed: [bug number].'))
- if 'Fixes' in change.GitFootersFromDescription():
- results.append(
- output_api.PresubmitError(
- 'Fixes: is the wrong footer tag, use Fixed: instead.'))
- return results
+ if 'Fixes' in change.GitFootersFromDescription():
+ results.append(
+ output_api.PresubmitError(
+ 'Fixes: is the wrong footer tag, use Fixed: instead.'))
+ return results
def CheckChangeHasBugField(input_api, output_api):
- return CheckChangeHasBugFieldFromChange(input_api.change, output_api)
+ return CheckChangeHasBugFieldFromChange(input_api.change, output_api)
def CheckChangeHasNoUnwantedTagsFromChange(change, output_api):
- UNWANTED_TAGS = {
- 'FIXED': {
- 'why': 'is not supported',
- 'instead': 'Use "Fixed:" instead.'
- },
- # TODO: BUG, ISSUE
- }
+ UNWANTED_TAGS = {
+ 'FIXED': {
+ 'why': 'is not supported',
+ 'instead': 'Use "Fixed:" instead.'
+ },
+ # TODO: BUG, ISSUE
+ }
- errors = []
- for tag, desc in UNWANTED_TAGS.items():
- if tag in change.tags:
- subs = tag, desc['why'], desc.get('instead', '')
- errors.append(('%s= %s. %s' % subs).rstrip())
+ errors = []
+ for tag, desc in UNWANTED_TAGS.items():
+ if tag in change.tags:
+ subs = tag, desc['why'], desc.get('instead', '')
+ errors.append(('%s= %s. %s' % subs).rstrip())
- return [output_api.PresubmitError('\n'.join(errors))] if errors else []
+ return [output_api.PresubmitError('\n'.join(errors))] if errors else []
def CheckChangeHasNoUnwantedTags(input_api, output_api):
- return CheckChangeHasNoUnwantedTagsFromChange(input_api.change, output_api)
+ return CheckChangeHasNoUnwantedTagsFromChange(input_api.change, output_api)
def CheckDoNotSubmitInDescription(input_api, output_api):
- """Checks that the user didn't add 'DO NOT ''SUBMIT' to the CL description.
+ """Checks that the user didn't add 'DO NOT ''SUBMIT' to the CL description.
"""
- # Keyword is concatenated to avoid presubmit check rejecting the CL.
- keyword = 'DO NOT ' + 'SUBMIT'
- if keyword in input_api.change.DescriptionText():
- return [output_api.PresubmitError(
- keyword + ' is present in the changelist description.')]
+ # Keyword is concatenated to avoid presubmit check rejecting the CL.
+ keyword = 'DO NOT ' + 'SUBMIT'
+ if keyword in input_api.change.DescriptionText():
+ return [
+ output_api.PresubmitError(
+ keyword + ' is present in the changelist description.')
+ ]
- return []
+ return []
def CheckCorpLinksInDescription(input_api, output_api):
- """Checks that the description doesn't contain corp links."""
- if _CORP_LINK_KEYWORD in input_api.change.DescriptionText():
- return [
- output_api.PresubmitPromptWarning(
- 'Corp link is present in the changelist description.')
- ]
+ """Checks that the description doesn't contain corp links."""
+ if _CORP_LINK_KEYWORD in input_api.change.DescriptionText():
+ return [
+ output_api.PresubmitPromptWarning(
+ 'Corp link is present in the changelist description.')
+ ]
- return []
+ return []
def CheckChangeHasDescription(input_api, output_api):
- """Checks the CL description is not empty."""
- text = input_api.change.DescriptionText()
- if text.strip() == '':
- if input_api.is_committing and not input_api.no_diffs:
- return [output_api.PresubmitError('Add a description to the CL.')]
+ """Checks the CL description is not empty."""
+ text = input_api.change.DescriptionText()
+ if text.strip() == '':
+ if input_api.is_committing and not input_api.no_diffs:
+ return [output_api.PresubmitError('Add a description to the CL.')]
- return [output_api.PresubmitNotifyResult('Add a description to the CL.')]
- return []
+ return [
+ output_api.PresubmitNotifyResult('Add a description to the CL.')
+ ]
+ return []
def CheckChangeWasUploaded(input_api, output_api):
- """Checks that the issue was uploaded before committing."""
- if input_api.is_committing and not input_api.change.issue:
- message = 'Issue wasn\'t uploaded. Please upload first.'
- if input_api.no_diffs:
- # Make this just a message with presubmit --all and --files
- return [output_api.PresubmitNotifyResult(message)]
- return [output_api.PresubmitError(message)]
- return []
+ """Checks that the issue was uploaded before committing."""
+ if input_api.is_committing and not input_api.change.issue:
+ message = 'Issue wasn\'t uploaded. Please upload first.'
+ if input_api.no_diffs:
+ # Make this just a message with presubmit --all and --files
+ return [output_api.PresubmitNotifyResult(message)]
+ return [output_api.PresubmitError(message)]
+ return []
def CheckDescriptionUsesColonInsteadOfEquals(input_api, output_api):
- """Checks that the CL description uses a colon after 'Bug' and 'Fixed' tags
+ """Checks that the CL description uses a colon after 'Bug' and 'Fixed' tags
instead of equals.
crbug.com only interprets the lines "Bug: xyz" and "Fixed: xyz" but not
"Bug=xyz" or "Fixed=xyz".
"""
- text = input_api.change.DescriptionText()
- if input_api.re.search(r'^(Bug|Fixed)=',
- text,
- flags=input_api.re.IGNORECASE
- | input_api.re.MULTILINE):
- return [output_api.PresubmitError('Use Bug:/Fixed: instead of Bug=/Fixed=')]
- return []
+ text = input_api.change.DescriptionText()
+ if input_api.re.search(r'^(Bug|Fixed)=',
+ text,
+ flags=input_api.re.IGNORECASE
+ | input_api.re.MULTILINE):
+ return [
+ output_api.PresubmitError('Use Bug:/Fixed: instead of Bug=/Fixed=')
+ ]
+ return []
### Content checks
def CheckAuthorizedAuthor(input_api, output_api, bot_allowlist=None):
- """For non-googler/chromites committers, verify the author's email address is
+ """For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
- if input_api.is_committing or input_api.no_diffs:
- error_type = output_api.PresubmitError
- else:
- error_type = output_api.PresubmitPromptWarning
+ if input_api.is_committing or input_api.no_diffs:
+ error_type = output_api.PresubmitError
+ else:
+ error_type = output_api.PresubmitPromptWarning
- author = input_api.change.author_email
- if not author:
- input_api.logging.info('No author, skipping AUTHOR check')
+ author = input_api.change.author_email
+ if not author:
+ input_api.logging.info('No author, skipping AUTHOR check')
+ return []
+
+ # This is used for CLs created by trusted robot accounts.
+ if bot_allowlist and author in bot_allowlist:
+ return []
+
+ authors_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
+ 'AUTHORS')
+ author_re = input_api.re.compile(r'[^#]+\s+\<(.+?)\>\s*$')
+ valid_authors = []
+ with _io.open(authors_path, encoding='utf-8') as fp:
+ for line in fp:
+ m = author_re.match(line)
+ if m:
+ valid_authors.append(m.group(1).lower())
+
+ if not any(
+ input_api.fnmatch.fnmatch(author.lower(), valid)
+ for valid in valid_authors):
+ input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
+ return [
+ error_type((
+ # pylint: disable=line-too-long
+ '%s is not in AUTHORS file. If you are a new contributor, please visit\n'
+ 'https://chromium.googlesource.com/chromium/src/+/refs/heads/main/docs/contributing.md#Legal-stuff\n'
+ # pylint: enable=line-too-long
+ 'and read the "Legal stuff" section.\n'
+ 'If you are a chromite, verify that the contributor signed the '
+ 'CLA.') % author)
+ ]
return []
- # This is used for CLs created by trusted robot accounts.
- if bot_allowlist and author in bot_allowlist:
- return []
-
- authors_path = input_api.os_path.join(
- input_api.PresubmitLocalPath(), 'AUTHORS')
- author_re = input_api.re.compile(r'[^#]+\s+\<(.+?)\>\s*$')
- valid_authors = []
- with _io.open(authors_path, encoding='utf-8') as fp:
- for line in fp:
- m = author_re.match(line)
- if m:
- valid_authors.append(m.group(1).lower())
-
- if not any(input_api.fnmatch.fnmatch(author.lower(), valid)
- for valid in valid_authors):
- input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
- return [
- error_type((
- # pylint: disable=line-too-long
- '%s is not in AUTHORS file. If you are a new contributor, please visit\n'
- 'https://chromium.googlesource.com/chromium/src/+/refs/heads/main/docs/contributing.md#Legal-stuff\n'
- # pylint: enable=line-too-long
- 'and read the "Legal stuff" section\n'
- 'If you are a chromite, verify that the contributor signed the CLA.') %
- author)
- ]
- return []
-
def CheckDoNotSubmitInFiles(input_api, output_api):
- """Checks that the user didn't add 'DO NOT ''SUBMIT' to any files."""
- # We want to check every text file, not just source files.
- file_filter = lambda x : x
+ """Checks that the user didn't add 'DO NOT ''SUBMIT' to any files."""
+ # We want to check every text file, not just source files.
+ file_filter = lambda x: x
- # Keyword is concatenated to avoid presubmit check rejecting the CL.
- keyword = 'DO NOT ' + 'SUBMIT'
- def DoNotSubmitRule(extension, line):
- try:
- return keyword not in line
- # Fallback to True for non-text content
- except UnicodeDecodeError:
- return True
+ # Keyword is concatenated to avoid presubmit check rejecting the CL.
+ keyword = 'DO NOT ' + 'SUBMIT'
- errors = _FindNewViolationsOfRule(DoNotSubmitRule, input_api, file_filter)
- text = '\n'.join('Found %s in %s' % (keyword, loc) for loc in errors)
- if text:
- return [output_api.PresubmitError(text)]
- return []
+ def DoNotSubmitRule(extension, line):
+ try:
+ return keyword not in line
+ # Fallback to True for non-text content
+ except UnicodeDecodeError:
+ return True
+
+ errors = _FindNewViolationsOfRule(DoNotSubmitRule, input_api, file_filter)
+ text = '\n'.join('Found %s in %s' % (keyword, loc) for loc in errors)
+ if text:
+ return [output_api.PresubmitError(text)]
+ return []
def CheckCorpLinksInFiles(input_api, output_api, source_file_filter=None):
- """Checks that files do not contain a corp link."""
- errors = _FindNewViolationsOfRule(
- lambda _, line: _CORP_LINK_KEYWORD not in line, input_api,
- source_file_filter)
- text = '\n'.join('Found corp link in %s' % loc for loc in errors)
- if text:
- return [output_api.PresubmitPromptWarning(text)]
- return []
+ """Checks that files do not contain a corp link."""
+ errors = _FindNewViolationsOfRule(
+ lambda _, line: _CORP_LINK_KEYWORD not in line, input_api,
+ source_file_filter)
+ text = '\n'.join('Found corp link in %s' % loc for loc in errors)
+ if text:
+ return [output_api.PresubmitPromptWarning(text)]
+ return []
def GetCppLintFilters(lint_filters=None):
- filters = OFF_UNLESS_MANUALLY_ENABLED_LINT_FILTERS[:]
- if lint_filters is None:
- lint_filters = OFF_BY_DEFAULT_LINT_FILTERS
- filters.extend(lint_filters)
- return filters
+ filters = OFF_UNLESS_MANUALLY_ENABLED_LINT_FILTERS[:]
+ if lint_filters is None:
+ lint_filters = OFF_BY_DEFAULT_LINT_FILTERS
+ filters.extend(lint_filters)
+ return filters
-def CheckChangeLintsClean(input_api, output_api, source_file_filter=None,
- lint_filters=None, verbose_level=None):
- """Checks that all '.cc' and '.h' files pass cpplint.py."""
- _RE_IS_TEST = input_api.re.compile(r'.*tests?.(cc|h)$')
- result = []
+def CheckChangeLintsClean(input_api,
+ output_api,
+ source_file_filter=None,
+ lint_filters=None,
+ verbose_level=None):
+ """Checks that all '.cc' and '.h' files pass cpplint.py."""
+ _RE_IS_TEST = input_api.re.compile(r'.*tests?.(cc|h)$')
+ result = []
- cpplint = input_api.cpplint
- # Access to a protected member _XX of a client class
- # pylint: disable=protected-access
- cpplint._cpplint_state.ResetErrorCounts()
+ cpplint = input_api.cpplint
+ # Access to a protected member _XX of a client class
+ # pylint: disable=protected-access
+ cpplint._cpplint_state.ResetErrorCounts()
- cpplint._SetFilters(','.join(GetCppLintFilters(lint_filters)))
+ cpplint._SetFilters(','.join(GetCppLintFilters(lint_filters)))
- # Use VS error format on Windows to make it easier to step through the
- # results.
- if input_api.platform == 'win32':
- cpplint._SetOutputFormat('vs7')
+ # Use VS error format on Windows to make it easier to step through the
+ # results.
+ if input_api.platform == 'win32':
+ cpplint._SetOutputFormat('vs7')
- if source_file_filter == None:
- # The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
- # Only process those extensions which are used in Chromium.
- INCLUDE_CPP_FILES_ONLY = (r'.*\.(cc|h|cpp)$', )
- source_file_filter = lambda x: input_api.FilterSourceFile(
- x,
- files_to_check=INCLUDE_CPP_FILES_ONLY,
- files_to_skip=input_api.DEFAULT_FILES_TO_SKIP)
+ if source_file_filter == None:
+ # The only valid extensions for cpplint are .cc, .h, .cpp, .cu, and .ch.
+ # Only process those extensions which are used in Chromium.
+ INCLUDE_CPP_FILES_ONLY = (r'.*\.(cc|h|cpp)$', )
+ source_file_filter = lambda x: input_api.FilterSourceFile(
+ x,
+ files_to_check=INCLUDE_CPP_FILES_ONLY,
+ files_to_skip=input_api.DEFAULT_FILES_TO_SKIP)
- # We currently are more strict with normal code than unit tests; 4 and 5 are
- # the verbosity level that would normally be passed to cpplint.py through
- # --verbose=#. Hopefully, in the future, we can be more verbose.
- files = [f.AbsoluteLocalPath() for f in
- input_api.AffectedSourceFiles(source_file_filter)]
- for file_name in files:
- if _RE_IS_TEST.match(file_name):
- level = 5
- else:
- level = 4
-
- verbose_level = verbose_level or level
- cpplint.ProcessFile(file_name, verbose_level)
-
- if cpplint._cpplint_state.error_count > 0:
- # cpplint errors currently cannot be counted as errors during upload
- # presubmits because some directories only run cpplint during upload and
- # therefore are far from cpplint clean.
- if input_api.is_committing:
- res_type = output_api.PresubmitError
- else:
- res_type = output_api.PresubmitPromptWarning
- result = [
- res_type('Changelist failed cpplint.py check. '
- 'Search the output for "(cpplint)"')
+ # We currently are more strict with normal code than unit tests; 4 and 5 are
+ # the verbosity level that would normally be passed to cpplint.py through
+ # --verbose=#. Hopefully, in the future, we can be more verbose.
+ files = [
+ f.AbsoluteLocalPath()
+ for f in input_api.AffectedSourceFiles(source_file_filter)
]
+ for file_name in files:
+ if _RE_IS_TEST.match(file_name):
+ level = 5
+ else:
+ level = 4
- return result
+ verbose_level = verbose_level or level
+ cpplint.ProcessFile(file_name, verbose_level)
+
+ if cpplint._cpplint_state.error_count > 0:
+ # cpplint errors currently cannot be counted as errors during upload
+ # presubmits because some directories only run cpplint during upload and
+ # therefore are far from cpplint clean.
+ if input_api.is_committing:
+ res_type = output_api.PresubmitError
+ else:
+ res_type = output_api.PresubmitPromptWarning
+ result = [
+ res_type('Changelist failed cpplint.py check. '
+ 'Search the output for "(cpplint)"')
+ ]
+
+ return result
def CheckChangeHasNoCR(input_api, output_api, source_file_filter=None):
- """Checks no '\r' (CR) character is in any source files."""
- cr_files = []
- for f in input_api.AffectedSourceFiles(source_file_filter):
- if '\r' in input_api.ReadFile(f, 'rb'):
- cr_files.append(f.LocalPath())
- if cr_files:
- return [output_api.PresubmitPromptWarning(
- 'Found a CR character in these files:', items=cr_files)]
- return []
+ """Checks no '\r' (CR) character is in any source files."""
+ cr_files = []
+ for f in input_api.AffectedSourceFiles(source_file_filter):
+ if '\r' in input_api.ReadFile(f, 'rb'):
+ cr_files.append(f.LocalPath())
+ if cr_files:
+ return [
+ output_api.PresubmitPromptWarning(
+ 'Found a CR character in these files:', items=cr_files)
+ ]
+ return []
def CheckChangeHasOnlyOneEol(input_api, output_api, source_file_filter=None):
- """Checks the files ends with one and only one \n (LF)."""
- eof_files = []
- for f in input_api.AffectedSourceFiles(source_file_filter):
- contents = input_api.ReadFile(f, 'rb')
- # Check that the file ends in one and only one newline character.
- if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'):
- eof_files.append(f.LocalPath())
+    """Checks that each file ends with one and only one \n (LF)."""
+ eof_files = []
+ for f in input_api.AffectedSourceFiles(source_file_filter):
+ contents = input_api.ReadFile(f, 'rb')
+ # Check that the file ends in one and only one newline character.
+ if len(contents) > 1 and (contents[-1:] != '\n'
+ or contents[-2:-1] == '\n'):
+ eof_files.append(f.LocalPath())
- if eof_files:
- return [output_api.PresubmitPromptWarning(
- 'These files should end in one (and only one) newline character:',
- items=eof_files)]
- return []
+ if eof_files:
+ return [
+ output_api.PresubmitPromptWarning(
+ 'These files should end in one (and only one) newline character:',
+ items=eof_files)
+ ]
+ return []
-def CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api,
+def CheckChangeHasNoCrAndHasOnlyOneEol(input_api,
+ output_api,
source_file_filter=None):
- """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass.
+ """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass.
It is faster because it is reading the file only once.
"""
- cr_files = []
- eof_files = []
- for f in input_api.AffectedSourceFiles(source_file_filter):
- contents = input_api.ReadFile(f, 'rb')
- if '\r' in contents:
- cr_files.append(f.LocalPath())
- # Check that the file ends in one and only one newline character.
- if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'):
- eof_files.append(f.LocalPath())
- outputs = []
- if cr_files:
- outputs.append(output_api.PresubmitPromptWarning(
- 'Found a CR character in these files:', items=cr_files))
- if eof_files:
- outputs.append(output_api.PresubmitPromptWarning(
- 'These files should end in one (and only one) newline character:',
- items=eof_files))
- return outputs
+ cr_files = []
+ eof_files = []
+ for f in input_api.AffectedSourceFiles(source_file_filter):
+ contents = input_api.ReadFile(f, 'rb')
+ if '\r' in contents:
+ cr_files.append(f.LocalPath())
+ # Check that the file ends in one and only one newline character.
+ if len(contents) > 1 and (contents[-1:] != '\n'
+ or contents[-2:-1] == '\n'):
+ eof_files.append(f.LocalPath())
+ outputs = []
+ if cr_files:
+ outputs.append(
+ output_api.PresubmitPromptWarning(
+ 'Found a CR character in these files:', items=cr_files))
+ if eof_files:
+ outputs.append(
+ output_api.PresubmitPromptWarning(
+ 'These files should end in one (and only one) newline character:',
+ items=eof_files))
+ return outputs
def CheckGenderNeutral(input_api, output_api, source_file_filter=None):
- """Checks that there are no gendered pronouns in any of the text files to be
+ """Checks that there are no gendered pronouns in any of the text files to be
submitted.
"""
- if input_api.no_diffs:
+ if input_api.no_diffs:
+ return []
+
+ gendered_re = input_api.re.compile(
+ r'(^|\s|\(|\[)([Hh]e|[Hh]is|[Hh]ers?|[Hh]im|[Ss]he|[Gg]uys?)\\b')
+
+ errors = []
+ for f in input_api.AffectedFiles(include_deletes=False,
+ file_filter=source_file_filter):
+ for line_num, line in f.ChangedContents():
+ if gendered_re.search(line):
+ errors.append('%s (%d): %s' % (f.LocalPath(), line_num, line))
+
+ if errors:
+ return [
+ output_api.PresubmitPromptWarning('Found a gendered pronoun in:',
+ long_text='\n'.join(errors))
+ ]
return []
- gendered_re = input_api.re.compile(
- r'(^|\s|\(|\[)([Hh]e|[Hh]is|[Hh]ers?|[Hh]im|[Ss]he|[Gg]uys?)\\b')
-
- errors = []
- for f in input_api.AffectedFiles(include_deletes=False,
- file_filter=source_file_filter):
- for line_num, line in f.ChangedContents():
- if gendered_re.search(line):
- errors.append('%s (%d): %s' % (f.LocalPath(), line_num, line))
-
- if errors:
- return [output_api.PresubmitPromptWarning('Found a gendered pronoun in:',
- long_text='\n'.join(errors))]
- return []
-
def _ReportErrorFileAndLine(filename, line_num, dummy_line):
- """Default error formatter for _FindNewViolationsOfRule."""
- return '%s:%s' % (filename, line_num)
+ """Default error formatter for _FindNewViolationsOfRule."""
+ return '%s:%s' % (filename, line_num)
def _GenerateAffectedFileExtList(input_api, source_file_filter):
- """Generate a list of (file, extension) tuples from affected files.
+ """Generate a list of (file, extension) tuples from affected files.
The result can be fed to _FindNewViolationsOfRule() directly, or
could be filtered before doing that.
@@ -420,16 +446,16 @@ def _GenerateAffectedFileExtList(input_api, source_file_filter):
A list of (file, extension) tuples, where |file| is an affected
file, and |extension| its file path extension.
"""
- for f in input_api.AffectedFiles(
- include_deletes=False, file_filter=source_file_filter):
- extension = str(f.LocalPath()).rsplit('.', 1)[-1]
- yield (f, extension)
+ for f in input_api.AffectedFiles(include_deletes=False,
+ file_filter=source_file_filter):
+ extension = str(f.LocalPath()).rsplit('.', 1)[-1]
+ yield (f, extension)
def _FindNewViolationsOfRuleForList(callable_rule,
file_ext_list,
error_formatter=_ReportErrorFileAndLine):
- """Find all newly introduced violations of a per-line rule (a callable).
+ """Find all newly introduced violations of a per-line rule (a callable).
Prefer calling _FindNewViolationsOfRule() instead of this function, unless
the list of affected files need to be filtered in a special way.
@@ -445,27 +471,27 @@ def _FindNewViolationsOfRuleForList(callable_rule,
Returns:
A list of the newly-introduced violations reported by the rule.
"""
- errors = []
- for f, extension in file_ext_list:
- # For speed, we do two passes, checking first the full file. Shelling out
- # to the SCM to determine the changed region can be quite expensive on
- # Win32. Assuming that most files will be kept problem-free, we can
- # skip the SCM operations most of the time.
- if all(callable_rule(extension, line) for line in f.NewContents()):
- continue # No violation found in full text: can skip considering diff.
+ errors = []
+ for f, extension in file_ext_list:
+ # For speed, we do two passes, checking first the full file. Shelling
+ # out to the SCM to determine the changed region can be quite expensive
+ # on Win32. Assuming that most files will be kept problem-free, we can
+ # skip the SCM operations most of the time.
+ if all(callable_rule(extension, line) for line in f.NewContents()):
+ continue # No violation found in full text: can skip considering diff.
- for line_num, line in f.ChangedContents():
- if not callable_rule(extension, line):
- errors.append(error_formatter(f.LocalPath(), line_num, line))
+ for line_num, line in f.ChangedContents():
+ if not callable_rule(extension, line):
+ errors.append(error_formatter(f.LocalPath(), line_num, line))
- return errors
+ return errors
def _FindNewViolationsOfRule(callable_rule,
input_api,
source_file_filter=None,
error_formatter=_ReportErrorFileAndLine):
- """Find all newly introduced violations of a per-line rule (a callable).
+ """Find all newly introduced violations of a per-line rule (a callable).
Arguments:
callable_rule: a callable taking a file extension and line of input and
@@ -478,367 +504,396 @@ def _FindNewViolationsOfRule(callable_rule,
Returns:
A list of the newly-introduced violations reported by the rule.
"""
- if input_api.no_diffs:
- return []
- return _FindNewViolationsOfRuleForList(
- callable_rule, _GenerateAffectedFileExtList(
- input_api, source_file_filter), error_formatter)
+ if input_api.no_diffs:
+ return []
+ return _FindNewViolationsOfRuleForList(
+ callable_rule,
+ _GenerateAffectedFileExtList(input_api, source_file_filter),
+ error_formatter)
def CheckChangeHasNoTabs(input_api, output_api, source_file_filter=None):
- """Checks that there are no tab characters in any of the text files to be
+ """Checks that there are no tab characters in any of the text files to be
submitted.
"""
- # In addition to the filter, make sure that makefiles are skipped.
- if not source_file_filter:
- # It's the default filter.
- source_file_filter = input_api.FilterSourceFile
- def filter_more(affected_file):
- basename = input_api.os_path.basename(affected_file.LocalPath())
- return (not (basename in ('Makefile', 'makefile') or
- basename.endswith('.mk')) and
- source_file_filter(affected_file))
+ # In addition to the filter, make sure that makefiles are skipped.
+ if not source_file_filter:
+ # It's the default filter.
+ source_file_filter = input_api.FilterSourceFile
- tabs = _FindNewViolationsOfRule(lambda _, line : '\t' not in line,
- input_api, filter_more)
+ def filter_more(affected_file):
+ basename = input_api.os_path.basename(affected_file.LocalPath())
+ return (not (basename in ('Makefile', 'makefile')
+ or basename.endswith('.mk'))
+ and source_file_filter(affected_file))
- if tabs:
- return [output_api.PresubmitPromptWarning('Found a tab character in:',
- long_text='\n'.join(tabs))]
- return []
+ tabs = _FindNewViolationsOfRule(lambda _, line: '\t' not in line, input_api,
+ filter_more)
+
+ if tabs:
+ return [
+ output_api.PresubmitPromptWarning('Found a tab character in:',
+ long_text='\n'.join(tabs))
+ ]
+ return []
def CheckChangeTodoHasOwner(input_api, output_api, source_file_filter=None):
- """Checks that the user didn't add TODO(name) without an owner."""
+ """Checks that the user didn't add TODO(name) without an owner."""
- unowned_todo = input_api.re.compile('TO''DO[^(]')
- errors = _FindNewViolationsOfRule(lambda _, x : not unowned_todo.search(x),
- input_api, source_file_filter)
- errors = ['Found TO''DO with no owner in ' + x for x in errors]
- if errors:
- return [output_api.PresubmitPromptWarning('\n'.join(errors))]
- return []
+ unowned_todo = input_api.re.compile('TO' 'DO[^(]')
+ errors = _FindNewViolationsOfRule(lambda _, x: not unowned_todo.search(x),
+ input_api, source_file_filter)
+ errors = ['Found TO' 'DO with no owner in ' + x for x in errors]
+ if errors:
+ return [output_api.PresubmitPromptWarning('\n'.join(errors))]
+ return []
-def CheckChangeHasNoStrayWhitespace(input_api, output_api,
+def CheckChangeHasNoStrayWhitespace(input_api,
+ output_api,
source_file_filter=None):
- """Checks that there is no stray whitespace at source lines end."""
- errors = _FindNewViolationsOfRule(lambda _, line : line.rstrip() == line,
- input_api, source_file_filter)
- if errors:
- return [output_api.PresubmitPromptWarning(
- 'Found line ending with white spaces in:',
- long_text='\n'.join(errors))]
- return []
+    """Checks that there is no stray whitespace at the end of source lines."""
+ errors = _FindNewViolationsOfRule(lambda _, line: line.rstrip() == line,
+ input_api, source_file_filter)
+ if errors:
+ return [
+ output_api.PresubmitPromptWarning(
+ 'Found line ending with white spaces in:',
+ long_text='\n'.join(errors))
+ ]
+ return []
def CheckLongLines(input_api, output_api, maxlen, source_file_filter=None):
- """Checks that there aren't any lines longer than maxlen characters in any of
+ """Checks that there aren't any lines longer than maxlen characters in any of
the text files to be submitted.
"""
- if input_api.no_diffs:
- return []
- maxlens = {
- 'java': 100,
- # This is specifically for Android's handwritten makefiles (Android.mk).
- 'mk': 200,
- 'rs': 100,
- '': maxlen,
- }
+ if input_api.no_diffs:
+ return []
+ maxlens = {
+ 'java': 100,
+ # This is specifically for Android's handwritten makefiles (Android.mk).
+ 'mk': 200,
+ 'rs': 100,
+ '': maxlen,
+ }
- # Language specific exceptions to max line length.
- # '.h' is considered an obj-c file extension, since OBJC_EXCEPTIONS are a
- # superset of CPP_EXCEPTIONS.
- CPP_FILE_EXTS = ('c', 'cc')
- CPP_EXCEPTIONS = ('#define', '#endif', '#if', '#include', '#pragma')
- HTML_FILE_EXTS = ('html',)
- HTML_EXCEPTIONS = (' extra_maxlen:
- return False
+ if line_len > extra_maxlen:
+ return False
- if 'url(' in line and file_extension == 'css':
- return True
+ if 'url(' in line and file_extension == 'css':
+ return True
- if '= 0:
+ if is_global_pylint_directive(line, pos):
+ global_check_enabled = False # Global disable
+ else:
+ continue # Local disable.
+
+ do_check = global_check_enabled
+
+ pos = line.find('pylint: enable=line-too-long')
+ if pos >= 0:
+ if is_global_pylint_directive(line, pos):
+ global_check_enabled = True # Global enable
+ do_check = True # Ensure it applies to current line as well.
+ else:
+ do_check = True # Local enable
+
+ if do_check and not line_is_short:
+ errors.append(error_formatter(file_path, line_num, line))
+
+ return errors
+
+ def format_error(filename, line_num, line):
+ return '%s, line %s, %s chars' % (filename, line_num, len(line))
+
+ file_ext_list = list(
+ _GenerateAffectedFileExtList(input_api, source_file_filter))
- def check_python_long_lines(affected_files, error_formatter):
errors = []
- global_check_enabled = True
- for f in affected_files:
- file_path = f.LocalPath()
- for idx, line in enumerate(f.NewContents()):
- line_num = idx + 1
- line_is_short = no_long_lines(PY_FILE_EXTS[0], line)
+ # For non-Python files, a simple line-based rule check is enough.
+ non_py_file_ext_list = [
+ x for x in file_ext_list if x[1] not in PY_FILE_EXTS
+ ]
+ if non_py_file_ext_list:
+ errors += _FindNewViolationsOfRuleForList(no_long_lines,
+ non_py_file_ext_list,
+ error_formatter=format_error)
- pos = line.find('pylint: disable=line-too-long')
- if pos >= 0:
- if is_global_pylint_directive(line, pos):
- global_check_enabled = False # Global disable
- else:
- continue # Local disable.
+ # However, Python files need more sophisticated checks that need parsing
+ # the whole source file.
+ py_file_list = [x[0] for x in file_ext_list if x[1] in PY_FILE_EXTS]
+ if py_file_list:
+ errors += check_python_long_lines(py_file_list,
+ error_formatter=format_error)
+ if errors:
+ msg = 'Found %d lines longer than %s characters (first 5 shown).' % (
+ len(errors), maxlen)
+ return [output_api.PresubmitPromptWarning(msg, items=errors[:5])]
- do_check = global_check_enabled
-
- pos = line.find('pylint: enable=line-too-long')
- if pos >= 0:
- if is_global_pylint_directive(line, pos):
- global_check_enabled = True # Global enable
- do_check = True # Ensure it applies to current line as well.
- else:
- do_check = True # Local enable
-
- if do_check and not line_is_short:
- errors.append(error_formatter(file_path, line_num, line))
-
- return errors
-
- def format_error(filename, line_num, line):
- return '%s, line %s, %s chars' % (filename, line_num, len(line))
-
- file_ext_list = list(
- _GenerateAffectedFileExtList(input_api, source_file_filter))
-
- errors = []
-
- # For non-Python files, a simple line-based rule check is enough.
- non_py_file_ext_list = [x for x in file_ext_list if x[1] not in PY_FILE_EXTS]
- if non_py_file_ext_list:
- errors += _FindNewViolationsOfRuleForList(
- no_long_lines, non_py_file_ext_list, error_formatter=format_error)
-
- # However, Python files need more sophisticated checks that need parsing
- # the whole source file.
- py_file_list = [x[0] for x in file_ext_list if x[1] in PY_FILE_EXTS]
- if py_file_list:
- errors += check_python_long_lines(
- py_file_list, error_formatter=format_error)
- if errors:
- msg = 'Found %d lines longer than %s characters (first 5 shown).' % (
- len(errors), maxlen)
- return [output_api.PresubmitPromptWarning(msg, items=errors[:5])]
-
- return []
+ return []
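
A standalone sketch of how the pylint line-too-long directives interact with the long-line check added above: a directive that stands alone on a line toggles checking for all subsequent lines, while a trailing directive only affects the line it sits on. The body of is_global_pylint_directive is not part of this hunk, so the version below is an assumption (a directive counts as global when only whitespace and '#' precede it); the control flow mirrors the new check_python_long_lines loop.

    def is_global_pylint_directive(line, pos):
        # Assumption: a directive is global when nothing but whitespace and
        # '#' characters precede it on the line.
        return all(c in ' \t#' for c in line[:pos])

    def flag_long_lines(lines, maxlen=80):
        # Yields 1-based numbers of over-length lines, honouring the
        # disable/enable directives the same way the check above does.
        global_check_enabled = True
        for num, line in enumerate(lines, 1):
            pos = line.find('pylint: disable=line-too-long')
            if pos >= 0:
                if is_global_pylint_directive(line, pos):
                    global_check_enabled = False  # Applies to later lines too.
                else:
                    continue  # Local disable: skip just this line.
            do_check = global_check_enabled
            pos = line.find('pylint: enable=line-too-long')
            if pos >= 0:
                if is_global_pylint_directive(line, pos):
                    global_check_enabled = True  # Re-enable from here on.
                do_check = True  # Either way, check the directive line itself.
            if do_check and len(line) > maxlen:
                yield num

Under these assumptions, a long line carrying its own trailing disable comment is skipped, but checking resumes on the very next line.
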
-def CheckLicense(input_api, output_api, license_re_param=None,
- project_name=None, source_file_filter=None, accept_empty_files=True):
- """Verifies the license header.
+def CheckLicense(input_api,
+ output_api,
+ license_re_param=None,
+ project_name=None,
+ source_file_filter=None,
+ accept_empty_files=True):
+ """Verifies the license header.
"""
- # Early-out if the license_re is guaranteed to match everything.
- if license_re_param and license_re_param == '.*':
- return []
+ # Early-out if the license_re is guaranteed to match everything.
+ if license_re_param and license_re_param == '.*':
+ return []
- current_year = int(input_api.time.strftime('%Y'))
+ current_year = int(input_api.time.strftime('%Y'))
- if license_re_param:
- new_license_re = license_re = license_re_param
- else:
- project_name = project_name or 'Chromium'
+ if license_re_param:
+ new_license_re = license_re = license_re_param
+ else:
+ project_name = project_name or 'Chromium'
- # Accept any year number from 2006 to the current year, or the special
- # 2006-20xx string used on the oldest files. 2006-20xx is deprecated, but
- # tolerated on old files. On new files the current year must be specified.
- allowed_years = (str(s) for s in reversed(range(2006, current_year + 1)))
- years_re = '(' + '|'.join(allowed_years) + '|2006-2008|2006-2009|2006-2010)'
+ # Accept any year number from 2006 to the current year, or the special
+ # 2006-20xx string used on the oldest files. 2006-20xx is deprecated,
+ # but tolerated on old files. On new files the current year must be
+ # specified.
+ allowed_years = (str(s)
+ for s in reversed(range(2006, current_year + 1)))
+ years_re = '(' + '|'.join(
+ allowed_years) + '|2006-2008|2006-2009|2006-2010)'
- # Reduce duplication between the two regex expressions.
- key_line = ('Use of this source code is governed by a BSD-style license '
- 'that can be')
- # The (c) is deprecated, but tolerate it until it's removed from all files.
- # "All rights reserved" is also deprecated, but tolerate it until it's
- # removed from all files.
- license_re = (r'.*? Copyright (\(c\) )?%(year)s The %(project)s Authors'
- r'(\. All rights reserved\.)?\n'
- r'.*? %(key_line)s\n'
- r'.*? found in the LICENSE file\.(?: \*/)?\n') % {
- 'year': years_re,
- 'project': project_name,
- 'key_line': key_line,
- }
- # On new files don't tolerate any digression from the ideal.
- new_license_re = (r'.*? Copyright %(year)s The %(project)s Authors\n'
+ # Reduce duplication between the two regex expressions.
+ key_line = (
+ 'Use of this source code is governed by a BSD-style license '
+ 'that can be')
+ # The (c) is deprecated, but tolerate it until it's removed from all
+ # files. "All rights reserved" is also deprecated, but tolerate it until
+ # it's removed from all files.
+ license_re = (r'.*? Copyright (\(c\) )?%(year)s The %(project)s Authors'
+ r'(\. All rights reserved\.)?\n'
r'.*? %(key_line)s\n'
r'.*? found in the LICENSE file\.(?: \*/)?\n') % {
'year': years_re,
'project': project_name,
'key_line': key_line,
}
-
- license_re = input_api.re.compile(license_re, input_api.re.MULTILINE)
- new_license_re = input_api.re.compile(new_license_re, input_api.re.MULTILINE)
- bad_files = []
- wrong_year_new_files = []
- bad_new_files = []
- for f in input_api.AffectedSourceFiles(source_file_filter):
- # Only examine the first 1,000 bytes of the file to avoid expensive and
- # fruitless regex searches over long files with no license.
- # re.match would also avoid this but can't be used because some files have
- # a shebang line ahead of the license.
- # The \r\n fixup is because it is possible on Windows to copy/paste the
- # license in such a way that \r\n line endings are inserted. This leads to
- # confusing license error messages - it's better to let the separate \r\n
- # check handle those.
- contents = input_api.ReadFile(f, 'r')[:1000].replace('\r\n', '\n')
- if accept_empty_files and not contents:
- continue
- if f.Action() == 'A':
- # Stricter checking for new files (but might actually be moved).
- match = new_license_re.search(contents)
- if not match:
- # License is totally wrong.
- bad_new_files.append(f.LocalPath())
- elif not license_re_param and match.groups()[0] != str(current_year):
- # If we're using the built-in license_re on a new file then make sure
- # the year is correct.
- wrong_year_new_files.append(f.LocalPath())
- elif not license_re.search(contents):
- bad_files.append(f.LocalPath())
- results = []
- if bad_new_files:
- if license_re_param:
- error_message = ('License on new files must match:\n\n%s\n' %
- license_re_param)
- else:
- # Verbatim text that can be copy-pasted into new files (possibly adjusting
- # the leading comment delimiter).
- new_license_text = ('// Copyright %(year)s The %(project)s Authors\n'
- '// %(key_line)s\n'
- '// found in the LICENSE file.\n') % {
- 'year': current_year,
+ # On new files don't tolerate any digression from the ideal.
+ new_license_re = (r'.*? Copyright %(year)s The %(project)s Authors\n'
+ r'.*? %(key_line)s\n'
+ r'.*? found in the LICENSE file\.(?: \*/)?\n') % {
+ 'year': years_re,
'project': project_name,
'key_line': key_line,
}
- error_message = (
- 'License on new files must be:\n\n%s\n' % new_license_text +
- '(adjusting the comment delimiter accordingly).\n\n' +
- 'If this is a moved file, then update the license but do not ' +
- 'update the year.\n\n')
- error_message += 'Found a bad license header in these new or moved files:'
- results.append(output_api.PresubmitError(error_message,
- items=bad_new_files))
- if wrong_year_new_files:
- # We can't distinguish between new and moved files, so this has to be a
- # warning rather than an error.
- results.append(
- output_api.PresubmitPromptWarning(
- 'License doesn\'t list the current year. If this is a new file, '
- 'use the current year. If this is a moved file then ignore this '
- 'warning.',
- items=wrong_year_new_files))
- if bad_files:
- results.append(
- output_api.PresubmitPromptWarning(
- 'License must match:\n%s\n' % license_re.pattern +
- 'Found a bad license header in these files:',
- items=bad_files))
- return results
+
+ license_re = input_api.re.compile(license_re, input_api.re.MULTILINE)
+ new_license_re = input_api.re.compile(new_license_re,
+ input_api.re.MULTILINE)
+ bad_files = []
+ wrong_year_new_files = []
+ bad_new_files = []
+ for f in input_api.AffectedSourceFiles(source_file_filter):
+ # Only examine the first 1,000 bytes of the file to avoid expensive and
+ # fruitless regex searches over long files with no license.
+ # re.match would also avoid this but can't be used because some files
+ # have a shebang line ahead of the license. The \r\n fixup is because it
+ # is possible on Windows to copy/paste the license in such a way that
+ # \r\n line endings are inserted. This leads to confusing license error
+ # messages - it's better to let the separate \r\n check handle those.
+ contents = input_api.ReadFile(f, 'r')[:1000].replace('\r\n', '\n')
+ if accept_empty_files and not contents:
+ continue
+ if f.Action() == 'A':
+ # Stricter checking for new files (but might actually be moved).
+ match = new_license_re.search(contents)
+ if not match:
+ # License is totally wrong.
+ bad_new_files.append(f.LocalPath())
+ elif not license_re_param and match.groups()[0] != str(
+ current_year):
+ # If we're using the built-in license_re on a new file then make
+ # sure the year is correct.
+ wrong_year_new_files.append(f.LocalPath())
+ elif not license_re.search(contents):
+ bad_files.append(f.LocalPath())
+ results = []
+ if bad_new_files:
+ if license_re_param:
+ error_message = ('License on new files must match:\n\n%s\n' %
+ license_re_param)
+ else:
+ # Verbatim text that can be copy-pasted into new files (possibly
+ # adjusting the leading comment delimiter).
+ new_license_text = (
+ '// Copyright %(year)s The %(project)s Authors\n'
+ '// %(key_line)s\n'
+ '// found in the LICENSE file.\n') % {
+ 'year': current_year,
+ 'project': project_name,
+ 'key_line': key_line,
+ }
+ error_message = (
+ 'License on new files must be:\n\n%s\n' % new_license_text +
+ '(adjusting the comment delimiter accordingly).\n\n' +
+ 'If this is a moved file, then update the license but do not ' +
+ 'update the year.\n\n')
+ error_message += 'Found a bad license header in these new or moved files:'
+ results.append(
+ output_api.PresubmitError(error_message, items=bad_new_files))
+ if wrong_year_new_files:
+ # We can't distinguish between new and moved files, so this has to be a
+ # warning rather than an error.
+ results.append(
+ output_api.PresubmitPromptWarning(
+ 'License doesn\'t list the current year. If this is a new file, '
+ 'use the current year. If this is a moved file then ignore this '
+ 'warning.',
+ items=wrong_year_new_files))
+ if bad_files:
+ results.append(
+ output_api.PresubmitPromptWarning(
+ 'License must match:\n%s\n' % license_re.pattern +
+ 'Found a bad license header in these files:',
+ items=bad_files))
+ return results
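
For illustration, the strict new-file pattern assembled above can be exercised on its own. The regex fragments and the key line are copied from the hunk; the year (2024), the default 'Chromium' project name, and the sample headers are assumptions made for the example.

    import re

    KEY_LINE = ('Use of this source code is governed by a BSD-style license '
                'that can be')
    new_license_re = re.compile(
        (r'.*? Copyright %(year)s The %(project)s Authors\n'
         r'.*? %(key_line)s\n'
         r'.*? found in the LICENSE file\.(?: \*/)?\n') % {
             'year': 2024,
             'project': 'Chromium',
             'key_line': KEY_LINE,
         }, re.MULTILINE)

    good_header = (
        '// Copyright 2024 The Chromium Authors\n'
        '// Use of this source code is governed by a BSD-style license that can be\n'
        '// found in the LICENSE file.\n')
    old_style_header = (
        '// Copyright (c) 2012 The Chromium Authors. All rights reserved.\n'
        '// Use of this source code is governed by a BSD-style license that can be\n'
        '// found in the LICENSE file.\n')

    assert new_license_re.search(good_header)
    assert not new_license_re.search(old_style_header)  # Only the lenient regex tolerates this.
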
def CheckChromiumDependencyMetadata(input_api, output_api, file_filter=None):
- """Check files for Chromium third party dependency metadata have sufficient
+ """Check files for Chromium third party dependency metadata have sufficient
information, and are correctly formatted.
See the README.chromium.template at
https://chromium.googlesource.com/chromium/src/+/main/third_party/README.chromium.template
"""
- # If the file filter is unspecified, filter to known Chromium metadata files.
- if file_filter is None:
- file_filter = lambda f: metadata.discover.is_metadata_file(f.LocalPath())
+ # If the file filter is unspecified, filter to known Chromium metadata
+ # files.
+ if file_filter is None:
+ file_filter = lambda f: metadata.discover.is_metadata_file(f.LocalPath(
+ ))
- # The repo's root directory is required to check license files.
- repo_root_dir = input_api.change.RepositoryRoot()
+ # The repo's root directory is required to check license files.
+ repo_root_dir = input_api.change.RepositoryRoot()
- outputs = []
- for f in input_api.AffectedFiles(file_filter=file_filter):
- if f.Action() == 'D':
- # No need to validate a deleted file.
- continue
+ outputs = []
+ for f in input_api.AffectedFiles(file_filter=file_filter):
+ if f.Action() == 'D':
+ # No need to validate a deleted file.
+ continue
- errors, warnings = metadata.validate.check_file(
- filepath=f.AbsoluteLocalPath(),
- repo_root_dir=repo_root_dir,
- reader=input_api.ReadFile,
- )
+ errors, warnings = metadata.validate.check_file(
+ filepath=f.AbsoluteLocalPath(),
+ repo_root_dir=repo_root_dir,
+ reader=input_api.ReadFile,
+ )
- for warning in warnings:
- outputs.append(output_api.PresubmitPromptWarning(warning, [f]))
+ for warning in warnings:
+ outputs.append(output_api.PresubmitPromptWarning(warning, [f]))
- for error in errors:
- outputs.append(output_api.PresubmitError(error, [f]))
+ for error in errors:
+ outputs.append(output_api.PresubmitError(error, [f]))
- return outputs
+ return outputs
### Other checks
+
def CheckDoNotSubmit(input_api, output_api):
- return (
- CheckDoNotSubmitInDescription(input_api, output_api) +
- CheckDoNotSubmitInFiles(input_api, output_api)
- )
+ return (CheckDoNotSubmitInDescription(input_api, output_api) +
+ CheckDoNotSubmitInFiles(input_api, output_api))
-def CheckTreeIsOpen(input_api, output_api,
- url=None, closed=None, json_url=None):
- """Check whether to allow commit without prompt.
+def CheckTreeIsOpen(input_api,
+ output_api,
+ url=None,
+ closed=None,
+ json_url=None):
+ """Check whether to allow commit without prompt.
Supports two styles:
1. Checks that an url's content doesn't match a regexp that would mean that
@@ -851,35 +906,42 @@ def CheckTreeIsOpen(input_api, output_api,
closed: regex to match for closed status.
json_url: url to download json style status.
"""
- if not input_api.is_committing or \
- 'PRESUBMIT_SKIP_NETWORK' in _os.environ:
+ if not input_api.is_committing or \
+ 'PRESUBMIT_SKIP_NETWORK' in _os.environ:
+ return []
+ try:
+ if json_url:
+ connection = input_api.urllib_request.urlopen(json_url)
+ status = input_api.json.loads(connection.read())
+ connection.close()
+ if not status['can_commit_freely']:
+ short_text = 'Tree state is: ' + status['general_state']
+ long_text = status['message'] + '\n' + json_url
+ if input_api.no_diffs:
+ return [
+ output_api.PresubmitPromptWarning(short_text,
+ long_text=long_text)
+ ]
+ return [
+ output_api.PresubmitError(short_text, long_text=long_text)
+ ]
+ else:
+ # TODO(bradnelson): drop this once all users are gone.
+ connection = input_api.urllib_request.urlopen(url)
+ status = connection.read()
+ connection.close()
+ if input_api.re.match(closed, status):
+ long_text = status + '\n' + url
+ return [
+ output_api.PresubmitError('The tree is closed.',
+ long_text=long_text)
+ ]
+ except IOError as e:
+ return [
+ output_api.PresubmitError('Error fetching tree status.',
+ long_text=str(e))
+ ]
return []
- try:
- if json_url:
- connection = input_api.urllib_request.urlopen(json_url)
- status = input_api.json.loads(connection.read())
- connection.close()
- if not status['can_commit_freely']:
- short_text = 'Tree state is: ' + status['general_state']
- long_text = status['message'] + '\n' + json_url
- if input_api.no_diffs:
- return [
- output_api.PresubmitPromptWarning(short_text, long_text=long_text)
- ]
- return [output_api.PresubmitError(short_text, long_text=long_text)]
- else:
- # TODO(bradnelson): drop this once all users are gone.
- connection = input_api.urllib_request.urlopen(url)
- status = connection.read()
- connection.close()
- if input_api.re.match(closed, status):
- long_text = status + '\n' + url
- return [output_api.PresubmitError('The tree is closed.',
- long_text=long_text)]
- except IOError as e:
- return [output_api.PresubmitError('Error fetching tree status.',
- long_text=str(e))]
- return []
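
The json_url branch above relies on just three fields of the downloaded status: can_commit_freely, general_state and message. A hypothetical payload and the summary the check would build from it (the URL and the values are invented):

    import json

    status = json.loads(
        '{"can_commit_freely": false,'
        ' "general_state": "closed",'
        ' "message": "Tree closed for a branch cut"}')

    if not status['can_commit_freely']:
        short_text = 'Tree state is: ' + status['general_state']
        long_text = status['message'] + '\n' + 'https://example.test/tree-status.json'
        # A committing run would surface these via PresubmitError, or via
        # PresubmitPromptWarning when no_diffs is set.
        print(short_text)
        print(long_text)
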
def GetUnitTestsInDirectory(input_api,
@@ -893,45 +955,45 @@ def GetUnitTestsInDirectory(input_api,
skip_shebang_check=True,
allowlist=None,
blocklist=None):
- """Lists all files in a directory and runs them. Doesn't recurse.
+ """Lists all files in a directory and runs them. Doesn't recurse.
It's mainly a wrapper for RunUnitTests. Use allowlist and blocklist to filter
tests accordingly. run_on_python2, run_on_python3, and skip_shebang_check are
no longer used but have to be retained because of the many callers in other
repos that pass them in.
"""
- del run_on_python2
- del run_on_python3
- del skip_shebang_check
+ del run_on_python2
+ del run_on_python3
+ del skip_shebang_check
- unit_tests = []
- test_path = input_api.os_path.abspath(
- input_api.os_path.join(input_api.PresubmitLocalPath(), directory))
+ unit_tests = []
+ test_path = input_api.os_path.abspath(
+ input_api.os_path.join(input_api.PresubmitLocalPath(), directory))
- def check(filename, filters):
- return any(True for i in filters if input_api.re.match(i, filename))
+ def check(filename, filters):
+ return any(True for i in filters if input_api.re.match(i, filename))
- to_run = found = 0
- for filename in input_api.os_listdir(test_path):
- found += 1
- fullpath = input_api.os_path.join(test_path, filename)
- if not input_api.os_path.isfile(fullpath):
- continue
- if files_to_check and not check(filename, files_to_check):
- continue
- if files_to_skip and check(filename, files_to_skip):
- continue
- unit_tests.append(input_api.os_path.join(directory, filename))
- to_run += 1
- input_api.logging.debug('Found %d files, running %d unit tests'
- % (found, to_run))
- if not to_run:
- return [
- output_api.PresubmitPromptWarning(
- 'Out of %d files, found none that matched c=%r, s=%r in directory %s'
- % (found, files_to_check, files_to_skip, directory))
- ]
- return GetUnitTests(input_api, output_api, unit_tests, env)
+ to_run = found = 0
+ for filename in input_api.os_listdir(test_path):
+ found += 1
+ fullpath = input_api.os_path.join(test_path, filename)
+ if not input_api.os_path.isfile(fullpath):
+ continue
+ if files_to_check and not check(filename, files_to_check):
+ continue
+ if files_to_skip and check(filename, files_to_skip):
+ continue
+ unit_tests.append(input_api.os_path.join(directory, filename))
+ to_run += 1
+ input_api.logging.debug('Found %d files, running %d unit tests' %
+ (found, to_run))
+ if not to_run:
+ return [
+ output_api.PresubmitPromptWarning(
+ 'Out of %d files, found none that matched c=%r, s=%r in directory %s'
+ % (found, files_to_check, files_to_skip, directory))
+ ]
+ return GetUnitTests(input_api, output_api, unit_tests, env)
def GetUnitTests(input_api,
@@ -941,45 +1003,45 @@ def GetUnitTests(input_api,
run_on_python2=False,
run_on_python3=True,
skip_shebang_check=True):
- """Runs all unit tests in a directory.
+ """Runs all unit tests in a directory.
On Windows, sys.executable is used for unit tests ending with ".py".
run_on_python2, run_on_python3, and skip_shebang_check are no longer used but
have to be retained because of the many callers in other repos that pass them
in.
"""
- del run_on_python2
- del run_on_python3
- del skip_shebang_check
+ del run_on_python2
+ del run_on_python3
+ del skip_shebang_check
- # We don't want to hinder users from uploading incomplete patches, but we do
- # want to report errors as errors when doing presubmit --all testing.
- if input_api.is_committing or input_api.no_diffs:
- message_type = output_api.PresubmitError
- else:
- message_type = output_api.PresubmitPromptWarning
-
- results = []
- for unit_test in unit_tests:
- cmd = [unit_test]
- if input_api.verbose:
- cmd.append('--verbose')
- kwargs = {'cwd': input_api.PresubmitLocalPath()}
- if env:
- kwargs['env'] = env
- if not unit_test.endswith('.py'):
- results.append(input_api.Command(
- name=unit_test,
- cmd=cmd,
- kwargs=kwargs,
- message=message_type))
+ # We don't want to hinder users from uploading incomplete patches, but we do
+ # want to report errors as errors when doing presubmit --all testing.
+ if input_api.is_committing or input_api.no_diffs:
+ message_type = output_api.PresubmitError
else:
- results.append(
- input_api.Command(name=unit_test,
- cmd=cmd,
- kwargs=kwargs,
- message=message_type))
- return results
+ message_type = output_api.PresubmitPromptWarning
+
+ results = []
+ for unit_test in unit_tests:
+ cmd = [unit_test]
+ if input_api.verbose:
+ cmd.append('--verbose')
+ kwargs = {'cwd': input_api.PresubmitLocalPath()}
+ if env:
+ kwargs['env'] = env
+ if not unit_test.endswith('.py'):
+ results.append(
+ input_api.Command(name=unit_test,
+ cmd=cmd,
+ kwargs=kwargs,
+ message=message_type))
+ else:
+ results.append(
+ input_api.Command(name=unit_test,
+ cmd=cmd,
+ kwargs=kwargs,
+ message=message_type))
+ return results
def GetUnitTestsRecursively(input_api,
@@ -990,145 +1052,151 @@ def GetUnitTestsRecursively(input_api,
run_on_python2=False,
run_on_python3=True,
skip_shebang_check=True):
- """Gets all files in the directory tree (git repo) that match files_to_check.
+ """Gets all files in the directory tree (git repo) that match files_to_check.
Restricts itself to only find files within the Change's source repo, not
dependencies. run_on_python2, run_on_python3, and skip_shebang_check are no
longer used but have to be retained because of the many callers in other repos
that pass them in.
"""
- del run_on_python2
- del run_on_python3
- del skip_shebang_check
+ del run_on_python2
+ del run_on_python3
+ del skip_shebang_check
- def check(filename):
- return (any(input_api.re.match(f, filename) for f in files_to_check) and
- not any(input_api.re.match(f, filename) for f in files_to_skip))
+ def check(filename):
+ return (any(input_api.re.match(f, filename) for f in files_to_check) and
+ not any(input_api.re.match(f, filename) for f in files_to_skip))
- tests = []
+ tests = []
- to_run = found = 0
- for filepath in input_api.change.AllFiles(directory):
- found += 1
- if check(filepath):
- to_run += 1
- tests.append(filepath)
- input_api.logging.debug('Found %d files, running %d' % (found, to_run))
- if not to_run:
- return [
- output_api.PresubmitPromptWarning(
- 'Out of %d files, found none that matched c=%r, s=%r in directory %s'
- % (found, files_to_check, files_to_skip, directory))
- ]
+ to_run = found = 0
+ for filepath in input_api.change.AllFiles(directory):
+ found += 1
+ if check(filepath):
+ to_run += 1
+ tests.append(filepath)
+ input_api.logging.debug('Found %d files, running %d' % (found, to_run))
+ if not to_run:
+ return [
+ output_api.PresubmitPromptWarning(
+ 'Out of %d files, found none that matched c=%r, s=%r in directory %s'
+ % (found, files_to_check, files_to_skip, directory))
+ ]
- return GetUnitTests(input_api, output_api, tests)
+ return GetUnitTests(input_api, output_api, tests)
def GetPythonUnitTests(input_api, output_api, unit_tests, python3=False):
- """Run the unit tests out of process, capture the output and use the result
+ """Run the unit tests out of process, capture the output and use the result
code to determine success.
DEPRECATED.
"""
- # We don't want to hinder users from uploading incomplete patches.
- if input_api.is_committing or input_api.no_diffs:
- message_type = output_api.PresubmitError
- else:
- message_type = output_api.PresubmitNotifyResult
- results = []
- for unit_test in unit_tests:
- # Run the unit tests out of process. This is because some unit tests
- # stub out base libraries and don't clean up their mess. It's too easy to
- # get subtle bugs.
- cwd = None
- env = None
- unit_test_name = unit_test
- # 'python -m test.unit_test' doesn't work. We need to change to the right
- # directory instead.
- if '.' in unit_test:
- # Tests imported in submodules (subdirectories) assume that the current
- # directory is in the PYTHONPATH. Manually fix that.
- unit_test = unit_test.replace('.', '/')
- cwd = input_api.os_path.dirname(unit_test)
- unit_test = input_api.os_path.basename(unit_test)
- env = input_api.environ.copy()
- # At least on Windows, it seems '.' must explicitly be in PYTHONPATH
- backpath = [
- '.', input_api.os_path.pathsep.join(['..'] * (cwd.count('/') + 1))
- ]
- # We convert to str, since on Windows on Python 2 only strings are allowed
- # as environment variables, but literals are unicode since we're importing
- # unicode_literals from __future__.
- if env.get('PYTHONPATH'):
- backpath.append(env.get('PYTHONPATH'))
- env['PYTHONPATH'] = input_api.os_path.pathsep.join((backpath))
- env.pop('VPYTHON_CLEAR_PYTHONPATH', None)
- cmd = [input_api.python3_executable, '-m', '%s' % unit_test]
- results.append(input_api.Command(
- name=unit_test_name,
- cmd=cmd,
- kwargs={'env': env, 'cwd': cwd},
- message=message_type))
- return results
+ # We don't want to hinder users from uploading incomplete patches.
+ if input_api.is_committing or input_api.no_diffs:
+ message_type = output_api.PresubmitError
+ else:
+ message_type = output_api.PresubmitNotifyResult
+ results = []
+ for unit_test in unit_tests:
+ # Run the unit tests out of process. This is because some unit tests
+ # stub out base libraries and don't clean up their mess. It's too easy
+ # to get subtle bugs.
+ cwd = None
+ env = None
+ unit_test_name = unit_test
+ # 'python -m test.unit_test' doesn't work. We need to change to the
+ # right directory instead.
+ if '.' in unit_test:
+ # Tests imported in submodules (subdirectories) assume that the
+ # current directory is in the PYTHONPATH. Manually fix that.
+ unit_test = unit_test.replace('.', '/')
+ cwd = input_api.os_path.dirname(unit_test)
+ unit_test = input_api.os_path.basename(unit_test)
+ env = input_api.environ.copy()
+ # At least on Windows, it seems '.' must explicitly be in PYTHONPATH
+ backpath = [
+ '.',
+ input_api.os_path.pathsep.join(['..'] * (cwd.count('/') + 1))
+ ]
+ # We convert to str, since on Windows on Python 2 only strings are
+ # allowed as environment variables, but literals are unicode since
+ # we're importing unicode_literals from __future__.
+ if env.get('PYTHONPATH'):
+ backpath.append(env.get('PYTHONPATH'))
+ env['PYTHONPATH'] = input_api.os_path.pathsep.join((backpath))
+ env.pop('VPYTHON_CLEAR_PYTHONPATH', None)
+ cmd = [input_api.python3_executable, '-m', '%s' % unit_test]
+ results.append(
+ input_api.Command(name=unit_test_name,
+ cmd=cmd,
+ kwargs={
+ 'env': env,
+ 'cwd': cwd
+ },
+ message=message_type))
+ return results
def RunUnitTestsInDirectory(input_api, *args, **kwargs):
- """Run tests in a directory serially.
+ """Run tests in a directory serially.
For better performance, use GetUnitTestsInDirectory and then
pass to input_api.RunTests.
"""
- return input_api.RunTests(
- GetUnitTestsInDirectory(input_api, *args, **kwargs), False)
+ return input_api.RunTests(
+ GetUnitTestsInDirectory(input_api, *args, **kwargs), False)
def RunUnitTests(input_api, *args, **kwargs):
- """Run tests serially.
+ """Run tests serially.
For better performance, use GetUnitTests and then pass to
input_api.RunTests.
"""
- return input_api.RunTests(GetUnitTests(input_api, *args, **kwargs), False)
+ return input_api.RunTests(GetUnitTests(input_api, *args, **kwargs), False)
def RunPythonUnitTests(input_api, *args, **kwargs):
- """Run python tests in a directory serially.
+ """Run python tests in a directory serially.
DEPRECATED
"""
- return input_api.RunTests(
- GetPythonUnitTests(input_api, *args, **kwargs), False)
+ return input_api.RunTests(GetPythonUnitTests(input_api, *args, **kwargs),
+ False)
def _FetchAllFiles(input_api, files_to_check, files_to_skip):
- """Hack to fetch all files."""
- # We cannot use AffectedFiles here because we want to test every python
- # file on each single python change. It's because a change in a python file
- # can break another unmodified file.
- # Use code similar to InputApi.FilterSourceFile()
- def Find(filepath, filters):
- if input_api.platform == 'win32':
- filepath = filepath.replace('\\', '/')
+ """Hack to fetch all files."""
- for item in filters:
- if input_api.re.match(item, filepath):
- return True
- return False
+ # We cannot use AffectedFiles here because we want to test every python
+ # file on each single python change. It's because a change in a python file
+ # can break another unmodified file.
+ # Use code similar to InputApi.FilterSourceFile()
+ def Find(filepath, filters):
+ if input_api.platform == 'win32':
+ filepath = filepath.replace('\\', '/')
- files = []
- path_len = len(input_api.PresubmitLocalPath())
- for dirpath, dirnames, filenames in input_api.os_walk(
- input_api.PresubmitLocalPath()):
- # Passes dirnames in block list to speed up search.
- for item in dirnames[:]:
- filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
- if Find(filepath, files_to_skip):
- dirnames.remove(item)
- for item in filenames:
- filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
- if Find(filepath, files_to_check) and not Find(filepath, files_to_skip):
- files.append(filepath)
- return files
+ for item in filters:
+ if input_api.re.match(item, filepath):
+ return True
+ return False
+
+ files = []
+ path_len = len(input_api.PresubmitLocalPath())
+ for dirpath, dirnames, filenames in input_api.os_walk(
+ input_api.PresubmitLocalPath()):
+ # Passes dirnames in block list to speed up search.
+ for item in dirnames[:]:
+ filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
+ if Find(filepath, files_to_skip):
+ dirnames.remove(item)
+ for item in filenames:
+ filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
+ if Find(filepath,
+ files_to_check) and not Find(filepath, files_to_skip):
+ files.append(filepath)
+ return files
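
The in-place edit of dirnames above is the usual os.walk pruning idiom: removing an entry from the list os.walk handed out stops it from descending into that directory. A minimal, self-contained sketch of the same pattern (the skip pattern and the .py filter are arbitrary examples, not taken from this file):

    import os
    import re

    def find_py_files(root, skip_patterns=(r'^third_party$',)):
        matches = []
        for dirpath, dirnames, filenames in os.walk(root):
            # Prune in place so os.walk never enters skipped directories.
            dirnames[:] = [
                d for d in dirnames
                if not any(re.match(p, d) for p in skip_patterns)
            ]
            matches.extend(
                os.path.join(dirpath, f) for f in filenames
                if f.endswith('.py'))
        return matches
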
def GetPylint(input_api,
@@ -1139,325 +1207,342 @@ def GetPylint(input_api,
extra_paths_list=None,
pylintrc=None,
version='2.7'):
- """Run pylint on python files.
+ """Run pylint on python files.
The default files_to_check enforces looking only at *.py files.
Currently only pylint version '2.6' and '2.7' are supported.
"""
- files_to_check = tuple(files_to_check or (r'.*\.py$', ))
- files_to_skip = tuple(files_to_skip or input_api.DEFAULT_FILES_TO_SKIP)
- extra_paths_list = extra_paths_list or []
+ files_to_check = tuple(files_to_check or (r'.*\.py$', ))
+ files_to_skip = tuple(files_to_skip or input_api.DEFAULT_FILES_TO_SKIP)
+ extra_paths_list = extra_paths_list or []
- assert version in ('2.6', '2.7'), 'Unsupported pylint version: %s' % version
+ assert version in ('2.6', '2.7'), 'Unsupported pylint version: %s' % version
- if input_api.is_committing or input_api.no_diffs:
- error_type = output_api.PresubmitError
- else:
- error_type = output_api.PresubmitPromptWarning
-
- # Only trigger if there is at least one python file affected.
- def rel_path(regex):
- """Modifies a regex for a subject to accept paths relative to root."""
- def samefile(a, b):
- # Default implementation for platforms lacking os.path.samefile
- # (like Windows).
- return input_api.os_path.abspath(a) == input_api.os_path.abspath(b)
- samefile = getattr(input_api.os_path, 'samefile', samefile)
- if samefile(input_api.PresubmitLocalPath(),
- input_api.change.RepositoryRoot()):
- return regex
-
- prefix = input_api.os_path.join(input_api.os_path.relpath(
- input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()), '')
- return input_api.re.escape(prefix) + regex
- src_filter = lambda x: input_api.FilterSourceFile(
- x, map(rel_path, files_to_check), map(rel_path, files_to_skip))
- if not input_api.AffectedSourceFiles(src_filter):
- input_api.logging.info('Skipping pylint: no matching changes.')
- return []
-
- if pylintrc is not None:
- pylintrc = input_api.os_path.join(input_api.PresubmitLocalPath(), pylintrc)
- else:
- pylintrc = input_api.os_path.join(_HERE, 'pylintrc')
- extra_args = ['--rcfile=%s' % pylintrc]
- if disabled_warnings:
- extra_args.extend(['-d', ','.join(disabled_warnings)])
-
- files = _FetchAllFiles(input_api, files_to_check, files_to_skip)
- if not files:
- return []
- files.sort()
-
- input_api.logging.info('Running pylint %s on %d files', version, len(files))
- input_api.logging.debug('Running pylint on: %s', files)
- env = input_api.environ.copy()
- env['PYTHONPATH'] = input_api.os_path.pathsep.join(extra_paths_list)
- env.pop('VPYTHON_CLEAR_PYTHONPATH', None)
- input_api.logging.debug(' with extra PYTHONPATH: %r', extra_paths_list)
- files_per_job = 10
-
- def GetPylintCmd(flist, extra, parallel):
- # Windows needs help running python files so we explicitly specify
- # the interpreter to use. It also has limitations on the size of
- # the command-line, so we pass arguments via a pipe.
- tool = input_api.os_path.join(_HERE, 'pylint-' + version)
- kwargs = {'env': env}
- if input_api.platform == 'win32':
- # On Windows, scripts on the current directory take precedence over PATH.
- # When `pylint.bat` calls `vpython`, it will execute the `vpython` of the
- # depot_tools under test instead of the one in the bot.
- # As a workaround, we run the tests from the parent directory instead.
- cwd = input_api.change.RepositoryRoot()
- if input_api.os_path.basename(cwd) == 'depot_tools':
- kwargs['cwd'] = input_api.os_path.dirname(cwd)
- flist = [input_api.os_path.join('depot_tools', f) for f in flist]
- tool += '.bat'
-
- cmd = [tool, '--args-on-stdin']
- if len(flist) == 1:
- description = flist[0]
+ if input_api.is_committing or input_api.no_diffs:
+ error_type = output_api.PresubmitError
else:
- description = '%s files' % len(flist)
+ error_type = output_api.PresubmitPromptWarning
- args = extra_args[:]
- if extra:
- args.extend(extra)
- description += ' using %s' % (extra,)
- if parallel:
- # Make sure we don't request more parallelism than is justified for the
- # number of files we have to process. PyLint child-process startup time is
- # significant.
- jobs = min(input_api.cpu_count, 1 + len(flist) // files_per_job)
- if jobs > 1:
- args.append('--jobs=%s' % jobs)
- description += ' on %d processes' % jobs
+ # Only trigger if there is at least one python file affected.
+ def rel_path(regex):
+ """Modifies a regex for a subject to accept paths relative to root."""
+ def samefile(a, b):
+ # Default implementation for platforms lacking os.path.samefile
+ # (like Windows).
+ return input_api.os_path.abspath(a) == input_api.os_path.abspath(b)
- kwargs['stdin'] = '\n'.join(args + flist).encode('utf-8')
+ samefile = getattr(input_api.os_path, 'samefile', samefile)
+ if samefile(input_api.PresubmitLocalPath(),
+ input_api.change.RepositoryRoot()):
+ return regex
- return input_api.Command(name='Pylint (%s)' % description,
- cmd=cmd,
- kwargs=kwargs,
- message=error_type,
- python3=True)
+ prefix = input_api.os_path.join(
+ input_api.os_path.relpath(input_api.PresubmitLocalPath(),
+ input_api.change.RepositoryRoot()), '')
+ return input_api.re.escape(prefix) + regex
+
+ src_filter = lambda x: input_api.FilterSourceFile(
+ x, map(rel_path, files_to_check), map(rel_path, files_to_skip))
+ if not input_api.AffectedSourceFiles(src_filter):
+ input_api.logging.info('Skipping pylint: no matching changes.')
+ return []
+
+ if pylintrc is not None:
+ pylintrc = input_api.os_path.join(input_api.PresubmitLocalPath(),
+ pylintrc)
+ else:
+ pylintrc = input_api.os_path.join(_HERE, 'pylintrc')
+ extra_args = ['--rcfile=%s' % pylintrc]
+ if disabled_warnings:
+ extra_args.extend(['-d', ','.join(disabled_warnings)])
+
+ files = _FetchAllFiles(input_api, files_to_check, files_to_skip)
+ if not files:
+ return []
+ files.sort()
+
+ input_api.logging.info('Running pylint %s on %d files', version, len(files))
+ input_api.logging.debug('Running pylint on: %s', files)
+ env = input_api.environ.copy()
+ env['PYTHONPATH'] = input_api.os_path.pathsep.join(extra_paths_list)
+ env.pop('VPYTHON_CLEAR_PYTHONPATH', None)
+ input_api.logging.debug(' with extra PYTHONPATH: %r', extra_paths_list)
+ files_per_job = 10
+
+ def GetPylintCmd(flist, extra, parallel):
+ # Windows needs help running python files so we explicitly specify
+ # the interpreter to use. It also has limitations on the size of
+ # the command-line, so we pass arguments via a pipe.
+ tool = input_api.os_path.join(_HERE, 'pylint-' + version)
+ kwargs = {'env': env}
+ if input_api.platform == 'win32':
+ # On Windows, scripts on the current directory take precedence over
+ # PATH. When `pylint.bat` calls `vpython`, it will execute the
+ # `vpython` of the depot_tools under test instead of the one in the
+ # bot. As a workaround, we run the tests from the parent directory
+ # instead.
+ cwd = input_api.change.RepositoryRoot()
+ if input_api.os_path.basename(cwd) == 'depot_tools':
+ kwargs['cwd'] = input_api.os_path.dirname(cwd)
+ flist = [
+ input_api.os_path.join('depot_tools', f) for f in flist
+ ]
+ tool += '.bat'
+
+ cmd = [tool, '--args-on-stdin']
+ if len(flist) == 1:
+ description = flist[0]
+ else:
+ description = '%s files' % len(flist)
+
+ args = extra_args[:]
+ if extra:
+ args.extend(extra)
+ description += ' using %s' % (extra, )
+ if parallel:
+ # Make sure we don't request more parallelism than is justified for
+ # the number of files we have to process. PyLint child-process
+ # startup time is significant.
+ jobs = min(input_api.cpu_count, 1 + len(flist) // files_per_job)
+ if jobs > 1:
+ args.append('--jobs=%s' % jobs)
+ description += ' on %d processes' % jobs
+
+ kwargs['stdin'] = '\n'.join(args + flist).encode('utf-8')
+
+ return input_api.Command(name='Pylint (%s)' % description,
+ cmd=cmd,
+ kwargs=kwargs,
+ message=error_type,
+ python3=True)
+
+ # pylint's cycle detection doesn't work in parallel, so spawn a second,
+ # single-threaded job for just that check. However, only do this if there
+ # are actually enough files to process to justify parallelism in the first
+ # place. Some PRESUBMITs explicitly mention cycle detection.
+ if len(files) >= files_per_job and not any(
+ 'R0401' in a or 'cyclic-import' in a for a in extra_args):
+ return [
+ GetPylintCmd(files, ["--disable=cyclic-import"], True),
+ GetPylintCmd(files, ["--disable=all", "--enable=cyclic-import"],
+ False),
+ ]
- # pylint's cycle detection doesn't work in parallel, so spawn a second,
- # single-threaded job for just that check. However, only do this if there are
- # actually enough files to process to justify parallelism in the first place.
- # Some PRESUBMITs explicitly mention cycle detection.
- if len(files) >= files_per_job and not any(
- 'R0401' in a or 'cyclic-import' in a for a in extra_args):
return [
- GetPylintCmd(files, ["--disable=cyclic-import"], True),
- GetPylintCmd(files, ["--disable=all", "--enable=cyclic-import"], False),
+ GetPylintCmd(files, [], True),
]
- return [
- GetPylintCmd(files, [], True),
- ]
-
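
The --jobs computation above trades pylint's per-process startup cost against parallelism: an extra process is only justified per files_per_job (10) files, and never more than the CPU count. A small illustration of that arithmetic; cyclic-import detection is then run as a separate single-threaded command because, as the comment notes, pylint's cycle detection does not work in parallel.

    def pylint_jobs(num_files, cpu_count, files_per_job=10):
        # Mirrors: jobs = min(input_api.cpu_count, 1 + len(flist) // files_per_job)
        return min(cpu_count, 1 + num_files // files_per_job)

    assert pylint_jobs(5, cpu_count=8) == 1     # Too few files to parallelise.
    assert pylint_jobs(35, cpu_count=8) == 4    # 1 + 35 // 10
    assert pylint_jobs(500, cpu_count=8) == 8   # Capped by the CPU count.
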
def RunPylint(input_api, *args, **kwargs):
- """Legacy presubmit function.
+ """Legacy presubmit function.
For better performance, get all tests and then pass to
input_api.RunTests.
"""
- return input_api.RunTests(GetPylint(input_api, *args, **kwargs), False)
+ return input_api.RunTests(GetPylint(input_api, *args, **kwargs), False)
def CheckDirMetadataFormat(input_api, output_api, dirmd_bin=None):
- # TODO(crbug.com/1102997): Remove OWNERS once DIR_METADATA migration is
- # complete.
- file_filter = lambda f: (
- input_api.basename(f.LocalPath()) in ('DIR_METADATA', 'OWNERS'))
- affected_files = {
- f.AbsoluteLocalPath()
- for f in input_api.change.AffectedFiles(
- include_deletes=False, file_filter=file_filter)
- }
- if not affected_files:
- return []
+ # TODO(crbug.com/1102997): Remove OWNERS once DIR_METADATA migration is
+ # complete.
+ file_filter = lambda f: (input_api.basename(f.LocalPath()) in
+ ('DIR_METADATA', 'OWNERS'))
+ affected_files = {
+ f.AbsoluteLocalPath()
+ for f in input_api.change.AffectedFiles(include_deletes=False,
+ file_filter=file_filter)
+ }
+ if not affected_files:
+ return []
- name = 'Validate metadata in OWNERS and DIR_METADATA files'
+ name = 'Validate metadata in OWNERS and DIR_METADATA files'
- if dirmd_bin is None:
- dirmd_bin = 'dirmd.bat' if input_api.is_windows else 'dirmd'
+ if dirmd_bin is None:
+ dirmd_bin = 'dirmd.bat' if input_api.is_windows else 'dirmd'
- # When running git cl presubmit --all this presubmit may be asked to check
- # ~7,500 files, leading to a command line that is about 500,000 characters.
- # This goes past the Windows 8191 character cmd.exe limit and causes cryptic
- # failures. To avoid these we break the command up into smaller pieces. The
- # non-Windows limit is chosen so that the code that splits up commands will
- # get some exercise on other platforms.
- # Depending on how long the command is on Windows the error may be:
- # The command line is too long.
- # Or it may be:
- # OSError: Execution failed with error: [WinError 206] The filename or
- # extension is too long.
- # I suspect that the latter error comes from CreateProcess hitting its 32768
- # character limit.
- files_per_command = 50 if input_api.is_windows else 1000
- affected_files = sorted(affected_files)
- results = []
- for i in range(0, len(affected_files), files_per_command):
- kwargs = {}
- cmd = [dirmd_bin, 'validate'] + affected_files[i : i + files_per_command]
- results.extend([input_api.Command(
- name, cmd, kwargs, output_api.PresubmitError)])
- return results
+ # When running git cl presubmit --all this presubmit may be asked to check
+ # ~7,500 files, leading to a command line that is about 500,000 characters.
+ # This goes past the Windows 8191 character cmd.exe limit and causes cryptic
+ # failures. To avoid these we break the command up into smaller pieces. The
+ # non-Windows limit is chosen so that the code that splits up commands will
+ # get some exercise on other platforms.
+ # Depending on how long the command is on Windows the error may be:
+ # The command line is too long.
+ # Or it may be:
+ # OSError: Execution failed with error: [WinError 206] The filename or
+ # extension is too long.
+ # I suspect that the latter error comes from CreateProcess hitting its 32768
+ # character limit.
+ files_per_command = 50 if input_api.is_windows else 1000
+ affected_files = sorted(affected_files)
+ results = []
+ for i in range(0, len(affected_files), files_per_command):
+ kwargs = {}
+ cmd = [dirmd_bin, 'validate'] + affected_files[i:i + files_per_command]
+ results.extend(
+ [input_api.Command(name, cmd, kwargs, output_api.PresubmitError)])
+ return results
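
A minimal sketch of the batching done above to stay under the Windows command-line length limits; the batch sizes come from the code, while the file names in the example are invented.

    def dirmd_commands(affected_files, is_windows, dirmd_bin='dirmd'):
        # 50 paths per command on Windows (cmd.exe / CreateProcess limits),
        # 1000 elsewhere so the splitting code is exercised on all platforms.
        files_per_command = 50 if is_windows else 1000
        affected_files = sorted(affected_files)
        for i in range(0, len(affected_files), files_per_command):
            yield [dirmd_bin, 'validate'] + affected_files[i:i + files_per_command]

    # Hypothetical example: 120 metadata files on Windows become three commands.
    cmds = list(dirmd_commands(['f%03d' % n for n in range(120)], is_windows=True))
    assert len(cmds) == 3
    assert len(cmds[0]) == 52  # 'dirmd', 'validate', plus 50 file paths.
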
def CheckNoNewMetadataInOwners(input_api, output_api):
- """Check that no metadata is added to OWNERS files."""
- if input_api.no_diffs:
- return []
+ """Check that no metadata is added to OWNERS files."""
+ if input_api.no_diffs:
+ return []
- _METADATA_LINE_RE = input_api.re.compile(
- r'^#\s*(TEAM|COMPONENT|OS|WPT-NOTIFY)+\s*:\s*\S+$',
- input_api.re.MULTILINE | input_api.re.IGNORECASE)
- affected_files = input_api.change.AffectedFiles(
- include_deletes=False,
- file_filter=lambda f: input_api.basename(f.LocalPath()) == 'OWNERS')
+ _METADATA_LINE_RE = input_api.re.compile(
+ r'^#\s*(TEAM|COMPONENT|OS|WPT-NOTIFY)+\s*:\s*\S+$',
+ input_api.re.MULTILINE | input_api.re.IGNORECASE)
+ affected_files = input_api.change.AffectedFiles(
+ include_deletes=False,
+ file_filter=lambda f: input_api.basename(f.LocalPath()) == 'OWNERS')
- errors = []
- for f in affected_files:
- for _, line in f.ChangedContents():
- if _METADATA_LINE_RE.search(line):
- errors.append(f.AbsoluteLocalPath())
- break
+ errors = []
+ for f in affected_files:
+ for _, line in f.ChangedContents():
+ if _METADATA_LINE_RE.search(line):
+ errors.append(f.AbsoluteLocalPath())
+ break
- if not errors:
- return []
+ if not errors:
+ return []
- return [output_api.PresubmitError(
- 'New metadata was added to the following OWNERS files, but should '
- 'have been added to DIR_METADATA files instead:\n' +
- '\n'.join(errors) + '\n' +
- 'See https://source.chromium.org/chromium/infra/infra/+/HEAD:'
- 'go/src/infra/tools/dirmd/proto/dir_metadata.proto for details.')]
+ return [
+ output_api.PresubmitError(
+ 'New metadata was added to the following OWNERS files, but should '
+ 'have been added to DIR_METADATA files instead:\n' +
+ '\n'.join(errors) + '\n' +
+ 'See https://source.chromium.org/chromium/infra/infra/+/HEAD:'
+ 'go/src/infra/tools/dirmd/proto/dir_metadata.proto for details.')
+ ]
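
The same _METADATA_LINE_RE pattern, tried against a few invented OWNERS lines to show what counts as metadata here:

    import re

    metadata_line_re = re.compile(
        r'^#\s*(TEAM|COMPONENT|OS|WPT-NOTIFY)+\s*:\s*\S+$',
        re.MULTILINE | re.IGNORECASE)

    assert metadata_line_re.search('# COMPONENT: Some>Component')
    assert metadata_line_re.search('# team: some-team@example.com')  # Case-insensitive.
    assert not metadata_line_re.search('# Plain explanatory comment')
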
def CheckOwnersDirMetadataExclusive(input_api, output_api):
- """Check that metadata in OWNERS files and DIR_METADATA files are mutually
+ """Check that metadata in OWNERS files and DIR_METADATA files are mutually
exclusive.
"""
- _METADATA_LINE_RE = input_api.re.compile(
- r'^#\s*(TEAM|COMPONENT|OS|WPT-NOTIFY)+\s*:\s*\S+$',
- input_api.re.MULTILINE)
- file_filter = (
- lambda f: input_api.basename(f.LocalPath()) in ('OWNERS', 'DIR_METADATA'))
- affected_dirs = {
- input_api.os_path.dirname(f.AbsoluteLocalPath())
- for f in input_api.change.AffectedFiles(
- include_deletes=False, file_filter=file_filter)
- }
+ _METADATA_LINE_RE = input_api.re.compile(
+ r'^#\s*(TEAM|COMPONENT|OS|WPT-NOTIFY)+\s*:\s*\S+$',
+ input_api.re.MULTILINE)
+ file_filter = (lambda f: input_api.basename(f.LocalPath()) in
+ ('OWNERS', 'DIR_METADATA'))
+ affected_dirs = {
+ input_api.os_path.dirname(f.AbsoluteLocalPath())
+ for f in input_api.change.AffectedFiles(include_deletes=False,
+ file_filter=file_filter)
+ }
- errors = []
- for path in affected_dirs:
- owners_path = input_api.os_path.join(path, 'OWNERS')
- dir_metadata_path = input_api.os_path.join(path, 'DIR_METADATA')
- if (not input_api.os_path.isfile(dir_metadata_path)
- or not input_api.os_path.isfile(owners_path)):
- continue
- if _METADATA_LINE_RE.search(input_api.ReadFile(owners_path)):
- errors.append(owners_path)
+ errors = []
+ for path in affected_dirs:
+ owners_path = input_api.os_path.join(path, 'OWNERS')
+ dir_metadata_path = input_api.os_path.join(path, 'DIR_METADATA')
+ if (not input_api.os_path.isfile(dir_metadata_path)
+ or not input_api.os_path.isfile(owners_path)):
+ continue
+ if _METADATA_LINE_RE.search(input_api.ReadFile(owners_path)):
+ errors.append(owners_path)
- if not errors:
- return []
+ if not errors:
+ return []
- return [output_api.PresubmitError(
- 'The following OWNERS files should contain no metadata, as there is a '
- 'DIR_METADATA file present in the same directory:\n'
- + '\n'.join(errors))]
+ return [
+ output_api.PresubmitError(
+ 'The following OWNERS files should contain no metadata, as there is a '
+ 'DIR_METADATA file present in the same directory:\n' +
+ '\n'.join(errors))
+ ]
def CheckOwnersFormat(input_api, output_api):
- if input_api.gerrit and input_api.gerrit.IsCodeOwnersEnabledOnRepo():
- return []
+ if input_api.gerrit and input_api.gerrit.IsCodeOwnersEnabledOnRepo():
+ return []
- return [
- output_api.PresubmitError(
- 'code-owners is not enabled. Ask your host enable it on your gerrit '
- 'host. Read more about code-owners at '
- 'https://chromium-review.googlesource.com/'
- 'plugins/code-owners/Documentation/index.html.')
- ]
+ return [
+ output_api.PresubmitError(
+ 'code-owners is not enabled. Ask your host to enable it on your gerrit '
+ 'host. Read more about code-owners at '
+ 'https://chromium-review.googlesource.com/'
+ 'plugins/code-owners/Documentation/index.html.')
+ ]
-def CheckOwners(
- input_api, output_api, source_file_filter=None, allow_tbr=True):
- # Skip OWNERS check when Owners-Override label is approved. This is intended
- # for global owners, trusted bots, and on-call sheriffs. Review is still
- # required for these changes.
- if (input_api.change.issue
- and input_api.gerrit.IsOwnersOverrideApproved(input_api.change.issue)):
- return []
+def CheckOwners(input_api, output_api, source_file_filter=None, allow_tbr=True):
+ # Skip OWNERS check when Owners-Override label is approved. This is intended
+ # for global owners, trusted bots, and on-call sheriffs. Review is still
+ # required for these changes.
+ if (input_api.change.issue and input_api.gerrit.IsOwnersOverrideApproved(
+ input_api.change.issue)):
+ return []
- if input_api.gerrit and input_api.gerrit.IsCodeOwnersEnabledOnRepo():
- return []
+ if input_api.gerrit and input_api.gerrit.IsCodeOwnersEnabledOnRepo():
+ return []
- return [
- output_api.PresubmitError(
- 'code-owners is not enabled. Ask your host enable it on your gerrit '
- 'host. Read more about code-owners at '
- 'https://chromium-review.googlesource.com/'
- 'plugins/code-owners/Documentation/index.html.')
- ]
+ return [
+ output_api.PresubmitError(
+ 'code-owners is not enabled. Ask your host to enable it on your gerrit '
+ 'host. Read more about code-owners at '
+ 'https://chromium-review.googlesource.com/'
+ 'plugins/code-owners/Documentation/index.html.')
+ ]
-def GetCodereviewOwnerAndReviewers(
- input_api, _email_regexp=None, approval_needed=True):
- """Return the owner and reviewers of a change, if any.
+def GetCodereviewOwnerAndReviewers(input_api,
+ _email_regexp=None,
+ approval_needed=True):
+ """Return the owner and reviewers of a change, if any.
If approval_needed is True, only reviewers who have approved the change
will be returned.
"""
- # Recognizes 'X@Y' email addresses. Very simplistic.
- EMAIL_REGEXP = input_api.re.compile(r'^[\w\-\+\%\.]+\@[\w\-\+\%\.]+$')
- issue = input_api.change.issue
- if not issue:
- return None, (set() if approval_needed else
- _ReviewersFromChange(input_api.change))
+ # Recognizes 'X@Y' email addresses. Very simplistic.
+ EMAIL_REGEXP = input_api.re.compile(r'^[\w\-\+\%\.]+\@[\w\-\+\%\.]+$')
+ issue = input_api.change.issue
+ if not issue:
+ return None, (set() if approval_needed else _ReviewersFromChange(
+ input_api.change))
- owner_email = input_api.gerrit.GetChangeOwner(issue)
- reviewers = set(
- r for r in input_api.gerrit.GetChangeReviewers(issue, approval_needed)
- if _match_reviewer_email(r, owner_email, EMAIL_REGEXP))
- input_api.logging.debug('owner: %s; approvals given by: %s',
- owner_email, ', '.join(sorted(reviewers)))
- return owner_email, reviewers
+ owner_email = input_api.gerrit.GetChangeOwner(issue)
+ reviewers = set(
+ r for r in input_api.gerrit.GetChangeReviewers(issue, approval_needed)
+ if _match_reviewer_email(r, owner_email, EMAIL_REGEXP))
+ input_api.logging.debug('owner: %s; approvals given by: %s', owner_email,
+ ', '.join(sorted(reviewers)))
+ return owner_email, reviewers
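
A simplified, self-contained version of the reviewer filtering above. The e-mail regular expression is the one from the code; the owner and reviewer addresses are made up.

    import re

    EMAIL_REGEXP = re.compile(r'^[\w\-\+\%\.]+\@[\w\-\+\%\.]+$')
    owner_email = 'author@example.com'
    raw_reviewers = ['author@example.com', 'reviewer@example.com', 'not an email']
    reviewers = {r for r in raw_reviewers
                 if EMAIL_REGEXP.match(r) and r != owner_email}
    assert reviewers == {'reviewer@example.com'}  # The owner never counts.
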
def _ReviewersFromChange(change):
- """Return the reviewers specified in the |change|, if any."""
- reviewers = set()
- reviewers.update(change.ReviewersFromDescription())
- reviewers.update(change.TBRsFromDescription())
+ """Return the reviewers specified in the |change|, if any."""
+ reviewers = set()
+ reviewers.update(change.ReviewersFromDescription())
+ reviewers.update(change.TBRsFromDescription())
- # Drop reviewers that aren't specified in email address format.
- return set(reviewer for reviewer in reviewers if '@' in reviewer)
+ # Drop reviewers that aren't specified in email address format.
+ return set(reviewer for reviewer in reviewers if '@' in reviewer)
def _match_reviewer_email(r, owner_email, email_regexp):
- return email_regexp.match(r) and r != owner_email
+ return email_regexp.match(r) and r != owner_email
def CheckSingletonInHeaders(input_api, output_api, source_file_filter=None):
- """Deprecated, must be removed."""
- return [
- output_api.PresubmitNotifyResult(
- 'CheckSingletonInHeaders is deprecated, please remove it.')
- ]
+ """Deprecated, must be removed."""
+ return [
+ output_api.PresubmitNotifyResult(
+ 'CheckSingletonInHeaders is deprecated, please remove it.')
+ ]
-def PanProjectChecks(input_api, output_api,
- excluded_paths=None, text_files=None,
- license_header=None, project_name=None,
- owners_check=True, maxlen=80, global_checks=True):
- """Checks that ALL chromium orbit projects should use.
+def PanProjectChecks(input_api,
+ output_api,
+ excluded_paths=None,
+ text_files=None,
+ license_header=None,
+ project_name=None,
+ owners_check=True,
+ maxlen=80,
+ global_checks=True):
+ """Checks that ALL chromium orbit projects should use.
These are checks to be run on all Chromium orbit project, including:
Chromium
@@ -1479,94 +1564,111 @@ def PanProjectChecks(input_api, output_api,
Returns:
A list of warning or error objects.
"""
- excluded_paths = tuple(excluded_paths or [])
- text_files = tuple(text_files or (
- r'.+\.txt$',
- r'.+\.json$',
- ))
+ excluded_paths = tuple(excluded_paths or [])
+ text_files = tuple(text_files or (
+ r'.+\.txt$',
+ r'.+\.json$',
+ ))
- results = []
- # This code loads the default skip list (e.g. third_party, experimental, etc)
- # and add our skip list (breakpad, skia and v8 are still not following
- # google style and are not really living this repository).
- # See presubmit_support.py InputApi.FilterSourceFile for the (simple) usage.
- files_to_skip = input_api.DEFAULT_FILES_TO_SKIP + excluded_paths
- files_to_check = input_api.DEFAULT_FILES_TO_CHECK + text_files
- sources = lambda x: input_api.FilterSourceFile(x, files_to_skip=files_to_skip)
- text_files = lambda x: input_api.FilterSourceFile(
- x, files_to_skip=files_to_skip, files_to_check=files_to_check)
+ results = []
+ # This code loads the default skip list (e.g. third_party, experimental,
+ # etc) and adds our skip list (breakpad, skia and v8 are still not following
+ # google style and are not really living in this repository). See
+ # presubmit_support.py InputApi.FilterSourceFile for the (simple) usage.
+ files_to_skip = input_api.DEFAULT_FILES_TO_SKIP + excluded_paths
+ files_to_check = input_api.DEFAULT_FILES_TO_CHECK + text_files
+ sources = lambda x: input_api.FilterSourceFile(x,
+ files_to_skip=files_to_skip)
+ text_files = lambda x: input_api.FilterSourceFile(
+ x, files_to_skip=files_to_skip, files_to_check=files_to_check)
- snapshot_memory = []
- def snapshot(msg):
- """Measures & prints performance warning if a rule is running slow."""
- dt2 = input_api.time.time()
- if snapshot_memory:
- delta_s = dt2 - snapshot_memory[0]
- if delta_s > 0.5:
- print(" %s took a long time: %.1fs" % (snapshot_memory[1], delta_s))
- snapshot_memory[:] = (dt2, msg)
+ snapshot_memory = []
- snapshot("checking owners files format")
- try:
- if not 'PRESUBMIT_SKIP_NETWORK' in _os.environ and owners_check:
- snapshot("checking owners")
- results.extend(
- input_api.canned_checks.CheckOwnersFormat(input_api, output_api))
- results.extend(
- input_api.canned_checks.CheckOwners(input_api,
- output_api,
- source_file_filter=None))
- except Exception as e:
- print('Failed to check owners - %s' % str(e))
+ def snapshot(msg):
+ """Measures & prints performance warning if a rule is running slow."""
+ dt2 = input_api.time.time()
+ if snapshot_memory:
+ delta_s = dt2 - snapshot_memory[0]
+ if delta_s > 0.5:
+ print(" %s took a long time: %.1fs" %
+ (snapshot_memory[1], delta_s))
+ snapshot_memory[:] = (dt2, msg)
- snapshot("checking long lines")
- results.extend(input_api.canned_checks.CheckLongLines(
- input_api, output_api, maxlen, source_file_filter=sources))
- snapshot( "checking tabs")
- results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
- input_api, output_api, source_file_filter=sources))
- snapshot( "checking stray whitespace")
- results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
- input_api, output_api, source_file_filter=sources))
- snapshot("checking license")
- results.extend(input_api.canned_checks.CheckLicense(
- input_api, output_api, license_header, project_name,
- source_file_filter=sources))
- snapshot("checking corp links in files")
- results.extend(
- input_api.canned_checks.CheckCorpLinksInFiles(input_api,
+ snapshot("checking owners files format")
+ try:
+ if not 'PRESUBMIT_SKIP_NETWORK' in _os.environ and owners_check:
+ snapshot("checking owners")
+ results.extend(
+ input_api.canned_checks.CheckOwnersFormat(
+ input_api, output_api))
+ results.extend(
+ input_api.canned_checks.CheckOwners(input_api,
output_api,
- source_file_filter=sources))
+ source_file_filter=None))
+ except Exception as e:
+ print('Failed to check owners - %s' % str(e))
+
+ snapshot("checking long lines")
+ results.extend(
+ input_api.canned_checks.CheckLongLines(input_api,
+ output_api,
+ maxlen,
+ source_file_filter=sources))
+ snapshot("checking tabs")
+ results.extend(
+ input_api.canned_checks.CheckChangeHasNoTabs(
+ input_api, output_api, source_file_filter=sources))
+ snapshot("checking stray whitespace")
+ results.extend(
+ input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
+ input_api, output_api, source_file_filter=sources))
+ snapshot("checking license")
+ results.extend(
+ input_api.canned_checks.CheckLicense(input_api,
+ output_api,
+ license_header,
+ project_name,
+ source_file_filter=sources))
+ snapshot("checking corp links in files")
+ results.extend(
+ input_api.canned_checks.CheckCorpLinksInFiles(
+ input_api, output_api, source_file_filter=sources))
+
+ if input_api.is_committing:
+ if global_checks:
+ # These changes verify state that is global to the tree and can
+ # therefore be skipped when run from PRESUBMIT.py scripts deeper in
+ # the tree. Skipping these saves a bit of time and avoids having
+ # redundant output. This was initially designed for use by
+ # third_party/blink/PRESUBMIT.py.
+ snapshot("checking was uploaded")
+ results.extend(
+ input_api.canned_checks.CheckChangeWasUploaded(
+ input_api, output_api))
+ snapshot("checking description")
+ results.extend(
+ input_api.canned_checks.CheckChangeHasDescription(
+ input_api, output_api))
+ results.extend(
+ input_api.canned_checks.CheckDoNotSubmitInDescription(
+ input_api, output_api))
+ results.extend(
+ input_api.canned_checks.CheckCorpLinksInDescription(
+ input_api, output_api))
+ snapshot("checking do not submit in files")
+ results.extend(
+ input_api.canned_checks.CheckDoNotSubmitInFiles(
+ input_api, output_api))
- if input_api.is_committing:
if global_checks:
- # These changes verify state that is global to the tree and can therefore
- # be skipped when run from PRESUBMIT.py scripts deeper in the tree.
- # Skipping these saves a bit of time and avoids having redundant output.
- # This was initially designed for use by third_party/blink/PRESUBMIT.py.
- snapshot("checking was uploaded")
- results.extend(input_api.canned_checks.CheckChangeWasUploaded(
- input_api, output_api))
- snapshot("checking description")
- results.extend(input_api.canned_checks.CheckChangeHasDescription(
- input_api, output_api))
- results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription(
- input_api, output_api))
- results.extend(
- input_api.canned_checks.CheckCorpLinksInDescription(
- input_api, output_api))
- snapshot("checking do not submit in files")
- results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles(
- input_api, output_api))
+ if input_api.change.scm == 'git':
+ snapshot("checking for commit objects in tree")
+ results.extend(
+ input_api.canned_checks.CheckForCommitObjects(
+ input_api, output_api))
- if global_checks:
- if input_api.change.scm == 'git':
- snapshot("checking for commit objects in tree")
- results.extend(
- input_api.canned_checks.CheckForCommitObjects(input_api, output_api))
-
- snapshot("done")
- return results
+ snapshot("done")
+ return results
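
The snapshot() helper above is a simple elapsed-time guard: it warns whenever the previously recorded step took more than half a second. A standalone sketch of the same pattern outside the presubmit API (illustrative only, not part of this patch):

import time

_last_snapshot = []  # holds [timestamp, label] of the previous snapshot


def snapshot(msg):
    """Warns if the step recorded by the previous snapshot ran slowly."""
    now = time.time()
    if _last_snapshot and now - _last_snapshot[0] > 0.5:
        print('  %s took a long time: %.1fs' %
              (_last_snapshot[1], now - _last_snapshot[0]))
    _last_snapshot[:] = (now, msg)
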
def CheckPatchFormatted(input_api,
@@ -1576,81 +1678,85 @@ def CheckPatchFormatted(input_api,
check_js=False,
check_python=None,
result_factory=None):
- result_factory = result_factory or output_api.PresubmitPromptWarning
- import git_cl
+ result_factory = result_factory or output_api.PresubmitPromptWarning
+ import git_cl
- display_args = []
- if not check_clang_format:
- display_args.append('--no-clang-format')
+ display_args = []
+ if not check_clang_format:
+ display_args.append('--no-clang-format')
- if check_js:
- display_args.append('--js')
+ if check_js:
+ display_args.append('--js')
- # Explicitly setting check_python to will enable/disable python formatting
- # on all files. Leaving it as None will enable checking patch formatting
- # on files that have a .style.yapf file in a parent directory.
- if check_python is not None:
- if check_python:
- display_args.append('--python')
- else:
- display_args.append('--no-python')
+    # Explicitly setting check_python will enable/disable python formatting
+ # on all files. Leaving it as None will enable checking patch formatting
+ # on files that have a .style.yapf file in a parent directory.
+ if check_python is not None:
+ if check_python:
+ display_args.append('--python')
+ else:
+ display_args.append('--no-python')
- cmd = ['-C', input_api.change.RepositoryRoot(),
- 'cl', 'format', '--dry-run', '--presubmit'] + display_args
+ cmd = [
+ '-C',
+ input_api.change.RepositoryRoot(), 'cl', 'format', '--dry-run',
+ '--presubmit'
+ ] + display_args
- # Make sure the passed --upstream branch is applied to a dry run.
- if input_api.change.UpstreamBranch():
- cmd.extend(['--upstream', input_api.change.UpstreamBranch()])
+ # Make sure the passed --upstream branch is applied to a dry run.
+ if input_api.change.UpstreamBranch():
+ cmd.extend(['--upstream', input_api.change.UpstreamBranch()])
- presubmit_subdir = input_api.os_path.relpath(
- input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot())
- if presubmit_subdir.startswith('..') or presubmit_subdir == '.':
- presubmit_subdir = ''
- # If the PRESUBMIT.py is in a parent repository, then format the entire
- # subrepository. Otherwise, format only the code in the directory that
- # contains the PRESUBMIT.py.
- if presubmit_subdir:
- cmd.append(input_api.PresubmitLocalPath())
-
- code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=bypass_warnings)
- # bypass_warnings? Only fail with code 2.
- # As this is just a warning, ignore all other errors if the user
- # happens to have a broken clang-format, doesn't use git, etc etc.
- if code == 2 or (code and not bypass_warnings):
+ presubmit_subdir = input_api.os_path.relpath(
+ input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot())
+ if presubmit_subdir.startswith('..') or presubmit_subdir == '.':
+ presubmit_subdir = ''
+ # If the PRESUBMIT.py is in a parent repository, then format the entire
+ # subrepository. Otherwise, format only the code in the directory that
+ # contains the PRESUBMIT.py.
if presubmit_subdir:
- short_path = presubmit_subdir
- else:
- short_path = input_api.basename(input_api.change.RepositoryRoot())
- display_args.append(presubmit_subdir)
- return [result_factory(
- 'The %s directory requires source formatting. '
- 'Please run: git cl format %s' %
- (short_path, ' '.join(display_args)))]
- return []
+ cmd.append(input_api.PresubmitLocalPath())
+
+ code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=bypass_warnings)
+    # If bypass_warnings is set, only fail when the command exits with
+    # code 2. As this is just a warning, ignore all other errors if the
+    # user happens to have a broken clang-format, doesn't use git, etc.
+ if code == 2 or (code and not bypass_warnings):
+ if presubmit_subdir:
+ short_path = presubmit_subdir
+ else:
+ short_path = input_api.basename(input_api.change.RepositoryRoot())
+ display_args.append(presubmit_subdir)
+ return [
+ result_factory('The %s directory requires source formatting. '
+ 'Please run: git cl format %s' %
+ (short_path, ' '.join(display_args)))
+ ]
+ return []
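
For context, a minimal usage sketch (illustrative only, not part of this patch) of how a client PRESUBMIT.py typically invokes this canned check; leaving check_python at its default of None defers to per-directory .style.yapf files as described in the comment above.

# Hypothetical client PRESUBMIT.py (names are illustrative).
def CheckChangeOnUpload(input_api, output_api):
    return input_api.canned_checks.CheckPatchFormatted(input_api,
                                                       output_api,
                                                       check_js=True)
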
def CheckGNFormatted(input_api, output_api):
- import gn
- affected_files = input_api.AffectedFiles(
- include_deletes=False,
- file_filter=lambda x: x.LocalPath().endswith('.gn') or
- x.LocalPath().endswith('.gni') or
- x.LocalPath().endswith('.typemap'))
- warnings = []
- for f in affected_files:
- cmd = ['gn', 'format', '--dry-run', f.AbsoluteLocalPath()]
- rc = gn.main(cmd)
- if rc == 2:
- warnings.append(output_api.PresubmitPromptWarning(
- '%s requires formatting. Please run:\n gn format %s' % (
- f.AbsoluteLocalPath(), f.LocalPath())))
- # It's just a warning, so ignore other types of failures assuming they'll be
- # caught elsewhere.
- return warnings
+ import gn
+ affected_files = input_api.AffectedFiles(
+ include_deletes=False,
+ file_filter=lambda x: x.LocalPath().endswith('.gn') or x.LocalPath(
+ ).endswith('.gni') or x.LocalPath().endswith('.typemap'))
+ warnings = []
+ for f in affected_files:
+ cmd = ['gn', 'format', '--dry-run', f.AbsoluteLocalPath()]
+ rc = gn.main(cmd)
+ if rc == 2:
+ warnings.append(
+ output_api.PresubmitPromptWarning(
+ '%s requires formatting. Please run:\n gn format %s' %
+ (f.AbsoluteLocalPath(), f.LocalPath())))
+ # It's just a warning, so ignore other types of failures assuming they'll be
+ # caught elsewhere.
+ return warnings
def CheckCIPDManifest(input_api, output_api, path=None, content=None):
- """Verifies that a CIPD ensure file manifest is valid against all platforms.
+ """Verifies that a CIPD ensure file manifest is valid against all platforms.
Exactly one of "path" or "content" must be provided. An assertion will occur
if neither or both are provided.
@@ -1659,59 +1765,55 @@ def CheckCIPDManifest(input_api, output_api, path=None, content=None):
path (str): If provided, the filesystem path to the manifest to verify.
content (str): If provided, the raw content of the manifest to verify.
"""
- cipd_bin = 'cipd' if not input_api.is_windows else 'cipd.bat'
- cmd = [cipd_bin, 'ensure-file-verify']
- kwargs = {}
+ cipd_bin = 'cipd' if not input_api.is_windows else 'cipd.bat'
+ cmd = [cipd_bin, 'ensure-file-verify']
+ kwargs = {}
- if input_api.is_windows:
- # Needs to be able to resolve "cipd.bat".
- kwargs['shell'] = True
+ if input_api.is_windows:
+ # Needs to be able to resolve "cipd.bat".
+ kwargs['shell'] = True
- if input_api.verbose:
- cmd += ['-log-level', 'debug']
+ if input_api.verbose:
+ cmd += ['-log-level', 'debug']
- if path:
- assert content is None, 'Cannot provide both "path" and "content".'
- cmd += ['-ensure-file', path]
- name = 'Check CIPD manifest %r' % path
- elif content:
- assert path is None, 'Cannot provide both "path" and "content".'
- cmd += ['-ensure-file=-']
- kwargs['stdin'] = content.encode('utf-8')
- # quick and dirty parser to extract checked packages.
- packages = [
- l.split()[0] for l in (ll.strip() for ll in content.splitlines())
- if ' ' in l and not l.startswith('$')
- ]
- name = 'Check CIPD packages from string: %r' % (packages,)
- else:
- raise Exception('Exactly one of "path" or "content" must be provided.')
+ if path:
+ assert content is None, 'Cannot provide both "path" and "content".'
+ cmd += ['-ensure-file', path]
+ name = 'Check CIPD manifest %r' % path
+ elif content:
+ assert path is None, 'Cannot provide both "path" and "content".'
+ cmd += ['-ensure-file=-']
+ kwargs['stdin'] = content.encode('utf-8')
+ # quick and dirty parser to extract checked packages.
+ packages = [
+ l.split()[0] for l in (ll.strip() for ll in content.splitlines())
+ if ' ' in l and not l.startswith('$')
+ ]
+ name = 'Check CIPD packages from string: %r' % (packages, )
+ else:
+ raise Exception('Exactly one of "path" or "content" must be provided.')
- return input_api.Command(
- name,
- cmd,
- kwargs,
- output_api.PresubmitError)
+ return input_api.Command(name, cmd, kwargs, output_api.PresubmitError)
def CheckCIPDPackages(input_api, output_api, platforms, packages):
- """Verifies that all named CIPD packages can be resolved against all supplied
+ """Verifies that all named CIPD packages can be resolved against all supplied
platforms.
Args:
platforms (list): List of CIPD platforms to verify.
packages (dict): Mapping of package name to version.
"""
- manifest = []
- for p in platforms:
- manifest.append('$VerifiedPlatform %s' % (p,))
- for k, v in packages.items():
- manifest.append('%s %s' % (k, v))
- return CheckCIPDManifest(input_api, output_api, content='\n'.join(manifest))
+ manifest = []
+ for p in platforms:
+ manifest.append('$VerifiedPlatform %s' % (p, ))
+ for k, v in packages.items():
+ manifest.append('%s %s' % (k, v))
+ return CheckCIPDManifest(input_api, output_api, content='\n'.join(manifest))
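
As a usage sketch (illustrative only, not part of this patch): CheckCIPDManifest and CheckCIPDPackages return input_api.Command objects rather than results, so a caller would normally run them through input_api.RunTests. The platforms and package name below are hypothetical.

def CheckChangeOnCommit(input_api, output_api):
    return input_api.RunTests([
        input_api.canned_checks.CheckCIPDPackages(
            input_api,
            output_api,
            platforms=['linux-amd64', 'windows-amd64'],
            packages={'example/tools/sample/${platform}': 'latest'}),
    ])
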
def CheckCIPDClientDigests(input_api, output_api, client_version_file):
- """Verifies that *.digests file was correctly regenerated.
+ """Verifies that *.digests file was correctly regenerated.
.digests file contains pinned hashes of the CIPD client.
It is consulted during CIPD client bootstrap and self-update. It should be
@@ -1720,21 +1822,24 @@ def CheckCIPDClientDigests(input_api, output_api, client_version_file):
Args:
client_version_file (str): Path to a text file with CIPD client version.
"""
- cmd = [
- 'cipd' if not input_api.is_windows else 'cipd.bat',
- 'selfupdate-roll', '-check', '-version-file', client_version_file,
- ]
- if input_api.verbose:
- cmd += ['-log-level', 'debug']
- return input_api.Command(
- 'Check CIPD client_version_file.digests file',
- cmd,
- {'shell': True} if input_api.is_windows else {}, # to resolve cipd.bat
- output_api.PresubmitError)
+ cmd = [
+ 'cipd' if not input_api.is_windows else 'cipd.bat',
+ 'selfupdate-roll',
+ '-check',
+ '-version-file',
+ client_version_file,
+ ]
+ if input_api.verbose:
+ cmd += ['-log-level', 'debug']
+ return input_api.Command(
+ 'Check CIPD client_version_file.digests file',
+ cmd,
+ {'shell': True} if input_api.is_windows else {}, # to resolve cipd.bat
+ output_api.PresubmitError)
def CheckForCommitObjects(input_api, output_api):
- """Validates that commit objects match DEPS.
+ """Validates that commit objects match DEPS.
Commit objects are put into the git tree typically by submodule tooling.
Because we use gclient to handle external repository references instead,
@@ -1747,27 +1852,27 @@ def CheckForCommitObjects(input_api, output_api):
Returns:
A presubmit error if a commit object is not expected.
"""
- # Get DEPS file.
- deps_file = input_api.os_path.join(input_api.PresubmitLocalPath(), 'DEPS')
- if not input_api.os_path.isfile(deps_file):
- # No DEPS file, carry on!
- return []
+ # Get DEPS file.
+ deps_file = input_api.os_path.join(input_api.PresubmitLocalPath(), 'DEPS')
+ if not input_api.os_path.isfile(deps_file):
+ # No DEPS file, carry on!
+ return []
- with open(deps_file) as f:
- deps_content = f.read()
- deps = _ParseDeps(deps_content)
- # set default
- if 'deps' not in deps:
- deps['deps'] = {}
- if 'git_dependencies' not in deps:
- deps['git_dependencies'] = 'DEPS'
+ with open(deps_file) as f:
+ deps_content = f.read()
+ deps = _ParseDeps(deps_content)
+ # set default
+ if 'deps' not in deps:
+ deps['deps'] = {}
+ if 'git_dependencies' not in deps:
+ deps['git_dependencies'] = 'DEPS'
- if deps['git_dependencies'] == 'SUBMODULES':
- # git submodule is source of truth, so no further action needed.
- return []
+ if deps['git_dependencies'] == 'SUBMODULES':
+ # git submodule is source of truth, so no further action needed.
+ return []
- def parse_tree_entry(ent):
- """Splits a tree entry into components
+ def parse_tree_entry(ent):
+ """Splits a tree entry into components
Args:
ent: a tree entry in the form "filemode type hash\tname"
@@ -1775,118 +1880,120 @@ def CheckForCommitObjects(input_api, output_api):
Returns:
The tree entry split into component parts
"""
- tabparts = ent.split('\t', 1)
- spaceparts = tabparts[0].split(' ', 2)
- return (spaceparts[0], spaceparts[1], spaceparts[2], tabparts[1])
+ tabparts = ent.split('\t', 1)
+ spaceparts = tabparts[0].split(' ', 2)
+ return (spaceparts[0], spaceparts[1], spaceparts[2], tabparts[1])
- full_tree = input_api.subprocess.check_output(
- ['git', 'ls-tree', '-r', '--full-tree', '-z', 'HEAD'],
- cwd=input_api.PresubmitLocalPath())
+ full_tree = input_api.subprocess.check_output(
+ ['git', 'ls-tree', '-r', '--full-tree', '-z', 'HEAD'],
+ cwd=input_api.PresubmitLocalPath())
- # commit_tree_entries holds all commit entries (ie gitlink, submodule record).
- commit_tree_entries = []
- for entry in full_tree.strip().split(b'\00'):
- if not entry.startswith(b'160000'):
- # Remove entries that we don't care about. 160000 indicates a gitlink.
- continue
- tree_entry = parse_tree_entry(entry.decode('utf-8'))
- if tree_entry[1] == 'commit':
- commit_tree_entries.append(tree_entry)
+ # commit_tree_entries holds all commit entries (ie gitlink, submodule
+ # record).
+ commit_tree_entries = []
+ for entry in full_tree.strip().split(b'\00'):
+ if not entry.startswith(b'160000'):
+ # Remove entries that we don't care about. 160000 indicates a
+ # gitlink.
+ continue
+ tree_entry = parse_tree_entry(entry.decode('utf-8'))
+ if tree_entry[1] == 'commit':
+ commit_tree_entries.append(tree_entry)
+
+ # No gitlinks found, return early.
+ if len(commit_tree_entries) == 0:
+ return []
+
+ if deps['git_dependencies'] == 'DEPS':
+ commit_tree_entries = [x[3] for x in commit_tree_entries]
+ return [
+ output_api.PresubmitError(
+ 'Commit objects present within tree.\n'
+ 'This may be due to submodule-related interactions;\n'
+ 'the presence of a commit object in the tree may lead to odd\n'
+ 'situations where files are inconsistently checked-out.\n'
+ 'Remove these commit entries and validate your changeset '
+ 'again:\n', commit_tree_entries)
+ ]
+
+ assert deps['git_dependencies'] == 'SYNC', 'unexpected git_dependencies.'
+
+ # Create mapping HASH -> PATH
+ git_submodules = {}
+ for commit_tree_entry in commit_tree_entries:
+ git_submodules[commit_tree_entry[2]] = commit_tree_entry[3]
+
+ mismatch_entries = []
+ deps_msg = ""
+ for dep_path, dep in deps['deps'].items():
+ if 'dep_type' in dep and dep['dep_type'] != 'git':
+ continue
+
+ url = dep if isinstance(dep, str) else dep['url']
+ commit_hash = url.split('@')[-1]
+        # Two exceptions were made in two projects prior to this check
+ # enforcement. We need to address those exceptions, but in the meantime
+ # we can't fail this global presubmit check
+ # https://chromium.googlesource.com/infra/infra/+/refs/heads/main/DEPS#45
+ if dep_path == 'recipes-py' and commit_hash == 'refs/heads/main':
+ continue
+
+ # https://chromium.googlesource.com/angle/angle/+/refs/heads/main/DEPS#412
+ if dep_path == 'third_party/dummy_chromium':
+ continue
+
+ if commit_hash in git_submodules:
+ git_submodules.pop(commit_hash)
+ else:
+ mismatch_entries.append(dep_path)
+ deps_msg += f"\n [DEPS] {dep_path} -> {commit_hash}"
+
+ for commit_hash, path in git_submodules.items():
+ mismatch_entries.append(path)
+ deps_msg += f"\n [gitlink] {path} -> {commit_hash}"
+
+ if mismatch_entries:
+ return [
+ output_api.PresubmitError(
+ 'DEPS file indicates git submodule migration is in progress,\n'
+ 'but the commit objects do not match DEPS entries.\n\n'
+ 'To reset all git submodule git entries to match DEPS, run\n'
+ 'the following command in the root of this repository:\n'
+ ' gclient gitmodules'
+ '\n\n'
+ 'The following entries diverged: ' + deps_msg)
+ ]
- # No gitlinks found, return early.
- if len(commit_tree_entries) == 0:
return []
- if deps['git_dependencies'] == 'DEPS':
- commit_tree_entries = [x[3] for x in commit_tree_entries]
- return [
- output_api.PresubmitError(
- 'Commit objects present within tree.\n'
- 'This may be due to submodule-related interactions;\n'
- 'the presence of a commit object in the tree may lead to odd\n'
- 'situations where files are inconsistently checked-out.\n'
- 'Remove these commit entries and validate your changeset '
- 'again:\n', commit_tree_entries)
- ]
-
- assert deps['git_dependencies'] == 'SYNC', 'unexpected git_dependencies.'
-
- # Create mapping HASH -> PATH
- git_submodules = {}
- for commit_tree_entry in commit_tree_entries:
- git_submodules[commit_tree_entry[2]] = commit_tree_entry[3]
-
- mismatch_entries = []
- deps_msg = ""
- for dep_path, dep in deps['deps'].items():
- if 'dep_type' in dep and dep['dep_type'] != 'git':
- continue
-
- url = dep if isinstance(dep, str) else dep['url']
- commit_hash = url.split('@')[-1]
- # Two exceptions were in made in two projects prior to this check
- # enforcement. We need to address those exceptions, but in the meantime we
- # can't fail this global presubmit check
- # https://chromium.googlesource.com/infra/infra/+/refs/heads/main/DEPS#45
- if dep_path == 'recipes-py' and commit_hash == 'refs/heads/main':
- continue
-
- # https://chromium.googlesource.com/angle/angle/+/refs/heads/main/DEPS#412
- if dep_path == 'third_party/dummy_chromium':
- continue
-
- if commit_hash in git_submodules:
- git_submodules.pop(commit_hash)
- else:
- mismatch_entries.append(dep_path)
- deps_msg += f"\n [DEPS] {dep_path} -> {commit_hash}"
-
- for commit_hash, path in git_submodules.items():
- mismatch_entries.append(path)
- deps_msg += f"\n [gitlink] {path} -> {commit_hash}"
-
- if mismatch_entries:
- return [
- output_api.PresubmitError(
- 'DEPS file indicates git submodule migration is in progress,\n'
- 'but the commit objects do not match DEPS entries.\n\n'
- 'To reset all git submodule git entries to match DEPS, run\n'
- 'the following command in the root of this repository:\n'
- ' gclient gitmodules'
- '\n\n'
- 'The following entries diverged: ' + deps_msg)
- ]
-
- return []
-
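
For illustration (not part of this patch), a gitlink entry as emitted by `git ls-tree` and how the parse_tree_entry helper above splits it; the hash and path below are made up.

_EXAMPLE_ENTRY = ('160000 commit 1234567890abcdef1234567890abcdef12345678'
                  '\tthird_party/example')
# parse_tree_entry(_EXAMPLE_ENTRY) returns:
#   ('160000', 'commit', '1234567890abcdef1234567890abcdef12345678',
#    'third_party/example')
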
def _ParseDeps(contents):
- """Simple helper for parsing DEPS files."""
+ """Simple helper for parsing DEPS files."""
- # Stubs for handling special syntax in the root DEPS file.
- class _VarImpl:
- def __init__(self, local_scope):
- self._local_scope = local_scope
+ # Stubs for handling special syntax in the root DEPS file.
+ class _VarImpl:
+ def __init__(self, local_scope):
+ self._local_scope = local_scope
- def Lookup(self, var_name):
- """Implements the Var syntax."""
- try:
- return self._local_scope['vars'][var_name]
- except KeyError:
- raise Exception('Var is not defined: %s' % var_name)
+ def Lookup(self, var_name):
+ """Implements the Var syntax."""
+ try:
+ return self._local_scope['vars'][var_name]
+ except KeyError:
+ raise Exception('Var is not defined: %s' % var_name)
- local_scope = {}
- global_scope = {
- 'Var': _VarImpl(local_scope).Lookup,
- 'Str': str,
- }
+ local_scope = {}
+ global_scope = {
+ 'Var': _VarImpl(local_scope).Lookup,
+ 'Str': str,
+ }
- exec(contents, global_scope, local_scope)
- return local_scope
+ exec(contents, global_scope, local_scope)
+ return local_scope
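
A small illustration (not part of this patch) of the DEPS syntax _ParseDeps handles: Var() resolves names against the vars dict defined earlier in the same file, and Str() is an identity stub. The URL and revision below are made up.

_EXAMPLE_DEPS = """
vars = {'chromium_git': 'https://chromium.googlesource.com'}
deps = {
    'src/third_party/foo':
        Var('chromium_git') + '/foo.git' + '@' + 'deadbeef',
}
"""
# _ParseDeps(_EXAMPLE_DEPS)['deps']['src/third_party/foo'] evaluates to
# 'https://chromium.googlesource.com/foo.git@deadbeef'.
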
def CheckVPythonSpec(input_api, output_api, file_filter=None):
- """Validates any changed .vpython and .vpython3 files with vpython
+ """Validates any changed .vpython and .vpython3 files with vpython
verification tool.
Args:
@@ -1900,24 +2007,25 @@ def CheckVPythonSpec(input_api, output_api, file_filter=None):
Returns:
A list of input_api.Command objects containing verification commands.
"""
- file_filter = file_filter or (lambda f: f.LocalPath().endswith('.vpython') or
- f.LocalPath().endswith('.vpython3'))
- affected_files = input_api.AffectedTestableFiles(file_filter=file_filter)
- affected_files = map(lambda f: f.AbsoluteLocalPath(), affected_files)
+ file_filter = file_filter or (lambda f: f.LocalPath().endswith('.vpython')
+ or f.LocalPath().endswith('.vpython3'))
+ affected_files = input_api.AffectedTestableFiles(file_filter=file_filter)
+ affected_files = map(lambda f: f.AbsoluteLocalPath(), affected_files)
- commands = []
- for f in affected_files:
- commands.append(
- input_api.Command('Verify %s' % f, [
- input_api.python3_executable, '-vpython-spec', f, '-vpython-tool',
- 'verify'
- ], {'stderr': input_api.subprocess.STDOUT}, output_api.PresubmitError))
+ commands = []
+ for f in affected_files:
+ commands.append(
+ input_api.Command('Verify %s' % f, [
+ input_api.python3_executable, '-vpython-spec', f,
+ '-vpython-tool', 'verify'
+ ], {'stderr': input_api.subprocess.STDOUT},
+ output_api.PresubmitError))
- return commands
+ return commands
def CheckChangedLUCIConfigs(input_api, output_api):
- """Validates the changed config file against LUCI Config.
+ """Validates the changed config file against LUCI Config.
Only return the warning and/or error for files in input_api.AffectedFiles().
@@ -1927,155 +2035,169 @@ def CheckChangedLUCIConfigs(input_api, output_api):
A list presubmit errors and/or warnings from the validation result of files
in input_api.AffectedFiles()
"""
- import json
- import logging
+ import json
+ import logging
- import auth
- import git_cl
+ import auth
+ import git_cl
- LUCI_CONFIG_HOST_NAME = 'luci-config.appspot.com'
+ LUCI_CONFIG_HOST_NAME = 'luci-config.appspot.com'
- cl = git_cl.Changelist()
- if input_api.change.issue and input_api.gerrit:
- remote_branch = input_api.gerrit.GetDestRef(input_api.change.issue)
- else:
- remote, remote_branch = cl.GetRemoteBranch()
- if remote_branch.startswith('refs/remotes/%s/' % remote):
- remote_branch = remote_branch.replace(
- 'refs/remotes/%s/' % remote, 'refs/heads/', 1)
- if remote_branch.startswith('refs/remotes/branch-heads/'):
- remote_branch = remote_branch.replace(
- 'refs/remotes/branch-heads/', 'refs/branch-heads/', 1)
+ cl = git_cl.Changelist()
+ if input_api.change.issue and input_api.gerrit:
+ remote_branch = input_api.gerrit.GetDestRef(input_api.change.issue)
+ else:
+ remote, remote_branch = cl.GetRemoteBranch()
+ if remote_branch.startswith('refs/remotes/%s/' % remote):
+ remote_branch = remote_branch.replace('refs/remotes/%s/' % remote,
+ 'refs/heads/', 1)
+ if remote_branch.startswith('refs/remotes/branch-heads/'):
+ remote_branch = remote_branch.replace('refs/remotes/branch-heads/',
+ 'refs/branch-heads/', 1)
- remote_host_url = cl.GetRemoteUrl()
- if not remote_host_url:
- return [output_api.PresubmitError(
- 'Remote host url for git has not been defined')]
- remote_host_url = remote_host_url.rstrip('/')
- if remote_host_url.endswith('.git'):
- remote_host_url = remote_host_url[:-len('.git')]
-
- # authentication
- try:
- acc_tkn = auth.Authenticator().get_access_token()
- except auth.LoginRequiredError as e:
- return [output_api.PresubmitError(
- 'Error in authenticating user.', long_text=str(e))]
-
- def request(endpoint, body=None):
- api_url = ('https://%s/_ah/api/config/v1/%s'
- % (LUCI_CONFIG_HOST_NAME, endpoint))
- req = input_api.urllib_request.Request(api_url)
- req.add_header('Authorization', 'Bearer %s' % acc_tkn.token)
- if body is not None:
- req.data = zlib.compress(json.dumps(body).encode('utf-8'))
- req.add_header('Content-Type', 'application/json-zlib')
- return json.load(input_api.urllib_request.urlopen(req))
-
- try:
- config_sets = request('config-sets').get('config_sets')
- except input_api.urllib_error.HTTPError as e:
- return [output_api.PresubmitError(
- 'Config set request to luci-config failed', long_text=str(e))]
- if not config_sets:
- return [output_api.PresubmitPromptWarning('No config_sets were returned')]
- loc_pref = '%s/+/%s/' % (remote_host_url, remote_branch)
- logging.debug('Derived location prefix: %s', loc_pref)
- dir_to_config_set = {}
- for cs in config_sets:
- if cs['location'].startswith(loc_pref) or ('%s/' %
- cs['location']) == loc_pref:
- path = cs['location'][len(loc_pref):].rstrip('/')
- d = input_api.os_path.join(*path.split('/')) if path else '.'
- dir_to_config_set[d] = cs['config_set']
- if not dir_to_config_set:
- warning_long_text_lines = [
- 'No config_set found for %s.' % loc_pref,
- 'Found the following:',
- ]
- for loc in sorted(cs['location'] for cs in config_sets):
- warning_long_text_lines.append(' %s' % loc)
- warning_long_text_lines.append('')
- warning_long_text_lines.append(
- 'If the requested location is internal,'
- ' the requester may not have access.')
-
- return [output_api.PresubmitPromptWarning(
- warning_long_text_lines[0],
- long_text='\n'.join(warning_long_text_lines))]
-
- dir_to_fileSet = {}
- for f in input_api.AffectedFiles(include_deletes=False):
- for d in dir_to_config_set:
- if d != '.' and not f.LocalPath().startswith(d):
- continue # file doesn't belong to this config set
- rel_path = f.LocalPath() if d == '.' else input_api.os_path.relpath(
- f.LocalPath(), start=d)
- fileSet = dir_to_fileSet.setdefault(d, set())
- fileSet.add(rel_path.replace(_os.sep, '/'))
- dir_to_fileSet[d] = fileSet
-
- outputs = []
- lucicfg = 'lucicfg' if not input_api.is_windows else 'lucicfg.bat'
- log_level = 'debug' if input_api.verbose else 'warning'
- repo_root = input_api.change.RepositoryRoot()
- for d, fileSet in dir_to_fileSet.items():
- config_set = dir_to_config_set[d]
- with input_api.CreateTemporaryFile() as f:
- cmd = [
- lucicfg, 'validate', d, '-config-set', config_set, '-log-level',
- log_level, '-json-output', f.name
- ]
- # return code is not important as the validation failure will be retrieved
- # from the output json file.
- out, _ = input_api.subprocess.communicate(
- cmd,
- stderr=input_api.subprocess.PIPE,
- shell=input_api.is_windows, # to resolve *.bat
- cwd=repo_root,
- )
- logging.debug('running %s\nSTDOUT:\n%s\nSTDERR:\n%s', cmd, out[0], out[1])
- try:
- result = json.load(f)
- except json.JSONDecodeError as e:
- outputs.append(
+ remote_host_url = cl.GetRemoteUrl()
+ if not remote_host_url:
+ return [
output_api.PresubmitError(
- 'Error when parsing lucicfg validate output', long_text=str(e)))
- else:
- result = result.get('result', None)
- if result:
- non_affected_file_msg_count = 0
- for validation_result in (result.get('validation', None) or []):
- for msg in (validation_result.get('messages', None) or []):
- if d != '.' and msg['path'] not in fileSet:
- non_affected_file_msg_count += 1
- continue
- sev = msg['severity']
- if sev == 'WARNING':
- out_f = output_api.PresubmitPromptWarning
- elif sev in ('ERROR', 'CRITICAL'):
- out_f = output_api.PresubmitError
- else:
- out_f = output_api.PresubmitNotifyResult
- outputs.append(
- out_f('Config validation for file(%s): %s' %
- (msg['path'], msg['text'])))
- if non_affected_file_msg_count:
- reproduce_cmd = [
- lucicfg, 'validate',
- repo_root if d == '.' else input_api.os_path.join(repo_root, d),
- '-config-set', config_set
+ 'Remote host url for git has not been defined')
+ ]
+ remote_host_url = remote_host_url.rstrip('/')
+ if remote_host_url.endswith('.git'):
+ remote_host_url = remote_host_url[:-len('.git')]
+
+ # authentication
+ try:
+ acc_tkn = auth.Authenticator().get_access_token()
+ except auth.LoginRequiredError as e:
+ return [
+ output_api.PresubmitError('Error in authenticating user.',
+ long_text=str(e))
+ ]
+
+ def request(endpoint, body=None):
+ api_url = ('https://%s/_ah/api/config/v1/%s' %
+ (LUCI_CONFIG_HOST_NAME, endpoint))
+ req = input_api.urllib_request.Request(api_url)
+ req.add_header('Authorization', 'Bearer %s' % acc_tkn.token)
+ if body is not None:
+ req.data = zlib.compress(json.dumps(body).encode('utf-8'))
+ req.add_header('Content-Type', 'application/json-zlib')
+ return json.load(input_api.urllib_request.urlopen(req))
+
+ try:
+ config_sets = request('config-sets').get('config_sets')
+ except input_api.urllib_error.HTTPError as e:
+ return [
+ output_api.PresubmitError(
+ 'Config set request to luci-config failed', long_text=str(e))
+ ]
+ if not config_sets:
+ return [
+ output_api.PresubmitPromptWarning('No config_sets were returned')
+ ]
+ loc_pref = '%s/+/%s/' % (remote_host_url, remote_branch)
+ logging.debug('Derived location prefix: %s', loc_pref)
+ dir_to_config_set = {}
+ for cs in config_sets:
+ if cs['location'].startswith(loc_pref) or ('%s/' %
+ cs['location']) == loc_pref:
+ path = cs['location'][len(loc_pref):].rstrip('/')
+ d = input_api.os_path.join(*path.split('/')) if path else '.'
+ dir_to_config_set[d] = cs['config_set']
+ if not dir_to_config_set:
+ warning_long_text_lines = [
+ 'No config_set found for %s.' % loc_pref,
+ 'Found the following:',
+ ]
+ for loc in sorted(cs['location'] for cs in config_sets):
+ warning_long_text_lines.append(' %s' % loc)
+ warning_long_text_lines.append('')
+ warning_long_text_lines.append('If the requested location is internal,'
+ ' the requester may not have access.')
+
+ return [
+ output_api.PresubmitPromptWarning(
+ warning_long_text_lines[0],
+ long_text='\n'.join(warning_long_text_lines))
+ ]
+
+ dir_to_fileSet = {}
+ for f in input_api.AffectedFiles(include_deletes=False):
+ for d in dir_to_config_set:
+ if d != '.' and not f.LocalPath().startswith(d):
+ continue # file doesn't belong to this config set
+ rel_path = f.LocalPath() if d == '.' else input_api.os_path.relpath(
+ f.LocalPath(), start=d)
+ fileSet = dir_to_fileSet.setdefault(d, set())
+ fileSet.add(rel_path.replace(_os.sep, '/'))
+ dir_to_fileSet[d] = fileSet
+
+ outputs = []
+ lucicfg = 'lucicfg' if not input_api.is_windows else 'lucicfg.bat'
+ log_level = 'debug' if input_api.verbose else 'warning'
+ repo_root = input_api.change.RepositoryRoot()
+ for d, fileSet in dir_to_fileSet.items():
+ config_set = dir_to_config_set[d]
+ with input_api.CreateTemporaryFile() as f:
+ cmd = [
+ lucicfg, 'validate', d, '-config-set', config_set, '-log-level',
+ log_level, '-json-output', f.name
]
- outputs.append(
- output_api.PresubmitPromptWarning(
- 'Found %d additional errors/warnings in files that are not '
- 'modified, run `%s` to reveal them' %
- (non_affected_file_msg_count, ' '.join(reproduce_cmd))))
- return outputs
+ # return code is not important as the validation failure will be
+ # retrieved from the output json file.
+ out, _ = input_api.subprocess.communicate(
+ cmd,
+ stderr=input_api.subprocess.PIPE,
+ shell=input_api.is_windows, # to resolve *.bat
+ cwd=repo_root,
+ )
+ logging.debug('running %s\nSTDOUT:\n%s\nSTDERR:\n%s', cmd, out[0],
+ out[1])
+ try:
+ result = json.load(f)
+ except json.JSONDecodeError as e:
+ outputs.append(
+ output_api.PresubmitError(
+ 'Error when parsing lucicfg validate output',
+ long_text=str(e)))
+ else:
+ result = result.get('result', None)
+ if result:
+ non_affected_file_msg_count = 0
+ for validation_result in (result.get('validation', None)
+ or []):
+ for msg in (validation_result.get('messages', None)
+ or []):
+ if d != '.' and msg['path'] not in fileSet:
+ non_affected_file_msg_count += 1
+ continue
+ sev = msg['severity']
+ if sev == 'WARNING':
+ out_f = output_api.PresubmitPromptWarning
+ elif sev in ('ERROR', 'CRITICAL'):
+ out_f = output_api.PresubmitError
+ else:
+ out_f = output_api.PresubmitNotifyResult
+ outputs.append(
+ out_f('Config validation for file(%s): %s' %
+ (msg['path'], msg['text'])))
+ if non_affected_file_msg_count:
+ reproduce_cmd = [
+ lucicfg, 'validate',
+ repo_root if d == '.' else input_api.os_path.join(
+ repo_root, d), '-config-set', config_set
+ ]
+ outputs.append(
+ output_api.PresubmitPromptWarning(
+ 'Found %d additional errors/warnings in files that are not '
+ 'modified, run `%s` to reveal them' %
+ (non_affected_file_msg_count,
+ ' '.join(reproduce_cmd))))
+ return outputs
def CheckLucicfgGenOutput(input_api, output_api, entry_script):
- """Verifies configs produced by `lucicfg` are up-to-date and pass validation.
+ """Verifies configs produced by `lucicfg` are up-to-date and pass validation.
Runs the check unconditionally, regardless of what files are modified. Examine
input_api.AffectedFiles() yourself before using CheckLucicfgGenOutput if this
@@ -2091,41 +2213,46 @@ def CheckLucicfgGenOutput(input_api, output_api, entry_script):
Returns:
A list of input_api.Command objects containing verification commands.
"""
- return [
- input_api.Command(
- 'lucicfg validate "%s"' % entry_script,
- [
- 'lucicfg' if not input_api.is_windows else 'lucicfg.bat',
- 'validate', entry_script,
- '-log-level', 'debug' if input_api.verbose else 'warning',
- ],
- {
- 'stderr': input_api.subprocess.STDOUT,
- 'shell': input_api.is_windows, # to resolve *.bat
- 'cwd': input_api.PresubmitLocalPath(),
- },
- output_api.PresubmitError)
- ]
+ return [
+ input_api.Command(
+ 'lucicfg validate "%s"' % entry_script,
+ [
+ 'lucicfg' if not input_api.is_windows else 'lucicfg.bat',
+ 'validate',
+ entry_script,
+ '-log-level',
+ 'debug' if input_api.verbose else 'warning',
+ ],
+ {
+ 'stderr': input_api.subprocess.STDOUT,
+ 'shell': input_api.is_windows, # to resolve *.bat
+ 'cwd': input_api.PresubmitLocalPath(),
+ },
+ output_api.PresubmitError)
+ ]
+
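
A usage sketch (illustrative only; 'main.star' is a hypothetical entry script): the function returns command objects, so callers typically hand them to input_api.RunTests.

def CheckChangeOnUpload(input_api, output_api):
    return input_api.RunTests(
        input_api.canned_checks.CheckLucicfgGenOutput(
            input_api, output_api, 'main.star'))
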
def CheckJsonParses(input_api, output_api, file_filter=None):
- """Verifies that all JSON files at least parse as valid JSON. By default,
+ """Verifies that all JSON files at least parse as valid JSON. By default,
file_filter will look for all files that end with .json"""
- import json
- if file_filter is None:
- file_filter = lambda x: x.LocalPath().endswith('.json')
- affected_files = input_api.AffectedFiles(
- include_deletes=False,
- file_filter=file_filter)
- warnings = []
- for f in affected_files:
- with _io.open(f.AbsoluteLocalPath(), encoding='utf-8') as j:
- try:
- json.load(j)
- except ValueError:
- # Just a warning for now, in case people are using JSON5 somewhere.
- warnings.append(output_api.PresubmitPromptWarning(
- '%s does not appear to be valid JSON.' % f.LocalPath()))
- return warnings
+ import json
+ if file_filter is None:
+ file_filter = lambda x: x.LocalPath().endswith('.json')
+ affected_files = input_api.AffectedFiles(include_deletes=False,
+ file_filter=file_filter)
+ warnings = []
+ for f in affected_files:
+ with _io.open(f.AbsoluteLocalPath(), encoding='utf-8') as j:
+ try:
+ json.load(j)
+ except ValueError:
+ # Just a warning for now, in case people are using JSON5
+ # somewhere.
+ warnings.append(
+ output_api.PresubmitPromptWarning(
+ '%s does not appear to be valid JSON.' % f.LocalPath()))
+ return warnings
+
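
A brief usage sketch (illustrative only, not part of this patch); unlike the CIPD and lucicfg checks above, this one returns result objects directly, so no RunTests wrapper is needed.

def CheckChangeOnUpload(input_api, output_api):
    return input_api.canned_checks.CheckJsonParses(input_api, output_api)
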
# string pattern, sequence of strings to show when pattern matches,
# error flag. True if match is a presubmit error, otherwise it's a warning.
@@ -2150,135 +2277,137 @@ _NON_INCLUSIVE_TERMS = (
def _GetMessageForMatchingTerm(input_api, affected_file, line_number, line,
term, message):
- """Helper method for CheckInclusiveLanguage.
+ """Helper method for CheckInclusiveLanguage.
Returns a string composed of the name of the file, the line number where the
match has been found and the additional text passed as |message| in case the
target type name matches the text inside the line passed as parameter.
"""
- result = []
+ result = []
+
+ # A // nocheck comment will bypass this error.
+ if line.endswith(" nocheck") or line.endswith(""):
+ return result
+
+ # Ignore C-style single-line comments about banned terms.
+ if input_api.re.search(r"//.*$", line):
+ line = input_api.re.sub(r"//.*$", "", line)
+
+ # Ignore lines from C-style multi-line comments.
+ if input_api.re.search(r"^\s*\*", line):
+ return result
+
+ # Ignore Python-style comments about banned terms.
+ # This actually removes comment text from the first # on.
+ if input_api.re.search(r"#.*$", line):
+ line = input_api.re.sub(r"#.*$", "", line)
+
+ matched = False
+ if term[0:1] == '/':
+ regex = term[1:]
+ if input_api.re.search(regex, line):
+ matched = True
+ elif term in line:
+ matched = True
+
+ if matched:
+ result.append(' %s:%d:' % (affected_file.LocalPath(), line_number))
+ for message_line in message:
+ result.append(' %s' % message_line)
- # A // nocheck comment will bypass this error.
- if line.endswith(" nocheck") or line.endswith(""):
return result
- # Ignore C-style single-line comments about banned terms.
- if input_api.re.search(r"//.*$", line):
- line = input_api.re.sub(r"//.*$", "", line)
- # Ignore lines from C-style multi-line comments.
- if input_api.re.search(r"^\s*\*", line):
- return result
-
- # Ignore Python-style comments about banned terms.
- # This actually removes comment text from the first # on.
- if input_api.re.search(r"#.*$", line):
- line = input_api.re.sub(r"#.*$", "", line)
-
- matched = False
- if term[0:1] == '/':
- regex = term[1:]
- if input_api.re.search(regex, line):
- matched = True
- elif term in line:
- matched = True
-
- if matched:
- result.append(' %s:%d:' % (affected_file.LocalPath(), line_number))
- for message_line in message:
- result.append(' %s' % message_line)
-
- return result
-
-
-def CheckInclusiveLanguage(input_api, output_api,
+def CheckInclusiveLanguage(input_api,
+ output_api,
excluded_directories_relative_path=None,
non_inclusive_terms=_NON_INCLUSIVE_TERMS):
- """Make sure that banned non-inclusive terms are not used."""
+ """Make sure that banned non-inclusive terms are not used."""
- # Presubmit checks may run on a bot where the changes are actually
- # in a repo that isn't chromium/src (e.g., when testing src + tip-of-tree
- # ANGLE), but this particular check only makes sense for changes to
- # chromium/src.
- if input_api.change.RepositoryRoot() != input_api.PresubmitLocalPath():
- return []
- if input_api.no_diffs:
- return []
+ # Presubmit checks may run on a bot where the changes are actually
+ # in a repo that isn't chromium/src (e.g., when testing src + tip-of-tree
+ # ANGLE), but this particular check only makes sense for changes to
+ # chromium/src.
+ if input_api.change.RepositoryRoot() != input_api.PresubmitLocalPath():
+ return []
+ if input_api.no_diffs:
+ return []
- warnings = []
- errors = []
+ warnings = []
+ errors = []
- if excluded_directories_relative_path is None:
- excluded_directories_relative_path = [
- 'infra',
- 'inclusive_language_presubmit_exempt_dirs.txt'
- ]
+ if excluded_directories_relative_path is None:
+ excluded_directories_relative_path = [
+ 'infra', 'inclusive_language_presubmit_exempt_dirs.txt'
+ ]
- # Note that this matches exact path prefixes, and does not match
- # subdirectories. Only files directly in an excluded path will
- # match.
- def IsExcludedFile(affected_file, excluded_paths):
- local_dir = input_api.os_path.dirname(affected_file.LocalPath())
+ # Note that this matches exact path prefixes, and does not match
+ # subdirectories. Only files directly in an excluded path will
+ # match.
+ def IsExcludedFile(affected_file, excluded_paths):
+ local_dir = input_api.os_path.dirname(affected_file.LocalPath())
- # Excluded paths use forward slashes.
- if input_api.platform == 'win32':
- local_dir = local_dir.replace('\\', '/')
+ # Excluded paths use forward slashes.
+ if input_api.platform == 'win32':
+ local_dir = local_dir.replace('\\', '/')
- return local_dir in excluded_paths
+ return local_dir in excluded_paths
- def CheckForMatch(affected_file, line_num, line, term, message, error):
- problems = _GetMessageForMatchingTerm(input_api, affected_file, line_num,
- line, term, message)
+ def CheckForMatch(affected_file, line_num, line, term, message, error):
+ problems = _GetMessageForMatchingTerm(input_api, affected_file,
+ line_num, line, term, message)
- if problems:
- if error:
- errors.extend(problems)
- else:
- warnings.extend(problems)
+ if problems:
+ if error:
+ errors.extend(problems)
+ else:
+ warnings.extend(problems)
- excluded_paths = []
- dirs_file_path = input_api.os_path.join(input_api.change.RepositoryRoot(),
- *excluded_directories_relative_path)
- f = input_api.ReadFile(dirs_file_path)
+ excluded_paths = []
+ dirs_file_path = input_api.os_path.join(input_api.change.RepositoryRoot(),
+ *excluded_directories_relative_path)
+ f = input_api.ReadFile(dirs_file_path)
- for line in f.splitlines():
- path = line.split()[0]
- if len(path) > 0:
- excluded_paths.append(path)
+ for line in f.splitlines():
+ path = line.split()[0]
+ if len(path) > 0:
+ excluded_paths.append(path)
- excluded_paths = set(excluded_paths)
- for f in input_api.AffectedFiles():
- for line_num, line in f.ChangedContents():
- for term, message, error in non_inclusive_terms:
- if IsExcludedFile(f, excluded_paths):
- continue
- CheckForMatch(f, line_num, line, term, message, error)
+ excluded_paths = set(excluded_paths)
+ for f in input_api.AffectedFiles():
+ for line_num, line in f.ChangedContents():
+ for term, message, error in non_inclusive_terms:
+ if IsExcludedFile(f, excluded_paths):
+ continue
+ CheckForMatch(f, line_num, line, term, message, error)
- result = []
- if (warnings):
- result.append(
- output_api.PresubmitPromptWarning(
- 'Banned non-inclusive language was used.\n' + '\n'.join(warnings)))
- if (errors):
- result.append(
- output_api.PresubmitError('Banned non-inclusive language was used.\n' +
- '\n'.join(errors)))
- return result
+ result = []
+ if (warnings):
+ result.append(
+ output_api.PresubmitPromptWarning(
+ 'Banned non-inclusive language was used.\n' +
+ '\n'.join(warnings)))
+ if (errors):
+ result.append(
+ output_api.PresubmitError(
+ 'Banned non-inclusive language was used.\n' +
+ '\n'.join(errors)))
+ return result
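
For reference, a hypothetical entry in the format CheckInclusiveLanguage expects for non_inclusive_terms, matching the tuple layout described above _NON_INCLUSIVE_TERMS: a pattern (a leading '/' marks it as a regular expression), a sequence of message lines, and an error flag.

_EXAMPLE_TERMS = (
    (r'/\bwhite[-_]?list\b',
     ('Please consider using "allowlist" instead.', ),
     False),  # False: reported as a warning, not a presubmit error.
)
# Usage (illustrative):
#   CheckInclusiveLanguage(input_api, output_api,
#                          non_inclusive_terms=_EXAMPLE_TERMS)
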
def CheckUpdateOwnersFileReferences(input_api, output_api):
- """Checks whether an OWNERS file is being (re)moved and if so asks the
+ """Checks whether an OWNERS file is being (re)moved and if so asks the
contributor to update any file:// references to it."""
- files = []
- # AffectedFiles() includes owner files, not AffectedSourceFiles().
- for f in input_api.AffectedFiles():
- # Moved files appear here as one deletion and one addition.
- if f.LocalPath().endswith('OWNERS') and f.Action() == 'D':
- files.append(f.LocalPath())
- if not files:
- return []
- return [
- output_api.PresubmitPromptWarning(
- 'OWNERS files being moved/removed, please update any file:// ' +
- 'references to them in other OWNERS files', files)
- ]
+ files = []
+ # AffectedFiles() includes owner files, not AffectedSourceFiles().
+ for f in input_api.AffectedFiles():
+ # Moved files appear here as one deletion and one addition.
+ if f.LocalPath().endswith('OWNERS') and f.Action() == 'D':
+ files.append(f.LocalPath())
+ if not files:
+ return []
+ return [
+ output_api.PresubmitPromptWarning(
+ 'OWNERS files being moved/removed, please update any file:// ' +
+ 'references to them in other OWNERS files', files)
+ ]
diff --git a/presubmit_support.py b/presubmit_support.py
index de8ce6ade5..b986b7bc77 100755
--- a/presubmit_support.py
+++ b/presubmit_support.py
@@ -2,7 +2,6 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
@@ -53,6 +52,8 @@ import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
+# TODO: Should fix these warnings.
+# pylint: disable=line-too-long
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
@@ -63,30 +64,29 @@ _SHOW_CALLSTACKS = False
def time_time():
- # Use this so that it can be mocked in tests without interfering with python
- # system machinery.
- return time.time()
+ # Use this so that it can be mocked in tests without interfering with python
+ # system machinery.
+ return time.time()
class PresubmitFailure(Exception):
- pass
+ pass
class CommandData(object):
- def __init__(self, name, cmd, kwargs, message, python3=True):
- # The python3 argument is ignored but has to be retained because of the many
- # callers in other repos that pass it in.
- del python3
- self.name = name
- self.cmd = cmd
- self.stdin = kwargs.get('stdin', None)
- self.kwargs = kwargs.copy()
- self.kwargs['stdout'] = subprocess.PIPE
- self.kwargs['stderr'] = subprocess.STDOUT
- self.kwargs['stdin'] = subprocess.PIPE
- self.message = message
- self.info = None
-
+ def __init__(self, name, cmd, kwargs, message, python3=True):
+ # The python3 argument is ignored but has to be retained because of the
+ # many callers in other repos that pass it in.
+ del python3
+ self.name = name
+ self.cmd = cmd
+ self.stdin = kwargs.get('stdin', None)
+ self.kwargs = kwargs.copy()
+ self.kwargs['stdout'] = subprocess.PIPE
+ self.kwargs['stderr'] = subprocess.STDOUT
+ self.kwargs['stdin'] = subprocess.PIPE
+ self.message = message
+ self.info = None
# Adapted from
@@ -102,476 +102,514 @@ class CommandData(object):
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
- sigint_returncodes = {-signal.SIGINT, # Unix
- -1073741510, # Windows
- }
- def __init__(self):
- self.__lock = threading.Lock()
- self.__processes = set()
- self.__got_sigint = False
- self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
+ sigint_returncodes = {
+ -signal.SIGINT, # Unix
+ -1073741510, # Windows
+ }
- def __on_sigint(self):
- self.__got_sigint = True
- while self.__processes:
- try:
- self.__processes.pop().terminate()
- except OSError:
- pass
+ def __init__(self):
+ self.__lock = threading.Lock()
+ self.__processes = set()
+ self.__got_sigint = False
+ self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
- def interrupt(self, signal_num, frame):
- with self.__lock:
- self.__on_sigint()
- self.__previous_signal(signal_num, frame)
+ def __on_sigint(self):
+ self.__got_sigint = True
+ while self.__processes:
+ try:
+ self.__processes.pop().terminate()
+ except OSError:
+ pass
- def got_sigint(self):
- with self.__lock:
- return self.__got_sigint
+ def interrupt(self, signal_num, frame):
+ with self.__lock:
+ self.__on_sigint()
+ self.__previous_signal(signal_num, frame)
+
+ def got_sigint(self):
+ with self.__lock:
+ return self.__got_sigint
+
+ def wait(self, p, stdin):
+ with self.__lock:
+ if self.__got_sigint:
+ p.terminate()
+ self.__processes.add(p)
+ stdout, stderr = p.communicate(stdin)
+ code = p.returncode
+ with self.__lock:
+ self.__processes.discard(p)
+ if code in self.sigint_returncodes:
+ self.__on_sigint()
+ return stdout, stderr
- def wait(self, p, stdin):
- with self.__lock:
- if self.__got_sigint:
- p.terminate()
- self.__processes.add(p)
- stdout, stderr = p.communicate(stdin)
- code = p.returncode
- with self.__lock:
- self.__processes.discard(p)
- if code in self.sigint_returncodes:
- self.__on_sigint()
- return stdout, stderr
sigint_handler = SigintHandler()
class Timer(object):
- def __init__(self, timeout, fn):
- self.completed = False
- self._fn = fn
- self._timer = threading.Timer(timeout, self._onTimer) if timeout else None
+ def __init__(self, timeout, fn):
+ self.completed = False
+ self._fn = fn
+ self._timer = threading.Timer(timeout,
+ self._onTimer) if timeout else None
- def __enter__(self):
- if self._timer:
- self._timer.start()
- return self
+ def __enter__(self):
+ if self._timer:
+ self._timer.start()
+ return self
- def __exit__(self, _type, _value, _traceback):
- if self._timer:
- self._timer.cancel()
+ def __exit__(self, _type, _value, _traceback):
+ if self._timer:
+ self._timer.cancel()
- def _onTimer(self):
- self._fn()
- self.completed = True
+ def _onTimer(self):
+ self._fn()
+ self.completed = True
class ThreadPool(object):
- def __init__(self, pool_size=None, timeout=None):
- self.timeout = timeout
- self._pool_size = pool_size or multiprocessing.cpu_count()
- if sys.platform == 'win32':
- # TODO(crbug.com/1190269) - we can't use more than 56 child processes on
- # Windows or Python3 may hang.
- self._pool_size = min(self._pool_size, 56)
- self._messages = []
- self._messages_lock = threading.Lock()
- self._tests = []
- self._tests_lock = threading.Lock()
- self._nonparallel_tests = []
+ def __init__(self, pool_size=None, timeout=None):
+ self.timeout = timeout
+ self._pool_size = pool_size or multiprocessing.cpu_count()
+ if sys.platform == 'win32':
+ # TODO(crbug.com/1190269) - we can't use more than 56 child
+ # processes on Windows or Python3 may hang.
+ self._pool_size = min(self._pool_size, 56)
+ self._messages = []
+ self._messages_lock = threading.Lock()
+ self._tests = []
+ self._tests_lock = threading.Lock()
+ self._nonparallel_tests = []
- def _GetCommand(self, test):
- vpython = 'vpython3'
- if sys.platform == 'win32':
- vpython += '.bat'
+ def _GetCommand(self, test):
+ vpython = 'vpython3'
+ if sys.platform == 'win32':
+ vpython += '.bat'
- cmd = test.cmd
- if cmd[0] == 'python':
- cmd = list(cmd)
- cmd[0] = vpython
- elif cmd[0].endswith('.py'):
- cmd = [vpython] + cmd
+ cmd = test.cmd
+ if cmd[0] == 'python':
+ cmd = list(cmd)
+ cmd[0] = vpython
+ elif cmd[0].endswith('.py'):
+ cmd = [vpython] + cmd
- # On Windows, scripts on the current directory take precedence over PATH, so
- # that when testing depot_tools on Windows, calling `vpython.bat` will
- # execute the copy of vpython of the depot_tools under test instead of the
- # one in the bot.
- # As a workaround, we run the tests from the parent directory instead.
- if (cmd[0] == vpython and
- 'cwd' in test.kwargs and
- os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
- test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
- cmd[1] = os.path.join('depot_tools', cmd[1])
+        # On Windows, scripts in the current directory take precedence over
+ # PATH, so that when testing depot_tools on Windows, calling
+ # `vpython.bat` will execute the copy of vpython of the depot_tools
+ # under test instead of the one in the bot. As a workaround, we run the
+ # tests from the parent directory instead.
+ if (cmd[0] == vpython and 'cwd' in test.kwargs
+ and os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
+ test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
+ cmd[1] = os.path.join('depot_tools', cmd[1])
- return cmd
+ return cmd
- def _RunWithTimeout(self, cmd, stdin, kwargs):
- p = subprocess.Popen(cmd, **kwargs)
- with Timer(self.timeout, p.terminate) as timer:
- stdout, _ = sigint_handler.wait(p, stdin)
- stdout = stdout.decode('utf-8', 'ignore')
- if timer.completed:
- stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
- return p.returncode, stdout
+ def _RunWithTimeout(self, cmd, stdin, kwargs):
+ p = subprocess.Popen(cmd, **kwargs)
+ with Timer(self.timeout, p.terminate) as timer:
+ stdout, _ = sigint_handler.wait(p, stdin)
+ stdout = stdout.decode('utf-8', 'ignore')
+ if timer.completed:
+ stdout = 'Process timed out after %ss\n%s' % (self.timeout,
+ stdout)
+ return p.returncode, stdout
- def CallCommand(self, test, show_callstack=None):
- """Runs an external program.
+ def CallCommand(self, test, show_callstack=None):
+ """Runs an external program.
This function converts invocations of .py files and invocations of 'python'
to vpython invocations.
"""
- cmd = self._GetCommand(test)
- try:
- start = time_time()
- returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
- duration = time_time() - start
- except Exception:
- duration = time_time() - start
- return test.message(
- '%s\n%s exec failure (%4.2fs)\n%s' %
- (test.name, ' '.join(cmd), duration, traceback.format_exc()),
- show_callstack=show_callstack)
+ cmd = self._GetCommand(test)
+ try:
+ start = time_time()
+ returncode, stdout = self._RunWithTimeout(cmd, test.stdin,
+ test.kwargs)
+ duration = time_time() - start
+ except Exception:
+ duration = time_time() - start
+ return test.message(
+ '%s\n%s exec failure (%4.2fs)\n%s' %
+ (test.name, ' '.join(cmd), duration, traceback.format_exc()),
+ show_callstack=show_callstack)
- if returncode != 0:
- return test.message('%s\n%s (%4.2fs) failed\n%s' %
- (test.name, ' '.join(cmd), duration, stdout),
- show_callstack=show_callstack)
+ if returncode != 0:
+ return test.message('%s\n%s (%4.2fs) failed\n%s' %
+ (test.name, ' '.join(cmd), duration, stdout),
+ show_callstack=show_callstack)
- if test.info:
- return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration),
- show_callstack=show_callstack)
+ if test.info:
+ return test.info('%s\n%s (%4.2fs)' %
+ (test.name, ' '.join(cmd), duration),
+ show_callstack=show_callstack)
- def AddTests(self, tests, parallel=True):
- if parallel:
- self._tests.extend(tests)
- else:
- self._nonparallel_tests.extend(tests)
+ def AddTests(self, tests, parallel=True):
+ if parallel:
+ self._tests.extend(tests)
+ else:
+ self._nonparallel_tests.extend(tests)
- def RunAsync(self):
- self._messages = []
+ def RunAsync(self):
+ self._messages = []
- def _WorkerFn():
- while True:
- test = None
- with self._tests_lock:
- if not self._tests:
- break
- test = self._tests.pop()
- result = self.CallCommand(test, show_callstack=False)
- if result:
- with self._messages_lock:
- self._messages.append(result)
+ def _WorkerFn():
+ while True:
+ test = None
+ with self._tests_lock:
+ if not self._tests:
+ break
+ test = self._tests.pop()
+ result = self.CallCommand(test, show_callstack=False)
+ if result:
+ with self._messages_lock:
+ self._messages.append(result)
- def _StartDaemon():
- t = threading.Thread(target=_WorkerFn)
- t.daemon = True
- t.start()
- return t
+ def _StartDaemon():
+ t = threading.Thread(target=_WorkerFn)
+ t.daemon = True
+ t.start()
+ return t
- while self._nonparallel_tests:
- test = self._nonparallel_tests.pop()
- result = self.CallCommand(test)
- if result:
- self._messages.append(result)
+ while self._nonparallel_tests:
+ test = self._nonparallel_tests.pop()
+ result = self.CallCommand(test)
+ if result:
+ self._messages.append(result)
- if self._tests:
- threads = [_StartDaemon() for _ in range(self._pool_size)]
- for worker in threads:
- worker.join()
+ if self._tests:
+ threads = [_StartDaemon() for _ in range(self._pool_size)]
+ for worker in threads:
+ worker.join()
- return self._messages
+ return self._messages
def normpath(path):
- '''Version of os.path.normpath that also changes backward slashes to
+ '''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
- # This is safe to always do because the Windows version of os.path.normpath
- # will replace forward slashes with backward slashes.
- path = path.replace(os.sep, '/')
- return os.path.normpath(path)
+ # This is safe to always do because the Windows version of os.path.normpath
+ # will replace forward slashes with backward slashes.
+ path = path.replace(os.sep, '/')
+ return os.path.normpath(path)
def _RightHandSideLinesImpl(affected_files):
- """Implements RightHandSideLines for InputApi and GclChange."""
- for af in affected_files:
- lines = af.ChangedContents()
- for line in lines:
- yield (af, line[0], line[1])
+ """Implements RightHandSideLines for InputApi and GclChange."""
+ for af in affected_files:
+ lines = af.ChangedContents()
+ for line in lines:
+ yield (af, line[0], line[1])
def prompt_should_continue(prompt_string):
- sys.stdout.write(prompt_string)
- sys.stdout.flush()
- response = sys.stdin.readline().strip().lower()
- return response in ('y', 'yes')
+ sys.stdout.write(prompt_string)
+ sys.stdout.flush()
+ response = sys.stdin.readline().strip().lower()
+ return response in ('y', 'yes')
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
- """Base class for result objects."""
- fatal = False
- should_prompt = False
+ """Base class for result objects."""
+ fatal = False
+ should_prompt = False
- def __init__(self, message, items=None, long_text='', show_callstack=None):
- """
+ def __init__(self, message, items=None, long_text='', show_callstack=None):
+ """
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
- self._message = _PresubmitResult._ensure_str(message)
- self._items = items or []
- self._long_text = _PresubmitResult._ensure_str(long_text.rstrip())
- if show_callstack is None:
- show_callstack = _SHOW_CALLSTACKS
- if show_callstack:
- self._long_text += 'Presubmit result call stack is:\n'
- self._long_text += ''.join(traceback.format_stack(None, 8))
+ self._message = _PresubmitResult._ensure_str(message)
+ self._items = items or []
+ self._long_text = _PresubmitResult._ensure_str(long_text.rstrip())
+ if show_callstack is None:
+ show_callstack = _SHOW_CALLSTACKS
+ if show_callstack:
+ self._long_text += 'Presubmit result call stack is:\n'
+ self._long_text += ''.join(traceback.format_stack(None, 8))
- @staticmethod
- def _ensure_str(val):
- """
+ @staticmethod
+ def _ensure_str(val):
+ """
val: A "stringish" value. Can be any of str or bytes.
returns: A str after applying encoding/decoding as needed.
Assumes/uses UTF-8 for relevant inputs/outputs.
"""
- if isinstance(val, str):
- return val
- if isinstance(val, bytes):
- return val.decode()
- raise ValueError("Unknown string type %s" % type(val))
+ if isinstance(val, str):
+ return val
+ if isinstance(val, bytes):
+ return val.decode()
+ raise ValueError("Unknown string type %s" % type(val))
- def handle(self):
- sys.stdout.write(self._message)
- sys.stdout.write('\n')
- for item in self._items:
- sys.stdout.write(' ')
- # Write separately in case it's unicode.
- sys.stdout.write(str(item))
- sys.stdout.write('\n')
- if self._long_text:
- sys.stdout.write('\n***************\n')
- # Write separately in case it's unicode.
- sys.stdout.write(self._long_text)
- sys.stdout.write('\n***************\n')
+ def handle(self):
+ sys.stdout.write(self._message)
+ sys.stdout.write('\n')
+ for item in self._items:
+ sys.stdout.write(' ')
+ # Write separately in case it's unicode.
+ sys.stdout.write(str(item))
+ sys.stdout.write('\n')
+ if self._long_text:
+ sys.stdout.write('\n***************\n')
+ # Write separately in case it's unicode.
+ sys.stdout.write(self._long_text)
+ sys.stdout.write('\n***************\n')
- def json_format(self):
- return {
- 'message': self._message,
- 'items': [str(item) for item in self._items],
- 'long_text': self._long_text,
- 'fatal': self.fatal
- }
+ def json_format(self):
+ return {
+ 'message': self._message,
+ 'items': [str(item) for item in self._items],
+ 'long_text': self._long_text,
+ 'fatal': self.fatal
+ }
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
- """A hard presubmit error."""
- fatal = True
+ """A hard presubmit error."""
+ fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
- """An warning that prompts the user if they want to continue."""
- should_prompt = True
+ """An warning that prompts the user if they want to continue."""
+ should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
- """Just print something to the screen -- but it's not even a warning."""
+ """Just print something to the screen -- but it's not even a warning."""
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
- """A warning that should be included in the review request email."""
- def __init__(self, *args, **kwargs):
- super(_MailTextResult, self).__init__()
- raise NotImplementedError()
+ """A warning that should be included in the review request email."""
+ def __init__(self, *args, **kwargs):
+ super(_MailTextResult, self).__init__()
+ raise NotImplementedError()
+
class GerritAccessor(object):
- """Limited Gerrit functionality for canned presubmit checks to work.
+ """Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
+ def __init__(self, url=None, project=None, branch=None):
+ self.host = urlparse.urlparse(url).netloc if url else None
+ self.project = project
+ self.branch = branch
+ self.cache = {}
+ self.code_owners_enabled = None
- def __init__(self, url=None, project=None, branch=None):
- self.host = urlparse.urlparse(url).netloc if url else None
- self.project = project
- self.branch = branch
- self.cache = {}
- self.code_owners_enabled = None
+ def _FetchChangeDetail(self, issue):
+ # Separate function to be easily mocked in tests.
+ try:
+ return gerrit_util.GetChangeDetail(
+ self.host, str(issue),
+ ['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
+ except gerrit_util.GerritError as e:
+ if e.http_status == 404:
+ raise Exception('Either Gerrit issue %s doesn\'t exist, or '
+ 'no credentials to fetch issue details' % issue)
+ raise
- def _FetchChangeDetail(self, issue):
- # Separate function to be easily mocked in tests.
- try:
- return gerrit_util.GetChangeDetail(
- self.host, str(issue),
- ['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
- except gerrit_util.GerritError as e:
- if e.http_status == 404:
- raise Exception('Either Gerrit issue %s doesn\'t exist, or '
- 'no credentials to fetch issue details' % issue)
- raise
-
- def GetChangeInfo(self, issue):
- """Returns labels and all revisions (patchsets) for this issue.
+ def GetChangeInfo(self, issue):
+ """Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
However, API isn't very clear what's inside, so see tests for example.
"""
- assert issue
- cache_key = int(issue)
- if cache_key not in self.cache:
- self.cache[cache_key] = self._FetchChangeDetail(issue)
- return self.cache[cache_key]
+ assert issue
+ cache_key = int(issue)
+ if cache_key not in self.cache:
+ self.cache[cache_key] = self._FetchChangeDetail(issue)
+ return self.cache[cache_key]
- def GetChangeDescription(self, issue, patchset=None):
- """If patchset is none, fetches current patchset."""
- info = self.GetChangeInfo(issue)
- # info is a reference to cache. We'll modify it here adding description to
- # it to the right patchset, if it is not yet there.
+ def GetChangeDescription(self, issue, patchset=None):
+ """If patchset is none, fetches current patchset."""
+ info = self.GetChangeInfo(issue)
+ # info is a reference to the cache. We'll modify it here, adding the
+ # description to the right patchset if it is not yet there.
- # Find revision info for the patchset we want.
- if patchset is not None:
- for rev, rev_info in info['revisions'].items():
- if str(rev_info['_number']) == str(patchset):
- break
- else:
- raise Exception('patchset %s doesn\'t exist in issue %s' % (
- patchset, issue))
- else:
- rev = info['current_revision']
- rev_info = info['revisions'][rev]
+ # Find revision info for the patchset we want.
+ if patchset is not None:
+ for rev, rev_info in info['revisions'].items():
+ if str(rev_info['_number']) == str(patchset):
+ break
+ else:
+ raise Exception('patchset %s doesn\'t exist in issue %s' %
+ (patchset, issue))
+ else:
+ rev = info['current_revision']
+ rev_info = info['revisions'][rev]
- return rev_info['commit']['message']
+ return rev_info['commit']['message']
- def GetDestRef(self, issue):
- ref = self.GetChangeInfo(issue)['branch']
- if not ref.startswith('refs/'):
- # NOTE: it is possible to create 'refs/x' branch,
- # aka 'refs/heads/refs/x'. However, this is ill-advised.
- ref = 'refs/heads/%s' % ref
- return ref
+ def GetDestRef(self, issue):
+ ref = self.GetChangeInfo(issue)['branch']
+ if not ref.startswith('refs/'):
+ # NOTE: it is possible to create 'refs/x' branch,
+ # aka 'refs/heads/refs/x'. However, this is ill-advised.
+ ref = 'refs/heads/%s' % ref
+ return ref
- def _GetApproversForLabel(self, issue, label):
- change_info = self.GetChangeInfo(issue)
- label_info = change_info.get('labels', {}).get(label, {})
- values = label_info.get('values', {}).keys()
- if not values:
- return []
- max_value = max(int(v) for v in values)
- return [v for v in label_info.get('all', [])
- if v.get('value', 0) == max_value]
+ def _GetApproversForLabel(self, issue, label):
+ change_info = self.GetChangeInfo(issue)
+ label_info = change_info.get('labels', {}).get(label, {})
+ values = label_info.get('values', {}).keys()
+ if not values:
+ return []
+ max_value = max(int(v) for v in values)
+ return [
+ v for v in label_info.get('all', [])
+ if v.get('value', 0) == max_value
+ ]
- def IsBotCommitApproved(self, issue):
- return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
+ def IsBotCommitApproved(self, issue):
+ return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
- def IsOwnersOverrideApproved(self, issue):
- return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
+ def IsOwnersOverrideApproved(self, issue):
+ return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
- def GetChangeOwner(self, issue):
- return self.GetChangeInfo(issue)['owner']['email']
+ def GetChangeOwner(self, issue):
+ return self.GetChangeInfo(issue)['owner']['email']
- def GetChangeReviewers(self, issue, approving_only=True):
- changeinfo = self.GetChangeInfo(issue)
- if approving_only:
- reviewers = self._GetApproversForLabel(issue, 'Code-Review')
- else:
- reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
- return [r.get('email') for r in reviewers]
+ def GetChangeReviewers(self, issue, approving_only=True):
+ changeinfo = self.GetChangeInfo(issue)
+ if approving_only:
+ reviewers = self._GetApproversForLabel(issue, 'Code-Review')
+ else:
+ reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
+ return [r.get('email') for r in reviewers]
- def UpdateDescription(self, description, issue):
- gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')
+ def UpdateDescription(self, description, issue):
+ gerrit_util.SetCommitMessage(self.host,
+ issue,
+ description,
+ notify='NONE')
- def IsCodeOwnersEnabledOnRepo(self):
- if self.code_owners_enabled is None:
- self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
- self.host, self.project)
- return self.code_owners_enabled
+ def IsCodeOwnersEnabledOnRepo(self):
+ if self.code_owners_enabled is None:
+ self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
+ self.host, self.project)
+ return self.code_owners_enabled
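
For context, the label handling above reduces to: take the highest numeric value defined for a label and keep only the votes that reached it. A minimal standalone sketch of that selection, using a made-up dict shaped like Gerrit's REST 'labels' payload (this is not a call into GerritAccessor):

# Hypothetical label data, shaped like Gerrit's REST output.
label_info = {
    'values': {'-1': 'Bad', ' 0': 'No score', '+1': 'Good'},
    'all': [
        {'email': 'alice@example.com', 'value': 1},
        {'email': 'bob@example.com', 'value': 0},
    ],
}

# Same selection as _GetApproversForLabel: keep votes at the maximum value.
max_value = max(int(v) for v in label_info.get('values', {}).keys())
approvers = [v for v in label_info.get('all', [])
             if v.get('value', 0) == max_value]
print([a['email'] for a in approvers])  # ['alice@example.com']
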
class OutputApi(object):
- """An instance of OutputApi gets passed to presubmit scripts so that they
+ """An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
- PresubmitResult = _PresubmitResult
- PresubmitError = _PresubmitError
- PresubmitPromptWarning = _PresubmitPromptWarning
- PresubmitNotifyResult = _PresubmitNotifyResult
- MailTextResult = _MailTextResult
+ PresubmitResult = _PresubmitResult
+ PresubmitError = _PresubmitError
+ PresubmitPromptWarning = _PresubmitPromptWarning
+ PresubmitNotifyResult = _PresubmitNotifyResult
+ MailTextResult = _MailTextResult
- def __init__(self, is_committing):
- self.is_committing = is_committing
- self.more_cc = []
+ def __init__(self, is_committing):
+ self.is_committing = is_committing
+ self.more_cc = []
- def AppendCC(self, cc):
- """Appends a user to cc for this change."""
- self.more_cc.append(cc)
+ def AppendCC(self, cc):
+ """Appends a user to cc for this change."""
+ self.more_cc.append(cc)
- def PresubmitPromptOrNotify(self, *args, **kwargs):
- """Warn the user when uploading, but only notify if committing."""
- if self.is_committing:
- return self.PresubmitNotifyResult(*args, **kwargs)
- return self.PresubmitPromptWarning(*args, **kwargs)
+ def PresubmitPromptOrNotify(self, *args, **kwargs):
+ """Warn the user when uploading, but only notify if committing."""
+ if self.is_committing:
+ return self.PresubmitNotifyResult(*args, **kwargs)
+ return self.PresubmitPromptWarning(*args, **kwargs)
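
As a usage sketch, a PRESUBMIT.py check receives these two objects and returns a list of OutputApi results. The check below is hypothetical and only uses APIs visible in this file (RightHandSideLines, LocalPath, PresubmitPromptOrNotify):

def CheckNoTodoWithoutBug(input_api, output_api):
    # Hypothetical check: flag TODOs that don't reference a bug.
    problems = []
    for f, line_num, line in input_api.RightHandSideLines():
        if 'TODO' in line and 'crbug.com' not in line:
            problems.append('%s:%d' % (f.LocalPath(), line_num))
    if not problems:
        return []
    # Warn on upload, merely notify when committing.
    return [output_api.PresubmitPromptOrNotify(
        'TODOs should reference a bug:', items=problems)]
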
class InputApi(object):
- """An instance of this object is passed to presubmit scripts so they can
+ """An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
- # Method could be a function
- # pylint: disable=no-self-use
+ # Method could be a function
+ # pylint: disable=no-self-use
- # File extensions that are considered source files from a style guide
- # perspective. Don't modify this list from a presubmit script!
- #
- # Files without an extension aren't included in the list. If you want to
- # filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
- # Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
- DEFAULT_FILES_TO_CHECK = (
- # C++ and friends
- r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
- r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
- # Scripts
- r'.+\.js$', r'.+\.ts$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$',
- r'.+\.pm$',
- # Other
- r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
- r'.+\.fidl$', r'.+\.rs$',
- )
+ # File extensions that are considered source files from a style guide
+ # perspective. Don't modify this list from a presubmit script!
+ #
+ # Files without an extension aren't included in the list. If you want to
+ # filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
+ # Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
+ DEFAULT_FILES_TO_CHECK = (
+ # C++ and friends
+ r'.+\.c$',
+ r'.+\.cc$',
+ r'.+\.cpp$',
+ r'.+\.h$',
+ r'.+\.m$',
+ r'.+\.mm$',
+ r'.+\.inl$',
+ r'.+\.asm$',
+ r'.+\.hxx$',
+ r'.+\.hpp$',
+ r'.+\.s$',
+ r'.+\.S$',
+ # Scripts
+ r'.+\.js$',
+ r'.+\.ts$',
+ r'.+\.py$',
+ r'.+\.sh$',
+ r'.+\.rb$',
+ r'.+\.pl$',
+ r'.+\.pm$',
+ # Other
+ r'.+\.java$',
+ r'.+\.mk$',
+ r'.+\.am$',
+ r'.+\.css$',
+ r'.+\.mojom$',
+ r'.+\.fidl$',
+ r'.+\.rs$',
+ )
- # Path regexp that should be excluded from being considered containing source
- # files. Don't modify this list from a presubmit script!
- DEFAULT_FILES_TO_SKIP = (
- r'testing_support[\\\/]google_appengine[\\\/].*',
- r'.*\bexperimental[\\\/].*',
- # Exclude third_party/.* but NOT third_party/{WebKit,blink}
- # (crbug.com/539768 and crbug.com/836555).
- r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
- # Output directories (just in case)
- r'.*\bDebug[\\\/].*',
- r'.*\bRelease[\\\/].*',
- r'.*\bxcodebuild[\\\/].*',
- r'.*\bout[\\\/].*',
- # All caps files like README and LICENCE.
- r'.*\b[A-Z0-9_]{2,}$',
- # SCM (can happen in dual SCM configuration). (Slightly over aggressive)
- r'(|.*[\\\/])\.git[\\\/].*',
- r'(|.*[\\\/])\.svn[\\\/].*',
- # There is no point in processing a patch file.
- r'.+\.diff$',
- r'.+\.patch$',
- )
+ # Path regexp that should be excluded from being considered containing
+ # source files. Don't modify this list from a presubmit script!
+ DEFAULT_FILES_TO_SKIP = (
+ r'testing_support[\\\/]google_appengine[\\\/].*',
+ r'.*\bexperimental[\\\/].*',
+ # Exclude third_party/.* but NOT third_party/{WebKit,blink}
+ # (crbug.com/539768 and crbug.com/836555).
+ r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
+ # Output directories (just in case)
+ r'.*\bDebug[\\\/].*',
+ r'.*\bRelease[\\\/].*',
+ r'.*\bxcodebuild[\\\/].*',
+ r'.*\bout[\\\/].*',
+ # All caps files like README and LICENCE.
+ r'.*\b[A-Z0-9_]{2,}$',
+ # SCM (can happen in dual SCM configuration). (Slightly over aggressive)
+ r'(|.*[\\\/])\.git[\\\/].*',
+ r'(|.*[\\\/])\.svn[\\\/].*',
+ # There is no point in processing a patch file.
+ r'.+\.diff$',
+ r'.+\.patch$',
+ )
- def __init__(self, change, presubmit_path, is_committing,
- verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False,
- no_diffs=False):
- """Builds an InputApi object.
+ def __init__(self,
+ change,
+ presubmit_path,
+ is_committing,
+ verbose,
+ gerrit_obj,
+ dry_run=None,
+ thread_pool=None,
+ parallel=False,
+ no_diffs=False):
+ """Builds an InputApi object.
Args:
change: A presubmit.Change object.
@@ -584,157 +622,161 @@ class InputApi(object):
no_diffs: if true, implies that --files or --all was specified so some
checks can be skipped, and some errors will be messages.
"""
- # Version number of the presubmit_support script.
- self.version = [int(x) for x in __version__.split('.')]
- self.change = change
- self.is_committing = is_committing
- self.gerrit = gerrit_obj
- self.dry_run = dry_run
- self.no_diffs = no_diffs
+ # Version number of the presubmit_support script.
+ self.version = [int(x) for x in __version__.split('.')]
+ self.change = change
+ self.is_committing = is_committing
+ self.gerrit = gerrit_obj
+ self.dry_run = dry_run
+ self.no_diffs = no_diffs
- self.parallel = parallel
- self.thread_pool = thread_pool or ThreadPool()
+ self.parallel = parallel
+ self.thread_pool = thread_pool or ThreadPool()
- # We expose various modules and functions as attributes of the input_api
- # so that presubmit scripts don't have to import them.
- self.ast = ast
- self.basename = os.path.basename
- self.cpplint = cpplint
- self.fnmatch = fnmatch
- self.gclient_paths = gclient_paths
- self.glob = glob.glob
- self.json = json
- self.logging = logging.getLogger('PRESUBMIT')
- self.os_listdir = os.listdir
- self.os_path = os.path
- self.os_stat = os.stat
- self.os_walk = os.walk
- self.re = re
- self.subprocess = subprocess
- self.sys = sys
- self.tempfile = tempfile
- self.time = time
- self.unittest = unittest
- self.urllib_request = urllib_request
- self.urllib_error = urllib_error
+ # We expose various modules and functions as attributes of the input_api
+ # so that presubmit scripts don't have to import them.
+ self.ast = ast
+ self.basename = os.path.basename
+ self.cpplint = cpplint
+ self.fnmatch = fnmatch
+ self.gclient_paths = gclient_paths
+ self.glob = glob.glob
+ self.json = json
+ self.logging = logging.getLogger('PRESUBMIT')
+ self.os_listdir = os.listdir
+ self.os_path = os.path
+ self.os_stat = os.stat
+ self.os_walk = os.walk
+ self.re = re
+ self.subprocess = subprocess
+ self.sys = sys
+ self.tempfile = tempfile
+ self.time = time
+ self.unittest = unittest
+ self.urllib_request = urllib_request
+ self.urllib_error = urllib_error
- self.is_windows = sys.platform == 'win32'
+ self.is_windows = sys.platform == 'win32'
- # Set python_executable to 'vpython3' in order to allow scripts in other
- # repos (e.g. src.git) to automatically pick up that repo's .vpython file,
- # instead of inheriting the one in depot_tools.
- self.python_executable = 'vpython3'
- # Offer a python 3 executable for use during the migration off of python 2.
- self.python3_executable = 'vpython3'
- self.environ = os.environ
+ # Set python_executable to 'vpython3' in order to allow scripts in other
+ # repos (e.g. src.git) to automatically pick up that repo's .vpython
+ # file, instead of inheriting the one in depot_tools.
+ self.python_executable = 'vpython3'
+ # Offer a python 3 executable for use during the migration off of
+ # python 2.
+ self.python3_executable = 'vpython3'
+ self.environ = os.environ
- # InputApi.platform is the platform you're currently running on.
- self.platform = sys.platform
+ # InputApi.platform is the platform you're currently running on.
+ self.platform = sys.platform
- self.cpu_count = multiprocessing.cpu_count()
- if self.is_windows:
- # TODO(crbug.com/1190269) - we can't use more than 56 child processes on
- # Windows or Python3 may hang.
- self.cpu_count = min(self.cpu_count, 56)
+ self.cpu_count = multiprocessing.cpu_count()
+ if self.is_windows:
+ # TODO(crbug.com/1190269) - we can't use more than 56 child
+ # processes on Windows or Python3 may hang.
+ self.cpu_count = min(self.cpu_count, 56)
- # The local path of the currently-being-processed presubmit script.
- self._current_presubmit_path = os.path.dirname(presubmit_path)
+ # The local path of the currently-being-processed presubmit script.
+ self._current_presubmit_path = os.path.dirname(presubmit_path)
- # We carry the canned checks so presubmit scripts can easily use them.
- self.canned_checks = presubmit_canned_checks
+ # We carry the canned checks so presubmit scripts can easily use them.
+ self.canned_checks = presubmit_canned_checks
- # Temporary files we must manually remove at the end of a run.
- self._named_temporary_files = []
+ # Temporary files we must manually remove at the end of a run.
+ self._named_temporary_files = []
- self.owners_client = None
- if self.gerrit and not 'PRESUBMIT_SKIP_NETWORK' in self.environ:
- try:
- self.owners_client = owners_client.GetCodeOwnersClient(
- host=self.gerrit.host,
- project=self.gerrit.project,
- branch=self.gerrit.branch)
- except Exception as e:
- print('Failed to set owners_client - %s' % str(e))
- self.owners_finder = owners_finder.OwnersFinder
- self.verbose = verbose
- self.Command = CommandData
+ self.owners_client = None
+ if self.gerrit and not 'PRESUBMIT_SKIP_NETWORK' in self.environ:
+ try:
+ self.owners_client = owners_client.GetCodeOwnersClient(
+ host=self.gerrit.host,
+ project=self.gerrit.project,
+ branch=self.gerrit.branch)
+ except Exception as e:
+ print('Failed to set owners_client - %s' % str(e))
+ self.owners_finder = owners_finder.OwnersFinder
+ self.verbose = verbose
+ self.Command = CommandData
- # Replace <hash_map> and <hash_set> as headers that need to be included
- # with 'base/containers/hash_tables.h' instead.
- # Access to a protected member _XX of a client class
- # pylint: disable=protected-access
- self.cpplint._re_pattern_templates = [
- (a, b, 'base/containers/hash_tables.h')
- if header in ('<hash_map>', '<hash_set>') else (a, b, header)
- for (a, b, header) in cpplint._re_pattern_templates
- ]
+ # Replace <hash_map> and <hash_set> as headers that need to be included
+ # with 'base/containers/hash_tables.h' instead.
+ # Access to a protected member _XX of a client class
+ # pylint: disable=protected-access
+ self.cpplint._re_pattern_templates = [
+ (a, b,
+ 'base/containers/hash_tables.h') if header in ('<hash_map>',
+ '<hash_set>') else
+ (a, b, header) for (a, b, header) in cpplint._re_pattern_templates
+ ]
- def SetTimeout(self, timeout):
- self.thread_pool.timeout = timeout
+ def SetTimeout(self, timeout):
+ self.thread_pool.timeout = timeout
- def PresubmitLocalPath(self):
- """Returns the local path of the presubmit script currently being run.
+ def PresubmitLocalPath(self):
+ """Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
presubmit script. For example, It can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
- return self._current_presubmit_path
+ return self._current_presubmit_path
- def AffectedFiles(self, include_deletes=True, file_filter=None):
- """Same as input_api.change.AffectedFiles() except only lists files
+ def AffectedFiles(self, include_deletes=True, file_filter=None):
+ """Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof. Note that files are listed using the OS
path separator, so backslashes are used as separators on Windows.
"""
- dir_with_slash = normpath(self.PresubmitLocalPath())
- # normpath strips trailing path separators, so the trailing separator has to
- # be added after the normpath call.
- if len(dir_with_slash) > 0:
- dir_with_slash += os.path.sep
+ dir_with_slash = normpath(self.PresubmitLocalPath())
+ # normpath strips trailing path separators, so the trailing separator
+ # has to be added after the normpath call.
+ if len(dir_with_slash) > 0:
+ dir_with_slash += os.path.sep
- return list(filter(
- lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
- self.change.AffectedFiles(include_deletes, file_filter)))
+ return list(
+ filter(
+ lambda x: normpath(x.AbsoluteLocalPath()).startswith(
+ dir_with_slash),
+ self.change.AffectedFiles(include_deletes, file_filter)))
- def LocalPaths(self):
- """Returns local paths of input_api.AffectedFiles()."""
- paths = [af.LocalPath() for af in self.AffectedFiles()]
- logging.debug('LocalPaths: %s', paths)
- return paths
+ def LocalPaths(self):
+ """Returns local paths of input_api.AffectedFiles()."""
+ paths = [af.LocalPath() for af in self.AffectedFiles()]
+ logging.debug('LocalPaths: %s', paths)
+ return paths
- def AbsoluteLocalPaths(self):
- """Returns absolute local paths of input_api.AffectedFiles()."""
- return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
+ def AbsoluteLocalPaths(self):
+ """Returns absolute local paths of input_api.AffectedFiles()."""
+ return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
- def AffectedTestableFiles(self, include_deletes=None, **kwargs):
- """Same as input_api.change.AffectedTestableFiles() except only lists files
+ def AffectedTestableFiles(self, include_deletes=None, **kwargs):
+ """Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
- if include_deletes is not None:
- warn('AffectedTestableFiles(include_deletes=%s)'
- ' is deprecated and ignored' % str(include_deletes),
- category=DeprecationWarning,
- stacklevel=2)
- # pylint: disable=consider-using-generator
- return [
- x for x in self.AffectedFiles(include_deletes=False, **kwargs)
- if x.IsTestableFile()
- ]
+ if include_deletes is not None:
+ warn('AffectedTestableFiles(include_deletes=%s)'
+ ' is deprecated and ignored' % str(include_deletes),
+ category=DeprecationWarning,
+ stacklevel=2)
+ # pylint: disable=consider-using-generator
+ return [
+ x for x in self.AffectedFiles(include_deletes=False, **kwargs)
+ if x.IsTestableFile()
+ ]
- def AffectedTextFiles(self, include_deletes=None):
- """An alias to AffectedTestableFiles for backwards compatibility."""
- return self.AffectedTestableFiles(include_deletes=include_deletes)
+ def AffectedTextFiles(self, include_deletes=None):
+ """An alias to AffectedTestableFiles for backwards compatibility."""
+ return self.AffectedTestableFiles(include_deletes=include_deletes)
- def FilterSourceFile(self,
- affected_file,
- files_to_check=None,
- files_to_skip=None,
- allow_list=None,
- block_list=None):
- """Filters out files that aren't considered 'source file'.
+ def FilterSourceFile(self,
+ affected_file,
+ files_to_check=None,
+ files_to_skip=None,
+ allow_list=None,
+ block_list=None):
+ """Filters out files that aren't considered 'source file'.
If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
and InputApi.DEFAULT_FILES_TO_SKIP is used respectively.
@@ -746,36 +788,37 @@ class InputApi(object):
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
- if files_to_check is None:
- files_to_check = self.DEFAULT_FILES_TO_CHECK
- if files_to_skip is None:
- files_to_skip = self.DEFAULT_FILES_TO_SKIP
+ if files_to_check is None:
+ files_to_check = self.DEFAULT_FILES_TO_CHECK
+ if files_to_skip is None:
+ files_to_skip = self.DEFAULT_FILES_TO_SKIP
- def Find(affected_file, items):
- local_path = affected_file.LocalPath()
- for item in items:
- if self.re.match(item, local_path):
- return True
- # Handle the cases where the files regex only handles /, but the local
- # path uses \.
- if self.is_windows and self.re.match(item, local_path.replace(
- '\\', '/')):
- return True
- return False
- return (Find(affected_file, files_to_check) and
- not Find(affected_file, files_to_skip))
+ def Find(affected_file, items):
+ local_path = affected_file.LocalPath()
+ for item in items:
+ if self.re.match(item, local_path):
+ return True
+ # Handle the cases where the files regex only handles /, but the
+ # local path uses \.
+ if self.is_windows and self.re.match(
+ item, local_path.replace('\\', '/')):
+ return True
+ return False
- def AffectedSourceFiles(self, source_file):
- """Filter the list of AffectedTestableFiles by the function source_file.
+ return (Find(affected_file, files_to_check)
+ and not Find(affected_file, files_to_skip))
+
+ def AffectedSourceFiles(self, source_file):
+ """Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
- if not source_file:
- source_file = self.FilterSourceFile
- return list(filter(source_file, self.AffectedTestableFiles()))
+ if not source_file:
+ source_file = self.FilterSourceFile
+ return list(filter(source_file, self.AffectedTestableFiles()))
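
The FilterSourceFile docstring above suggests using a lambda for custom filtering; here is a hedged sketch of a check (hypothetical name) that narrows AffectedSourceFiles to Python files that way:

def CheckPythonFilesOnly(input_api, output_api):
    # Restrict the default source-file filter to *.py and skip nothing else.
    py_filter = lambda af: input_api.FilterSourceFile(
        af, files_to_check=(r'.+\.py$', ), files_to_skip=())
    results = []
    for af in input_api.AffectedSourceFiles(py_filter):
        results.append(output_api.PresubmitNotifyResult(
            'Python file changed: %s' % af.LocalPath()))
    return results
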
- def RightHandSideLines(self, source_file_filter=None):
- """An iterator over all text lines in 'new' version of changed files.
+ def RightHandSideLines(self, source_file_filter=None):
+ """An iterator over all text lines in 'new' version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
@@ -791,22 +834,22 @@ class InputApi(object):
Note: The carriage return (LF or CR) is stripped off.
"""
- files = self.AffectedSourceFiles(source_file_filter)
- return _RightHandSideLinesImpl(files)
+ files = self.AffectedSourceFiles(source_file_filter)
+ return _RightHandSideLinesImpl(files)
- def ReadFile(self, file_item, mode='r'):
- """Reads an arbitrary file.
+ def ReadFile(self, file_item, mode='r'):
+ """Reads an arbitrary file.
Deny reading anything outside the repository.
"""
- if isinstance(file_item, AffectedFile):
- file_item = file_item.AbsoluteLocalPath()
- if not file_item.startswith(self.change.RepositoryRoot()):
- raise IOError('Access outside the repository root is denied.')
- return gclient_utils.FileRead(file_item, mode)
+ if isinstance(file_item, AffectedFile):
+ file_item = file_item.AbsoluteLocalPath()
+ if not file_item.startswith(self.change.RepositoryRoot()):
+ raise IOError('Access outside the repository root is denied.')
+ return gclient_utils.FileRead(file_item, mode)
- def CreateTemporaryFile(self, **kwargs):
- """Returns a named temporary file that must be removed with a call to
+ def CreateTemporaryFile(self, **kwargs):
+ """Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
@@ -826,158 +869,163 @@ class InputApi(object):
any temporary file; this is done transparently by the presubmit handling
code.
"""
- if 'delete' in kwargs:
- # Prevent users from passing |delete|; we take care of file deletion
- # ourselves and this prevents unintuitive error messages when we pass
- # delete=False and 'delete' is also in kwargs.
- raise TypeError('CreateTemporaryFile() does not take a "delete" '
- 'argument, file deletion is handled automatically by '
- 'the same presubmit_support code that creates InputApi '
- 'objects.')
- temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
- self._named_temporary_files.append(temp_file.name)
- return temp_file
+ if 'delete' in kwargs:
+ # Prevent users from passing |delete|; we take care of file deletion
+ # ourselves and this prevents unintuitive error messages when we
+ # pass delete=False and 'delete' is also in kwargs.
+ raise TypeError(
+ 'CreateTemporaryFile() does not take a "delete" '
+ 'argument, file deletion is handled automatically by '
+ 'the same presubmit_support code that creates InputApi '
+ 'objects.')
+ temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
+ self._named_temporary_files.append(temp_file.name)
+ return temp_file
- @property
- def tbr(self):
- """Returns if a change is TBR'ed."""
- return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
+ @property
+ def tbr(self):
+ """Returns if a change is TBR'ed."""
+ return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
- def RunTests(self, tests_mix, parallel=True):
- tests = []
- msgs = []
- for t in tests_mix:
- if isinstance(t, OutputApi.PresubmitResult) and t:
- msgs.append(t)
- else:
- assert issubclass(t.message, _PresubmitResult)
- tests.append(t)
- if self.verbose:
- t.info = _PresubmitNotifyResult
- if not t.kwargs.get('cwd'):
- t.kwargs['cwd'] = self.PresubmitLocalPath()
- self.thread_pool.AddTests(tests, parallel)
- # When self.parallel is True (i.e. --parallel is passed as an option)
- # RunTests doesn't actually run tests. It adds them to a ThreadPool that
- # will run all tests once all PRESUBMIT files are processed.
- # Otherwise, it will run them and return the results.
- if not self.parallel:
- msgs.extend(self.thread_pool.RunAsync())
- return msgs
+ def RunTests(self, tests_mix, parallel=True):
+ tests = []
+ msgs = []
+ for t in tests_mix:
+ if isinstance(t, OutputApi.PresubmitResult) and t:
+ msgs.append(t)
+ else:
+ assert issubclass(t.message, _PresubmitResult)
+ tests.append(t)
+ if self.verbose:
+ t.info = _PresubmitNotifyResult
+ if not t.kwargs.get('cwd'):
+ t.kwargs['cwd'] = self.PresubmitLocalPath()
+ self.thread_pool.AddTests(tests, parallel)
+ # When self.parallel is True (i.e. --parallel is passed as an option)
+ # RunTests doesn't actually run tests. It adds them to a ThreadPool that
+ # will run all tests once all PRESUBMIT files are processed.
+ # Otherwise, it will run them and return the results.
+ if not self.parallel:
+ msgs.extend(self.thread_pool.RunAsync())
+ return msgs
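
A sketch of mixing ready-made results with commands in RunTests, assuming the depot_tools CommandData signature (name, cmd, kwargs, message) behind input_api.Command; the test name and script are made up:

def CheckUnitTests(input_api, output_api):
    # Queue one command; RunTests runs it now, or defers it when --parallel
    # is in effect (self.parallel is True).
    cmd = input_api.Command(
        name='example_tests',
        cmd=['vpython3', 'example_tests.py'],
        kwargs={},
        message=output_api.PresubmitError)
    return input_api.RunTests([cmd])
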
class _DiffCache(object):
- """Caches diffs retrieved from a particular SCM."""
- def __init__(self, upstream=None):
- """Stores the upstream revision against which all diffs will be computed."""
- self._upstream = upstream
+ """Caches diffs retrieved from a particular SCM."""
+ def __init__(self, upstream=None):
+ """Stores the upstream revision against which all diffs will be computed."""
+ self._upstream = upstream
- def GetDiff(self, path, local_root):
- """Get the diff for a particular path."""
- raise NotImplementedError()
+ def GetDiff(self, path, local_root):
+ """Get the diff for a particular path."""
+ raise NotImplementedError()
- def GetOldContents(self, path, local_root):
- """Get the old version for a particular path."""
- raise NotImplementedError()
+ def GetOldContents(self, path, local_root):
+ """Get the old version for a particular path."""
+ raise NotImplementedError()
class _GitDiffCache(_DiffCache):
- """DiffCache implementation for git; gets all file diffs at once."""
- def __init__(self, upstream):
- super(_GitDiffCache, self).__init__(upstream=upstream)
- self._diffs_by_file = None
+ """DiffCache implementation for git; gets all file diffs at once."""
+ def __init__(self, upstream):
+ super(_GitDiffCache, self).__init__(upstream=upstream)
+ self._diffs_by_file = None
- def GetDiff(self, path, local_root):
- # Compare against None to distinguish between None and an initialized but
- # empty dictionary.
- if self._diffs_by_file == None:
- # Compute a single diff for all files and parse the output; should
- # with git this is much faster than computing one diff for each file.
- diffs = {}
+ def GetDiff(self, path, local_root):
+ # Compare against None to distinguish between None and an initialized
+ # but empty dictionary.
+ if self._diffs_by_file == None:
+ # Compute a single diff for all files and parse the output; with
+ # git this is much faster than computing one diff for each
+ # file.
+ diffs = {}
- # Don't specify any filenames below, because there are command line length
- # limits on some platforms and GenerateDiff would fail.
- unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
- branch=self._upstream)
+ # Don't specify any filenames below, because there are command line
+ # length limits on some platforms and GenerateDiff would fail.
+ unified_diff = scm.GIT.GenerateDiff(local_root,
+ files=[],
+ full_move=True,
+ branch=self._upstream)
- # This regex matches the path twice, separated by a space. Note that
- # filename itself may contain spaces.
- file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
- current_diff = []
- keep_line_endings = True
- for x in unified_diff.splitlines(keep_line_endings):
- match = file_marker.match(x)
- if match:
- # Marks the start of a new per-file section.
- diffs[match.group('filename')] = current_diff = [x]
- elif x.startswith('diff --git'):
- raise PresubmitFailure('Unexpected diff line: %s' % x)
- else:
- current_diff.append(x)
+ # This regex matches the path twice, separated by a space. Note that
+ # filename itself may contain spaces.
+ file_marker = re.compile(
+ '^diff --git (?P<filename>.*) (?P=filename)$')
+ current_diff = []
+ keep_line_endings = True
+ for x in unified_diff.splitlines(keep_line_endings):
+ match = file_marker.match(x)
+ if match:
+ # Marks the start of a new per-file section.
+ diffs[match.group('filename')] = current_diff = [x]
+ elif x.startswith('diff --git'):
+ raise PresubmitFailure('Unexpected diff line: %s' % x)
+ else:
+ current_diff.append(x)
- self._diffs_by_file = dict(
- (normpath(path), ''.join(diff)) for path, diff in diffs.items())
+ self._diffs_by_file = dict(
+ (normpath(path), ''.join(diff)) for path, diff in diffs.items())
- if path not in self._diffs_by_file:
- # SCM didn't have any diff on this file. It could be that the file was not
- # modified at all (e.g. user used --all flag in git cl presubmit).
- # Intead of failing, return empty string.
- # See: https://crbug.com/808346.
- return ''
+ if path not in self._diffs_by_file:
+ # SCM didn't have any diff on this file. It could be that the file
+ # was not modified at all (e.g. user used --all flag in git cl
+ # presubmit). Instead of failing, return an empty string. See:
+ # https://crbug.com/808346.
+ return ''
- return self._diffs_by_file[path]
+ return self._diffs_by_file[path]
- def GetOldContents(self, path, local_root):
- return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
+ def GetOldContents(self, path, local_root):
+ return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
- """Representation of a file in a change."""
+ """Representation of a file in a change."""
- DIFF_CACHE = _DiffCache
+ DIFF_CACHE = _DiffCache
- # Method could be a function
- # pylint: disable=no-self-use
- def __init__(self, path, action, repository_root, diff_cache):
- self._path = path
- self._action = action
- self._local_root = repository_root
- self._is_directory = None
- self._cached_changed_contents = None
- self._cached_new_contents = None
- self._diff_cache = diff_cache
- logging.debug('%s(%s)', self.__class__.__name__, self._path)
+ # Method could be a function
+ # pylint: disable=no-self-use
+ def __init__(self, path, action, repository_root, diff_cache):
+ self._path = path
+ self._action = action
+ self._local_root = repository_root
+ self._is_directory = None
+ self._cached_changed_contents = None
+ self._cached_new_contents = None
+ self._diff_cache = diff_cache
+ logging.debug('%s(%s)', self.__class__.__name__, self._path)
- def LocalPath(self):
- """Returns the path of this file on the local disk relative to client root.
+ def LocalPath(self):
+ """Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
- return normpath(self._path)
+ return normpath(self._path)
- def AbsoluteLocalPath(self):
- """Returns the absolute path of this file on the local disk.
+ def AbsoluteLocalPath(self):
+ """Returns the absolute path of this file on the local disk.
"""
- return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
+ return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
- def Action(self):
- """Returns the action on this opened file, e.g. A, M, D, etc."""
- return self._action
+ def Action(self):
+ """Returns the action on this opened file, e.g. A, M, D, etc."""
+ return self._action
- def IsTestableFile(self):
- """Returns True if the file is a text file and not a binary file.
+ def IsTestableFile(self):
+ """Returns True if the file is a text file and not a binary file.
Deleted files are not text file."""
- raise NotImplementedError() # Implement when needed
+ raise NotImplementedError() # Implement when needed
- def IsTextFile(self):
- """An alias to IsTestableFile for backwards compatibility."""
- return self.IsTestableFile()
+ def IsTextFile(self):
+ """An alias to IsTestableFile for backwards compatibility."""
+ return self.IsTestableFile()
- def OldContents(self):
- """Returns an iterator over the lines in the old version of file.
+ def OldContents(self):
+ """Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the 'left hand side'.
@@ -985,11 +1033,11 @@ class AffectedFile(object):
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
- return self._diff_cache.GetOldContents(self.LocalPath(),
- self._local_root).splitlines()
+ return self._diff_cache.GetOldContents(self.LocalPath(),
+ self._local_root).splitlines()
- def NewContents(self):
- """Returns an iterator over the lines in the new version of file.
+ def NewContents(self):
+ """Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the 'right hand
side'.
@@ -997,83 +1045,84 @@ class AffectedFile(object):
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
- if self._cached_new_contents is None:
- self._cached_new_contents = []
- try:
- self._cached_new_contents = gclient_utils.FileRead(
- self.AbsoluteLocalPath(), 'rU').splitlines()
- except IOError:
- pass # File not found? That's fine; maybe it was deleted.
- except UnicodeDecodeError as e:
- # log the filename since we're probably trying to read a binary
- # file, and shouldn't be.
- print('Error reading %s: %s' % (self.AbsoluteLocalPath(), e))
- raise
+ if self._cached_new_contents is None:
+ self._cached_new_contents = []
+ try:
+ self._cached_new_contents = gclient_utils.FileRead(
+ self.AbsoluteLocalPath(), 'rU').splitlines()
+ except IOError:
+ pass # File not found? That's fine; maybe it was deleted.
+ except UnicodeDecodeError as e:
+ # log the filename since we're probably trying to read a binary
+ # file, and shouldn't be.
+ print('Error reading %s: %s' % (self.AbsoluteLocalPath(), e))
+ raise
- return self._cached_new_contents[:]
+ return self._cached_new_contents[:]
- def ChangedContents(self, keeplinebreaks=False):
- """Returns a list of tuples (line number, line text) of all new lines.
+ def ChangedContents(self, keeplinebreaks=False):
+ """Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
- # Don't return cached results when line breaks are requested.
- if not keeplinebreaks and self._cached_changed_contents is not None:
- return self._cached_changed_contents[:]
- result = []
- line_num = 0
+ # Don't return cached results when line breaks are requested.
+ if not keeplinebreaks and self._cached_changed_contents is not None:
+ return self._cached_changed_contents[:]
+ result = []
+ line_num = 0
- # The keeplinebreaks parameter to splitlines must be True or else the
- # CheckForWindowsLineEndings presubmit will be a NOP.
- for line in self.GenerateScmDiff().splitlines(keeplinebreaks):
- m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
- if m:
- line_num = int(m.groups(1)[0])
- continue
- if line.startswith('+') and not line.startswith('++'):
- result.append((line_num, line[1:]))
- if not line.startswith('-'):
- line_num += 1
- # Don't cache results with line breaks.
- if keeplinebreaks:
- return result;
- self._cached_changed_contents = result
- return self._cached_changed_contents[:]
+ # The keeplinebreaks parameter to splitlines must be True or else the
+ # CheckForWindowsLineEndings presubmit will be a NOP.
+ for line in self.GenerateScmDiff().splitlines(keeplinebreaks):
+ m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
+ if m:
+ line_num = int(m.groups(1)[0])
+ continue
+ if line.startswith('+') and not line.startswith('++'):
+ result.append((line_num, line[1:]))
+ if not line.startswith('-'):
+ line_num += 1
+ # Don't cache results with line breaks.
+ if keeplinebreaks:
+ return result
+ self._cached_changed_contents = result
+ return self._cached_changed_contents[:]
- def __str__(self):
- return self.LocalPath()
+ def __str__(self):
+ return self.LocalPath()
- def GenerateScmDiff(self):
- return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
+ def GenerateScmDiff(self):
+ return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
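
To illustrate the hunk parsing in ChangedContents above: the `@@` header gives the right-hand starting line, added lines are collected, and removed lines do not advance the counter. A standalone sketch of the same loop on a tiny hand-written diff (not a call into AffectedFile):

import re

# Minimal hand-written unified diff: one removed line, two added lines.
sample_diff = '\n'.join([
    '@@ -1,2 +1,3 @@',
    ' unchanged line',
    '-removed line',
    '+added line one',
    '+added line two',
])

result = []
line_num = 0
for line in sample_diff.splitlines():
    m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
    if m:
        # New-file starting line number for this hunk.
        line_num = int(m.groups(1)[0])
        continue
    if line.startswith('+') and not line.startswith('++'):
        result.append((line_num, line[1:]))
    if not line.startswith('-'):
        line_num += 1

print(result)  # [(2, 'added line one'), (3, 'added line two')]
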
class GitAffectedFile(AffectedFile):
- """Representation of a file in a change out of a git checkout."""
- # Method 'NNN' is abstract in class 'NNN' but is not overridden
- # pylint: disable=abstract-method
+ """Representation of a file in a change out of a git checkout."""
+ # Method 'NNN' is abstract in class 'NNN' but is not overridden
+ # pylint: disable=abstract-method
- DIFF_CACHE = _GitDiffCache
+ DIFF_CACHE = _GitDiffCache
- def __init__(self, *args, **kwargs):
- AffectedFile.__init__(self, *args, **kwargs)
- self._server_path = None
- self._is_testable_file = None
+ def __init__(self, *args, **kwargs):
+ AffectedFile.__init__(self, *args, **kwargs)
+ self._server_path = None
+ self._is_testable_file = None
- def IsTestableFile(self):
- if self._is_testable_file is None:
- if self.Action() == 'D':
- # A deleted file is not testable.
- self._is_testable_file = False
- else:
- self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
- return self._is_testable_file
+ def IsTestableFile(self):
+ if self._is_testable_file is None:
+ if self.Action() == 'D':
+ # A deleted file is not testable.
+ self._is_testable_file = False
+ else:
+ self._is_testable_file = os.path.isfile(
+ self.AbsoluteLocalPath())
+ return self._is_testable_file
class Change(object):
- """Describe a change.
+ """Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
@@ -1083,84 +1132,90 @@ class Change(object):
self.KEY: equivalent to tags['KEY']
"""
- _AFFECTED_FILES = AffectedFile
+ _AFFECTED_FILES = AffectedFile
- # Matches key/value (or 'tag') lines in changelist descriptions.
- TAG_LINE_RE = re.compile(
- '^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
- scm = ''
+ # Matches key/value (or 'tag') lines in changelist descriptions.
+ TAG_LINE_RE = re.compile(
+ '^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
+ scm = ''
- def __init__(
- self, name, description, local_root, files, issue, patchset, author,
- upstream=None):
- if files is None:
- files = []
- self._name = name
- # Convert root into an absolute path.
- self._local_root = os.path.abspath(local_root)
- self._upstream = upstream
- self.issue = issue
- self.patchset = patchset
- self.author_email = author
+ def __init__(self,
+ name,
+ description,
+ local_root,
+ files,
+ issue,
+ patchset,
+ author,
+ upstream=None):
+ if files is None:
+ files = []
+ self._name = name
+ # Convert root into an absolute path.
+ self._local_root = os.path.abspath(local_root)
+ self._upstream = upstream
+ self.issue = issue
+ self.patchset = patchset
+ self.author_email = author
- self._full_description = ''
- self.tags = {}
- self._description_without_tags = ''
- self.SetDescriptionText(description)
+ self._full_description = ''
+ self.tags = {}
+ self._description_without_tags = ''
+ self.SetDescriptionText(description)
- assert all(
- (isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
+ assert all((isinstance(f, (list, tuple)) and len(f) == 2)
+ for f in files), files
- diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
- self._affected_files = [
- self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
- for action, path in files
- ]
+ diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
+ self._affected_files = [
+ self._AFFECTED_FILES(path, action.strip(), self._local_root,
+ diff_cache) for action, path in files
+ ]
- def UpstreamBranch(self):
- """Returns the upstream branch for the change."""
- return self._upstream
+ def UpstreamBranch(self):
+ """Returns the upstream branch for the change."""
+ return self._upstream
- def Name(self):
- """Returns the change name."""
- return self._name
+ def Name(self):
+ """Returns the change name."""
+ return self._name
- def DescriptionText(self):
- """Returns the user-entered changelist description, minus tags.
+ def DescriptionText(self):
+ """Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. 'FOO='
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
- return self._description_without_tags
+ return self._description_without_tags
- def FullDescriptionText(self):
- """Returns the complete changelist description including tags."""
- return self._full_description
+ def FullDescriptionText(self):
+ """Returns the complete changelist description including tags."""
+ return self._full_description
- def SetDescriptionText(self, description):
- """Sets the full description text (including tags) to |description|.
+ def SetDescriptionText(self, description):
+ """Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
- self._full_description = description
+ self._full_description = description
- # From the description text, build up a dictionary of key/value pairs
- # plus the description minus all key/value or 'tag' lines.
- description_without_tags = []
- self.tags = {}
- for line in self._full_description.splitlines():
- m = self.TAG_LINE_RE.match(line)
- if m:
- self.tags[m.group('key')] = m.group('value')
- else:
- description_without_tags.append(line)
+ # From the description text, build up a dictionary of key/value pairs
+ # plus the description minus all key/value or 'tag' lines.
+ description_without_tags = []
+ self.tags = {}
+ for line in self._full_description.splitlines():
+ m = self.TAG_LINE_RE.match(line)
+ if m:
+ self.tags[m.group('key')] = m.group('value')
+ else:
+ description_without_tags.append(line)
- # Change back to text and remove whitespace at end.
- self._description_without_tags = (
- '\n'.join(description_without_tags).rstrip())
+ # Change back to text and remove whitespace at end.
+ self._description_without_tags = (
+ '\n'.join(description_without_tags).rstrip())
- def AddDescriptionFooter(self, key, value):
- """Adds the given footer to the change description.
+ def AddDescriptionFooter(self, key, value):
+ """Adds the given footer to the change description.
Args:
key: A string with the key for the git footer. It must conform to
@@ -1168,79 +1223,86 @@ class Change(object):
normalized so that each token is title-cased.
value: A string with the value for the git footer.
"""
- description = git_footers.add_footer(
- self.FullDescriptionText(), git_footers.normalize_name(key), value)
- self.SetDescriptionText(description)
+ description = git_footers.add_footer(self.FullDescriptionText(),
+ git_footers.normalize_name(key),
+ value)
+ self.SetDescriptionText(description)
- def RepositoryRoot(self):
- """Returns the repository (checkout) root directory for this change,
+ def RepositoryRoot(self):
+ """Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
- return self._local_root
+ return self._local_root
- def __getattr__(self, attr):
- """Return tags directly as attributes on the object."""
- if not re.match(r'^[A-Z_]*$', attr):
- raise AttributeError(self, attr)
- return self.tags.get(attr)
+ def __getattr__(self, attr):
+ """Return tags directly as attributes on the object."""
+ if not re.match(r'^[A-Z_]*$', attr):
+ raise AttributeError(self, attr)
+ return self.tags.get(attr)
- def GitFootersFromDescription(self):
- """Return the git footers present in the description.
+ def GitFootersFromDescription(self):
+ """Return the git footers present in the description.
Returns:
footers: A dict of {footer: [values]} containing a multimap of the footers
in the change description.
"""
- return git_footers.parse_footers(self.FullDescriptionText())
+ return git_footers.parse_footers(self.FullDescriptionText())
- def BugsFromDescription(self):
- """Returns all bugs referenced in the commit description."""
- bug_tags = ['BUG', 'FIXED']
+ def BugsFromDescription(self):
+ """Returns all bugs referenced in the commit description."""
+ bug_tags = ['BUG', 'FIXED']
- tags = []
- for tag in bug_tags:
- values = self.tags.get(tag)
- if values:
- tags += [value.strip() for value in values.split(',')]
+ tags = []
+ for tag in bug_tags:
+ values = self.tags.get(tag)
+ if values:
+ tags += [value.strip() for value in values.split(',')]
- footers = []
- parsed = self.GitFootersFromDescription()
- unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
- for unsplit_footer in unsplit_footers:
- footers += [b.strip() for b in unsplit_footer.split(',')]
- return sorted(set(tags + footers))
+ footers = []
+ parsed = self.GitFootersFromDescription()
+ unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
+ for unsplit_footer in unsplit_footers:
+ footers += [b.strip() for b in unsplit_footer.split(',')]
+ return sorted(set(tags + footers))
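
A small sketch of how BUG=/FIXED= tag lines and Bug:/Fixed: git footers end up merged, assuming this file is importable as depot_tools' presubmit_support module (all values are made up):

import presubmit_support

description = '\n'.join([
    'Fix the widget frobnicator.',
    '',
    'BUG=123, 456',
    'Bug: chromium:789',
])
# Change's constructor signature is shown above; issue/patchset are unused here.
change = presubmit_support.Change(
    name='fix-widget', description=description, local_root='.',
    files=[], issue=None, patchset=None, author='someone@example.com')
print(change.BugsFromDescription())  # expected: ['123', '456', 'chromium:789']
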
- def ReviewersFromDescription(self):
- """Returns all reviewers listed in the commit description."""
- # We don't support a 'R:' git-footer for reviewers; that is in metadata.
- tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
- return sorted(set(tags))
+ def ReviewersFromDescription(self):
+ """Returns all reviewers listed in the commit description."""
+ # We don't support a 'R:' git-footer for reviewers; that is in metadata.
+ tags = [
+ r.strip() for r in self.tags.get('R', '').split(',') if r.strip()
+ ]
+ return sorted(set(tags))
- def TBRsFromDescription(self):
- """Returns all TBR reviewers listed in the commit description."""
- tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
- # TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
- # programmatically determined by self-CR+1s.
- footers = self.GitFootersFromDescription().get('Tbr', [])
- return sorted(set(tags + footers))
+ def TBRsFromDescription(self):
+ """Returns all TBR reviewers listed in the commit description."""
+ tags = [
+ r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()
+ ]
+ # TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
+ # programmatically determined by self-CR+1s.
+ footers = self.GitFootersFromDescription().get('Tbr', [])
+ return sorted(set(tags + footers))
- # TODO(crbug.com/753425): Delete these once we're sure they're unused.
- @property
- def BUG(self):
- return ','.join(self.BugsFromDescription())
- @property
- def R(self):
- return ','.join(self.ReviewersFromDescription())
- @property
- def TBR(self):
- return ','.join(self.TBRsFromDescription())
+ # TODO(crbug.com/753425): Delete these once we're sure they're unused.
+ @property
+ def BUG(self):
+ return ','.join(self.BugsFromDescription())
- def AllFiles(self, root=None):
- """List all files under source control in the repo."""
- raise NotImplementedError()
+ @property
+ def R(self):
+ return ','.join(self.ReviewersFromDescription())
- def AffectedFiles(self, include_deletes=True, file_filter=None):
- """Returns a list of AffectedFile instances for all files in the change.
+ @property
+ def TBR(self):
+ return ','.join(self.TBRsFromDescription())
+
+ def AllFiles(self, root=None):
+ """List all files under source control in the repo."""
+ raise NotImplementedError()
+
+ def AffectedFiles(self, include_deletes=True, file_filter=None):
+ """Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
@@ -1249,37 +1311,37 @@ class Change(object):
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
- affected = list(filter(file_filter, self._affected_files))
+ affected = list(filter(file_filter, self._affected_files))
- if include_deletes:
- return affected
- return list(filter(lambda x: x.Action() != 'D', affected))
+ if include_deletes:
+ return affected
+ return list(filter(lambda x: x.Action() != 'D', affected))
- def AffectedTestableFiles(self, include_deletes=None, **kwargs):
- """Return a list of the existing text files in a change."""
- if include_deletes is not None:
- warn('AffectedTeestableFiles(include_deletes=%s)'
- ' is deprecated and ignored' % str(include_deletes),
- category=DeprecationWarning,
- stacklevel=2)
- return list(filter(
- lambda x: x.IsTestableFile(),
- self.AffectedFiles(include_deletes=False, **kwargs)))
+ def AffectedTestableFiles(self, include_deletes=None, **kwargs):
+ """Return a list of the existing text files in a change."""
+ if include_deletes is not None:
+            warn('AffectedTestableFiles(include_deletes=%s)'
+ ' is deprecated and ignored' % str(include_deletes),
+ category=DeprecationWarning,
+ stacklevel=2)
+ return list(
+ filter(lambda x: x.IsTestableFile(),
+ self.AffectedFiles(include_deletes=False, **kwargs)))
- def AffectedTextFiles(self, include_deletes=None):
- """An alias to AffectedTestableFiles for backwards compatibility."""
- return self.AffectedTestableFiles(include_deletes=include_deletes)
+ def AffectedTextFiles(self, include_deletes=None):
+ """An alias to AffectedTestableFiles for backwards compatibility."""
+ return self.AffectedTestableFiles(include_deletes=include_deletes)
- def LocalPaths(self):
- """Convenience function."""
- return [af.LocalPath() for af in self.AffectedFiles()]
+ def LocalPaths(self):
+ """Convenience function."""
+ return [af.LocalPath() for af in self.AffectedFiles()]
- def AbsoluteLocalPaths(self):
- """Convenience function."""
- return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
+ def AbsoluteLocalPaths(self):
+ """Convenience function."""
+ return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
- def RightHandSideLines(self):
- """An iterator over all text lines in 'new' version of changed files.
+ def RightHandSideLines(self):
+ """An iterator over all text lines in 'new' version of changed files.
Lists lines from new or modified text files in the change.
@@ -1292,32 +1354,33 @@ class Change(object):
integer line number (1-based); and
the contents of the line as a string.
"""
- return _RightHandSideLinesImpl(
- x for x in self.AffectedFiles(include_deletes=False)
- if x.IsTestableFile())
+ return _RightHandSideLinesImpl(
+ x for x in self.AffectedFiles(include_deletes=False)
+ if x.IsTestableFile())
- def OriginalOwnersFiles(self):
- """A map from path names of affected OWNERS files to their old content."""
- def owners_file_filter(f):
- return 'OWNERS' in os.path.split(f.LocalPath())[1]
- files = self.AffectedFiles(file_filter=owners_file_filter)
- return {f.LocalPath(): f.OldContents() for f in files}
+ def OriginalOwnersFiles(self):
+ """A map from path names of affected OWNERS files to their old content."""
+ def owners_file_filter(f):
+ return 'OWNERS' in os.path.split(f.LocalPath())[1]
+
+ files = self.AffectedFiles(file_filter=owners_file_filter)
+ return {f.LocalPath(): f.OldContents() for f in files}
class GitChange(Change):
- _AFFECTED_FILES = GitAffectedFile
- scm = 'git'
+ _AFFECTED_FILES = GitAffectedFile
+ scm = 'git'
- def AllFiles(self, root=None):
- """List all files under source control in the repo."""
- root = root or self.RepositoryRoot()
- return subprocess.check_output(
- ['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
- cwd=root).decode('utf-8', 'ignore').splitlines()
+ def AllFiles(self, root=None):
+ """List all files under source control in the repo."""
+ root = root or self.RepositoryRoot()
+ return subprocess.check_output(
+ ['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
+ cwd=root).decode('utf-8', 'ignore').splitlines()
def ListRelevantPresubmitFiles(files, root):
- """Finds all presubmit files that apply to a given set of source files.
+ """Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
@@ -1329,58 +1392,59 @@ def ListRelevantPresubmitFiles(files, root):
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
- files = [normpath(os.path.join(root, f)) for f in files]
+ files = [normpath(os.path.join(root, f)) for f in files]
- # List all the individual directories containing files.
- directories = {os.path.dirname(f) for f in files}
+ # List all the individual directories containing files.
+ directories = {os.path.dirname(f) for f in files}
- # Ignore root if inherit-review-settings-ok is present.
- if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
- root = None
+ # Ignore root if inherit-review-settings-ok is present.
+ if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
+ root = None
- # Collect all unique directories that may contain PRESUBMIT.py.
- candidates = set()
- for directory in directories:
- while True:
- if directory in candidates:
- break
- candidates.add(directory)
- if directory == root:
- break
- parent_dir = os.path.dirname(directory)
- if parent_dir == directory:
- # We hit the system root directory.
- break
- directory = parent_dir
+ # Collect all unique directories that may contain PRESUBMIT.py.
+ candidates = set()
+ for directory in directories:
+ while True:
+ if directory in candidates:
+ break
+ candidates.add(directory)
+ if directory == root:
+ break
+ parent_dir = os.path.dirname(directory)
+ if parent_dir == directory:
+ # We hit the system root directory.
+ break
+ directory = parent_dir
- # Look for PRESUBMIT.py in all candidate directories.
- results = []
- for directory in sorted(list(candidates)):
- try:
- for f in os.listdir(directory):
- p = os.path.join(directory, f)
- if os.path.isfile(p) and re.match(
- r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
- results.append(p)
- except OSError:
- pass
+ # Look for PRESUBMIT.py in all candidate directories.
+ results = []
+ for directory in sorted(list(candidates)):
+ try:
+ for f in os.listdir(directory):
+ p = os.path.join(directory, f)
+ if os.path.isfile(p) and re.match(
+ r'PRESUBMIT.*\.py$',
+ f) and not f.startswith('PRESUBMIT_test'):
+ results.append(p)
+ except OSError:
+ pass
- logging.debug('Presubmit files: %s', ','.join(results))
- return results
+ logging.debug('Presubmit files: %s', ','.join(results))
+ return results
class GetPostUploadExecuter(object):
- def __init__(self, change, gerrit_obj):
- """
+ def __init__(self, change, gerrit_obj):
+ """
Args:
change: The Change object.
gerrit_obj: provides basic Gerrit codereview functionality.
"""
- self.change = change
- self.gerrit = gerrit_obj
+ self.change = change
+ self.gerrit = gerrit_obj
- def ExecPresubmitScript(self, script_text, presubmit_path):
- """Executes PostUploadHook() from a single presubmit script.
+ def ExecPresubmitScript(self, script_text, presubmit_path):
+ """Executes PostUploadHook() from a single presubmit script.
Caller is responsible for validating whether the hook should be executed
and should only call this function if it should be.
@@ -1391,97 +1455,107 @@ class GetPostUploadExecuter(object):
Return:
A list of results objects.
"""
- # Change to the presubmit file's directory to support local imports.
- presubmit_dir = os.path.dirname(presubmit_path)
- main_path = os.getcwd()
- try:
- os.chdir(presubmit_dir)
- return self._execute_with_local_working_directory(script_text,
- presubmit_dir,
- presubmit_path)
- finally:
- # Return the process to the original working directory.
- os.chdir(main_path)
+ # Change to the presubmit file's directory to support local imports.
+ presubmit_dir = os.path.dirname(presubmit_path)
+ main_path = os.getcwd()
+ try:
+ os.chdir(presubmit_dir)
+ return self._execute_with_local_working_directory(
+ script_text, presubmit_dir, presubmit_path)
+ finally:
+ # Return the process to the original working directory.
+ os.chdir(main_path)
- def _execute_with_local_working_directory(self, script_text, presubmit_dir,
- presubmit_path):
- context = {}
- try:
- exec(compile(script_text, presubmit_path, 'exec', dont_inherit=True),
- context)
- except Exception as e:
- raise PresubmitFailure('"%s" had an exception.\n%s'
- % (presubmit_path, e))
+ def _execute_with_local_working_directory(self, script_text, presubmit_dir,
+ presubmit_path):
+ context = {}
+ try:
+ exec(
+ compile(script_text, presubmit_path, 'exec', dont_inherit=True),
+ context)
+ except Exception as e:
+ raise PresubmitFailure('"%s" had an exception.\n%s' %
+ (presubmit_path, e))
- function_name = 'PostUploadHook'
- if function_name not in context:
- return {}
- post_upload_hook = context[function_name]
- if not len(inspect.getfullargspec(post_upload_hook)[0]) == 3:
- raise PresubmitFailure(
- 'Expected function "PostUploadHook" to take three arguments.')
- return post_upload_hook(self.gerrit, self.change, OutputApi(False))
+ function_name = 'PostUploadHook'
+ if function_name not in context:
+ return {}
+ post_upload_hook = context[function_name]
+ if not len(inspect.getfullargspec(post_upload_hook)[0]) == 3:
+ raise PresubmitFailure(
+ 'Expected function "PostUploadHook" to take three arguments.')
+ return post_upload_hook(self.gerrit, self.change, OutputApi(False))
def _MergeMasters(masters1, masters2):
- """Merges two master maps. Merges also the tests of each builder."""
- result = {}
- for (master, builders) in itertools.chain(masters1.items(),
- masters2.items()):
- new_builders = result.setdefault(master, {})
- for (builder, tests) in builders.items():
- new_builders.setdefault(builder, set([])).update(tests)
- return result
+ """Merges two master maps. Merges also the tests of each builder."""
+ result = {}
+ for (master, builders) in itertools.chain(masters1.items(),
+ masters2.items()):
+ new_builders = result.setdefault(master, {})
+ for (builder, tests) in builders.items():
+ new_builders.setdefault(builder, set([])).update(tests)
+ return result
def DoPostUploadExecuter(change, gerrit_obj, verbose):
- """Execute the post upload hook.
+ """Execute the post upload hook.
Args:
change: The Change object.
gerrit_obj: The GerritAccessor object.
verbose: Prints debug info.
"""
- python_version = 'Python %s' % sys.version_info.major
- sys.stdout.write('Running %s post upload checks ...\n' % python_version)
- presubmit_files = ListRelevantPresubmitFiles(
- change.LocalPaths(), change.RepositoryRoot())
- if not presubmit_files and verbose:
- sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
- results = []
- executer = GetPostUploadExecuter(change, gerrit_obj)
- # The root presubmit file should be executed after the ones in subdirectories.
- # i.e. the specific post upload hooks should run before the general ones.
- # Thus, reverse the order provided by ListRelevantPresubmitFiles.
- presubmit_files.reverse()
+ python_version = 'Python %s' % sys.version_info.major
+ sys.stdout.write('Running %s post upload checks ...\n' % python_version)
+ presubmit_files = ListRelevantPresubmitFiles(change.LocalPaths(),
+ change.RepositoryRoot())
+ if not presubmit_files and verbose:
+ sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
+ results = []
+ executer = GetPostUploadExecuter(change, gerrit_obj)
+ # The root presubmit file should be executed after the ones in
+    # subdirectories, i.e. the specific post upload hooks should run before the
+ # general ones. Thus, reverse the order provided by
+ # ListRelevantPresubmitFiles.
+ presubmit_files.reverse()
- for filename in presubmit_files:
- filename = os.path.abspath(filename)
- # Accept CRLF presubmit script.
- presubmit_script = gclient_utils.FileRead(filename).replace('\r\n', '\n')
- if verbose:
- sys.stdout.write('Running %s\n' % filename)
- results.extend(executer.ExecPresubmitScript(presubmit_script, filename))
+ for filename in presubmit_files:
+ filename = os.path.abspath(filename)
+ # Accept CRLF presubmit script.
+ presubmit_script = gclient_utils.FileRead(filename).replace(
+ '\r\n', '\n')
+ if verbose:
+ sys.stdout.write('Running %s\n' % filename)
+ results.extend(executer.ExecPresubmitScript(presubmit_script, filename))
- if not results:
- return 0
+ if not results:
+ return 0
- sys.stdout.write('\n')
- sys.stdout.write('** Post Upload Hook Messages **\n')
-
- exit_code = 0
- for result in results:
- if result.fatal:
- exit_code = 1
- result.handle()
sys.stdout.write('\n')
+ sys.stdout.write('** Post Upload Hook Messages **\n')
+
+ exit_code = 0
+ for result in results:
+ if result.fatal:
+ exit_code = 1
+ result.handle()
+ sys.stdout.write('\n')
+
+ return exit_code
- return exit_code
class PresubmitExecuter(object):
- def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
- thread_pool=None, parallel=False, no_diffs=False):
- """
+ def __init__(self,
+ change,
+ committing,
+ verbose,
+ gerrit_obj,
+ dry_run=None,
+ thread_pool=None,
+ parallel=False,
+ no_diffs=False):
+ """
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
@@ -1492,18 +1566,18 @@ class PresubmitExecuter(object):
no_diffs: if true, implies that --files or --all was specified so some
checks can be skipped, and some errors will be messages.
"""
- self.change = change
- self.committing = committing
- self.gerrit = gerrit_obj
- self.verbose = verbose
- self.dry_run = dry_run
- self.more_cc = []
- self.thread_pool = thread_pool
- self.parallel = parallel
- self.no_diffs = no_diffs
+ self.change = change
+ self.committing = committing
+ self.gerrit = gerrit_obj
+ self.verbose = verbose
+ self.dry_run = dry_run
+ self.more_cc = []
+ self.thread_pool = thread_pool
+ self.parallel = parallel
+ self.no_diffs = no_diffs
- def ExecPresubmitScript(self, script_text, presubmit_path):
- """Executes a single presubmit script.
+ def ExecPresubmitScript(self, script_text, presubmit_path):
+ """Executes a single presubmit script.
Caller is responsible for validating whether the hook should be executed
and should only call this function if it should be.
@@ -1515,107 +1589,119 @@ class PresubmitExecuter(object):
Return:
A list of result objects, empty if no problems.
"""
- # Change to the presubmit file's directory to support local imports.
- presubmit_dir = os.path.dirname(presubmit_path)
- main_path = os.getcwd()
- try:
- os.chdir(presubmit_dir)
- return self._execute_with_local_working_directory(script_text,
- presubmit_dir,
- presubmit_path)
- finally:
- # Return the process to the original working directory.
- os.chdir(main_path)
+ # Change to the presubmit file's directory to support local imports.
+ presubmit_dir = os.path.dirname(presubmit_path)
+ main_path = os.getcwd()
+ try:
+ os.chdir(presubmit_dir)
+ return self._execute_with_local_working_directory(
+ script_text, presubmit_dir, presubmit_path)
+ finally:
+ # Return the process to the original working directory.
+ os.chdir(main_path)
- def _execute_with_local_working_directory(self, script_text, presubmit_dir,
- presubmit_path):
- # Load the presubmit script into context.
- input_api = InputApi(self.change, presubmit_path, self.committing,
- self.verbose, gerrit_obj=self.gerrit,
- dry_run=self.dry_run, thread_pool=self.thread_pool,
- parallel=self.parallel, no_diffs=self.no_diffs)
- output_api = OutputApi(self.committing)
- context = {}
+ def _execute_with_local_working_directory(self, script_text, presubmit_dir,
+ presubmit_path):
+ # Load the presubmit script into context.
+ input_api = InputApi(self.change,
+ presubmit_path,
+ self.committing,
+ self.verbose,
+ gerrit_obj=self.gerrit,
+ dry_run=self.dry_run,
+ thread_pool=self.thread_pool,
+ parallel=self.parallel,
+ no_diffs=self.no_diffs)
+ output_api = OutputApi(self.committing)
+ context = {}
- try:
- exec(compile(script_text, presubmit_path, 'exec', dont_inherit=True),
- context)
- except Exception as e:
- raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
+ try:
+ exec(
+ compile(script_text, presubmit_path, 'exec', dont_inherit=True),
+ context)
+ except Exception as e:
+ raise PresubmitFailure('"%s" had an exception.\n%s' %
+ (presubmit_path, e))
- context['__args'] = (input_api, output_api)
+ context['__args'] = (input_api, output_api)
- # Get path of presubmit directory relative to repository root.
- # Always use forward slashes, so that path is same in *nix and Windows
- root = input_api.change.RepositoryRoot()
- rel_path = os.path.relpath(presubmit_dir, root)
- rel_path = rel_path.replace(os.path.sep, '/')
+ # Get path of presubmit directory relative to repository root.
+ # Always use forward slashes, so that path is same in *nix and Windows
+ root = input_api.change.RepositoryRoot()
+ rel_path = os.path.relpath(presubmit_dir, root)
+ rel_path = rel_path.replace(os.path.sep, '/')
- # Get the URL of git remote origin and use it to identify host and project
- host = project = ''
- if self.gerrit:
- host = self.gerrit.host or ''
- project = self.gerrit.project or ''
+ # Get the URL of git remote origin and use it to identify host and
+ # project
+ host = project = ''
+ if self.gerrit:
+ host = self.gerrit.host or ''
+ project = self.gerrit.project or ''
- # Prefix for test names
- prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)
+ # Prefix for test names
+ prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)
- # Perform all the desired presubmit checks.
- results = []
+ # Perform all the desired presubmit checks.
+ results = []
- try:
- version = [
- int(x) for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
- ]
+ try:
+ version = [
+ int(x)
+ for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
+ ]
- with rdb_wrapper.client(prefix) as sink:
- if version >= [2, 0, 0]:
- # Copy the keys to prevent "dictionary changed size during iteration"
- # exception if checks add globals to context. E.g. sometimes the
- # Python runtime will add __warningregistry__.
- for function_name in list(context.keys()):
- if not function_name.startswith('Check'):
- continue
- if function_name.endswith('Commit') and not self.committing:
- continue
- if function_name.endswith('Upload') and self.committing:
- continue
- logging.debug('Running %s in %s', function_name, presubmit_path)
- results.extend(
- self._run_check_function(function_name, context, sink,
- presubmit_path))
- logging.debug('Running %s done.', function_name)
- self.more_cc.extend(output_api.more_cc)
- # Clear the CC list between running each presubmit check to prevent
- # CCs from being repeatedly appended.
- output_api.more_cc = []
+ with rdb_wrapper.client(prefix) as sink:
+ if version >= [2, 0, 0]:
+ # Copy the keys to prevent "dictionary changed size during
+ # iteration" exception if checks add globals to context.
+ # E.g. sometimes the Python runtime will add
+ # __warningregistry__.
+ for function_name in list(context.keys()):
+ if not function_name.startswith('Check'):
+ continue
+ if function_name.endswith(
+ 'Commit') and not self.committing:
+ continue
+ if function_name.endswith('Upload') and self.committing:
+ continue
+ logging.debug('Running %s in %s', function_name,
+ presubmit_path)
+ results.extend(
+ self._run_check_function(function_name, context,
+ sink, presubmit_path))
+ logging.debug('Running %s done.', function_name)
+ self.more_cc.extend(output_api.more_cc)
+ # Clear the CC list between running each presubmit check
+ # to prevent CCs from being repeatedly appended.
+ output_api.more_cc = []
- else: # Old format
- if self.committing:
- function_name = 'CheckChangeOnCommit'
- else:
- function_name = 'CheckChangeOnUpload'
- if function_name in list(context.keys()):
- logging.debug('Running %s in %s', function_name, presubmit_path)
- results.extend(
- self._run_check_function(function_name, context, sink,
- presubmit_path))
- logging.debug('Running %s done.', function_name)
- self.more_cc.extend(output_api.more_cc)
- # Clear the CC list between running each presubmit check to prevent
- # CCs from being repeatedly appended.
- output_api.more_cc = []
+ else: # Old format
+ if self.committing:
+ function_name = 'CheckChangeOnCommit'
+ else:
+ function_name = 'CheckChangeOnUpload'
+ if function_name in list(context.keys()):
+ logging.debug('Running %s in %s', function_name,
+ presubmit_path)
+ results.extend(
+ self._run_check_function(function_name, context,
+ sink, presubmit_path))
+ logging.debug('Running %s done.', function_name)
+ self.more_cc.extend(output_api.more_cc)
+ # Clear the CC list between running each presubmit check
+ # to prevent CCs from being repeatedly appended.
+ output_api.more_cc = []
- finally:
- for f in input_api._named_temporary_files:
- os.remove(f)
+ finally:
+ for f in input_api._named_temporary_files:
+ os.remove(f)
- self.more_cc = sorted(set(self.more_cc))
+ self.more_cc = sorted(set(self.more_cc))
- return results
+ return results
- def _run_check_function(self, function_name, context, sink, presubmit_path):
- """Evaluates and returns the result of a given presubmit function.
+ def _run_check_function(self, function_name, context, sink, presubmit_path):
+ """Evaluates and returns the result of a given presubmit function.
If sink is given, the result of the presubmit function will be reported
to the ResultSink.
@@ -1627,48 +1713,50 @@ class PresubmitExecuter(object):
Returns:
the result of the presubmit function call.
"""
- start_time = time_time()
- try:
- result = eval(function_name + '(*__args)', context)
- self._check_result_type(result)
- except Exception:
- _, e_value, _ = sys.exc_info()
- result = [
- OutputApi.PresubmitError(
- 'Evaluation of %s failed: %s, %s' %
- (function_name, e_value, traceback.format_exc()))
- ]
+ start_time = time_time()
+ try:
+ result = eval(function_name + '(*__args)', context)
+ self._check_result_type(result)
+ except Exception:
+ _, e_value, _ = sys.exc_info()
+ result = [
+ OutputApi.PresubmitError(
+ 'Evaluation of %s failed: %s, %s' %
+ (function_name, e_value, traceback.format_exc()))
+ ]
- elapsed_time = time_time() - start_time
- if elapsed_time > 10.0:
- sys.stdout.write('%6.1fs to run %s from %s.\n' %
- (elapsed_time, function_name, presubmit_path))
- if sink:
- failure_reason = None
- status = rdb_wrapper.STATUS_PASS
- if any(r.fatal for r in result):
- status = rdb_wrapper.STATUS_FAIL
- failure_reasons = []
- for r in result:
- fields = r.json_format()
- message = fields['message']
- items = '\n'.join(' %s' % item for item in fields['items'])
- failure_reasons.append('\n'.join([message, items]))
- if failure_reasons:
- failure_reason = '\n'.join(failure_reasons)
- sink.report(function_name, status, elapsed_time, failure_reason)
+ elapsed_time = time_time() - start_time
+ if elapsed_time > 10.0:
+ sys.stdout.write('%6.1fs to run %s from %s.\n' %
+ (elapsed_time, function_name, presubmit_path))
+ if sink:
+ failure_reason = None
+ status = rdb_wrapper.STATUS_PASS
+ if any(r.fatal for r in result):
+ status = rdb_wrapper.STATUS_FAIL
+ failure_reasons = []
+ for r in result:
+ fields = r.json_format()
+ message = fields['message']
+ items = '\n'.join(' %s' % item for item in fields['items'])
+ failure_reasons.append('\n'.join([message, items]))
+ if failure_reasons:
+ failure_reason = '\n'.join(failure_reasons)
+ sink.report(function_name, status, elapsed_time, failure_reason)
- return result
+ return result
- def _check_result_type(self, result):
- """Helper function which ensures result is a list, and all elements are
+ def _check_result_type(self, result):
+ """Helper function which ensures result is a list, and all elements are
instances of OutputApi.PresubmitResult"""
- if not isinstance(result, (tuple, list)):
- raise PresubmitFailure('Presubmit functions must return a tuple or list')
- if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
- raise PresubmitFailure(
- 'All presubmit results must be of types derived from '
- 'output_api.PresubmitResult')
+ if not isinstance(result, (tuple, list)):
+ raise PresubmitFailure(
+ 'Presubmit functions must return a tuple or list')
+ if not all(
+ isinstance(res, OutputApi.PresubmitResult) for res in result):
+ raise PresubmitFailure(
+ 'All presubmit results must be of types derived from '
+ 'output_api.PresubmitResult')
def DoPresubmitChecks(change,
@@ -1681,7 +1769,7 @@ def DoPresubmitChecks(change,
parallel=False,
json_output=None,
no_diffs=False):
- """Runs all presubmit checks that apply to the files in the change.
+ """Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
@@ -1706,144 +1794,148 @@ def DoPresubmitChecks(change,
Return:
1 if presubmit checks failed or 0 otherwise.
"""
- old_environ = os.environ
- try:
- # Make sure python subprocesses won't generate .pyc files.
- os.environ = os.environ.copy()
- os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
+ old_environ = os.environ
+ try:
+ # Make sure python subprocesses won't generate .pyc files.
+ os.environ = os.environ.copy()
+ os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
- python_version = 'Python %s' % sys.version_info.major
- if committing:
- sys.stdout.write('Running %s presubmit commit checks ...\n' %
- python_version)
- else:
- sys.stdout.write('Running %s presubmit upload checks ...\n' %
- python_version)
- start_time = time_time()
- presubmit_files = ListRelevantPresubmitFiles(
- change.AbsoluteLocalPaths(), change.RepositoryRoot())
- if not presubmit_files and verbose:
- sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
- results = []
- thread_pool = ThreadPool()
- executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
- dry_run, thread_pool, parallel, no_diffs)
- if default_presubmit:
- if verbose:
- sys.stdout.write('Running default presubmit script.\n')
- fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
- results += executer.ExecPresubmitScript(default_presubmit, fake_path)
- for filename in presubmit_files:
- filename = os.path.abspath(filename)
- # Accept CRLF presubmit script.
- presubmit_script = gclient_utils.FileRead(filename).replace('\r\n', '\n')
- if verbose:
- sys.stdout.write('Running %s\n' % filename)
- results += executer.ExecPresubmitScript(presubmit_script, filename)
+ python_version = 'Python %s' % sys.version_info.major
+ if committing:
+ sys.stdout.write('Running %s presubmit commit checks ...\n' %
+ python_version)
+ else:
+ sys.stdout.write('Running %s presubmit upload checks ...\n' %
+ python_version)
+ start_time = time_time()
+ presubmit_files = ListRelevantPresubmitFiles(
+ change.AbsoluteLocalPaths(), change.RepositoryRoot())
+ if not presubmit_files and verbose:
+ sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
+ results = []
+ thread_pool = ThreadPool()
+ executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
+ dry_run, thread_pool, parallel, no_diffs)
+ if default_presubmit:
+ if verbose:
+ sys.stdout.write('Running default presubmit script.\n')
+ fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
+ results += executer.ExecPresubmitScript(default_presubmit,
+ fake_path)
+ for filename in presubmit_files:
+ filename = os.path.abspath(filename)
+ # Accept CRLF presubmit script.
+ presubmit_script = gclient_utils.FileRead(filename).replace(
+ '\r\n', '\n')
+ if verbose:
+ sys.stdout.write('Running %s\n' % filename)
+ results += executer.ExecPresubmitScript(presubmit_script, filename)
- results += thread_pool.RunAsync()
+ results += thread_pool.RunAsync()
- messages = {}
- should_prompt = False
- presubmits_failed = False
- for result in results:
- if result.fatal:
- presubmits_failed = True
- messages.setdefault('ERRORS', []).append(result)
- elif result.should_prompt:
- should_prompt = True
- messages.setdefault('Warnings', []).append(result)
- else:
- messages.setdefault('Messages', []).append(result)
+ messages = {}
+ should_prompt = False
+ presubmits_failed = False
+ for result in results:
+ if result.fatal:
+ presubmits_failed = True
+ messages.setdefault('ERRORS', []).append(result)
+ elif result.should_prompt:
+ should_prompt = True
+ messages.setdefault('Warnings', []).append(result)
+ else:
+ messages.setdefault('Messages', []).append(result)
- # Print the different message types in a consistent order. ERRORS go last
- # so that they will be most visible in the local-presubmit output.
- for name in ['Messages', 'Warnings', 'ERRORS']:
- if name in messages:
- items = messages[name]
- sys.stdout.write('** Presubmit %s: %d **\n' % (name, len(items)))
- for item in items:
- item.handle()
- sys.stdout.write('\n')
+ # Print the different message types in a consistent order. ERRORS go
+ # last so that they will be most visible in the local-presubmit output.
+ for name in ['Messages', 'Warnings', 'ERRORS']:
+ if name in messages:
+ items = messages[name]
+ sys.stdout.write('** Presubmit %s: %d **\n' %
+ (name, len(items)))
+ for item in items:
+ item.handle()
+ sys.stdout.write('\n')
- total_time = time_time() - start_time
- if total_time > 1.0:
- sys.stdout.write(
- 'Presubmit checks took %.1fs to calculate.\n' % total_time)
+ total_time = time_time() - start_time
+ if total_time > 1.0:
+ sys.stdout.write('Presubmit checks took %.1fs to calculate.\n' %
+ total_time)
- if not should_prompt and not presubmits_failed:
- sys.stdout.write('%s presubmit checks passed.\n\n' % python_version)
- elif should_prompt and not presubmits_failed:
- sys.stdout.write('There were %s presubmit warnings. ' % python_version)
- if may_prompt:
- presubmits_failed = not prompt_should_continue(
- 'Are you sure you wish to continue? (y/N): ')
- else:
- sys.stdout.write('\n')
- else:
- sys.stdout.write('There were %s presubmit errors.\n' % python_version)
+ if not should_prompt and not presubmits_failed:
+ sys.stdout.write('%s presubmit checks passed.\n\n' % python_version)
+ elif should_prompt and not presubmits_failed:
+ sys.stdout.write('There were %s presubmit warnings. ' %
+ python_version)
+ if may_prompt:
+ presubmits_failed = not prompt_should_continue(
+ 'Are you sure you wish to continue? (y/N): ')
+ else:
+ sys.stdout.write('\n')
+ else:
+ sys.stdout.write('There were %s presubmit errors.\n' %
+ python_version)
- if json_output:
- # Write the presubmit results to json output
- presubmit_results = {
- 'errors': [
- error.json_format()
- for error in messages.get('ERRORS', [])
- ],
- 'notifications': [
- notification.json_format()
- for notification in messages.get('Messages', [])
- ],
- 'warnings': [
- warning.json_format()
- for warning in messages.get('Warnings', [])
- ],
- 'more_cc': executer.more_cc,
- }
+ if json_output:
+ # Write the presubmit results to json output
+ presubmit_results = {
+ 'errors':
+ [error.json_format() for error in messages.get('ERRORS', [])],
+ 'notifications': [
+ notification.json_format()
+ for notification in messages.get('Messages', [])
+ ],
+ 'warnings': [
+ warning.json_format()
+ for warning in messages.get('Warnings', [])
+ ],
+ 'more_cc':
+ executer.more_cc,
+ }
- gclient_utils.FileWrite(
- json_output, json.dumps(presubmit_results, sort_keys=True))
+ gclient_utils.FileWrite(
+ json_output, json.dumps(presubmit_results, sort_keys=True))
- global _ASKED_FOR_FEEDBACK
- # Ask for feedback one time out of 5.
- if (results and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
- sys.stdout.write(
- 'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
- 'to figure out which PRESUBMIT.py was run, then run git blame\n'
- 'on the file to figure out who to ask for help.\n')
- _ASKED_FOR_FEEDBACK = True
+ global _ASKED_FOR_FEEDBACK
+ # Ask for feedback one time out of 5.
+ if (results and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
+ sys.stdout.write(
+ 'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
+ 'to figure out which PRESUBMIT.py was run, then run git blame\n'
+ 'on the file to figure out who to ask for help.\n')
+ _ASKED_FOR_FEEDBACK = True
- return 1 if presubmits_failed else 0
- finally:
- os.environ = old_environ
+ return 1 if presubmits_failed else 0
+ finally:
+ os.environ = old_environ
def _scan_sub_dirs(mask, recursive):
- if not recursive:
- return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
+ if not recursive:
+ return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
- results = []
- for root, dirs, files in os.walk('.'):
- if '.svn' in dirs:
- dirs.remove('.svn')
- if '.git' in dirs:
- dirs.remove('.git')
- for name in files:
- if fnmatch.fnmatch(name, mask):
- results.append(os.path.join(root, name))
- return results
+ results = []
+ for root, dirs, files in os.walk('.'):
+ if '.svn' in dirs:
+ dirs.remove('.svn')
+ if '.git' in dirs:
+ dirs.remove('.git')
+ for name in files:
+ if fnmatch.fnmatch(name, mask):
+ results.append(os.path.join(root, name))
+ return results
def _parse_files(args, recursive):
- logging.debug('Searching for %s', args)
- files = []
- for arg in args:
- files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
- return files
+ logging.debug('Searching for %s', args)
+ files = []
+ for arg in args:
+ files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
+ return files
def _parse_change(parser, options):
- """Process change options.
+ """Process change options.
Args:
parser: The parser used to parse the arguments from command line.
@@ -1851,47 +1943,46 @@ def _parse_change(parser, options):
Returns:
A GitChange if the change root is a git repository, or a Change otherwise.
"""
- if options.files and options.all_files:
-    parser.error('<files> cannot be specified when --all-files is set.')
+ if options.files and options.all_files:
+        parser.error('<files> cannot be specified when --all-files is set.')
- change_scm = scm.determine_scm(options.root)
- if change_scm != 'git' and not options.files:
-    parser.error('<files> is not optional for unversioned directories.')
+ change_scm = scm.determine_scm(options.root)
+ if change_scm != 'git' and not options.files:
+        parser.error('<files> is not optional for unversioned directories.')
- if options.files:
- if options.source_controlled_only:
- # Get the filtered set of files from SCM.
- change_files = []
- for name in scm.GIT.GetAllFiles(options.root):
- for mask in options.files:
- if fnmatch.fnmatch(name, mask):
- change_files.append(('M', name))
- break
+ if options.files:
+ if options.source_controlled_only:
+ # Get the filtered set of files from SCM.
+ change_files = []
+ for name in scm.GIT.GetAllFiles(options.root):
+ for mask in options.files:
+ if fnmatch.fnmatch(name, mask):
+ change_files.append(('M', name))
+ break
+ else:
+ # Get the filtered set of files from a directory scan.
+ change_files = _parse_files(options.files, options.recursive)
+ elif options.all_files:
+ change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
else:
- # Get the filtered set of files from a directory scan.
- change_files = _parse_files(options.files, options.recursive)
- elif options.all_files:
- change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
- else:
- change_files = scm.GIT.CaptureStatus(
- options.root, options.upstream or None)
+ change_files = scm.GIT.CaptureStatus(options.root, options.upstream
+ or None)
- logging.info('Found %d file(s).', len(change_files))
+ logging.info('Found %d file(s).', len(change_files))
- change_class = GitChange if change_scm == 'git' else Change
- return change_class(
- options.name,
- options.description,
- options.root,
- change_files,
- options.issue,
- options.patchset,
- options.author,
- upstream=options.upstream)
+ change_class = GitChange if change_scm == 'git' else Change
+ return change_class(options.name,
+ options.description,
+ options.root,
+ change_files,
+ options.issue,
+ options.patchset,
+ options.author,
+ upstream=options.upstream)
def _parse_gerrit_options(parser, options):
- """Process gerrit options.
+ """Process gerrit options.
SIDE EFFECTS: Modifies options.author and options.description from Gerrit if
options.gerrit_fetch is set.
@@ -1902,153 +1993,174 @@ def _parse_gerrit_options(parser, options):
Returns:
A GerritAccessor object if options.gerrit_url is set, or None otherwise.
"""
- gerrit_obj = None
- if options.gerrit_url:
- gerrit_obj = GerritAccessor(
- url=options.gerrit_url,
- project=options.gerrit_project,
- branch=options.gerrit_branch)
+ gerrit_obj = None
+ if options.gerrit_url:
+ gerrit_obj = GerritAccessor(url=options.gerrit_url,
+ project=options.gerrit_project,
+ branch=options.gerrit_branch)
+
+ if not options.gerrit_fetch:
+ return gerrit_obj
+
+ if not options.gerrit_url or not options.issue or not options.patchset:
+ parser.error(
+ '--gerrit_fetch requires --gerrit_url, --issue and --patchset.')
+
+ options.author = gerrit_obj.GetChangeOwner(options.issue)
+ options.description = gerrit_obj.GetChangeDescription(
+ options.issue, options.patchset)
+
+ logging.info('Got author: "%s"', options.author)
+ logging.info('Got description: """\n%s\n"""', options.description)
- if not options.gerrit_fetch:
return gerrit_obj
- if not options.gerrit_url or not options.issue or not options.patchset:
- parser.error(
- '--gerrit_fetch requires --gerrit_url, --issue and --patchset.')
-
- options.author = gerrit_obj.GetChangeOwner(options.issue)
- options.description = gerrit_obj.GetChangeDescription(
- options.issue, options.patchset)
-
- logging.info('Got author: "%s"', options.author)
- logging.info('Got description: """\n%s\n"""', options.description)
-
- return gerrit_obj
-
@contextlib.contextmanager
def canned_check_filter(method_names):
- filtered = {}
- try:
- for method_name in method_names:
- if not hasattr(presubmit_canned_checks, method_name):
- logging.warning('Skipping unknown "canned" check %s' % method_name)
- continue
- filtered[method_name] = getattr(presubmit_canned_checks, method_name)
- setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
- yield
- finally:
- for name, method in filtered.items():
- setattr(presubmit_canned_checks, name, method)
+ filtered = {}
+ try:
+ for method_name in method_names:
+ if not hasattr(presubmit_canned_checks, method_name):
+ logging.warning('Skipping unknown "canned" check %s' %
+ method_name)
+ continue
+ filtered[method_name] = getattr(presubmit_canned_checks,
+ method_name)
+ setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
+ yield
+ finally:
+ for name, method in filtered.items():
+ setattr(presubmit_canned_checks, name, method)
def main(argv=None):
-  parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
- hooks = parser.add_mutually_exclusive_group()
- hooks.add_argument('-c', '--commit', action='store_true',
- help='Use commit instead of upload checks.')
- hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
- help='Use upload instead of commit checks.')
- hooks.add_argument('--post_upload', action='store_true',
- help='Run post-upload commit hooks.')
- parser.add_argument('-r', '--recursive', action='store_true',
- help='Act recursively.')
- parser.add_argument('-v', '--verbose', action='count', default=0,
- help='Use 2 times for more debug info.')
- parser.add_argument('--name', default='no name')
- parser.add_argument('--author')
- desc = parser.add_mutually_exclusive_group()
- desc.add_argument('--description', default='', help='The change description.')
- desc.add_argument('--description_file',
- help='File to read change description from.')
- parser.add_argument('--issue', type=int, default=0)
- parser.add_argument('--patchset', type=int, default=0)
- parser.add_argument('--root', default=os.getcwd(),
- help='Search for PRESUBMIT.py up to this directory. '
- 'If inherit-review-settings-ok is present in this '
- 'directory, parent directories up to the root file '
- 'system directories will also be searched.')
- parser.add_argument('--upstream',
- help='Git only: the base ref or upstream branch against '
- 'which the diff should be computed.')
- parser.add_argument('--default_presubmit')
- parser.add_argument('--may_prompt', action='store_true', default=False)
- parser.add_argument('--skip_canned', action='append', default=[],
- help='A list of checks to skip which appear in '
- 'presubmit_canned_checks. Can be provided multiple times '
- 'to skip multiple canned checks.')
- parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
- parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
- parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
- parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
- parser.add_argument('--gerrit_fetch', action='store_true',
- help=argparse.SUPPRESS)
- parser.add_argument('--parallel', action='store_true',
- help='Run all tests specified by input_api.RunTests in '
- 'all PRESUBMIT files in parallel.')
- parser.add_argument('--json_output',
- help='Write presubmit errors to json output.')
- parser.add_argument('--all_files', action='store_true',
- help='Mark all files under source control as modified.')
+    parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
+ hooks = parser.add_mutually_exclusive_group()
+ hooks.add_argument('-c',
+ '--commit',
+ action='store_true',
+ help='Use commit instead of upload checks.')
+ hooks.add_argument('-u',
+ '--upload',
+ action='store_false',
+ dest='commit',
+ help='Use upload instead of commit checks.')
+ hooks.add_argument('--post_upload',
+ action='store_true',
+ help='Run post-upload commit hooks.')
+ parser.add_argument('-r',
+ '--recursive',
+ action='store_true',
+ help='Act recursively.')
+ parser.add_argument('-v',
+ '--verbose',
+ action='count',
+ default=0,
+ help='Use 2 times for more debug info.')
+ parser.add_argument('--name', default='no name')
+ parser.add_argument('--author')
+ desc = parser.add_mutually_exclusive_group()
+ desc.add_argument('--description',
+ default='',
+ help='The change description.')
+ desc.add_argument('--description_file',
+ help='File to read change description from.')
+ parser.add_argument('--issue', type=int, default=0)
+ parser.add_argument('--patchset', type=int, default=0)
+ parser.add_argument('--root',
+ default=os.getcwd(),
+ help='Search for PRESUBMIT.py up to this directory. '
+ 'If inherit-review-settings-ok is present in this '
+ 'directory, parent directories up to the root file '
+ 'system directories will also be searched.')
+ parser.add_argument(
+ '--upstream',
+ help='Git only: the base ref or upstream branch against '
+ 'which the diff should be computed.')
+ parser.add_argument('--default_presubmit')
+ parser.add_argument('--may_prompt', action='store_true', default=False)
+ parser.add_argument(
+ '--skip_canned',
+ action='append',
+ default=[],
+ help='A list of checks to skip which appear in '
+ 'presubmit_canned_checks. Can be provided multiple times '
+ 'to skip multiple canned checks.')
+ parser.add_argument('--dry_run',
+ action='store_true',
+ help=argparse.SUPPRESS)
+ parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
+ parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
+ parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
+ parser.add_argument('--gerrit_fetch',
+ action='store_true',
+ help=argparse.SUPPRESS)
+ parser.add_argument('--parallel',
+ action='store_true',
+ help='Run all tests specified by input_api.RunTests in '
+ 'all PRESUBMIT files in parallel.')
+ parser.add_argument('--json_output',
+ help='Write presubmit errors to json output.')
+ parser.add_argument('--all_files',
+ action='store_true',
+ help='Mark all files under source control as modified.')
- parser.add_argument('files', nargs='*',
- help='List of files to be marked as modified when '
- 'executing presubmit or post-upload hooks. fnmatch '
- 'wildcards can also be used.')
- parser.add_argument('--source_controlled_only', action='store_true',
- help='Constrain \'files\' to those in source control.')
- parser.add_argument('--no_diffs', action='store_true',
- help='Assume that all "modified" files have no diffs.')
- options = parser.parse_args(argv)
+ parser.add_argument('files',
+ nargs='*',
+ help='List of files to be marked as modified when '
+ 'executing presubmit or post-upload hooks. fnmatch '
+ 'wildcards can also be used.')
+ parser.add_argument('--source_controlled_only',
+ action='store_true',
+ help='Constrain \'files\' to those in source control.')
+ parser.add_argument('--no_diffs',
+ action='store_true',
+ help='Assume that all "modified" files have no diffs.')
+ options = parser.parse_args(argv)
- log_level = logging.ERROR
- if options.verbose >= 2:
- log_level = logging.DEBUG
- elif options.verbose:
- log_level = logging.INFO
- log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
- '%(filename)s] %(message)s')
- logging.basicConfig(format=log_format, level=log_level)
+ log_level = logging.ERROR
+ if options.verbose >= 2:
+ log_level = logging.DEBUG
+ elif options.verbose:
+ log_level = logging.INFO
+ log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
+ '%(filename)s] %(message)s')
+ logging.basicConfig(format=log_format, level=log_level)
- # Print call stacks when _PresubmitResult objects are created with -v -v is
- # specified. This helps track down where presubmit messages are coming from.
- if options.verbose >= 2:
- global _SHOW_CALLSTACKS
- _SHOW_CALLSTACKS = True
+    # Print call stacks when _PresubmitResult objects are created if -v -v is
+ # specified. This helps track down where presubmit messages are coming from.
+ if options.verbose >= 2:
+ global _SHOW_CALLSTACKS
+ _SHOW_CALLSTACKS = True
- if options.description_file:
- options.description = gclient_utils.FileRead(options.description_file)
- gerrit_obj = _parse_gerrit_options(parser, options)
- change = _parse_change(parser, options)
+ if options.description_file:
+ options.description = gclient_utils.FileRead(options.description_file)
+ gerrit_obj = _parse_gerrit_options(parser, options)
+ change = _parse_change(parser, options)
- try:
- if options.post_upload:
- return DoPostUploadExecuter(change, gerrit_obj, options.verbose)
- with canned_check_filter(options.skip_canned):
- return DoPresubmitChecks(
- change,
- options.commit,
- options.verbose,
- options.default_presubmit,
- options.may_prompt,
- gerrit_obj,
- options.dry_run,
- options.parallel,
- options.json_output,
- options.no_diffs)
- except PresubmitFailure as e:
- import utils
- print(e, file=sys.stderr)
- print('Maybe your depot_tools is out of date?', file=sys.stderr)
- print('depot_tools version: %s' % utils.depot_tools_version(),
- file=sys.stderr)
- return 2
+ try:
+ if options.post_upload:
+ return DoPostUploadExecuter(change, gerrit_obj, options.verbose)
+ with canned_check_filter(options.skip_canned):
+ return DoPresubmitChecks(change, options.commit, options.verbose,
+ options.default_presubmit,
+ options.may_prompt, gerrit_obj,
+ options.dry_run, options.parallel,
+ options.json_output, options.no_diffs)
+ except PresubmitFailure as e:
+ import utils
+ print(e, file=sys.stderr)
+ print('Maybe your depot_tools is out of date?', file=sys.stderr)
+ print('depot_tools version: %s' % utils.depot_tools_version(),
+ file=sys.stderr)
+ return 2
if __name__ == '__main__':
- fix_encoding.fix_encoding()
- try:
- sys.exit(main())
- except KeyboardInterrupt:
- sys.stderr.write('interrupted\n')
- sys.exit(2)
+ fix_encoding.fix_encoding()
+ try:
+ sys.exit(main())
+ except KeyboardInterrupt:
+ sys.stderr.write('interrupted\n')
+ sys.exit(2)
diff --git a/pylint-2.6 b/pylint-2.6
index e65fd5d9f1..a7a541b690 100755
--- a/pylint-2.6
+++ b/pylint-2.6
@@ -69,4 +69,4 @@ import sys
import pylint_main
if __name__ == '__main__':
- sys.exit(pylint_main.main(sys.argv[1:]))
+ sys.exit(pylint_main.main(sys.argv[1:]))
diff --git a/pylint-2.7 b/pylint-2.7
index d8c52ac8d4..ae30b899b3 100755
--- a/pylint-2.7
+++ b/pylint-2.7
@@ -69,4 +69,4 @@ import sys
import pylint_main
if __name__ == '__main__':
- sys.exit(pylint_main.main(sys.argv[1:]))
+ sys.exit(pylint_main.main(sys.argv[1:]))
diff --git a/pylint_main.py b/pylint_main.py
index 25cb40b6a3..e9717ebb07 100755
--- a/pylint_main.py
+++ b/pylint_main.py
@@ -2,7 +2,6 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Chromium wrapper for pylint for passing args via stdin.
This will be executed by vpython with the right pylint versions.
@@ -21,36 +20,37 @@ ARGS_ON_STDIN = '--args-on-stdin'
def main(argv):
- """Our main wrapper."""
- # Add support for a custom mode where arguments are fed line by line on
- # stdin. This allows us to get around command line length limitations.
- if ARGS_ON_STDIN in argv:
- argv = [x for x in argv if x != ARGS_ON_STDIN]
- argv.extend(x.strip() for x in sys.stdin)
+ """Our main wrapper."""
+ # Add support for a custom mode where arguments are fed line by line on
+ # stdin. This allows us to get around command line length limitations.
+ if ARGS_ON_STDIN in argv:
+ argv = [x for x in argv if x != ARGS_ON_STDIN]
+ argv.extend(x.strip() for x in sys.stdin)
- # Set default config options with the PYLINTRC environment variable. This will
- # allow overriding with "more local" config file options, such as a local
- # "pylintrc" file, the "--rcfile" command-line flag, or an existing PYLINTRC.
- #
- # Note that this is not quite the same thing as replacing pylint's built-in
- # defaults, since, based on config file precedence, it will not be overridden
- # by "more global" config file options, such as ~/.pylintrc,
- # ~/.config/pylintrc, or /etc/pylintrc. This is generally the desired
- # behavior, since we want to enforce these defaults in most cases, but allow
- # them to be overridden for specific code or repos.
- #
- # If someone really doesn't ever want the depot_tools pylintrc, they can set
- # their own PYLINTRC, or set an empty PYLINTRC to use pylint's normal config
- # file resolution, which would include the "more global" options that are
- # normally overridden by the depot_tools config.
- if os.path.isfile(RC_FILE) and 'PYLINTRC' not in os.environ:
- os.environ['PYLINTRC'] = RC_FILE
+ # Set default config options with the PYLINTRC environment variable. This
+ # will allow overriding with "more local" config file options, such as a
+ # local "pylintrc" file, the "--rcfile" command-line flag, or an existing
+ # PYLINTRC.
+ #
+ # Note that this is not quite the same thing as replacing pylint's built-in
+ # defaults, since, based on config file precedence, it will not be
+ # overridden by "more global" config file options, such as ~/.pylintrc,
+ # ~/.config/pylintrc, or /etc/pylintrc. This is generally the desired
+ # behavior, since we want to enforce these defaults in most cases, but allow
+ # them to be overridden for specific code or repos.
+ #
+ # If someone really doesn't ever want the depot_tools pylintrc, they can set
+ # their own PYLINTRC, or set an empty PYLINTRC to use pylint's normal config
+ # file resolution, which would include the "more global" options that are
+ # normally overridden by the depot_tools config.
+ if os.path.isfile(RC_FILE) and 'PYLINTRC' not in os.environ:
+ os.environ['PYLINTRC'] = RC_FILE
- # This import has to happen after PYLINTRC is set because the module tries to
- # resolve the config file location on load.
- from pylint import lint # pylint: disable=bad-option-value,import-outside-toplevel
- lint.Run(argv)
+ # This import has to happen after PYLINTRC is set because the module tries
+ # to resolve the config file location on load.
+ from pylint import lint # pylint: disable=bad-option-value,import-outside-toplevel
+ lint.Run(argv)
if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+ sys.exit(main(sys.argv[1:]))
diff --git a/rdb_wrapper.py b/rdb_wrapper.py
index b6021e627a..2e0843a92c 100644
--- a/rdb_wrapper.py
+++ b/rdb_wrapper.py
@@ -15,23 +15,21 @@ STATUS_CRASH = 'CRASH'
STATUS_ABORT = 'ABORT'
STATUS_SKIP = 'SKIP'
-
# ResultDB limits failure reasons to 1024 characters.
_FAILURE_REASON_LENGTH_LIMIT = 1024
-
# Message to use at the end of a truncated failure reason.
_FAILURE_REASON_TRUNCATE_TEXT = '\n...\nFailure reason was truncated.'
class ResultSink(object):
- def __init__(self, session, url, prefix):
- self._session = session
- self._url = url
- self._prefix = prefix
+ def __init__(self, session, url, prefix):
+ self._session = session
+ self._url = url
+ self._prefix = prefix
- def report(self, function_name, status, elapsed_time, failure_reason=None):
- """Reports the result and elapsed time of a presubmit function call.
+ def report(self, function_name, status, elapsed_time, failure_reason=None):
+ """Reports the result and elapsed time of a presubmit function call.
Args:
function_name (str): The name of the presubmit function
@@ -39,24 +37,24 @@ class ResultSink(object):
elapsed_time: the time taken to invoke the presubmit function
failure_reason (str or None): if set, the failure reason
"""
- tr = {
- 'testId': self._prefix + function_name,
- 'status': status,
- 'expected': status == STATUS_PASS,
- 'duration': '{:.9f}s'.format(elapsed_time)
- }
- if failure_reason:
- if len(failure_reason) > _FAILURE_REASON_LENGTH_LIMIT:
- failure_reason = failure_reason[
- :-len(_FAILURE_REASON_TRUNCATE_TEXT) - 1]
- failure_reason += _FAILURE_REASON_TRUNCATE_TEXT
- tr['failureReason'] = {'primaryErrorMessage': failure_reason}
- self._session.post(self._url, json={'testResults': [tr]})
+ tr = {
+ 'testId': self._prefix + function_name,
+ 'status': status,
+ 'expected': status == STATUS_PASS,
+ 'duration': '{:.9f}s'.format(elapsed_time)
+ }
+ if failure_reason:
+ if len(failure_reason) > _FAILURE_REASON_LENGTH_LIMIT:
+ failure_reason = failure_reason[:-len(
+ _FAILURE_REASON_TRUNCATE_TEXT) - 1]
+ failure_reason += _FAILURE_REASON_TRUNCATE_TEXT
+ tr['failureReason'] = {'primaryErrorMessage': failure_reason}
+ self._session.post(self._url, json={'testResults': [tr]})
@contextlib.contextmanager
def client(prefix):
- """Returns a client for ResultSink.
+ """Returns a client for ResultSink.
This is a context manager that returns a client for ResultSink,
if LUCI_CONTEXT with a section of result_sink is present. When the context
@@ -71,24 +69,24 @@ def client(prefix):
Returns:
An instance of ResultSink() if the luci context is present. None, otherwise.
"""
- luci_ctx = os.environ.get('LUCI_CONTEXT')
- if not luci_ctx:
- yield None
- return
+ luci_ctx = os.environ.get('LUCI_CONTEXT')
+ if not luci_ctx:
+ yield None
+ return
- sink_ctx = None
- with open(luci_ctx) as f:
- sink_ctx = json.load(f).get('result_sink')
- if not sink_ctx:
- yield None
- return
+ sink_ctx = None
+ with open(luci_ctx) as f:
+ sink_ctx = json.load(f).get('result_sink')
+ if not sink_ctx:
+ yield None
+ return
- url = 'http://{0}/prpc/luci.resultsink.v1.Sink/ReportTestResults'.format(
- sink_ctx['address'])
- with requests.Session() as s:
- s.headers = {
- 'Content-Type': 'application/json',
- 'Accept': 'application/json',
- 'Authorization': 'ResultSink {0}'.format(sink_ctx['auth_token'])
- }
- yield ResultSink(s, url, prefix)
+ url = 'http://{0}/prpc/luci.resultsink.v1.Sink/ReportTestResults'.format(
+ sink_ctx['address'])
+ with requests.Session() as s:
+ s.headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ 'Authorization': 'ResultSink {0}'.format(sink_ctx['auth_token'])
+ }
+ yield ResultSink(s, url, prefix)
diff --git a/fetch_configs/.style.yapf b/recipes/.style.yapf
similarity index 73%
rename from fetch_configs/.style.yapf
rename to recipes/.style.yapf
index 4741fb4f3b..24681e21f7 100644
--- a/fetch_configs/.style.yapf
+++ b/recipes/.style.yapf
@@ -1,3 +1,4 @@
[style]
based_on_style = pep8
+indent_width = 2
column_limit = 80
diff --git a/reclient_helper.py b/reclient_helper.py
index 15723d72f3..05fdc3b03a 100644
--- a/reclient_helper.py
+++ b/reclient_helper.py
@@ -19,65 +19,66 @@ import reclient_metrics
def find_reclient_bin_dir():
- tools_path = gclient_paths.GetBuildtoolsPath()
- if not tools_path:
- return None
+ tools_path = gclient_paths.GetBuildtoolsPath()
+ if not tools_path:
+ return None
- reclient_bin_dir = os.path.join(tools_path, 'reclient')
- if os.path.isdir(reclient_bin_dir):
- return reclient_bin_dir
- return None
+ reclient_bin_dir = os.path.join(tools_path, 'reclient')
+ if os.path.isdir(reclient_bin_dir):
+ return reclient_bin_dir
+ return None
def find_reclient_cfg():
- tools_path = gclient_paths.GetBuildtoolsPath()
- if not tools_path:
- return None
+ tools_path = gclient_paths.GetBuildtoolsPath()
+ if not tools_path:
+ return None
- reclient_cfg = os.path.join(tools_path, 'reclient_cfgs', 'reproxy.cfg')
- if os.path.isfile(reclient_cfg):
- return reclient_cfg
- return None
+ reclient_cfg = os.path.join(tools_path, 'reclient_cfgs', 'reproxy.cfg')
+ if os.path.isfile(reclient_cfg):
+ return reclient_cfg
+ return None
def run(cmd_args):
- if os.environ.get('NINJA_SUMMARIZE_BUILD') == '1':
- print(' '.join(cmd_args))
- return subprocess.call(cmd_args)
+ if os.environ.get('NINJA_SUMMARIZE_BUILD') == '1':
+ print(' '.join(cmd_args))
+ return subprocess.call(cmd_args)
def start_reproxy(reclient_cfg, reclient_bin_dir):
- return run([
- os.path.join(reclient_bin_dir,
- 'bootstrap' + gclient_paths.GetExeSuffix()), '--re_proxy=' +
- os.path.join(reclient_bin_dir, 'reproxy' + gclient_paths.GetExeSuffix()),
- '--cfg=' + reclient_cfg
- ])
+ return run([
+ os.path.join(reclient_bin_dir,
+ 'bootstrap' + gclient_paths.GetExeSuffix()),
+ '--re_proxy=' + os.path.join(reclient_bin_dir,
+ 'reproxy' + gclient_paths.GetExeSuffix()),
+ '--cfg=' + reclient_cfg
+ ])
def stop_reproxy(reclient_cfg, reclient_bin_dir):
- return run([
- os.path.join(reclient_bin_dir,
- 'bootstrap' + gclient_paths.GetExeSuffix()), '--shutdown',
- '--cfg=' + reclient_cfg
- ])
+ return run([
+ os.path.join(reclient_bin_dir,
+ 'bootstrap' + gclient_paths.GetExeSuffix()), '--shutdown',
+ '--cfg=' + reclient_cfg
+ ])
def find_ninja_out_dir(args):
- # Ninja uses getopt_long, which allows to intermix non-option arguments.
- # To leave non supported parameters untouched, we do not use getopt.
- for index, arg in enumerate(args[1:]):
- if arg == '-C':
- # + 1 to get the next argument and +1 because we trimmed off args[0]
- return args[index + 2]
- if arg.startswith('-C'):
- # Support -Cout/Default
- return arg[2:]
- return '.'
+ # Ninja uses getopt_long, which allows intermixing non-option arguments.
+ # To leave unsupported parameters untouched, we do not use getopt.
+ for index, arg in enumerate(args[1:]):
+ if arg == '-C':
+ # + 1 to get the next argument and +1 because we trimmed off args[0]
+ return args[index + 2]
+ if arg.startswith('-C'):
+ # Support -Cout/Default
+ return arg[2:]
+ return '.'
def find_cache_dir(tmp_dir):
- """Helper to find the correct cache directory for a build.
+ """Helper to find the correct cache directory for a build.
tmp_dir should be a build specific temp directory within the out directory.
@@ -86,15 +87,15 @@ def find_cache_dir(tmp_dir):
If this is not called from within a gclient checkout, the cache dir will be:
tmp_dir/cache
"""
- gclient_root = gclient_paths.FindGclientRoot(os.getcwd())
- if gclient_root:
- return os.path.join(gclient_root, '.reproxy_cache',
- hashlib.md5(tmp_dir.encode()).hexdigest())
- return os.path.join(tmp_dir, 'cache')
+ gclient_root = gclient_paths.FindGclientRoot(os.getcwd())
+ if gclient_root:
+ return os.path.join(gclient_root, '.reproxy_cache',
+ hashlib.md5(tmp_dir.encode()).hexdigest())
+ return os.path.join(tmp_dir, 'cache')
def set_reproxy_metrics_flags(tool):
- """Helper to setup metrics collection flags for reproxy.
+ """Helper to setup metrics collection flags for reproxy.
The following env vars are set if not already set:
RBE_metrics_project=chromium-reclient-metrics
@@ -103,23 +104,23 @@ def set_reproxy_metrics_flags(tool):
RBE_metrics_labels=source=developer,tool={tool}
RBE_metrics_prefix=go.chromium.org
"""
- autoninja_id = os.environ.get("AUTONINJA_BUILD_ID")
- if autoninja_id is not None:
- os.environ.setdefault("RBE_invocation_id", autoninja_id)
- os.environ.setdefault("RBE_metrics_project", "chromium-reclient-metrics")
- os.environ.setdefault("RBE_metrics_table", "rbe_metrics.builds")
- os.environ.setdefault("RBE_metrics_labels", "source=developer,tool=" + tool)
- os.environ.setdefault("RBE_metrics_prefix", "go.chromium.org")
+ autoninja_id = os.environ.get("AUTONINJA_BUILD_ID")
+ if autoninja_id is not None:
+ os.environ.setdefault("RBE_invocation_id", autoninja_id)
+ os.environ.setdefault("RBE_metrics_project", "chromium-reclient-metrics")
+ os.environ.setdefault("RBE_metrics_table", "rbe_metrics.builds")
+ os.environ.setdefault("RBE_metrics_labels", "source=developer,tool=" + tool)
+ os.environ.setdefault("RBE_metrics_prefix", "go.chromium.org")
def remove_mdproxy_from_path():
- os.environ["PATH"] = os.pathsep.join(
- d for d in os.environ.get("PATH", "").split(os.pathsep)
- if "mdproxy" not in d)
+ os.environ["PATH"] = os.pathsep.join(
+ d for d in os.environ.get("PATH", "").split(os.pathsep)
+ if "mdproxy" not in d)
def set_reproxy_path_flags(out_dir, make_dirs=True):
- """Helper to setup the logs and cache directories for reclient.
+ """Helper to setup the logs and cache directories for reclient.
Creates the following directory structure if make_dirs is true:
If in a gclient checkout
@@ -146,98 +147,100 @@ def set_reproxy_path_flags(out_dir, make_dirs=True):
Windows Only:
RBE_server_address=pipe://md5(out_dir/.reproxy_tmp)/reproxy.pipe
"""
- tmp_dir = os.path.abspath(os.path.join(out_dir, '.reproxy_tmp'))
- log_dir = os.path.join(tmp_dir, 'logs')
- racing_dir = os.path.join(tmp_dir, 'racing')
- cache_dir = find_cache_dir(tmp_dir)
- if make_dirs:
- if os.path.exists(log_dir):
- try:
- # Clear log dir before each build to ensure correct metric aggregation.
- shutil.rmtree(log_dir)
- except OSError:
- print(
- "Couldn't clear logs because reproxy did "
- "not shutdown after the last build",
- file=sys.stderr)
- os.makedirs(tmp_dir, exist_ok=True)
- os.makedirs(log_dir, exist_ok=True)
- os.makedirs(cache_dir, exist_ok=True)
- os.makedirs(racing_dir, exist_ok=True)
- os.environ.setdefault("RBE_output_dir", log_dir)
- os.environ.setdefault("RBE_proxy_log_dir", log_dir)
- os.environ.setdefault("RBE_log_dir", log_dir)
- os.environ.setdefault("RBE_cache_dir", cache_dir)
- os.environ.setdefault("RBE_racing_tmp_dir", racing_dir)
- if sys.platform.startswith('win'):
- pipe_dir = hashlib.md5(tmp_dir.encode()).hexdigest()
- os.environ.setdefault("RBE_server_address",
- "pipe://%s/reproxy.pipe" % pipe_dir)
- else:
- # unix domain socket has path length limit, so use fixed size path here.
- # ref: https://www.man7.org/linux/man-pages/man7/unix.7.html
- os.environ.setdefault(
- "RBE_server_address", "unix:///tmp/reproxy_%s.sock" %
- hashlib.sha256(tmp_dir.encode()).hexdigest())
+ tmp_dir = os.path.abspath(os.path.join(out_dir, '.reproxy_tmp'))
+ log_dir = os.path.join(tmp_dir, 'logs')
+ racing_dir = os.path.join(tmp_dir, 'racing')
+ cache_dir = find_cache_dir(tmp_dir)
+ if make_dirs:
+ if os.path.exists(log_dir):
+ try:
+ # Clear log dir before each build to ensure correct metric
+ # aggregation.
+ shutil.rmtree(log_dir)
+ except OSError:
+ print(
+ "Couldn't clear logs because reproxy did "
+ "not shutdown after the last build",
+ file=sys.stderr)
+ os.makedirs(tmp_dir, exist_ok=True)
+ os.makedirs(log_dir, exist_ok=True)
+ os.makedirs(cache_dir, exist_ok=True)
+ os.makedirs(racing_dir, exist_ok=True)
+ os.environ.setdefault("RBE_output_dir", log_dir)
+ os.environ.setdefault("RBE_proxy_log_dir", log_dir)
+ os.environ.setdefault("RBE_log_dir", log_dir)
+ os.environ.setdefault("RBE_cache_dir", cache_dir)
+ os.environ.setdefault("RBE_racing_tmp_dir", racing_dir)
+ if sys.platform.startswith('win'):
+ pipe_dir = hashlib.md5(tmp_dir.encode()).hexdigest()
+ os.environ.setdefault("RBE_server_address",
+ "pipe://%s/reproxy.pipe" % pipe_dir)
+ else:
+ # unix domain sockets have a path length limit, so use a fixed-size path here.
+ # ref: https://www.man7.org/linux/man-pages/man7/unix.7.html
+ os.environ.setdefault(
+ "RBE_server_address", "unix:///tmp/reproxy_%s.sock" %
+ hashlib.sha256(tmp_dir.encode()).hexdigest())
def set_racing_defaults():
- os.environ.setdefault("RBE_local_resource_fraction", "0.2")
- os.environ.setdefault("RBE_racing_bias", "0.95")
+ os.environ.setdefault("RBE_local_resource_fraction", "0.2")
+ os.environ.setdefault("RBE_racing_bias", "0.95")
@contextlib.contextmanager
def build_context(argv, tool):
- # If use_remoteexec is set, but the reclient binaries or configs don't
- # exist, display an error message and stop. Otherwise, the build will
- # attempt to run with rewrapper wrapping actions, but will fail with
- # possible non-obvious problems.
- reclient_bin_dir = find_reclient_bin_dir()
- reclient_cfg = find_reclient_cfg()
- if reclient_bin_dir is None or reclient_cfg is None:
- print(('Build is configured to use reclient but necessary binaries '
- "or config files can't be found.\n"
- 'Please check if `"download_remoteexec_cfg": True` custom var is set'
- ' in `.gclient`, and run `gclient sync`.'),
- file=sys.stderr)
- yield 1
- return
+ # If use_remoteexec is set, but the reclient binaries or configs don't
+ # exist, display an error message and stop. Otherwise, the build will
+ # attempt to run with rewrapper wrapping actions, but will fail with
+ # possible non-obvious problems.
+ reclient_bin_dir = find_reclient_bin_dir()
+ reclient_cfg = find_reclient_cfg()
+ if reclient_bin_dir is None or reclient_cfg is None:
+ print(
+ 'Build is configured to use reclient but necessary binaries '
+ "or config files can't be found.\n"
+ 'Please check if `"download_remoteexec_cfg": True` custom var is '
+ 'set in `.gclient`, and run `gclient sync`.',
+ file=sys.stderr)
+ yield 1
+ return
- ninja_out = find_ninja_out_dir(argv)
+ ninja_out = find_ninja_out_dir(argv)
- try:
- set_reproxy_path_flags(ninja_out)
- except OSError:
- print("Error creating reproxy_tmp in output dir", file=sys.stderr)
- yield 1
- return
+ try:
+ set_reproxy_path_flags(ninja_out)
+ except OSError:
+ print("Error creating reproxy_tmp in output dir", file=sys.stderr)
+ yield 1
+ return
- if reclient_metrics.check_status(ninja_out):
- set_reproxy_metrics_flags(tool)
+ if reclient_metrics.check_status(ninja_out):
+ set_reproxy_metrics_flags(tool)
- if os.environ.get('RBE_instance', None):
- print('WARNING: Using RBE_instance=%s\n' %
- os.environ.get('RBE_instance', ''))
+ if os.environ.get('RBE_instance', None):
+ print('WARNING: Using RBE_instance=%s\n' %
+ os.environ.get('RBE_instance', ''))
- remote_disabled = os.environ.get('RBE_remote_disabled')
- if remote_disabled not in ('1', 't', 'T', 'true', 'TRUE', 'True'):
- set_racing_defaults()
+ remote_disabled = os.environ.get('RBE_remote_disabled')
+ if remote_disabled not in ('1', 't', 'T', 'true', 'TRUE', 'True'):
+ set_racing_defaults()
- # TODO(b/292523514) remove this once a fix is landed in reproxy
- remove_mdproxy_from_path()
+ # TODO(b/292523514) remove this once a fix is landed in reproxy
+ remove_mdproxy_from_path()
- start = time.time()
- reproxy_ret_code = start_reproxy(reclient_cfg, reclient_bin_dir)
- elapsed = time.time() - start
- print('%1.3f s to start reproxy' % elapsed)
- if reproxy_ret_code != 0:
- yield reproxy_ret_code
- return
- try:
- yield
- finally:
- print("Shutting down reproxy...", file=sys.stderr)
start = time.time()
- stop_reproxy(reclient_cfg, reclient_bin_dir)
+ reproxy_ret_code = start_reproxy(reclient_cfg, reclient_bin_dir)
elapsed = time.time() - start
- print('%1.3f s to stop reproxy' % elapsed)
+ print('%1.3f s to start reproxy' % elapsed)
+ if reproxy_ret_code != 0:
+ yield reproxy_ret_code
+ return
+ try:
+ yield
+ finally:
+ print("Shutting down reproxy...", file=sys.stderr)
+ start = time.time()
+ stop_reproxy(reclient_cfg, reclient_bin_dir)
+ elapsed = time.time() - start
+ print('%1.3f s to stop reproxy' % elapsed)
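
As a usage illustration of the build_context() manager reformatted above: it starts reproxy, yields a non-zero code when setup fails, and always shuts reproxy down afterwards. The wrapper below is a sketch; the ninja invocation and entry point are illustrative and not part of reclient_helper.py.

import subprocess
import sys

import reclient_helper


def build_with_reclient(argv):
    with reclient_helper.build_context(argv, 'autoninja') as ret:
        if ret:
            return ret  # missing reclient binaries/cfg or reproxy failed to start
        # Run the actual build while reproxy is up.
        return subprocess.call(['ninja'] + argv[1:])


if __name__ == '__main__':
    sys.exit(build_with_reclient(sys.argv))
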
diff --git a/reclient_metrics.py b/reclient_metrics.py
index 51164b66a0..c6158d69ee 100755
--- a/reclient_metrics.py
+++ b/reclient_metrics.py
@@ -16,36 +16,36 @@ VERSION = 1
def default_config():
- return {
- 'is-googler': is_googler(),
- 'countdown': 10,
- 'version': VERSION,
- }
+ return {
+ 'is-googler': is_googler(),
+ 'countdown': 10,
+ 'version': VERSION,
+ }
def load_config():
- config = None
- try:
- with open(CONFIG) as f:
- raw_config = json.load(f)
- if raw_config['version'] == VERSION:
- raw_config['countdown'] = max(0, raw_config['countdown'] - 1)
- config = raw_config
- except Exception:
- pass
- if not config:
- config = default_config()
- save_config(config)
- return config
+ config = None
+ try:
+ with open(CONFIG) as f:
+ raw_config = json.load(f)
+ if raw_config['version'] == VERSION:
+ raw_config['countdown'] = max(0, raw_config['countdown'] - 1)
+ config = raw_config
+ except Exception:
+ pass
+ if not config:
+ config = default_config()
+ save_config(config)
+ return config
def save_config(config):
- with open(CONFIG, 'w') as f:
- json.dump(config, f)
+ with open(CONFIG, 'w') as f:
+ json.dump(config, f)
def show_message(config, ninja_out):
- print("""
+ print("""
Your reclient metrics will be uploaded to the chromium build metrics database. The uploaded metrics will be used to analyze user side build performance.
We upload the contents of {ninja_out_abs}.
@@ -73,71 +73,71 @@ You can find a more detailed explanation in
or
https://chromium.googlesource.com/chromium/tools/depot_tools/+/main/reclient_metrics.README.md
""".format(
- ninja_out_abs=os.path.abspath(
- os.path.join(ninja_out, ".reproxy_tmp", "logs", "rbe_metrics.txt")),
- config_count=config.get("countdown", 0),
- file_path=__file__,
- metrics_readme_path=os.path.abspath(
- os.path.join(THIS_DIR, "reclient_metrics.README.md")),
- ))
+ ninja_out_abs=os.path.abspath(
+ os.path.join(ninja_out, ".reproxy_tmp", "logs", "rbe_metrics.txt")),
+ config_count=config.get("countdown", 0),
+ file_path=__file__,
+ metrics_readme_path=os.path.abspath(
+ os.path.join(THIS_DIR, "reclient_metrics.README.md")),
+ ))
def is_googler(config=None):
- """Check whether this user is Googler or not."""
- if config is not None and 'is-googler' in config:
- return config['is-googler']
- # Use cipd auth-info to check for googler status as
- # downloading rewrapper configs already requires cipd to be logged in
- p = subprocess.run('cipd auth-info',
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- text=True,
- shell=True)
- if p.returncode != 0:
- return False
- lines = p.stdout.splitlines()
- if len(lines) == 0:
- return False
- l = lines[0]
- # |l| will be like 'Logged in as <user>@google.com.' for googlers.
- return l.startswith('Logged in as ') and l.endswith('@google.com.')
+ """Check whether this user is Googler or not."""
+ if config is not None and 'is-googler' in config:
+ return config['is-googler']
+ # Use cipd auth-info to check for googler status as
+ # downloading rewrapper configs already requires cipd to be logged in
+ p = subprocess.run('cipd auth-info',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True,
+ shell=True)
+ if p.returncode != 0:
+ return False
+ lines = p.stdout.splitlines()
+ if len(lines) == 0:
+ return False
+ l = lines[0]
+ # |l| will be like 'Logged in as <user>@google.com.' for googlers.
+ return l.startswith('Logged in as ') and l.endswith('@google.com.')
def check_status(ninja_out):
- """Checks metrics collections status and shows notice to user if needed.
+ """Checks metrics collections status and shows notice to user if needed.
Returns True if metrics should be collected."""
- config = load_config()
- if not is_googler(config):
- return False
- if 'opt-in' in config:
- return config['opt-in']
- if config.get("countdown", 0) > 0:
- show_message(config, ninja_out)
- return False
- return True
+ config = load_config()
+ if not is_googler(config):
+ return False
+ if 'opt-in' in config:
+ return config['opt-in']
+ if config.get("countdown", 0) > 0:
+ show_message(config, ninja_out)
+ return False
+ return True
def main(argv):
- cfg = load_config()
+ cfg = load_config()
- if not is_googler(cfg):
- save_config(cfg)
- return 0
+ if not is_googler(cfg):
+ save_config(cfg)
+ return 0
- if len(argv) == 2 and argv[1] == 'opt-in':
- cfg['opt-in'] = True
- cfg['countdown'] = 0
- save_config(cfg)
- print('reclient metrics upload is opted in.')
- return 0
+ if len(argv) == 2 and argv[1] == 'opt-in':
+ cfg['opt-in'] = True
+ cfg['countdown'] = 0
+ save_config(cfg)
+ print('reclient metrics upload is opted in.')
+ return 0
- if len(argv) == 2 and argv[1] == 'opt-out':
- cfg['opt-in'] = False
- save_config(cfg)
- print('reclient metrics upload is opted out.')
- return 0
+ if len(argv) == 2 and argv[1] == 'opt-out':
+ cfg['opt-in'] = False
+ save_config(cfg)
+ print('reclient metrics upload is opted out.')
+ return 0
if __name__ == '__main__':
- sys.exit(main(sys.argv))
+ sys.exit(main(sys.argv))
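
For reference, the on-disk state that reclient_metrics.py manages looks roughly like the sketch below; the CONFIG path constant is defined near the top of that file and is not shown in this diff, and the values here are illustrative only.

import json

example_config = {
    'is-googler': True,  # cached result of the `cipd auth-info` check
    'countdown': 7,      # builds left before collection starts without a notice
    'version': 1,        # regenerated via default_config() on version mismatch
    # 'opt-in': False,   # written by `reclient_metrics.py opt-out`
}
print(json.dumps(example_config, indent=2))
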
diff --git a/reclientreport.py b/reclientreport.py
index 8f514520a8..69eaca9f4b 100644
--- a/reclientreport.py
+++ b/reclientreport.py
@@ -21,43 +21,43 @@ import reclient_helper
# TODO(b/296402157): Remove once reclientreport binary saves all logs on windows
def temp_win_impl__b_296402157(out_dir):
- '''Temporary implementation until b/296402157 is fixed'''
- log_dir = os.path.abspath(os.path.join(out_dir, '.reproxy_tmp', 'logs'))
- with tempfile.NamedTemporaryFile(prefix='reclientreport',
- suffix='.tar.gz',
- delete=False) as f:
- with tarfile.open(fileobj=f, mode='w:gz') as tar:
- tar.add(log_dir, arcname=os.path.basename(log_dir))
- print(
- f'Created log file at {f.name}. Please attach this to your bug report!')
+ '''Temporary implementation until b/296402157 is fixed'''
+ log_dir = os.path.abspath(os.path.join(out_dir, '.reproxy_tmp', 'logs'))
+ with tempfile.NamedTemporaryFile(prefix='reclientreport',
+ suffix='.tar.gz',
+ delete=False) as f:
+ with tarfile.open(fileobj=f, mode='w:gz') as tar:
+ tar.add(log_dir, arcname=os.path.basename(log_dir))
+ print(f'Created log file at {f.name}. Please attach this to your bug '
+ 'report!')
def main():
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument("--ninja_out",
- "-C",
- required=True,
- help="ninja out directory used for the autoninja build")
- parser.add_argument('args', nargs=argparse.REMAINDER)
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument("--ninja_out",
+ "-C",
+ required=True,
+ help="ninja out directory used for the autoninja build")
+ parser.add_argument('args', nargs=argparse.REMAINDER)
- args, extras = parser.parse_known_args()
- if sys.platform.startswith('win'):
- temp_win_impl__b_296402157(args.ninja_out)
- return
- if args.args and args.args[0] == '--':
- args.args.pop(0)
- if extras:
- args.args = extras + args.args
+ args, extras = parser.parse_known_args()
+ if sys.platform.startswith('win'):
+ temp_win_impl__b_296402157(args.ninja_out)
+ return
+ if args.args and args.args[0] == '--':
+ args.args.pop(0)
+ if extras:
+ args.args = extras + args.args
- reclient_helper.set_reproxy_path_flags(args.ninja_out, make_dirs=False)
- reclient_bin_dir = reclient_helper.find_reclient_bin_dir()
- code = subprocess.call([os.path.join(reclient_bin_dir, 'reclientreport')] +
- args.args)
- if code != 0:
- print("Failed to collect logs, make sure that %s/.reproxy_tmp exists" %
- args.ninja_out,
- file=sys.stderr)
+ reclient_helper.set_reproxy_path_flags(args.ninja_out, make_dirs=False)
+ reclient_bin_dir = reclient_helper.find_reclient_bin_dir()
+ code = subprocess.call([os.path.join(reclient_bin_dir, 'reclientreport')] +
+ args.args)
+ if code != 0:
+ print("Failed to collect logs, make sure that %s/.reproxy_tmp exists" %
+ args.ninja_out,
+ file=sys.stderr)
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
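
A standalone sketch of the Windows fallback above for readers who want the behaviour in isolation; the out_dir value is illustrative, and the returned path is what the real helper prints for attaching to a bug report.

import os
import tarfile
import tempfile


def bundle_reproxy_logs(out_dir):
    # Bundle <out_dir>/.reproxy_tmp/logs into a reclientreport*.tar.gz archive.
    log_dir = os.path.abspath(os.path.join(out_dir, '.reproxy_tmp', 'logs'))
    with tempfile.NamedTemporaryFile(prefix='reclientreport',
                                     suffix='.tar.gz',
                                     delete=False) as f:
        with tarfile.open(fileobj=f, mode='w:gz') as tar:
            tar.add(log_dir, arcname=os.path.basename(log_dir))
        return f.name
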
diff --git a/repo b/repo
index 9dc4dc5e76..9f8357d87d 100755
--- a/repo
+++ b/repo
@@ -2,7 +2,6 @@
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Wrapper around repo to auto-update depot_tools during sync.
gclient keeps depot_tools up-to-date automatically for Chromium developers.
@@ -16,7 +15,6 @@ from pathlib import Path
import subprocess
import sys
-
# Some useful paths.
DEPOT_TOOLS_DIR = Path(__file__).resolve().parent
UPDATE_DEPOT_TOOLS = DEPOT_TOOLS_DIR / 'update_depot_tools'
@@ -24,35 +22,37 @@ REPO = DEPOT_TOOLS_DIR / 'repo_launcher'
def _UpdateDepotTools():
- """Help CrOS users keep their depot_tools checkouts up-to-date."""
- if os.getenv('DEPOT_TOOLS_UPDATE') == '0':
- return
+ """Help CrOS users keep their depot_tools checkouts up-to-date."""
+ if os.getenv('DEPOT_TOOLS_UPDATE') == '0':
+ return
- # We don't update the copy that's part of the CrOS repo client checkout.
- path = DEPOT_TOOLS_DIR
- while path != path.parent:
- if (path / '.repo').is_dir() and (path / 'chromite').is_dir():
- return
- path = path.parent
+ # We don't update the copy that's part of the CrOS repo client checkout.
+ path = DEPOT_TOOLS_DIR
+ while path != path.parent:
+ if (path / '.repo').is_dir() and (path / 'chromite').is_dir():
+ return
+ path = path.parent
- if UPDATE_DEPOT_TOOLS.exists():
- subprocess.run([UPDATE_DEPOT_TOOLS], check=True)
- else:
- print(f'warning: {UPDATE_DEPOT_TOOLS} does not exist; export '
- 'DEPOT_TOOLS_UPDATE=0 to disable.', file=sys.stderr)
+ if UPDATE_DEPOT_TOOLS.exists():
+ subprocess.run([UPDATE_DEPOT_TOOLS], check=True)
+ else:
+ print(
+ f'warning: {UPDATE_DEPOT_TOOLS} does not exist; export '
+ 'DEPOT_TOOLS_UPDATE=0 to disable.',
+ file=sys.stderr)
def main(argv):
- # This is a bit hacky, but should be "good enough". If repo itself gains
- # support for sync hooks, we could switch to that.
- if argv and argv[0] == 'sync':
- _UpdateDepotTools()
+ # This is a bit hacky, but should be "good enough". If repo itself gains
+ # support for sync hooks, we could switch to that.
+ if argv and argv[0] == 'sync':
+ _UpdateDepotTools()
- # Set the default to our fork.
- os.environ["REPO_URL"] = "https://chromium.googlesource.com/external/repo"
+ # Set the default to our fork.
+ os.environ["REPO_URL"] = "https://chromium.googlesource.com/external/repo"
- os.execv(sys.executable, [sys.executable, str(REPO)] + argv)
+ os.execv(sys.executable, [sys.executable, str(REPO)] + argv)
if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+ sys.exit(main(sys.argv[1:]))
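
A small illustration of the update policy in the wrapper above: the update step only runs for `repo sync` and can be disabled with DEPOT_TOOLS_UPDATE=0. The sketch omits the CrOS repo-client checkout detection that additionally suppresses the update.

import os


def should_update(argv, env=None):
    # True when `repo sync` should first run update_depot_tools.
    env = os.environ if env is None else env
    return bool(argv) and argv[0] == 'sync' and env.get(
        'DEPOT_TOOLS_UPDATE') != '0'


print(should_update(['sync'], {}))                           # True
print(should_update(['sync'], {'DEPOT_TOOLS_UPDATE': '0'}))  # False
print(should_update(['upload'], {}))                         # False
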
diff --git a/roll_dep.py b/roll_dep.py
index 266bca4856..fc15c6a4dc 100755
--- a/roll_dep.py
+++ b/roll_dep.py
@@ -2,7 +2,6 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Rolls DEPS controlled dependency.
Works only with git checkout and git dependencies. Currently this script will
@@ -20,9 +19,8 @@ import sys
import tempfile
NEED_SHELL = sys.platform.startswith('win')
-GCLIENT_PATH = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'gclient.py')
-
+GCLIENT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ 'gclient.py')
# Commit subject that will be considered a roll. In the format generated by the
# git log used, so it's "<date> <author> <subject>"
@@ -33,309 +31,318 @@ _ROLL_SUBJECT = re.compile(
r'[^ ]+ '
# Subject
r'('
- # Generated by
- # https://skia.googlesource.com/buildbot/+/HEAD/autoroll/go/repo_manager/deps_repo_manager.go
- r'Roll [^ ]+ [a-f0-9]+\.\.[a-f0-9]+ \(\d+ commits\)'
- r'|'
- # Generated by
- # https://chromium.googlesource.com/infra/infra/+/HEAD/recipes/recipe_modules/recipe_autoroller/api.py
- r'Roll recipe dependencies \(trivial\)\.'
+ # Generated by
+ # https://skia.googlesource.com/buildbot/+/HEAD/autoroll/go/repo_manager/deps_repo_manager.go
+ r'Roll [^ ]+ [a-f0-9]+\.\.[a-f0-9]+ \(\d+ commits\)'
+ r'|'
+ # Generated by
+ # https://chromium.googlesource.com/infra/infra/+/HEAD/recipes/recipe_modules/recipe_autoroller/api.py
+ r'Roll recipe dependencies \(trivial\)\.'
r')$')
class Error(Exception):
- pass
+ pass
class AlreadyRolledError(Error):
- pass
+ pass
def check_output(*args, **kwargs):
- """subprocess2.check_output() passing shell=True on Windows for git."""
- kwargs.setdefault('shell', NEED_SHELL)
- return subprocess2.check_output(*args, **kwargs).decode('utf-8')
+ """subprocess2.check_output() passing shell=True on Windows for git."""
+ kwargs.setdefault('shell', NEED_SHELL)
+ return subprocess2.check_output(*args, **kwargs).decode('utf-8')
def check_call(*args, **kwargs):
- """subprocess2.check_call() passing shell=True on Windows for git."""
- kwargs.setdefault('shell', NEED_SHELL)
- subprocess2.check_call(*args, **kwargs)
+ """subprocess2.check_call() passing shell=True on Windows for git."""
+ kwargs.setdefault('shell', NEED_SHELL)
+ subprocess2.check_call(*args, **kwargs)
def return_code(*args, **kwargs):
- """subprocess2.call() passing shell=True on Windows for git and
+ """subprocess2.call() passing shell=True on Windows for git and
subprocess2.DEVNULL for stdout and stderr."""
- kwargs.setdefault('shell', NEED_SHELL)
- kwargs.setdefault('stdout', subprocess2.DEVNULL)
- kwargs.setdefault('stderr', subprocess2.DEVNULL)
- return subprocess2.call(*args, **kwargs)
+ kwargs.setdefault('shell', NEED_SHELL)
+ kwargs.setdefault('stdout', subprocess2.DEVNULL)
+ kwargs.setdefault('stderr', subprocess2.DEVNULL)
+ return subprocess2.call(*args, **kwargs)
def is_pristine(root):
- """Returns True if a git checkout is pristine."""
- # `git rev-parse --verify` has a non-zero return code if the revision
- # doesn't exist.
- diff_cmd = ['git', 'diff', '--ignore-submodules', 'origin/main']
- return (not check_output(diff_cmd, cwd=root).strip() and
- not check_output(diff_cmd + ['--cached'], cwd=root).strip())
-
+ """Returns True if a git checkout is pristine."""
+ # `git rev-parse --verify` has a non-zero return code if the revision
+ # doesn't exist.
+ diff_cmd = ['git', 'diff', '--ignore-submodules', 'origin/main']
+ return (not check_output(diff_cmd, cwd=root).strip()
+ and not check_output(diff_cmd + ['--cached'], cwd=root).strip())
def get_log_url(upstream_url, head, tot):
- """Returns an URL to read logs via a Web UI if applicable."""
- if re.match(r'https://[^/]*\.googlesource\.com/', upstream_url):
- # gitiles
- return '%s/+log/%s..%s' % (upstream_url, head[:12], tot[:12])
- if upstream_url.startswith('https://github.com/'):
- upstream_url = upstream_url.rstrip('/')
- if upstream_url.endswith('.git'):
- upstream_url = upstream_url[:-len('.git')]
- return '%s/compare/%s...%s' % (upstream_url, head[:12], tot[:12])
- return None
+ """Returns an URL to read logs via a Web UI if applicable."""
+ if re.match(r'https://[^/]*\.googlesource\.com/', upstream_url):
+ # gitiles
+ return '%s/+log/%s..%s' % (upstream_url, head[:12], tot[:12])
+ if upstream_url.startswith('https://github.com/'):
+ upstream_url = upstream_url.rstrip('/')
+ if upstream_url.endswith('.git'):
+ upstream_url = upstream_url[:-len('.git')]
+ return '%s/compare/%s...%s' % (upstream_url, head[:12], tot[:12])
+ return None
def should_show_log(upstream_url):
- """Returns True if a short log should be included in the tree."""
- # Skip logs for very active projects.
- if upstream_url.endswith('/v8/v8.git'):
- return False
- if 'webrtc' in upstream_url:
- return False
- return True
+ """Returns True if a short log should be included in the tree."""
+ # Skip logs for very active projects.
+ if upstream_url.endswith('/v8/v8.git'):
+ return False
+ if 'webrtc' in upstream_url:
+ return False
+ return True
def gclient(args):
- """Executes gclient with the given args and returns the stdout."""
- return check_output([sys.executable, GCLIENT_PATH] + args).strip()
+ """Executes gclient with the given args and returns the stdout."""
+ return check_output([sys.executable, GCLIENT_PATH] + args).strip()
-def generate_commit_message(
- full_dir, dependency, head, roll_to, no_log, log_limit):
- """Creates the commit message for this specific roll."""
- commit_range = '%s..%s' % (head, roll_to)
- commit_range_for_header = '%s..%s' % (head[:9], roll_to[:9])
- upstream_url = check_output(
- ['git', 'config', 'remote.origin.url'], cwd=full_dir).strip()
- log_url = get_log_url(upstream_url, head, roll_to)
- cmd = ['git', 'log', commit_range, '--date=short', '--no-merges']
- logs = check_output(
- # Args with '=' are automatically quoted.
- cmd + ['--format=%ad %ae %s', '--'],
- cwd=full_dir).rstrip()
- logs = re.sub(r'(?m)^(\d\d\d\d-\d\d-\d\d [^@]+)@[^ ]+( .*)$', r'\1\2', logs)
- lines = logs.splitlines()
- cleaned_lines = [l for l in lines if not _ROLL_SUBJECT.match(l)]
- logs = '\n'.join(cleaned_lines) + '\n'
+def generate_commit_message(full_dir, dependency, head, roll_to, no_log,
+ log_limit):
+ """Creates the commit message for this specific roll."""
+ commit_range = '%s..%s' % (head, roll_to)
+ commit_range_for_header = '%s..%s' % (head[:9], roll_to[:9])
+ upstream_url = check_output(['git', 'config', 'remote.origin.url'],
+ cwd=full_dir).strip()
+ log_url = get_log_url(upstream_url, head, roll_to)
+ cmd = ['git', 'log', commit_range, '--date=short', '--no-merges']
+ logs = check_output(
+ # Args with '=' are automatically quoted.
+ cmd + ['--format=%ad %ae %s', '--'],
+ cwd=full_dir).rstrip()
+ logs = re.sub(r'(?m)^(\d\d\d\d-\d\d-\d\d [^@]+)@[^ ]+( .*)$', r'\1\2', logs)
+ lines = logs.splitlines()
+ cleaned_lines = [l for l in lines if not _ROLL_SUBJECT.match(l)]
+ logs = '\n'.join(cleaned_lines) + '\n'
- nb_commits = len(lines)
- rolls = nb_commits - len(cleaned_lines)
- header = 'Roll %s/ %s (%d commit%s%s)\n\n' % (
- dependency,
- commit_range_for_header,
- nb_commits,
- 's' if nb_commits > 1 else '',
- ('; %s trivial rolls' % rolls) if rolls else '')
- log_section = ''
- if log_url:
- log_section = log_url + '\n\n'
- log_section += '$ %s ' % ' '.join(cmd)
- log_section += '--format=\'%ad %ae %s\'\n'
- log_section = log_section.replace(commit_range, commit_range_for_header)
- # It is important that --no-log continues to work, as it is used by
- # internal -> external rollers. Please do not remove or break it.
- if not no_log and should_show_log(upstream_url):
- if len(cleaned_lines) > log_limit:
- # Keep the first N/2 log entries and last N/2 entries.
- lines = logs.splitlines(True)
- lines = lines[:log_limit//2] + ['(...)\n'] + lines[-log_limit//2:]
- logs = ''.join(lines)
- log_section += logs
- return header + log_section
+ nb_commits = len(lines)
+ rolls = nb_commits - len(cleaned_lines)
+ header = 'Roll %s/ %s (%d commit%s%s)\n\n' % (
+ dependency, commit_range_for_header, nb_commits,
+ 's' if nb_commits > 1 else '',
+ ('; %s trivial rolls' % rolls) if rolls else '')
+ log_section = ''
+ if log_url:
+ log_section = log_url + '\n\n'
+ log_section += '$ %s ' % ' '.join(cmd)
+ log_section += '--format=\'%ad %ae %s\'\n'
+ log_section = log_section.replace(commit_range, commit_range_for_header)
+ # It is important that --no-log continues to work, as it is used by
+ # internal -> external rollers. Please do not remove or break it.
+ if not no_log and should_show_log(upstream_url):
+ if len(cleaned_lines) > log_limit:
+ # Keep the first N/2 log entries and last N/2 entries.
+ lines = logs.splitlines(True)
+ lines = lines[:log_limit // 2] + ['(...)\n'
+ ] + lines[-log_limit // 2:]
+ logs = ''.join(lines)
+ log_section += logs
+ return header + log_section
def is_submoduled():
- """Returns true if gclient root has submodules"""
- return os.path.isfile(os.path.join(gclient(['root']), ".gitmodules"))
+ """Returns true if gclient root has submodules"""
+ return os.path.isfile(os.path.join(gclient(['root']), ".gitmodules"))
def get_submodule_rev(submodule):
- """Returns revision of the given submodule path"""
- rev_output = check_output(['git', 'submodule', 'status', submodule],
- cwd=gclient(['root'])).strip()
+ """Returns revision of the given submodule path"""
+ rev_output = check_output(['git', 'submodule', 'status', submodule],
+ cwd=gclient(['root'])).strip()
- # git submodule status returns all submodules with its rev in the
- # pattern: `(+|-| )(<revision>) (submodule.path)`
- revision = rev_output.split(' ')[0]
- return revision[1:] if revision[0] in ('+', '-') else revision
+ # git submodule status returns all submodules with its rev in the
+ # pattern: `(+|-| )(<revision>) (submodule.path)`
+ revision = rev_output.split(' ')[0]
+ return revision[1:] if revision[0] in ('+', '-') else revision
def calculate_roll(full_dir, dependency, roll_to):
- """Calculates the roll for a dependency by processing gclient_dict, and
+ """Calculates the roll for a dependency by processing gclient_dict, and
fetching the dependency via git.
"""
- # if the super-project uses submodules, get rev directly using git.
- if is_submoduled():
- head = get_submodule_rev(dependency)
- else:
- head = gclient(['getdep', '-r', dependency])
- if not head:
- raise Error('%s is unpinned.' % dependency)
- check_call(['git', 'fetch', 'origin', '--quiet'], cwd=full_dir)
- if roll_to == 'origin/HEAD':
- check_output(['git', 'remote', 'set-head', 'origin', '-a'], cwd=full_dir)
-
- roll_to = check_output(['git', 'rev-parse', roll_to], cwd=full_dir).strip()
- return head, roll_to
+ # if the super-project uses submodules, get rev directly using git.
+ if is_submoduled():
+ head = get_submodule_rev(dependency)
+ else:
+ head = gclient(['getdep', '-r', dependency])
+ if not head:
+ raise Error('%s is unpinned.' % dependency)
+ check_call(['git', 'fetch', 'origin', '--quiet'], cwd=full_dir)
+ if roll_to == 'origin/HEAD':
+ check_output(['git', 'remote', 'set-head', 'origin', '-a'],
+ cwd=full_dir)
+ roll_to = check_output(['git', 'rev-parse', roll_to], cwd=full_dir).strip()
+ return head, roll_to
def gen_commit_msg(logs, cmdline, reviewers, bug):
- """Returns the final commit message."""
- commit_msg = ''
- if len(logs) > 1:
- commit_msg = 'Rolling %d dependencies\n\n' % len(logs)
- commit_msg += '\n\n'.join(logs)
- commit_msg += '\nCreated with:\n ' + cmdline + '\n'
- commit_msg += 'R=%s\n' % ','.join(reviewers) if reviewers else ''
- commit_msg += '\nBug: %s\n' % bug if bug else ''
- return commit_msg
+ """Returns the final commit message."""
+ commit_msg = ''
+ if len(logs) > 1:
+ commit_msg = 'Rolling %d dependencies\n\n' % len(logs)
+ commit_msg += '\n\n'.join(logs)
+ commit_msg += '\nCreated with:\n ' + cmdline + '\n'
+ commit_msg += 'R=%s\n' % ','.join(reviewers) if reviewers else ''
+ commit_msg += '\nBug: %s\n' % bug if bug else ''
+ return commit_msg
def finalize(commit_msg, current_dir, rolls):
- """Commits changes to the DEPS file, then uploads a CL."""
- print('Commit message:')
- print('\n'.join(' ' + i for i in commit_msg.splitlines()))
+ """Commits changes to the DEPS file, then uploads a CL."""
+ print('Commit message:')
+ print('\n'.join(' ' + i for i in commit_msg.splitlines()))
- # Pull the dependency to the right revision. This is surprising to users
- # otherwise. The revision update is done before commiting to update
- # submodule revision if present.
- for dependency, (_head, roll_to, full_dir) in sorted(rolls.items()):
- check_call(['git', 'checkout', '--quiet', roll_to], cwd=full_dir)
+ # Pull the dependency to the right revision. This is surprising to users
+ # otherwise. The revision update is done before committing to update the
+ # submodule revision if present.
+ for dependency, (_head, roll_to, full_dir) in sorted(rolls.items()):
+ check_call(['git', 'checkout', '--quiet', roll_to], cwd=full_dir)
- # This adds the submodule revision update to the commit.
- if is_submoduled():
- check_call([
- 'git', 'update-index', '--add', '--cacheinfo', '160000,{},{}'.format(
- roll_to, dependency)
- ],
- cwd=current_dir)
+ # This adds the submodule revision update to the commit.
+ if is_submoduled():
+ check_call([
+ 'git', 'update-index', '--add', '--cacheinfo',
+ '160000,{},{}'.format(roll_to, dependency)
+ ],
+ cwd=current_dir)
- check_call(['git', 'add', 'DEPS'], cwd=current_dir)
- # We have to set delete=False and then let the object go out of scope so
- # that the file can be opened by name on Windows.
- with tempfile.NamedTemporaryFile('w+', newline='', delete=False) as f:
- commit_filename = f.name
- f.write(commit_msg)
- check_call(['git', 'commit', '--quiet', '--file', commit_filename],
- cwd=current_dir)
- os.remove(commit_filename)
+ check_call(['git', 'add', 'DEPS'], cwd=current_dir)
+ # We have to set delete=False and then let the object go out of scope so
+ # that the file can be opened by name on Windows.
+ with tempfile.NamedTemporaryFile('w+', newline='', delete=False) as f:
+ commit_filename = f.name
+ f.write(commit_msg)
+ check_call(['git', 'commit', '--quiet', '--file', commit_filename],
+ cwd=current_dir)
+ os.remove(commit_filename)
def main():
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument(
- '--ignore-dirty-tree', action='store_true',
- help='Roll anyways, even if there is a diff.')
- parser.add_argument(
- '-r',
- '--reviewer',
- action='append',
- help=
- 'To specify multiple reviewers, either use a comma separated list, e.g. '
- '-r joe,jane,john or provide the flag multiple times, e.g. '
- '-r joe -r jane. Defaults to @chromium.org')
- parser.add_argument('-b', '--bug', help='Associate a bug number to the roll')
- # It is important that --no-log continues to work, as it is used by
- # internal -> external rollers. Please do not remove or break it.
- parser.add_argument(
- '--no-log', action='store_true',
- help='Do not include the short log in the commit message')
- parser.add_argument(
- '--log-limit', type=int, default=100,
- help='Trim log after N commits (default: %(default)s)')
- parser.add_argument(
- '--roll-to', default='origin/HEAD',
- help='Specify the new commit to roll to (default: %(default)s)')
- parser.add_argument(
- '--key', action='append', default=[],
- help='Regex(es) for dependency in DEPS file')
- parser.add_argument('dep_path', nargs='+', help='Path(s) to dependency')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('--ignore-dirty-tree',
+ action='store_true',
+ help='Roll anyways, even if there is a diff.')
+ parser.add_argument(
+ '-r',
+ '--reviewer',
+ action='append',
+ help='To specify multiple reviewers, either use a comma separated '
+ 'list, e.g. -r joe,jane,john or provide the flag multiple times, e.g. '
+ '-r joe -r jane. Defaults to @chromium.org')
+ parser.add_argument('-b',
+ '--bug',
+ help='Associate a bug number to the roll')
+ # It is important that --no-log continues to work, as it is used by
+ # internal -> external rollers. Please do not remove or break it.
+ parser.add_argument(
+ '--no-log',
+ action='store_true',
+ help='Do not include the short log in the commit message')
+ parser.add_argument('--log-limit',
+ type=int,
+ default=100,
+ help='Trim log after N commits (default: %(default)s)')
+ parser.add_argument(
+ '--roll-to',
+ default='origin/HEAD',
+ help='Specify the new commit to roll to (default: %(default)s)')
+ parser.add_argument('--key',
+ action='append',
+ default=[],
+ help='Regex(es) for dependency in DEPS file')
+ parser.add_argument('dep_path', nargs='+', help='Path(s) to dependency')
+ args = parser.parse_args()
- if len(args.dep_path) > 1:
- if args.roll_to != 'origin/HEAD':
- parser.error(
- 'Can\'t use multiple paths to roll simultaneously and --roll-to')
- if args.key:
- parser.error(
- 'Can\'t use multiple paths to roll simultaneously and --key')
- reviewers = None
- if args.reviewer:
- reviewers = list(itertools.chain(*[r.split(',') for r in args.reviewer]))
- for i, r in enumerate(reviewers):
- if not '@' in r:
- reviewers[i] = r + '@chromium.org'
+ if len(args.dep_path) > 1:
+ if args.roll_to != 'origin/HEAD':
+ parser.error(
+ 'Can\'t use multiple paths to roll simultaneously and --roll-to'
+ )
+ if args.key:
+ parser.error(
+ 'Can\'t use multiple paths to roll simultaneously and --key')
+ reviewers = None
+ if args.reviewer:
+ reviewers = list(itertools.chain(*[r.split(',')
+ for r in args.reviewer]))
+ for i, r in enumerate(reviewers):
+ if not '@' in r:
+ reviewers[i] = r + '@chromium.org'
- gclient_root = gclient(['root'])
- current_dir = os.getcwd()
- dependencies = sorted(d.replace('\\', '/').rstrip('/') for d in args.dep_path)
- cmdline = 'roll-dep ' + ' '.join(dependencies) + ''.join(
- ' --key ' + k for k in args.key)
- try:
- if not args.ignore_dirty_tree and not is_pristine(current_dir):
- raise Error(
- 'Ensure %s is clean first (no non-merged commits).' % current_dir)
- # First gather all the information without modifying anything, except for a
- # git fetch.
- rolls = {}
- for dependency in dependencies:
- full_dir = os.path.normpath(os.path.join(gclient_root, dependency))
- if not os.path.isdir(full_dir):
- print('Dependency %s not found at %s' % (dependency, full_dir))
- full_dir = os.path.normpath(os.path.join(current_dir, dependency))
- print('Will look for relative dependency at %s' % full_dir)
- if not os.path.isdir(full_dir):
- raise Error('Directory not found: %s (%s)' % (dependency, full_dir))
+ gclient_root = gclient(['root'])
+ current_dir = os.getcwd()
+ dependencies = sorted(
+ d.replace('\\', '/').rstrip('/') for d in args.dep_path)
+ cmdline = 'roll-dep ' + ' '.join(dependencies) + ''.join(' --key ' + k
+ for k in args.key)
+ try:
+ if not args.ignore_dirty_tree and not is_pristine(current_dir):
+ raise Error('Ensure %s is clean first (no non-merged commits).' %
+ current_dir)
+ # First gather all the information without modifying anything, except
+ # for a git fetch.
+ rolls = {}
+ for dependency in dependencies:
+ full_dir = os.path.normpath(os.path.join(gclient_root, dependency))
+ if not os.path.isdir(full_dir):
+ print('Dependency %s not found at %s' % (dependency, full_dir))
+ full_dir = os.path.normpath(
+ os.path.join(current_dir, dependency))
+ print('Will look for relative dependency at %s' % full_dir)
+ if not os.path.isdir(full_dir):
+ raise Error('Directory not found: %s (%s)' %
+ (dependency, full_dir))
- head, roll_to = calculate_roll(full_dir, dependency, args.roll_to)
- if roll_to == head:
- if len(dependencies) == 1:
- raise AlreadyRolledError('No revision to roll!')
- print('%s: Already at latest commit %s' % (dependency, roll_to))
- else:
- print(
- '%s: Rolling from %s to %s' % (dependency, head[:10], roll_to[:10]))
- rolls[dependency] = (head, roll_to, full_dir)
+ head, roll_to = calculate_roll(full_dir, dependency, args.roll_to)
+ if roll_to == head:
+ if len(dependencies) == 1:
+ raise AlreadyRolledError('No revision to roll!')
+ print('%s: Already at latest commit %s' % (dependency, roll_to))
+ else:
+ print('%s: Rolling from %s to %s' %
+ (dependency, head[:10], roll_to[:10]))
+ rolls[dependency] = (head, roll_to, full_dir)
- logs = []
- setdep_args = []
- for dependency, (head, roll_to, full_dir) in sorted(rolls.items()):
- log = generate_commit_message(
- full_dir, dependency, head, roll_to, args.no_log, args.log_limit)
- logs.append(log)
- setdep_args.extend(['-r', '{}@{}'.format(dependency, roll_to)])
+ logs = []
+ setdep_args = []
+ for dependency, (head, roll_to, full_dir) in sorted(rolls.items()):
+ log = generate_commit_message(full_dir, dependency, head, roll_to,
+ args.no_log, args.log_limit)
+ logs.append(log)
+ setdep_args.extend(['-r', '{}@{}'.format(dependency, roll_to)])
- # DEPS is updated even if the repository uses submodules.
- gclient(['setdep'] + setdep_args)
+ # DEPS is updated even if the repository uses submodules.
+ gclient(['setdep'] + setdep_args)
- commit_msg = gen_commit_msg(logs, cmdline, reviewers, args.bug)
- finalize(commit_msg, current_dir, rolls)
- except Error as e:
- sys.stderr.write('error: %s\n' % e)
- return 2 if isinstance(e, AlreadyRolledError) else 1
- except subprocess2.CalledProcessError:
- return 1
+ commit_msg = gen_commit_msg(logs, cmdline, reviewers, args.bug)
+ finalize(commit_msg, current_dir, rolls)
+ except Error as e:
+ sys.stderr.write('error: %s\n' % e)
+ return 2 if isinstance(e, AlreadyRolledError) else 1
+ except subprocess2.CalledProcessError:
+ return 1
- print('')
- if not reviewers:
- print('You forgot to pass -r, make sure to insert a R=foo@example.com line')
- print('to the commit description before emailing.')
print('')
- print('Run:')
- print(' git cl upload --send-mail')
- return 0
+ if not reviewers:
+ print('You forgot to pass -r, make sure to insert a R=foo@example.com '
+ 'line')
+ print('to the commit description before emailing.')
+ print('')
+ print('Run:')
+ print(' git cl upload --send-mail')
+ return 0
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
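
A worked illustration of the --log-limit trimming inside generate_commit_message() above: when the cleaned log exceeds the limit, only the first and last log_limit // 2 entries are kept around a '(...)' marker. The sample log lines are invented.

log_limit = 4
lines = ['2023-01-0%d author commit %d\n' % (i, i) for i in range(1, 8)]
if len(lines) > log_limit:
    lines = lines[:log_limit // 2] + ['(...)\n'] + lines[-log_limit // 2:]
print(''.join(lines), end='')
# 2023-01-01 author commit 1
# 2023-01-02 author commit 2
# (...)
# 2023-01-06 author commit 6
# 2023-01-07 author commit 7
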
diff --git a/rustfmt.py b/rustfmt.py
index aae33a12d8..65246aef57 100755
--- a/rustfmt.py
+++ b/rustfmt.py
@@ -15,56 +15,56 @@ import sys
class NotFoundError(Exception):
- """A file could not be found."""
-
- def __init__(self, e):
- Exception.__init__(
- self, 'Problem while looking for rustfmt in Chromium source tree:\n'
- '%s' % e)
+ """A file could not be found."""
+ def __init__(self, e):
+ Exception.__init__(
+ self, 'Problem while looking for rustfmt in Chromium source tree:\n'
+ '%s' % e)
def FindRustfmtToolInChromiumTree():
- """Return a path to the rustfmt executable, or die trying."""
- chromium_src_path = gclient_paths.GetPrimarySolutionPath()
- if not chromium_src_path:
- raise NotFoundError(
- 'Could not find checkout in any parent of the current path.\n'
- 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium checkout.')
+ """Return a path to the rustfmt executable, or die trying."""
+ chromium_src_path = gclient_paths.GetPrimarySolutionPath()
+ if not chromium_src_path:
+ raise NotFoundError(
+ 'Could not find checkout in any parent of the current path.\n'
+ 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium '
+ 'checkout.')
- tool_path = os.path.join(chromium_src_path, 'third_party', 'rust-toolchain',
- 'bin', 'rustfmt' + gclient_paths.GetExeSuffix())
- if not os.path.exists(tool_path):
- raise NotFoundError('File does not exist: %s' % tool_path)
- return tool_path
+ tool_path = os.path.join(chromium_src_path, 'third_party', 'rust-toolchain',
+ 'bin', 'rustfmt' + gclient_paths.GetExeSuffix())
+ if not os.path.exists(tool_path):
+ raise NotFoundError('File does not exist: %s' % tool_path)
+ return tool_path
def IsRustfmtSupported():
- try:
- FindRustfmtToolInChromiumTree()
- return True
- except NotFoundError:
- return False
+ try:
+ FindRustfmtToolInChromiumTree()
+ return True
+ except NotFoundError:
+ return False
def main(args):
- try:
- tool = FindRustfmtToolInChromiumTree()
- except NotFoundError as e:
- sys.stderr.write("%s\n" % str(e))
- return 1
+ try:
+ tool = FindRustfmtToolInChromiumTree()
+ except NotFoundError as e:
+ sys.stderr.write("%s\n" % str(e))
+ return 1
- # Add some visibility to --help showing where the tool lives, since this
- # redirection can be a little opaque.
- help_syntax = ('-h', '--help', '-help', '-help-list', '--help-list')
- if any(match in args for match in help_syntax):
- print('\nDepot tools redirects you to the rustfmt at:\n %s\n' % tool)
+ # Add some visibility to --help showing where the tool lives, since this
+ # redirection can be a little opaque.
+ help_syntax = ('-h', '--help', '-help', '-help-list', '--help-list')
+ if any(match in args for match in help_syntax):
+ print('\nDepot tools redirects you to the rustfmt at:\n %s\n' % tool)
- return subprocess.call([tool] + args)
+ return subprocess.call([tool] + args)
if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv[1:]))
- except KeyboardInterrupt:
- sys.stderr.write('interrupted\n')
- sys.exit(1)
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except KeyboardInterrupt:
+ sys.stderr.write('interrupted\n')
+ sys.exit(1)
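
A usage sketch for the two helpers above; format_rust_files() is hypothetical and only the rustfmt.py functions it calls come from the patched file. It formats the given paths when a bundled rustfmt can be located and silently skips otherwise.

import subprocess

import rustfmt  # depot_tools' rustfmt.py wrapper patched above


def format_rust_files(paths):
    # Hypothetical caller: skip quietly outside a Chromium checkout.
    if not rustfmt.IsRustfmtSupported():
        return 0
    tool = rustfmt.FindRustfmtToolInChromiumTree()
    return subprocess.call([tool] + list(paths))
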
diff --git a/scm.py b/scm.py
index 6921824ffb..904f67c887 100644
--- a/scm.py
+++ b/scm.py
@@ -1,7 +1,6 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""SCM-specific utility classes."""
import distutils.version
@@ -15,463 +14,478 @@ import sys
import gclient_utils
import subprocess2
+# TODO: Should fix these warnings.
+# pylint: disable=line-too-long
+
def ValidateEmail(email):
- return (
- re.match(r"^[a-zA-Z0-9._%\-+]+@[a-zA-Z0-9._%-]+.[a-zA-Z]{2,6}$", email)
- is not None)
+ return (re.match(r"^[a-zA-Z0-9._%\-+]+@[a-zA-Z0-9._%-]+.[a-zA-Z]{2,6}$",
+ email) is not None)
def GetCasedPath(path):
- """Elcheapos way to get the real path case on Windows."""
- if sys.platform.startswith('win') and os.path.exists(path):
- # Reconstruct the path.
- path = os.path.abspath(path)
- paths = path.split('\\')
- for i in range(len(paths)):
- if i == 0:
- # Skip drive letter.
- continue
- subpath = '\\'.join(paths[:i+1])
- prev = len('\\'.join(paths[:i]))
- # glob.glob will return the cased path for the last item only. This is why
- # we are calling it in a loop. Extract the data we want and put it back
- # into the list.
- paths[i] = glob.glob(subpath + '*')[0][prev+1:len(subpath)]
- path = '\\'.join(paths)
- return path
+ """Elcheapos way to get the real path case on Windows."""
+ if sys.platform.startswith('win') and os.path.exists(path):
+ # Reconstruct the path.
+ path = os.path.abspath(path)
+ paths = path.split('\\')
+ for i in range(len(paths)):
+ if i == 0:
+ # Skip drive letter.
+ continue
+ subpath = '\\'.join(paths[:i + 1])
+ prev = len('\\'.join(paths[:i]))
+ # glob.glob will return the cased path for the last item only. This
+ # is why we are calling it in a loop. Extract the data we want and
+ # put it back into the list.
+ paths[i] = glob.glob(subpath + '*')[0][prev + 1:len(subpath)]
+ path = '\\'.join(paths)
+ return path
def GenFakeDiff(filename):
- """Generates a fake diff from a file."""
- file_content = gclient_utils.FileRead(filename, 'rb').splitlines(True)
- filename = filename.replace(os.sep, '/')
- nb_lines = len(file_content)
- # We need to use / since patch on unix will fail otherwise.
- data = io.StringIO()
- data.write("Index: %s\n" % filename)
- data.write('=' * 67 + '\n')
- # Note: Should we use /dev/null instead?
- data.write("--- %s\n" % filename)
- data.write("+++ %s\n" % filename)
- data.write("@@ -0,0 +1,%d @@\n" % nb_lines)
- # Prepend '+' to every lines.
- for line in file_content:
- data.write('+')
- data.write(line)
- result = data.getvalue()
- data.close()
- return result
+ """Generates a fake diff from a file."""
+ file_content = gclient_utils.FileRead(filename, 'rb').splitlines(True)
+ filename = filename.replace(os.sep, '/')
+ nb_lines = len(file_content)
+ # We need to use / since patch on unix will fail otherwise.
+ data = io.StringIO()
+ data.write("Index: %s\n" % filename)
+ data.write('=' * 67 + '\n')
+ # Note: Should we use /dev/null instead?
+ data.write("--- %s\n" % filename)
+ data.write("+++ %s\n" % filename)
+ data.write("@@ -0,0 +1,%d @@\n" % nb_lines)
+ # Prepend '+' to every line.
+ for line in file_content:
+ data.write('+')
+ data.write(line)
+ result = data.getvalue()
+ data.close()
+ return result
def determine_scm(root):
- """Similar to upload.py's version but much simpler.
+ """Similar to upload.py's version but much simpler.
Returns 'git' or None.
"""
- if os.path.isdir(os.path.join(root, '.git')):
- return 'git'
+ if os.path.isdir(os.path.join(root, '.git')):
+ return 'git'
- try:
- subprocess2.check_call(
- ['git', 'rev-parse', '--show-cdup'],
- stdout=subprocess2.DEVNULL,
- stderr=subprocess2.DEVNULL,
- cwd=root)
- return 'git'
- except (OSError, subprocess2.CalledProcessError):
- return None
+ try:
+ subprocess2.check_call(['git', 'rev-parse', '--show-cdup'],
+ stdout=subprocess2.DEVNULL,
+ stderr=subprocess2.DEVNULL,
+ cwd=root)
+ return 'git'
+ except (OSError, subprocess2.CalledProcessError):
+ return None
def only_int(val):
- if val.isdigit():
- return int(val)
+ if val.isdigit():
+ return int(val)
- return 0
+ return 0
class GIT(object):
- current_version = None
+ current_version = None
- @staticmethod
- def ApplyEnvVars(kwargs):
- env = kwargs.pop('env', None) or os.environ.copy()
- # Don't prompt for passwords; just fail quickly and noisily.
- # By default, git will use an interactive terminal prompt when a username/
- # password is needed. That shouldn't happen in the chromium workflow,
- # and if it does, then gclient may hide the prompt in the midst of a flood
- # of terminal spew. The only indication that something has gone wrong
- # will be when gclient hangs unresponsively. Instead, we disable the
- # password prompt and simply allow git to fail noisily. The error
- # message produced by git will be copied to gclient's output.
- env.setdefault('GIT_ASKPASS', 'true')
- env.setdefault('SSH_ASKPASS', 'true')
- # 'cat' is a magical git string that disables pagers on all platforms.
- env.setdefault('GIT_PAGER', 'cat')
- return env
+ @staticmethod
+ def ApplyEnvVars(kwargs):
+ env = kwargs.pop('env', None) or os.environ.copy()
+ # Don't prompt for passwords; just fail quickly and noisily.
+ # By default, git will use an interactive terminal prompt when a
+ # username/password is needed. That shouldn't happen in the chromium
+ # workflow, and if it does, then gclient may hide the prompt in the
+ # midst of a flood of terminal spew. The only indication that something
+ # has gone wrong will be when gclient hangs unresponsively. Instead, we
+ # disable the password prompt and simply allow git to fail noisily. The
+ # error message produced by git will be copied to gclient's output.
+ env.setdefault('GIT_ASKPASS', 'true')
+ env.setdefault('SSH_ASKPASS', 'true')
+ # 'cat' is a magical git string that disables pagers on all platforms.
+ env.setdefault('GIT_PAGER', 'cat')
+ return env
- @staticmethod
- def Capture(args, cwd=None, strip_out=True, **kwargs):
- env = GIT.ApplyEnvVars(kwargs)
- output = subprocess2.check_output(
- ['git'] + args, cwd=cwd, stderr=subprocess2.PIPE, env=env, **kwargs)
- output = output.decode('utf-8', 'replace')
- return output.strip() if strip_out else output
+ @staticmethod
+ def Capture(args, cwd=None, strip_out=True, **kwargs):
+ env = GIT.ApplyEnvVars(kwargs)
+ output = subprocess2.check_output(['git'] + args,
+ cwd=cwd,
+ stderr=subprocess2.PIPE,
+ env=env,
+ **kwargs)
+ output = output.decode('utf-8', 'replace')
+ return output.strip() if strip_out else output
- @staticmethod
- def CaptureStatus(cwd, upstream_branch, end_commit=None):
- # type: (str, str, Optional[str]) -> Sequence[Tuple[str, str]]
- """Returns git status.
+ @staticmethod
+ def CaptureStatus(cwd, upstream_branch, end_commit=None):
+ # type: (str, str, Optional[str]) -> Sequence[Tuple[str, str]]
+ """Returns git status.
Returns an array of (status, file) tuples."""
- if end_commit is None:
- end_commit = ''
- if upstream_branch is None:
- upstream_branch = GIT.GetUpstreamBranch(cwd)
- if upstream_branch is None:
- raise gclient_utils.Error('Cannot determine upstream branch')
- command = [
- '-c', 'core.quotePath=false', 'diff', '--name-status', '--no-renames',
- '--ignore-submodules=all', '-r',
- '%s...%s' % (upstream_branch, end_commit)
- ]
- status = GIT.Capture(command, cwd)
- results = []
- if status:
- for statusline in status.splitlines():
- # 3-way merges can cause the status can be 'MMM' instead of 'M'. This
- # can happen when the user has 2 local branches and he diffs between
- # these 2 branches instead diffing to upstream.
- m = re.match(r'^(\w)+\t(.+)$', statusline)
- if not m:
- raise gclient_utils.Error(
- 'status currently unsupported: %s' % statusline)
- # Only grab the first letter.
- results.append(('%s ' % m.group(1)[0], m.group(2)))
- return results
+ if end_commit is None:
+ end_commit = ''
+ if upstream_branch is None:
+ upstream_branch = GIT.GetUpstreamBranch(cwd)
+ if upstream_branch is None:
+ raise gclient_utils.Error('Cannot determine upstream branch')
+ command = [
+ '-c', 'core.quotePath=false', 'diff', '--name-status',
+ '--no-renames', '--ignore-submodules=all', '-r',
+ '%s...%s' % (upstream_branch, end_commit)
+ ]
+ status = GIT.Capture(command, cwd)
+ results = []
+ if status:
+ for statusline in status.splitlines():
+ # 3-way merges can cause the status to be 'MMM' instead of 'M'.
+ # This can happen when the user has 2 local branches and diffs
+ # between them instead of diffing against upstream.
+ m = re.match(r'^(\w)+\t(.+)$', statusline)
+ if not m:
+ raise gclient_utils.Error(
+ 'status currently unsupported: %s' % statusline)
+ # Only grab the first letter.
+ results.append(('%s ' % m.group(1)[0], m.group(2)))
+ return results
- @staticmethod
- def GetConfig(cwd, key, default=None):
- try:
- return GIT.Capture(['config', key], cwd=cwd)
- except subprocess2.CalledProcessError:
- return default
+ @staticmethod
+ def GetConfig(cwd, key, default=None):
+ try:
+ return GIT.Capture(['config', key], cwd=cwd)
+ except subprocess2.CalledProcessError:
+ return default
- @staticmethod
- def GetBranchConfig(cwd, branch, key, default=None):
- assert branch, 'A branch must be given'
- key = 'branch.%s.%s' % (branch, key)
- return GIT.GetConfig(cwd, key, default)
+ @staticmethod
+ def GetBranchConfig(cwd, branch, key, default=None):
+ assert branch, 'A branch must be given'
+ key = 'branch.%s.%s' % (branch, key)
+ return GIT.GetConfig(cwd, key, default)
- @staticmethod
- def SetConfig(cwd, key, value=None):
- if value is None:
- args = ['config', '--unset', key]
- else:
- args = ['config', key, value]
- GIT.Capture(args, cwd=cwd)
+ @staticmethod
+ def SetConfig(cwd, key, value=None):
+ if value is None:
+ args = ['config', '--unset', key]
+ else:
+ args = ['config', key, value]
+ GIT.Capture(args, cwd=cwd)
- @staticmethod
- def SetBranchConfig(cwd, branch, key, value=None):
- assert branch, 'A branch must be given'
- key = 'branch.%s.%s' % (branch, key)
- GIT.SetConfig(cwd, key, value)
+ @staticmethod
+ def SetBranchConfig(cwd, branch, key, value=None):
+ assert branch, 'A branch must be given'
+ key = 'branch.%s.%s' % (branch, key)
+ GIT.SetConfig(cwd, key, value)
- @staticmethod
- def IsWorkTreeDirty(cwd):
- return GIT.Capture(['status', '-s'], cwd=cwd) != ''
+ @staticmethod
+ def IsWorkTreeDirty(cwd):
+ return GIT.Capture(['status', '-s'], cwd=cwd) != ''
- @staticmethod
- def GetEmail(cwd):
- """Retrieves the user email address if known."""
- return GIT.GetConfig(cwd, 'user.email', '')
+ @staticmethod
+ def GetEmail(cwd):
+ """Retrieves the user email address if known."""
+ return GIT.GetConfig(cwd, 'user.email', '')
- @staticmethod
- def ShortBranchName(branch):
- """Converts a name like 'refs/heads/foo' to just 'foo'."""
- return branch.replace('refs/heads/', '')
+ @staticmethod
+ def ShortBranchName(branch):
+ """Converts a name like 'refs/heads/foo' to just 'foo'."""
+ return branch.replace('refs/heads/', '')
- @staticmethod
- def GetBranchRef(cwd):
- """Returns the full branch reference, e.g. 'refs/heads/main'."""
- try:
- return GIT.Capture(['symbolic-ref', 'HEAD'], cwd=cwd)
- except subprocess2.CalledProcessError:
- return None
+ @staticmethod
+ def GetBranchRef(cwd):
+ """Returns the full branch reference, e.g. 'refs/heads/main'."""
+ try:
+ return GIT.Capture(['symbolic-ref', 'HEAD'], cwd=cwd)
+ except subprocess2.CalledProcessError:
+ return None
- @staticmethod
- def GetRemoteHeadRef(cwd, url, remote):
- """Returns the full default remote branch reference, e.g.
+ @staticmethod
+ def GetRemoteHeadRef(cwd, url, remote):
+ """Returns the full default remote branch reference, e.g.
'refs/remotes/origin/main'."""
- if os.path.exists(cwd):
- try:
- # Try using local git copy first
- ref = 'refs/remotes/%s/HEAD' % remote
- ref = GIT.Capture(['symbolic-ref', ref], cwd=cwd)
- if not ref.endswith('master'):
- return ref
- # Check if there are changes in the default branch for this particular
- # repository.
- GIT.Capture(['remote', 'set-head', '-a', remote], cwd=cwd)
- return GIT.Capture(['symbolic-ref', ref], cwd=cwd)
- except subprocess2.CalledProcessError:
- pass
+ if os.path.exists(cwd):
+ try:
+ # Try using local git copy first
+ ref = 'refs/remotes/%s/HEAD' % remote
+ ref = GIT.Capture(['symbolic-ref', ref], cwd=cwd)
+ if not ref.endswith('master'):
+ return ref
+ # Check if there are changes in the default branch for this
+ # particular repository.
+ GIT.Capture(['remote', 'set-head', '-a', remote], cwd=cwd)
+ return GIT.Capture(['symbolic-ref', ref], cwd=cwd)
+ except subprocess2.CalledProcessError:
+ pass
- try:
- # Fetch information from git server
- resp = GIT.Capture(['ls-remote', '--symref', url, 'HEAD'])
- regex = r'^ref: (.*)\tHEAD$'
- for line in resp.split('\n'):
- m = re.match(regex, line)
- if m:
- return ''.join(GIT.RefToRemoteRef(m.group(1), remote))
- except subprocess2.CalledProcessError:
- pass
- # Return default branch
- return 'refs/remotes/%s/main' % remote
+ try:
+ # Fetch information from git server
+ resp = GIT.Capture(['ls-remote', '--symref', url, 'HEAD'])
+ regex = r'^ref: (.*)\tHEAD$'
+ for line in resp.split('\n'):
+ m = re.match(regex, line)
+ if m:
+ return ''.join(GIT.RefToRemoteRef(m.group(1), remote))
+ except subprocess2.CalledProcessError:
+ pass
+ # Return default branch
+ return 'refs/remotes/%s/main' % remote
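
For reference, a standalone sketch of the ls-remote fallback above; the sample
response is fabricated, but the regex is the one used in GetRemoteHeadRef:

import re

sample = ('ref: refs/heads/main\tHEAD\n'
          '4b825dc642cb6eb9a060e54bf8d69288fbee4904\tHEAD')
for line in sample.split('\n'):
    m = re.match(r'^ref: (.*)\tHEAD$', line)
    if m:
        print(m.group(1))  # refs/heads/main
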
- @staticmethod
- def GetBranch(cwd):
- """Returns the short branch name, e.g. 'main'."""
- branchref = GIT.GetBranchRef(cwd)
- if branchref:
- return GIT.ShortBranchName(branchref)
- return None
+ @staticmethod
+ def GetBranch(cwd):
+ """Returns the short branch name, e.g. 'main'."""
+ branchref = GIT.GetBranchRef(cwd)
+ if branchref:
+ return GIT.ShortBranchName(branchref)
+ return None
- @staticmethod
- def GetRemoteBranches(cwd):
- return GIT.Capture(['branch', '-r'], cwd=cwd).split()
+ @staticmethod
+ def GetRemoteBranches(cwd):
+ return GIT.Capture(['branch', '-r'], cwd=cwd).split()
- @staticmethod
- def FetchUpstreamTuple(cwd, branch=None):
- """Returns a tuple containing remote and remote ref,
+ @staticmethod
+ def FetchUpstreamTuple(cwd, branch=None):
+ """Returns a tuple containing remote and remote ref,
e.g. 'origin', 'refs/heads/main'
"""
- try:
- branch = branch or GIT.GetBranch(cwd)
- except subprocess2.CalledProcessError:
- pass
- if branch:
- upstream_branch = GIT.GetBranchConfig(cwd, branch, 'merge')
- if upstream_branch:
- remote = GIT.GetBranchConfig(cwd, branch, 'remote', '.')
- return remote, upstream_branch
+ try:
+ branch = branch or GIT.GetBranch(cwd)
+ except subprocess2.CalledProcessError:
+ pass
+ if branch:
+ upstream_branch = GIT.GetBranchConfig(cwd, branch, 'merge')
+ if upstream_branch:
+ remote = GIT.GetBranchConfig(cwd, branch, 'remote', '.')
+ return remote, upstream_branch
- upstream_branch = GIT.GetConfig(cwd, 'rietveld.upstream-branch')
- if upstream_branch:
- remote = GIT.GetConfig(cwd, 'rietveld.upstream-remote', '.')
- return remote, upstream_branch
+ upstream_branch = GIT.GetConfig(cwd, 'rietveld.upstream-branch')
+ if upstream_branch:
+ remote = GIT.GetConfig(cwd, 'rietveld.upstream-remote', '.')
+ return remote, upstream_branch
- # Else, try to guess the origin remote.
- remote_branches = GIT.GetRemoteBranches(cwd)
- if 'origin/main' in remote_branches:
- # Fall back on origin/main if it exits.
- return 'origin', 'refs/heads/main'
+ # Else, try to guess the origin remote.
+ remote_branches = GIT.GetRemoteBranches(cwd)
+ if 'origin/main' in remote_branches:
+            # Fall back on origin/main if it exists.
+ return 'origin', 'refs/heads/main'
- if 'origin/master' in remote_branches:
- # Fall back on origin/master if it exits.
- return 'origin', 'refs/heads/master'
+ if 'origin/master' in remote_branches:
+            # Fall back on origin/master if it exists.
+ return 'origin', 'refs/heads/master'
- return None, None
+ return None, None
- @staticmethod
- def RefToRemoteRef(ref, remote):
- """Convert a checkout ref to the equivalent remote ref.
+ @staticmethod
+ def RefToRemoteRef(ref, remote):
+ """Convert a checkout ref to the equivalent remote ref.
Returns:
A tuple of the remote ref's (common prefix, unique suffix), or None if it
doesn't appear to refer to a remote ref (e.g. it's a commit hash).
"""
- # TODO(mmoss): This is just a brute-force mapping based of the expected git
- # config. It's a bit better than the even more brute-force replace('heads',
- # ...), but could still be smarter (like maybe actually using values gleaned
- # from the git config).
- m = re.match('^(refs/(remotes/)?)?branch-heads/', ref or '')
- if m:
- return ('refs/remotes/branch-heads/', ref.replace(m.group(0), ''))
+        # TODO(mmoss): This is just a brute-force mapping based on the expected
+ # git config. It's a bit better than the even more brute-force
+ # replace('heads', ...), but could still be smarter (like maybe actually
+ # using values gleaned from the git config).
+ m = re.match('^(refs/(remotes/)?)?branch-heads/', ref or '')
+ if m:
+ return ('refs/remotes/branch-heads/', ref.replace(m.group(0), ''))
- m = re.match('^((refs/)?remotes/)?%s/|(refs/)?heads/' % remote, ref or '')
- if m:
- return ('refs/remotes/%s/' % remote, ref.replace(m.group(0), ''))
+ m = re.match('^((refs/)?remotes/)?%s/|(refs/)?heads/' % remote, ref
+ or '')
+ if m:
+ return ('refs/remotes/%s/' % remote, ref.replace(m.group(0), ''))
- return None
+ return None
- @staticmethod
- def RemoteRefToRef(ref, remote):
- assert remote, 'A remote must be given'
- if not ref or not ref.startswith('refs/'):
- return None
- if not ref.startswith('refs/remotes/'):
- return ref
- if ref.startswith('refs/remotes/branch-heads/'):
- return 'refs' + ref[len('refs/remotes'):]
- if ref.startswith('refs/remotes/%s/' % remote):
- return 'refs/heads' + ref[len('refs/remotes/%s' % remote):]
- return None
+ @staticmethod
+ def RemoteRefToRef(ref, remote):
+ assert remote, 'A remote must be given'
+ if not ref or not ref.startswith('refs/'):
+ return None
+ if not ref.startswith('refs/remotes/'):
+ return ref
+ if ref.startswith('refs/remotes/branch-heads/'):
+ return 'refs' + ref[len('refs/remotes'):]
+ if ref.startswith('refs/remotes/%s/' % remote):
+ return 'refs/heads' + ref[len('refs/remotes/%s' % remote):]
+ return None
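
A standalone sketch of the forward mapping above, reusing the same regexes;
the remote name 'origin' and the sample refs are illustrative:

import re

def ref_to_remote_ref(ref, remote):
    # Mirrors GIT.RefToRemoteRef: returns (common prefix, unique suffix).
    m = re.match('^(refs/(remotes/)?)?branch-heads/', ref or '')
    if m:
        return ('refs/remotes/branch-heads/', ref.replace(m.group(0), ''))
    m = re.match('^((refs/)?remotes/)?%s/|(refs/)?heads/' % remote, ref or '')
    if m:
        return ('refs/remotes/%s/' % remote, ref.replace(m.group(0), ''))
    return None

print(ref_to_remote_ref('refs/heads/main', 'origin'))
# ('refs/remotes/origin/', 'main')
print(ref_to_remote_ref('branch-heads/5005', 'origin'))
# ('refs/remotes/branch-heads/', '5005')
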
- @staticmethod
- def GetUpstreamBranch(cwd):
- """Gets the current branch's upstream branch."""
- remote, upstream_branch = GIT.FetchUpstreamTuple(cwd)
- if remote != '.' and upstream_branch:
- remote_ref = GIT.RefToRemoteRef(upstream_branch, remote)
- if remote_ref:
- upstream_branch = ''.join(remote_ref)
- return upstream_branch
+ @staticmethod
+ def GetUpstreamBranch(cwd):
+ """Gets the current branch's upstream branch."""
+ remote, upstream_branch = GIT.FetchUpstreamTuple(cwd)
+ if remote != '.' and upstream_branch:
+ remote_ref = GIT.RefToRemoteRef(upstream_branch, remote)
+ if remote_ref:
+ upstream_branch = ''.join(remote_ref)
+ return upstream_branch
- @staticmethod
- def IsAncestor(maybe_ancestor, ref, cwd=None):
- # type: (string, string, Optional[string]) -> bool
- """Verifies if |maybe_ancestor| is an ancestor of |ref|."""
- try:
- GIT.Capture(['merge-base', '--is-ancestor', maybe_ancestor, ref], cwd=cwd)
- return True
- except subprocess2.CalledProcessError:
- return False
+ @staticmethod
+ def IsAncestor(maybe_ancestor, ref, cwd=None):
+    # type: (str, str, Optional[str]) -> bool
+ """Verifies if |maybe_ancestor| is an ancestor of |ref|."""
+ try:
+ GIT.Capture(['merge-base', '--is-ancestor', maybe_ancestor, ref],
+ cwd=cwd)
+ return True
+ except subprocess2.CalledProcessError:
+ return False
- @staticmethod
- def GetOldContents(cwd, filename, branch=None):
- if not branch:
- branch = GIT.GetUpstreamBranch(cwd)
- if platform.system() == 'Windows':
- # git show : wants a posix path.
- filename = filename.replace('\\', '/')
- command = ['show', '%s:%s' % (branch, filename)]
- try:
- return GIT.Capture(command, cwd=cwd, strip_out=False)
- except subprocess2.CalledProcessError:
- return ''
+ @staticmethod
+ def GetOldContents(cwd, filename, branch=None):
+ if not branch:
+ branch = GIT.GetUpstreamBranch(cwd)
+ if platform.system() == 'Windows':
+ # git show : wants a posix path.
+ filename = filename.replace('\\', '/')
+ command = ['show', '%s:%s' % (branch, filename)]
+ try:
+ return GIT.Capture(command, cwd=cwd, strip_out=False)
+ except subprocess2.CalledProcessError:
+ return ''
- @staticmethod
- def GenerateDiff(cwd, branch=None, branch_head='HEAD', full_move=False,
- files=None):
- """Diffs against the upstream branch or optionally another branch.
+ @staticmethod
+ def GenerateDiff(cwd,
+ branch=None,
+ branch_head='HEAD',
+ full_move=False,
+ files=None):
+ """Diffs against the upstream branch or optionally another branch.
full_move means that move or copy operations should completely recreate the
files, usually in the prospect to apply the patch for a try job."""
- if not branch:
- branch = GIT.GetUpstreamBranch(cwd)
- command = ['-c', 'core.quotePath=false', 'diff',
- '-p', '--no-color', '--no-prefix', '--no-ext-diff',
- branch + "..." + branch_head]
- if full_move:
- command.append('--no-renames')
- else:
- command.append('-C')
- # TODO(maruel): --binary support.
- if files:
- command.append('--')
- command.extend(files)
- diff = GIT.Capture(command, cwd=cwd, strip_out=False).splitlines(True)
- for i in range(len(diff)):
- # In the case of added files, replace /dev/null with the path to the
- # file being added.
- if diff[i].startswith('--- /dev/null'):
- diff[i] = '--- %s' % diff[i+1][4:]
- return ''.join(diff)
+ if not branch:
+ branch = GIT.GetUpstreamBranch(cwd)
+ command = [
+ '-c', 'core.quotePath=false', 'diff', '-p', '--no-color',
+ '--no-prefix', '--no-ext-diff', branch + "..." + branch_head
+ ]
+ if full_move:
+ command.append('--no-renames')
+ else:
+ command.append('-C')
+ # TODO(maruel): --binary support.
+ if files:
+ command.append('--')
+ command.extend(files)
+ diff = GIT.Capture(command, cwd=cwd, strip_out=False).splitlines(True)
+ for i in range(len(diff)):
+ # In the case of added files, replace /dev/null with the path to the
+ # file being added.
+ if diff[i].startswith('--- /dev/null'):
+ diff[i] = '--- %s' % diff[i + 1][4:]
+ return ''.join(diff)
- @staticmethod
- def GetDifferentFiles(cwd, branch=None, branch_head='HEAD'):
- """Returns the list of modified files between two branches."""
- if not branch:
- branch = GIT.GetUpstreamBranch(cwd)
- command = ['-c', 'core.quotePath=false', 'diff',
- '--name-only', branch + "..." + branch_head]
- return GIT.Capture(command, cwd=cwd).splitlines(False)
+ @staticmethod
+ def GetDifferentFiles(cwd, branch=None, branch_head='HEAD'):
+ """Returns the list of modified files between two branches."""
+ if not branch:
+ branch = GIT.GetUpstreamBranch(cwd)
+ command = [
+ '-c', 'core.quotePath=false', 'diff', '--name-only',
+ branch + "..." + branch_head
+ ]
+ return GIT.Capture(command, cwd=cwd).splitlines(False)
- @staticmethod
- def GetAllFiles(cwd):
- """Returns the list of all files under revision control."""
- command = ['-c', 'core.quotePath=false', 'ls-files', '-s', '--', '.']
- files = GIT.Capture(command, cwd=cwd).splitlines(False)
- # return only files
- return [f.split(maxsplit=3)[-1] for f in files if f.startswith('100')]
+ @staticmethod
+ def GetAllFiles(cwd):
+ """Returns the list of all files under revision control."""
+ command = ['-c', 'core.quotePath=false', 'ls-files', '-s', '--', '.']
+ files = GIT.Capture(command, cwd=cwd).splitlines(False)
+ # return only files
+ return [f.split(maxsplit=3)[-1] for f in files if f.startswith('100')]
- @staticmethod
- def GetSubmoduleCommits(cwd, submodules):
- # type: (string, List[string]) => Mapping[string][string]
- """Returns a mapping of staged or committed new commits for submodules."""
- if not submodules:
- return {}
- result = subprocess2.check_output(['git', 'ls-files', '-s', '--'] +
- submodules,
- cwd=cwd).decode('utf-8')
- commit_hashes = {}
- for r in result.splitlines():
-      # ['<mode>', '<commit hash>', '<stage>', '<path>'].
- record = r.strip().split(maxsplit=3) # path can contain spaces.
- assert record[0] == '160000', 'file is not a gitlink: %s' % record
- commit_hashes[record[3]] = record[1]
- return commit_hashes
+ @staticmethod
+ def GetSubmoduleCommits(cwd, submodules):
+    # type: (str, List[str]) -> Mapping[str, str]
+ """Returns a mapping of staged or committed new commits for submodules."""
+ if not submodules:
+ return {}
+ result = subprocess2.check_output(['git', 'ls-files', '-s', '--'] +
+ submodules,
+ cwd=cwd).decode('utf-8')
+ commit_hashes = {}
+ for r in result.splitlines():
+            # ['<mode>', '<commit hash>', '<stage>', '<path>'].
+ record = r.strip().split(maxsplit=3) # path can contain spaces.
+ assert record[0] == '160000', 'file is not a gitlink: %s' % record
+ commit_hashes[record[3]] = record[1]
+ return commit_hashes
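
A standalone sketch of the record parsing above; the sample line is fabricated
but follows the `git ls-files -s` layout ('<mode> <object> <stage>\t<path>'),
where mode 160000 marks a gitlink:

sample = '160000 0123456789abcdef0123456789abcdef01234567 0\tthird_party/foo'
commit_hashes = {}
for r in sample.splitlines():
    record = r.strip().split(maxsplit=3)  # path may contain spaces
    assert record[0] == '160000', 'file is not a gitlink: %s' % record
    commit_hashes[record[3]] = record[1]
print(commit_hashes)
# {'third_party/foo': '0123456789abcdef0123456789abcdef01234567'}
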
- @staticmethod
- def GetPatchName(cwd):
- """Constructs a name for this patch."""
- short_sha = GIT.Capture(['rev-parse', '--short=4', 'HEAD'], cwd=cwd)
- return "%s#%s" % (GIT.GetBranch(cwd), short_sha)
+ @staticmethod
+ def GetPatchName(cwd):
+ """Constructs a name for this patch."""
+ short_sha = GIT.Capture(['rev-parse', '--short=4', 'HEAD'], cwd=cwd)
+ return "%s#%s" % (GIT.GetBranch(cwd), short_sha)
- @staticmethod
- def GetCheckoutRoot(cwd):
- """Returns the top level directory of a git checkout as an absolute path.
+ @staticmethod
+ def GetCheckoutRoot(cwd):
+ """Returns the top level directory of a git checkout as an absolute path.
"""
- root = GIT.Capture(['rev-parse', '--show-cdup'], cwd=cwd)
- return os.path.abspath(os.path.join(cwd, root))
+ root = GIT.Capture(['rev-parse', '--show-cdup'], cwd=cwd)
+ return os.path.abspath(os.path.join(cwd, root))
- @staticmethod
- def GetGitDir(cwd):
- return os.path.abspath(GIT.Capture(['rev-parse', '--git-dir'], cwd=cwd))
+ @staticmethod
+ def GetGitDir(cwd):
+ return os.path.abspath(GIT.Capture(['rev-parse', '--git-dir'], cwd=cwd))
- @staticmethod
- def IsInsideWorkTree(cwd):
- try:
- return GIT.Capture(['rev-parse', '--is-inside-work-tree'], cwd=cwd)
- except (OSError, subprocess2.CalledProcessError):
- return False
+ @staticmethod
+ def IsInsideWorkTree(cwd):
+ try:
+ return GIT.Capture(['rev-parse', '--is-inside-work-tree'], cwd=cwd)
+ except (OSError, subprocess2.CalledProcessError):
+ return False
- @staticmethod
- def IsDirectoryVersioned(cwd, relative_dir):
- """Checks whether the given |relative_dir| is part of cwd's repo."""
- return bool(GIT.Capture(['ls-tree', 'HEAD', relative_dir], cwd=cwd))
+ @staticmethod
+ def IsDirectoryVersioned(cwd, relative_dir):
+ """Checks whether the given |relative_dir| is part of cwd's repo."""
+ return bool(GIT.Capture(['ls-tree', 'HEAD', relative_dir], cwd=cwd))
- @staticmethod
- def CleanupDir(cwd, relative_dir):
- """Cleans up untracked file inside |relative_dir|."""
- return bool(GIT.Capture(['clean', '-df', relative_dir], cwd=cwd))
+ @staticmethod
+ def CleanupDir(cwd, relative_dir):
+ """Cleans up untracked file inside |relative_dir|."""
+ return bool(GIT.Capture(['clean', '-df', relative_dir], cwd=cwd))
- @staticmethod
- def ResolveCommit(cwd, rev):
- # We do this instead of rev-parse --verify rev^{commit}, since on Windows
- # git can be either an executable or batch script, each of which requires
- # escaping the caret (^) a different way.
- if gclient_utils.IsFullGitSha(rev):
- # git-rev parse --verify FULL_GIT_SHA always succeeds, even if we don't
- # have FULL_GIT_SHA locally. Removing the last character forces git to
- # check if FULL_GIT_SHA refers to an object in the local database.
- rev = rev[:-1]
- try:
- return GIT.Capture(['rev-parse', '--quiet', '--verify', rev], cwd=cwd)
- except subprocess2.CalledProcessError:
- return None
+ @staticmethod
+ def ResolveCommit(cwd, rev):
+ # We do this instead of rev-parse --verify rev^{commit}, since on
+ # Windows git can be either an executable or batch script, each of which
+ # requires escaping the caret (^) a different way.
+ if gclient_utils.IsFullGitSha(rev):
+            # git rev-parse --verify FULL_GIT_SHA always succeeds, even if we
+ # don't have FULL_GIT_SHA locally. Removing the last character
+ # forces git to check if FULL_GIT_SHA refers to an object in the
+ # local database.
+ rev = rev[:-1]
+ try:
+ return GIT.Capture(['rev-parse', '--quiet', '--verify', rev],
+ cwd=cwd)
+ except subprocess2.CalledProcessError:
+ return None
- @staticmethod
- def IsValidRevision(cwd, rev, sha_only=False):
- """Verifies the revision is a proper git revision.
+ @staticmethod
+ def IsValidRevision(cwd, rev, sha_only=False):
+ """Verifies the revision is a proper git revision.
sha_only: Fail unless rev is a sha hash.
"""
- sha = GIT.ResolveCommit(cwd, rev)
- if sha is None:
- return False
- if sha_only:
- return sha == rev.lower()
- return True
+ sha = GIT.ResolveCommit(cwd, rev)
+ if sha is None:
+ return False
+ if sha_only:
+ return sha == rev.lower()
+ return True
- @classmethod
- def AssertVersion(cls, min_version):
- """Asserts git's version is at least min_version."""
- if cls.current_version is None:
- current_version = cls.Capture(['--version'], '.')
- matched = re.search(r'git version (.+)', current_version)
- cls.current_version = distutils.version.LooseVersion(matched.group(1))
- min_version = distutils.version.LooseVersion(min_version)
- return (min_version <= cls.current_version, cls.current_version)
+ @classmethod
+ def AssertVersion(cls, min_version):
+ """Asserts git's version is at least min_version."""
+ if cls.current_version is None:
+ current_version = cls.Capture(['--version'], '.')
+ matched = re.search(r'git version (.+)', current_version)
+ cls.current_version = distutils.version.LooseVersion(
+ matched.group(1))
+ min_version = distutils.version.LooseVersion(min_version)
+ return (min_version <= cls.current_version, cls.current_version)
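
A rough standalone equivalent of the version gate above, using a plain tuple
comparison instead of distutils.version.LooseVersion (deprecated in newer
Python); the version strings are examples:

import re

def meets_min_version(version_output, min_version):
    matched = re.search(r'git version (\d+(?:\.\d+)*)', version_output)
    current = tuple(int(x) for x in matched.group(1).split('.'))
    wanted = tuple(int(x) for x in min_version.split('.'))
    return current >= wanted, matched.group(1)

print(meets_min_version('git version 2.42.0', '2.26'))  # (True, '2.42.0')
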
diff --git a/setup_color.py b/setup_color.py
index 40e96b5f6e..af039c82ad 100644
--- a/setup_color.py
+++ b/setup_color.py
@@ -16,115 +16,120 @@ OUT_TYPE = 'unknown'
def enable_native_ansi():
- """Enables native ANSI sequences in console. Windows 10 only.
+ """Enables native ANSI sequences in console. Windows 10 only.
Returns whether successful.
"""
- kernel32 = ctypes.windll.kernel32
- ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x04
+ kernel32 = ctypes.windll.kernel32
+ ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x04
- out_handle = kernel32.GetStdHandle(subprocess.STD_OUTPUT_HANDLE)
+ out_handle = kernel32.GetStdHandle(subprocess.STD_OUTPUT_HANDLE)
- # GetConsoleMode fails if the terminal isn't native.
- mode = ctypes.wintypes.DWORD()
- if kernel32.GetConsoleMode(out_handle, ctypes.byref(mode)) == 0:
- return False
+ # GetConsoleMode fails if the terminal isn't native.
+ mode = ctypes.wintypes.DWORD()
+ if kernel32.GetConsoleMode(out_handle, ctypes.byref(mode)) == 0:
+ return False
- if not (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING):
- if kernel32.SetConsoleMode(
- out_handle, mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) == 0:
- print(
- 'kernel32.SetConsoleMode to enable ANSI sequences failed',
- file=sys.stderr)
- return False
+ if not (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING):
+ if kernel32.SetConsoleMode(
+ out_handle,
+ mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) == 0:
+ print('kernel32.SetConsoleMode to enable ANSI sequences failed',
+ file=sys.stderr)
+ return False
- return True
+ return True
def init():
- # should_wrap instructs colorama to wrap stdout/stderr with an ANSI colorcode
- # interpreter that converts them to SetConsoleTextAttribute calls. This only
- # should be True in cases where we're connected to cmd.exe's console. Setting
- # this to True on non-windows systems has no effect.
- should_wrap = False
- global IS_TTY, OUT_TYPE
- IS_TTY = sys.stdout.isatty()
- is_windows = sys.platform.startswith('win')
- if IS_TTY:
- # Yay! We detected a console in the normal way. It doesn't really matter
- # if it's windows or not, we win.
- OUT_TYPE = 'console'
- should_wrap = True
- elif is_windows:
- # assume this is some sort of file
- OUT_TYPE = 'file (win)'
+ # should_wrap instructs colorama to wrap stdout/stderr with an ANSI
+ # colorcode interpreter that converts them to SetConsoleTextAttribute calls.
+ # This only should be True in cases where we're connected to cmd.exe's
+ # console. Setting this to True on non-windows systems has no effect.
+ should_wrap = False
+ global IS_TTY, OUT_TYPE
+ IS_TTY = sys.stdout.isatty()
+ is_windows = sys.platform.startswith('win')
+ if IS_TTY:
+ # Yay! We detected a console in the normal way. It doesn't really matter
+ # if it's windows or not, we win.
+ OUT_TYPE = 'console'
+ should_wrap = True
+ elif is_windows:
+ # assume this is some sort of file
+ OUT_TYPE = 'file (win)'
- import msvcrt
- h = msvcrt.get_osfhandle(sys.stdout.fileno())
- # h is the win32 HANDLE for stdout.
- ftype = ctypes.windll.kernel32.GetFileType(h)
- if ftype == 2: # FILE_TYPE_CHAR
- # This is a normal cmd console, but we'll only get here if we're running
- # inside a `git command` which is actually git->bash->command. Not sure
- # why isatty doesn't detect this case.
- OUT_TYPE = 'console (cmd via msys)'
- IS_TTY = True
- should_wrap = True
- elif ftype == 3: # FILE_TYPE_PIPE
- OUT_TYPE = 'pipe (win)'
- # This is some kind of pipe on windows. This could either be a real pipe
- # or this could be msys using a pipe to emulate a pty. We use the same
- # algorithm that msys-git uses to determine if it's connected to a pty or
- # not.
+ import msvcrt
+ h = msvcrt.get_osfhandle(sys.stdout.fileno())
+ # h is the win32 HANDLE for stdout.
+ ftype = ctypes.windll.kernel32.GetFileType(h)
+ if ftype == 2: # FILE_TYPE_CHAR
+ # This is a normal cmd console, but we'll only get here if we're
+ # running inside a `git command` which is actually
+ # git->bash->command. Not sure why isatty doesn't detect this case.
+ OUT_TYPE = 'console (cmd via msys)'
+ IS_TTY = True
+ should_wrap = True
+ elif ftype == 3: # FILE_TYPE_PIPE
+ OUT_TYPE = 'pipe (win)'
- # This function and the structures are defined in the MSDN documentation
- # using the same names.
- def NT_SUCCESS(status):
- # The first two bits of status are the severity. The success
- # severities are 0 and 1, and the !success severities are 2 and 3.
- # Therefore since ctypes interprets the default restype of the call
- # to be an 'C int' (which is guaranteed to be signed 32 bits), All
- # success codes are positive, and all !success codes are negative.
- return status >= 0
+ # This is some kind of pipe on windows. This could either be a real
+ # pipe or this could be msys using a pipe to emulate a pty. We use
+ # the same algorithm that msys-git uses to determine if it's
+ # connected to a pty or not.
- class UNICODE_STRING(ctypes.Structure):
- _fields_ = [('Length', ctypes.c_ushort),
- ('MaximumLength', ctypes.c_ushort),
- ('Buffer', ctypes.c_wchar_p)]
+ # This function and the structures are defined in the MSDN
+ # documentation using the same names.
+ def NT_SUCCESS(status):
+ # The first two bits of status are the severity. The success
+ # severities are 0 and 1, and the !success severities are 2 and
+                # 3. Therefore, since ctypes interprets the default restype
+                # of the call to be a 'C int' (which is guaranteed to be a
+                # signed 32-bit value), all success codes are positive and
+                # all !success codes are negative.
+ return status >= 0
- class OBJECT_NAME_INFORMATION(ctypes.Structure):
- _fields_ = [('Name', UNICODE_STRING),
- ('NameBuffer', ctypes.c_wchar_p)]
+ class UNICODE_STRING(ctypes.Structure):
+ _fields_ = [('Length', ctypes.c_ushort),
+ ('MaximumLength', ctypes.c_ushort),
+ ('Buffer', ctypes.c_wchar_p)]
- buf = ctypes.create_string_buffer(1024)
- # Ask NT what the name of the object our stdout HANDLE is. It would be
- # possible to use GetFileInformationByHandleEx, but it's only available
- # on Vista+. If you're reading this in 2017 or later, feel free to
- # refactor this out.
- #
- # The '1' here is ObjectNameInformation
- if NT_SUCCESS(ctypes.windll.ntdll.NtQueryObject(h, 1, buf, len(buf)-2,
- None)):
- out = OBJECT_NAME_INFORMATION.from_buffer(buf)
- name = out.Name.Buffer.split('\\')[-1]
- IS_TTY = name.startswith('msys-') and '-pty' in name
- if IS_TTY:
- OUT_TYPE = 'bash (msys)'
+ class OBJECT_NAME_INFORMATION(ctypes.Structure):
+ _fields_ = [('Name', UNICODE_STRING),
+ ('NameBuffer', ctypes.c_wchar_p)]
+
+ buf = ctypes.create_string_buffer(1024)
+ # Ask NT what the name of the object our stdout HANDLE is. It would
+ # be possible to use GetFileInformationByHandleEx, but it's only
+ # available on Vista+. If you're reading this in 2017 or later, feel
+ # free to refactor this out.
+ #
+ # The '1' here is ObjectNameInformation
+ if NT_SUCCESS(
+ ctypes.windll.ntdll.NtQueryObject(h, 1, buf,
+ len(buf) - 2, None)):
+ out = OBJECT_NAME_INFORMATION.from_buffer(buf)
+ name = out.Name.Buffer.split('\\')[-1]
+ IS_TTY = name.startswith('msys-') and '-pty' in name
+ if IS_TTY:
+ OUT_TYPE = 'bash (msys)'
+ else:
+ # A normal file, or an unknown file type.
+ pass
else:
- # A normal file, or an unknown file type.
- pass
- else:
- # This is non-windows, so we trust isatty.
- OUT_TYPE = 'pipe or file'
+ # This is non-windows, so we trust isatty.
+ OUT_TYPE = 'pipe or file'
- if IS_TTY and is_windows:
- # Wrapping may cause errors on some Windows versions (crbug.com/1114548).
- if platform.release() != '10' or enable_native_ansi():
- should_wrap = False
+ if IS_TTY and is_windows:
+ # Wrapping may cause errors on some Windows versions
+ # (crbug.com/1114548).
+ if platform.release() != '10' or enable_native_ansi():
+ should_wrap = False
+
+ colorama.init(wrap=should_wrap)
- colorama.init(wrap=should_wrap)
if __name__ == '__main__':
- init()
- print('IS_TTY:', IS_TTY)
- print('OUT_TYPE:', OUT_TYPE)
+ init()
+ print('IS_TTY:', IS_TTY)
+ print('OUT_TYPE:', OUT_TYPE)
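
A standalone sketch of the msys pty heuristic used in init(): an msys pty
shows up on Windows as a pipe whose NT object name contains 'msys-' and
'-pty'. The sample pipe name is fabricated:

def looks_like_msys_pty(object_name):
    name = object_name.split('\\')[-1]
    return name.startswith('msys-') and '-pty' in name

print(looks_like_msys_pty(
    r'\Device\NamedPipe\msys-1888ae32e00d56aa-pty0-to-master'))  # True
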
diff --git a/siso.py b/siso.py
index eaa660a8ab..fb73451f92 100644
--- a/siso.py
+++ b/siso.py
@@ -15,68 +15,72 @@ import gclient_paths
def main(args):
- # On Windows the siso.bat script passes along the arguments enclosed in
- # double quotes. This prevents multiple levels of parsing of the special '^'
- # characters needed when compiling a single file. When this case is detected,
- # we need to split the argument. This means that arguments containing actual
- # spaces are not supported by siso.bat, but that is not a real limitation.
- if sys.platform.startswith('win') and len(args) == 2:
- args = args[:1] + args[1].split()
+ # On Windows the siso.bat script passes along the arguments enclosed in
+ # double quotes. This prevents multiple levels of parsing of the special '^'
+ # characters needed when compiling a single file. When this case is
+ # detected, we need to split the argument. This means that arguments
+ # containing actual spaces are not supported by siso.bat, but that is not a
+ # real limitation.
+ if sys.platform.startswith('win') and len(args) == 2:
+ args = args[:1] + args[1].split()
- # macOS's python sets CPATH, LIBRARY_PATH, SDKROOT implicitly.
- # https://openradar.appspot.com/radar?id=5608755232243712
- #
- # Removing those environment variables to avoid affecting clang's behaviors.
- if sys.platform == 'darwin':
- os.environ.pop("CPATH", None)
- os.environ.pop("LIBRARY_PATH", None)
- os.environ.pop("SDKROOT", None)
+ # macOS's python sets CPATH, LIBRARY_PATH, SDKROOT implicitly.
+ # https://openradar.appspot.com/radar?id=5608755232243712
+ #
+ # Removing those environment variables to avoid affecting clang's behaviors.
+ if sys.platform == 'darwin':
+ os.environ.pop("CPATH", None)
+ os.environ.pop("LIBRARY_PATH", None)
+ os.environ.pop("SDKROOT", None)
- environ = os.environ.copy()
+ environ = os.environ.copy()
- # Get gclient root + src.
- primary_solution_path = gclient_paths.GetPrimarySolutionPath()
- gclient_root_path = gclient_paths.FindGclientRoot(os.getcwd())
- gclient_src_root_path = None
- if gclient_root_path:
- gclient_src_root_path = os.path.join(gclient_root_path, 'src')
+ # Get gclient root + src.
+ primary_solution_path = gclient_paths.GetPrimarySolutionPath()
+ gclient_root_path = gclient_paths.FindGclientRoot(os.getcwd())
+ gclient_src_root_path = None
+ if gclient_root_path:
+ gclient_src_root_path = os.path.join(gclient_root_path, 'src')
- siso_override_path = os.environ.get('SISO_PATH')
- if siso_override_path:
- print('depot_tools/siso.py: Using Siso binary from SISO_PATH: %s.' %
- siso_override_path)
- if not os.path.isfile(siso_override_path):
- print('depot_tools/siso.py: Could not find Siso at provided SISO_PATH.',
- file=sys.stderr)
- return 1
+ siso_override_path = os.environ.get('SISO_PATH')
+ if siso_override_path:
+ print('depot_tools/siso.py: Using Siso binary from SISO_PATH: %s.' %
+ siso_override_path)
+ if not os.path.isfile(siso_override_path):
+ print(
+ 'depot_tools/siso.py: Could not find Siso at provided '
+ 'SISO_PATH.',
+ file=sys.stderr)
+ return 1
- for base_path in set(
- [primary_solution_path, gclient_root_path, gclient_src_root_path]):
- if not base_path:
- continue
- env = environ.copy()
- sisoenv_path = os.path.join(base_path, 'build', 'config', 'siso',
- '.sisoenv')
- if not os.path.exists(sisoenv_path):
- continue
- with open(sisoenv_path) as f:
- for line in f.readlines():
- k, v = line.rstrip().split('=', 1)
- env[k] = v
- siso_path = siso_override_path or os.path.join(
- base_path, 'third_party', 'siso', 'siso' + gclient_paths.GetExeSuffix())
- if os.path.isfile(siso_path):
- return subprocess.call([siso_path] + args[1:], env=env)
+ for base_path in set(
+ [primary_solution_path, gclient_root_path, gclient_src_root_path]):
+ if not base_path:
+ continue
+ env = environ.copy()
+ sisoenv_path = os.path.join(base_path, 'build', 'config', 'siso',
+ '.sisoenv')
+ if not os.path.exists(sisoenv_path):
+ continue
+ with open(sisoenv_path) as f:
+ for line in f.readlines():
+ k, v = line.rstrip().split('=', 1)
+ env[k] = v
+ siso_path = siso_override_path or os.path.join(
+ base_path, 'third_party', 'siso',
+ 'siso' + gclient_paths.GetExeSuffix())
+ if os.path.isfile(siso_path):
+ return subprocess.call([siso_path] + args[1:], env=env)
- print(
- 'depot_tools/siso.py: Could not find .sisoenv under build/config/siso of '
- 'the current project. Did you run gclient sync?',
- file=sys.stderr)
- return 1
+ print(
+ 'depot_tools/siso.py: Could not find .sisoenv under build/config/siso '
+ 'of the current project. Did you run gclient sync?',
+ file=sys.stderr)
+ return 1
if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv))
- except KeyboardInterrupt:
- sys.exit(1)
+ try:
+ sys.exit(main(sys.argv))
+ except KeyboardInterrupt:
+ sys.exit(1)
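
A standalone sketch of the .sisoenv handling above: each line is KEY=VALUE and
is layered onto a copy of the current environment. The keys shown are
illustrative, not a documented .sisoenv schema:

import os

sisoenv_text = 'SISO_PROJECT=example-project\nSISO_REAPI_INSTANCE=default\n'
env = os.environ.copy()
for line in sisoenv_text.splitlines():
    k, v = line.rstrip().split('=', 1)
    env[k] = v
print(env['SISO_PROJECT'])  # example-project
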
diff --git a/split_cl.py b/split_cl.py
index 16cb8f25de..70768755c6 100644
--- a/split_cl.py
+++ b/split_cl.py
@@ -2,7 +2,6 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Splits a branch into smaller branches and uploads CLs."""
from __future__ import print_function
@@ -19,7 +18,6 @@ import scm
import git_common as git
-
# If a call to `git cl split` will generate more than this number of CLs, the
# command will prompt the user to make sure they know what they're doing. Large
# numbers of CLs generated by `git cl split` have caused infrastructure issues
@@ -36,62 +34,62 @@ FilesAndOwnersDirectory = collections.namedtuple("FilesAndOwnersDirectory",
def EnsureInGitRepository():
- """Throws an exception if the current directory is not a git repository."""
- git.run('rev-parse')
+ """Throws an exception if the current directory is not a git repository."""
+ git.run('rev-parse')
def CreateBranchForDirectories(prefix, directories, upstream):
- """Creates a branch named |prefix| + "_" + |directories[0]| + "_split".
+ """Creates a branch named |prefix| + "_" + |directories[0]| + "_split".
Return false if the branch already exists. |upstream| is used as upstream for
the created branch.
"""
- existing_branches = set(git.branches(use_limit = False))
- branch_name = prefix + '_' + directories[0] + '_split'
- if branch_name in existing_branches:
- return False
- git.run('checkout', '-t', upstream, '-b', branch_name)
- return True
+ existing_branches = set(git.branches(use_limit=False))
+ branch_name = prefix + '_' + directories[0] + '_split'
+ if branch_name in existing_branches:
+ return False
+ git.run('checkout', '-t', upstream, '-b', branch_name)
+ return True
def FormatDirectoriesForPrinting(directories, prefix=None):
- """Formats directory list for printing
+ """Formats directory list for printing
Uses dedicated format for single-item list."""
- prefixed = directories
- if prefix:
- prefixed = [(prefix + d) for d in directories]
+ prefixed = directories
+ if prefix:
+ prefixed = [(prefix + d) for d in directories]
- return str(prefixed) if len(prefixed) > 1 else str(prefixed[0])
+ return str(prefixed) if len(prefixed) > 1 else str(prefixed[0])
def FormatDescriptionOrComment(txt, directories):
- """Replaces $directory with |directories| in |txt|."""
- to_insert = FormatDirectoriesForPrinting(directories, prefix='/')
- return txt.replace('$directory', to_insert)
+ """Replaces $directory with |directories| in |txt|."""
+ to_insert = FormatDirectoriesForPrinting(directories, prefix='/')
+ return txt.replace('$directory', to_insert)
def AddUploadedByGitClSplitToDescription(description):
- """Adds a 'This CL was uploaded by git cl split.' line to |description|.
+ """Adds a 'This CL was uploaded by git cl split.' line to |description|.
The line is added before footers, or at the end of |description| if it has no
footers.
"""
- split_footers = git_footers.split_footers(description)
- lines = split_footers[0]
- if lines[-1] and not lines[-1].isspace():
- lines = lines + ['']
- lines = lines + ['This CL was uploaded by git cl split.']
- if split_footers[1]:
- lines += [''] + split_footers[1]
- return '\n'.join(lines)
+ split_footers = git_footers.split_footers(description)
+ lines = split_footers[0]
+ if lines[-1] and not lines[-1].isspace():
+ lines = lines + ['']
+ lines = lines + ['This CL was uploaded by git cl split.']
+ if split_footers[1]:
+ lines += [''] + split_footers[1]
+ return '\n'.join(lines)
def UploadCl(refactor_branch, refactor_branch_upstream, directories, files,
description, comment, reviewers, changelist, cmd_upload,
cq_dry_run, enable_auto_submit, topic, repository_root):
- """Uploads a CL with all changes to |files| in |refactor_branch|.
+ """Uploads a CL with all changes to |files| in |refactor_branch|.
Args:
refactor_branch: Name of the branch that contains the changes to upload.
@@ -108,89 +106,92 @@ def UploadCl(refactor_branch, refactor_branch_upstream, directories, files,
enable_auto_submit: If CL uploads should also enable auto submit.
topic: Topic to associate with uploaded CLs.
"""
- # Create a branch.
- if not CreateBranchForDirectories(refactor_branch, directories,
- refactor_branch_upstream):
- print('Skipping ' + FormatDirectoriesForPrinting(directories) +
- ' for which a branch already exists.')
- return
+ # Create a branch.
+ if not CreateBranchForDirectories(refactor_branch, directories,
+ refactor_branch_upstream):
+ print('Skipping ' + FormatDirectoriesForPrinting(directories) +
+ ' for which a branch already exists.')
+ return
- # Checkout all changes to files in |files|.
- deleted_files = []
- modified_files = []
- for action, f in files:
- abspath = os.path.abspath(os.path.join(repository_root, f))
- if action == 'D':
- deleted_files.append(abspath)
- else:
- modified_files.append(abspath)
+ # Checkout all changes to files in |files|.
+ deleted_files = []
+ modified_files = []
+ for action, f in files:
+ abspath = os.path.abspath(os.path.join(repository_root, f))
+ if action == 'D':
+ deleted_files.append(abspath)
+ else:
+ modified_files.append(abspath)
- if deleted_files:
- git.run(*['rm'] + deleted_files)
- if modified_files:
- git.run(*['checkout', refactor_branch, '--'] + modified_files)
+ if deleted_files:
+ git.run(*['rm'] + deleted_files)
+ if modified_files:
+ git.run(*['checkout', refactor_branch, '--'] + modified_files)
- # Commit changes. The temporary file is created with delete=False so that it
- # can be deleted manually after git has read it rather than automatically
- # when it is closed.
- with gclient_utils.temporary_file() as tmp_file:
- gclient_utils.FileWrite(
- tmp_file, FormatDescriptionOrComment(description, directories))
- git.run('commit', '-F', tmp_file)
+ # Commit changes. The temporary file is created with delete=False so that it
+ # can be deleted manually after git has read it rather than automatically
+ # when it is closed.
+ with gclient_utils.temporary_file() as tmp_file:
+ gclient_utils.FileWrite(
+ tmp_file, FormatDescriptionOrComment(description, directories))
+ git.run('commit', '-F', tmp_file)
- # Upload a CL.
- upload_args = ['-f']
- if reviewers:
- upload_args.extend(['-r', ','.join(sorted(reviewers))])
- if cq_dry_run:
- upload_args.append('--cq-dry-run')
- if not comment:
- upload_args.append('--send-mail')
- if enable_auto_submit:
- upload_args.append('--enable-auto-submit')
- if topic:
- upload_args.append('--topic={}'.format(topic))
- print('Uploading CL for ' + FormatDirectoriesForPrinting(directories) + '...')
+ # Upload a CL.
+ upload_args = ['-f']
+ if reviewers:
+ upload_args.extend(['-r', ','.join(sorted(reviewers))])
+ if cq_dry_run:
+ upload_args.append('--cq-dry-run')
+ if not comment:
+ upload_args.append('--send-mail')
+ if enable_auto_submit:
+ upload_args.append('--enable-auto-submit')
+ if topic:
+ upload_args.append('--topic={}'.format(topic))
+ print('Uploading CL for ' + FormatDirectoriesForPrinting(directories) +
+ '...')
- ret = cmd_upload(upload_args)
- if ret != 0:
- print('Uploading failed.')
- print('Note: git cl split has built-in resume capabilities.')
- print('Delete ' + git.current_branch() +
- ' then run git cl split again to resume uploading.')
+ ret = cmd_upload(upload_args)
+ if ret != 0:
+ print('Uploading failed.')
+ print('Note: git cl split has built-in resume capabilities.')
+ print('Delete ' + git.current_branch() +
+ ' then run git cl split again to resume uploading.')
- if comment:
- changelist().AddComment(FormatDescriptionOrComment(comment, directories),
- publish=True)
+ if comment:
+ changelist().AddComment(FormatDescriptionOrComment(
+ comment, directories),
+ publish=True)
def GetFilesSplitByOwners(files, max_depth):
- """Returns a map of files split by OWNERS file.
+ """Returns a map of files split by OWNERS file.
Returns:
A map where keys are paths to directories containing an OWNERS file and
values are lists of files sharing an OWNERS file.
"""
- files_split_by_owners = {}
- for action, path in files:
- # normpath() is important to normalize separators here, in prepration for
- # str.split() before. It would be nicer to use something like pathlib here
- # but alas...
- dir_with_owners = os.path.normpath(os.path.dirname(path))
- if max_depth >= 1:
- dir_with_owners = os.path.join(
- *dir_with_owners.split(os.path.sep)[:max_depth])
- # Find the closest parent directory with an OWNERS file.
- while (dir_with_owners not in files_split_by_owners
- and not os.path.isfile(os.path.join(dir_with_owners, 'OWNERS'))):
- dir_with_owners = os.path.dirname(dir_with_owners)
- files_split_by_owners.setdefault(dir_with_owners, []).append((action, path))
- return files_split_by_owners
+ files_split_by_owners = {}
+ for action, path in files:
+        # normpath() is important to normalize separators here, in
+        # preparation for the str.split() below. It would be nicer to use
+        # something like pathlib here, but alas...
+ dir_with_owners = os.path.normpath(os.path.dirname(path))
+ if max_depth >= 1:
+ dir_with_owners = os.path.join(
+ *dir_with_owners.split(os.path.sep)[:max_depth])
+ # Find the closest parent directory with an OWNERS file.
+ while (dir_with_owners not in files_split_by_owners
+ and not os.path.isfile(os.path.join(dir_with_owners, 'OWNERS'))):
+ dir_with_owners = os.path.dirname(dir_with_owners)
+ files_split_by_owners.setdefault(dir_with_owners, []).append(
+ (action, path))
+ return files_split_by_owners
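
A standalone sketch of the "closest OWNERS file" walk above; has_owners stands
in for the os.path.isfile() check and the paths are made up:

import os

def closest_owners_dir(path, has_owners, seen):
    d = os.path.normpath(os.path.dirname(path))
    while d not in seen and not has_owners(d):
        d = os.path.dirname(d)
    return d

has_owners = lambda d: d in ('a', '')  # pretend only a/OWNERS exists
print(closest_owners_dir('a/b/c/file.cc', has_owners, set()))  # a
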
def PrintClInfo(cl_index, num_cls, directories, file_paths, description,
reviewers, enable_auto_submit, topic):
- """Prints info about a CL.
+ """Prints info about a CL.
Args:
cl_index: The index of this CL in the list of CLs to upload.
@@ -203,23 +204,23 @@ def PrintClInfo(cl_index, num_cls, directories, file_paths, description,
enable_auto_submit: If the CL should also have auto submit enabled.
topic: Topic to set for this CL.
"""
- description_lines = FormatDescriptionOrComment(description,
- directories).splitlines()
- indented_description = '\n'.join([' ' + l for l in description_lines])
+ description_lines = FormatDescriptionOrComment(description,
+ directories).splitlines()
+ indented_description = '\n'.join([' ' + l for l in description_lines])
- print('CL {}/{}'.format(cl_index, num_cls))
- print('Paths: {}'.format(FormatDirectoriesForPrinting(directories)))
- print('Reviewers: {}'.format(', '.join(reviewers)))
- print('Auto-Submit: {}'.format(enable_auto_submit))
- print('Topic: {}'.format(topic))
- print('\n' + indented_description + '\n')
- print('\n'.join(file_paths))
- print()
+ print('CL {}/{}'.format(cl_index, num_cls))
+ print('Paths: {}'.format(FormatDirectoriesForPrinting(directories)))
+ print('Reviewers: {}'.format(', '.join(reviewers)))
+ print('Auto-Submit: {}'.format(enable_auto_submit))
+ print('Topic: {}'.format(topic))
+ print('\n' + indented_description + '\n')
+ print('\n'.join(file_paths))
+ print()
def SplitCl(description_file, comment_file, changelist, cmd_upload, dry_run,
cq_dry_run, enable_auto_submit, max_depth, topic, repository_root):
- """"Splits a branch into smaller branches and uploads CLs.
+    """Splits a branch into smaller branches and uploads CLs.
Args:
description_file: File containing the description of uploaded CLs.
@@ -236,89 +237,90 @@ def SplitCl(description_file, comment_file, changelist, cmd_upload, dry_run,
Returns:
0 in case of success. 1 in case of error.
"""
- description = AddUploadedByGitClSplitToDescription(
- gclient_utils.FileRead(description_file))
- comment = gclient_utils.FileRead(comment_file) if comment_file else None
+ description = AddUploadedByGitClSplitToDescription(
+ gclient_utils.FileRead(description_file))
+ comment = gclient_utils.FileRead(comment_file) if comment_file else None
- try:
- EnsureInGitRepository()
+ try:
+ EnsureInGitRepository()
- cl = changelist()
- upstream = cl.GetCommonAncestorWithUpstream()
- files = [
- (action.strip(), f)
- for action, f in scm.GIT.CaptureStatus(repository_root, upstream)
- ]
+ cl = changelist()
+ upstream = cl.GetCommonAncestorWithUpstream()
+ files = [
+ (action.strip(), f)
+ for action, f in scm.GIT.CaptureStatus(repository_root, upstream)
+ ]
- if not files:
- print('Cannot split an empty CL.')
- return 1
+ if not files:
+ print('Cannot split an empty CL.')
+ return 1
- author = git.run('config', 'user.email').strip() or None
- refactor_branch = git.current_branch()
- assert refactor_branch, "Can't run from detached branch."
- refactor_branch_upstream = git.upstream(refactor_branch)
- assert refactor_branch_upstream, \
- "Branch %s must have an upstream." % refactor_branch
+ author = git.run('config', 'user.email').strip() or None
+ refactor_branch = git.current_branch()
+ assert refactor_branch, "Can't run from detached branch."
+ refactor_branch_upstream = git.upstream(refactor_branch)
+ assert refactor_branch_upstream, \
+ "Branch %s must have an upstream." % refactor_branch
- if not CheckDescriptionBugLink(description):
- return 0
+ if not CheckDescriptionBugLink(description):
+ return 0
- files_split_by_reviewers = SelectReviewersForFiles(cl, author, files,
- max_depth)
+ files_split_by_reviewers = SelectReviewersForFiles(
+ cl, author, files, max_depth)
- num_cls = len(files_split_by_reviewers)
- print('Will split current branch (' + refactor_branch + ') into ' +
- str(num_cls) + ' CLs.\n')
- if cq_dry_run and num_cls > CL_SPLIT_FORCE_LIMIT:
- print(
- 'This will generate "%r" CLs. This many CLs can potentially generate'
- ' too much load on the build infrastructure. Please email'
- ' infra-dev@chromium.org to ensure that this won\'t break anything.'
- ' The infra team reserves the right to cancel your jobs if they are'
- ' overloading the CQ.' % num_cls)
- answer = gclient_utils.AskForData('Proceed? (y/n):')
- if answer.lower() != 'y':
- return 0
+ num_cls = len(files_split_by_reviewers)
+ print('Will split current branch (' + refactor_branch + ') into ' +
+ str(num_cls) + ' CLs.\n')
+ if cq_dry_run and num_cls > CL_SPLIT_FORCE_LIMIT:
+ print(
+ 'This will generate "%r" CLs. This many CLs can potentially'
+ ' generate too much load on the build infrastructure. Please'
+ ' email infra-dev@chromium.org to ensure that this won\'t break'
+ ' anything. The infra team reserves the right to cancel your'
+ ' jobs if they are overloading the CQ.' % num_cls)
+ answer = gclient_utils.AskForData('Proceed? (y/n):')
+ if answer.lower() != 'y':
+ return 0
- cls_per_reviewer = collections.defaultdict(int)
- for cl_index, (reviewers, cl_info) in \
- enumerate(files_split_by_reviewers.items(), 1):
- # Convert reviewers from tuple to set.
- reviewer_set = set(reviewers)
- if dry_run:
- file_paths = [f for _, f in cl_info.files]
- PrintClInfo(cl_index, num_cls, cl_info.owners_directories, file_paths,
- description, reviewer_set, enable_auto_submit, topic)
- else:
- UploadCl(refactor_branch, refactor_branch_upstream,
- cl_info.owners_directories, cl_info.files, description,
- comment, reviewer_set, changelist, cmd_upload, cq_dry_run,
- enable_auto_submit, topic, repository_root)
+ cls_per_reviewer = collections.defaultdict(int)
+ for cl_index, (reviewers, cl_info) in \
+ enumerate(files_split_by_reviewers.items(), 1):
+ # Convert reviewers from tuple to set.
+ reviewer_set = set(reviewers)
+ if dry_run:
+ file_paths = [f for _, f in cl_info.files]
+ PrintClInfo(cl_index, num_cls, cl_info.owners_directories,
+ file_paths, description, reviewer_set,
+ enable_auto_submit, topic)
+ else:
+ UploadCl(refactor_branch, refactor_branch_upstream,
+ cl_info.owners_directories, cl_info.files, description,
+ comment, reviewer_set, changelist, cmd_upload,
+ cq_dry_run, enable_auto_submit, topic, repository_root)
- for reviewer in reviewers:
- cls_per_reviewer[reviewer] += 1
+ for reviewer in reviewers:
+ cls_per_reviewer[reviewer] += 1
- # List the top reviewers that will be sent the most CLs as a result of the
- # split.
- reviewer_rankings = sorted(cls_per_reviewer.items(),
- key=lambda item: item[1],
- reverse=True)
- print('The top reviewers are:')
- for reviewer, count in reviewer_rankings[:CL_SPLIT_TOP_REVIEWERS]:
- print(f' {reviewer}: {count} CLs')
+ # List the top reviewers that will be sent the most CLs as a result of
+ # the split.
+ reviewer_rankings = sorted(cls_per_reviewer.items(),
+ key=lambda item: item[1],
+ reverse=True)
+ print('The top reviewers are:')
+ for reviewer, count in reviewer_rankings[:CL_SPLIT_TOP_REVIEWERS]:
+ print(f' {reviewer}: {count} CLs')
- # Go back to the original branch.
- git.run('checkout', refactor_branch)
+ # Go back to the original branch.
+ git.run('checkout', refactor_branch)
- except subprocess2.CalledProcessError as cpe:
- sys.stderr.write(cpe.stderr)
- return 1
- return 0
+ except subprocess2.CalledProcessError as cpe:
+ sys.stderr.write(cpe.stderr)
+ return 1
+ return 0
def CheckDescriptionBugLink(description):
- """Verifies that the description contains a bug link.
+ """Verifies that the description contains a bug link.
Examples:
Bug: 123
@@ -326,17 +328,17 @@ def CheckDescriptionBugLink(description):
Prompts user if the description does not contain a bug link.
"""
- bug_pattern = re.compile(r"^Bug:\s*(?:[a-zA-Z]+:)?[0-9]+", re.MULTILINE)
- matches = re.findall(bug_pattern, description)
- answer = 'y'
- if not matches:
- answer = gclient_utils.AskForData(
- 'Description does not include a bug link. Proceed? (y/n):')
- return answer.lower() == 'y'
+ bug_pattern = re.compile(r"^Bug:\s*(?:[a-zA-Z]+:)?[0-9]+", re.MULTILINE)
+ matches = re.findall(bug_pattern, description)
+ answer = 'y'
+ if not matches:
+ answer = gclient_utils.AskForData(
+ 'Description does not include a bug link. Proceed? (y/n):')
+ return answer.lower() == 'y'
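
For reference, the bug-link pattern above matched against a few made-up
descriptions:

import re

bug_pattern = re.compile(r"^Bug:\s*(?:[a-zA-Z]+:)?[0-9]+", re.MULTILINE)
print(bool(bug_pattern.search('Refactor foo.\n\nBug: 123')))             # True
print(bool(bug_pattern.search('Refactor foo.\n\nBug: chromium:98765')))  # True
print(bool(bug_pattern.search('Refactor foo.')))                         # False
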
def SelectReviewersForFiles(cl, author, files, max_depth):
- """Selects reviewers for passed-in files
+ """Selects reviewers for passed-in files
Args:
cl: Changelist class instance
@@ -345,24 +347,24 @@ def SelectReviewersForFiles(cl, author, files, max_depth):
max_depth: The maximum directory depth to search for OWNERS files. A value
less than 1 means no limit.
"""
- info_split_by_owners = GetFilesSplitByOwners(files, max_depth)
+ info_split_by_owners = GetFilesSplitByOwners(files, max_depth)
- info_split_by_reviewers = {}
+ info_split_by_reviewers = {}
- for (directory, split_files) in info_split_by_owners.items():
- # Use '/' as a path separator in the branch name and the CL description
- # and comment.
- directory = directory.replace(os.path.sep, '/')
- file_paths = [f for _, f in split_files]
- # Convert reviewers list to tuple in order to use reviewers as key to
- # dictionary.
- reviewers = tuple(
- cl.owners_client.SuggestOwners(
- file_paths, exclude=[author, cl.owners_client.EVERYONE]))
+ for (directory, split_files) in info_split_by_owners.items():
+ # Use '/' as a path separator in the branch name and the CL description
+ # and comment.
+ directory = directory.replace(os.path.sep, '/')
+ file_paths = [f for _, f in split_files]
+ # Convert reviewers list to tuple in order to use reviewers as key to
+ # dictionary.
+ reviewers = tuple(
+ cl.owners_client.SuggestOwners(
+ file_paths, exclude=[author, cl.owners_client.EVERYONE]))
- if not reviewers in info_split_by_reviewers:
- info_split_by_reviewers[reviewers] = FilesAndOwnersDirectory([], [])
- info_split_by_reviewers[reviewers].files.extend(split_files)
- info_split_by_reviewers[reviewers].owners_directories.append(directory)
+ if not reviewers in info_split_by_reviewers:
+ info_split_by_reviewers[reviewers] = FilesAndOwnersDirectory([], [])
+ info_split_by_reviewers[reviewers].files.extend(split_files)
+ info_split_by_reviewers[reviewers].owners_directories.append(directory)
- return info_split_by_reviewers
+ return info_split_by_reviewers
diff --git a/subcommand.py b/subcommand.py
index 9d468d60c8..0d5a8aec76 100644
--- a/subcommand.py
+++ b/subcommand.py
@@ -1,7 +1,6 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Manages subcommands in a script.
Each subcommand should look like this:
@@ -46,51 +45,55 @@ import textwrap
def usage(more):
- """Adds a 'usage_more' property to a CMD function."""
- def hook(fn):
- fn.usage_more = more
- return fn
- return hook
+ """Adds a 'usage_more' property to a CMD function."""
+ def hook(fn):
+ fn.usage_more = more
+ return fn
+
+ return hook
def epilog(text):
- """Adds an 'epilog' property to a CMD function.
+ """Adds an 'epilog' property to a CMD function.
It will be shown in the epilog. Usually useful for examples.
"""
- def hook(fn):
- fn.epilog = text
- return fn
- return hook
+ def hook(fn):
+ fn.epilog = text
+ return fn
+
+ return hook
def CMDhelp(parser, args):
- """Prints list of commands or help for a specific command."""
- # This is the default help implementation. It can be disabled or overridden if
- # wanted.
- if not any(i in ('-h', '--help') for i in args):
- args = args + ['--help']
- parser.parse_args(args)
- # Never gets there.
- assert False
+ """Prints list of commands or help for a specific command."""
+ # This is the default help implementation. It can be disabled or overridden
+ # if wanted.
+ if not any(i in ('-h', '--help') for i in args):
+ args = args + ['--help']
+ parser.parse_args(args)
+    # Never gets here.
+ assert False
def _get_color_module():
- """Returns the colorama module if available.
+ """Returns the colorama module if available.
If so, assumes colors are supported and return the module handle.
"""
- return sys.modules.get('colorama') or sys.modules.get('third_party.colorama')
+ return sys.modules.get('colorama') or sys.modules.get(
+ 'third_party.colorama')
def _function_to_name(name):
- """Returns the name of a CMD function."""
- return name[3:].replace('_', '-')
+ """Returns the name of a CMD function."""
+ return name[3:].replace('_', '-')
class CommandDispatcher(object):
- def __init__(self, module):
- """module is the name of the main python module where to look for commands.
+ def __init__(self, module):
+ """module is the name of the main python module where to look for
+ commands.
The python builtin variable __name__ MUST be used for |module|. If the
script is executed in the form 'python script.py', __name__ == '__main__'
@@ -98,10 +101,10 @@ class CommandDispatcher(object):
tested, __main__ will be the unit test's module so it has to reference to
itself with 'script'. __name__ always match the right value.
"""
- self.module = sys.modules[module]
+ self.module = sys.modules[module]
- def enumerate_commands(self):
- """Returns a dict of command and their handling function.
+ def enumerate_commands(self):
+        """Returns a dict of commands and their handling functions.
The commands must be in the '__main__' modules. To import a command from a
submodule, use:
@@ -115,149 +118,147 @@ class CommandDispatcher(object):
e.g.:
CMDhelp = None
"""
- cmds = dict(
- (_function_to_name(name), getattr(self.module, name))
- for name in dir(self.module) if name.startswith('CMD'))
- cmds.setdefault('help', CMDhelp)
- return cmds
+ cmds = dict((_function_to_name(name), getattr(self.module, name))
+ for name in dir(self.module) if name.startswith('CMD'))
+ cmds.setdefault('help', CMDhelp)
+ return cmds
- def find_nearest_command(self, name_asked):
- """Retrieves the function to handle a command as supplied by the user.
+ def find_nearest_command(self, name_asked):
+ """Retrieves the function to handle a command as supplied by the user.
It automatically tries to guess the _intended command_ by handling typos
and/or incomplete names.
"""
- commands = self.enumerate_commands()
- name_to_dash = name_asked.replace('_', '-')
- if name_to_dash in commands:
- return commands[name_to_dash]
+ commands = self.enumerate_commands()
+ name_to_dash = name_asked.replace('_', '-')
+ if name_to_dash in commands:
+ return commands[name_to_dash]
- # An exact match was not found. Try to be smart and look if there's
- # something similar.
- commands_with_prefix = [c for c in commands if c.startswith(name_asked)]
- if len(commands_with_prefix) == 1:
- return commands[commands_with_prefix[0]]
+ # An exact match was not found. Try to be smart and look if there's
+ # something similar.
+ commands_with_prefix = [c for c in commands if c.startswith(name_asked)]
+ if len(commands_with_prefix) == 1:
+ return commands[commands_with_prefix[0]]
- # A #closeenough approximation of levenshtein distance.
- def close_enough(a, b):
- return difflib.SequenceMatcher(a=a, b=b).ratio()
+ # A #closeenough approximation of Levenshtein distance.
+ def close_enough(a, b):
+ return difflib.SequenceMatcher(a=a, b=b).ratio()
- hamming_commands = sorted(
- ((close_enough(c, name_asked), c) for c in commands),
- reverse=True)
- if (hamming_commands[0][0] - hamming_commands[1][0]) < 0.3:
- # Too ambiguous.
- return None
+ hamming_commands = sorted(
+ ((close_enough(c, name_asked), c) for c in commands), reverse=True)
+ if (hamming_commands[0][0] - hamming_commands[1][0]) < 0.3:
+ # Too ambiguous.
+ return None
- if hamming_commands[0][0] < 0.8:
- # Not similar enough. Don't be a fool and run a random command.
- return None
+ if hamming_commands[0][0] < 0.8:
+ # Not similar enough. Don't be a fool and run a random command.
+ return None
- return commands[hamming_commands[0][1]]
+ return commands[hamming_commands[0][1]]
- def _gen_commands_list(self):
- """Generates the short list of supported commands."""
- commands = self.enumerate_commands()
- docs = sorted(
- (cmd_name, self._create_command_summary(cmd_name, handler))
- for cmd_name, handler in commands.items())
- # Skip commands without a docstring.
- docs = [i for i in docs if i[1]]
- # Then calculate maximum length for alignment:
- length = max(len(c) for c in commands)
+ def _gen_commands_list(self):
+ """Generates the short list of supported commands."""
+ commands = self.enumerate_commands()
+ docs = sorted(
+ (cmd_name, self._create_command_summary(cmd_name, handler))
+ for cmd_name, handler in commands.items())
+ # Skip commands without a docstring.
+ docs = [i for i in docs if i[1]]
+ # Then calculate maximum length for alignment:
+ length = max(len(c) for c in commands)
- # Look if color is supported.
- colors = _get_color_module()
- green = reset = ''
- if colors:
- green = colors.Fore.GREEN
- reset = colors.Fore.RESET
- return (
- 'Commands are:\n' +
- ''.join(
- ' %s%-*s%s %s\n' % (green, length, cmd_name, reset, doc)
- for cmd_name, doc in docs))
+ # Look if color is supported.
+ colors = _get_color_module()
+ green = reset = ''
+ if colors:
+ green = colors.Fore.GREEN
+ reset = colors.Fore.RESET
+ return ('Commands are:\n' +
+ ''.join(' %s%-*s%s %s\n' %
+ (green, length, cmd_name, reset, doc)
+ for cmd_name, doc in docs))
- def _add_command_usage(self, parser, command):
- """Modifies an OptionParser object with the function's documentation."""
- cmd_name = _function_to_name(command.__name__)
- if cmd_name == 'help':
- cmd_name = ''
- # Use the module's docstring as the description for the 'help' command if
- # available.
- parser.description = (self.module.__doc__ or '').rstrip()
- if parser.description:
- parser.description += '\n\n'
- parser.description += self._gen_commands_list()
- # Do not touch epilog.
- else:
- # Use the command's docstring if available. For commands, unlike module
- # docstring, realign.
- lines = (command.__doc__ or '').rstrip().splitlines()
- if lines[:1]:
- rest = textwrap.dedent('\n'.join(lines[1:]))
- parser.description = '\n'.join((lines[0], rest))
- else:
- parser.description = lines[0] if lines else ''
- if parser.description:
- parser.description += '\n'
- parser.epilog = getattr(command, 'epilog', None)
- if parser.epilog:
- parser.epilog = '\n' + parser.epilog.strip() + '\n'
+ def _add_command_usage(self, parser, command):
+ """Modifies an OptionParser object with the function's documentation."""
+ cmd_name = _function_to_name(command.__name__)
+ if cmd_name == 'help':
+ cmd_name = ''
+ # Use the module's docstring as the description for the 'help'
+ # command if available.
+ parser.description = (self.module.__doc__ or '').rstrip()
+ if parser.description:
+ parser.description += '\n\n'
+ parser.description += self._gen_commands_list()
+ # Do not touch epilog.
+ else:
+ # Use the command's docstring if available. For commands, unlike the
+ # module docstring, realign.
+ lines = (command.__doc__ or '').rstrip().splitlines()
+ if lines[:1]:
+ rest = textwrap.dedent('\n'.join(lines[1:]))
+ parser.description = '\n'.join((lines[0], rest))
+ else:
+ parser.description = lines[0] if lines else ''
+ if parser.description:
+ parser.description += '\n'
+ parser.epilog = getattr(command, 'epilog', None)
+ if parser.epilog:
+ parser.epilog = '\n' + parser.epilog.strip() + '\n'
- more = getattr(command, 'usage_more', '')
- extra = '' if not more else ' ' + more
- parser.set_usage('usage: %%prog %s [options]%s' % (cmd_name, extra))
+ more = getattr(command, 'usage_more', '')
+ extra = '' if not more else ' ' + more
+ parser.set_usage('usage: %%prog %s [options]%s' % (cmd_name, extra))
- @staticmethod
- def _create_command_summary(cmd_name, command):
- """Creates a oneliner summary from the command's docstring."""
- if cmd_name != _function_to_name(command.__name__):
- # Skip aliases. For example using at module level:
- # CMDfoo = CMDbar
- return ''
- doc = command.__doc__ or ''
- line = doc.split('\n', 1)[0].rstrip('.')
- if not line:
- return line
- return (line[0].lower() + line[1:]).strip()
+ @staticmethod
+ def _create_command_summary(cmd_name, command):
+ """Creates a oneliner summary from the command's docstring."""
+ if cmd_name != _function_to_name(command.__name__):
+ # Skip aliases. For example using at module level:
+ # CMDfoo = CMDbar
+ return ''
+ doc = command.__doc__ or ''
+ line = doc.split('\n', 1)[0].rstrip('.')
+ if not line:
+ return line
+ return (line[0].lower() + line[1:]).strip()
- def execute(self, parser, args):
- """Dispatches execution to the right command.
+ def execute(self, parser, args):
+ """Dispatches execution to the right command.
Fallbacks to 'help' if not disabled.
"""
- # Unconditionally disable format_description() and format_epilog().
- # Technically, a formatter should be used but it's not worth (yet) the
- # trouble.
- parser.format_description = lambda _: parser.description or ''
- parser.format_epilog = lambda _: parser.epilog or ''
+ # Unconditionally disable format_description() and format_epilog().
+ # Technically, a formatter should be used but it's not worth (yet) the
+ # trouble.
+ parser.format_description = lambda _: parser.description or ''
+ parser.format_epilog = lambda _: parser.epilog or ''
- if args:
- if args[0] in ('-h', '--help') and len(args) > 1:
- # Reverse the argument order so 'tool --help cmd' is rewritten to
- # 'tool cmd --help'.
- args = [args[1], args[0]] + args[2:]
- command = self.find_nearest_command(args[0])
- if command:
- if command.__name__ == 'CMDhelp' and len(args) > 1:
- # Reverse the argument order so 'tool help cmd' is rewritten to
- # 'tool cmd --help'. Do it here since we want 'tool help cmd' to work
- # too.
- args = [args[1], '--help'] + args[2:]
- command = self.find_nearest_command(args[0]) or command
+ if args:
+ if args[0] in ('-h', '--help') and len(args) > 1:
+ # Reverse the argument order so 'tool --help cmd' is rewritten
+ # to 'tool cmd --help'.
+ args = [args[1], args[0]] + args[2:]
+ command = self.find_nearest_command(args[0])
+ if command:
+ if command.__name__ == 'CMDhelp' and len(args) > 1:
+ # Reverse the argument order so 'tool help cmd' is rewritten
+ # to 'tool cmd --help'. Do it here since we want 'tool help
+ # cmd' to work too.
+ args = [args[1], '--help'] + args[2:]
+ command = self.find_nearest_command(args[0]) or command
- # "fix" the usage and the description now that we know the subcommand.
- self._add_command_usage(parser, command)
- return command(parser, args[1:])
+ # "fix" the usage and the description now that we know the
+ # subcommand.
+ self._add_command_usage(parser, command)
+ return command(parser, args[1:])
- cmdhelp = self.enumerate_commands().get('help')
- if cmdhelp:
- # Not a known command. Default to help.
- self._add_command_usage(parser, cmdhelp)
- # Don't pass list of arguments as those may not be supported by cmdhelp.
- # See: https://crbug.com/1352093
- return cmdhelp(parser, [])
+ cmdhelp = self.enumerate_commands().get('help')
+ if cmdhelp:
+ # Not a known command. Default to help.
+ self._add_command_usage(parser, cmdhelp)
+ # Don't pass list of arguments as those may not be supported by
+ # cmdhelp. See: https://crbug.com/1352093
+ return cmdhelp(parser, [])
- # Nothing can be done.
- return 2
+ # Nothing can be done.
+ return 2
diff --git a/subprocess2.py b/subprocess2.py
index 481917a383..bcee43138c 100644
--- a/subprocess2.py
+++ b/subprocess2.py
@@ -15,7 +15,6 @@ import subprocess
import sys
import threading
-
# Constants forwarded from subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
@@ -23,72 +22,74 @@ DEVNULL = subprocess.DEVNULL
class CalledProcessError(subprocess.CalledProcessError):
- """Augment the standard exception with more data."""
- def __init__(self, returncode, cmd, cwd, stdout, stderr):
- super(CalledProcessError, self).__init__(returncode, cmd, output=stdout)
- self.stdout = self.output # for backward compatibility.
- self.stderr = stderr
- self.cwd = cwd
+ """Augment the standard exception with more data."""
+ def __init__(self, returncode, cmd, cwd, stdout, stderr):
+ super(CalledProcessError, self).__init__(returncode, cmd, output=stdout)
+ self.stdout = self.output # for backward compatibility.
+ self.stderr = stderr
+ self.cwd = cwd
- def __str__(self):
- out = 'Command %r returned non-zero exit status %s' % (
- ' '.join(self.cmd), self.returncode)
- if self.cwd:
- out += ' in ' + self.cwd
- if self.stdout:
- out += '\n' + self.stdout.decode('utf-8', 'ignore')
- if self.stderr:
- out += '\n' + self.stderr.decode('utf-8', 'ignore')
- return out
+ def __str__(self):
+ out = 'Command %r returned non-zero exit status %s' % (' '.join(
+ self.cmd), self.returncode)
+ if self.cwd:
+ out += ' in ' + self.cwd
+ if self.stdout:
+ out += '\n' + self.stdout.decode('utf-8', 'ignore')
+ if self.stderr:
+ out += '\n' + self.stderr.decode('utf-8', 'ignore')
+ return out
class CygwinRebaseError(CalledProcessError):
- """Occurs when cygwin's fork() emulation fails due to rebased dll."""
+ """Occurs when cygwin's fork() emulation fails due to rebased dll."""
## Utility functions
def kill_pid(pid):
- """Kills a process by its process id."""
- try:
- # Unable to import 'module'
- # pylint: disable=no-member,F0401
- import signal
- return os.kill(pid, signal.SIGTERM)
- except ImportError:
- pass
+ """Kills a process by its process id."""
+ try:
+ # Unable to import 'module'
+ # pylint: disable=no-member,F0401
+ import signal
+ return os.kill(pid, signal.SIGTERM)
+ except ImportError:
+ pass
def get_english_env(env):
- """Forces LANG and/or LANGUAGE to be English.
+ """Forces LANG and/or LANGUAGE to be English.
Forces encoding to utf-8 for subprocesses.
Returns None if it is unnecessary.
"""
- if sys.platform == 'win32':
- return None
- env = env or os.environ
+ if sys.platform == 'win32':
+ return None
+ env = env or os.environ
- # Test if it is necessary at all.
- is_english = lambda name: env.get(name, 'en').startswith('en')
+ # Test if it is necessary at all.
+ is_english = lambda name: env.get(name, 'en').startswith('en')
- if is_english('LANG') and is_english('LANGUAGE'):
- return None
+ if is_english('LANG') and is_english('LANGUAGE'):
+ return None
- # Requires modifications.
- env = env.copy()
- def fix_lang(name):
- if not is_english(name):
- env[name] = 'en_US.UTF-8'
- fix_lang('LANG')
- fix_lang('LANGUAGE')
- return env
+ # Requires modifications.
+ env = env.copy()
+
+ def fix_lang(name):
+ if not is_english(name):
+ env[name] = 'en_US.UTF-8'
+
+ fix_lang('LANG')
+ fix_lang('LANGUAGE')
+ return env
class Popen(subprocess.Popen):
- """Wraps subprocess.Popen() with various workarounds.
+ """Wraps subprocess.Popen() with various workarounds.
- Forces English output since it's easier to parse the stdout if it is always
in English.
@@ -100,69 +101,68 @@ class Popen(subprocess.Popen):
Note: Popen() can throw OSError when cwd or args[0] doesn't exist. Translate
exceptions generated by cygwin when it fails trying to emulate fork().
"""
- # subprocess.Popen.__init__() is not threadsafe; there is a race between
- # creating the exec-error pipe for the child and setting it to CLOEXEC during
- # which another thread can fork and cause the pipe to be inherited by its
- # descendents, which will cause the current Popen to hang until all those
- # descendents exit. Protect this with a lock so that only one fork/exec can
- # happen at a time.
- popen_lock = threading.Lock()
+ # subprocess.Popen.__init__() is not threadsafe; there is a race between
+ # creating the exec-error pipe for the child and setting it to CLOEXEC
+ # during which another thread can fork and cause the pipe to be inherited by
+ # its descendants, which will cause the current Popen to hang until all
+ # those descendants exit. Protect this with a lock so that only one
+ # fork/exec can happen at a time.
+ popen_lock = threading.Lock()
- def __init__(self, args, **kwargs):
- env = get_english_env(kwargs.get('env'))
- if env:
- kwargs['env'] = env
- if kwargs.get('env') is not None:
- # Subprocess expects environment variables to be strings in Python 3.
- def ensure_str(value):
- if isinstance(value, bytes):
- return value.decode()
- return value
+ def __init__(self, args, **kwargs):
+ env = get_english_env(kwargs.get('env'))
+ if env:
+ kwargs['env'] = env
+ if kwargs.get('env') is not None:
+ # Subprocess expects environment variables to be strings in
+ # Python 3.
+ def ensure_str(value):
+ if isinstance(value, bytes):
+ return value.decode()
+ return value
- kwargs['env'] = {
- ensure_str(k): ensure_str(v)
- for k, v in kwargs['env'].items()
- }
- if kwargs.get('shell') is None:
- # *Sigh*: Windows needs shell=True, or else it won't search %PATH% for
- # the executable, but shell=True makes subprocess on Linux fail when it's
- # called with a list because it only tries to execute the first item in
- # the list.
- kwargs['shell'] = bool(sys.platform=='win32')
+ kwargs['env'] = {
+ ensure_str(k): ensure_str(v)
+ for k, v in kwargs['env'].items()
+ }
+ if kwargs.get('shell') is None:
+ # *Sigh*: Windows needs shell=True, or else it won't search %PATH%
+ # for the executable, but shell=True makes subprocess on Linux fail
+ # when it's called with a list because it only tries to execute the
+ # first item in the list.
+ kwargs['shell'] = bool(sys.platform == 'win32')
- if isinstance(args, (str, bytes)):
- tmp_str = args
- elif isinstance(args, (list, tuple)):
- tmp_str = ' '.join(args)
- else:
- raise CalledProcessError(None, args, kwargs.get('cwd'), None, None)
- if kwargs.get('cwd', None):
- tmp_str += '; cwd=%s' % kwargs['cwd']
- logging.debug(tmp_str)
+ if isinstance(args, (str, bytes)):
+ tmp_str = args
+ elif isinstance(args, (list, tuple)):
+ tmp_str = ' '.join(args)
+ else:
+ raise CalledProcessError(None, args, kwargs.get('cwd'), None, None)
+ if kwargs.get('cwd', None):
+ tmp_str += '; cwd=%s' % kwargs['cwd']
+ logging.debug(tmp_str)
- try:
- with self.popen_lock:
- super(Popen, self).__init__(args, **kwargs)
- except OSError as e:
- if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
- # Convert fork() emulation failure into a CygwinRebaseError().
- raise CygwinRebaseError(
- e.errno,
- args,
- kwargs.get('cwd'),
- None,
- 'Visit '
- 'http://code.google.com/p/chromium/wiki/CygwinDllRemappingFailure '
- 'to learn how to fix this error; you need to rebase your cygwin '
- 'dlls')
- # Popen() can throw OSError when cwd or args[0] doesn't exist.
- raise OSError('Execution failed with error: %s.\n'
- 'Check that %s or %s exist and have execution permission.'
- % (str(e), kwargs.get('cwd'), args[0]))
+ try:
+ with self.popen_lock:
+ super(Popen, self).__init__(args, **kwargs)
+ except OSError as e:
+ if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
+ # Convert fork() emulation failure into a CygwinRebaseError().
+ raise CygwinRebaseError(
+ e.errno, args, kwargs.get('cwd'), None, 'Visit '
+ 'http://code.google.com/p/chromium/wiki/'
+ 'CygwinDllRemappingFailure '
+ 'to learn how to fix this error; you need to rebase your '
+ 'cygwin dlls')
+ # Popen() can throw OSError when cwd or args[0] doesn't exist.
+ raise OSError(
+ 'Execution failed with error: %s.\n'
+ 'Check that %s or %s exist and have execution permission.' %
+ (str(e), kwargs.get('cwd'), args[0]))
def communicate(args, **kwargs):
- """Wraps subprocess.Popen().communicate().
+ """Wraps subprocess.Popen().communicate().
Returns ((stdout, stderr), returncode).
@@ -170,19 +170,19 @@ def communicate(args, **kwargs):
output, print a warning to stderr.
- Automatically passes stdin content as input so do not specify stdin=PIPE.
"""
- stdin = None
- # When stdin is passed as an argument, use it as the actual input data and
- # set the Popen() parameter accordingly.
- if 'stdin' in kwargs and isinstance(kwargs['stdin'], (str, bytes)):
- stdin = kwargs['stdin']
- kwargs['stdin'] = PIPE
+ stdin = None
+ # When stdin is passed as an argument, use it as the actual input data and
+ # set the Popen() parameter accordingly.
+ if 'stdin' in kwargs and isinstance(kwargs['stdin'], (str, bytes)):
+ stdin = kwargs['stdin']
+ kwargs['stdin'] = PIPE
- proc = Popen(args, **kwargs)
- return proc.communicate(stdin), proc.returncode
+ proc = Popen(args, **kwargs)
+ return proc.communicate(stdin), proc.returncode
def call(args, **kwargs):
- """Emulates subprocess.call().
+ """Emulates subprocess.call().
Automatically convert stdout=PIPE or stderr=PIPE to DEVNULL.
In no case they can be returned since no code path raises
@@ -190,47 +190,47 @@ def call(args, **kwargs):
Returns exit code.
"""
- if kwargs.get('stdout') == PIPE:
- kwargs['stdout'] = DEVNULL
- if kwargs.get('stderr') == PIPE:
- kwargs['stderr'] = DEVNULL
- return communicate(args, **kwargs)[1]
+ if kwargs.get('stdout') == PIPE:
+ kwargs['stdout'] = DEVNULL
+ if kwargs.get('stderr') == PIPE:
+ kwargs['stderr'] = DEVNULL
+ return communicate(args, **kwargs)[1]
def check_call_out(args, **kwargs):
- """Improved version of subprocess.check_call().
+ """Improved version of subprocess.check_call().
Returns (stdout, stderr), unlike subprocess.check_call().
"""
- out, returncode = communicate(args, **kwargs)
- if returncode:
- raise CalledProcessError(
- returncode, args, kwargs.get('cwd'), out[0], out[1])
- return out
+ out, returncode = communicate(args, **kwargs)
+ if returncode:
+ raise CalledProcessError(returncode, args, kwargs.get('cwd'), out[0],
+ out[1])
+ return out
def check_call(args, **kwargs):
- """Emulate subprocess.check_call()."""
- check_call_out(args, **kwargs)
- return 0
+ """Emulate subprocess.check_call()."""
+ check_call_out(args, **kwargs)
+ return 0
def capture(args, **kwargs):
- """Captures stdout of a process call and returns it.
+ """Captures stdout of a process call and returns it.
Returns stdout.
- Discards returncode.
- Blocks stdin by default if not specified since no output will be visible.
"""
- kwargs.setdefault('stdin', DEVNULL)
+ kwargs.setdefault('stdin', DEVNULL)
- # Like check_output, deny the caller from using stdout arg.
- return communicate(args, stdout=PIPE, **kwargs)[0][0]
+ # Like check_output, prevent the caller from passing a stdout arg.
+ return communicate(args, stdout=PIPE, **kwargs)[0][0]
def check_output(args, **kwargs):
- """Emulates subprocess.check_output().
+ """Emulates subprocess.check_output().
Captures stdout of a process call and returns stdout only.
@@ -238,7 +238,7 @@ def check_output(args, **kwargs):
- Blocks stdin by default if not specified since no output will be visible.
- As per doc, "The stdout argument is not allowed as it is used internally."
"""
- kwargs.setdefault('stdin', DEVNULL)
- if 'stdout' in kwargs:
- raise ValueError('stdout argument not allowed, it would be overridden.')
- return check_call_out(args, stdout=PIPE, **kwargs)[0]
+ kwargs.setdefault('stdin', DEVNULL)
+ if 'stdout' in kwargs:
+ raise ValueError('stdout argument not allowed, it would be overridden.')
+ return check_call_out(args, stdout=PIPE, **kwargs)[0]
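For context on the call sites this wrapper serves, a small sketch that relies only on behaviour documented above: a str/bytes stdin is forwarded as input automatically, communicate() returns ((stdout, stderr), returncode), and check_output() raises CalledProcessError (with cwd, stdout and stderr attached) on a non-zero exit. The commands shown are illustrative only.

    # Illustrative only; assumes this module is importable as `subprocess2`.
    import subprocess2

    # Pass the input data directly via stdin=...; the wrapper swaps in PIPE
    # and feeds the data itself.
    (out, err), returncode = subprocess2.communicate(
        ['sort'], stdin=b'b\na\n', stdout=subprocess2.PIPE)

    # Returns stdout only; stdin is blocked by default and the stdout
    # argument is reserved for internal use.
    head = subprocess2.check_output(['git', 'rev-parse', 'HEAD'])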
diff --git a/swift_format.py b/swift_format.py
index 219211da7c..df31e7cc48 100644
--- a/swift_format.py
+++ b/swift_format.py
@@ -15,60 +15,60 @@ import sys
class NotFoundError(Exception):
- """A file could not be found."""
-
- def __init__(self, e):
- Exception.__init__(
- self,
- 'Problem while looking for swift-format in Chromium source tree:\n'
- '%s' % e)
+ """A file could not be found."""
+ def __init__(self, e):
+ Exception.__init__(
+ self,
+ 'Problem while looking for swift-format in Chromium source tree:\n'
+ '%s' % e)
def FindSwiftFormatToolInChromiumTree():
- """Return a path to the rustfmt executable, or die trying."""
- chromium_src_path = gclient_paths.GetPrimarySolutionPath()
- if not chromium_src_path:
- raise NotFoundError(
- 'Could not find checkout in any parent of the current path.\n'
- 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium checkout.')
+ """Return a path to the rustfmt executable, or die trying."""
+ chromium_src_path = gclient_paths.GetPrimarySolutionPath()
+ if not chromium_src_path:
+ raise NotFoundError(
+ 'Could not find checkout in any parent of the current path.\n'
+ 'Set CHROMIUM_BUILDTOOLS_PATH to use outside of a chromium '
+ 'checkout.')
- tool_path = os.path.join(chromium_src_path, 'third_party', 'swift-format',
- 'swift-format')
- if not os.path.exists(tool_path):
- raise NotFoundError('File does not exist: %s' % tool_path)
- return tool_path
+ tool_path = os.path.join(chromium_src_path, 'third_party', 'swift-format',
+ 'swift-format')
+ if not os.path.exists(tool_path):
+ raise NotFoundError('File does not exist: %s' % tool_path)
+ return tool_path
def IsSwiftFormatSupported():
- if sys.platform != 'darwin':
- return False
- try:
- FindSwiftFormatToolInChromiumTree()
- return True
- except NotFoundError:
- return False
+ if sys.platform != 'darwin':
+ return False
+ try:
+ FindSwiftFormatToolInChromiumTree()
+ return True
+ except NotFoundError:
+ return False
def main(args):
- try:
- tool = FindSwiftFormatToolInChromiumTree()
- except NotFoundError as e:
- sys.stderr.write("%s\n" % str(e))
- return 1
+ try:
+ tool = FindSwiftFormatToolInChromiumTree()
+ except NotFoundError as e:
+ sys.stderr.write("%s\n" % str(e))
+ return 1
- # Add some visibility to --help showing where the tool lives, since this
- # redirection can be a little opaque.
- help_syntax = ('-h', '--help', '-help', '-help-list', '--help-list')
- if any(match in args for match in help_syntax):
- print('\nDepot tools redirects you to the swift-format at:\n %s\n' %
- tool)
+ # Add some visibility to --help showing where the tool lives, since this
+ # redirection can be a little opaque.
+ help_syntax = ('-h', '--help', '-help', '-help-list', '--help-list')
+ if any(match in args for match in help_syntax):
+ print('\nDepot tools redirects you to the swift-format at:\n %s\n' %
+ tool)
- return subprocess.call([tool] + args)
+ return subprocess.call([tool] + args)
if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv[1:]))
- except KeyboardInterrupt:
- sys.stderr.write('interrupted\n')
- sys.exit(1)
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except KeyboardInterrupt:
+ sys.stderr.write('interrupted\n')
+ sys.exit(1)
diff --git a/testing_support/.style.yapf b/testing_support/.style.yapf
deleted file mode 100644
index 4741fb4f3b..0000000000
--- a/testing_support/.style.yapf
+++ /dev/null
@@ -1,3 +0,0 @@
-[style]
-based_on_style = pep8
-column_limit = 80
diff --git a/testing_support/coverage_utils.py b/testing_support/coverage_utils.py
index c3f9b18b4d..0fefab3e52 100644
--- a/testing_support/coverage_utils.py
+++ b/testing_support/coverage_utils.py
@@ -63,9 +63,8 @@ def covered_main(includes,
sys.path.insert(0, os.path.join(ROOT_PATH, 'third_party'))
import coverage
else:
- print(
- "ERROR: python-coverage (%s) is required to be installed on "
- "your PYTHONPATH to run this test." % require_native)
+ print("ERROR: python-coverage (%s) is required to be installed on "
+ "your PYTHONPATH to run this test." % require_native)
sys.exit(1)
COVERAGE = coverage.coverage(include=includes)
diff --git a/tests/.style.yapf b/tests/.style.yapf
deleted file mode 100644
index 4741fb4f3b..0000000000
--- a/tests/.style.yapf
+++ /dev/null
@@ -1,3 +0,0 @@
-[style]
-based_on_style = pep8
-column_limit = 80
diff --git a/update_depot_tools_toggle.py b/update_depot_tools_toggle.py
index 46fb109814..452ddd5308 100755
--- a/update_depot_tools_toggle.py
+++ b/update_depot_tools_toggle.py
@@ -2,7 +2,6 @@
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Small utility script to enable/disable `depot_tools` automatic updating."""
import argparse
@@ -10,29 +9,31 @@ import datetime
import os
import sys
-
DEPOT_TOOLS_ROOT = os.path.abspath(os.path.dirname(__file__))
SENTINEL_PATH = os.path.join(DEPOT_TOOLS_ROOT, '.disable_auto_update')
def main():
- parser = argparse.ArgumentParser()
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument('--enable', action='store_true',
- help='Enable auto-updating.')
- group.add_argument('--disable', action='store_true',
- help='Disable auto-updating.')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser()
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--enable',
+ action='store_true',
+ help='Enable auto-updating.')
+ group.add_argument('--disable',
+ action='store_true',
+ help='Disable auto-updating.')
+ args = parser.parse_args()
- if args.enable:
- if os.path.exists(SENTINEL_PATH):
- os.unlink(SENTINEL_PATH)
- if args.disable:
- if not os.path.exists(SENTINEL_PATH):
- with open(SENTINEL_PATH, 'w') as fd:
- fd.write('Disabled by %s at %s\n' % (__file__, datetime.datetime.now()))
- return 0
+ if args.enable:
+ if os.path.exists(SENTINEL_PATH):
+ os.unlink(SENTINEL_PATH)
+ if args.disable:
+ if not os.path.exists(SENTINEL_PATH):
+ with open(SENTINEL_PATH, 'w') as fd:
+ fd.write('Disabled by %s at %s\n' %
+ (__file__, datetime.datetime.now()))
+ return 0
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
diff --git a/upload_metrics.py b/upload_metrics.py
index 2174b0c1b1..0502790601 100644
--- a/upload_metrics.py
+++ b/upload_metrics.py
@@ -11,23 +11,24 @@ import urllib.request
import auth
import metrics_utils
-def main():
- metrics = input()
- try:
- headers = {}
- if 'bot_metrics' in metrics:
- token = auth.Authenticator().get_access_token().token
- headers = {'Authorization': 'Bearer ' + token}
- urllib.request.urlopen(urllib.request.Request(
- url=metrics_utils.APP_URL + '/upload',
- data=metrics.encode('utf-8'),
- headers=headers))
- except (urllib.error.HTTPError, urllib.error.URLError,
- http.client.RemoteDisconnected):
- pass
- return 0
+def main():
+ metrics = input()
+ try:
+ headers = {}
+ if 'bot_metrics' in metrics:
+ token = auth.Authenticator().get_access_token().token
+ headers = {'Authorization': 'Bearer ' + token}
+ urllib.request.urlopen(
+ urllib.request.Request(url=metrics_utils.APP_URL + '/upload',
+ data=metrics.encode('utf-8'),
+ headers=headers))
+ except (urllib.error.HTTPError, urllib.error.URLError,
+ http.client.RemoteDisconnected):
+ pass
+
+ return 0
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
diff --git a/upload_to_google_storage.py b/upload_to_google_storage.py
index 3a590abbd0..bf6d392b46 100755
--- a/upload_to_google_storage.py
+++ b/upload_to_google_storage.py
@@ -2,7 +2,6 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Uploads files to Google Storage content addressed."""
from __future__ import print_function
@@ -44,270 +43,292 @@ find . -name .svn -prune -o -size +1000k -type f -print0 | %prog -0 -b bkt -
def get_md5(filename):
- md5_calculator = hashlib.md5()
- with open(filename, 'rb') as f:
- while True:
- chunk = f.read(1024*1024)
- if not chunk:
- break
- md5_calculator.update(chunk)
- return md5_calculator.hexdigest()
+ md5_calculator = hashlib.md5()
+ with open(filename, 'rb') as f:
+ while True:
+ chunk = f.read(1024 * 1024)
+ if not chunk:
+ break
+ md5_calculator.update(chunk)
+ return md5_calculator.hexdigest()
def get_md5_cached(filename):
- """Don't calculate the MD5 if we can find a .md5 file."""
- # See if we can find an existing MD5 sum stored in a file.
- if os.path.exists('%s.md5' % filename):
- with open('%s.md5' % filename, 'rb') as f:
- md5_match = re.search('([a-z0-9]{32})', f.read().decode())
- if md5_match:
- return md5_match.group(1)
- else:
- md5_hash = get_md5(filename)
- with open('%s.md5' % filename, 'wb') as f:
- f.write(md5_hash.encode())
- return md5_hash
+ """Don't calculate the MD5 if we can find a .md5 file."""
+ # See if we can find an existing MD5 sum stored in a file.
+ if os.path.exists('%s.md5' % filename):
+ with open('%s.md5' % filename, 'rb') as f:
+ md5_match = re.search('([a-z0-9]{32})', f.read().decode())
+ if md5_match:
+ return md5_match.group(1)
+ else:
+ md5_hash = get_md5(filename)
+ with open('%s.md5' % filename, 'wb') as f:
+ f.write(md5_hash.encode())
+ return md5_hash
-def _upload_worker(
- thread_num, upload_queue, base_url, gsutil, md5_lock, force,
- use_md5, stdout_queue, ret_codes, gzip):
- while True:
- filename, sha1_sum = upload_queue.get()
- if not filename:
- break
- file_url = '%s/%s' % (base_url, sha1_sum)
- if gsutil.check_call('ls', file_url)[0] == 0 and not force:
- # File exists, check MD5 hash.
- _, out, _ = gsutil.check_call_with_retries('ls', '-L', file_url)
- etag_match = re.search(r'ETag:\s+\S+', out)
- if etag_match:
- stdout_queue.put(
- '%d> File with url %s already exists' % (thread_num, file_url))
- remote_md5 = etag_match.group(0).split()[1]
- # Calculate the MD5 checksum to match it to Google Storage's ETag.
- with md5_lock:
- if use_md5:
- local_md5 = get_md5_cached(filename)
- else:
- local_md5 = get_md5(filename)
- if local_md5 == remote_md5:
- stdout_queue.put(
- '%d> File %s already exists and MD5 matches, upload skipped' %
- (thread_num, filename))
- continue
- stdout_queue.put('%d> Uploading %s...' % (
- thread_num, filename))
- gsutil_args = ['-h', 'Cache-Control:public, max-age=31536000', 'cp']
- if gzip:
- gsutil_args.extend(['-z', gzip])
- gsutil_args.extend([filename, file_url])
- code, _, err = gsutil.check_call_with_retries(*gsutil_args)
- if code != 0:
- ret_codes.put(
- (code,
- 'Encountered error on uploading %s to %s\n%s' %
- (filename, file_url, err)))
- continue
-
- # Mark executable files with the header "x-goog-meta-executable: 1" which
- # the download script will check for to preserve the executable bit.
- if not sys.platform.startswith('win'):
- if os.stat(filename).st_mode & stat.S_IEXEC:
- code, _, err = gsutil.check_call_with_retries(
- 'setmeta', '-h', 'x-goog-meta-executable:1', file_url)
+def _upload_worker(thread_num, upload_queue, base_url, gsutil, md5_lock, force,
+ use_md5, stdout_queue, ret_codes, gzip):
+ while True:
+ filename, sha1_sum = upload_queue.get()
+ if not filename:
+ break
+ file_url = '%s/%s' % (base_url, sha1_sum)
+ if gsutil.check_call('ls', file_url)[0] == 0 and not force:
+ # File exists, check MD5 hash.
+ _, out, _ = gsutil.check_call_with_retries('ls', '-L', file_url)
+ etag_match = re.search(r'ETag:\s+\S+', out)
+ if etag_match:
+ stdout_queue.put('%d> File with url %s already exists' %
+ (thread_num, file_url))
+ remote_md5 = etag_match.group(0).split()[1]
+ # Calculate the MD5 checksum to match it to Google Storage's
+ # ETag.
+ with md5_lock:
+ if use_md5:
+ local_md5 = get_md5_cached(filename)
+ else:
+ local_md5 = get_md5(filename)
+ if local_md5 == remote_md5:
+ stdout_queue.put(
+ '%d> File %s already exists and MD5 matches, upload '
+ 'skipped' % (thread_num, filename))
+ continue
+ stdout_queue.put('%d> Uploading %s...' % (thread_num, filename))
+ gsutil_args = ['-h', 'Cache-Control:public, max-age=31536000', 'cp']
+ if gzip:
+ gsutil_args.extend(['-z', gzip])
+ gsutil_args.extend([filename, file_url])
+ code, _, err = gsutil.check_call_with_retries(*gsutil_args)
if code != 0:
- ret_codes.put(
- (code,
- 'Encountered error on setting metadata on %s\n%s' %
- (file_url, err)))
+ ret_codes.put((code, 'Encountered error on uploading %s to %s\n%s' %
+ (filename, file_url, err)))
+ continue
+
+ # Mark executable files with the header "x-goog-meta-executable: 1"
+ # which the download script will check for to preserve the executable
+ # bit.
+ if not sys.platform.startswith('win'):
+ if os.stat(filename).st_mode & stat.S_IEXEC:
+ code, _, err = gsutil.check_call_with_retries(
+ 'setmeta', '-h', 'x-goog-meta-executable:1', file_url)
+ if code != 0:
+ ret_codes.put(
+ (code,
+ 'Encountered error on setting metadata on %s\n%s' %
+ (file_url, err)))
def get_targets(args, parser, use_null_terminator):
- if not args:
- parser.error('Missing target.')
+ if not args:
+ parser.error('Missing target.')
- if len(args) == 1 and args[0] == '-':
- # Take stdin as a newline or null separated list of files.
- if use_null_terminator:
- return sys.stdin.read().split('\0')
+ if len(args) == 1 and args[0] == '-':
+ # Take stdin as a newline or null separated list of files.
+ if use_null_terminator:
+ return sys.stdin.read().split('\0')
- return sys.stdin.read().splitlines()
+ return sys.stdin.read().splitlines()
- return args
+ return args
-def upload_to_google_storage(
- input_filenames, base_url, gsutil, force,
- use_md5, num_threads, skip_hashing, gzip):
- # We only want one MD5 calculation happening at a time to avoid HD thrashing.
- md5_lock = threading.Lock()
+def upload_to_google_storage(input_filenames, base_url, gsutil, force, use_md5,
+ num_threads, skip_hashing, gzip):
+ # We only want one MD5 calculation happening at a time to avoid HD
+ # thrashing.
+ md5_lock = threading.Lock()
- # Start up all the worker threads plus the printer thread.
- all_threads = []
- ret_codes = queue.Queue()
- ret_codes.put((0, None))
- upload_queue = queue.Queue()
- upload_timer = time.time()
- stdout_queue = queue.Queue()
- printer_thread = PrinterThread(stdout_queue)
- printer_thread.daemon = True
- printer_thread.start()
- for thread_num in range(num_threads):
- t = threading.Thread(
- target=_upload_worker,
- args=[thread_num, upload_queue, base_url, gsutil, md5_lock,
- force, use_md5, stdout_queue, ret_codes, gzip])
- t.daemon = True
- t.start()
- all_threads.append(t)
+ # Start up all the worker threads plus the printer thread.
+ all_threads = []
+ ret_codes = queue.Queue()
+ ret_codes.put((0, None))
+ upload_queue = queue.Queue()
+ upload_timer = time.time()
+ stdout_queue = queue.Queue()
+ printer_thread = PrinterThread(stdout_queue)
+ printer_thread.daemon = True
+ printer_thread.start()
+ for thread_num in range(num_threads):
+ t = threading.Thread(target=_upload_worker,
+ args=[
+ thread_num, upload_queue, base_url, gsutil,
+ md5_lock, force, use_md5, stdout_queue,
+ ret_codes, gzip
+ ])
+ t.daemon = True
+ t.start()
+ all_threads.append(t)
- # We want to hash everything in a single thread since its faster.
- # The bottleneck is in disk IO, not CPU.
- hashing_start = time.time()
- has_missing_files = False
- for filename in input_filenames:
- if not os.path.exists(filename):
- stdout_queue.put('Main> Error: %s not found, skipping.' % filename)
- has_missing_files = True
- continue
- if os.path.exists('%s.sha1' % filename) and skip_hashing:
- stdout_queue.put(
- 'Main> Found hash for %s, sha1 calculation skipped.' % filename)
- with open(filename + '.sha1', 'rb') as f:
- sha1_file = f.read(1024)
- if not re.match('^([a-z0-9]{40})$', sha1_file.decode()):
- print('Invalid sha1 hash file %s.sha1' % filename, file=sys.stderr)
- return 1
- upload_queue.put((filename, sha1_file.decode()))
- continue
- stdout_queue.put('Main> Calculating hash for %s...' % filename)
- sha1_sum = get_sha1(filename)
- with open(filename + '.sha1', 'wb') as f:
- f.write(sha1_sum.encode())
- stdout_queue.put('Main> Done calculating hash for %s.' % filename)
- upload_queue.put((filename, sha1_sum))
- hashing_duration = time.time() - hashing_start
+ # We want to hash everything in a single thread since it's faster.
+ # The bottleneck is in disk IO, not CPU.
+ hashing_start = time.time()
+ has_missing_files = False
+ for filename in input_filenames:
+ if not os.path.exists(filename):
+ stdout_queue.put('Main> Error: %s not found, skipping.' % filename)
+ has_missing_files = True
+ continue
+ if os.path.exists('%s.sha1' % filename) and skip_hashing:
+ stdout_queue.put(
+ 'Main> Found hash for %s, sha1 calculation skipped.' % filename)
+ with open(filename + '.sha1', 'rb') as f:
+ sha1_file = f.read(1024)
+ if not re.match('^([a-z0-9]{40})$', sha1_file.decode()):
+ print('Invalid sha1 hash file %s.sha1' % filename,
+ file=sys.stderr)
+ return 1
+ upload_queue.put((filename, sha1_file.decode()))
+ continue
+ stdout_queue.put('Main> Calculating hash for %s...' % filename)
+ sha1_sum = get_sha1(filename)
+ with open(filename + '.sha1', 'wb') as f:
+ f.write(sha1_sum.encode())
+ stdout_queue.put('Main> Done calculating hash for %s.' % filename)
+ upload_queue.put((filename, sha1_sum))
+ hashing_duration = time.time() - hashing_start
- # Wait for everything to finish.
- for _ in all_threads:
- upload_queue.put((None, None)) # To mark the end of the work queue.
- for t in all_threads:
- t.join()
- stdout_queue.put(None)
- printer_thread.join()
+ # Wait for everything to finish.
+ for _ in all_threads:
+ upload_queue.put((None, None)) # To mark the end of the work queue.
+ for t in all_threads:
+ t.join()
+ stdout_queue.put(None)
+ printer_thread.join()
- # Print timing information.
- print('Hashing %s files took %1f seconds' % (
- len(input_filenames), hashing_duration))
- print('Uploading took %1f seconds' % (time.time() - upload_timer))
+ # Print timing information.
+ print('Hashing %s files took %1f seconds' %
+ (len(input_filenames), hashing_duration))
+ print('Uploading took %1f seconds' % (time.time() - upload_timer))
- # See if we ran into any errors.
- max_ret_code = 0
- for ret_code, message in ret_codes.queue:
- max_ret_code = max(ret_code, max_ret_code)
- if message:
- print(message, file=sys.stderr)
- if has_missing_files:
- print('One or more input files missing', file=sys.stderr)
- max_ret_code = max(1, max_ret_code)
+ # See if we ran into any errors.
+ max_ret_code = 0
+ for ret_code, message in ret_codes.queue:
+ max_ret_code = max(ret_code, max_ret_code)
+ if message:
+ print(message, file=sys.stderr)
+ if has_missing_files:
+ print('One or more input files missing', file=sys.stderr)
+ max_ret_code = max(1, max_ret_code)
- if not max_ret_code:
- print('Success!')
+ if not max_ret_code:
+ print('Success!')
- return max_ret_code
+ return max_ret_code
def create_archives(dirs):
- archive_names = []
- for name in dirs:
- tarname = '%s.tar.gz' % name
- with tarfile.open(tarname, 'w:gz') as tar:
- tar.add(name)
- archive_names.append(tarname)
- return archive_names
+ archive_names = []
+ for name in dirs:
+ tarname = '%s.tar.gz' % name
+ with tarfile.open(tarname, 'w:gz') as tar:
+ tar.add(name)
+ archive_names.append(tarname)
+ return archive_names
def validate_archive_dirs(dirs):
- for d in dirs:
- # We don't allow .. in paths in our archives.
- if d == '..':
- return False
- # We only allow dirs.
- if not os.path.isdir(d):
- return False
- # We don't allow sym links in our archives.
- if os.path.islink(d):
- return False
- # We required that the subdirectories we are archiving are all just below
- # cwd.
- if d not in next(os.walk('.'))[1]:
- return False
+ for d in dirs:
+ # We don't allow .. in paths in our archives.
+ if d == '..':
+ return False
+ # We only allow dirs.
+ if not os.path.isdir(d):
+ return False
+ # We don't allow sym links in our archives.
+ if os.path.islink(d):
+ return False
+ # We require that the subdirectories we are archiving are all just
+ # below cwd.
+ if d not in next(os.walk('.'))[1]:
+ return False
- return True
+ return True
def main():
- parser = optparse.OptionParser(USAGE_STRING)
- parser.add_option('-b', '--bucket',
- help='Google Storage bucket to upload to.')
- parser.add_option('-e', '--boto', help='Specify a custom boto file.')
- parser.add_option('-a', '--archive', action='store_true',
- help='Archive directory as a tar.gz file')
- parser.add_option('-f', '--force', action='store_true',
- help='Force upload even if remote file exists.')
- parser.add_option('-g', '--gsutil_path', default=GSUTIL_DEFAULT_PATH,
- help='Path to the gsutil script.')
- parser.add_option('-m', '--use_md5', action='store_true',
- help='Generate MD5 files when scanning, and don\'t check '
- 'the MD5 checksum if a .md5 file is found.')
- parser.add_option('-t', '--num_threads', default=1, type='int',
- help='Number of uploader threads to run.')
- parser.add_option('-s', '--skip_hashing', action='store_true',
- help='Skip hashing if .sha1 file exists.')
- parser.add_option('-0', '--use_null_terminator', action='store_true',
- help='Use \\0 instead of \\n when parsing '
- 'the file list from stdin. This is useful if the input '
- 'is coming from "find ... -print0".')
- parser.add_option('-z', '--gzip', metavar='ext',
- help='Gzip files which end in ext. '
- 'ext is a comma-separated list')
- (options, args) = parser.parse_args()
+ parser = optparse.OptionParser(USAGE_STRING)
+ parser.add_option('-b',
+ '--bucket',
+ help='Google Storage bucket to upload to.')
+ parser.add_option('-e', '--boto', help='Specify a custom boto file.')
+ parser.add_option('-a',
+ '--archive',
+ action='store_true',
+ help='Archive directory as a tar.gz file')
+ parser.add_option('-f',
+ '--force',
+ action='store_true',
+ help='Force upload even if remote file exists.')
+ parser.add_option('-g',
+ '--gsutil_path',
+ default=GSUTIL_DEFAULT_PATH,
+ help='Path to the gsutil script.')
+ parser.add_option('-m',
+ '--use_md5',
+ action='store_true',
+ help='Generate MD5 files when scanning, and don\'t check '
+ 'the MD5 checksum if a .md5 file is found.')
+ parser.add_option('-t',
+ '--num_threads',
+ default=1,
+ type='int',
+ help='Number of uploader threads to run.')
+ parser.add_option('-s',
+ '--skip_hashing',
+ action='store_true',
+ help='Skip hashing if .sha1 file exists.')
+ parser.add_option('-0',
+ '--use_null_terminator',
+ action='store_true',
+ help='Use \\0 instead of \\n when parsing '
+ 'the file list from stdin. This is useful if the input '
+ 'is coming from "find ... -print0".')
+ parser.add_option('-z',
+ '--gzip',
+ metavar='ext',
+ help='Gzip files which end in ext. '
+ 'ext is a comma-separated list')
+ (options, args) = parser.parse_args()
- # Enumerate our inputs.
- input_filenames = get_targets(args, parser, options.use_null_terminator)
+ # Enumerate our inputs.
+ input_filenames = get_targets(args, parser, options.use_null_terminator)
- if options.archive:
- if not validate_archive_dirs(input_filenames):
- parser.error('Only directories just below cwd are valid entries when '
- 'using the --archive argument. Entries can not contain .. '
- ' and entries can not be symlinks. Entries was %s' %
- input_filenames)
- return 1
- input_filenames = create_archives(input_filenames)
+ if options.archive:
+ if not validate_archive_dirs(input_filenames):
+ parser.error(
+ 'Only directories just below cwd are valid entries when '
+ 'using the --archive argument. Entries can not contain .. '
+ ' and entries can not be symlinks. Entries was %s' %
+ input_filenames)
+ return 1
+ input_filenames = create_archives(input_filenames)
- # Make sure we can find a working instance of gsutil.
- if os.path.exists(GSUTIL_DEFAULT_PATH):
- gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto)
- else:
- gsutil = None
- for path in os.environ["PATH"].split(os.pathsep):
- if os.path.exists(path) and 'gsutil' in os.listdir(path):
- gsutil = Gsutil(os.path.join(path, 'gsutil'), boto_path=options.boto)
- if not gsutil:
- parser.error('gsutil not found in %s, bad depot_tools checkout?' %
- GSUTIL_DEFAULT_PATH)
+ # Make sure we can find a working instance of gsutil.
+ if os.path.exists(GSUTIL_DEFAULT_PATH):
+ gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto)
+ else:
+ gsutil = None
+ for path in os.environ["PATH"].split(os.pathsep):
+ if os.path.exists(path) and 'gsutil' in os.listdir(path):
+ gsutil = Gsutil(os.path.join(path, 'gsutil'),
+ boto_path=options.boto)
+ if not gsutil:
+ parser.error('gsutil not found in %s, bad depot_tools checkout?' %
+ GSUTIL_DEFAULT_PATH)
- base_url = 'gs://%s' % options.bucket
+ base_url = 'gs://%s' % options.bucket
- return upload_to_google_storage(
- input_filenames, base_url, gsutil, options.force, options.use_md5,
- options.num_threads, options.skip_hashing, options.gzip)
+ return upload_to_google_storage(input_filenames, base_url, gsutil,
+ options.force, options.use_md5,
+ options.num_threads, options.skip_hashing,
+ options.gzip)
if __name__ == '__main__':
- try:
- sys.exit(main())
- except KeyboardInterrupt:
- sys.stderr.write('interrupted\n')
- sys.exit(1)
+ try:
+ sys.exit(main())
+ except KeyboardInterrupt:
+ sys.stderr.write('interrupted\n')
+ sys.exit(1)
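The content-addressing contract implemented above, as a compact sketch: each input file is uploaded to gs://<bucket>/<sha1-of-contents> and a sibling <file>.sha1 is written so later downloads can locate it. The real script imports its get_sha1() helper (not shown in this diff); the standalone version below simply mirrors get_md5() above, and the bucket name is hypothetical.

    import hashlib


    def get_sha1(filename):
        """Chunked SHA-1 of a file, analogous to get_md5() above."""
        sha1_calculator = hashlib.sha1()
        with open(filename, 'rb') as f:
            while True:
                chunk = f.read(1024 * 1024)
                if not chunk:
                    break
                sha1_calculator.update(chunk)
        return sha1_calculator.hexdigest()


    def destination_url(bucket, filename):
        """Mirrors the base_url/file_url construction in the uploader above."""
        return 'gs://%s/%s' % (bucket, get_sha1(filename))


    # e.g. destination_url('chromium-example-bucket', 'toolchain.tar.gz')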
diff --git a/utils.py b/utils.py
index 3f16ce98ba..a85d2f722e 100644
--- a/utils.py
+++ b/utils.py
@@ -7,19 +7,19 @@ import subprocess
def depot_tools_version():
- depot_tools_root = os.path.dirname(os.path.abspath(__file__))
- try:
- commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
- cwd=depot_tools_root).decode(
- 'utf-8', 'ignore')
- return 'git-%s' % commit_hash
- except Exception:
- pass
+ depot_tools_root = os.path.dirname(os.path.abspath(__file__))
+ try:
+ commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
+ cwd=depot_tools_root).decode(
+ 'utf-8', 'ignore')
+ return 'git-%s' % commit_hash
+ except Exception:
+ pass
- # git check failed, let's check last modification of frequently checked file
- try:
- mtime = os.path.getmtime(
- os.path.join(depot_tools_root, 'infra', 'config', 'recipes.cfg'))
- return 'recipes.cfg-%d' % (mtime)
- except Exception:
- return 'unknown'
+ # git check failed; let's check the last modification of a frequently
+ # updated file
+ try:
+ mtime = os.path.getmtime(
+ os.path.join(depot_tools_root, 'infra', 'config', 'recipes.cfg'))
+ return 'recipes.cfg-%d' % (mtime)
+ except Exception:
+ return 'unknown'
diff --git a/watchlists.py b/watchlists.py
index fca78a9238..7c6d776d69 100755
--- a/watchlists.py
+++ b/watchlists.py
@@ -2,7 +2,6 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Watchlists
Watchlists is a mechanism that allow a developer (a "watcher") to watch over
@@ -27,7 +26,7 @@ import sys
class Watchlists(object):
- """Manage Watchlists.
+ """Manage Watchlists.
This class provides mechanism to load watchlists for a repo and identify
watchers.
@@ -37,77 +36,79 @@ class Watchlists(object):
"/path/to/file2",])
"""
- _RULES = "WATCHLISTS"
- _RULES_FILENAME = _RULES
- _repo_root = None
- _defns = {} # Definitions
- _path_regexps = {} # Name -> Regular expression mapping
- _watchlists = {} # name to email mapping
+ _RULES = "WATCHLISTS"
+ _RULES_FILENAME = _RULES
+ _repo_root = None
+ _defns = {} # Definitions
+ _path_regexps = {} # Name -> Regular expression mapping
+ _watchlists = {} # name to email mapping
- def __init__(self, repo_root):
- self._repo_root = repo_root
- self._LoadWatchlistRules()
+ def __init__(self, repo_root):
+ self._repo_root = repo_root
+ self._LoadWatchlistRules()
- def _GetRulesFilePath(self):
- """Returns path to WATCHLISTS file."""
- return os.path.join(self._repo_root, self._RULES_FILENAME)
+ def _GetRulesFilePath(self):
+ """Returns path to WATCHLISTS file."""
+ return os.path.join(self._repo_root, self._RULES_FILENAME)
- def _HasWatchlistsFile(self):
- """Determine if watchlists are available for this repo."""
- return os.path.exists(self._GetRulesFilePath())
+ def _HasWatchlistsFile(self):
+ """Determine if watchlists are available for this repo."""
+ return os.path.exists(self._GetRulesFilePath())
- def _ContentsOfWatchlistsFile(self):
- """Read the WATCHLISTS file and return its contents."""
- try:
- watchlists_file = open(self._GetRulesFilePath())
- contents = watchlists_file.read()
- watchlists_file.close()
- return contents
- except IOError as e:
- logging.error("Cannot read %s: %s" % (self._GetRulesFilePath(), e))
- return ''
+ def _ContentsOfWatchlistsFile(self):
+ """Read the WATCHLISTS file and return its contents."""
+ try:
+ watchlists_file = open(self._GetRulesFilePath())
+ contents = watchlists_file.read()
+ watchlists_file.close()
+ return contents
+ except IOError as e:
+ logging.error("Cannot read %s: %s" % (self._GetRulesFilePath(), e))
+ return ''
- def _LoadWatchlistRules(self):
- """Load watchlists from WATCHLISTS file. Does nothing if not present."""
- if not self._HasWatchlistsFile():
- return
+ def _LoadWatchlistRules(self):
+ """Load watchlists from WATCHLISTS file. Does nothing if not present."""
+ if not self._HasWatchlistsFile():
+ return
- contents = self._ContentsOfWatchlistsFile()
- watchlists_data = None
- try:
- watchlists_data = eval(contents, {'__builtins__': None}, None)
- except SyntaxError as e:
- logging.error("Cannot parse %s. %s" % (self._GetRulesFilePath(), e))
- return
+ contents = self._ContentsOfWatchlistsFile()
+ watchlists_data = None
+ try:
+ watchlists_data = eval(contents, {'__builtins__': None}, None)
+ except SyntaxError as e:
+ logging.error("Cannot parse %s. %s" % (self._GetRulesFilePath(), e))
+ return
- defns = watchlists_data.get("WATCHLIST_DEFINITIONS")
- if not defns:
- logging.error("WATCHLIST_DEFINITIONS not defined in %s" %
- self._GetRulesFilePath())
- return
- watchlists = watchlists_data.get("WATCHLISTS")
- if not watchlists:
- logging.error("WATCHLISTS not defined in %s" % self._GetRulesFilePath())
- return
- self._defns = defns
- self._watchlists = watchlists
+ defns = watchlists_data.get("WATCHLIST_DEFINITIONS")
+ if not defns:
+ logging.error("WATCHLIST_DEFINITIONS not defined in %s" %
+ self._GetRulesFilePath())
+ return
+ watchlists = watchlists_data.get("WATCHLISTS")
+ if not watchlists:
+ logging.error("WATCHLISTS not defined in %s" %
+ self._GetRulesFilePath())
+ return
+ self._defns = defns
+ self._watchlists = watchlists
- # Compile the regular expressions ahead of time to avoid creating them
- # on-the-fly multiple times per file.
- self._path_regexps = {}
- for name, rule in defns.items():
- filepath = rule.get('filepath')
- if not filepath:
- continue
- self._path_regexps[name] = re.compile(filepath)
+ # Compile the regular expressions ahead of time to avoid creating them
+ # on-the-fly multiple times per file.
+ self._path_regexps = {}
+ for name, rule in defns.items():
+ filepath = rule.get('filepath')
+ if not filepath:
+ continue
+ self._path_regexps[name] = re.compile(filepath)
- # Verify that all watchlist names are defined
- for name in watchlists:
- if name not in defns:
- logging.error("%s not defined in %s" % (name, self._GetRulesFilePath()))
+ # Verify that all watchlist names are defined
+ for name in watchlists:
+ if name not in defns:
+ logging.error("%s not defined in %s" %
+ (name, self._GetRulesFilePath()))
- def GetWatchersForPaths(self, paths):
- """Fetch the list of watchers for |paths|
+ def GetWatchersForPaths(self, paths):
+ """Fetch the list of watchers for |paths|
Args:
paths: [path1, path2, ...]
@@ -115,28 +116,28 @@ class Watchlists(object):
Returns:
[u1@chromium.org, u2@gmail.com, ...]
"""
- watchers = set() # A set, to avoid duplicates
- for path in paths:
- path = path.replace(os.sep, '/')
- for name, rule in self._path_regexps.items():
- if name not in self._watchlists:
- continue
- if rule.search(path):
- for watchlist in self._watchlists[name]:
- watchers.add(watchlist)
- return sorted(watchers)
+ watchers = set() # A set, to avoid duplicates
+ for path in paths:
+ path = path.replace(os.sep, '/')
+ for name, rule in self._path_regexps.items():
+ if name not in self._watchlists:
+ continue
+ if rule.search(path):
+ for watchlist in self._watchlists[name]:
+ watchers.add(watchlist)
+ return sorted(watchers)
def main(argv):
- # Confirm that watchlists can be parsed and spew out the watchers
- if len(argv) < 2:
- print("Usage (from the base of repo):")
- print(" %s [file-1] [file-2] ...." % argv[0])
- return 1
- wl = Watchlists(os.getcwd())
- watchers = wl.GetWatchersForPaths(argv[1:])
- print(watchers)
+ # Confirm that watchlists can be parsed and spew out the watchers
+ if len(argv) < 2:
+ print("Usage (from the base of repo):")
+ print(" %s [file-1] [file-2] ...." % argv[0])
+ return 1
+ wl = Watchlists(os.getcwd())
+ watchers = wl.GetWatchersForPaths(argv[1:])
+ print(watchers)
if __name__ == '__main__':
- main(sys.argv)
+ main(sys.argv)
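For reference while reading the parser above, a minimal WATCHLISTS file it accepts. Only the two top-level keys and the per-definition 'filepath' regex come from the code; the 'docs' entry and e-mail address are made up.

    # Hypothetical WATCHLISTS file at the repo root; it is read by
    # _ContentsOfWatchlistsFile() and eval()'d with empty builtins.
    {
      'WATCHLIST_DEFINITIONS': {
        'docs': {
          'filepath': 'docs/.*',
        },
      },
      'WATCHLISTS': {
        'docs': ['docs-watcher@example.com'],
      },
    }

With this file in place, GetWatchersForPaths(['docs/README.md']) returns ['docs-watcher@example.com'].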
diff --git a/weekly b/weekly
index 853522597d..431440205a 100755
--- a/weekly
+++ b/weekly
@@ -2,7 +2,6 @@
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Display log of checkins of one particular developer since a particular
date. Only works on git dependencies at the moment."""
@@ -17,38 +16,40 @@ import sys
def show_log(path, authors, since='1 week ago'):
- """Display log in a single git repo."""
+ """Display log in a single git repo."""
- author_option = ' '.join(['--author=' + author for author in authors])
- command = ' '.join(['git log', author_option, '--since="%s"' % since,
- 'origin/HEAD', '| git shortlog'])
- status = subprocess.Popen(['sh', '-c', command],
- cwd=path,
- stdout=subprocess.PIPE).communicate()[0].rstrip()
+ author_option = ' '.join(['--author=' + author for author in authors])
+ command = ' '.join([
+ 'git log', author_option,
+ '--since="%s"' % since, 'origin/HEAD', '| git shortlog'
+ ])
+ status = subprocess.Popen(['sh', '-c', command],
+ cwd=path,
+ stdout=subprocess.PIPE).communicate()[0].rstrip()
- if len(status.splitlines()) > 0:
- print('---------- %s ----------' % path)
- print(status)
+ if len(status.splitlines()) > 0:
+ print('---------- %s ----------' % path)
+ print(status)
def main():
- """Take no arguments."""
+ """Take no arguments."""
- option_parser = optparse.OptionParser()
- option_parser.add_option("-a", "--author", action="append", default=[])
- option_parser.add_option("-s", "--since", default="1 week ago")
- options, args = option_parser.parse_args()
+ option_parser = optparse.OptionParser()
+ option_parser.add_option("-a", "--author", action="append", default=[])
+ option_parser.add_option("-s", "--since", default="1 week ago")
+ options, args = option_parser.parse_args()
- root, entries = gclient_utils.GetGClientRootAndEntries()
+ root, entries = gclient_utils.GetGClientRootAndEntries()
- # which entries map to a git repos?
- paths = [k for k, v in entries.items() if not re.search('svn', v)]
- paths.sort()
+ # which entries map to git repos?
+ paths = [k for k, v in entries.items() if not re.search('svn', v)]
+ paths.sort()
- for path in paths:
- dir = os.path.normpath(os.path.join(root, path))
- show_log(dir, options.author, options.since)
+ for path in paths:
+ dir = os.path.normpath(os.path.join(root, path))
+ show_log(dir, options.author, options.since)
if __name__ == '__main__':
- main()
+ main()
diff --git a/win32imports.py b/win32imports.py
index 6de5471126..d5c2867fcf 100644
--- a/win32imports.py
+++ b/win32imports.py
@@ -14,13 +14,13 @@ LOCKFILE_FAIL_IMMEDIATELY = 0x00000001
class Overlapped(ctypes.Structure):
- """Overlapped is required and used in LockFileEx and UnlockFileEx."""
- _fields_ = [('Internal', ctypes.wintypes.LPVOID),
- ('InternalHigh', ctypes.wintypes.LPVOID),
- ('Offset', ctypes.wintypes.DWORD),
- ('OffsetHigh', ctypes.wintypes.DWORD),
- ('Pointer', ctypes.wintypes.LPVOID),
- ('hEvent', ctypes.wintypes.HANDLE)]
+ """Overlapped is required and used in LockFileEx and UnlockFileEx."""
+ _fields_ = [('Internal', ctypes.wintypes.LPVOID),
+ ('InternalHigh', ctypes.wintypes.LPVOID),
+ ('Offset', ctypes.wintypes.DWORD),
+ ('OffsetHigh', ctypes.wintypes.DWORD),
+ ('Pointer', ctypes.wintypes.LPVOID),
+ ('hEvent', ctypes.wintypes.HANDLE)]
# https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
diff --git a/win_toolchain/get_toolchain_if_necessary.py b/win_toolchain/get_toolchain_if_necessary.py
index e4d606f2f0..7942f9a11c 100755
--- a/win_toolchain/get_toolchain_if_necessary.py
+++ b/win_toolchain/get_toolchain_if_necessary.py
@@ -2,7 +2,6 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Downloads and unpacks a toolchain for building on Windows. The contents are
matched by sha1 which will be updated when the toolchain is updated.
@@ -42,568 +41,597 @@ ENV_TOOLCHAIN_ROOT = 'DEPOT_TOOLS_WIN_TOOLCHAIN_ROOT'
# winreg isn't natively available under CygWin
if sys.platform == "win32":
- try:
- import winreg
- except ImportError:
- import _winreg as winreg
+ try:
+ import winreg
+ except ImportError:
+ import _winreg as winreg
elif sys.platform == "cygwin":
- try:
- import cygwinreg as winreg
- except ImportError:
- print('')
- print('CygWin does not natively support winreg but a replacement exists.')
- print('https://pypi.python.org/pypi/cygwinreg/')
- print('')
- print('Try: easy_install cygwinreg')
- print('')
- raise
+ try:
+ import cygwinreg as winreg
+ except ImportError:
+ print('')
+ print(
+ 'CygWin does not natively support winreg but a replacement exists.')
+ print('https://pypi.python.org/pypi/cygwinreg/')
+ print('')
+ print('Try: easy_install cygwinreg')
+ print('')
+ raise
BASEDIR = os.path.dirname(os.path.abspath(__file__))
DEPOT_TOOLS_PATH = os.path.join(BASEDIR, '..')
sys.path.append(DEPOT_TOOLS_PATH)
try:
- import download_from_google_storage
+ import download_from_google_storage
except ImportError:
- # Allow use of utility functions in this script from package_from_installed
- # on bare VM that doesn't have a full depot_tools.
- pass
+ # Allow use of utility functions in this script from package_from_installed
+ # on bare VM that doesn't have a full depot_tools.
+ pass
def GetFileList(root):
- """Gets a normalized list of files under |root|."""
- assert not os.path.isabs(root)
- assert os.path.normpath(root) == root
- file_list = []
- # Ignore WER ReportQueue entries that vctip/cl leave in the bin dir if/when
- # they crash. Also ignores the content of the
- # Windows Kits/10/debuggers/x(86|64)/(sym|src)/ directories as this is just
- # the temporarily location that Windbg might use to store the symbol files
- # and downloaded sources.
- #
- # Note: These files are only created on a Windows host, so the
- # ignored_directories list isn't relevant on non-Windows hosts.
+ """Gets a normalized list of files under |root|."""
+ assert not os.path.isabs(root)
+ assert os.path.normpath(root) == root
+ file_list = []
+ # Ignore WER ReportQueue entries that vctip/cl leave in the bin dir if/when
+ # they crash. Also ignores the content of the
+ # Windows Kits/10/debuggers/x(86|64)/(sym|src)/ directories as this is just
+    # the temporary location that Windbg might use to store the symbol files
+ # and downloaded sources.
+ #
+ # Note: These files are only created on a Windows host, so the
+ # ignored_directories list isn't relevant on non-Windows hosts.
- # The Windows SDK is either in `win_sdk` or in `Windows Kits\10`. This
- # script must work with both layouts, so check which one it is.
- # This can be different in each |root|.
- if os.path.isdir(os.path.join(root, 'Windows Kits', '10')):
- win_sdk = 'Windows Kits\\10'
- else:
- win_sdk = 'win_sdk'
+ # The Windows SDK is either in `win_sdk` or in `Windows Kits\10`. This
+ # script must work with both layouts, so check which one it is.
+ # This can be different in each |root|.
+ if os.path.isdir(os.path.join(root, 'Windows Kits', '10')):
+ win_sdk = 'Windows Kits\\10'
+ else:
+ win_sdk = 'win_sdk'
- ignored_directories = ['wer\\reportqueue',
- win_sdk + '\\debuggers\\x86\\sym\\',
- win_sdk + '\\debuggers\\x64\\sym\\',
- win_sdk + '\\debuggers\\x86\\src\\',
- win_sdk + '\\debuggers\\x64\\src\\']
- ignored_directories = [d.lower() for d in ignored_directories]
+ ignored_directories = [
+ 'wer\\reportqueue', win_sdk + '\\debuggers\\x86\\sym\\',
+ win_sdk + '\\debuggers\\x64\\sym\\',
+ win_sdk + '\\debuggers\\x86\\src\\', win_sdk + '\\debuggers\\x64\\src\\'
+ ]
+ ignored_directories = [d.lower() for d in ignored_directories]
- for base, _, files in os.walk(root):
- paths = [os.path.join(base, f) for f in files]
- for p in paths:
- if any(ignored_dir in p.lower() for ignored_dir in ignored_directories):
- continue
- file_list.append(p)
- return sorted(file_list, key=lambda s: s.replace('/', '\\').lower())
+ for base, _, files in os.walk(root):
+ paths = [os.path.join(base, f) for f in files]
+ for p in paths:
+ if any(ignored_dir in p.lower()
+ for ignored_dir in ignored_directories):
+ continue
+ file_list.append(p)
+ return sorted(file_list, key=lambda s: s.replace('/', '\\').lower())
def MakeTimestampsFileName(root, sha1):
- return os.path.join(root, os.pardir, '%s.timestamps' % sha1)
+ return os.path.join(root, os.pardir, '%s.timestamps' % sha1)
def CalculateHash(root, expected_hash):
- """Calculates the sha1 of the paths to all files in the given |root| and the
+ """Calculates the sha1 of the paths to all files in the given |root| and the
contents of those files, and returns as a hex string.
|expected_hash| is the expected hash value for this toolchain if it has
already been installed.
"""
- if expected_hash:
- full_root_path = os.path.join(root, expected_hash)
- else:
- full_root_path = root
- file_list = GetFileList(full_root_path)
- # Check whether we previously saved timestamps in $root/../{sha1}.timestamps.
- # If we didn't, or they don't match, then do the full calculation, otherwise
- # return the saved value.
- timestamps_file = MakeTimestampsFileName(root, expected_hash)
- timestamps_data = {'files': [], 'sha1': ''}
- if os.path.exists(timestamps_file):
- with open(timestamps_file, 'rb') as f:
- try:
- timestamps_data = json.load(f)
- except ValueError:
- # json couldn't be loaded, empty data will force a re-hash.
- pass
-
- matches = len(file_list) == len(timestamps_data['files'])
- # Don't check the timestamp of the version file as we touch this file to
- # indicates which versions of the toolchain are still being used.
- vc_dir = os.path.join(full_root_path, 'VC').lower()
- if matches:
- for disk, cached in zip(file_list, timestamps_data['files']):
- if disk != cached[0] or (
- disk != vc_dir and os.path.getmtime(disk) != cached[1]):
- matches = False
- break
- elif os.path.exists(timestamps_file):
- # Print some information about the extra/missing files. Don't do this if we
- # don't have a timestamp file, as all the files will be considered as
- # missing.
- timestamps_data_files = []
- for f in timestamps_data['files']:
- timestamps_data_files.append(f[0])
- missing_files = [f for f in timestamps_data_files if f not in file_list]
- if len(missing_files):
- print('%d files missing from the %s version of the toolchain:' %
- (len(missing_files), expected_hash))
- for f in missing_files[:10]:
- print('\t%s' % f)
- if len(missing_files) > 10:
- print('\t...')
- extra_files = [f for f in file_list if f not in timestamps_data_files]
- if len(extra_files):
- print('%d extra files in the %s version of the toolchain:' %
- (len(extra_files), expected_hash))
- for f in extra_files[:10]:
- print('\t%s' % f)
- if len(extra_files) > 10:
- print('\t...')
- if matches:
- return timestamps_data['sha1']
-
- # Make long hangs when updating the toolchain less mysterious.
- print('Calculating hash of toolchain in %s. Please wait...' % full_root_path)
- sys.stdout.flush()
- digest = hashlib.sha1()
- for path in file_list:
- path_without_hash = str(path).replace('/', '\\')
if expected_hash:
- path_without_hash = path_without_hash.replace(
- os.path.join(root, expected_hash).replace('/', '\\'), root)
- digest.update(bytes(path_without_hash.lower(), 'utf-8'))
- with open(path, 'rb') as f:
- digest.update(f.read())
+ full_root_path = os.path.join(root, expected_hash)
+ else:
+ full_root_path = root
+ file_list = GetFileList(full_root_path)
+ # Check whether we previously saved timestamps in
+ # $root/../{sha1}.timestamps. If we didn't, or they don't match, then do the
+ # full calculation, otherwise return the saved value.
+ timestamps_file = MakeTimestampsFileName(root, expected_hash)
+ timestamps_data = {'files': [], 'sha1': ''}
+ if os.path.exists(timestamps_file):
+ with open(timestamps_file, 'rb') as f:
+ try:
+ timestamps_data = json.load(f)
+ except ValueError:
+ # json couldn't be loaded, empty data will force a re-hash.
+ pass
- # Save the timestamp file if the calculated hash is the expected one.
- # The expected hash may be shorter, to reduce path lengths, in which case just
- # compare that many characters.
- if expected_hash and digest.hexdigest().startswith(expected_hash):
- SaveTimestampsAndHash(root, digest.hexdigest())
- # Return the (potentially truncated) expected_hash.
- return expected_hash
- return digest.hexdigest()
+ matches = len(file_list) == len(timestamps_data['files'])
+ # Don't check the timestamp of the version file as we touch this file to
+    # indicate which versions of the toolchain are still being used.
+ vc_dir = os.path.join(full_root_path, 'VC').lower()
+ if matches:
+ for disk, cached in zip(file_list, timestamps_data['files']):
+ if disk != cached[0] or (disk != vc_dir
+ and os.path.getmtime(disk) != cached[1]):
+ matches = False
+ break
+ elif os.path.exists(timestamps_file):
+ # Print some information about the extra/missing files. Don't do this if
+ # we don't have a timestamp file, as all the files will be considered as
+ # missing.
+ timestamps_data_files = []
+ for f in timestamps_data['files']:
+ timestamps_data_files.append(f[0])
+ missing_files = [f for f in timestamps_data_files if f not in file_list]
+ if len(missing_files):
+ print('%d files missing from the %s version of the toolchain:' %
+ (len(missing_files), expected_hash))
+ for f in missing_files[:10]:
+ print('\t%s' % f)
+ if len(missing_files) > 10:
+ print('\t...')
+ extra_files = [f for f in file_list if f not in timestamps_data_files]
+ if len(extra_files):
+ print('%d extra files in the %s version of the toolchain:' %
+ (len(extra_files), expected_hash))
+ for f in extra_files[:10]:
+ print('\t%s' % f)
+ if len(extra_files) > 10:
+ print('\t...')
+ if matches:
+ return timestamps_data['sha1']
+
+ # Make long hangs when updating the toolchain less mysterious.
+ print('Calculating hash of toolchain in %s. Please wait...' %
+ full_root_path)
+ sys.stdout.flush()
+ digest = hashlib.sha1()
+ for path in file_list:
+ path_without_hash = str(path).replace('/', '\\')
+ if expected_hash:
+ path_without_hash = path_without_hash.replace(
+ os.path.join(root, expected_hash).replace('/', '\\'), root)
+ digest.update(bytes(path_without_hash.lower(), 'utf-8'))
+ with open(path, 'rb') as f:
+ digest.update(f.read())
+
+ # Save the timestamp file if the calculated hash is the expected one.
+ # The expected hash may be shorter, to reduce path lengths, in which case
+ # just compare that many characters.
+ if expected_hash and digest.hexdigest().startswith(expected_hash):
+ SaveTimestampsAndHash(root, digest.hexdigest())
+ # Return the (potentially truncated) expected_hash.
+ return expected_hash
+ return digest.hexdigest()
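
Stripped of the timestamp cache, the digest computed above reduces to the sketch below: the sha1 covers each file's backslash-normalized, lowercased path (with the per-hash directory collapsed back onto the plain root) plus the file's bytes, so both renames and content edits change the toolchain hash. The function and argument names here are illustrative.

import hashlib

def hash_tree(file_list, hashed_root, plain_root):
    digest = hashlib.sha1()
    for path in file_list:
        # Hash the path as it would appear without the per-hash directory.
        name = str(path).replace('/', '\\').replace(hashed_root, plain_root)
        digest.update(bytes(name.lower(), 'utf-8'))
        with open(path, 'rb') as f:
            digest.update(f.read())
    return digest.hexdigest()
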
def CalculateToolchainHashes(root, remove_corrupt_toolchains):
- """Calculate the hash of the different toolchains installed in the |root|
+ """Calculate the hash of the different toolchains installed in the |root|
directory."""
- hashes = []
- dir_list = [
- d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d))]
- for d in dir_list:
- toolchain_hash = CalculateHash(root, d)
- if toolchain_hash != d:
- print('The hash of a version of the toolchain has an unexpected value ('
- '%s instead of %s)%s.' % (toolchain_hash, d,
- ', removing it' if remove_corrupt_toolchains else ''))
- if remove_corrupt_toolchains:
- RemoveToolchain(root, d, True)
- else:
- hashes.append(toolchain_hash)
- return hashes
+ hashes = []
+ dir_list = [
+ d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d))
+ ]
+ for d in dir_list:
+ toolchain_hash = CalculateHash(root, d)
+ if toolchain_hash != d:
+ print(
+ 'The hash of a version of the toolchain has an unexpected value ('
+ '%s instead of %s)%s.' %
+ (toolchain_hash, d,
+ ', removing it' if remove_corrupt_toolchains else ''))
+ if remove_corrupt_toolchains:
+ RemoveToolchain(root, d, True)
+ else:
+ hashes.append(toolchain_hash)
+ return hashes
def SaveTimestampsAndHash(root, sha1):
- """Saves timestamps and the final hash to be able to early-out more quickly
+ """Saves timestamps and the final hash to be able to early-out more quickly
next time."""
- file_list = GetFileList(os.path.join(root, sha1))
- timestamps_data = {
- 'files': [[f, os.path.getmtime(f)] for f in file_list],
- 'sha1': sha1,
- }
- with open(MakeTimestampsFileName(root, sha1), 'wb') as f:
- f.write(json.dumps(timestamps_data).encode('utf-8'))
+ file_list = GetFileList(os.path.join(root, sha1))
+ timestamps_data = {
+ 'files': [[f, os.path.getmtime(f)] for f in file_list],
+ 'sha1': sha1,
+ }
+ with open(MakeTimestampsFileName(root, sha1), 'wb') as f:
+ f.write(json.dumps(timestamps_data).encode('utf-8'))
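
On the next run, CalculateHash consumes the file written above to decide whether a full re-hash is needed; a condensed sketch of that early-out check (the VC directory is skipped because it is deliberately touched to record last use, and the helper name is illustrative):

import json
import os

def timestamps_still_valid(timestamps_file, file_list, vc_dir):
    if not os.path.exists(timestamps_file):
        return False
    with open(timestamps_file, 'rb') as f:
        try:
            cached = json.load(f)
        except ValueError:
            return False  # corrupt cache forces a full re-hash
    if len(file_list) != len(cached['files']):
        return False
    for disk, (name, mtime) in zip(file_list, cached['files']):
        if disk != name or (disk != vc_dir
                            and os.path.getmtime(disk) != mtime):
            return False
    return True
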
def HaveSrcInternalAccess():
- """Checks whether access to src-internal is available."""
- with open(os.devnull, 'w') as nul:
- # This is required to avoid modal dialog boxes after Git 2.14.1 and Git
- # Credential Manager for Windows 1.12. See https://crbug.com/755694 and
- # https://github.com/Microsoft/Git-Credential-Manager-for-Windows/issues/482.
- child_env = dict(os.environ, GCM_INTERACTIVE='NEVER')
- return subprocess.call(
- ['git', '-c', 'core.askpass=true', 'remote', 'show',
- 'https://chrome-internal.googlesource.com/chrome/src-internal/'],
- shell=True, stdin=nul, stdout=nul, stderr=nul, env=child_env) == 0
+ """Checks whether access to src-internal is available."""
+ with open(os.devnull, 'w') as nul:
+ # This is required to avoid modal dialog boxes after Git 2.14.1 and Git
+ # Credential Manager for Windows 1.12. See https://crbug.com/755694 and
+ # https://github.com/Microsoft/Git-Credential-Manager-for-Windows/issues/482.
+ child_env = dict(os.environ, GCM_INTERACTIVE='NEVER')
+ return subprocess.call([
+ 'git', '-c', 'core.askpass=true', 'remote', 'show',
+ 'https://chrome-internal.googlesource.com/chrome/src-internal/'
+ ],
+ shell=True,
+ stdin=nul,
+ stdout=nul,
+ stderr=nul,
+ env=child_env) == 0
def LooksLikeGoogler():
- """Checks for a USERDOMAIN environment variable of 'GOOGLE', which
+ """Checks for a USERDOMAIN environment variable of 'GOOGLE', which
probably implies the current user is a Googler."""
- return os.environ.get('USERDOMAIN', '').upper() == 'GOOGLE'
+ return os.environ.get('USERDOMAIN', '').upper() == 'GOOGLE'
def CanAccessToolchainBucket():
- """Checks whether the user has access to gs://chrome-wintoolchain/."""
- gsutil = download_from_google_storage.Gsutil(
- download_from_google_storage.GSUTIL_DEFAULT_PATH, boto_path=None)
- code, stdout, stderr = gsutil.check_call('ls', 'gs://chrome-wintoolchain/')
- if code != 0:
- # Make sure any error messages are made visible to the user.
- print(stderr, file=sys.stderr, end='')
- print(stdout, end='')
- return code == 0
+ """Checks whether the user has access to gs://chrome-wintoolchain/."""
+ gsutil = download_from_google_storage.Gsutil(
+ download_from_google_storage.GSUTIL_DEFAULT_PATH, boto_path=None)
+ code, stdout, stderr = gsutil.check_call('ls', 'gs://chrome-wintoolchain/')
+ if code != 0:
+ # Make sure any error messages are made visible to the user.
+ print(stderr, file=sys.stderr, end='')
+ print(stdout, end='')
+ return code == 0
def ToolchainBaseURL():
- base_url = os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN_BASE_URL', '')
- if base_url.startswith('file://'):
- base_url = base_url[len('file://'):]
- return base_url
+ base_url = os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN_BASE_URL', '')
+ if base_url.startswith('file://'):
+ base_url = base_url[len('file://'):]
+ return base_url
def UsesToolchainFromFile():
- return os.path.isdir(ToolchainBaseURL())
+ return os.path.isdir(ToolchainBaseURL())
def UsesToolchainFromHttp():
- url = ToolchainBaseURL()
- return url.startswith('http://') or url.startswith('https://')
+ url = ToolchainBaseURL()
+ return url.startswith('http://') or url.startswith('https://')
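
Taken together, the helpers above select the download source from DEPOT_TOOLS_WIN_TOOLCHAIN_BASE_URL; a minimal sketch of that decision (the function name is illustrative):

import os

def toolchain_source():
    url = os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN_BASE_URL', '')
    if url.startswith('file://'):
        url = url[len('file://'):]
    if os.path.isdir(url):
        return 'file'   # local directory of prepackaged zips
    if url.startswith(('http://', 'https://')):
        return 'http'   # plain HTTP(S) mirror
    return 'gs'         # default: gs://chrome-wintoolchain/ via gsutil
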
def RequestGsAuthentication():
- """Requests that the user authenticate to be able to access gs:// as a
+ """Requests that the user authenticate to be able to access gs:// as a
Googler. This allows much faster downloads, and pulling (old) toolchains
that match src/ revisions.
"""
- print('Access to gs://chrome-wintoolchain/ not configured.')
- print('-----------------------------------------------------------------')
- print()
- print('You appear to be a Googler.')
- print()
- print('I\'m sorry for the hassle, but you need to do a one-time manual')
- print('authentication. Please run:')
- print()
- print(' download_from_google_storage --config')
- print()
- print('and follow the instructions.')
- print()
- print('NOTE 1: Use your google.com credentials, not chromium.org.')
- print('NOTE 2: Enter 0 when asked for a "project-id".')
- print()
- print('-----------------------------------------------------------------')
- print()
- sys.stdout.flush()
- sys.exit(1)
+ print('Access to gs://chrome-wintoolchain/ not configured.')
+ print('-----------------------------------------------------------------')
+ print()
+ print('You appear to be a Googler.')
+ print()
+ print('I\'m sorry for the hassle, but you need to do a one-time manual')
+ print('authentication. Please run:')
+ print()
+ print(' download_from_google_storage --config')
+ print()
+ print('and follow the instructions.')
+ print()
+ print('NOTE 1: Use your google.com credentials, not chromium.org.')
+ print('NOTE 2: Enter 0 when asked for a "project-id".')
+ print()
+ print('-----------------------------------------------------------------')
+ print()
+ sys.stdout.flush()
+ sys.exit(1)
def DelayBeforeRemoving(target_dir):
- """A grace period before deleting the out of date toolchain directory."""
- if (os.path.isdir(target_dir) and
- not bool(int(os.environ.get('CHROME_HEADLESS', '0')))):
- for i in range(9, 0, -1):
- sys.stdout.write(
- '\rRemoving old toolchain in %ds... (Ctrl-C to cancel)' % i)
- sys.stdout.flush()
- time.sleep(1)
- print()
+ """A grace period before deleting the out of date toolchain directory."""
+ if (os.path.isdir(target_dir)
+ and not bool(int(os.environ.get('CHROME_HEADLESS', '0')))):
+ for i in range(9, 0, -1):
+ sys.stdout.write(
+ '\rRemoving old toolchain in %ds... (Ctrl-C to cancel)' % i)
+ sys.stdout.flush()
+ time.sleep(1)
+ print()
def DownloadUsingHttp(filename):
- """Downloads the given file from a url defined in
+ """Downloads the given file from a url defined in
DEPOT_TOOLS_WIN_TOOLCHAIN_BASE_URL environment variable."""
- temp_dir = tempfile.mkdtemp()
- assert os.path.basename(filename) == filename
- target_path = os.path.join(temp_dir, filename)
- base_url = ToolchainBaseURL()
- src_url = urljoin(base_url, filename)
- try:
- with closing(urlopen(src_url)) as fsrc, \
- open(target_path, 'wb') as fdst:
- shutil.copyfileobj(fsrc, fdst)
- except URLError as e:
- RmDir(temp_dir)
- sys.exit('Failed to retrieve file: %s' % e)
- return temp_dir, target_path
+ temp_dir = tempfile.mkdtemp()
+ assert os.path.basename(filename) == filename
+ target_path = os.path.join(temp_dir, filename)
+ base_url = ToolchainBaseURL()
+ src_url = urljoin(base_url, filename)
+ try:
+ with closing(urlopen(src_url)) as fsrc, \
+ open(target_path, 'wb') as fdst:
+ shutil.copyfileobj(fsrc, fdst)
+ except URLError as e:
+ RmDir(temp_dir)
+ sys.exit('Failed to retrieve file: %s' % e)
+ return temp_dir, target_path
def DownloadUsingGsutil(filename):
- """Downloads the given file from Google Storage chrome-wintoolchain bucket."""
- temp_dir = tempfile.mkdtemp()
- assert os.path.basename(filename) == filename
- target_path = os.path.join(temp_dir, filename)
- gsutil = download_from_google_storage.Gsutil(
- download_from_google_storage.GSUTIL_DEFAULT_PATH, boto_path=None)
- code = gsutil.call('cp', 'gs://chrome-wintoolchain/' + filename, target_path)
- if code != 0:
- sys.exit('gsutil failed')
- return temp_dir, target_path
+ """Downloads the given file from Google Storage chrome-wintoolchain bucket."""
+ temp_dir = tempfile.mkdtemp()
+ assert os.path.basename(filename) == filename
+ target_path = os.path.join(temp_dir, filename)
+ gsutil = download_from_google_storage.Gsutil(
+ download_from_google_storage.GSUTIL_DEFAULT_PATH, boto_path=None)
+ code = gsutil.call('cp', 'gs://chrome-wintoolchain/' + filename,
+ target_path)
+ if code != 0:
+ sys.exit('gsutil failed')
+ return temp_dir, target_path
def RmDir(path):
- """Deletes path and all the files it contains."""
- if sys.platform != 'win32':
- shutil.rmtree(path, ignore_errors=True)
- else:
- # shutil.rmtree() doesn't delete read-only files on Windows.
- subprocess.check_call('rmdir /s/q "%s"' % path, shell=True)
+ """Deletes path and all the files it contains."""
+ if sys.platform != 'win32':
+ shutil.rmtree(path, ignore_errors=True)
+ else:
+ # shutil.rmtree() doesn't delete read-only files on Windows.
+ subprocess.check_call('rmdir /s/q "%s"' % path, shell=True)
def DoTreeMirror(target_dir, tree_sha1):
- """In order to save temporary space on bots that do not have enough space to
+ """In order to save temporary space on bots that do not have enough space to
download ISOs, unpack them, and copy to the target location, the whole tree
is uploaded as a zip to internal storage, and then mirrored here."""
- if UsesToolchainFromFile():
- temp_dir = None
- local_zip = os.path.join(ToolchainBaseURL(), tree_sha1 + '.zip')
- if not os.path.isfile(local_zip):
- sys.exit('%s is not a valid file.' % local_zip)
- elif UsesToolchainFromHttp():
- temp_dir, local_zip = DownloadUsingHttp(tree_sha1 + '.zip')
- else:
- temp_dir, local_zip = DownloadUsingGsutil(tree_sha1 + '.zip')
- sys.stdout.write('Extracting %s...\n' % local_zip)
- sys.stdout.flush()
- with zipfile.ZipFile(local_zip, 'r', zipfile.ZIP_DEFLATED, True) as zf:
- zf.extractall(target_dir)
- if temp_dir:
- RmDir(temp_dir)
+ if UsesToolchainFromFile():
+ temp_dir = None
+ local_zip = os.path.join(ToolchainBaseURL(), tree_sha1 + '.zip')
+ if not os.path.isfile(local_zip):
+ sys.exit('%s is not a valid file.' % local_zip)
+ elif UsesToolchainFromHttp():
+ temp_dir, local_zip = DownloadUsingHttp(tree_sha1 + '.zip')
+ else:
+ temp_dir, local_zip = DownloadUsingGsutil(tree_sha1 + '.zip')
+ sys.stdout.write('Extracting %s...\n' % local_zip)
+ sys.stdout.flush()
+ with zipfile.ZipFile(local_zip, 'r', zipfile.ZIP_DEFLATED, True) as zf:
+ zf.extractall(target_dir)
+ if temp_dir:
+ RmDir(temp_dir)
def RemoveToolchain(root, sha1, delay_before_removing):
- """Remove the |sha1| version of the toolchain from |root|."""
- toolchain_target_dir = os.path.join(root, sha1)
- if delay_before_removing:
- DelayBeforeRemoving(toolchain_target_dir)
- if sys.platform == 'win32':
- # These stay resident and will make the rmdir below fail.
- kill_list = [
- 'mspdbsrv.exe',
- 'vctip.exe', # Compiler and tools experience improvement data uploader.
- ]
- for process_name in kill_list:
- with open(os.devnull, 'wb') as nul:
- subprocess.call(['taskkill', '/f', '/im', process_name],
- stdin=nul, stdout=nul, stderr=nul)
- if os.path.isdir(toolchain_target_dir):
- RmDir(toolchain_target_dir)
+ """Remove the |sha1| version of the toolchain from |root|."""
+ toolchain_target_dir = os.path.join(root, sha1)
+ if delay_before_removing:
+ DelayBeforeRemoving(toolchain_target_dir)
+ if sys.platform == 'win32':
+ # These stay resident and will make the rmdir below fail.
+ kill_list = [
+ 'mspdbsrv.exe',
+ 'vctip.exe', # Compiler and tools experience improvement data uploader.
+ ]
+ for process_name in kill_list:
+ with open(os.devnull, 'wb') as nul:
+ subprocess.call(['taskkill', '/f', '/im', process_name],
+ stdin=nul,
+ stdout=nul,
+ stderr=nul)
+ if os.path.isdir(toolchain_target_dir):
+ RmDir(toolchain_target_dir)
- timestamp_file = MakeTimestampsFileName(root, sha1)
- if os.path.exists(timestamp_file):
- os.remove(timestamp_file)
+ timestamp_file = MakeTimestampsFileName(root, sha1)
+ if os.path.exists(timestamp_file):
+ os.remove(timestamp_file)
def RemoveUnusedToolchains(root):
- """Remove the versions of the toolchain that haven't been used recently."""
- valid_toolchains = []
- dirs_to_remove = []
+ """Remove the versions of the toolchain that haven't been used recently."""
+ valid_toolchains = []
+ dirs_to_remove = []
- for d in os.listdir(root):
- full_path = os.path.join(root, d)
- if os.path.isdir(full_path):
- if not os.path.exists(MakeTimestampsFileName(root, d)):
- dirs_to_remove.append(d)
- else:
- vc_dir = os.path.join(full_path, 'VC')
- valid_toolchains.append((os.path.getmtime(vc_dir), d))
- elif os.path.isfile(full_path):
- os.remove(full_path)
+ for d in os.listdir(root):
+ full_path = os.path.join(root, d)
+ if os.path.isdir(full_path):
+ if not os.path.exists(MakeTimestampsFileName(root, d)):
+ dirs_to_remove.append(d)
+ else:
+ vc_dir = os.path.join(full_path, 'VC')
+ valid_toolchains.append((os.path.getmtime(vc_dir), d))
+ elif os.path.isfile(full_path):
+ os.remove(full_path)
- for d in dirs_to_remove:
- print('Removing %s as it doesn\'t correspond to any known toolchain.' %
- os.path.join(root, d))
- # Use the RemoveToolchain function to remove these directories as they might
- # contain an older version of the toolchain.
- RemoveToolchain(root, d, False)
+ for d in dirs_to_remove:
+ print('Removing %s as it doesn\'t correspond to any known toolchain.' %
+ os.path.join(root, d))
+ # Use the RemoveToolchain function to remove these directories as they
+ # might contain an older version of the toolchain.
+ RemoveToolchain(root, d, False)
- # Remove the versions of the toolchains that haven't been used in the past 30
- # days.
- toolchain_expiration_time = 60 * 60 * 24 * 30
- for toolchain in valid_toolchains:
- toolchain_age_in_sec = time.time() - toolchain[0]
- if toolchain_age_in_sec > toolchain_expiration_time:
- print('Removing version %s of the Win toolchain as it hasn\'t been used'
- ' in the past %d days.' % (toolchain[1],
- toolchain_age_in_sec / 60 / 60 / 24))
- RemoveToolchain(root, toolchain[1], True)
+ # Remove the versions of the toolchains that haven't been used in the past
+ # 30 days.
+ toolchain_expiration_time = 60 * 60 * 24 * 30
+ for toolchain in valid_toolchains:
+ toolchain_age_in_sec = time.time() - toolchain[0]
+ if toolchain_age_in_sec > toolchain_expiration_time:
+ print(
+ 'Removing version %s of the Win toolchain as it hasn\'t been used'
+ ' in the past %d days.' %
+ (toolchain[1], toolchain_age_in_sec / 60 / 60 / 24))
+ RemoveToolchain(root, toolchain[1], True)
def EnableCrashDumpCollection():
- """Tell Windows Error Reporting to record crash dumps so that we can diagnose
+ """Tell Windows Error Reporting to record crash dumps so that we can diagnose
linker crashes and other toolchain failures. Documented at:
https://msdn.microsoft.com/en-us/library/windows/desktop/bb787181.aspx
"""
- if sys.platform == 'win32' and os.environ.get('CHROME_HEADLESS') == '1':
- key_name = r'SOFTWARE\Microsoft\Windows\Windows Error Reporting'
- try:
- key = winreg.CreateKeyEx(winreg.HKEY_LOCAL_MACHINE, key_name, 0,
- winreg.KEY_WOW64_64KEY | winreg.KEY_ALL_ACCESS)
- # Merely creating LocalDumps is sufficient to enable the defaults.
- winreg.CreateKey(key, "LocalDumps")
- # Disable the WER UI, as documented here:
- # https://msdn.microsoft.com/en-us/library/windows/desktop/bb513638.aspx
- winreg.SetValueEx(key, "DontShowUI", 0, winreg.REG_DWORD, 1)
- # Trap OSError instead of WindowsError so pylint will succeed on Linux.
- # Catching errors is important because some build machines are not elevated
- # and writing to HKLM requires elevation.
- except OSError:
- pass
+ if sys.platform == 'win32' and os.environ.get('CHROME_HEADLESS') == '1':
+ key_name = r'SOFTWARE\Microsoft\Windows\Windows Error Reporting'
+ try:
+ key = winreg.CreateKeyEx(
+ winreg.HKEY_LOCAL_MACHINE, key_name, 0,
+ winreg.KEY_WOW64_64KEY | winreg.KEY_ALL_ACCESS)
+ # Merely creating LocalDumps is sufficient to enable the defaults.
+ winreg.CreateKey(key, "LocalDumps")
+ # Disable the WER UI, as documented here:
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/bb513638.aspx
+ winreg.SetValueEx(key, "DontShowUI", 0, winreg.REG_DWORD, 1)
+ # Trap OSError instead of WindowsError so pylint will succeed on Linux.
+ # Catching errors is important because some build machines are not
+ # elevated and writing to HKLM requires elevation.
+ except OSError:
+ pass
def main():
- parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.RawDescriptionHelpFormatter,
- )
- parser.add_argument('--output-json', metavar='FILE',
- help='write information about toolchain to FILE')
- parser.add_argument('--force', action='store_true',
- help='force script to run on non-Windows hosts')
- parser.add_argument('--no-download', action='store_true',
- help='configure if present but don\'t download')
- parser.add_argument('--toolchain-dir',
- default=os.getenv(ENV_TOOLCHAIN_ROOT, BASEDIR),
- help='directory to install toolchain into')
- parser.add_argument('desired_hash', metavar='desired-hash',
- help='toolchain hash to download')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument('--output-json',
+ metavar='FILE',
+ help='write information about toolchain to FILE')
+ parser.add_argument('--force',
+ action='store_true',
+ help='force script to run on non-Windows hosts')
+ parser.add_argument('--no-download',
+ action='store_true',
+ help='configure if present but don\'t download')
+ parser.add_argument('--toolchain-dir',
+ default=os.getenv(ENV_TOOLCHAIN_ROOT, BASEDIR),
+ help='directory to install toolchain into')
+ parser.add_argument('desired_hash',
+ metavar='desired-hash',
+ help='toolchain hash to download')
+ args = parser.parse_args()
- if not (sys.platform.startswith(('cygwin', 'win32')) or args.force):
- return 0
+ if not (sys.platform.startswith(('cygwin', 'win32')) or args.force):
+ return 0
- if sys.platform == 'cygwin':
- # This script requires Windows Python, so invoke with depot_tools' Python.
- def winpath(path):
- return subprocess.check_output(['cygpath', '-w', path]).strip()
- python = os.path.join(DEPOT_TOOLS_PATH, 'python3.bat')
- cmd = [python, winpath(__file__)]
- if args.output_json:
- cmd.extend(['--output-json', winpath(args.output_json)])
- cmd.append(args.desired_hash)
- sys.exit(subprocess.call(cmd))
- assert sys.platform != 'cygwin'
+ if sys.platform == 'cygwin':
+ # This script requires Windows Python, so invoke with depot_tools'
+ # Python.
+ def winpath(path):
+ return subprocess.check_output(['cygpath', '-w', path]).strip()
- # Create our toolchain destination and "chdir" to it.
- toolchain_dir = os.path.abspath(args.toolchain_dir)
- if not os.path.isdir(toolchain_dir):
- os.makedirs(toolchain_dir)
- os.chdir(toolchain_dir)
+ python = os.path.join(DEPOT_TOOLS_PATH, 'python3.bat')
+ cmd = [python, winpath(__file__)]
+ if args.output_json:
+ cmd.extend(['--output-json', winpath(args.output_json)])
+ cmd.append(args.desired_hash)
+ sys.exit(subprocess.call(cmd))
+ assert sys.platform != 'cygwin'
- # Move to depot_tools\win_toolchain where we'll store our files, and where
- # the downloader script is.
- target_dir = 'vs_files'
- if not os.path.isdir(target_dir):
- os.mkdir(target_dir)
- toolchain_target_dir = os.path.join(target_dir, args.desired_hash)
+ # Create our toolchain destination and "chdir" to it.
+ toolchain_dir = os.path.abspath(args.toolchain_dir)
+ if not os.path.isdir(toolchain_dir):
+ os.makedirs(toolchain_dir)
+ os.chdir(toolchain_dir)
- abs_toolchain_target_dir = os.path.abspath(toolchain_target_dir)
+ # Move to depot_tools\win_toolchain where we'll store our files, and where
+ # the downloader script is.
+ target_dir = 'vs_files'
+ if not os.path.isdir(target_dir):
+ os.mkdir(target_dir)
+ toolchain_target_dir = os.path.join(target_dir, args.desired_hash)
- got_new_toolchain = False
+ abs_toolchain_target_dir = os.path.abspath(toolchain_target_dir)
- # If the current hash doesn't match what we want in the file, nuke and pave.
- # Typically this script is only run when the .sha1 one file is updated, but
- # directly calling "gclient runhooks" will also run it, so we cache
- # based on timestamps to make that case fast.
- current_hashes = CalculateToolchainHashes(target_dir, True)
- if args.desired_hash not in current_hashes:
- if args.no_download:
- raise SystemExit('Toolchain is out of date. Run "gclient runhooks" to '
- 'update the toolchain, or set '
- 'DEPOT_TOOLS_WIN_TOOLCHAIN=0 to use the locally '
- 'installed toolchain.')
- should_use_file = False
- should_use_http = False
- should_use_gs = False
- if UsesToolchainFromFile():
- should_use_file = True
- elif UsesToolchainFromHttp():
- should_use_http = True
- elif (HaveSrcInternalAccess() or
- LooksLikeGoogler() or
- CanAccessToolchainBucket()):
- should_use_gs = True
- if not CanAccessToolchainBucket():
- RequestGsAuthentication()
- if not should_use_file and not should_use_gs and not should_use_http:
- if sys.platform not in ('win32', 'cygwin'):
- doc = 'https://chromium.googlesource.com/chromium/src/+/HEAD/docs/' \
- 'win_cross.md'
- print('\n\n\nPlease follow the instructions at %s\n\n' % doc)
- else:
- doc = 'https://chromium.googlesource.com/chromium/src/+/HEAD/docs/' \
- 'windows_build_instructions.md'
- print('\n\n\nNo downloadable toolchain found. In order to use your '
- 'locally installed version of Visual Studio to build Chrome '
- 'please set DEPOT_TOOLS_WIN_TOOLCHAIN=0.\n'
- 'For details search for DEPOT_TOOLS_WIN_TOOLCHAIN in the '
- 'instructions at %s\n\n' % doc)
- return 1
- print('Windows toolchain out of date or doesn\'t exist, updating (Pro)...')
- print(' current_hashes: %s' % ', '.join(current_hashes))
- print(' desired_hash: %s' % args.desired_hash)
- sys.stdout.flush()
+ got_new_toolchain = False
- DoTreeMirror(toolchain_target_dir, args.desired_hash)
-
- got_new_toolchain = True
-
- # The Windows SDK is either in `win_sdk` or in `Windows Kits\10`. This
- # script must work with both layouts, so check which one it is.
- win_sdk_in_windows_kits = os.path.isdir(
- os.path.join(abs_toolchain_target_dir, 'Windows Kits', '10'))
- if win_sdk_in_windows_kits:
- win_sdk = os.path.join(abs_toolchain_target_dir, 'Windows Kits', '10')
- else:
- win_sdk = os.path.join(abs_toolchain_target_dir, 'win_sdk')
-
- version_file = os.path.join(toolchain_target_dir, 'VS_VERSION')
- vc_dir = os.path.join(toolchain_target_dir, 'VC')
- with open(version_file, 'rb') as f:
- vs_version = f.read().decode('utf-8').strip()
- # Touch the VC directory so we can use its timestamp to know when this
- # version of the toolchain has been used for the last time.
- os.utime(vc_dir, None)
-
- data = {
- 'path': abs_toolchain_target_dir,
- 'version': vs_version,
- 'win_sdk': win_sdk,
- 'wdk': os.path.join(abs_toolchain_target_dir, 'wdk'),
- 'runtime_dirs': [
- os.path.join(abs_toolchain_target_dir, 'sys64'),
- os.path.join(abs_toolchain_target_dir, 'sys32'),
- os.path.join(abs_toolchain_target_dir, 'sysarm64'),
- ],
- }
- data_json = json.dumps(data, indent=2)
- data_path = os.path.join(target_dir, '..', 'data.json')
- if not os.path.exists(data_path) or open(data_path).read() != data_json:
- with open(data_path, 'w') as f:
- f.write(data_json)
-
- if got_new_toolchain:
- current_hashes = CalculateToolchainHashes(target_dir, False)
+ # If the current hash doesn't match what we want in the file, nuke and pave.
+    # Typically this script is only run when the .sha1 file is updated, but
+ # directly calling "gclient runhooks" will also run it, so we cache
+ # based on timestamps to make that case fast.
+ current_hashes = CalculateToolchainHashes(target_dir, True)
if args.desired_hash not in current_hashes:
- print(
- 'Got wrong hash after pulling a new toolchain. '
- 'Wanted \'%s\', got one of \'%s\'.' % (
- args.desired_hash, ', '.join(current_hashes)), file=sys.stderr)
- return 1
- SaveTimestampsAndHash(target_dir, args.desired_hash)
+ if args.no_download:
+ raise SystemExit(
+ 'Toolchain is out of date. Run "gclient runhooks" to '
+ 'update the toolchain, or set '
+ 'DEPOT_TOOLS_WIN_TOOLCHAIN=0 to use the locally '
+ 'installed toolchain.')
+ should_use_file = False
+ should_use_http = False
+ should_use_gs = False
+ if UsesToolchainFromFile():
+ should_use_file = True
+ elif UsesToolchainFromHttp():
+ should_use_http = True
+ elif (HaveSrcInternalAccess() or LooksLikeGoogler()
+ or CanAccessToolchainBucket()):
+ should_use_gs = True
+ if not CanAccessToolchainBucket():
+ RequestGsAuthentication()
+ if not should_use_file and not should_use_gs and not should_use_http:
+ if sys.platform not in ('win32', 'cygwin'):
+ doc = 'https://chromium.googlesource.com/chromium/src/+/HEAD/docs/' \
+ 'win_cross.md'
+ print('\n\n\nPlease follow the instructions at %s\n\n' % doc)
+ else:
+ doc = 'https://chromium.googlesource.com/chromium/src/+/HEAD/docs/' \
+ 'windows_build_instructions.md'
+ print(
+ '\n\n\nNo downloadable toolchain found. In order to use your '
+ 'locally installed version of Visual Studio to build Chrome '
+ 'please set DEPOT_TOOLS_WIN_TOOLCHAIN=0.\n'
+ 'For details search for DEPOT_TOOLS_WIN_TOOLCHAIN in the '
+ 'instructions at %s\n\n' % doc)
+ return 1
+ print(
+ 'Windows toolchain out of date or doesn\'t exist, updating (Pro)...'
+ )
+ print(' current_hashes: %s' % ', '.join(current_hashes))
+ print(' desired_hash: %s' % args.desired_hash)
+ sys.stdout.flush()
- if args.output_json:
- if (not os.path.exists(args.output_json) or
- not filecmp.cmp(data_path, args.output_json)):
- shutil.copyfile(data_path, args.output_json)
+ DoTreeMirror(toolchain_target_dir, args.desired_hash)
- EnableCrashDumpCollection()
+ got_new_toolchain = True
- RemoveUnusedToolchains(target_dir)
+ # The Windows SDK is either in `win_sdk` or in `Windows Kits\10`. This
+ # script must work with both layouts, so check which one it is.
+ win_sdk_in_windows_kits = os.path.isdir(
+ os.path.join(abs_toolchain_target_dir, 'Windows Kits', '10'))
+ if win_sdk_in_windows_kits:
+ win_sdk = os.path.join(abs_toolchain_target_dir, 'Windows Kits', '10')
+ else:
+ win_sdk = os.path.join(abs_toolchain_target_dir, 'win_sdk')
- return 0
+ version_file = os.path.join(toolchain_target_dir, 'VS_VERSION')
+ vc_dir = os.path.join(toolchain_target_dir, 'VC')
+ with open(version_file, 'rb') as f:
+ vs_version = f.read().decode('utf-8').strip()
+ # Touch the VC directory so we can use its timestamp to know when this
+ # version of the toolchain has been used for the last time.
+ os.utime(vc_dir, None)
+
+ data = {
+ 'path':
+ abs_toolchain_target_dir,
+ 'version':
+ vs_version,
+ 'win_sdk':
+ win_sdk,
+ 'wdk':
+ os.path.join(abs_toolchain_target_dir, 'wdk'),
+ 'runtime_dirs': [
+ os.path.join(abs_toolchain_target_dir, 'sys64'),
+ os.path.join(abs_toolchain_target_dir, 'sys32'),
+ os.path.join(abs_toolchain_target_dir, 'sysarm64'),
+ ],
+ }
+ data_json = json.dumps(data, indent=2)
+ data_path = os.path.join(target_dir, '..', 'data.json')
+ if not os.path.exists(data_path) or open(data_path).read() != data_json:
+ with open(data_path, 'w') as f:
+ f.write(data_json)
+
+ if got_new_toolchain:
+ current_hashes = CalculateToolchainHashes(target_dir, False)
+ if args.desired_hash not in current_hashes:
+ print('Got wrong hash after pulling a new toolchain. '
+ 'Wanted \'%s\', got one of \'%s\'.' %
+ (args.desired_hash, ', '.join(current_hashes)),
+ file=sys.stderr)
+ return 1
+ SaveTimestampsAndHash(target_dir, args.desired_hash)
+
+ if args.output_json:
+ if (not os.path.exists(args.output_json)
+ or not filecmp.cmp(data_path, args.output_json)):
+ shutil.copyfile(data_path, args.output_json)
+
+ EnableCrashDumpCollection()
+
+ RemoveUnusedToolchains(target_dir)
+
+ return 0
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
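
Downstream consumers read the data.json that main() writes next to vs_files/; a small sketch of that, assuming DEPOT_TOOLS_WIN_TOOLCHAIN_ROOT points at the toolchain directory (the keys are the ones assembled above):

import json
import os

toolchain_dir = os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN_ROOT', '.')
with open(os.path.join(toolchain_dir, 'data.json')) as f:
    data = json.load(f)

print(data['version'])    # contents of VS_VERSION
print(data['win_sdk'])    # win_sdk or 'Windows Kits/10' layout
for runtime_dir in data['runtime_dirs']:
    print(runtime_dir)    # sys64 / sys32 / sysarm64
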
diff --git a/win_toolchain/package_from_installed.py b/win_toolchain/package_from_installed.py
index 38430d448d..2567b926fa 100644
--- a/win_toolchain/package_from_installed.py
+++ b/win_toolchain/package_from_installed.py
@@ -2,7 +2,6 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""
From a system-installed copy of the toolchain, packages all the required bits
into a .zip file.
@@ -47,7 +46,6 @@ import zipfile
import get_toolchain_if_necessary
-
_vs_version = None
_win_version = None
_vc_tools = None
@@ -56,527 +54,579 @@ _allow_multiple_vs_installs = False
def GetVSPath():
- # Use vswhere to find the VS installation. This will find prerelease
- # versions because -prerelease is specified. This assumes that only one
- # version is installed.
- command = (r'C:\Program Files (x86)\Microsoft Visual Studio\Installer'
- r'\vswhere.exe -prerelease')
- vs_version_marker = 'catalog_productLineVersion: '
- vs_path_marker = 'installationPath: '
- output = subprocess.check_output(command, universal_newlines=True)
- vs_path = None
- vs_installs_count = 0
- matching_vs_path = ""
- for line in output.splitlines():
- if line.startswith(vs_path_marker):
- # The path information comes first
- vs_path = line[len(vs_path_marker):]
- vs_installs_count += 1
- if line.startswith(vs_version_marker):
- # The version for that path comes later
- if line[len(vs_version_marker):] == _vs_version:
- matching_vs_path = vs_path
+ # Use vswhere to find the VS installation. This will find prerelease
+ # versions because -prerelease is specified. This assumes that only one
+ # version is installed.
+ command = (r'C:\Program Files (x86)\Microsoft Visual Studio\Installer'
+ r'\vswhere.exe -prerelease')
+ vs_version_marker = 'catalog_productLineVersion: '
+ vs_path_marker = 'installationPath: '
+ output = subprocess.check_output(command, universal_newlines=True)
+ vs_path = None
+ vs_installs_count = 0
+ matching_vs_path = ""
+ for line in output.splitlines():
+ if line.startswith(vs_path_marker):
+ # The path information comes first
+ vs_path = line[len(vs_path_marker):]
+ vs_installs_count += 1
+ if line.startswith(vs_version_marker):
+ # The version for that path comes later
+ if line[len(vs_version_marker):] == _vs_version:
+ matching_vs_path = vs_path
- if vs_installs_count == 0:
- raise Exception('VS %s path not found in vswhere output' % (_vs_version))
- if vs_installs_count > 1:
- if not _allow_multiple_vs_installs:
- raise Exception('Multiple VS installs detected. This is unsupported. '
- 'It is recommended that packaging be done on a clean VM '
- 'with just one version installed. To proceed anyway add '
- 'the --allow_multiple_vs_installs flag to this script')
- else:
- print('Multiple VS installs were detected. This is unsupported. '
- 'Proceeding anyway')
- return matching_vs_path
+ if vs_installs_count == 0:
+ raise Exception('VS %s path not found in vswhere output' %
+ (_vs_version))
+ if vs_installs_count > 1:
+ if not _allow_multiple_vs_installs:
+ raise Exception(
+ 'Multiple VS installs detected. This is unsupported. '
+ 'It is recommended that packaging be done on a clean VM '
+ 'with just one version installed. To proceed anyway add '
+ 'the --allow_multiple_vs_installs flag to this script')
+ else:
+ print('Multiple VS installs were detected. This is unsupported. '
+ 'Proceeding anyway')
+ return matching_vs_path
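
For a quick manual check, vswhere can also report the installation path directly instead of being scraped line by line as above; the flags used below (-prerelease, -property) are assumptions about the vswhere CLI rather than something this script relies on:

import subprocess

VSWHERE = (r'C:\Program Files (x86)\Microsoft Visual Studio\Installer'
           r'\vswhere.exe')
# One path per detected install; an empty result means nothing was found.
paths = subprocess.check_output(
    [VSWHERE, '-prerelease', '-property', 'installationPath'],
    universal_newlines=True).splitlines()
print(paths)
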
def ExpandWildcards(root, sub_dir):
- # normpath is needed to change '/' to '\\' characters.
- path = os.path.normpath(os.path.join(root, sub_dir))
- matches = glob.glob(path)
- if len(matches) != 1:
- raise Exception('%s had %d matches - should be one' % (path, len(matches)))
- return matches[0]
+ # normpath is needed to change '/' to '\\' characters.
+ path = os.path.normpath(os.path.join(root, sub_dir))
+ matches = glob.glob(path)
+ if len(matches) != 1:
+ raise Exception('%s had %d matches - should be one' %
+ (path, len(matches)))
+ return matches[0]
def BuildRepackageFileList(src_dir):
- # Strip off a trailing separator if present
- if src_dir.endswith(os.path.sep):
- src_dir = src_dir[:-len(os.path.sep)]
+ # Strip off a trailing separator if present
+ if src_dir.endswith(os.path.sep):
+ src_dir = src_dir[:-len(os.path.sep)]
- # Ensure .\Windows Kits\10\Debuggers exists and fail to repackage if it
- # doesn't.
- debuggers_path = os.path.join(src_dir, 'Windows Kits', '10', 'Debuggers')
- if not os.path.exists(debuggers_path):
- raise Exception('Repacking failed. Missing %s.' % (debuggers_path))
+ # Ensure .\Windows Kits\10\Debuggers exists and fail to repackage if it
+ # doesn't.
+ debuggers_path = os.path.join(src_dir, 'Windows Kits', '10', 'Debuggers')
+ if not os.path.exists(debuggers_path):
+ raise Exception('Repacking failed. Missing %s.' % (debuggers_path))
- result = []
- for root, _, files in os.walk(src_dir):
- for f in files:
- final_from = os.path.normpath(os.path.join(root, f))
- dest = final_from[len(src_dir) + 1:]
- result.append((final_from, dest))
- return result
+ result = []
+ for root, _, files in os.walk(src_dir):
+ for f in files:
+ final_from = os.path.normpath(os.path.join(root, f))
+ dest = final_from[len(src_dir) + 1:]
+ result.append((final_from, dest))
+ return result
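
The (source, destination) pairs returned above are presumably fed to a zip-writing step elsewhere in this script (not shown in this hunk); a minimal sketch of that consumption:

import zipfile

def write_zip(zip_path, file_pairs):
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED,
                         allowZip64=True) as zf:
        for src, dest in file_pairs:
            zf.write(src, arcname=dest)
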
def BuildFileList(override_dir, include_arm, vs_path):
- result = []
+ result = []
- # Subset of VS corresponding roughly to VC.
- paths = [
- 'DIA SDK/bin',
- 'DIA SDK/idl',
- 'DIA SDK/include',
- 'DIA SDK/lib',
- _vc_tools + '/atlmfc',
- _vc_tools + '/crt',
- 'VC/redist',
- ]
+ # Subset of VS corresponding roughly to VC.
+ paths = [
+ 'DIA SDK/bin',
+ 'DIA SDK/idl',
+ 'DIA SDK/include',
+ 'DIA SDK/lib',
+ _vc_tools + '/atlmfc',
+ _vc_tools + '/crt',
+ 'VC/redist',
+ ]
+
+ if override_dir:
+ paths += [
+ (os.path.join(override_dir, 'bin'), _vc_tools + '/bin'),
+ (os.path.join(override_dir, 'include'), _vc_tools + '/include'),
+ (os.path.join(override_dir, 'lib'), _vc_tools + '/lib'),
+ ]
+ else:
+ paths += [
+ _vc_tools + '/bin',
+ _vc_tools + '/include',
+ _vc_tools + '/lib',
+ ]
- if override_dir:
paths += [
- (os.path.join(override_dir, 'bin'), _vc_tools + '/bin'),
- (os.path.join(override_dir, 'include'), _vc_tools + '/include'),
- (os.path.join(override_dir, 'lib'), _vc_tools + '/lib'),
+ ('VC/redist/MSVC/14.*.*/x86/Microsoft.VC*.CRT', 'sys32'),
+ ('VC/redist/MSVC/14.*.*/x86/Microsoft.VC*.CRT',
+         'Windows Kits/10/bin/x86'),
+ ('VC/redist/MSVC/14.*.*/debug_nonredist/x86/Microsoft.VC*.DebugCRT',
+ 'sys32'),
+ ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT', 'sys64'),
+ ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT', 'VC/bin/amd64_x86'),
+ ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT', 'VC/bin/amd64'),
+ ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT',
+ 'Windows Kits/10/bin/x64'),
+ ('VC/redist/MSVC/14.*.*/debug_nonredist/x64/Microsoft.VC*.DebugCRT',
+ 'sys64'),
]
- else:
- paths += [
- _vc_tools + '/bin',
- _vc_tools + '/include',
- _vc_tools + '/lib',
+ if include_arm:
+ paths += [
+ ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT', 'sysarm64'),
+ ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT',
+ 'VC/bin/amd64_arm64'),
+ ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT', 'VC/bin/arm64'),
+ ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT',
+ 'Windows Kits/10/bin/arm64'),
+ ('VC/redist/MSVC/14.*.*/debug_nonredist/arm64/Microsoft.VC*.DebugCRT',
+ 'sysarm64'),
+ ]
+
+ for path in paths:
+ src = path[0] if isinstance(path, tuple) else path
+ # Note that vs_path is ignored if src is an absolute path.
+ combined = ExpandWildcards(vs_path, src)
+ if not os.path.exists(combined):
+ raise Exception('%s missing.' % combined)
+ if not os.path.isdir(combined):
+ raise Exception('%s not a directory.' % combined)
+ for root, _, files in os.walk(combined):
+ for f in files:
+                # vctip.exe doesn't shut down, leaving locks on directories. It's
+ # optional so let's avoid this problem by not packaging it.
+ # https://crbug.com/735226
+ if f.lower() == 'vctip.exe':
+ continue
+ final_from = os.path.normpath(os.path.join(root, f))
+ if isinstance(path, tuple):
+ assert final_from.startswith(combined)
+ dest = final_from[len(combined) + 1:]
+ result.append((final_from,
+ os.path.normpath(os.path.join(path[1],
+ dest))))
+ else:
+ assert final_from.startswith(vs_path)
+ dest = final_from[len(vs_path) + 1:]
+ result.append((final_from, dest))
+
+ command = (
+ r'reg query "HKLM\SOFTWARE\Microsoft\Windows Kits\Installed Roots"'
+ r' /v KitsRoot10')
+ marker = " KitsRoot10 REG_SZ "
+ sdk_path = None
+ output = subprocess.check_output(command, universal_newlines=True)
+ for line in output.splitlines():
+ if line.startswith(marker):
+ sdk_path = line[len(marker):]
+
+ # Strip off a trailing slash if present
+ if sdk_path.endswith(os.path.sep):
+ sdk_path = sdk_path[:-len(os.path.sep)]
+
+ debuggers_path = os.path.join(sdk_path, 'Debuggers')
+ if not os.path.exists(debuggers_path):
+ raise Exception('Packaging failed. Missing %s.' % (debuggers_path))
+
+ for root, _, files in os.walk(sdk_path):
+ for f in files:
+ combined = os.path.normpath(os.path.join(root, f))
+ # Some of the files in this directory are exceedingly long (and
+ # exceed _MAX_PATH for any moderately long root), so exclude them.
+ # We don't need them anyway. Exclude/filter/skip others just to save
+ # space.
+ tail = combined[len(sdk_path) + 1:]
+ skip_dir = False
+ for dir in [
+ 'References\\', 'Windows Performance Toolkit\\',
+ 'Testing\\', 'App Certification Kit\\', 'Extension SDKs\\',
+ 'Assessment and Deployment Kit\\'
+ ]:
+ if tail.startswith(dir):
+ skip_dir = True
+ if skip_dir:
+ continue
+ # There may be many Include\Lib\Source\bin directories for many
+ # different versions of Windows and packaging them all wastes ~450
+ # MB (uncompressed) per version and wastes time. Only copy the
+ # specified version. Note that the SDK version number started being
+ # part of the bin path with 10.0.15063.0.
+ if (tail.startswith('Include\\') or tail.startswith('Lib\\')
+ or tail.startswith('Source\\') or tail.startswith('bin\\')):
+ if tail.count(_win_version) == 0:
+ continue
+ to = os.path.join('Windows Kits', '10', tail)
+ result.append((combined, to))
+
+ # Copy the x86 ucrt DLLs to all directories with x86 binaries that are
+ # added to the path by SetEnv.cmd, and to sys32. Starting with the 17763
+ # SDK the ucrt files are in _win_version\ucrt instead of just ucrt.
+ ucrt_dir = os.path.join(sdk_path, 'redist', _win_version, r'ucrt\dlls\x86')
+ if not os.path.exists(ucrt_dir):
+ ucrt_dir = os.path.join(sdk_path, r'redist\ucrt\dlls\x86')
+ ucrt_paths = glob.glob(ucrt_dir + r'\*')
+ assert (len(ucrt_paths) > 0)
+ for ucrt_path in ucrt_paths:
+ ucrt_file = os.path.split(ucrt_path)[1]
+ for dest_dir in [r'Windows Kits\10\bin\x86', 'sys32']:
+ result.append((ucrt_path, os.path.join(dest_dir, ucrt_file)))
+
+ # Copy the x64 ucrt DLLs to all directories with x64 binaries that are
+ # added to the path by SetEnv.cmd, and to sys64.
+ ucrt_dir = os.path.join(sdk_path, 'redist', _win_version, r'ucrt\dlls\x64')
+ if not os.path.exists(ucrt_dir):
+ ucrt_dir = os.path.join(sdk_path, r'redist\ucrt\dlls\x64')
+ ucrt_paths = glob.glob(ucrt_dir + r'\*')
+ assert (len(ucrt_paths) > 0)
+ for ucrt_path in ucrt_paths:
+ ucrt_file = os.path.split(ucrt_path)[1]
+ for dest_dir in [
+ r'VC\bin\amd64_x86', r'VC\bin\amd64',
+ r'Windows Kits\10\bin\x64', 'sys64'
+ ]:
+ result.append((ucrt_path, os.path.join(dest_dir, ucrt_file)))
+
+ system_crt_files = [
+ # Needed to let debug binaries run.
+ 'ucrtbased.dll',
]
-
- paths += [
- ('VC/redist/MSVC/14.*.*/x86/Microsoft.VC*.CRT', 'sys32'),
- ('VC/redist/MSVC/14.*.*/x86/Microsoft.VC*.CRT',
- 'Windows Kits/10//bin/x86'),
- ('VC/redist/MSVC/14.*.*/debug_nonredist/x86/Microsoft.VC*.DebugCRT',
- 'sys32'),
- ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT', 'sys64'),
- ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT', 'VC/bin/amd64_x86'),
- ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT', 'VC/bin/amd64'),
- ('VC/redist/MSVC/14.*.*/x64/Microsoft.VC*.CRT',
- 'Windows Kits/10/bin/x64'),
- ('VC/redist/MSVC/14.*.*/debug_nonredist/x64/Microsoft.VC*.DebugCRT',
- 'sys64'),
- ]
- if include_arm:
- paths += [
- ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT', 'sysarm64'),
- ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT', 'VC/bin/amd64_arm64'),
- ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT', 'VC/bin/arm64'),
- ('VC/redist/MSVC/14.*.*/arm64/Microsoft.VC*.CRT',
- 'Windows Kits/10/bin/arm64'),
- ('VC/redist/MSVC/14.*.*/debug_nonredist/arm64/Microsoft.VC*.DebugCRT',
- 'sysarm64'),
+ cpu_pairs = [
+ ('x86', 'sys32'),
+ ('x64', 'sys64'),
]
+ if include_arm:
+ cpu_pairs += [
+ ('arm64', 'sysarm64'),
+ ]
+ for system_crt_file in system_crt_files:
+ for cpu_pair in cpu_pairs:
+ target_cpu, dest_dir = cpu_pair
+ src_path = os.path.join(sdk_path, 'bin', _win_version, target_cpu,
+ 'ucrt')
+ result.append((os.path.join(src_path, system_crt_file),
+ os.path.join(dest_dir, system_crt_file)))
- for path in paths:
- src = path[0] if isinstance(path, tuple) else path
- # Note that vs_path is ignored if src is an absolute path.
- combined = ExpandWildcards(vs_path, src)
- if not os.path.exists(combined):
- raise Exception('%s missing.' % combined)
- if not os.path.isdir(combined):
- raise Exception('%s not a directory.' % combined)
- for root, _, files in os.walk(combined):
- for f in files:
- # vctip.exe doesn't shutdown, leaving locks on directories. It's
- # optional so let's avoid this problem by not packaging it.
- # https://crbug.com/735226
- if f.lower() =='vctip.exe':
- continue
- final_from = os.path.normpath(os.path.join(root, f))
- if isinstance(path, tuple):
- assert final_from.startswith(combined)
- dest = final_from[len(combined) + 1:]
- result.append(
- (final_from, os.path.normpath(os.path.join(path[1], dest))))
- else:
- assert final_from.startswith(vs_path)
- dest = final_from[len(vs_path) + 1:]
- result.append((final_from, dest))
+ # Generically drop all arm stuff that we don't need, and
+ # drop .msi files because we don't need installers and drop
+ # samples since those are not used by any tools.
+ def is_skippable(f):
+ return ('arm\\' in f.lower()
+ or (not include_arm and 'arm64\\' in f.lower())
+ or 'samples\\' in f.lower() or f.lower().endswith(
+ ('.msi', '.msm')))
- command = (r'reg query "HKLM\SOFTWARE\Microsoft\Windows Kits\Installed Roots"'
- r' /v KitsRoot10')
- marker = " KitsRoot10 REG_SZ "
- sdk_path = None
- output = subprocess.check_output(command, universal_newlines=True)
- for line in output.splitlines():
- if line.startswith(marker):
- sdk_path = line[len(marker):]
+ return [(f, t) for f, t in result if not is_skippable(f)]
- # Strip off a trailing slash if present
- if sdk_path.endswith(os.path.sep):
- sdk_path = sdk_path[:-len(os.path.sep)]
-
- debuggers_path = os.path.join(sdk_path, 'Debuggers')
- if not os.path.exists(debuggers_path):
- raise Exception('Packaging failed. Missing %s.' % (debuggers_path))
-
- for root, _, files in os.walk(sdk_path):
- for f in files:
- combined = os.path.normpath(os.path.join(root, f))
- # Some of the files in this directory are exceedingly long (and exceed
- # _MAX_PATH for any moderately long root), so exclude them. We don't need
- # them anyway. Exclude/filter/skip others just to save space.
- tail = combined[len(sdk_path) + 1:]
- skip_dir = False
- for dir in ['References\\', 'Windows Performance Toolkit\\', 'Testing\\',
- 'App Certification Kit\\', 'Extension SDKs\\',
- 'Assessment and Deployment Kit\\']:
- if tail.startswith(dir):
- skip_dir = True
- if skip_dir:
- continue
- # There may be many Include\Lib\Source\bin directories for many different
- # versions of Windows and packaging them all wastes ~450 MB
- # (uncompressed) per version and wastes time. Only copy the specified
- # version. Note that the SDK version number started being part of the bin
- # path with 10.0.15063.0.
- if (tail.startswith('Include\\') or tail.startswith('Lib\\') or
- tail.startswith('Source\\') or tail.startswith('bin\\')):
- if tail.count(_win_version) == 0:
- continue
- to = os.path.join('Windows Kits', '10', tail)
- result.append((combined, to))
-
- # Copy the x86 ucrt DLLs to all directories with x86 binaries that are
- # added to the path by SetEnv.cmd, and to sys32. Starting with the 17763
- # SDK the ucrt files are in _win_version\ucrt instead of just ucrt.
- ucrt_dir = os.path.join(sdk_path, 'redist', _win_version, r'ucrt\dlls\x86')
- if not os.path.exists(ucrt_dir):
- ucrt_dir = os.path.join(sdk_path, r'redist\ucrt\dlls\x86')
- ucrt_paths = glob.glob(ucrt_dir + r'\*')
- assert(len(ucrt_paths) > 0)
- for ucrt_path in ucrt_paths:
- ucrt_file = os.path.split(ucrt_path)[1]
- for dest_dir in [ r'Windows Kits\10\bin\x86', 'sys32' ]:
- result.append((ucrt_path, os.path.join(dest_dir, ucrt_file)))
-
- # Copy the x64 ucrt DLLs to all directories with x64 binaries that are
- # added to the path by SetEnv.cmd, and to sys64.
- ucrt_dir = os.path.join(sdk_path, 'redist', _win_version, r'ucrt\dlls\x64')
- if not os.path.exists(ucrt_dir):
- ucrt_dir = os.path.join(sdk_path, r'redist\ucrt\dlls\x64')
- ucrt_paths = glob.glob(ucrt_dir + r'\*')
- assert(len(ucrt_paths) > 0)
- for ucrt_path in ucrt_paths:
- ucrt_file = os.path.split(ucrt_path)[1]
- for dest_dir in [ r'VC\bin\amd64_x86', r'VC\bin\amd64',
- r'Windows Kits\10\bin\x64', 'sys64']:
- result.append((ucrt_path, os.path.join(dest_dir, ucrt_file)))
-
- system_crt_files = [
- # Needed to let debug binaries run.
- 'ucrtbased.dll',
- ]
- cpu_pairs = [
- ('x86', 'sys32'),
- ('x64', 'sys64'),
- ]
- if include_arm:
- cpu_pairs += [
- ('arm64', 'sysarm64'),
- ]
- for system_crt_file in system_crt_files:
- for cpu_pair in cpu_pairs:
- target_cpu, dest_dir = cpu_pair
- src_path = os.path.join(sdk_path, 'bin', _win_version, target_cpu, 'ucrt')
- result.append((os.path.join(src_path, system_crt_file),
- os.path.join(dest_dir, system_crt_file)))
-
- # Generically drop all arm stuff that we don't need, and
- # drop .msi files because we don't need installers and drop
- # samples since those are not used by any tools.
- def is_skippable(f):
- return ('arm\\' in f.lower() or
- (not include_arm and 'arm64\\' in f.lower()) or
- 'samples\\' in f.lower() or
- f.lower().endswith(('.msi',
- '.msm')))
- return [(f, t) for f, t in result if not is_skippable(f)]
def GenerateSetEnvCmd(target_dir):
- """Generate a batch file that gyp expects to exist to set up the compiler
+ """Generate a batch file that gyp expects to exist to set up the compiler
environment.
This is normally generated by a full install of the SDK, but we
do it here manually since we do not do a full install."""
- vc_tools_parts = _vc_tools.split('/')
+ vc_tools_parts = _vc_tools.split('/')
- # All these paths are relative to the root of the toolchain package.
- include_dirs = [
- ['Windows Kits', '10', 'Include', _win_version, 'um'],
- ['Windows Kits', '10', 'Include', _win_version, 'shared'],
- ['Windows Kits', '10', 'Include', _win_version, 'winrt'],
- ]
- include_dirs.append(['Windows Kits', '10', 'Include', _win_version, 'ucrt'])
- include_dirs.extend([
- vc_tools_parts + ['include'],
- vc_tools_parts + ['atlmfc', 'include'],
- ])
- libpath_dirs = [
- vc_tools_parts + ['lib', 'x86', 'store', 'references'],
- ['Windows Kits', '10', 'UnionMetadata', _win_version],
- ]
- # Common to x86, x64, and arm64
- env = collections.OrderedDict([
- # Yuck: These have a trailing \ character. No good way to represent this in
- # an OS-independent way.
- ('VSINSTALLDIR', [['.\\']]),
- ('VCINSTALLDIR', [['VC\\']]),
- ('INCLUDE', include_dirs),
- ('LIBPATH', libpath_dirs),
- ])
- # x86. Always use amd64_x86 cross, not x86 on x86.
- env['VCToolsInstallDir'] = [vc_tools_parts[:]]
- # Yuck: This one ends in a path separator as well.
- env['VCToolsInstallDir'][0][-1] += os.path.sep
- env_x86 = collections.OrderedDict([
- (
- 'PATH',
- [
- ['Windows Kits', '10', 'bin', _win_version, 'x64'],
- vc_tools_parts + ['bin', 'HostX64', 'x86'],
- vc_tools_parts + ['bin', 'HostX64', 'x64'
- ], # Needed for mspdb1x0.dll.
- ]),
- ('LIB', [
- vc_tools_parts + ['lib', 'x86'],
- ['Windows Kits', '10', 'Lib', _win_version, 'um', 'x86'],
- ['Windows Kits', '10', 'Lib', _win_version, 'ucrt', 'x86'],
- vc_tools_parts + ['atlmfc', 'lib', 'x86'],
- ]),
- ])
+ # All these paths are relative to the root of the toolchain package.
+ include_dirs = [
+ ['Windows Kits', '10', 'Include', _win_version, 'um'],
+ ['Windows Kits', '10', 'Include', _win_version, 'shared'],
+ ['Windows Kits', '10', 'Include', _win_version, 'winrt'],
+ ]
+ include_dirs.append(['Windows Kits', '10', 'Include', _win_version, 'ucrt'])
+ include_dirs.extend([
+ vc_tools_parts + ['include'],
+ vc_tools_parts + ['atlmfc', 'include'],
+ ])
+ libpath_dirs = [
+ vc_tools_parts + ['lib', 'x86', 'store', 'references'],
+ ['Windows Kits', '10', 'UnionMetadata', _win_version],
+ ]
+ # Common to x86, x64, and arm64
+ env = collections.OrderedDict([
+ # Yuck: These have a trailing \ character. No good way to represent this
+ # in an OS-independent way.
+ ('VSINSTALLDIR', [['.\\']]),
+ ('VCINSTALLDIR', [['VC\\']]),
+ ('INCLUDE', include_dirs),
+ ('LIBPATH', libpath_dirs),
+ ])
+ # x86. Always use amd64_x86 cross, not x86 on x86.
+ env['VCToolsInstallDir'] = [vc_tools_parts[:]]
+ # Yuck: This one ends in a path separator as well.
+ env['VCToolsInstallDir'][0][-1] += os.path.sep
+ env_x86 = collections.OrderedDict([
+ (
+ 'PATH',
+ [
+ ['Windows Kits', '10', 'bin', _win_version, 'x64'],
+ vc_tools_parts + ['bin', 'HostX64', 'x86'],
+ vc_tools_parts +
+ ['bin', 'HostX64', 'x64'], # Needed for mspdb1x0.dll.
+ ]),
+ ('LIB', [
+ vc_tools_parts + ['lib', 'x86'],
+ ['Windows Kits', '10', 'Lib', _win_version, 'um', 'x86'],
+ ['Windows Kits', '10', 'Lib', _win_version, 'ucrt', 'x86'],
+ vc_tools_parts + ['atlmfc', 'lib', 'x86'],
+ ]),
+ ])
- # x64.
- env_x64 = collections.OrderedDict([
- ('PATH', [
- ['Windows Kits', '10', 'bin', _win_version, 'x64'],
- vc_tools_parts + ['bin', 'HostX64', 'x64'],
- ]),
- ('LIB', [
- vc_tools_parts + ['lib', 'x64'],
- ['Windows Kits', '10', 'Lib', _win_version, 'um', 'x64'],
- ['Windows Kits', '10', 'Lib', _win_version, 'ucrt', 'x64'],
- vc_tools_parts + ['atlmfc', 'lib', 'x64'],
- ]),
- ])
+ # x64.
+ env_x64 = collections.OrderedDict([
+ ('PATH', [
+ ['Windows Kits', '10', 'bin', _win_version, 'x64'],
+ vc_tools_parts + ['bin', 'HostX64', 'x64'],
+ ]),
+ ('LIB', [
+ vc_tools_parts + ['lib', 'x64'],
+ ['Windows Kits', '10', 'Lib', _win_version, 'um', 'x64'],
+ ['Windows Kits', '10', 'Lib', _win_version, 'ucrt', 'x64'],
+ vc_tools_parts + ['atlmfc', 'lib', 'x64'],
+ ]),
+ ])
- # arm64.
- env_arm64 = collections.OrderedDict([
- ('PATH', [
- ['Windows Kits', '10', 'bin', _win_version, 'x64'],
- vc_tools_parts + ['bin', 'HostX64', 'arm64'],
- vc_tools_parts + ['bin', 'HostX64', 'x64'],
- ]),
- ('LIB', [
- vc_tools_parts + ['lib', 'arm64'],
- ['Windows Kits', '10', 'Lib', _win_version, 'um', 'arm64'],
- ['Windows Kits', '10', 'Lib', _win_version, 'ucrt', 'arm64'],
- vc_tools_parts + ['atlmfc', 'lib', 'arm64'],
- ]),
- ])
+ # arm64.
+ env_arm64 = collections.OrderedDict([
+ ('PATH', [
+ ['Windows Kits', '10', 'bin', _win_version, 'x64'],
+ vc_tools_parts + ['bin', 'HostX64', 'arm64'],
+ vc_tools_parts + ['bin', 'HostX64', 'x64'],
+ ]),
+ ('LIB', [
+ vc_tools_parts + ['lib', 'arm64'],
+ ['Windows Kits', '10', 'Lib', _win_version, 'um', 'arm64'],
+ ['Windows Kits', '10', 'Lib', _win_version, 'ucrt', 'arm64'],
+ vc_tools_parts + ['atlmfc', 'lib', 'arm64'],
+ ]),
+ ])
- def BatDirs(dirs):
- return ';'.join(['%cd%\\' + os.path.join(*d) for d in dirs])
- set_env_prefix = os.path.join(target_dir, r'Windows Kits\10\bin\SetEnv')
- with open(set_env_prefix + '.cmd', 'w') as f:
- # The prologue changes the current directory to the root of the toolchain
- # package, so that path entries can be set up without needing ..\..\..\
- # components.
- f.write('@echo off\n'
- ':: Generated by win_toolchain\\package_from_installed.py.\n'
- 'pushd %~dp0..\..\..\n')
- for var, dirs in env.items():
- f.write('set %s=%s\n' % (var, BatDirs(dirs)))
- f.write('if "%1"=="/x64" goto x64\n')
- f.write('if "%1"=="/arm64" goto arm64\n')
+ def BatDirs(dirs):
+ return ';'.join(['%cd%\\' + os.path.join(*d) for d in dirs])
- for var, dirs in env_x86.items():
- f.write('set %s=%s%s\n' % (
- var, BatDirs(dirs), ';%PATH%' if var == 'PATH' else ''))
- f.write('goto :END\n')
+ set_env_prefix = os.path.join(target_dir, r'Windows Kits\10\bin\SetEnv')
+ with open(set_env_prefix + '.cmd', 'w') as f:
+ # The prologue changes the current directory to the root of the
+ # toolchain package, so that path entries can be set up without needing
+ # ..\..\..\ components.
+ f.write('@echo off\n'
+ ':: Generated by win_toolchain\\package_from_installed.py.\n'
+ 'pushd %~dp0..\..\..\n')
+ for var, dirs in env.items():
+ f.write('set %s=%s\n' % (var, BatDirs(dirs)))
+ f.write('if "%1"=="/x64" goto x64\n')
+ f.write('if "%1"=="/arm64" goto arm64\n')
- f.write(':x64\n')
- for var, dirs in env_x64.items():
- f.write('set %s=%s%s\n' % (
- var, BatDirs(dirs), ';%PATH%' if var == 'PATH' else ''))
- f.write('goto :END\n')
+ for var, dirs in env_x86.items():
+ f.write('set %s=%s%s\n' %
+ (var, BatDirs(dirs), ';%PATH%' if var == 'PATH' else ''))
+ f.write('goto :END\n')
- f.write(':arm64\n')
- for var, dirs in env_arm64.items():
- f.write('set %s=%s%s\n' % (
- var, BatDirs(dirs), ';%PATH%' if var == 'PATH' else ''))
- f.write('goto :END\n')
- f.write(':END\n')
- # Restore the original directory.
- f.write('popd\n')
- with open(set_env_prefix + '.x86.json', 'wt', newline='') as f:
- assert not set(env.keys()) & set(env_x86.keys()), 'dupe keys'
- json.dump({'env': collections.OrderedDict(list(env.items()) + list(env_x86.items()))},
- f)
- with open(set_env_prefix + '.x64.json', 'wt', newline='') as f:
- assert not set(env.keys()) & set(env_x64.keys()), 'dupe keys'
- json.dump({'env': collections.OrderedDict(list(env.items()) + list(env_x64.items()))},
- f)
- with open(set_env_prefix + '.arm64.json', 'wt', newline='') as f:
- assert not set(env.keys()) & set(env_arm64.keys()), 'dupe keys'
- json.dump({'env': collections.OrderedDict(list(env.items()) + list(env_arm64.items()))},
- f)
+ f.write(':x64\n')
+ for var, dirs in env_x64.items():
+ f.write('set %s=%s%s\n' %
+ (var, BatDirs(dirs), ';%PATH%' if var == 'PATH' else ''))
+ f.write('goto :END\n')
+
+ f.write(':arm64\n')
+ for var, dirs in env_arm64.items():
+ f.write('set %s=%s%s\n' %
+ (var, BatDirs(dirs), ';%PATH%' if var == 'PATH' else ''))
+ f.write('goto :END\n')
+ f.write(':END\n')
+ # Restore the original directory.
+ f.write('popd\n')
+ with open(set_env_prefix + '.x86.json', 'wt', newline='') as f:
+ assert not set(env.keys()) & set(env_x86.keys()), 'dupe keys'
+ json.dump(
+ {
+ 'env':
+ collections.OrderedDict(
+ list(env.items()) + list(env_x86.items()))
+ }, f)
+ with open(set_env_prefix + '.x64.json', 'wt', newline='') as f:
+ assert not set(env.keys()) & set(env_x64.keys()), 'dupe keys'
+ json.dump(
+ {
+ 'env':
+ collections.OrderedDict(
+ list(env.items()) + list(env_x64.items()))
+ }, f)
+ with open(set_env_prefix + '.arm64.json', 'wt', newline='') as f:
+ assert not set(env.keys()) & set(env_arm64.keys()), 'dupe keys'
+ json.dump(
+ {
+ 'env':
+ collections.OrderedDict(
+ list(env.items()) + list(env_arm64.items()))
+ }, f)
def AddEnvSetup(files, include_arm):
- """We need to generate this file in the same way that the "from pieces"
+ """We need to generate this file in the same way that the "from pieces"
script does, so pull that in here."""
- tempdir = tempfile.mkdtemp()
- os.makedirs(os.path.join(tempdir, 'Windows Kits', '10', 'bin'))
- GenerateSetEnvCmd(tempdir)
- files.append((
- os.path.join(tempdir, 'Windows Kits', '10', 'bin', 'SetEnv.cmd'),
- 'Windows Kits\\10\\bin\\SetEnv.cmd'))
- files.append((
- os.path.join(tempdir, 'Windows Kits', '10', 'bin', 'SetEnv.x86.json'),
- 'Windows Kits\\10\\bin\\SetEnv.x86.json'))
- files.append((
- os.path.join(tempdir, 'Windows Kits', '10', 'bin', 'SetEnv.x64.json'),
- 'Windows Kits\\10\\bin\\SetEnv.x64.json'))
- if include_arm:
- files.append((
- os.path.join(tempdir, 'Windows Kits', '10', 'bin', 'SetEnv.arm64.json'),
- 'Windows Kits\\10\\bin\\SetEnv.arm64.json'))
- vs_version_file = os.path.join(tempdir, 'VS_VERSION')
- with open(vs_version_file, 'wt', newline='') as version:
- print(_vs_version, file=version)
- files.append((vs_version_file, 'VS_VERSION'))
+ tempdir = tempfile.mkdtemp()
+ os.makedirs(os.path.join(tempdir, 'Windows Kits', '10', 'bin'))
+ GenerateSetEnvCmd(tempdir)
+ files.append(
+ (os.path.join(tempdir, 'Windows Kits', '10', 'bin',
+ 'SetEnv.cmd'), 'Windows Kits\\10\\bin\\SetEnv.cmd'))
+ files.append((os.path.join(tempdir, 'Windows Kits', '10', 'bin',
+ 'SetEnv.x86.json'),
+ 'Windows Kits\\10\\bin\\SetEnv.x86.json'))
+ files.append((os.path.join(tempdir, 'Windows Kits', '10', 'bin',
+ 'SetEnv.x64.json'),
+ 'Windows Kits\\10\\bin\\SetEnv.x64.json'))
+ if include_arm:
+ files.append((os.path.join(tempdir, 'Windows Kits', '10', 'bin',
+ 'SetEnv.arm64.json'),
+ 'Windows Kits\\10\\bin\\SetEnv.arm64.json'))
+ vs_version_file = os.path.join(tempdir, 'VS_VERSION')
+ with open(vs_version_file, 'wt', newline='') as version:
+ print(_vs_version, file=version)
+ files.append((vs_version_file, 'VS_VERSION'))
def RenameToSha1(output):
- """Determine the hash in the same way that the unzipper does to rename the
+ """Determine the hash in the same way that the unzipper does to rename the
   .zip file."""
- print('Extracting to determine hash...')
- tempdir = tempfile.mkdtemp()
- old_dir = os.getcwd()
- os.chdir(tempdir)
- rel_dir = 'vs_files'
- with zipfile.ZipFile(
- os.path.join(old_dir, output), 'r', zipfile.ZIP_DEFLATED, True) as zf:
- zf.extractall(rel_dir)
- print('Hashing...')
- sha1 = get_toolchain_if_necessary.CalculateHash(rel_dir, None)
- # Shorten from forty characters to ten. This is still enough to avoid
- # collisions, while being less unwieldy and reducing the risk of MAX_PATH
- # failures.
- sha1 = sha1[:10]
- os.chdir(old_dir)
- shutil.rmtree(tempdir)
- final_name = sha1 + '.zip'
- os.rename(output, final_name)
- print('Renamed %s to %s.' % (output, final_name))
+ print('Extracting to determine hash...')
+ tempdir = tempfile.mkdtemp()
+ old_dir = os.getcwd()
+ os.chdir(tempdir)
+ rel_dir = 'vs_files'
+ with zipfile.ZipFile(os.path.join(old_dir, output), 'r',
+ zipfile.ZIP_DEFLATED, True) as zf:
+ zf.extractall(rel_dir)
+ print('Hashing...')
+ sha1 = get_toolchain_if_necessary.CalculateHash(rel_dir, None)
+ # Shorten from forty characters to ten. This is still enough to avoid
+ # collisions, while being less unwieldy and reducing the risk of MAX_PATH
+ # failures.
+ sha1 = sha1[:10]
+ os.chdir(old_dir)
+ shutil.rmtree(tempdir)
+ final_name = sha1 + '.zip'
+ os.rename(output, final_name)
+ print('Renamed %s to %s.' % (output, final_name))
def main():
- if sys.version_info[0] < 3:
- print('This script requires Python 3')
- sys.exit(10)
- usage = 'usage: %prog [options] 2022'
- parser = optparse.OptionParser(usage)
- parser.add_option('-w', '--winver', action='store', type='string',
- dest='winver', default='10.0.22621.0',
- help='Windows SDK version, such as 10.0.22621.0')
- parser.add_option('-d', '--dryrun', action='store_true', dest='dryrun',
- default=False,
- help='scan for file existence and prints statistics')
- parser.add_option('--noarm', action='store_false', dest='arm',
- default=True,
- help='Avoids arm parts of the SDK')
- parser.add_option('--override', action='store', type='string',
- dest='override_dir', default=None,
- help='Specify alternate bin/include/lib directory')
- parser.add_option('--repackage', action='store', type='string',
- dest='repackage_dir', default=None,
- help='Specify raw directory to be packaged, for hot fixes.')
- parser.add_option('--allow_multiple_vs_installs', action='store_true',
- default=False, dest='allow_multiple_vs_installs',
- help='Specify if multiple VS installs are allowed.')
- (options, args) = parser.parse_args()
+ if sys.version_info[0] < 3:
+ print('This script requires Python 3')
+ sys.exit(10)
+ usage = 'usage: %prog [options] 2022'
+ parser = optparse.OptionParser(usage)
+ parser.add_option('-w',
+ '--winver',
+ action='store',
+ type='string',
+ dest='winver',
+ default='10.0.22621.0',
+ help='Windows SDK version, such as 10.0.22621.0')
+ parser.add_option('-d',
+ '--dryrun',
+ action='store_true',
+ dest='dryrun',
+ default=False,
+ help='scan for file existence and prints statistics')
+ parser.add_option('--noarm',
+ action='store_false',
+ dest='arm',
+ default=True,
+ help='Avoids arm parts of the SDK')
+ parser.add_option('--override',
+ action='store',
+ type='string',
+ dest='override_dir',
+ default=None,
+ help='Specify alternate bin/include/lib directory')
+ parser.add_option(
+ '--repackage',
+ action='store',
+ type='string',
+ dest='repackage_dir',
+ default=None,
+ help='Specify raw directory to be packaged, for hot fixes.')
+ parser.add_option('--allow_multiple_vs_installs',
+ action='store_true',
+ default=False,
+ dest='allow_multiple_vs_installs',
+ help='Specify if multiple VS installs are allowed.')
+ (options, args) = parser.parse_args()
- if options.repackage_dir:
- files = BuildRepackageFileList(options.repackage_dir)
- else:
- if len(args) != 1 or args[0] not in SUPPORTED_VS_VERSIONS:
- print('Must specify 2022')
- parser.print_help();
- return 1
+ if options.repackage_dir:
+ files = BuildRepackageFileList(options.repackage_dir)
+ else:
+ if len(args) != 1 or args[0] not in SUPPORTED_VS_VERSIONS:
+ print('Must specify 2022')
+ parser.print_help()
+ return 1
- if options.override_dir:
- if (not os.path.exists(os.path.join(options.override_dir, 'bin')) or
- not os.path.exists(os.path.join(options.override_dir, 'include')) or
- not os.path.exists(os.path.join(options.override_dir, 'lib'))):
- print('Invalid override directory - must contain bin/include/lib dirs')
- return 1
+ if options.override_dir:
+ if (not os.path.exists(os.path.join(options.override_dir, 'bin'))
+ or not os.path.exists(
+ os.path.join(options.override_dir, 'include'))
+ or not os.path.exists(
+ os.path.join(options.override_dir, 'lib'))):
+ print(
+ 'Invalid override directory - must contain bin/include/lib dirs'
+ )
+ return 1
- global _vs_version
- _vs_version = args[0]
- global _win_version
- _win_version = options.winver
- global _vc_tools
- global _allow_multiple_vs_installs
- _allow_multiple_vs_installs = options.allow_multiple_vs_installs
- vs_path = GetVSPath()
- temp_tools_path = ExpandWildcards(vs_path, 'VC/Tools/MSVC/14.*.*')
- # Strip off the leading vs_path characters and switch back to / separators.
- _vc_tools = temp_tools_path[len(vs_path) + 1:].replace('\\', '/')
+ global _vs_version
+ _vs_version = args[0]
+ global _win_version
+ _win_version = options.winver
+ global _vc_tools
+ global _allow_multiple_vs_installs
+ _allow_multiple_vs_installs = options.allow_multiple_vs_installs
+ vs_path = GetVSPath()
+ temp_tools_path = ExpandWildcards(vs_path, 'VC/Tools/MSVC/14.*.*')
+ # Strip off the leading vs_path characters and switch back to /
+ # separators.
+ _vc_tools = temp_tools_path[len(vs_path) + 1:].replace('\\', '/')
- print('Building file list for VS %s Windows %s...' % (_vs_version, _win_version))
- files = BuildFileList(options.override_dir, options.arm, vs_path)
+ print('Building file list for VS %s Windows %s...' %
+ (_vs_version, _win_version))
+ files = BuildFileList(options.override_dir, options.arm, vs_path)
- AddEnvSetup(files, options.arm)
+ AddEnvSetup(files, options.arm)
+
+ if False:
+ for f in files:
+ print(f[0], '->', f[1])
+ return 0
+
+ output = 'out.zip'
+ if os.path.exists(output):
+ os.unlink(output)
+ count = 0
+ version_match_count = 0
+ total_size = 0
+ missing_files = False
+ with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED, True) as zf:
+ for disk_name, archive_name in files:
+ sys.stdout.write('\r%d/%d ...%s' %
+ (count, len(files), disk_name[-40:]))
+ sys.stdout.flush()
+ count += 1
+ if not options.repackage_dir and disk_name.count(_win_version) > 0:
+ version_match_count += 1
+ if os.path.exists(disk_name):
+ total_size += os.path.getsize(disk_name)
+ if not options.dryrun:
+ zf.write(disk_name, archive_name)
+ else:
+ missing_files = True
+ sys.stdout.write('\r%s does not exist.\n\n' % disk_name)
+ sys.stdout.flush()
+ sys.stdout.write(
+ '\r%1.3f GB of data in %d files, %d files for %s.%s\n' %
+ (total_size / 1e9, count, version_match_count, _win_version, ' ' * 50))
+ if options.dryrun:
+ return 0
+ if missing_files:
+ raise Exception('One or more files were missing - aborting')
+ if not options.repackage_dir and version_match_count == 0:
+ raise Exception('No files found that match the specified winversion')
+ sys.stdout.write('\rWrote to %s.%s\n' % (output, ' ' * 50))
+ sys.stdout.flush()
+
+ RenameToSha1(output)
- if False:
- for f in files:
- print(f[0], '->', f[1])
return 0
- output = 'out.zip'
- if os.path.exists(output):
- os.unlink(output)
- count = 0
- version_match_count = 0
- total_size = 0
- missing_files = False
- with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED, True) as zf:
- for disk_name, archive_name in files:
- sys.stdout.write('\r%d/%d ...%s' % (count, len(files), disk_name[-40:]))
- sys.stdout.flush()
- count += 1
- if not options.repackage_dir and disk_name.count(_win_version) > 0:
- version_match_count += 1
- if os.path.exists(disk_name):
- total_size += os.path.getsize(disk_name)
- if not options.dryrun:
- zf.write(disk_name, archive_name)
- else:
- missing_files = True
- sys.stdout.write('\r%s does not exist.\n\n' % disk_name)
- sys.stdout.flush()
- sys.stdout.write('\r%1.3f GB of data in %d files, %d files for %s.%s\n' %
- (total_size / 1e9, count, version_match_count, _win_version, ' '*50))
- if options.dryrun:
- return 0
- if missing_files:
- raise Exception('One or more files were missing - aborting')
- if not options.repackage_dir and version_match_count == 0:
- raise Exception('No files found that match the specified winversion')
- sys.stdout.write('\rWrote to %s.%s\n' % (output, ' '*50))
- sys.stdout.flush()
-
- RenameToSha1(output)
-
- return 0
-
if __name__ == '__main__':
- sys.exit(main())
+ sys.exit(main())
diff --git a/wtf b/wtf
index da4095d9cd..3ac4c57928 100755
--- a/wtf
+++ b/wtf
@@ -2,7 +2,6 @@
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Display active git branches and code changes in a chromiumos workspace."""
from __future__ import print_function
@@ -15,67 +14,66 @@ import sys
def show_dir(full_name, relative_name, color):
- """Display active work in a single git repo."""
+ """Display active work in a single git repo."""
+ def show_name():
+ """Display the directory name."""
- def show_name():
- """Display the directory name."""
+ if color:
+ sys.stdout.write('========= %s[44m%s[37m%s%s[0m ========\n' %
+ (chr(27), chr(27), relative_name, chr(27)))
+ else:
+ sys.stdout.write('========= %s ========\n' % relative_name)
+ lines_printed = 0
+
+ cmd = ['git', 'branch', '-v']
if color:
- sys.stdout.write('========= %s[44m%s[37m%s%s[0m ========\n' %
- (chr(27), chr(27), relative_name, chr(27)))
- else:
- sys.stdout.write('========= %s ========\n' % relative_name)
+ cmd.append('--color')
- lines_printed = 0
+ branch = subprocess.Popen(cmd, cwd=full_name,
+ stdout=subprocess.PIPE).communicate()[0].rstrip()
- cmd = ['git', 'branch', '-v']
- if color:
- cmd.append('--color')
+ if len(branch.splitlines()) > 1:
+ if lines_printed == 0:
+ show_name()
+ lines_printed += 1
+ print(branch)
- branch = subprocess.Popen(cmd,
- cwd=full_name,
- stdout=subprocess.PIPE).communicate()[0].rstrip()
+ status = subprocess.Popen(['git', 'status'],
+ cwd=full_name,
+ stdout=subprocess.PIPE).communicate()[0].rstrip()
- if len(branch.splitlines()) > 1:
- if lines_printed == 0:
- show_name()
- lines_printed += 1
- print(branch)
-
- status = subprocess.Popen(['git', 'status'],
- cwd=full_name,
- stdout=subprocess.PIPE).communicate()[0].rstrip()
-
- if len(status.splitlines()) > 2:
- if lines_printed == 0:
- show_name()
- if lines_printed == 1:
- print('---------------')
- print(status)
+ if len(status.splitlines()) > 2:
+ if lines_printed == 0:
+ show_name()
+ if lines_printed == 1:
+ print('---------------')
+ print(status)
def main():
- """Take no arguments."""
+ """Take no arguments."""
- color = False
+ color = False
- if os.isatty(1):
- color = True
+ if os.isatty(1):
+ color = True
- base = os.path.basename(os.getcwd())
- root, entries = gclient_utils.GetGClientRootAndEntries()
+ base = os.path.basename(os.getcwd())
+ root, entries = gclient_utils.GetGClientRootAndEntries()
- # which entries map to a git repos?
- raw = [k for k, v in entries.items() if v and not re.search('svn', v)]
- raw.sort()
+    # which entries map to a git repo?
+ raw = [k for k, v in entries.items() if v and not re.search('svn', v)]
+ raw.sort()
- # We want to use the full path for testing, but we want to use the relative
- # path for display.
- fulldirs = [os.path.normpath(os.path.join(root, p)) for p in raw]
- reldirs = [re.sub('^' + base, '.', p) for p in raw]
+ # We want to use the full path for testing, but we want to use the relative
+ # path for display.
+ fulldirs = [os.path.normpath(os.path.join(root, p)) for p in raw]
+ reldirs = [re.sub('^' + base, '.', p) for p in raw]
+
+ for full_path, relative_path in zip(fulldirs, reldirs):
+ show_dir(full_path, relative_path, color)
- for full_path, relative_path in zip(fulldirs, reldirs):
- show_dir(full_path, relative_path, color)
if __name__ == '__main__':
- main()
+ main()