[ssci] PEP8 formatting for metadata directory

All files in metadata/ are new, so they should follow the PEP-8 style.

Change-Id: I5d8424536c3d7b703e6b8087e0e2d70c06a1549c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/4834909
Reviewed-by: Rachael Newitt <renewitt@google.com>
Commit-Queue: Rachael Newitt <renewitt@google.com>
Author: Anne Redulla
Date: 2023-09-04 22:02:36 +00:00 (committed by LUCI CQ)
Parent: 693e0b3121
Commit: 6715758ed9
21 changed files with 1182 additions and 1113 deletions

metadata/.style.yapf (new file)

@@ -0,0 +1,2 @@
[style]
based_on_style = pep8
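
For reference, the sketch below (not part of this CL) shows the effect of the
pep8-based style this file selects. It assumes yapf is installed and uses
yapf's FormatCode API; the sample function and names are hypothetical,
modelled on the set_tag() rewrap in one of the hunks further down.

# Sketch only: format a sample snippet with the pep8-based style.
from yapf.yapflib.yapf_api import FormatCode

SAMPLE = (
    "def tag_result(result, known_fields):\n"
    "    if result:\n"
    "        result.set_tag(tag=\"field\", "
    "value=known_fields.LICENSE_FILE.get_name())\n"
    "    return result\n")

# style_config accepts a named style ("pep8") or a path to a config file
# such as metadata/.style.yapf.
formatted, changed = FormatCode(SAMPLE, style_config="pep8")
print(formatted)
# With pep8's 79-column limit, the long set_tag(...) call should come back
# wrapped onto two lines, much like the corresponding change in this CL.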

@@ -16,8 +16,8 @@ _ROOT_DIR = os.path.abspath(os.path.join(_THIS_DIR, ".."))
sys.path.insert(0, _ROOT_DIR)
import metadata.fields.field_types as field_types
import metadata.fields.custom.license
import metadata.fields.custom.version
import metadata.fields.custom.license as license_util
import metadata.fields.custom.version as version_util
import metadata.fields.known as known_fields
import metadata.fields.util as util
import metadata.validation_result as vr
@@ -44,7 +44,8 @@ class DependencyMetadata:
self._metadata: Dict[field_types.MetadataField, str] = {}
# The record of how many times a field entry was added.
self._occurrences: Dict[field_types.MetadataField, int] = defaultdict(int)
self._occurrences: Dict[field_types.MetadataField,
int] = defaultdict(int)
def add_entry(self, field_name: str, field_value: str):
value = field_value.strip()
@@ -62,13 +63,15 @@ class DependencyMetadata:
return list(self._entries)
def _assess_required_fields(self) -> Set[field_types.MetadataField]:
"""Returns the set of required fields, based on the current metadata."""
"""Returns the set of required fields, based on the current
metadata.
"""
required = set(self._MANDATORY_FIELDS)
# The date and revision are required if the version has not been specified.
# The date and revision are required if the version has not
# been specified.
version_value = self._metadata.get(known_fields.VERSION)
if (version_value is None
or metadata.fields.custom.version.is_unknown(version_value)):
if version_value is None or version_util.is_unknown(version_value):
required.add(known_fields.DATE)
required.add(known_fields.REVISION)
@@ -81,12 +84,13 @@ class DependencyMetadata:
# A license file is required if the dependency is shipped.
required.add(known_fields.LICENSE_FILE)
# License compatibility with Android must be set if the package is shipped
# and the license is not in the allowlist.
# License compatibility with Android must be set if the
# package is shipped and the license is not in the
# allowlist.
has_allowlisted = False
license_value = self._metadata.get(known_fields.LICENSE)
if license_value:
licenses = metadata.fields.custom.license.process_license_value(
licenses = license_util.process_license_value(
license_value,
atomic_delimiter=known_fields.LICENSE.VALUE_DELIMITER)
for _, allowed in licenses:
@@ -104,7 +108,8 @@ class DependencyMetadata:
"""Validates all the metadata.
Args:
source_file_dir: the directory of the file that the metadata is from.
source_file_dir: the directory of the file that the metadata
is from.
repo_root_dir: the repository's root directory.
Returns: the metadata's validation results.
@@ -149,7 +154,8 @@ class DependencyMetadata:
repo_root_dir=repo_root_dir,
)
if result:
result.set_tag(tag="field", value=known_fields.LICENSE_FILE.get_name())
result.set_tag(tag="field",
value=known_fields.LICENSE_FILE.get_name())
results.append(result)
return results

@@ -17,15 +17,15 @@ def is_metadata_file(path: str) -> bool:
def find_metadata_files(root: str) -> List[str]:
"""Finds all metadata files within the given root directory, including
subdirectories.
"""Finds all metadata files within the given root directory,
including subdirectories.
Args:
root: the absolute path to the root directory within which to search.
root: the absolute path to the root directory within which to
search.
Returns:
the absolute full paths for all the metadata files within the root
directory.
Returns: the absolute full paths for all the metadata files within
the root directory.
"""
metadata_files = []
for item in os.listdir(root):

@@ -28,12 +28,13 @@ _PATTERN_CPE_FORMATTED_STRING = re.compile(r"^cpe:2\.3:[aho\-\*](:[^:]+){10}$")
def is_uri_cpe(value: str) -> bool:
"""Returns whether the value conforms to the CPE 2.3 URI binding (which is
compatible with CPE 2.2), with the additional constraint that at least one
component other than "part" has been specified.
"""Returns whether the value conforms to the CPE 2.3 URI binding
(which is compatible with CPE 2.2), with the additional constraint
that at least one component other than "part" has been specified.
For reference, see section 6.1 of the CPE Naming Specification Version 2.3 at
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf
For reference, see section 6.1 of the CPE Naming Specification
Version 2.3 at
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf.
"""
if not util.matches(_PATTERN_CPE_URI, value):
return False
@@ -52,11 +53,12 @@ def is_uri_cpe(value: str) -> bool:
def is_formatted_string_cpe(value: str) -> bool:
"""Returns whether the value conforms to the CPE 2.3 formatted string
binding.
"""Returns whether the value conforms to the CPE 2.3 formatted
string binding.
For reference, see section 6.2 of the CPE Naming Specification Version 2.3 at
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf
For reference, see section 6.2 of the CPE Naming Specification
Version 2.3 at
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf.
"""
return util.matches(_PATTERN_CPE_FORMATTED_STRING, value)
@@ -67,8 +69,8 @@ class CPEPrefixField(field_types.MetadataField):
super().__init__(name="CPEPrefix", one_liner=True)
def validate(self, value: str) -> Union[vr.ValidationResult, None]:
"""Checks the given value is either 'unknown', or conforms to either the
CPE 2.3 or 2.2 format.
"""Checks the given value is either 'unknown', or conforms to
either the CPE 2.3 or 2.2 format.
"""
if (util.is_unknown(value) or is_formatted_string_cpe(value)
or is_uri_cpe(value)):
@@ -77,7 +79,8 @@ class CPEPrefixField(field_types.MetadataField):
return vr.ValidationError(
reason=f"{self._name} is invalid.",
additional=[
"This field should be a CPE (version 2.3 or 2.2), or 'unknown'.",
"This field should be a CPE (version 2.3 or 2.2), "
"or 'unknown'.",
"Search for a CPE tag for the package at "
"https://nvd.nist.gov/products/cpe/search.",
f"Current value: '{value}'.",

@@ -55,17 +55,19 @@ def process_license_value(value: str,
"""Process a license field value, which may list multiple licenses.
Args:
value: the value to process, which may include both verbose and atomic
delimiters, e.g. "Apache, 2.0 and MIT and custom"
atomic_delimiter: the delimiter to use as a final step; values will not be
further split after using this delimiter.
value: the value to process, which may include both verbose and
atomic delimiters, e.g. "Apache, 2.0 and MIT and custom"
atomic_delimiter: the delimiter to use as a final step; values
will not be further split after using this
delimiter.
Returns: a list of the constituent licenses within the given value,
and whether the constituent license is on the allowlist.
e.g. [("Apache, 2.0", True), ("MIT", True), ("custom", False)]
e.g. [("Apache, 2.0", True), ("MIT", True),
("custom", False)]
"""
# Check if the value is on the allowlist as-is, and thus does not require
# further processing.
# Check if the value is on the allowlist as-is, and thus does not
# require further processing.
if is_license_allowlisted(value):
return [(value, True)]
@@ -76,17 +78,20 @@ def process_license_value(value: str,
breakdown.extend(
process_license_value(component.strip(), atomic_delimiter))
else:
# Split using the standard value delimiter. This results in atomic values;
# there is no further splitting possible.
# Split using the standard value delimiter. This results in
# atomic values; there is no further splitting possible.
for atomic_value in value.split(atomic_delimiter):
atomic_value = atomic_value.strip()
breakdown.append((atomic_value, is_license_allowlisted(atomic_value)))
breakdown.append(
(atomic_value, is_license_allowlisted(atomic_value)))
return breakdown
def is_license_allowlisted(value: str) -> bool:
"""Returns whether the value is in the allowlist for license types."""
"""Returns whether the value is in the allowlist for license
types.
"""
return util.matches(_PATTERN_LICENSE_ALLOWED, value)
@@ -108,7 +113,8 @@ class LicenseField(field_types.MetadataField):
atomic_delimiter=self.VALUE_DELIMITER)
for license, allowed in licenses:
if util.is_empty(license):
return vr.ValidationError(reason=f"{self._name} has an empty value.")
return vr.ValidationError(
reason=f"{self._name} has an empty value.")
if not allowed:
not_allowlisted.append(license)
@@ -117,7 +123,8 @@ class LicenseField(field_types.MetadataField):
reason=f"{self._name} has a license not in the allowlist.",
additional=[
f"Separate licenses using a '{self.VALUE_DELIMITER}'.",
f"Licenses not allowlisted: {util.quoted(not_allowlisted)}.",
"Licenses not allowlisted: "
f"{util.quoted(not_allowlisted)}.",
])
# Suggest using the standard value delimiter when possible.

@@ -32,11 +32,12 @@ class LicenseFileField(field_types.MetadataField):
super().__init__(name="License File", one_liner=True)
def validate(self, value: str) -> Union[vr.ValidationResult, None]:
"""Checks the given value consists of non-empty paths with no backward
directory navigation (i.e. no "../").
"""Checks the given value consists of non-empty paths with no
backward directory navigation (i.e. no "../").
This validation is rudimentary. To check if the license file(s) exist on
disk, see the `LicenseFileField.validate_on_disk` method.
This validation is rudimentary. To check if the license file(s)
exist on disk, see the `LicenseFileField.validate_on_disk`
method.
Note: this field supports multiple values.
"""
@@ -50,7 +51,8 @@ class LicenseFileField(field_types.MetadataField):
invalid_values = []
for path in value.split(self.VALUE_DELIMITER):
path = path.strip()
if util.is_empty(path) or util.matches(_PATTERN_PATH_BACKWARD, path):
if util.is_empty(path) or util.matches(_PATTERN_PATH_BACKWARD,
path):
invalid_values.append(path)
if invalid_values:
@@ -58,7 +60,8 @@ class LicenseFileField(field_types.MetadataField):
reason=f"{self._name} is invalid.",
additional=[
"File paths cannot be empty, or include '../'.",
f"Separate license files using a '{self.VALUE_DELIMITER}'.",
"Separate license files using a "
f"'{self.VALUE_DELIMITER}'.",
f"Invalid values: {util.quoted(invalid_values)}.",
])
@@ -70,20 +73,23 @@ class LicenseFileField(field_types.MetadataField):
source_file_dir: str,
repo_root_dir: str,
) -> Union[vr.ValidationResult, None]:
"""Checks the given value consists of file paths which exist on disk.
"""Checks the given value consists of file paths which exist on
disk.
Note: this field supports multiple values.
Args:
value: the value to validate.
source_file_dir: the directory of the metadata file that the license file
value is from; this is needed to construct file paths to
source_file_dir: the directory of the metadata file that the
license file value is from; this is needed
to construct file paths to license files.
repo_root_dir: the repository's root directory; this is
needed to construct file paths to
license files.
repo_root_dir: the repository's root directory; this is needed to
construct file paths to license files.
Returns: a validation result based on the license file value, and whether
the license file(s) exist on disk, otherwise None.
Returns: a validation result based on the license file value,
and whether the license file(s) exist on disk,
otherwise None.
"""
if value == _NOT_SHIPPED:
return vr.ValidationWarning(
@@ -97,10 +103,11 @@ class LicenseFileField(field_types.MetadataField):
license_filename = license_filename.strip()
if license_filename.startswith("/"):
license_filepath = os.path.join(
repo_root_dir, os.path.normpath(license_filename.lstrip("/")))
repo_root_dir,
os.path.normpath(license_filename.lstrip("/")))
else:
license_filepath = os.path.join(source_file_dir,
os.path.normpath(license_filename))
license_filepath = os.path.join(
source_file_dir, os.path.normpath(license_filename))
if not os.path.exists(license_filepath):
invalid_values.append(license_filepath)

@@ -47,7 +47,8 @@ class URLField(field_types.MetadataField):
return vr.ValidationError(
reason=f"{self._name} is invalid.",
additional=[
"URLs must use a protocol scheme in [http, https, ftp, git].",
"URLs must use a protocol scheme in "
"[http, https, ftp, git].",
f"Separate URLs using a '{self.VALUE_DELIMITER}'.",
f"Invalid values: {util.quoted(invalid_values)}.",
])

@@ -34,24 +34,24 @@ class VersionField(field_types.MetadataField):
super().__init__(name="Version", one_liner=True)
def validate(self, value: str) -> Union[vr.ValidationResult, None]:
"""Checks the given value is acceptable - there must be at least one
non-whitespace character, and "N/A" is preferred over "0" if the version is
unknown.
"""Checks the given value is acceptable - there must be at least
one non-whitespace character, and "N/A" is preferred over "0" if
the version is unknown.
"""
if value == "0" or util.is_unknown(value):
return vr.ValidationWarning(
reason=f"{self._name} is '{value}'.",
additional=[
"Set this field to 'N/A' if this package does not version or is "
"versioned by date or revision.",
"Set this field to 'N/A' if this package does not version "
"or is versioned by date or revision.",
])
if util.is_empty(value):
return vr.ValidationError(
reason=f"{self._name} is empty.",
additional=[
"Set this field to 'N/A' if this package does not version or is "
"versioned by date or revision.",
"Set this field to 'N/A' if this package does not version "
"or is versioned by date or revision.",
])
return None

@@ -55,16 +55,19 @@ class MetadataField:
def validate(self, value: str) -> Union[vr.ValidationResult, None]:
"""Checks the given value is acceptable for the field.
Raises: NotImplementedError if called. This method must be overridden with
the actual validation of the field.
Raises: NotImplementedError if called. This method must be
overridden with the actual validation of the field.
"""
raise NotImplementedError(f"{self._name} field validation not defined.")
raise NotImplementedError(
f"{self._name} field validation not defined.")
class FreeformTextField(MetadataField):
"""Field where the value is freeform text."""
def validate(self, value: str) -> Union[vr.ValidationResult, None]:
"""Checks the given value has at least one non-whitespace character."""
"""Checks the given value has at least one non-whitespace
character.
"""
if util.is_empty(value):
return vr.ValidationError(reason=f"{self._name} is empty.")

@@ -10,17 +10,20 @@ from typing import List
YES = "yes"
NO = "no"
# Pattern used to check if the entire string is "unknown", case-insensitive.
# Pattern used to check if the entire string is "unknown",
# case-insensitive.
_PATTERN_UNKNOWN = re.compile(r"^unknown$", re.IGNORECASE)
# Pattern used to check if the entire string is functionally empty, i.e.
# empty string, or all characters are only whitespace.
_PATTERN_ONLY_WHITESPACE = re.compile(r"^\s*$")
# Pattern used to check if the string starts with "yes", case-insensitive.
# Pattern used to check if the string starts with "yes",
# case-insensitive.
_PATTERN_STARTS_WITH_YES = re.compile(r"^yes", re.IGNORECASE)
# Pattern used to check if the string starts with "no", case-insensitive.
# Pattern used to check if the string starts with "no",
# case-insensitive.
_PATTERN_STARTS_WITH_NO = re.compile(r"^no", re.IGNORECASE)
@@ -40,7 +43,9 @@ def is_unknown(value: str) -> bool:
def quoted(values: List[str]) -> str:
"""Returns a string of the given values, each being individually quoted."""
"""Returns a string of the given values, each being individually
quoted.
"""
return ", ".join([f"'{entry}'" for entry in values])

@@ -24,11 +24,11 @@ DEPENDENCY_DIVIDER = re.compile(r"^-{20} DEPENDENCY DIVIDER -{20}$")
# Delimiter used to separate a field's name from its value.
FIELD_DELIMITER = ":"
# Pattern used to check if a line from a metadata file declares a new field.
# Pattern used to check if a line from a metadata file declares a new
# field.
_PATTERN_FIELD_DECLARATION = re.compile(
"^({}){}".format("|".join(known_fields.ALL_FIELD_NAMES), FIELD_DELIMITER),
re.IGNORECASE,
)
re.IGNORECASE)
def parse_content(content: str) -> List[dm.DependencyMetadata]:
@@ -37,9 +37,8 @@ def parse_content(content: str) -> List[dm.DependencyMetadata]:
Args:
content: the string to parse metadata from.
Returns:
all the metadata, which may be for zero or more dependencies, from the
given string.
Returns: all the metadata, which may be for zero or more
dependencies, from the given string.
"""
dependencies = []
current_metadata = dm.DependencyMetadata()
@@ -50,11 +49,13 @@ def parse_content(content: str) -> List[dm.DependencyMetadata]:
if DEPENDENCY_DIVIDER.match(line):
if current_field_name:
# Save the field value for the previous dependency.
current_metadata.add_entry(current_field_name, current_field_value)
current_metadata.add_entry(current_field_name,
current_field_value)
if current_metadata.has_entries():
# Add the previous dependency to the results.
dependencies.append(current_metadata)
# Reset for the new dependency's metadata, and reset the field state.
# Reset for the new dependency's metadata,
# and reset the field state.
current_metadata = dm.DependencyMetadata()
current_field_name = None
current_field_value = ""
@@ -62,23 +63,27 @@ def parse_content(content: str) -> List[dm.DependencyMetadata]:
elif _PATTERN_FIELD_DECLARATION.match(line):
# Save the field value to the current dependency's metadata.
if current_field_name:
current_metadata.add_entry(current_field_name, current_field_value)
current_metadata.add_entry(current_field_name,
current_field_value)
current_field_name, current_field_value = line.split(FIELD_DELIMITER, 1)
current_field_name, current_field_value = line.split(
FIELD_DELIMITER, 1)
field = known_fields.get_field(current_field_name)
if field and field.is_one_liner():
# The field should be on one line, so it can be added now.
current_metadata.add_entry(current_field_name, current_field_value)
# The field should be on one line, so add it now.
current_metadata.add_entry(current_field_name,
current_field_value)
# Reset the field state.
current_field_name = None
current_field_value = ""
elif current_field_name:
# The field is on multiple lines, so add this line to the field value.
# The field is on multiple lines, so add this line to the
# field value.
current_field_value += line
# At this point, the end of the file has been reached. Save any remaining
# field data and metadata.
# At this point, the end of the file has been reached.
# Save any remaining field data and metadata.
if current_field_name:
current_metadata.add_entry(current_field_name, current_field_value)
if current_metadata.has_entries():

@@ -44,8 +44,8 @@ def parse_args() -> argparse.Namespace:
def main() -> None:
"""Runs validation on all metadata files within the directory specified by the
repo_root_dir arg.
"""Runs validation on all metadata files within the directory
specified by the repo_root_dir arg.
"""
config = parse_args()
src_dir = os.path.abspath(config.repo_root_dir)
@@ -69,7 +69,8 @@ def main() -> None:
for result in file_results:
print(f" {result}")
summary_key = "{severity} - {reason}".format(
severity=result.get_severity_prefix(), reason=result.get_reason())
severity=result.get_severity_prefix(),
reason=result.get_reason())
all_reasons[summary_key].append(relpath)
if result.is_fatal():
invalid = True
@@ -85,8 +86,8 @@ def main() -> None:
for affected_file in affected_files:
print(f" {affected_file}")
print(f"\n\n{invalid_file_count} / {file_count} metadata files are invalid, "
"i.e. the file has at least one fatal validation issue.")
print(f"\n\n{invalid_file_count} / {file_count} metadata files are "
"invalid, i.e. the file has at least one fatal validation issue.")
if __name__ == "__main__":

@@ -21,10 +21,14 @@ import metadata.validation_result as vr
class DependencyValidationTest(unittest.TestCase):
def test_repeated_field(self):
"""Check that a validation error is returned for a repeated field."""
"""Check that a validation error is returned for a repeated
field.
"""
dependency = dm.DependencyMetadata()
dependency.add_entry(known_fields.NAME.get_name(), "Test repeated field")
dependency.add_entry(known_fields.URL.get_name(), "https://www.example.com")
dependency.add_entry(known_fields.NAME.get_name(),
"Test repeated field")
dependency.add_entry(known_fields.URL.get_name(),
"https://www.example.com")
dependency.add_entry(known_fields.VERSION.get_name(), "1.0.0")
dependency.add_entry(known_fields.LICENSE.get_name(), "Public Domain")
dependency.add_entry(known_fields.LICENSE_FILE.get_name(), "LICENSE")
@@ -48,7 +52,8 @@ class DependencyValidationTest(unittest.TestCase):
dependency.add_entry(known_fields.LICENSE_FILE.get_name(), "LICENSE")
dependency.add_entry(known_fields.LICENSE.get_name(), "Public Domain")
dependency.add_entry(known_fields.VERSION.get_name(), "1.0.0")
dependency.add_entry(known_fields.NAME.get_name(), "Test missing field")
dependency.add_entry(known_fields.NAME.get_name(),
"Test missing field")
# Leave URL field unspecified.
results = dependency.validate(
@@ -63,8 +68,10 @@ class DependencyValidationTest(unittest.TestCase):
def test_invalid_field(self):
"""Check field validation issues are returned."""
dependency = dm.DependencyMetadata()
dependency.add_entry(known_fields.URL.get_name(), "https://www.example.com")
dependency.add_entry(known_fields.NAME.get_name(), "Test invalid field")
dependency.add_entry(known_fields.URL.get_name(),
"https://www.example.com")
dependency.add_entry(known_fields.NAME.get_name(),
"Test invalid field")
dependency.add_entry(known_fields.VERSION.get_name(), "1.0.0")
dependency.add_entry(known_fields.LICENSE_FILE.get_name(), "LICENSE")
dependency.add_entry(known_fields.LICENSE.get_name(), "Public domain")
@@ -77,13 +84,16 @@ class DependencyValidationTest(unittest.TestCase):
)
self.assertEqual(len(results), 1)
self.assertTrue(isinstance(results[0], vr.ValidationError))
self.assertEqual(results[0].get_reason(), "Security Critical is invalid.")
self.assertEqual(results[0].get_reason(),
"Security Critical is invalid.")
def test_invalid_license_file_path(self):
"""Check license file path validation issues are returned."""
dependency = dm.DependencyMetadata()
dependency.add_entry(known_fields.NAME.get_name(), "Test license file path")
dependency.add_entry(known_fields.URL.get_name(), "https://www.example.com")
dependency.add_entry(known_fields.NAME.get_name(),
"Test license file path")
dependency.add_entry(known_fields.URL.get_name(),
"https://www.example.com")
dependency.add_entry(known_fields.VERSION.get_name(), "1.0.0")
dependency.add_entry(known_fields.LICENSE.get_name(), "Public domain")
dependency.add_entry(known_fields.LICENSE_FILE.get_name(),
@@ -102,7 +112,8 @@ class DependencyValidationTest(unittest.TestCase):
def test_multiple_validation_issues(self):
"""Check all validation issues are returned."""
dependency = dm.DependencyMetadata()
dependency.add_entry(known_fields.NAME.get_name(), "Test multiple errors")
dependency.add_entry(known_fields.NAME.get_name(),
"Test multiple errors")
# Leave URL field unspecified.
dependency.add_entry(known_fields.VERSION.get_name(), "1.0.0")
dependency.add_entry(known_fields.LICENSE.get_name(), "Public domain")

@@ -31,22 +31,28 @@ class FieldValidationTest(unittest.TestCase):
self.assertIsNone(field.validate(value), value)
for value in error_values:
self.assertIsInstance(field.validate(value), vr.ValidationError, value)
self.assertIsInstance(field.validate(value), vr.ValidationError,
value)
for value in warning_values:
self.assertIsInstance(field.validate(value), vr.ValidationWarning, value)
self.assertIsInstance(field.validate(value), vr.ValidationWarning,
value)
def test_freeform_text_validation(self):
# Check validation of a freeform text field that should be on one line.
# Check validation of a freeform text field that should be on
# one line.
self._run_field_validation(
field=field_types.FreeformTextField("Freeform single", one_liner=True),
field=field_types.FreeformTextField("Freeform single",
one_liner=True),
valid_values=["Text on single line", "a", "1"],
error_values=["", "\n", " "],
)
# Check validation of a freeform text field that can span multiple lines.
# Check validation of a freeform text field that can span
# multiple lines.
self._run_field_validation(
field=field_types.FreeformTextField("Freeform multi", one_liner=False),
field=field_types.FreeformTextField("Freeform multi",
one_liner=False),
valid_values=[
"This is text spanning multiple lines:\n"
" * with this point\n"
@@ -117,13 +123,15 @@ class FieldValidationTest(unittest.TestCase):
field=known_fields.LICENSE_FILE,
valid_values=[
"LICENSE", "src/LICENSE.txt",
"LICENSE, //third_party_test/LICENSE-TEST", "src/MISSING_LICENSE"
"LICENSE, //third_party_test/LICENSE-TEST",
"src/MISSING_LICENSE"
],
error_values=["", "\n", ","],
warning_values=["NOT_SHIPPED"],
)
# Check relative path from README directory, and multiple license files.
# Check relative path from README directory, and multiple
# license files.
result = known_fields.LICENSE_FILE.validate_on_disk(
value="LICENSE, src/LICENSE.txt",
source_file_dir=os.path.join(_THIS_DIR, "data"),

@@ -79,8 +79,8 @@ class ParseTest(unittest.TestCase):
],
)
# Check the parser handles different casing for field names, and strips
# leading and trailing whitespace from values.
# Check the parser handles different casing for field names, and
# strips leading and trailing whitespace from values.
self.assertListEqual(
all_metadata[1].get_entries(),
[

@@ -21,8 +21,8 @@ import metadata.validate
_SOURCE_FILE_DIR = os.path.join(_THIS_DIR, "data")
_VALID_METADATA_FILEPATH = os.path.join(_THIS_DIR, "data",
"README.chromium.test.multi-valid")
_INVALID_METADATA_FILEPATH = os.path.join(_THIS_DIR, "data",
"README.chromium.test.multi-invalid")
_INVALID_METADATA_FILEPATH = os.path.join(
_THIS_DIR, "data", "README.chromium.test.multi-invalid")
class ValidateContentTest(unittest.TestCase):
@@ -111,8 +111,8 @@ class CheckFileTest(unittest.TestCase):
filepath=os.path.join(_THIS_DIR, "data", "MISSING.chromium"),
repo_root_dir=_THIS_DIR,
)
# TODO(aredulla): update this test once validation errors can be returned
# as errors. Bug: b/285453019.
# TODO(aredulla): update this test once validation errors can be
# returned as errors. Bug: b/285453019.
# self.assertEqual(len(errors), 1)
# self.assertEqual(len(warnings), 0)
self.assertEqual(len(errors), 0)
@@ -133,8 +133,8 @@ class CheckFileTest(unittest.TestCase):
filepath=_INVALID_METADATA_FILEPATH,
repo_root_dir=_THIS_DIR,
)
# TODO(aredulla): update this test once validation errors can be returned
# as errors. Bug: b/285453019.
# TODO(aredulla): update this test once validation errors can be
# returned as errors. Bug: b/285453019.
# self.assertEqual(len(errors), 9)
# self.assertEqual(len(warnings), 2)
self.assertEqual(len(errors), 0)

@@ -24,12 +24,13 @@ def validate_content(content: str, source_file_dir: str,
"""Validate the content as a metadata file.
Args:
content: the entire content of a file to be validated as a metadata file.
source_file_dir: the directory of the metadata file that the license file
value is from; this is needed to construct file paths to
license files.
repo_root_dir: the repository's root directory; this is needed to construct
file paths to license files.
content: the entire content of a file to be validated as a
metadata file.
source_file_dir: the directory of the metadata file that the
license file value is from; this is needed to
construct file paths to license files.
repo_root_dir: the repository's root directory; this is needed
to construct file paths to license files.
Returns: the validation results for the given content.
"""
@@ -40,14 +41,17 @@ def validate_content(content: str, source_file_dir: str,
return [result]
for dependency in dependencies:
dependency_results = dependency.validate(source_file_dir=source_file_dir,
repo_root_dir=repo_root_dir)
dependency_results = dependency.validate(
source_file_dir=source_file_dir, repo_root_dir=repo_root_dir)
results.extend(dependency_results)
return results
def _construct_file_read_error(filepath: str, cause: str) -> vr.ValidationError:
"""Helper function to create a validation error for a file reading issue."""
def _construct_file_read_error(filepath: str,
cause: str) -> vr.ValidationError:
"""Helper function to create a validation error for a
file reading issue.
"""
result = vr.ValidationError(
reason="Cannot read metadata file.",
additional=[f"Attempted to read '{filepath}' but {cause}."])
@@ -59,18 +63,19 @@ def validate_file(
repo_root_dir: str,
reader: Callable[[str], Union[str, bytes]] = None,
) -> List[vr.ValidationResult]:
"""Validate the item located at the given filepath is a valid dependency
metadata file.
"""Validate the item located at the given filepath is a valid
dependency metadata file.
Args:
filepath: the path to validate,
e.g. "/chromium/src/third_party/libname/README.chromium"
repo_root_dir: the repository's root directory; this is needed to construct
file paths to license files.
reader (optional): callable function/method to read the content of the file.
filepath: the path to validate, e.g.
"/chromium/src/third_party/libname/README.chromium"
repo_root_dir: the repository's root directory; this is needed
to construct file paths to license files.
reader (optional): callable function/method to read the content
of the file.
Returns: the validation results for the given filepath and its contents, if
it exists.
Returns: the validation results for the given filepath and its
contents, if it exists.
"""
if reader is None:
reader = gclient_utils.FileRead
@@ -82,7 +87,9 @@ def validate_file(
except PermissionError:
return [_construct_file_read_error(filepath, "access denied")]
except Exception as e:
return [_construct_file_read_error(filepath, f"unexpected error: '{e}'")]
return [
_construct_file_read_error(filepath, f"unexpected error: '{e}'")
]
else:
if not isinstance(content, str):
return [_construct_file_read_error(filepath, "decoding failed")]
@@ -99,21 +106,22 @@ def check_file(
repo_root_dir: str,
reader: Callable[[str], Union[str, bytes]] = None,
) -> Tuple[List[str], List[str]]:
"""Run metadata validation on the given filepath, and return all validation
errors and validation warnings.
"""Run metadata validation on the given filepath, and return all
validation errors and validation warnings.
Args:
filepath: the path to a metadata file,
e.g. "/chromium/src/third_party/libname/README.chromium"
repo_root_dir: the repository's root directory; this is needed to construct
file paths to license files.
reader (optional): callable function/method to read the content of the file.
filepath: the path to a metadata file, e.g.
"/chromium/src/third_party/libname/README.chromium"
repo_root_dir: the repository's root directory; this is needed
to construct file paths to license files.
reader (optional): callable function/method to read the content
of the file.
Returns:
error_messages: the fatal validation issues present in the file;
i.e. presubmit should fail.
warning_messages: the non-fatal validation issues present in the file;
i.e. presubmit should still pass.
warning_messages: the non-fatal validation issues present in the
file; i.e. presubmit should still pass.
"""
results = validate_file(filepath=filepath,
repo_root_dir=repo_root_dir,
@@ -124,11 +132,12 @@ def check_file(
for result in results:
message = result.get_message(width=60)
# TODO(aredulla): Actually distinguish between validation errors and
# warnings. The quality of metadata is currently being uplifted, but is not
# yet guaranteed to pass validation. So for now, all validation results will
# be returned as warnings so CLs are not blocked by invalid metadata in
# presubmits yet. Bug: b/285453019.
# TODO(aredulla): Actually distinguish between validation errors
# and warnings. The quality of metadata is currently being
# uplifted, but is not yet guaranteed to pass validation. So for
# now, all validation results will be returned as warnings so
# CLs are not blocked by invalid metadata in presubmits yet.
# Bug: b/285453019.
# if result.is_fatal():
# error_messages.append(message)
# else:

@@ -6,9 +6,9 @@
import textwrap
from typing import Dict, List, Union
_CHROMIUM_METADATA_PRESCRIPT = "Third party metadata issue:"
_CHROMIUM_METADATA_POSTSCRIPT = ("Check //third_party/README.chromium.template "
_CHROMIUM_METADATA_POSTSCRIPT = (
"Check //third_party/README.chromium.template "
"for details.")
@@ -20,9 +20,10 @@ class ValidationResult:
Args:
reason: the root cause of the issue.
fatal: whether the issue is fatal.
additional: details that should be included in the validation message,
e.g. advice on how to address the issue, or specific
problematic values.
additional: details that should be included in the
validation message, e.g. advice on how to
address the issue, or specific problematic
values.
"""
self._reason = reason
self._fatal = fatal