# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

import contextlib
import fnmatch
import itertools
import os
import re
import StringIO
import sys

from licenseck import licenses

filetypes_to_check = [".rs", ".rc", ".cpp", ".c", ".h", ".lock", ".py", ".toml", ".webidl"]
reftest_dir = "./tests/ref"
reftest_filetype = ".list"

ignored_files = [
    # Upstream
    os.path.join(".", "support", "*"),
    os.path.join(".", "tests", "wpt", "css-tests", "*"),
    os.path.join(".", "tests", "wpt", "harness", "*"),
    os.path.join(".", "tests", "wpt", "sync", "*"),
    os.path.join(".", "tests", "wpt", "sync_css", "*"),
    os.path.join(".", "tests", "wpt", "update", "*"),
    os.path.join(".", "tests", "wpt", "web-platform-tests", "*"),
    os.path.join(".", "python", "mach", "*"),
    os.path.join(".", "components", "script", "dom", "bindings", "codegen", "parser", "*"),
    os.path.join(".", "components", "script", "dom", "bindings", "codegen", "ply", "*"),
    os.path.join(".", "python", "_virtualenv", "*"),

    # Generated and upstream code combined with our own. Could use cleanup
    os.path.join(".", "target", "*"),
    os.path.join(".", "ports", "gonk", "src", "native_window_glue.cpp"),
    os.path.join(".", "ports", "cef", "*"),

    # MIT license
    os.path.join(".", "components", "util", "deque", "mod.rs"),

    # Hidden files/directories
    os.path.join(".", ".*"),
]


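# Decide whether a file participates in the tidy checks: Cargo.lock is always
# checked, editor lock files (".#") and ignored paths are skipped, and
# everything else is filtered by extension.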
def should_check(file_name):
    if os.path.basename(file_name) == "Cargo.lock":
        return True
    if ".#" in file_name:
        return False
    if os.path.splitext(file_name)[1] not in filetypes_to_check:
        return False
    for pattern in ignored_files:
        if fnmatch.fnmatch(file_name, pattern):
            return False
    return True


def should_check_reftest(file_name):
    return file_name.endswith(reftest_filetype)


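# Editor modelines (Emacs / Vim) that may legitimately appear above the
# license header; check_license skips past them.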
EMACS_HEADER = "/* -*- Mode:"
VIM_HEADER = "/* vim:"
MAX_LICENSE_LINESPAN = max(len(license.splitlines()) for license in licenses)


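# Verify that a file opens with one of the known license headers; the marker
# "xfail-license" acknowledges a known-bad one.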
def check_license(file_name, lines):
    if file_name.endswith(".toml") or file_name.endswith(".lock"):
        raise StopIteration
    while lines and (lines[0].startswith(EMACS_HEADER) or lines[0].startswith(VIM_HEADER)):
        lines = lines[1:]
    contents = "".join(lines[:MAX_LICENSE_LINESPAN])
    valid_license = any(contents.startswith(license) for license in licenses)
    acknowledged_bad_license = "xfail-license" in contents
    if not (valid_license or acknowledged_bad_license):
        yield (1, "incorrect license")


def check_length(file_name, idx, line):
    if file_name.endswith(".lock"):
        raise StopIteration
    max_length = 120
    if len(line.rstrip('\n')) > max_length:
        yield (idx + 1, "Line is longer than %d characters" % max_length)


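# Prefer the stable WHATWG multipage fragment form: per-page URLs like
# /multipage/foo.html#id can break when the spec is repaginated.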
|
2015-01-19 15:33:48 +00:00
|
|
|
|
2015-06-04 15:41:36 +00:00
|
|
|
|
2015-10-16 01:25:53 +00:00
|
|
|
def check_whatwg_specific_url(idx, line):
|
2015-05-06 10:25:28 +00:00
|
|
|
match = re.search(r"https://html\.spec\.whatwg\.org/multipage/[\w-]+\.html#([\w\:-]+)", line)
|
|
|
|
if match is not None:
|
|
|
|
preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1))
|
|
|
|
yield (idx + 1, "link to WHATWG may break in the future, use this format instead: {}".format(preferred_link))
def check_whatwg_single_page_url(idx, line):
    match = re.search(r"https://html\.spec\.whatwg\.org/#([\w\:-]+)", line)
    if match is not None:
        preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1))
        yield (idx + 1, "links to WHATWG single-page url, change to multi page: {}".format(preferred_link))


def check_whitespace(idx, line):
    if line[-1] == "\n":
        line = line[:-1]
    else:
        yield (idx + 1, "no newline at EOF")

    if line.endswith(" "):
        yield (idx + 1, "trailing whitespace")

    if "\t" in line:
        yield (idx + 1, "tab on line")

    if "\r" in line:
        yield (idx + 1, "CR on line")


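# Run every per-line check over a file in a single pass, chaining the
# individual generators together.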
def check_by_line(file_name, lines):
    for idx, line in enumerate(lines):
        errors = itertools.chain(
            check_length(file_name, idx, line),
            check_whitespace(idx, line),
            check_whatwg_specific_url(idx, line),
            check_whatwg_single_page_url(idx, line),
        )

        for error in errors:
            yield error


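# flake8's check_code prints its findings to stdout, so temporarily capture
# stdout in a StringIO buffer and re-parse each "file:line:col: message"
# record into tidy's (line, message) form.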
def check_flake8(file_name, contents):
    from flake8.main import check_code

    if not file_name.endswith(".py"):
        raise StopIteration

    @contextlib.contextmanager
    def stdout_redirect(where):
        sys.stdout = where
        try:
            yield where
        finally:
            sys.stdout = sys.__stdout__

    ignore = {
        "W291",  # trailing whitespace; the standard tidy process will enforce no trailing whitespace
        "E501",  # 80 character line length; the standard tidy process will enforce line length
    }

    output = StringIO.StringIO()
    with stdout_redirect(output):
        check_code(contents, ignore=ignore)
    for error in output.getvalue().splitlines():
        _, line_num, _, message = error.split(":", 3)
        yield line_num, message.strip()


def check_lock(file_name, contents):
    def find_reverse_dependencies(dependency, version, content):
        dependency_prefix = "{} {}".format(dependency, version)
        for package in itertools.chain([content["root"]], content["package"]):
            for dep in package.get("dependencies", []):
                if dep.startswith(dependency_prefix):
                    yield package["name"]

    if not file_name.endswith(".lock"):
        raise StopIteration

    # package names to be neglected (as named by cargo)
    exceptions = ["libc", "cocoa"]

    import toml
    content = toml.loads(contents)

    packages = {}
    for package in content.get("package", []):
        packages.setdefault(package["name"], []).append(package["version"])

    for (name, versions) in packages.iteritems():
        if name in exceptions or len(versions) <= 1:
            continue

        # Compare version components numerically (via maybe_int) so that
        # "0.10.0" ranks above "0.9.0"; max() on the plain strings would
        # compare them lexicographically.
        highest = max(versions, key=lambda v: [maybe_int(part) for part in v.split(".")])
        for version in versions:
            if version != highest:
                reverse_dependencies = "\n".join(
                    "\t\t{}".format(n)
                    for n in find_reverse_dependencies(name, version, content)
                )
                substitutions = {
                    "package": name,
                    "old_version": version,
                    "new_version": highest,
                    "reverse_dependencies": reverse_dependencies
                }
                message = """
duplicate versions for package "{package}"
\t\033[93mfound dependency on version {old_version}\033[0m
\t\033[91mbut highest version is {new_version}\033[0m
\t\033[93mtry upgrading with\033[0m \033[96m./mach cargo-update -p {package}:{old_version}\033[0m
\tThe following packages depend on version {old_version}:
{reverse_dependencies}
""".format(**substitutions).strip()
                yield (1, message)


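# Coerce a version component to an int where possible, so numeric comparison
# ("10" > "9") is used instead of string comparison ("10" < "9").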
def maybe_int(value):
    try:
        return int(value)
    except ValueError:
        return value


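# Cargo.toml dependencies should pin a minimum version; a bare "*" accepts
# anything.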
def check_toml(file_name, lines):
    if not file_name.endswith(".toml"):
        raise StopIteration
    for idx, line in enumerate(lines):
        if line.find("*") != -1:
            yield (idx + 1, "found asterisk instead of minimum version number")


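# A hand-rolled Rust style checker: each line is stripped, continuation lines
# and block comments are merged, string literals and comments are erased, and
# what remains is matched against spacing and ordering rules.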
def check_rust(file_name, lines):
    if not file_name.endswith(".rs") or \
            file_name.endswith("properties.mako.rs") or \
            file_name.endswith(os.path.join("style", "build.rs")) or \
            file_name.endswith(os.path.join("unit", "style", "stylesheets.rs")):
        raise StopIteration
    comment_depth = 0
    merged_lines = ''

    import_block = False
    whitespace = False

    prev_use = None
    current_indent = 0
    prev_crate = {}
    prev_mod = {}

    decl_message = "{} is not in alphabetical order"
    decl_expected = "\n\t\033[93mexpected: {}\033[0m"
    decl_found = "\n\t\033[91mfound: {}\033[0m"

    for idx, original_line in enumerate(lines):
        # simplify the analysis
        line = original_line.strip()

        # Simple heuristic to avoid common case of no comments.
        if '/' in line:
            comment_depth += line.count('/*')
            comment_depth -= line.count('*/')

        if line.endswith('\\'):
            merged_lines += line[:-1]
            continue
        if comment_depth:
            merged_lines += line
            continue
        if merged_lines:
            line = merged_lines + line
            merged_lines = ''

        # Keep track of whitespace to enable checking for a merged import block
        #
        # Ignore attributes, comments, and imports
        if import_block:
            if not (line_is_comment(line) or line_is_attribute(line) or line.startswith("use ")):
                whitespace = line == ""

                if not whitespace:
                    import_block = False

        # get rid of strings and chars because cases like regex expression, keep attributes
        if not line_is_attribute(line):
            line = re.sub('".*?"|\'.*?\'', '', line)

        # get rid of comments
        line = re.sub('//.*?$|/\*.*?$|^\*.*?$', '', line)

        # get rid of attributes that do not contain =
        line = re.sub('^#[A-Za-z0-9\(\)\[\]_]*?$', '', line)

        match = re.search(r",[^\s]", line)
        if match and '$' not in line:
            yield (idx + 1, "missing space after ,")

        if line_is_attribute(line):
            pre_space_re = r"[A-Za-z0-9]="
            post_space_re = r"=[A-Za-z0-9\"]"
        else:
            # - not included because of scientific notation (1e-6)
            pre_space_re = r"[A-Za-z0-9][\+/\*%=]"
            # * not included because of dereferencing and casting
            # - not included because of unary negation
            post_space_re = r"[\+/\%=][A-Za-z0-9\"]"

        match = re.search(pre_space_re, line)
        if match and not is_associated_type(match, line, 1):
            yield (idx + 1, "missing space before %s" % match.group(0)[1])

        match = re.search(post_space_re, line)
        if match and not is_associated_type(match, line, 0):
            yield (idx + 1, "missing space after %s" % match.group(0)[0])

        match = re.search(r"\)->", line)
        if match:
            yield (idx + 1, "missing space before ->")

        match = re.search(r"->[A-Za-z]", line)
        if match:
            yield (idx + 1, "missing space after ->")

        line_len = len(line)
        arrow_pos = line.find("=>")
        if arrow_pos != -1:
            if arrow_pos and line[arrow_pos - 1] != ' ':
                yield (idx + 1, "missing space before =>")
            if arrow_pos + 2 < line_len and line[arrow_pos + 2] != ' ':
                yield (idx + 1, "missing space after =>")
            elif arrow_pos + 3 < line_len and line[arrow_pos + 3] == ' ':
                yield (idx + 1, "extra space after =>")

        # Avoid flagging ::crate::mod and `trait Foo : Bar`
        match = line.find(" :")
        if match != -1:
            # Slice instead of indexing so a line ending in " :" cannot raise
            # an IndexError.
            if line[0:match].find('trait ') == -1 and line[match + 2:match + 3] != ':':
                yield (idx + 1, "extra space before :")

        # Avoid flagging crate::mod
        match = re.search(r"[^:]:[A-Za-z]", line)
        if match:
            # Avoid flagging macros like $t1:expr
            if line[0:match.end()].rfind('$') == -1:
                yield (idx + 1, "missing space after :")

        match = re.search(r"[A-Za-z0-9\)]{", line)
        if match:
            yield (idx + 1, "missing space before {")

        # ignored cases like {}, }` and }}
        match = re.search(r"[^\s{}]}[^`]", line)
        if match and not (line.startswith("use") or line.startswith("pub use")):
            yield (idx + 1, "missing space before }")

        # ignored cases like {}, `{ and {{
        match = re.search(r"[^`]{[^\s{}]", line)
        if match and not (line.startswith("use") or line.startswith("pub use")):
            yield (idx + 1, "missing space after {")

        # check extern crates
        if line.startswith("extern crate "):
            crate_name = line[len("extern crate "):-1]
            indent = len(original_line) - line_len
            if indent not in prev_crate:
                prev_crate[indent] = ""
            if prev_crate[indent] > crate_name:
                yield (idx + 1, decl_message.format("extern crate declaration")
                       + decl_expected.format(prev_crate[indent])
                       + decl_found.format(crate_name))
            prev_crate[indent] = crate_name

        # imports must be in the same line, alphabetically sorted, and merged
        # into a single import block
        elif line.startswith("use "):
            import_block = True
            use = line[4:]
            indent = len(original_line) - line_len
            if not use.endswith(";"):
                yield (idx + 1, "use statement spans multiple lines")
            current_use = use[:len(use) - 1]
            if indent == current_indent and prev_use and current_use < prev_use:
                yield (idx + 1, decl_message.format("use statement")
                       + decl_expected.format(prev_use)
                       + decl_found.format(current_use))
            prev_use = current_use
            current_indent = indent

        if whitespace or not import_block:
            current_indent = 0

        if import_block and whitespace and line.startswith("use "):
            whitespace = False
            yield (idx, "encountered whitespace following a use statement")

        # modules must be in the same line and alphabetically sorted
        if line.startswith("mod ") or line.startswith("pub mod "):
            indent = len(original_line) - line_len
            mod = line[4:] if line.startswith("mod ") else line[8:]

            # `idx == 0` (there is no previous line) rather than the original
            # dead `idx < 0`, which also made `lines[idx - 1]` wrap around to
            # the end of the file for the first line.
            if idx == 0 or "#[macro_use]" not in lines[idx - 1]:
                match = line.find(" {")
                if indent not in prev_mod:
                    prev_mod[indent] = ""
                if match == -1 and not mod.endswith(";"):
                    yield (idx + 1, "mod declaration spans multiple lines")
                mod = mod[:len(mod) - 1]
                if len(prev_mod[indent]) > 0 and mod < prev_mod[indent]:
                    yield (idx + 1, decl_message.format("mod declaration")
                           + decl_expected.format(prev_mod[indent])
                           + decl_found.format(mod))
                prev_mod[indent] = mod
            else:
                # we now erase previous entries
                prev_mod = {}

        # There should not be any extra pointer dereferencing
        if ": &Vec<" in line:
            yield (idx + 1, "use &[T] instead of &Vec<T>")

        # No benefit over using &str
        if ": &String" in line:
            yield (idx + 1, "use &str instead of &String")


# Avoid flagging <Item=Foo> constructs
def is_associated_type(match, line, index):
    open_angle = line[0:match.end()].rfind('<')
    close_angle = line[open_angle:].find('>') if open_angle != -1 else -1
    is_equals = match.group(0)[index] == '='
    generic_open = open_angle != -1 and open_angle < match.start()
    generic_close = close_angle != -1 and close_angle + open_angle >= match.end()
    return is_equals and generic_open and generic_close


def line_is_attribute(line):
    return re.search(r"#\[.*\]", line)


def line_is_comment(line):
    return re.search(r"^//|^/\*|^\*", line)


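# Each .webidl file must cite the standard it implements: accept any of the
# known specification URL prefixes below (or the explicit "internal to Servo"
# marker).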
def check_webidl_spec(file_name, contents):
    # Sorted by this function (in pseudo-Rust). The idea is to group the same
    # organization together.
    # fn sort_standards(a: &Url, b: &Url) -> Ordering {
    #     let a_domain = a.domain().split(".");
    #     a_domain.pop();
    #     a_domain.reverse();
    #     let b_domain = b.domain().split(".");
    #     b_domain.pop();
    #     b_domain.reverse();
    #     for i in a_domain.into_iter().zip(b_domain.into_iter()) {
    #         match i.0.cmp(b.0) {
    #             Less => return Less,
    #             Greater => return Greater,
    #             _ => (),
    #         }
    #     }
    #     a_domain.path().cmp(b_domain.path())
    # }
    if not file_name.endswith(".webidl"):
        raise StopIteration
    standards = [
        "//www.khronos.org/registry/webgl/specs",
        "//developer.mozilla.org/en-US/docs/Web/API",
        "//dev.w3.org/2006/webapi",
        "//dev.w3.org/csswg",
        "//dev.w3.org/fxtf",
        "//dvcs.w3.org/hg",
        "//dom.spec.whatwg.org",
        "//domparsing.spec.whatwg.org",
        "//drafts.fxtf.org",
        "//encoding.spec.whatwg.org",
        "//html.spec.whatwg.org",
        "//url.spec.whatwg.org",
        "//xhr.spec.whatwg.org",
        "//w3c.github.io",
        "//heycam.github.io/webidl",
        # Not a URL
        "// This interface is entirely internal to Servo, and should not be" +
        " accessible to\n// web pages."
    ]
    for i in standards:
        if contents.find(i) != -1:
            raise StopIteration
    yield 0, "No specification link found."


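# Every method in a generated "FooMethods" impl should carry a doc comment
# linking to the specification it implements; walk upward from each `fn`
# until a comment with an https:// link (or a non-comment line) is found.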
def check_spec(file_name, lines):
    base_path = "components/script/dom/"
    if base_path not in file_name:
        raise StopIteration
    file_name = os.path.relpath(os.path.splitext(file_name)[0], base_path)
    patt = re.compile("^\s*\/\/.+")

    # Pattern representing a line with a macro
    macro_patt = re.compile("^\s*\S+!(.*)$")

    # Pattern representing a line with comment containing a spec link
    link_patt = re.compile("^\s*///? https://.+$")

    # Pattern representing a line with comment
    comment_patt = re.compile("^\s*///?.+$")

    pattern = "impl %sMethods for %s {" % (file_name, file_name)
    brace_count = 0
    in_impl = False
    for idx, line in enumerate(lines):
        if "// check-tidy: no specs after this line" in line:
            break
        if not patt.match(line):
            if pattern.lower() in line.lower():
                in_impl = True
            if ("fn " in line or macro_patt.match(line)) and brace_count == 1:
                for up_idx in range(1, idx + 1):
                    up_line = lines[idx - up_idx]
                    if link_patt.match(up_line):
                        # Comment with spec link exists
                        break
                    if not comment_patt.match(up_line):
                        # No more comments exist above, yield warning
                        yield (idx + 1, "method declared in webidl is missing a comment with a specification link")
                        break
            if '{' in line and in_impl:
                brace_count += 1
            if '}' in line and in_impl:
                if brace_count == 1:
                    break
                brace_count -= 1


def collect_errors_for_files(files_to_check, checking_functions, line_checking_functions):
    for filename in files_to_check:
        with open(filename, "r") as f:
            contents = f.read()
            for check in checking_functions:
                for error in check(filename, contents):
                    # the result will be: `(filename, line, message)`
                    yield (filename,) + error
            lines = contents.splitlines(True)
            for check in line_checking_functions:
                for error in check(filename, lines):
                    yield (filename,) + error


def check_reftest_order(files_to_check):
    for file_name in files_to_check:
        with open(file_name, "r") as fp:
            split_lines = fp.read().splitlines()
            lines = filter(lambda l: len(l) > 0 and l[0] != '#', split_lines)
            for idx, line in enumerate(lines[:-1]):
                next_line = lines[idx + 1]
                current = get_reftest_names(line)
                upcoming = get_reftest_names(next_line)
                if current is not None and upcoming is not None and current > upcoming:
                    yield (file_name, split_lines.index(next_line) + 1, "line not in alphabetical order")


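# A manifest line carries either three or four tokens; in both cases the last
# two are the test and reference file names, concatenated here to form the
# ordering key.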
def get_reftest_names(line):
    tokens = line.split()
    if len(tokens) == 3:
        return tokens[1] + tokens[2]
    if len(tokens) == 4:
        return tokens[2] + tokens[3]
    return None


def get_html_file_names_from_reftest_list(reftest_dir, file_name):
    for line in open(os.path.join(reftest_dir, file_name), "r"):
        for token in line.split():
            if fnmatch.fnmatch(token, '*.html'):
                yield os.path.join(reftest_dir, token)


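# Every HTML file sitting directly in the reftest directory should be
# referenced from basic.list.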
def check_reftest_html_files_in_basic_list(reftest_dir):
    basic_list_files = set(get_html_file_names_from_reftest_list(reftest_dir, "basic" + reftest_filetype))

    for file_name in os.listdir(reftest_dir):
        file_path = os.path.join(reftest_dir, file_name)
        if fnmatch.fnmatch(file_path, '*.html') and file_path not in basic_list_files:
            yield (file_path, "", "not found in basic.list")


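# Delegate to the lint tool bundled with web-platform-tests; a nonzero exit
# status is surfaced as a single tidy error.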
def check_wpt_lint_errors():
    import subprocess
    wpt_working_dir = os.path.abspath(os.path.join(".", "tests", "wpt", "web-platform-tests"))
    lint_cmd = os.path.join(wpt_working_dir, "lint")
    try:
        subprocess.check_call(lint_cmd, cwd=wpt_working_dir)  # Must run from wpt's working dir
    except subprocess.CalledProcessError as e:
        yield ("WPT Lint Tool", "", "lint error(s) in Web Platform Tests: exit status {0}".format(e.returncode))


def scan():
    all_files = (os.path.join(r, f) for r, _, files in os.walk(".") for f in files)
    files_to_check = filter(should_check, all_files)

    checking_functions = (check_flake8, check_lock, check_webidl_spec)
    line_checking_functions = (check_license, check_by_line, check_toml, check_rust, check_spec)
    errors = collect_errors_for_files(files_to_check, checking_functions, line_checking_functions)

    reftest_files = (os.path.join(r, f) for r, _, files in os.walk(reftest_dir) for f in files)
    reftest_to_check = filter(should_check_reftest, reftest_files)
    r_errors = check_reftest_order(reftest_to_check)
    not_found_in_basic_list_errors = check_reftest_html_files_in_basic_list(reftest_dir)
    wpt_lint_errors = check_wpt_lint_errors()

    errors = itertools.chain(errors, r_errors, not_found_in_basic_list_errors, wpt_lint_errors)

    error = None
    for error in errors:
        print "\033[94m{}\033[0m:\033[93m{}\033[0m: \033[91m{}\033[0m".format(*error)

    if error is None:
        print "\033[92mtidy reported no errors.\033[0m"
        return 0
    else:
        return 1
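

if __name__ == "__main__":
    # Hypothetical standalone entry point, added here for illustration only;
    # in the Servo tree this module is normally driven through `./mach tidy`
    # rather than run directly.
    sys.exit(scan())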