Added ROU104 (#6)
* [DEV-4793] Added ROU104 for 2+ lines after comments

* Ignore section comments

* Ignore group comments

* Add dataclass for conditions
rynmlng authored Mar 29, 2022
1 parent 40cd651 commit 503af19
Showing 5 changed files with 285 additions and 2 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -21,6 +21,7 @@ Here is a list of the rules supported by this Flake8 plugin:
* `ROU100` - Triple double quotes not used for docstring
* `ROU101` - Import from a tests directory
* `ROU102` - Strings should not span multiple lines except comments or docstrings
* `ROU104` - Multiple blank lines are not allowed after a non-section comment
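
For example, ROU104 flags the first snippet below and accepts the second (a minimal illustration drawn from this commit's tests):

    # Setup


    User = get_user_model()  # ROU104: two blank lines follow the comment

    # Setup

    User = get_user_model()  # OK: a single blank line is allowed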

## Testing

104 changes: 102 additions & 2 deletions flake8_routable.py
@@ -2,19 +2,55 @@
import ast
import importlib.metadata as importlib_metadata
import tokenize
from dataclasses import dataclass
from itertools import chain
from typing import Generator, List, Tuple, Type


- DOCSTRING_STMT_TYPES = (
+ CLASS_AND_FUNC_TOKENS = (
"class",
"def",
)

MAX_BLANK_LINES_AFTER_COMMENT = 2
SECTION_COMMENT_START = "# --"

# Note: a rule's message should state what is wrong, not how to fix it
ROU100 = "ROU100 Triple double quotes not used for docstring"
ROU101 = "ROU101 Import from a tests directory"
ROU102 = "ROU102 Strings should not span multiple lines except comments or docstrings"
ROU104 = "ROU104 Multiple blank lines are not allowed after a non-section comment"


@dataclass
class BlankLinesAfterCommentConditions:
# Comment that is a section comment
section_comment: bool = True

# First new line after comment (the comment's own line ending)
nl1_after_comment: bool = False

# Second new line after comment (first blank line)
nl2_after_comment: bool = False

# Third new line after comment (second blank line)
nl3_after_comment: bool = False

# A dedent
dedent: bool = False

# A class/function statement or statement decorator after dedent
stmt_or_decorator: bool = True

def is_all_passed(self):
return (
not self.section_comment
and self.nl1_after_comment
and self.nl2_after_comment
and self.nl3_after_comment
and self.dedent
and not self.stmt_or_decorator
)
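
# To see how these flags line up with a real token stream, here is a trace for
# the failing input exercised by the tests in this commit ("# Setup", then two
# blank lines, then code):
#
#   COMMENT '# Setup'          -> section_comment = False     (condition 1)
#   NL (ends the comment line) -> nl1_after_comment = True    (condition 2)
#   NL (first blank line)      -> nl2_after_comment = True    (condition 3)
#   NL (second blank line)     -> nl3_after_comment = True    (condition 4)
#   NAME 'User' (no DEDENT)    -> dedent = True, stmt_or_decorator = False
#
# is_all_passed() now returns True and ROU104 is reported at the second blank line.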


class Visitor(ast.NodeVisitor):
@@ -39,9 +75,73 @@ def vist(self, file_tokens: List[tokenize.TokenInfo]) -> None:
self._file_tokens = file_tokens

# run methods that generate errors using file tokens
self.lines_with_blank_lines_after_comments()
self.lines_with_invalid_docstrings()
self.lines_with_invalid_multi_line_strings()
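
# For reference, file_tokens is the module's raw token stream; a minimal sketch
# of producing one with just the standard library (independent of flake8's wiring):
#
#     import io
#     import tokenize
#
#     source = "# Setup\n\n\nUser = get_user_model()\n"
#     file_tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
#
# Each TokenInfo is (type, string, start, end, line); start is the (row, col)
# pair that ends up in self.errors via start_indices.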

def lines_with_blank_lines_after_comments(self) -> None:
"""
Comments should not have more than one blank line after them.
The exception to this rule is if a comment is a section comment like so:
# -----------------
# Section Comment
# -----------------
"""
# A set of boolean flags recording every condition it takes for an error to be found
# (see the field comments on the dataclass above)
conditions = BlankLinesAfterCommentConditions()

for i, (token_type, token_str, start_indices, _, _) in enumerate(self._file_tokens):
do_reset_conditions = False

# Dedenting in progress
if conditions.dedent and conditions.stmt_or_decorator and token_type == tokenize.DEDENT:
continue
# Condition 6: Not a class/function statement or statement decorator after dedent
elif (
conditions.dedent
and not (token_type == tokenize.NAME and token_str in CLASS_AND_FUNC_TOKENS)
and not (token_type == tokenize.OP and token_str == "@")
):
conditions.stmt_or_decorator = False
elif conditions.nl3_after_comment and not conditions.dedent:
# Condition 5a: A dedent
if token_type == tokenize.DEDENT:
conditions.dedent = True
# Condition 5b: Not a dedent, this meets enough conditions to be an error
else:
conditions.dedent = True
conditions.stmt_or_decorator = False

# use the previous token's start_indices, where the second blank line was found
start_indices = self._file_tokens[i - 1][2]
# Condition 4: Another new line after comment
elif conditions.nl2_after_comment and not conditions.nl3_after_comment and token_type == tokenize.NL:
conditions.nl3_after_comment = True
# Condition 3: Another new line after comment
elif conditions.nl1_after_comment and not conditions.nl2_after_comment and token_type == tokenize.NL:
conditions.nl2_after_comment = True
# Condition 2: New line after comment
elif not conditions.section_comment and not conditions.nl1_after_comment and token_type == tokenize.NL:
conditions.nl1_after_comment = True
# Condition 1: Comment that is not a section comment
elif (
conditions.section_comment
and token_type == tokenize.COMMENT
and not token_str.startswith(SECTION_COMMENT_START)
):
conditions.section_comment = False
else:
do_reset_conditions = True

if conditions.is_all_passed():
do_reset_conditions = True
self.errors.append((*start_indices, ROU104))

if do_reset_conditions:
conditions = BlankLinesAfterCommentConditions()

def lines_with_invalid_multi_line_strings(self) -> None:
"""
Multi-line strings should be single-quoted strings concatenated across multiple lines,
@@ -111,7 +111,7 @@ def lines_with_invalid_docstrings(self) -> None:
):
self.errors.append((*start_indices, ROU100))
# encountered a statement declaration, save its line number
- elif token_type == tokenize.NAME and token_str in DOCSTRING_STMT_TYPES:
+ elif token_type == tokenize.NAME and token_str in CLASS_AND_FUNC_TOKENS:
is_inside_stmt = True
# encountered the end of a statement declaration, save the line number
elif token_type == tokenize.OP and is_inside_stmt and token_str == ":":
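With the plugin installed, the new check runs under a normal flake8 invocation; limiting output to this plugin's codes would look roughly like this (file name and exact column are illustrative):

    $ flake8 --select=ROU example.py
    example.py:3:1: ROU104 Multiple blank lines are not allowed after a non-section comment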
Empty file added helpers/__init__.py
144 changes: 144 additions & 0 deletions helpers/tokens.py
@@ -0,0 +1,144 @@
# Python imports
from token import (
AMPER,
AMPEREQUAL,
ASYNC,
AT,
ATEQUAL,
AWAIT,
CIRCUMFLEX,
CIRCUMFLEXEQUAL,
COLON,
COLONEQUAL,
COMMA,
COMMENT,
DEDENT,
DOT,
DOUBLESLASH,
DOUBLESLASHEQUAL,
DOUBLESTAR,
DOUBLESTAREQUAL,
ELLIPSIS,
ENCODING,
ENDMARKER,
EQEQUAL,
EQUAL,
ERRORTOKEN,
GREATER,
GREATEREQUAL,
INDENT,
LBRACE,
LEFTSHIFT,
LEFTSHIFTEQUAL,
LESS,
LESSEQUAL,
LPAR,
LSQB,
MINEQUAL,
MINUS,
NAME,
NEWLINE,
NL,
NOTEQUAL,
NT_OFFSET,
NUMBER,
N_TOKENS,
OP,
PERCENT,
PERCENTEQUAL,
PLUS,
PLUSEQUAL,
RARROW,
RBRACE,
RIGHTSHIFT,
RIGHTSHIFTEQUAL,
RPAR,
RSQB,
SEMI,
SLASH,
SLASHEQUAL,
STAR,
STAREQUAL,
STRING,
TILDE,
TYPE_COMMENT,
TYPE_IGNORE,
VBAR,
VBAREQUAL,
)


"""
This lookup is helpful when you are debugging code that is doing lexical analysis.
For example:
token = token.COMMENT
print(f"This token is a {TOKEN_REPR[token]}")
"""
TOKEN_REPR = {
AMPER: "AMPER",
AMPEREQUAL: "AMPEREQUAL",
ASYNC: "ASYNC",
AT: "AT",
ATEQUAL: "ATEQUAL",
AWAIT: "AWAIT",
CIRCUMFLEX: "CIRCUMFLEX",
CIRCUMFLEXEQUAL: "CIRCUMFLEXEQUAL",
COLON: "COLON",
COLONEQUAL: "COLONEQUAL",
COMMA: "COMMA",
COMMENT: "COMMENT",
DEDENT: "DEDENT",
DOT: "DOT",
DOUBLESLASH: "DOUBLESLASH",
DOUBLESLASHEQUAL: "DOUBLESLASHEQUAL",
DOUBLESTAR: "DOUBLESTAR",
DOUBLESTAREQUAL: "DOUBLESTAREQUAL",
ELLIPSIS: "ELLIPSIS",
ENCODING: "ENCODING",
ENDMARKER: "ENDMARKER",
EQEQUAL: "EQEQUAL",
EQUAL: "EQUAL",
ERRORTOKEN: "ERRORTOKEN",
GREATER: "GREATER",
GREATEREQUAL: "GREATEREQUAL",
INDENT: "INDENT",
LBRACE: "LBRACE",
LEFTSHIFT: "LEFTSHIFT",
LEFTSHIFTEQUAL: "LEFTSHIFTEQUAL",
LESS: "LESS",
LESSEQUAL: "LESSEQUAL",
LPAR: "LPAR",
LSQB: "LSQB",
MINEQUAL: "MINEQUAL",
MINUS: "MINUS",
NAME: "NAME",
NEWLINE: "NEWLINE",
NL: "NL",
NOTEQUAL: "NOTEQUAL",
NT_OFFSET: "NT_OFFSET",
NUMBER: "NUMBER",
N_TOKENS: "N_TOKENS",
OP: "OP",
PERCENT: "PERCENT",
PERCENTEQUAL: "PERCENTEQUAL",
PLUS: "PLUS",
PLUSEQUAL: "PLUSEQUAL",
RARROW: "RARROW",
RBRACE: "RBRACE",
RIGHTSHIFT: "RIGHTSHIFT",
RIGHTSHIFTEQUAL: "RIGHTSHIFTEQUAL",
RPAR: "RPAR",
RSQB: "RSQB",
SEMI: "SEMI",
SLASH: "SLASH",
SLASHEQUAL: "SLASHEQUAL",
STAR: "STAR",
STAREQUAL: "STAREQUAL",
STRING: "STRING",
TILDE: "TILDE",
TYPE_COMMENT: "TYPE_COMMENT",
TYPE_IGNORE: "TYPE_IGNORE",
VBAR: "VBAR",
VBAREQUAL: "VBAREQUAL",
}
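
A short usage sketch for this lookup when inspecting a token stream (the printed names mirror the constants above):

    import io
    import tokenize

    from helpers.tokens import TOKEN_REPR

    for tok in tokenize.generate_tokens(io.StringIO("x = 1  # note\n").readline):
        print(TOKEN_REPR.get(tok.type, tok.type), repr(tok.string))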
38 changes: 38 additions & 0 deletions tests/test_flake8_routable.py
@@ -187,3 +187,41 @@ def test_correct_multi_line_string(self, multi_line_string):
def test_incorrect_multi_line_string(self):
errors = results(self.INVALID_MULTI_LINE_STRING)
assert errors == {"3:11: ROU102 Strings should not span multiple lines except comments or docstrings"}


class TestROU104:
BLANK_LINE_AFTER_COMMENT = "# Setup\n\nUser = get_user_model()\n"
BLANK_LINES_AFTER_SECTION = "# -------\n# Tests\n# -------\n\n\nX = 4"
BLANK_LINES_BEFORE_DEDENT_STATEMENT = (
"class FeatureFlagModelSerializer(serializers.ModelSerializer):\n"
' """FeatureFlag model serializer."""\n\n'
" class Meta:\n"
" model = FeatureFlag\n"
' fields = ["feature_flag"]\n\n'
" # Special method overrides\n\n"
" # Private methods\n\n"
" # Class methods\n\n"
" # Properties\n\n"
" # Overrides\n\n"
" # Validation\n\n"
" # Methods\n\n\n"
"@spicy_decorator\n"
"class FeatureSettingSerializer(NoCreateUpdateMixin, serializers.Serializer):\n"
" pass\n"
)

def test_incorrect_blank_lines_after_comment(self):
errors = results("# Setup\n\n\nUser = get_user_model()\n")
assert errors == {"3:0: ROU104 Multiple blank lines are not allowed after a non-section comment"}

@pytest.mark.parametrize(
"blank_lines_string",
(
BLANK_LINE_AFTER_COMMENT,
BLANK_LINES_AFTER_SECTION,
BLANK_LINES_BEFORE_DEDENT_STATEMENT,
),
)
def test_correct_blank_lines(self, blank_lines_string):
errors = results(blank_lines_string)
assert errors == set()
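
These tests rely on a results helper defined earlier in the module, outside this diff's context lines. In flake8 plugin suites it commonly looks like the sketch below; the Plugin entry-point name and constructor signature here are assumptions, not taken from this commit:

    import ast
    import io
    import tokenize

    from flake8_routable import Plugin  # assumed entry-point name

    def results(s):
        """Run the plugin over source string s, returning formatted errors."""
        tree = ast.parse(s)
        file_tokens = list(tokenize.generate_tokens(io.StringIO(s).readline))
        return {"{}:{}: {}".format(*r) for r in Plugin(tree, file_tokens).run()}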
