Skip to content

Commit bdb3c4f

Browse files
committed
Initial commit of a pygments plugin for Fluent formatting.
1 parent 267e2e0 commit bdb3c4f

File tree

10 files changed

+204
-1
lines changed

10 files changed

+204
-1
lines changed

README.md

+6-1
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ you're a tool author you may be interested in the formal [EBNF grammar][].
3636
Installation
3737
------------
3838

39-
python-fluent consists of two packages:
39+
python-fluent consists of these packages:
4040

4141
* `fluent.syntax` - includes AST classes and parser. Most end users will not
4242
need this directly. Documentation coming soon!
@@ -54,6 +54,11 @@ python-fluent consists of two packages:
5454

5555
(The correct version of ``fluent.syntax`` will be installed automatically)
5656

57+
* `fluent.pygments` - a Pygments plugin that adds syntax highlighting for Fluent files, e.g. for use in Sphinx documentation.
58+
59+
To install:
60+
61+
pip install fluent.pygments
5762

5863
PyPI also contains an old `fluent` package which is an older version of just
5964
`fluent.syntax`.

fluent.pygments/fluent/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# Declare ``fluent`` as a pkgutil-style namespace package so that
# separately-installed distributions (``fluent.syntax``, ``fluent.pygments``)
# can all contribute sub-packages under the single ``fluent`` top level.
__path__ = __import__('pkgutil').extend_path(__path__, __name__)

fluent.pygments/fluent/pygments/__init__.py

Whitespace-only changes.
+21
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
from __future__ import absolute_import, print_function, unicode_literals
2+
3+
import argparse
4+
import sys
5+
6+
from pygments import highlight
7+
from pygments.formatters import Terminal256Formatter
8+
from fluent.pygments.lexer import FluentLexer
9+
10+
11+
def main():
12+
parser = argparse.ArgumentParser()
13+
parser.add_argument('path')
14+
args = parser.parse_args()
15+
with open(args.path) as fh:
16+
code = fh.read()
17+
highlight(code, FluentLexer(), Terminal256Formatter(), sys.stdout)
18+
19+
20+
if __name__ == '__main__':
21+
main()
+91
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
from __future__ import absolute_import, print_function, unicode_literals
2+
3+
from fluent.syntax import ast as FTL
4+
from fluent.syntax import parse
5+
6+
from pygments.lexer import Lexer
7+
from pygments.token import Token
8+
9+
10+
class FluentLexer(Lexer):
    """Pygments lexer for Fluent (FTL) localization files."""

    name = 'Fluent Lexer'
    aliases = ['fluent', 'ftl']
    filenames = ['*.ftl']

    def get_tokens_unprocessed(self, text):
        """Yield ``(index, token_type, value)`` triples covering *text*.

        Tokens come from the parsed AST; any text between two AST tokens
        (whitespace, ``=``, braces, ...) is emitted as ``Token.Punctuation``
        so the whole input is covered.
        """
        cursor = 0
        for node, begin, tok_type, value in Tokenizer(text).tokenize():
            if begin > cursor:
                # Fill the gap up to this token with punctuation.
                yield cursor, Token.Punctuation, text[cursor:begin]
            cursor = node.span.end
            yield begin, tok_type, value
        if cursor < len(text):
            # Trailing text after the last recognized token.
            yield cursor, Token.Punctuation, text[cursor:]
26+
27+
28+
# AST node classes that map one-to-one onto a single Pygments token type.
# A node whose class name appears here is emitted as one token covering its
# whole span; any other node type is tokenized recursively by Tokenizer.
ATOMIC = {
    'Comment': Token.Comment.Multiline,
    'GroupComment': Token.Comment.Multiline,
    'ResourceComment': Token.Comment.Multiline,
    'Identifier': Token.Name.Constant,
    'TextElement': Token.Literal,
    'NumberLiteral': Token.Literal.Number,
    'StringLiteral': Token.Literal.String,
    'VariableReference': Token.Name.Variable,
    'Junk': Token.Generic.Error,
}
39+
40+
41+
class Tokenizer(object):
    """Walk a parsed Fluent AST and emit lexer token tuples.

    Each emitted tuple is ``(node, start_offset, token_type, matched_text)``,
    consumed by ``FluentLexer.get_tokens_unprocessed``.
    """

    def __init__(self, text):
        self.text = text
        self.ast = parse(text)

    def tokenize(self, node=None):
        """Recursively tokenize *node*; defaults to the whole resource."""
        if node is None:
            node = self.ast
        # Annotations and spans carry no lexical content of their own.
        if isinstance(node, (FTL.Annotation, FTL.Span)):
            return
        if isinstance(node, FTL.SyntaxNode):
            for tok in self.tokenize_node(node):
                yield tok
        elif isinstance(node, list):
            for item in node:
                for tok in self.tokenize(item):
                    yield tok

    def tokenize_node(self, node):
        """Tokenize a single AST node, dispatching on its class name."""
        nodename = type(node).__name__
        if nodename in ATOMIC:
            # The whole node maps onto exactly one token type.
            yield self._token(node, ATOMIC[nodename])
            return
        # Prefer a node-specific handler (e.g. tokenize_Variant); fall back
        # to the generic child-walking strategy.
        handler = getattr(self, 'tokenize_{}'.format(nodename), self.generic_tokenize)
        for tok in handler(node):
            yield tok

    def generic_tokenize(self, node):
        """Tokenize all child nodes/lists in source order."""
        def span_start(child):
            # Lists are ordered, so the first element's start stands in
            # for the list's position.
            if isinstance(child, FTL.SyntaxNode):
                return child.span.start
            return child[0].span.start

        kids = [
            value for value in vars(node).values()
            if isinstance(value, (FTL.SyntaxNode, list)) and value != []
        ]
        for child in sorted(kids, key=span_start):
            for tok in self.tokenize(child):
                yield tok

    def tokenize_Variant(self, node):
        """Variant keys highlight as attributes; values recurse normally."""
        yield self._token(node.key, Token.Name.Attribute)
        for tok in self.tokenize(node.value):
            yield tok

    def _token(self, node, token):
        """Build the (node, start, token_type, text) tuple for *node*."""
        span = node.span
        return (node, span.start, token, self.text[span.start:span.end])

fluent.pygments/setup.cfg

+21
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
[bdist_wheel]
# Pure-Python package that supports both Python 2 and 3.
universal=1

[flake8]
exclude=.tox
max-line-length=120

[isort]
line_length=120
skip_glob=.tox
not_skip=__init__.py

[options]
install_requires =
    pygments
    fluent.syntax
    six

[options.entry_points]
# Register the lexer with Pygments so the 'fluent'/'ftl' aliases work
# without any explicit import by the consumer (e.g. Sphinx).
pygments.lexers =
    fluent=fluent.pygments.lexer:FluentLexer

fluent.pygments/setup.py

+23
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
#!/usr/bin/env python
"""Packaging metadata for fluent.pygments.

Install requirements and the Pygments entry point are declared in setup.cfg.
"""
from setuptools import setup

setup(name='fluent.pygments',
      version='0.1.0',
      description='Pygments lexer for Fluent.',
      long_description='See https://github.com/projectfluent/python-fluent/ for more info.',
      author='Mozilla',
      author_email='[email protected]',
      # Normalized from the ambiguous 'APL 2': the project is licensed under
      # the Apache License 2.0, matching the classifier below.
      license='Apache License 2.0',
      url='https://github.com/projectfluent/python-fluent',
      keywords=['fluent', 'pygments'],
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.5',
      ],
      packages=['fluent', 'fluent.pygments'],
      tests_require=['six'],
      test_suite='tests.pygments'
      )

fluent.pygments/tests/__init__.py

Whitespace-only changes.

fluent.pygments/tests/pygments/__init__.py

Whitespace-only changes.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
from __future__ import absolute_import, print_function, unicode_literals
2+
3+
import unittest
4+
from pygments.token import Token
5+
6+
from fluent.pygments.lexer import FluentLexer
7+
8+
9+
class LexerTest(unittest.TestCase):
    """Black-box checks of FluentLexer.get_tokens on tiny FTL fragments."""

    def setUp(self):
        self.lexer = FluentLexer()

    def _assert_lexes(self, fragment, expected):
        # get_tokens yields (token_type, value) pairs.
        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))

    def test_comment(self):
        self._assert_lexes(
            '# comment\n',
            [
                (Token.Comment.Multiline, '# comment'),
                (Token.Punctuation, '\n'),
            ],
        )

    def test_message(self):
        self._assert_lexes(
            'msg = some value\n',
            [
                (Token.Name.Constant, 'msg'),
                (Token.Punctuation, ' = '),
                (Token.Literal, 'some value'),
                (Token.Punctuation, '\n'),
            ],
        )

    def test_message_with_comment(self):
        self._assert_lexes(
            '# good comment\nmsg = some value\n',
            [
                (Token.Comment.Multiline, '# good comment'),
                (Token.Punctuation, '\n'),
                (Token.Name.Constant, 'msg'),
                (Token.Punctuation, ' = '),
                (Token.Literal, 'some value'),
                (Token.Punctuation, '\n'),
            ],
        )

0 commit comments

Comments
 (0)