summary refs log tree commit diff
path: root/django/utils/translation
diff options
context:
space:
mode:
author Claude Paroz <claude@2xlibre.net> 2018-05-10 17:51:51 +0200
committer GitHub <noreply@github.com> 2018-05-10 17:51:51 +0200
commit 9c4ea63e878c053600c284e32d5f32d27a59b63a (patch)
tree d676a1e98179dd709086214beb3add8a6e9dcff3 /django/utils/translation
parent 1e20fedb352dde5db2657cad73f998ccdb1aa607 (diff)
Replaced TOKEN_* constants by TokenType enums.
Thanks Tim Graham for the review.
Diffstat (limited to 'django/utils/translation')
-rw-r--r-- django/utils/translation/template.py | 21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/django/utils/translation/template.py b/django/utils/translation/template.py
index 3659fab3b6..aa849b0937 100644
--- a/django/utils/translation/template.py
+++ b/django/utils/translation/template.py
@@ -2,10 +2,7 @@ import re
import warnings
from io import StringIO
-from django.template.base import (
- TOKEN_BLOCK, TOKEN_COMMENT, TOKEN_TEXT, TOKEN_VAR, TRANSLATOR_COMMENT_MARK,
- Lexer,
-)
+from django.template.base import TRANSLATOR_COMMENT_MARK, Lexer, TokenType
from . import TranslatorCommentWarning, trim_whitespace
@@ -63,7 +60,7 @@ def templatize(src, origin=None):
for t in Lexer(src).tokenize():
if incomment:
- if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
+ if t.token_type == TokenType.BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
@@ -79,7 +76,7 @@ def templatize(src, origin=None):
else:
comment.append(t.contents)
elif intrans:
- if t.token_type == TOKEN_BLOCK:
+ if t.token_type == TokenType.BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
@@ -130,12 +127,12 @@ def templatize(src, origin=None):
"Translation blocks must not include other block tags: "
"%s (%sline %d)" % (t.contents, filemsg, t.lineno)
)
- elif t.token_type == TOKEN_VAR:
+ elif t.token_type == TokenType.VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
- elif t.token_type == TOKEN_TEXT:
+ elif t.token_type == TokenType.TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
@@ -147,7 +144,7 @@ def templatize(src, origin=None):
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
- if t.token_type != TOKEN_COMMENT:
+ if t.token_type != TokenType.COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
@@ -163,7 +160,7 @@ def templatize(src, origin=None):
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
- if t.token_type == TOKEN_BLOCK:
+ if t.token_type == TokenType.BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
@@ -211,7 +208,7 @@ def templatize(src, origin=None):
incomment = True
else:
out.write(blankout(t.contents, 'B'))
- elif t.token_type == TOKEN_VAR:
+ elif t.token_type == TokenType.VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
@@ -221,7 +218,7 @@ def templatize(src, origin=None):
out.write(' %s ' % p.split(':', 1)[1])
else:
out.write(blankout(p, 'F'))
- elif t.token_type == TOKEN_COMMENT:
+ elif t.token_type == TokenType.COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
comment_lineno_cache = t.lineno