summaryrefslogtreecommitdiff
path: root/django/utils/text.py
diff options
context:
space:
mode:
authorClaude Paroz <claude@2xlibre.net>2017-04-21 19:52:26 +0200
committerClaude Paroz <claude@2xlibre.net>2017-04-27 09:10:02 +0200
commit301de774c21d055e9e5a7073e5bffdb52bc71079 (patch)
tree4c0c65fd147d528cce920cbd6a16ffa493d832ca /django/utils/text.py
parent8ab7ce8558792f41637d6f87f2a8a117e169dd18 (diff)
Refs #27795 -- Replaced many force_text() with str()
Thanks Tim Graham for the review.
Diffstat (limited to 'django/utils/text.py')
-rw-r--r--django/utils/text.py23
1 file changed, 10 insertions, 13 deletions
diff --git a/django/utils/text.py b/django/utils/text.py
index cd9e704ea1..3e04f8bec7 100644
--- a/django/utils/text.py
+++ b/django/utils/text.py
@@ -4,7 +4,6 @@ import unicodedata
from gzip import GzipFile
from io import BytesIO
-from django.utils.encoding import force_text
from django.utils.functional import (
SimpleLazyObject, keep_lazy, keep_lazy_text, lazy,
)
@@ -15,7 +14,7 @@ from django.utils.translation import gettext as _, gettext_lazy, pgettext
@keep_lazy_text
def capfirst(x):
"""Capitalize the first letter of a string."""
- return x and force_text(x)[0].upper() + force_text(x)[1:]
+ return x and str(x)[0].upper() + str(x)[1:]
# Set up regular expressions
@@ -62,7 +61,7 @@ class Truncator(SimpleLazyObject):
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
- super().__init__(lambda: force_text(text))
+ super().__init__(lambda: str(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
@@ -230,7 +229,7 @@ def get_valid_filename(s):
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
- s = force_text(s).strip().replace(' ', '_')
+ s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
@@ -251,18 +250,17 @@ def get_text_list(list_, last_word=gettext_lazy('or')):
if len(list_) == 0:
return ''
if len(list_) == 1:
- return force_text(list_[0])
+ return str(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
- _(', ').join(force_text(i) for i in list_[:-1]),
- force_text(last_word), force_text(list_[-1]))
+ _(', ').join(str(i) for i in list_[:-1]), str(last_word), str(list_[-1])
+ )
@keep_lazy_text
def normalize_newlines(text):
"""Normalize CRLF and CR newlines to just LF."""
- text = force_text(text)
- return re_newlines.sub('\n', text)
+ return re_newlines.sub('\n', str(text))
@keep_lazy_text
@@ -349,8 +347,7 @@ def smart_split(text):
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
- text = force_text(text)
- for bit in smart_split_re.finditer(text):
+ for bit in smart_split_re.finditer(str(text)):
yield bit.group(0)
@@ -378,7 +375,7 @@ _entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
- return _entity_re.sub(_replace_entity, force_text(text))
+ return _entity_re.sub(_replace_entity, str(text))
@keep_lazy_text
@@ -409,7 +406,7 @@ def slugify(value, allow_unicode=False):
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
- value = force_text(value)
+ value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else: