Guido van Rossum wrote:
Thanks for the patches!  Applied, except for the change to
tokenize.py; instead, I changed test_tokenize.py to use io.StringIO.

--Guido

Glad to have the opportunity to help make the future happen. ;-)


This next one converts the unicode literals in tokenize.py and its tests to byte literals. I've also fixed some more unicode literals in a few other places I found.

Doing this first means the no-raw-escape patches won't pull in anything else.
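
If you want a quick way to see the effect, here's a minimal sketch (my own example, not part of the patch) that runs the patched tokenizer over a snippet using the new prefixes. With the patch, b'abc' and br'ABC' each come back as a single STRING token instead of a NAME followed by a STRING:

    import io
    import tokenize

    # Tokenize a snippet that uses the new byte-literal prefixes.
    source = "x = b'abc' + br'ABC'\n"
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        print(tok)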

Cheers,
   Ron


M      Lib/tokenize.py
M      Lib/test/tokenize_tests.txt
M      Lib/test/output/test_tokenize
- Removed unicode literals from the test results and from tokenize.py, and made it pass again.


M      Lib/test/output/test_pep277
- Removed unicode literals from the test results. This is a Windows-only test, so I can't test it.

M      Lib/test/test_codeccallbacks.py
M      Objects/exceptions.c
- Removed unicode literals from test_codeccallbacks.py and removed the unicode-literal quoting from exceptions.c to make it pass again (a quick check follows this list).

M      Lib/test/test_codecs.py
M      Lib/test/test_doctest.py
M      Lib/test/re_tests.py
- Removed some unicode literals from comments.
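
And to check the exceptions.c change from Python, something like this should now print the message without the u prefix (an untested sketch; the exact reason text comes from the ascii codec, not from this patch):

    try:
        "\xfc".encode("ascii")
    except UnicodeEncodeError as exc:
        # Expected: 'ascii' codec can't encode character '\xfc' in
        # position 0: ordinal not in range(128)
        print(exc)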

Index: Objects/exceptions.c
===================================================================
--- Objects/exceptions.c	(revision 56032)
+++ Objects/exceptions.c	(working copy)
@@ -1289,11 +1289,11 @@
         int badchar = (int)PyUnicode_AS_UNICODE(uself->object)[uself->start];
         const char *fmt;
         if (badchar <= 0xff)
-            fmt = "'%U' codec can't encode character u'\\x%02x' in position %zd: %U";
+            fmt = "'%U' codec can't encode character '\\x%02x' in position %zd: %U";
         else if (badchar <= 0xffff)
-            fmt = "'%U' codec can't encode character u'\\u%04x' in position %zd: %U";
+            fmt = "'%U' codec can't encode character '\\u%04x' in position %zd: %U";
         else
-            fmt = "'%U' codec can't encode character u'\\U%08x' in position %zd: %U";
+            fmt = "'%U' codec can't encode character '\\U%08x' in position %zd: %U";
         return PyUnicode_FromFormat(
             fmt,
             ((PyUnicodeErrorObject *)self)->encoding,
@@ -1440,11 +1440,11 @@
         int badchar = (int)PyUnicode_AS_UNICODE(uself->object)[uself->start];
         const char *fmt;
         if (badchar <= 0xff)
-            fmt = "can't translate character u'\\x%02x' in position %zd: %U";
+            fmt = "can't translate character '\\x%02x' in position %zd: %U";
         else if (badchar <= 0xffff)
-            fmt = "can't translate character u'\\u%04x' in position %zd: %U";
+            fmt = "can't translate character '\\u%04x' in position %zd: %U";
         else
-            fmt = "can't translate character u'\\U%08x' in position %zd: %U";
+            fmt = "can't translate character '\\U%08x' in position %zd: %U";
         return PyUnicode_FromFormat(
             fmt,
             badchar,
Index: Lib/tokenize.py
===================================================================
--- Lib/tokenize.py	(revision 56032)
+++ Lib/tokenize.py	(working copy)
@@ -69,10 +69,10 @@
 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
 # Tail end of """ string.
 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
+Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
 # Single-line ' or " string.
-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
-               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
 
 # Because of leftmost-then-longest match semantics, be sure to put the
 # longest operators first (e.g., if = came before ==, == would get
@@ -90,9 +90,9 @@
 Token = Ignore + PlainToken
 
 # First (or only) line of ' or " string.
-ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                 group("'", r'\\\r?\n'),
-                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                 group('"', r'\\\r?\n'))
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
@@ -102,28 +102,28 @@
 endprogs = {"'": re.compile(Single), '"': re.compile(Double),
             "'''": single3prog, '"""': double3prog,
             "r'''": single3prog, 'r"""': double3prog,
-            "u'''": single3prog, 'u"""': double3prog,
-            "ur'''": single3prog, 'ur"""': double3prog,
+            "b'''": single3prog, 'b"""': double3prog,
+            "br'''": single3prog, 'br"""': double3prog,
             "R'''": single3prog, 'R"""': double3prog,
-            "U'''": single3prog, 'U"""': double3prog,
-            "uR'''": single3prog, 'uR"""': double3prog,
-            "Ur'''": single3prog, 'Ur"""': double3prog,
-            "UR'''": single3prog, 'UR"""': double3prog,
-            'r': None, 'R': None, 'u': None, 'U': None}
+            "B'''": single3prog, 'B"""': double3prog,
+            "bR'''": single3prog, 'bR"""': double3prog,
+            "Br'''": single3prog, 'Br"""': double3prog,
+            "BR'''": single3prog, 'BR"""': double3prog,
+            'r': None, 'R': None, 'b': None, 'B': None}
 
 triple_quoted = {}
 for t in ("'''", '"""',
           "r'''", 'r"""', "R'''", 'R"""',
-          "u'''", 'u"""', "U'''", 'U"""',
-          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
-          "uR'''", 'uR"""', "UR'''", 'UR"""'):
+          "b'''", 'b"""', "B'''", 'B"""',
+          "br'''", 'br"""', "Br'''", 'Br"""',
+          "bR'''", 'bR"""', "BR'''", 'BR"""'):
     triple_quoted[t] = t
 single_quoted = {}
 for t in ("'", '"',
           "r'", 'r"', "R'", 'R"',
-          "u'", 'u"', "U'", 'U"',
-          "ur'", 'ur"', "Ur'", 'Ur"',
-          "uR'", 'uR"', "UR'", 'UR"' ):
+          "b'", 'b"', "B'", 'B"',
+          "br'", 'br"', "Br'", 'Br"',
+          "bR'", 'bR"', "BR'", 'BR"' ):
     single_quoted[t] = t
 
 tabsize = 8
Index: Lib/test/test_codecs.py
===================================================================
--- Lib/test/test_codecs.py	(revision 56032)
+++ Lib/test/test_codecs.py	(working copy)
@@ -459,10 +459,10 @@
                 "",
                 "\ufeff", # Second BOM has been read and emitted
                 "\ufeff\x00", # "\x00" read and emitted
-                "\ufeff\x00", # First byte of encoded u"\xff" read
-                "\ufeff\x00\xff", # Second byte of encoded u"\xff" read
-                "\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
-                "\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
+                "\ufeff\x00", # First byte of encoded "\xff" read
+                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
+                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
+                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                 "\ufeff\x00\xff\u07ff",
                 "\ufeff\x00\xff\u07ff",
                 "\ufeff\x00\xff\u07ff\u0800",
Index: Lib/test/tokenize_tests.txt
===================================================================
--- Lib/test/tokenize_tests.txt	(revision 56032)
+++ Lib/test/tokenize_tests.txt	(working copy)
@@ -110,19 +110,19 @@
 bar \\ baz
 """ + R'''spam
 '''
-x = u'abc' + U'ABC'
-y = u"abc" + U"ABC"
-x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'
-y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"
-x = ur'\\' + UR'\\'
-x = ur'\'' + ''
-y = ur'''
+x = b'abc' + B'ABC'
+y = b"abc" + B"ABC"
+x = br'abc' + Br'ABC' + bR'ABC' + BR'ABC'
+y = br"abc" + Br"ABC" + bR"ABC" + BR"ABC"
+x = br'\\' + BR'\\'
+x = br'\'' + ''
+y = br'''
 foo bar \\
-baz''' + UR'''
+baz''' + BR'''
 foo'''
-y = Ur"""foo
+y = Br"""foo
 bar \\ baz
-""" + uR'''spam
+""" + bR'''spam
 '''
 
 # Indentation
Index: Lib/test/output/test_pep277
===================================================================
--- Lib/test/output/test_pep277	(revision 56032)
+++ Lib/test/output/test_pep277	(working copy)
@@ -1,3 +1,3 @@
 test_pep277
-u'\xdf-\u66e8\u66e9\u66eb'
-[u'Gr\xfc\xdf-Gott', u'abc', u'ascii', u'\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2', u'\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435', u'\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1', u'\u306b\u307d\u3093', u'\u66e8\u05e9\u3093\u0434\u0393\xdf', u'\u66e8\u66e9\u66eb']
+'\xdf-\u66e8\u66e9\u66eb'
+['Gr\xfc\xdf-Gott', 'abc', 'ascii', '\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2', '\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435', '\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1', '\u306b\u307d\u3093', '\u66e8\u05e9\u3093\u0434\u0393\xdf', '\u66e8\u66e9\u66eb']
Index: Lib/test/output/test_tokenize
===================================================================
--- Lib/test/output/test_tokenize	(revision 56032)
+++ Lib/test/output/test_tokenize	(working copy)
@@ -342,59 +342,59 @@
 112,3-112,4:	NEWLINE	'\n'
 113,0-113,1:	NAME	'x'
 113,2-113,3:	OP	'='
-113,4-113,10:	STRING	"u'abc'"
+113,4-113,10:	STRING	"b'abc'"
 113,11-113,12:	OP	'+'
-113,13-113,19:	STRING	"U'ABC'"
+113,13-113,19:	STRING	"B'ABC'"
 113,19-113,20:	NEWLINE	'\n'
 114,0-114,1:	NAME	'y'
 114,2-114,3:	OP	'='
-114,4-114,10:	STRING	'u"abc"'
+114,4-114,10:	STRING	'b"abc"'
 114,11-114,12:	OP	'+'
-114,13-114,19:	STRING	'U"ABC"'
+114,13-114,19:	STRING	'B"ABC"'
 114,19-114,20:	NEWLINE	'\n'
 115,0-115,1:	NAME	'x'
 115,2-115,3:	OP	'='
-115,4-115,11:	STRING	"ur'abc'"
+115,4-115,11:	STRING	"br'abc'"
 115,12-115,13:	OP	'+'
-115,14-115,21:	STRING	"Ur'ABC'"
+115,14-115,21:	STRING	"Br'ABC'"
 115,22-115,23:	OP	'+'
-115,24-115,31:	STRING	"uR'ABC'"
+115,24-115,31:	STRING	"bR'ABC'"
 115,32-115,33:	OP	'+'
-115,34-115,41:	STRING	"UR'ABC'"
+115,34-115,41:	STRING	"BR'ABC'"
 115,41-115,42:	NEWLINE	'\n'
 116,0-116,1:	NAME	'y'
 116,2-116,3:	OP	'='
-116,4-116,11:	STRING	'ur"abc"'
+116,4-116,11:	STRING	'br"abc"'
 116,12-116,13:	OP	'+'
-116,14-116,21:	STRING	'Ur"ABC"'
+116,14-116,21:	STRING	'Br"ABC"'
 116,22-116,23:	OP	'+'
-116,24-116,31:	STRING	'uR"ABC"'
+116,24-116,31:	STRING	'bR"ABC"'
 116,32-116,33:	OP	'+'
-116,34-116,41:	STRING	'UR"ABC"'
+116,34-116,41:	STRING	'BR"ABC"'
 116,41-116,42:	NEWLINE	'\n'
 117,0-117,1:	NAME	'x'
 117,2-117,3:	OP	'='
-117,4-117,10:	STRING	"ur'\\\\'"
+117,4-117,10:	STRING	"br'\\\\'"
 117,11-117,12:	OP	'+'
-117,13-117,19:	STRING	"UR'\\\\'"
+117,13-117,19:	STRING	"BR'\\\\'"
 117,19-117,20:	NEWLINE	'\n'
 118,0-118,1:	NAME	'x'
 118,2-118,3:	OP	'='
-118,4-118,10:	STRING	"ur'\\''"
+118,4-118,10:	STRING	"br'\\''"
 118,11-118,12:	OP	'+'
 118,13-118,15:	STRING	"''"
 118,15-118,16:	NEWLINE	'\n'
 119,0-119,1:	NAME	'y'
 119,2-119,3:	OP	'='
-119,4-121,6:	STRING	"ur'''\nfoo bar \\\\\nbaz'''"
+119,4-121,6:	STRING	"br'''\nfoo bar \\\\\nbaz'''"
 121,7-121,8:	OP	'+'
-121,9-122,6:	STRING	"UR'''\nfoo'''"
+121,9-122,6:	STRING	"BR'''\nfoo'''"
 122,6-122,7:	NEWLINE	'\n'
 123,0-123,1:	NAME	'y'
 123,2-123,3:	OP	'='
-123,4-125,3:	STRING	'Ur"""foo\nbar \\\\ baz\n"""'
+123,4-125,3:	STRING	'Br"""foo\nbar \\\\ baz\n"""'
 125,4-125,5:	OP	'+'
-125,6-126,3:	STRING	"uR'''spam\n'''"
+125,6-126,3:	STRING	"bR'''spam\n'''"
 126,3-126,4:	NEWLINE	'\n'
 127,0-127,1:	NL	'\n'
 128,0-128,13:	COMMENT	'# Indentation'
Index: Lib/test/test_doctest.py
===================================================================
--- Lib/test/test_doctest.py	(revision 56032)
+++ Lib/test/test_doctest.py	(working copy)
@@ -2249,11 +2249,11 @@
     **********************************************************************
     File "...", line 7, in test_doctest4.txt
     Failed example:
-        u'...'
+        '...'
     Expected:
-        u'f\xf6\xf6'
+        'f\xf6\xf6'
     Got:
-        u'f\xc3\xb6\xc3\xb6'
+        'f\xc3\xb6\xc3\xb6'
     **********************************************************************
     ...
     **********************************************************************
Index: Lib/test/test_codeccallbacks.py
===================================================================
--- Lib/test/test_codeccallbacks.py	(revision 56032)
+++ Lib/test/test_codeccallbacks.py	(working copy)
@@ -329,7 +329,7 @@
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
             ["ascii", "g\xfcrk", 1, 2, "ouch"],
-            "'ascii' codec can't encode character u'\\xfc' in position 1: ouch"
+            "'ascii' codec can't encode character '\\xfc' in position 1: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
@@ -339,23 +339,23 @@
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
             ["ascii", "\xfcx", 0, 1, "ouch"],
-            "'ascii' codec can't encode character u'\\xfc' in position 0: ouch"
+            "'ascii' codec can't encode character '\\xfc' in position 0: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
             ["ascii", "\u0100x", 0, 1, "ouch"],
-            "'ascii' codec can't encode character u'\\u0100' in position 0: ouch"
+            "'ascii' codec can't encode character '\\u0100' in position 0: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeEncodeError,
             ["ascii", "\uffffx", 0, 1, "ouch"],
-            "'ascii' codec can't encode character u'\\uffff' in position 0: ouch"
+            "'ascii' codec can't encode character '\\uffff' in position 0: ouch"
         )
         if sys.maxunicode > 0xffff:
             self.check_exceptionobjectargs(
                 UnicodeEncodeError,
                 ["ascii", "\U00010000x", 0, 1, "ouch"],
-                "'ascii' codec can't encode character u'\\U00010000' in position 0: ouch"
+                "'ascii' codec can't encode character '\\U00010000' in position 0: ouch"
             )
 
     def test_unicodedecodeerror(self):
@@ -374,23 +374,23 @@
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
             ["g\xfcrk", 1, 2, "ouch"],
-            "can't translate character u'\\xfc' in position 1: ouch"
+            "can't translate character '\\xfc' in position 1: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
             ["g\u0100rk", 1, 2, "ouch"],
-            "can't translate character u'\\u0100' in position 1: ouch"
+            "can't translate character '\\u0100' in position 1: ouch"
         )
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
             ["g\uffffrk", 1, 2, "ouch"],
-            "can't translate character u'\\uffff' in position 1: ouch"
+            "can't translate character '\\uffff' in position 1: ouch"
         )
         if sys.maxunicode > 0xffff:
             self.check_exceptionobjectargs(
                 UnicodeTranslateError,
                 ["g\U00010000rk", 1, 2, "ouch"],
-                "can't translate character u'\\U00010000' in position 1: ouch"
+                "can't translate character '\\U00010000' in position 1: ouch"
             )
         self.check_exceptionobjectargs(
             UnicodeTranslateError,
Index: Lib/test/re_tests.py
===================================================================
--- Lib/test/re_tests.py	(revision 56032)
+++ Lib/test/re_tests.py	(working copy)
@@ -662,7 +662,7 @@
 ]
 
 try:
-    u = eval("u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'")
+    u = eval("'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'")
 except SyntaxError:
     pass
 else: