https://github.com/python/cpython/commit/4b5d3e0e721a952f4ac9d17bee331e6dfe543dcd
commit: 4b5d3e0e721a952f4ac9d17bee331e6dfe543dcd
branch: main
author: Lysandros Nikolaou <[email protected]>
committer: lysnikolaou <[email protected]>
date: 2024-06-12T20:52:55+02:00
summary:
gh-120343: Fix column offsets of multiline tokens in tokenize (#120391)
files:
M Lib/test/test_tokenize.py
M Python/Python-tokenize.c
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 36dba71766cc20..51aeb35f01065a 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1210,6 +1210,20 @@ def test_multiline_non_ascii_fstring(self):
FSTRING_END "\'\'\'" (2, 68) (2, 71)
""")
+ def test_multiline_non_ascii_fstring_with_expr(self):
+ self.check_tokenize("""\
+f'''
+ š This is a test {test_arg1}š
+š'''""", """\
+ FSTRING_START "f\'\'\'" (1, 0) (1, 4)
+ FSTRING_MIDDLE '\\n š This is a test ' (1, 4) (2, 21)
+ OP '{' (2, 21) (2, 22)
+ NAME 'test_arg1' (2, 22) (2, 31)
+ OP '}' (2, 31) (2, 32)
+ FSTRING_MIDDLE 'š\\nš' (2, 32) (3, 1)
+ FSTRING_END "\'\'\'" (3, 1) (3, 4)
+ """)
+
class GenerateTokensTest(TokenizeTest):
def check_tokenize(self, s, expected):
# Format the tokens in s in a table format.
diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c
index 2591dae35736ba..55c821754c2031 100644
--- a/Python/Python-tokenize.c
+++ b/Python/Python-tokenize.c
@@ -215,6 +215,7 @@ tokenizeriter_next(tokenizeriterobject *it)
const char *line_start = ISSTRINGLIT(type) ? it->tok->multi_line_start : it->tok->line_start;
PyObject* line = NULL;
+ int line_changed = 1;
if (it->tok->tok_extra_tokens && is_trailing_token) {
line = PyUnicode_FromString("");
} else {
@@ -229,12 +230,11 @@ tokenizeriter_next(tokenizeriterobject *it)
Py_XDECREF(it->last_line);
line = PyUnicode_DecodeUTF8(line_start, size, "replace");
it->last_line = line;
- if (it->tok->lineno != it->last_end_lineno) {
- it->byte_col_offset_diff = 0;
- }
+ it->byte_col_offset_diff = 0;
} else {
// Line hasn't changed so we reuse the cached one.
line = it->last_line;
+ line_changed = 0;
}
}
if (line == NULL) {
@@ -252,7 +252,13 @@ tokenizeriter_next(tokenizeriterobject *it)
Py_ssize_t byte_offset = -1;
if (token.start != NULL && token.start >= line_start) {
byte_offset = token.start - line_start;
- col_offset = byte_offset - it->byte_col_offset_diff;
+ if (line_changed) {
+            col_offset = _PyPegen_byte_offset_to_character_offset_line(line, 0, byte_offset);
+ it->byte_col_offset_diff = byte_offset - col_offset;
+ }
+ else {
+ col_offset = byte_offset - it->byte_col_offset_diff;
+ }
}
if (token.end != NULL && token.end >= it->tok->line_start) {
Py_ssize_t end_byte_offset = token.end - it->tok->line_start;
_______________________________________________
Python-checkins mailing list -- [email protected]
To unsubscribe send an email to [email protected]
https://mail.python.org/mailman3/lists/python-checkins.python.org/
Member address: [email protected]