https://github.com/python/cpython/commit/f75abf8bcf1664e72ac7f031bde4cbd1349fce42
commit: f75abf8bcf1664e72ac7f031bde4cbd1349fce42
branch: 3.12
author: Miss Islington (bot) <[email protected]>
committer: lysnikolaou <[email protected]>
date: 2024-06-12T19:10:35Z
summary:

[3.12] gh-120343: Fix column offsets of multiline tokens in tokenize 
(GH-120391) (#120428)

(cherry picked from commit 4b5d3e0e721a952f4ac9d17bee331e6dfe543dcd)

Co-authored-by: Lysandros Nikolaou <[email protected]>

files:
M Lib/test/test_tokenize.py
M Python/Python-tokenize.c

diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index bfd4b94daa9a81..2dc925036855c1 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1215,6 +1215,20 @@ def test_multiline_non_ascii_fstring(self):
     FSTRING_END "\'\'\'"         (2, 68) (2, 71)
     """)
 
+    def test_multiline_non_ascii_fstring_with_expr(self):
+        self.check_tokenize("""\
+f'''
+    šŸ”— This is a test {test_arg1}šŸ”—
+šŸ”—'''""", """\
+    FSTRING_START "f\'\'\'"        (1, 0) (1, 4)
+    FSTRING_MIDDLE '\\n    šŸ”— This is a test ' (1, 4) (2, 21)
+    OP         '{'           (2, 21) (2, 22)
+    NAME       'test_arg1'   (2, 22) (2, 31)
+    OP         '}'           (2, 31) (2, 32)
+    FSTRING_MIDDLE 'šŸ”—\\nšŸ”—'        (2, 32) (3, 1)
+    FSTRING_END "\'\'\'"         (3, 1) (3, 4)
+    """)
+
 class GenerateTokensTest(TokenizeTest):
     def check_tokenize(self, s, expected):
         # Format the tokens in s in a table format.
diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c
index ebcd9ce06ee4b4..baad836d7a3fad 100644
--- a/Python/Python-tokenize.c
+++ b/Python/Python-tokenize.c
@@ -214,6 +214,7 @@ tokenizeriter_next(tokenizeriterobject *it)
 
     const char *line_start = ISSTRINGLIT(type) ? it->tok->multi_line_start : it->tok->line_start;
     PyObject* line = NULL;
+    int line_changed = 1;
     if (it->tok->tok_extra_tokens && is_trailing_token) {
         line = PyUnicode_FromString("");
     } else {
@@ -228,12 +229,11 @@ tokenizeriter_next(tokenizeriterobject *it)
             Py_XDECREF(it->last_line);
             line = PyUnicode_DecodeUTF8(line_start, size, "replace");
             it->last_line = line;
-            if (it->tok->lineno != it->last_end_lineno) {
-                it->byte_col_offset_diff = 0;
-            }
+            it->byte_col_offset_diff = 0;
         } else {
             // Line hasn't changed so we reuse the cached one.
             line = it->last_line;
+            line_changed = 0;
         }
     }
     if (line == NULL) {
@@ -251,7 +251,13 @@ tokenizeriter_next(tokenizeriterobject *it)
     Py_ssize_t byte_offset = -1;
     if (token.start != NULL && token.start >= line_start) {
         byte_offset = token.start - line_start;
-        col_offset = byte_offset - it->byte_col_offset_diff;
+        if (line_changed) {
+            col_offset = _PyPegen_byte_offset_to_character_offset_line(line, 0, byte_offset);
+            it->byte_col_offset_diff = byte_offset - col_offset;
+        }
+        else {
+            col_offset = byte_offset - it->byte_col_offset_diff;
+        }
     }
     if (token.end != NULL && token.end >= it->tok->line_start) {
         Py_ssize_t end_byte_offset = token.end - it->tok->line_start;

_______________________________________________
Python-checkins mailing list -- [email protected]
To unsubscribe send an email to [email protected]
https://mail.python.org/mailman3/lists/python-checkins.python.org/
Member address: [email protected]

Reply via email to