https://github.com/python/cpython/commit/4a0af0cfdcc0b81da5d78dc219df4985c4403f9c
commit: 4a0af0cfdcc0b81da5d78dc219df4985c4403f9c
branch: 3.12
author: Miss Islington (bot) <[email protected]>
committer: lysnikolaou <[email protected]>
date: 2024-05-28T22:49:02+02:00
summary:

[3.12] gh-119118: Fix performance regression in tokenize module (GH-119615) (#119683)

- Cache the line object to avoid creating a new Unicode object
  for each of the tokens on the same line.
- Speed up byte offset to column offset conversion by using the
  smallest buffer possible to measure the difference.
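
As a rough illustration (not part of the commit), a pure-Python micro-benchmark
that exercises the case these changes target: many tokens on one long line,
where every token carries the same ``line`` attribute:

    import io
    import time
    import tokenize

    # One long line containing thousands of tokens; before the fix, each
    # token decoded its own Unicode copy of this line.
    src = "x = [" + ", ".join(str(n) for n in range(5000)) + "]\n"

    start = time.perf_counter()
    tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
    print(f"{len(tokens)} tokens in {time.perf_counter() - start:.3f}s")

    # The observable result is unchanged: every token on the line still
    # reports the same line string.
    print(all(tok.line == src for tok in tokens if tok.line))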

(cherry picked from commit d87b0151062e36e67f9e42e1595fba5bf23a485c)

Co-authored-by: Lysandros Nikolaou <[email protected]>
Co-authored-by: Pablo Galindo <[email protected]>

files:
A Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst
M Parser/pegen.c
M Parser/pegen.h
M Python/Python-tokenize.c

diff --git a/Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst b/Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst
new file mode 100644
index 00000000000000..3cf61662fe7767
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-05-28-12-15-03.gh-issue-119118.FMKz1F.rst
@@ -0,0 +1,2 @@
+Fix performance regression in the :mod:`tokenize` module by caching the ``line``
+token attribute and calculating the column offset more efficiently.
diff --git a/Parser/pegen.c b/Parser/pegen.c
index cbceaae599d207..5460fbb2ffee14 100644
--- a/Parser/pegen.c
+++ b/Parser/pegen.c
@@ -17,6 +17,31 @@ _PyPegen_interactive_exit(Parser *p)
     return NULL;
 }
 
+Py_ssize_t
+_PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset)
+{
+    const char *data = PyUnicode_AsUTF8(line);
+
+    Py_ssize_t len = 0;
+    while (col_offset < end_col_offset) {
+        Py_UCS4 ch = data[col_offset];
+        if (ch < 0x80) {
+            col_offset += 1;
+        } else if ((ch & 0xe0) == 0xc0) {
+            col_offset += 2;
+        } else if ((ch & 0xf0) == 0xe0) {
+            col_offset += 3;
+        } else if ((ch & 0xf8) == 0xf0) {
+            col_offset += 4;
+        } else {
+            PyErr_SetString(PyExc_ValueError, "Invalid UTF-8 sequence");
+            return -1;
+        }
+        len++;
+    }
+    return len;
+}
+
 Py_ssize_t
 _PyPegen_byte_offset_to_character_offset_raw(const char* str, Py_ssize_t col_offset)
 {
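
For reference, the new helper's leading-byte walk translates directly into
pure Python; a sketch (names illustrative, mirroring the C loop above):

    # Count how many characters the byte range [col_offset, end_col_offset)
    # of a UTF-8 encoded line covers, stepping over leading bytes only.
    def byte_range_to_char_count(line: bytes, col_offset: int, end_col_offset: int) -> int:
        length = 0
        while col_offset < end_col_offset:
            ch = line[col_offset]
            if ch < 0x80:              # 1-byte (ASCII) sequence
                col_offset += 1
            elif (ch & 0xE0) == 0xC0:  # 2-byte sequence
                col_offset += 2
            elif (ch & 0xF0) == 0xE0:  # 3-byte sequence
                col_offset += 3
            elif (ch & 0xF8) == 0xF0:  # 4-byte sequence
                col_offset += 4
            else:
                raise ValueError("Invalid UTF-8 sequence")
            length += 1
        return length

    line = "a = 'héllo'".encode("utf-8")            # 12 bytes, 11 characters
    print(byte_range_to_char_count(line, 0, len(line)))  # -> 11

Because only the token's own bytes are inspected, the cost is proportional to
the token length rather than to the length of the line prefix before it.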
diff --git a/Parser/pegen.h b/Parser/pegen.h
index c2a3e02b2e0aad..4617315a163944 100644
--- a/Parser/pegen.h
+++ b/Parser/pegen.h
@@ -150,6 +150,7 @@ int _PyPegen_fill_token(Parser *p);
 expr_ty _PyPegen_name_token(Parser *p);
 expr_ty _PyPegen_number_token(Parser *p);
 void *_PyPegen_string_token(Parser *p);
+Py_ssize_t _PyPegen_byte_offset_to_character_offset_line(PyObject *line, Py_ssize_t col_offset, Py_ssize_t end_col_offset);
 Py_ssize_t _PyPegen_byte_offset_to_character_offset(PyObject *line, Py_ssize_t col_offset);
 Py_ssize_t _PyPegen_byte_offset_to_character_offset_raw(const char*, Py_ssize_t col_offset);
 Py_ssize_t _PyPegen_calculate_display_width(PyObject *segment, Py_ssize_t character_offset);
diff --git a/Python/Python-tokenize.c b/Python/Python-tokenize.c
index 179f71aa1f5635..cb050e77b1520d 100644
--- a/Python/Python-tokenize.c
+++ b/Python/Python-tokenize.c
@@ -31,6 +31,11 @@ typedef struct
 {
     PyObject_HEAD struct tok_state *tok;
     int done;
+
+    /* Needed to cache line for performance */
+    PyObject *last_line;
+    Py_ssize_t last_lineno;
+    Py_ssize_t byte_col_offset_diff;
 } tokenizeriterobject;
 
 /*[clinic input]
@@ -67,6 +72,11 @@ tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
         self->tok->tok_extra_tokens = 1;
     }
     self->done = 0;
+
+    self->last_line = NULL;
+    self->byte_col_offset_diff = 0;
+    self->last_lineno = 0;
+
     return (PyObject *)self;
 }
 
@@ -209,7 +219,18 @@ tokenizeriter_next(tokenizeriterobject *it)
         if (size >= 1 && it->tok->implicit_newline) {
             size -= 1;
         }
-        line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+
+        if (it->tok->lineno != it->last_lineno) {
+            // Line has changed since last token, so we fetch the new line and cache it
+            // in the iter object.
+            Py_XDECREF(it->last_line);
+            line = PyUnicode_DecodeUTF8(line_start, size, "replace");
+            it->last_line = line;
+            it->byte_col_offset_diff = 0;
+        } else {
+            // Line hasn't changed so we reuse the cached one.
+            line = it->last_line;
+        }
     }
     if (line == NULL) {
         Py_DECREF(str);
@@ -218,13 +239,28 @@ tokenizeriter_next(tokenizeriterobject *it)
 
 Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
     Py_ssize_t end_lineno = it->tok->lineno;
+    it->last_lineno = lineno;
+
     Py_ssize_t col_offset = -1;
     Py_ssize_t end_col_offset = -1;
+    Py_ssize_t byte_offset = -1;
     if (token.start != NULL && token.start >= line_start) {
-        col_offset = _PyPegen_byte_offset_to_character_offset(line, token.start - line_start);
+        byte_offset = token.start - line_start;
+        col_offset = byte_offset - it->byte_col_offset_diff;
     }
     if (token.end != NULL && token.end >= it->tok->line_start) {
-        end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, token.end - it->tok->line_start);
+        Py_ssize_t end_byte_offset = token.end - it->tok->line_start;
+        if (lineno == end_lineno) {
+            // If the whole token is at the same line, we can just use the token.start
+            // buffer for figuring out the new column offset, since using line is not
+            // performant for very long lines.
+            Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset);
+            end_col_offset = col_offset + token_col_offset;
+            it->byte_col_offset_diff += token.end - token.start - token_col_offset;
+        } else {
+            end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset);
+            it->byte_col_offset_diff += end_byte_offset - end_col_offset;
+        }
     }
 
     if (it->tok->tok_extra_tokens) {
@@ -264,7 +300,7 @@ tokenizeriter_next(tokenizeriterobject *it)
         }
     }
 
-    result = Py_BuildValue("(iN(nn)(nn)N)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
+    result = Py_BuildValue("(iN(nn)(nn)O)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);
 exit:
     _PyToken_Free(&token);
     if (type == ENDMARKER) {

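Putting the two pieces together, a simplified pure-Python model (byte offsets
below are hand-picked for the example) of the iterator's new bookkeeping:
byte_col_offset_diff accumulates how many more bytes than characters the line
has produced so far, so each token's start column is recovered with one
subtraction instead of a rescan of the line prefix.

    line = "s = 'héllo' + 'wörld'"
    raw = line.encode("utf-8")

    byte_col_offset_diff = 0             # bytes seen so far minus characters seen
    string_tokens = [(4, 12), (15, 23)]  # (start, end) byte offsets of the two strings
    for start_byte, end_byte in string_tokens:
        col_offset = start_byte - byte_col_offset_diff
        token_chars = len(raw[start_byte:end_byte].decode("utf-8"))
        end_col_offset = col_offset + token_chars
        byte_col_offset_diff += (end_byte - start_byte) - token_chars
        print(line[col_offset:end_col_offset])  # 'héllo' then 'wörld'

The model uses decode() for brevity where the C code walks UTF-8 leading bytes,
but the arithmetic on byte_col_offset_diff is the same.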