Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-asttokens for openSUSE:Factory checked in at 2022-12-03 10:03:11
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-asttokens (Old)
 and      /work/SRC/openSUSE:Factory/.python-asttokens.new.1835 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-asttokens"

Sat Dec  3 10:03:11 2022 rev:6 rq:1039428 version:2.1.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-asttokens/python-asttokens.changes       2022-09-25 15:34:30.571508257 +0200
+++ /work/SRC/openSUSE:Factory/.python-asttokens.new.1835/python-asttokens.changes     2022-12-03 10:03:12.475097398 +0100
@@ -1,0 +2,7 @@
+Thu Dec  1 22:18:13 UTC 2022 - Yogalakshmi Arunachalam <yarunacha...@suse.com>
+
+- Update to version 2.1.0 
+  * Merge pull request #93 from gristlabs/unmarked2
+      ASTText class that doesn't require tokens
+
+-------------------------------------------------------------------
@@ -10,0 +18,10 @@
+-------------------------------------------------------------------
+Fri Sep 23 02:15:13 UTC 2022 - Yogalakshmi Arunachalam <yarunacha...@suse.com>
+
+- Update to 2.0.8 
+  * Merge pull request #90 from palfrey/fix-explicit-import
+  * Fix mypy explicit re-export issues
+
+- Update to 2.0.7 
+  * Merge pull request #87 from gristlabs/astroid-type-checking
+  * Fix astroid type checking import errors

Old:
----
  asttokens-2.0.8.tar.gz

New:
----
  asttokens-2.1.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-asttokens.spec ++++++
--- /var/tmp/diff_new_pack.JKrNGR/_old  2022-12-03 10:03:13.175101288 +0100
+++ /var/tmp/diff_new_pack.JKrNGR/_new  2022-12-03 10:03:13.183101332 +0100
@@ -21,7 +21,7 @@
 %define skip_python2 1
 %define skip_python36 1
 Name:           python-asttokens
-Version:        2.0.8
+Version:        2.1.0
 Release:        0
 Summary:        Annotate AST trees with source code positions
 License:        Apache-2.0

++++++ asttokens-2.0.8.tar.gz -> asttokens-2.1.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/PKG-INFO new/asttokens-2.1.0/PKG-INFO
--- old/asttokens-2.0.8/PKG-INFO        2022-08-15 12:49:38.757367100 +0200
+++ new/asttokens-2.1.0/PKG-INFO        2022-10-29 13:23:38.843023000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: asttokens
-Version: 2.0.8
+Version: 2.1.0
 Summary: Annotate AST trees with source code positions
 Home-page: https://github.com/gristlabs/asttokens
 Author: Dmitry Sagalovskiy, Grist Labs
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/asttokens/__init__.py new/asttokens-2.1.0/asttokens/__init__.py
--- old/asttokens-2.0.8/asttokens/__init__.py   2022-08-15 12:49:10.000000000 +0200
+++ new/asttokens-2.1.0/asttokens/__init__.py   2022-10-29 12:14:52.000000000 +0200
@@ -19,6 +19,6 @@
 """
 
 from .line_numbers import LineNumbers
-from .asttokens import ASTTokens
+from .asttokens import ASTText, ASTTokens, supports_tokenless
 
-__all__ = ['ASTTokens', 'LineNumbers']
+__all__ = ['ASTText', 'ASTTokens', 'LineNumbers', 'supports_tokenless']
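
(A trivial sketch, not part of the diff: with the expanded ``__all__`` above, the new
helpers import straight from the package root.)

    from asttokens import ASTText, ASTTokens, LineNumbers, supports_tokenless
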
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/asttokens/asttokens.py new/asttokens-2.1.0/asttokens/asttokens.py
--- old/asttokens-2.0.8/asttokens/asttokens.py  2022-08-08 16:46:49.000000000 +0200
+++ new/asttokens-2.1.0/asttokens/asttokens.py  2022-10-29 12:14:52.000000000 +0200
@@ -12,23 +12,81 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import abc
 import ast
 import bisect
-import io
+import sys
 import token
-import tokenize
-from .util import Token, match_token, is_non_coding_token, patched_generate_tokens
+from ast import Module
+from typing import Iterable, Iterator, List, Optional, Tuple, Any, cast, TYPE_CHECKING, Type
+
 import six
-from six.moves import xrange      # pylint: disable=redefined-builtin
+from six.moves import xrange  # pylint: disable=redefined-builtin
+
 from .line_numbers import LineNumbers
-from typing import Callable, Iterator, List, Optional, Tuple, Any, cast,TYPE_CHECKING
-from ast import Module
+from .util import Token, match_token, is_non_coding_token, patched_generate_tokens, last_stmt, annotate_fstring_nodes, generate_tokens
+
+if TYPE_CHECKING:  # pragma: no cover
+  from .util import AstNode, TokenInfo
+
+
+class ASTTextBase(six.with_metaclass(abc.ABCMeta, object)):
+  def __init__(self, source_text, filename):
+    # type: (Any, str) -> None
+    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
+    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases
 
-if TYPE_CHECKING:
-  from .util import AstNode
+    self._filename = filename
+
+    # Decode source after parsing to let Python 2 handle coding declarations.
+    # (If the encoding was not utf-8 compatible, then even if it parses correctly,
+    # we'll fail with a unicode error here.)
+    source_text = six.ensure_text(source_text)
 
+    self._text = source_text
+    self._line_numbers = LineNumbers(source_text)
 
-class ASTTokens(object):
+  @abc.abstractmethod
+  def get_text_positions(self, node, padded):
+    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
+    """
+    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
+    If the positions can't be determined, or the nodes don't correspond to any particular text,
+    returns ``(1, 0)`` for both.
+
+    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
+    This means that if ``padded`` is True, the start position will be adjusted to include
+    leading whitespace if ``node`` is a multiline statement.
+    """
+    raise NotImplementedError
+
+  def get_text_range(self, node, padded=True):
+    # type: (AstNode, bool) -> Tuple[int, int]
+    """
+    Returns the (startpos, endpos) positions in source text corresponding to the given node.
+    Returns (0, 0) for nodes (like `Load`) that don't correspond to any particular text.
+
+    See ``get_text_positions()`` for details on the ``padded`` argument.
+    """
+    start, end = self.get_text_positions(node, padded)
+    return (
+      self._line_numbers.line_to_offset(*start),
+      self._line_numbers.line_to_offset(*end),
+    )
+
+  def get_text(self, node, padded=True):
+    # type: (AstNode, bool) -> str
+    """
+    Returns the text corresponding to the given node.
+    Returns '' for nodes (like `Load`) that don't correspond to any particular text.
+
+    See ``get_text_positions()`` for details on the ``padded`` argument.
+    """
+    start, end = self.get_text_range(node, padded)
+    return self._text[start: end]
+
+
+class ASTTokens(ASTTextBase, object):
   """
  ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
  as tokens, and is used to mark and access token and position information.
@@ -47,24 +105,20 @@
  If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
  tree created separately.
   """
-  def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
-    # type: (Any, bool, Optional[Module], str) -> None
-    # FIXME: Strictly, the type of source_type is one of the six string types, but hard to specify with mypy given
-    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases
 
-    self._filename = filename
-    self._tree = ast.parse(source_text, filename) if parse else tree
+  def __init__(self, source_text, parse=False, tree=None, filename='<unknown>', tokens=None):
+    # type: (Any, bool, Optional[Module], str, Iterable[TokenInfo]) -> None
+    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
+    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases
 
-    # Decode source after parsing to let Python 2 handle coding declarations.
-    # (If the encoding was not utf-8 compatible, then even if it parses correctly,
-    # we'll fail with a unicode error here.)
-    source_text = six.ensure_text(source_text)
+    super(ASTTokens, self).__init__(source_text, filename)
 
-    self._text = source_text
-    self._line_numbers = LineNumbers(source_text)
+    self._tree = ast.parse(source_text, filename) if parse else tree
 
     # Tokenize the code.
-    self._tokens = list(self._generate_tokens(source_text))
+    if tokens is None:
+      tokens = generate_tokens(self._text)
+    self._tokens = list(self._translate_tokens(tokens))
 
     # Extract the start positions of all tokens, so that we can quickly map positions to tokens.
     self._token_offsets = [tok.startpos for tok in self._tokens]
@@ -72,7 +126,6 @@
     if self._tree:
       self.mark_tokens(self._tree)
 
-
   def mark_tokens(self, root_node):
     # type: (Module) -> None
     """
@@ -85,16 +138,11 @@
     from .mark_tokens import MarkTokens # to avoid import loops
     MarkTokens(self).visit_tree(root_node)
 
-
-  def _generate_tokens(self, text):
-    # type: (str) -> Iterator[Token]
+  def _translate_tokens(self, original_tokens):
+    # type: (Iterable[TokenInfo]) -> Iterator[Token]
     """
-    Generates tokens for the given code.
+    Translates the given standard library tokens into our own representation.
     """
-    # tokenize.generate_tokens is technically an undocumented API for Python3, but allows us to use the same API as for
-    # Python2. See http://stackoverflow.com/a/4952291/328565.
-    # FIXME: Remove cast once https://github.com/python/typeshed/issues/7003 gets fixed
-    original_tokens = tokenize.generate_tokens(cast(Callable[[], str], io.StringIO(text).readline))
     for index, tok in enumerate(patched_generate_tokens(original_tokens)):
       tok_type, tok_str, start, end, line = tok
       yield Token(tok_type, tok_str, start, end, line, index,
@@ -210,28 +258,188 @@
     """
     return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
 
-  def get_text_range(self, node):
-    # type: (AstNode) -> Tuple[int, int]
+  def get_text_positions(self, node, padded):
+    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
     """
-    After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
-    corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
-    to any particular text.
+    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
+    If the positions can't be determined, or the nodes don't correspond to any particular text,
+    returns ``(1, 0)`` for both.
+
+    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
+    This means that if ``padded`` is True, the start position will be adjusted to include
+    leading whitespace if ``node`` is a multiline statement.
     """
     if not hasattr(node, 'first_token'):
-      return (0, 0)
+      return (1, 0), (1, 0)
 
-    start = node.first_token.startpos
-    if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
-      # Multi-line nodes would be invalid unless we keep the indentation of the first node.
-      start = self._text.rfind('\n', 0, start) + 1
+    start = node.first_token.start
+    end = node.last_token.end
+    if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
+      # Set col_offset to 0 to include leading indentation for multiline statements.
+      start = (start[0], 0)
 
-    return (start, node.last_token.endpos)
+    return start, end
 
-  def get_text(self, node):
-    # type: (AstNode) -> str
-    """
-    After mark_tokens() has been called, returns the text corresponding to the given node. Returns
-    '' for nodes (like `Load`) that don't correspond to any particular text.
-    """
-    start, end = self.get_text_range(node)
-    return self._text[start : end]
+
+class ASTText(ASTTextBase, object):
+  """
+  Supports the same ``get_text*`` methods as ``ASTTokens``,
+  but uses the AST to determine the text positions instead of tokens.
+  This is faster than ``ASTTokens`` as it requires less setup work.
+
+  It also (sometimes) supports nodes inside f-strings, which ``ASTTokens`` doesn't.
+
+  Astroid trees are not supported at all and will raise an error.
+
+  Some node types and/or Python versions are not supported.
+  In these cases the ``get_text*`` methods will fall back to using ``ASTTokens``
+  which incurs the usual setup cost the first time.
+  If you want to avoid this, check ``supports_tokenless(node)`` before calling ``get_text*`` methods.
+  """
+  def __init__(self, source_text, tree=None, filename='<unknown>'):
+    # type: (Any, Optional[Module], str) -> None
+    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
+    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases
+
+    if not isinstance(tree, (ast.AST, type(None))):
+      raise NotImplementedError('ASTText only supports AST trees')
+
+    super(ASTText, self).__init__(source_text, filename)
+
+    self._tree = tree
+    if self._tree is not None:
+      annotate_fstring_nodes(self._tree)
+
+    self._asttokens = None  # type: Optional[ASTTokens]
+
+  @property
+  def tree(self):
+    # type: () -> Module
+    if self._tree is None:
+      self._tree = ast.parse(self._text, self._filename)
+      annotate_fstring_nodes(self._tree)
+    return self._tree
+
+  @property
+  def asttokens(self):
+    # type: () -> ASTTokens
+    if self._asttokens is None:
+      self._asttokens = ASTTokens(
+          self._text,
+          tree=self.tree,
+          filename=self._filename,
+      )
+    return self._asttokens
+
+  def _get_text_positions_tokenless(self, node, padded):
+    # type: (ast.AST, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
+    """
+    Version of ``get_text_positions()`` that doesn't use tokens.
+    """
+    if sys.version_info[:2] < (3, 8):
+      raise AssertionError("This method should only be called internally after checking supports_tokenless()")
+
+    if isinstance(node, ast.Module):
+      # Modules don't have position info, so just return the range of the whole text.
+      # The token-using method does something different, but its behavior seems weird and inconsistent.
+      # For example, in a file with only comments, it only returns the first line.
+      # It's hard to imagine a case when this matters.
+      return (1, 0), self._line_numbers.offset_to_line(len(self._text))
+
+    if not hasattr(node, 'lineno'):
+      return (1, 0), (1, 0)
+
+    assert node  # tell mypy that node is not None, which we allowed up to here for compatibility
+
+    decorators = getattr(node, 'decorator_list', [])
+    if decorators:
+      # Function/Class definition nodes are marked by AST as starting at def/class,
+      # not the first decorator. This doesn't match the token-using behavior,
+      # or inspect.getsource(), and just seems weird.
+      start_node = decorators[0]
+    else:
+      start_node = node
+
+    if padded and last_stmt(node).lineno != node.lineno:
+      # Include leading indentation for multiline statements.
+      start_col_offset = 0
+    else:
+      start_col_offset = self._line_numbers.from_utf8_col(start_node.lineno, start_node.col_offset)
+
+    start = (start_node.lineno, start_col_offset)
+
+    # To match the token-using behaviour, we exclude trailing semicolons and comments.
+    # This means that for blocks containing multiple statements, we have to use the last one
+    # instead of the actual node for end_lineno and end_col_offset.
+    end_node = last_stmt(node)
+    end_lineno = cast(int, end_node.end_lineno)
+    end_col_offset = cast(int, end_node.end_col_offset)
+    end_col_offset = self._line_numbers.from_utf8_col(end_lineno, end_col_offset)
+    end = (end_lineno, end_col_offset)
+
+    return start, end
+
+  def get_text_positions(self, node, padded):
+    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
+    """
+    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
+    If the positions can't be determined, or the nodes don't correspond to any particular text,
+    returns ``(1, 0)`` for both.
+
+    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
+    This means that if ``padded`` is True, the start position will be adjusted to include
+    leading whitespace if ``node`` is a multiline statement.
+    """
+    if getattr(node, "_broken_positions", None):
+      # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.
+      return (1, 0), (1, 0)
+
+    if supports_tokenless(node):
+      return self._get_text_positions_tokenless(node, padded)
+
+    return self.asttokens.get_text_positions(node, padded)
+
+
+# Node types that _get_text_positions_tokenless doesn't support. Only relevant for Python 3.8+.
+_unsupported_tokenless_types = ()  # type: Tuple[Type[ast.AST], ...]
+if sys.version_info[:2] >= (3, 8):
+  _unsupported_tokenless_types += (
+    # no lineno
+    ast.arguments, ast.withitem,
+  )
+  if sys.version_info[:2] == (3, 8):
+    _unsupported_tokenless_types += (
+      # _get_text_positions_tokenless works incorrectly for these types due to bugs in Python 3.8.
+      ast.arg, ast.Starred,
+      # no lineno in 3.8
+      ast.Slice, ast.ExtSlice, ast.Index, ast.keyword,
+    )
+
+
+def supports_tokenless(node=None):
+  # type: (Any) -> bool
+  """
+  Returns True if the Python version and the node (if given) are supported by
+  the ``get_text*`` methods of ``ASTText`` without falling back to ``ASTTokens``.
+  See ``ASTText`` for why this matters.
+
+  The following cases are not supported:
+
+    - Python 3.7 and earlier
+    - PyPy
+    - Astroid nodes (``get_text*`` methods of ``ASTText`` will raise an error)
+    - ``ast.arguments`` and ``ast.withitem``
+    - The following nodes in Python 3.8 only:
+      - ``ast.arg``
+      - ``ast.Starred``
+      - ``ast.Slice``
+      - ``ast.ExtSlice``
+      - ``ast.Index``
+      - ``ast.keyword``
+  """
+  return (
+      isinstance(node, (ast.AST, type(None)))
+      and not isinstance(node, _unsupported_tokenless_types)
+      and sys.version_info[:2] >= (3, 8)
+      and 'pypy' not in sys.version.lower()
+  )
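
For context (an editor's sketch, not part of the upstream diff): assuming asttokens
2.1.0 is installed, the new ASTText class and the new ``tokens=`` parameter of
ASTTokens might be exercised as below; the source string and variable names are
made up for illustration.

    import ast
    from asttokens import ASTText, ASTTokens, supports_tokenless
    from asttokens.util import generate_tokens

    source = "x = 1\nif x:\n    y = x + 1\n"

    # ASTText computes positions from the AST itself; no upfront tokenization.
    atext = ASTText(source)
    if_stmt = atext.tree.body[1]
    if supports_tokenless(if_stmt):  # avoid the lazy ASTTokens fallback
        print(atext.get_text(if_stmt))  # "if x:\n    y = x + 1"

    # ASTTokens can now reuse a pre-built standard-library token stream.
    atok = ASTTokens(source, parse=True, tokens=generate_tokens(source))
    print(atok.get_text(atok.tree.body[0]))  # "x = 1"
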
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/asttokens/util.py new/asttokens-2.1.0/asttokens/util.py
--- old/asttokens-2.0.8/asttokens/util.py       2022-08-15 12:49:10.000000000 +0200
+++ new/asttokens-2.1.0/asttokens/util.py       2022-10-29 12:14:52.000000000 +0200
@@ -14,16 +14,17 @@
 
 import ast
 import collections
+import io
 import sys
 import token
 import tokenize
 from abc import ABCMeta
 from ast import Module, expr, AST
-from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union, cast, Any, TYPE_CHECKING
+from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, cast, Any, TYPE_CHECKING
 
 from six import iteritems
 
-if TYPE_CHECKING:
+if TYPE_CHECKING:  # pragma: no cover
   from astroid.node_classes import NodeNG
 
   # Type class used to expand out the definition of AST to include fields added by this library
@@ -36,6 +37,11 @@
 
   AstNode = Union[EnhancedAST, NodeNG]
 
+  if sys.version_info[0] == 2:
+    TokenInfo = Tuple[int, str, Tuple[int, int], Tuple[int, int], str]
+  else:
+    TokenInfo = tokenize.TokenInfo
+
 
 def token_repr(tok_type, string):
   # type: (int, Optional[str]) -> str
@@ -105,8 +111,19 @@
     return token_type >= token.N_TOKENS
 
 
+def generate_tokens(text):
+  # type: (str) -> Iterator[TokenInfo]
+  """
+  Generates standard library tokens for the given code.
+  """
+  # tokenize.generate_tokens is technically an undocumented API for Python3, but allows us to use the same API as for
+  # Python2. See http://stackoverflow.com/a/4952291/328565.
+  # FIXME: Remove cast once https://github.com/python/typeshed/issues/7003 gets fixed
+  return tokenize.generate_tokens(cast(Callable[[], str], io.StringIO(text).readline))
+
+
 def iter_children_func(node):
-  # type: (Module) -> Callable
+  # type: (AST) -> Callable
   """
   Returns a function which yields all direct children of an AST node,
   skipping children that are singleton nodes.
@@ -249,7 +266,7 @@
 
 
 def walk(node):
-  # type: (Module) -> Iterator[Union[Module, AstNode]]
+  # type: (AST) -> Iterator[Union[Module, AstNode]]
   """
   Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``
   itself), using depth-first pre-order traversal (yielding parents before their children).
@@ -320,11 +337,11 @@
   # Python 2 doesn't support non-ASCII identifiers, and making the real patched_generate_tokens support Python 2
   # means working with raw tuples instead of tokenize.TokenInfo namedtuples.
   def patched_generate_tokens(original_tokens):
-    # type: (Any) -> Any
-    return original_tokens
+    # type: (Iterable[TokenInfo]) -> Iterator[TokenInfo]
+    return iter(original_tokens)
 else:
   def patched_generate_tokens(original_tokens):
-    # type: (Iterator[tokenize.TokenInfo]) -> Iterator[tokenize.TokenInfo]
+    # type: (Iterable[TokenInfo]) -> Iterator[TokenInfo]
     """
     Fixes tokens yielded by `tokenize.generate_tokens` to handle more non-ASCII characters in identifiers.
     Workaround for https://github.com/python/cpython/issues/68382.
@@ -361,3 +378,83 @@
         line=group[0].line,
       )
     ]
+
+
+def last_stmt(node):
+  # type: (ast.AST) -> ast.AST
+  """
+  If the given AST node contains multiple statements, return the last one.
+  Otherwise, just return the node.
+  """
+  child_stmts = [
+    child for child in ast.iter_child_nodes(node)
+    if isinstance(child, (ast.stmt, ast.excepthandler))
+  ]
+  if child_stmts:
+    return last_stmt(child_stmts[-1])
+  return node
+
+
+if sys.version_info[:2] >= (3, 8):
+  from functools import lru_cache
+
+  @lru_cache(maxsize=None)
+  def fstring_positions_work():
+    # type: () -> bool
+    """
+    The positions attached to nodes inside f-string FormattedValues have some bugs
+    that were fixed in Python 3.9.7 in https://github.com/python/cpython/pull/27729.
+    This checks for those bugs more concretely without relying on the Python version.
+    Specifically this checks:
+     - Values with a format spec or conversion
+     - Repeated (i.e. identical-looking) expressions
+     - Multiline f-strings implicitly concatenated.
+    """
+    source = """(
+      f"a {b}{b} c {d!r} e {f:g} h {i:{j}} k {l:{m:n}}"
+      f"a {b}{b} c {d!r} e {f:g} h {i:{j}} k {l:{m:n}}"
+      f"{x + y + z} {x} {y} {z} {z} {z!a} {z:z}"
+    )"""
+    tree = ast.parse(source)
+    name_nodes = [node for node in ast.walk(tree) if isinstance(node, ast.Name)]
+    name_positions = [(node.lineno, node.col_offset) for node in name_nodes]
+    positions_are_unique = len(set(name_positions)) == len(name_positions)
+    correct_source_segments = all(
+      ast.get_source_segment(source, node) == node.id
+      for node in name_nodes
+    )
+    return positions_are_unique and correct_source_segments
+
+  def annotate_fstring_nodes(tree):
+    # type: (ast.AST) -> None
+    """
+    Add a special attribute `_broken_positions` to nodes inside f-strings
+    if the lineno/col_offset cannot be trusted.
+    """
+    for joinedstr in walk(tree):
+      if not isinstance(joinedstr, ast.JoinedStr):
+        continue
+      for part in joinedstr.values:
+        # The ast positions of the FormattedValues/Constant nodes span the full f-string, which is weird.
+        setattr(part, '_broken_positions', True)  # use setattr for mypy
+
+        if isinstance(part, ast.FormattedValue):
+          if not fstring_positions_work():
+            for child in walk(part.value):
+              setattr(child, '_broken_positions', True)
+
+          if part.format_spec:  # this is another JoinedStr
+            # Again, the standard positions span the full f-string.
+            setattr(part.format_spec, '_broken_positions', True)
+            # Recursively handle this inner JoinedStr in the same way.
+            # While this is usually automatic for other nodes,
+            # the children of f-strings are explicitly excluded in iter_children_ast.
+            annotate_fstring_nodes(part.format_spec)
+else:
+  def fstring_positions_work():
+    # type: () -> bool
+    return False
+
+  def annotate_fstring_nodes(_tree):
+    # type: (ast.AST) -> None
+    pass
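
For illustration (an editor's sketch, not from the upstream sources): the new
last_stmt() helper recurses to the final statement nested inside a node, which is
what lets the tokenless path drop trailing semicolons and comments when computing
end positions.

    import ast
    from asttokens.util import last_stmt

    tree = ast.parse("if a:\n  if b: c; d  # comment")
    outer_if = tree.body[0]
    # Descends outer `if` -> inner `if` -> its last child statement, the
    # expression statement `d`; the trailing comment is never included.
    print(ast.dump(last_stmt(outer_if)))
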
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/asttokens/version.py new/asttokens-2.1.0/asttokens/version.py
--- old/asttokens-2.0.8/asttokens/version.py    2022-08-15 12:49:38.000000000 +0200
+++ new/asttokens-2.1.0/asttokens/version.py    2022-10-29 13:23:38.000000000 +0200
@@ -1 +1 @@
-__version__ = "2.0.8"
+__version__ = "2.1.0"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/asttokens.egg-info/PKG-INFO new/asttokens-2.1.0/asttokens.egg-info/PKG-INFO
--- old/asttokens-2.0.8/asttokens.egg-info/PKG-INFO     2022-08-15 12:49:38.000000000 +0200
+++ new/asttokens-2.1.0/asttokens.egg-info/PKG-INFO     2022-10-29 13:23:38.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: asttokens
-Version: 2.0.8
+Version: 2.1.0
 Summary: Annotate AST trees with source code positions
 Home-page: https://github.com/gristlabs/asttokens
 Author: Dmitry Sagalovskiy, Grist Labs
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/asttokens.egg-info/SOURCES.txt new/asttokens-2.1.0/asttokens.egg-info/SOURCES.txt
--- old/asttokens-2.0.8/asttokens.egg-info/SOURCES.txt  2022-08-15 12:49:38.000000000 +0200
+++ new/asttokens-2.1.0/asttokens.egg-info/SOURCES.txt  2022-10-29 13:23:38.000000000 +0200
@@ -33,6 +33,7 @@
 tests/test_asttokens.py
 tests/test_line_numbers.py
 tests/test_mark_tokens.py
+tests/test_tokenless.py
 tests/test_util.py
 tests/tools.py
 tests/testdata/README.md
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/tests/test_asttokens.py new/asttokens-2.1.0/tests/test_asttokens.py
--- old/asttokens-2.0.8/tests/test_asttokens.py 2022-08-01 00:18:22.000000000 +0200
+++ new/asttokens-2.1.0/tests/test_asttokens.py 2022-10-29 12:14:52.000000000 +0200
@@ -9,10 +9,9 @@
 
 class TestASTTokens(unittest.TestCase):
 
-  def test_tokenizing(self):
-    # Test that we produce meaningful tokens on initialization.
+  def assertTokenizing(self, generate_tokens):
     source = "import re  # comment\n\nfoo = 'bar'\n"
-    atok = asttokens.ASTTokens(source)
+    atok = asttokens.ASTTokens(source, tokens=generate_tokens(source))
     self.assertEqual(atok.text, source)
     self.assertEqual([str(t) for t in atok.tokens], [
       "NAME:'import'",
@@ -33,6 +32,28 @@
     self.assertEqual(atok.tokens[5].startpos, 22)
     self.assertEqual(atok.tokens[5].endpos, 25)
 
+  def test_tokenizing(self):
+    # Test that we produce meaningful tokens on initialization.
+    self.assertTokenizing(generate_tokens=lambda x: None)
+
+  def test_given_existing_tokens(self):
+    # type: () -> None
+    # Test that we process a given list of tokens on initialization.
+
+    self.was_called = False
+
+    def generate_tokens(source):
+      def tokens_iter():
+        # force nonlocal into scope
+        for token in asttokens.util.generate_tokens(source):
+          yield token
+        self.was_called = True
+      return tokens_iter()
+
+    self.assertTokenizing(generate_tokens)
+
+    self.assertTrue(self.was_called, "Should have used tokens from given iterable")
+
 
   def test_token_methods(self):
     # Test the methods that deal with tokens: prev/next_token, get_token, get_token_from_offset.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/tests/test_mark_tokens.py new/asttokens-2.1.0/tests/test_mark_tokens.py
--- old/asttokens-2.0.8/tests/test_mark_tokens.py       2022-08-15 12:49:10.000000000 +0200
+++ new/asttokens-2.1.0/tests/test_mark_tokens.py       2022-10-29 12:14:52.000000000 +0200
@@ -31,7 +31,7 @@
 
   def create_mark_checker(self, source, verify=True):
     atok = self.create_asttokens(source)
-    checker = tools.MarkChecker(atok)
+    checker = tools.MarkChecker(atok, self.is_astroid_test)
 
     # The last token should always be an ENDMARKER
     # None of the nodes should contain that token
@@ -442,6 +442,16 @@
     self.assertEqual(m.view_nodes_at(2, 4), {'Name:x', 'Subscript:x[4]'})
 
   if not six.PY2:
+    def test_bad_tokenless_types(self):
+      # Cases where _get_text_positions_tokenless is incorrect in 3.8.
+      source = textwrap.dedent("""
+        def foo(*, name: str):  # keyword-only argument with type annotation
+          pass
+
+        f(*(x))  # ast.Starred with parentheses
+      """)
+      self.create_mark_checker(source)
+
     def test_return_annotation(self):
      # See https://bitbucket.org/plas/thonny/issues/9/range-marker-crashes-on-function-return
       source = textwrap.dedent("""
@@ -554,11 +564,14 @@
   17
 ); d # comment1; comment2
 if 2: a; b; # comment3
+if a:
+  if b: c; d  # comment4
     """
     m = self.create_mark_checker(source)
     self.assertEqual(
       [m.atok.get_text(n) for n in m.all_nodes if util.is_stmt(n)],
-      ['a', 'b', 'c(\n  17\n)', 'd', 'if 2: a; b', 'a', 'b'])
+      ['a', 'b', 'c(\n  17\n)', 'd', 'if 2: a; b', 'a', 'b',
+       'if a:\n  if b: c; d', 'if b: c; d', 'c', 'd'])
 
 
   def test_complex_numbers(self):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/tests/test_tokenless.py new/asttokens-2.1.0/tests/test_tokenless.py
--- old/asttokens-2.0.8/tests/test_tokenless.py 1970-01-01 01:00:00.000000000 +0100
+++ new/asttokens-2.1.0/tests/test_tokenless.py 2022-10-29 12:14:52.000000000 +0200
@@ -0,0 +1,129 @@
+import ast
+import sys
+import unittest
+
+import astroid
+
+from asttokens import ASTText, supports_tokenless
+from asttokens.util import fstring_positions_work
+
+source = """
+x = 1
+if x > 0:
+  for i in range(10):
+    print(i)
+else:
+  print('negative')
+
+def foo(bar):
+  pass
+
+print(f"{xx + 22} is negative {1.23:.2f} {'a'!r} {yy =} {aa:{bb}}")
+
+import a
+import b as c, d.e as f
+from foo.bar import baz as spam
+"""
+
+fstring_node_dumps = [
+  ast.dump(ast.parse(s).body[0].value)  # type: ignore
+  for s in ["xx", "yy", "aa", "bb", "xx + 22", "22", "1.23", "'a'"]
+]
+
+
+def is_fstring_internal_node(node):
+  """
+  Returns True if the given node is an internal node in an f-string.
+  Only applies for nodes parsed from the source above.
+  """
+  return ast.dump(node) in fstring_node_dumps
+
+
+def is_fstring_format_spec(node):
+  """
+  Returns True if the given node is a format specifier in an f-string.
+  Only applies for nodes parsed from the source above.
+  """
+  return (
+      isinstance(node, ast.JoinedStr)
+      and len(node.values) == 1
+      and (
+          (
+              isinstance(node.values[0], ast.Str)
+              and node.values[0].value in ['.2f']
+          ) or (
+              isinstance(node.values[0], ast.FormattedValue)
+              and isinstance(node.values[0].value, ast.Name)
+              and node.values[0].value.id == 'bb'
+          )
+      )
+  )
+
+
+@unittest.skipUnless(supports_tokenless(), "Python version does not support not using tokens")
+class TestTokenless(unittest.TestCase):
+  def test_get_text_tokenless(self):
+    atok = ASTText(source)
+
+    for node in ast.walk(atok.tree):
+      if not isinstance(node, (ast.arguments, ast.arg)):
+        self.check_node(atok, node)
+        self.assertTrue(supports_tokenless(node), node)
+
+    # Check that we didn't need to fall back to using tokens
+    self.assertIsNone(atok._asttokens)
+
+    has_tokens = False
+    for node in ast.walk(atok.tree):
+      self.check_node(atok, node)
+
+      if isinstance(node, ast.arguments):
+        has_tokens = True
+
+      self.assertEqual(atok._asttokens is not None, has_tokens)
+
+    # Now we have started using tokens as fallback
+    self.assertIsNotNone(atok._asttokens)
+    self.assertTrue(has_tokens)
+
+  def check_node(self, atok, node):
+    if not hasattr(node, 'lineno'):
+      self.assertEqual(ast.get_source_segment(source, node), None)
+      atok_text = atok.get_text(node)
+      if not isinstance(node, (ast.arg, ast.arguments)):
+        self.assertEqual(atok_text, source if isinstance(node, ast.Module) else '', node)
+      return
+
+    for padded in [True, False]:
+      ast_text = ast.get_source_segment(source, node, padded=padded)
+      atok_text = atok.get_text(node, padded=padded)
+      if ast_text:
+        if (
+          ast_text.startswith("f") and isinstance(node, (ast.Str, ast.FormattedValue))
+          or is_fstring_format_spec(node)
+          or (not fstring_positions_work() and is_fstring_internal_node(node))
+        ):
+          self.assertEqual(atok_text, "", node)
+        else:
+          self.assertEqual(atok_text, ast_text, node)
+          self.assertEqual(
+            atok.get_text_positions(node, padded=False),
+            (
+              (node.lineno, node.col_offset),
+              (node.end_lineno, node.end_col_offset),
+            ),
+          )
+
+  def test_lazy_asttext_astroid_errors(self):
+    builder = astroid.builder.AstroidBuilder()
+    tree = builder.string_build(source)
+    with self.assertRaises(NotImplementedError):
+      ASTText(source, tree)
+
+
+class TestFstringPositionsWork(unittest.TestCase):
+  def test_fstring_positions_work(self):
+    self.assertEqual(
+      fstring_positions_work() and supports_tokenless(),
+      sys.version_info >= (3, 9, 7),
+    )
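
As a usage note (an editor's sketch, not part of the upstream diff): on Python 3.8+
the tokenless positions are meant to agree with the stdlib helper, which the new
tests above verify more thoroughly; a quick spot check might look like this.

    import ast
    from asttokens import ASTText, supports_tokenless

    src = "x = 1\nprint(x + 2)\n"
    atext = ASTText(src)
    for node in ast.walk(atext.tree):
        if supports_tokenless(node) and hasattr(node, 'lineno'):
            # ast.get_source_segment exists on 3.8+, which supports_tokenless implies.
            assert atext.get_text(node, padded=False) == ast.get_source_segment(src, node)
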
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/tests/tools.py new/asttokens-2.1.0/tests/tools.py
--- old/asttokens-2.0.8/tests/tools.py  2021-09-30 21:43:20.000000000 +0200
+++ new/asttokens-2.1.0/tests/tools.py  2022-10-29 12:14:52.000000000 +0200
@@ -1,11 +1,12 @@
 from __future__ import unicode_literals, print_function
 
+import ast
 import io
 import os
 import re
 import sys
 
-from asttokens import util
+from asttokens import util, supports_tokenless, ASTText
 
 
 def get_fixture_path(*path_parts):
@@ -35,9 +36,11 @@
   """
   Helper tool to parse and mark an AST tree, with useful methods for verifying it.
   """
-  def __init__(self, atok):
+  def __init__(self, atok, is_astroid_test):
     self.atok = atok
     self.all_nodes = collect_nodes_preorder(self.atok.tree)
+    if not is_astroid_test:
+      self.atext = ASTText(atok.text, atok.tree, atok.filename)
 
   def get_nodes_at(self, line, col):
     """Returns all nodes that start with the token at the given position."""
@@ -69,8 +72,17 @@
     number of nodes that were tested this way.
     """
     test_case.longMessage = True
+
+    if supports_tokenless() and not test_case.is_astroid_test:
+      num_supported = sum(supports_tokenless(n) for n in self.all_nodes)
+      num_nodes = len(self.all_nodes)
+      test_case.assertGreater(num_supported / num_nodes, 0.5, (num_supported, num_nodes))
+
     tested_nodes = 0
     for node in self.all_nodes:
+      text = self.atok.get_text(node)
+      self.check_get_text_tokenless(node, test_case, text)
+
       if not (
           util.is_stmt(node) or
           util.is_expr(node) or
@@ -81,8 +93,6 @@
       if util.is_slice(node) and test_case.is_astroid_test:
         continue
 
-      text = self.atok.get_text(node)
-
       # await is not allowed outside async functions below 3.7
       # parsing again would give a syntax error
       if 'await' in text and 'async def' not in text and sys.version_info < (3, 7):
@@ -111,6 +121,43 @@
 
     return tested_nodes
 
+  def check_get_text_tokenless(self, node, test_case, text):
+    """
+    Check that `text` (returned from get_text()) usually returns the same text
+    whether from `ASTTokens` or `ASTText`.
+    """
+
+    if test_case.is_astroid_test or not supports_tokenless():
+      return
+
+    text_tokenless = self.atext.get_text(node)
+    if isinstance(node, ast.alias):
+      self._check_alias_tokenless(node, test_case, text_tokenless)
+    elif isinstance(node, ast.Module):
+      test_case.assertEqual(text_tokenless, self.atext._text)
+    elif supports_tokenless(node):
+      has_lineno = hasattr(node, 'lineno')
+      test_case.assertEqual(has_lineno, text_tokenless != '')
+      if has_lineno:
+        test_case.assertEqual(text, text_tokenless, ast.dump(node))
+      else:
+        # _get_text_positions_tokenless can't work with nodes without lineno.
+        # Double-check that such nodes are unusual.
+        test_case.assertFalse(util.is_stmt(node) or util.is_expr(node))
+        with test_case.assertRaises(SyntaxError, msg=(text, ast.dump(node))):
+          test_case.parse_snippet(text, node)
+
+  def _check_alias_tokenless(self, node, test_case, text):
+    if sys.version_info < (3, 10):
+      # Before 3.10, aliases don't have position information
+      test_case.assertEqual(text, '')
+    # For 3.10+, ASTTokens.get_text often returns the wrong value for aliases.
+    # So to verify ASTText.get_text, we instead check the general form.
+    elif node.asname:
+      test_case.assertEqual(text.split(), [node.name, 'as', node.asname])
+    else:
+      test_case.assertEqual(text, node.name)
+
 
 def repr_tree(node):
   """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/asttokens-2.0.8/tox.ini new/asttokens-2.1.0/tox.ini
--- old/asttokens-2.0.8/tox.ini 2022-08-08 16:30:50.000000000 +0200
+++ new/asttokens-2.1.0/tox.ini 2022-10-29 12:14:52.000000000 +0200
@@ -7,7 +7,7 @@
 envlist = py{27,35,36,37,38,39,310,py,py3}
 
 [testenv]
-commands = pytest
+commands = pytest {posargs}
 deps =
     .[test]
 passenv =
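
With {posargs} in place, any arguments after tox's "--" separator are forwarded to
pytest, so one can select the new tests, for example:

    tox -e py310 -- -k tokenless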
