Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package python-parso for openSUSE:Factory 
checked in at 2021-12-13 20:41:43
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-parso (Old)
 and      /work/SRC/openSUSE:Factory/.python-parso.new.2520 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-parso"

Mon Dec 13 20:41:43 2021 rev:17 rq:940007 version:0.8.3

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-parso/python-parso.changes        
2021-06-11 22:30:36.938124135 +0200
+++ /work/SRC/openSUSE:Factory/.python-parso.new.2520/python-parso.changes      
2021-12-13 20:45:54.948496246 +0100
@@ -1,0 +2,7 @@
+Fri Dec 10 20:32:47 UTC 2021 - Ben Greiner <c...@bnavigator.de>
+
+- update to 0.8.3:
+  * Add basic support for Python 3.11 and 3.12
+- Skip tests failing in Python 3.10 gh#davidhalter/parso#192
+
+-------------------------------------------------------------------

Old:
----
  parso-0.8.2.tar.gz

New:
----
  parso-0.8.3.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-parso.spec ++++++
--- /var/tmp/diff_new_pack.FJEom1/_old  2021-12-13 20:45:55.508496315 +0100
+++ /var/tmp/diff_new_pack.FJEom1/_new  2021-12-13 20:45:55.528496317 +0100
@@ -19,7 +19,7 @@
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 %define skip_python2 1
 Name:           python-parso
-Version:        0.8.2
+Version:        0.8.3
 Release:        0
 Summary:        An autocompletion tool for Python
 License:        MIT AND Python-2.0
@@ -55,7 +55,9 @@
 %python_expand %fdupes %{buildroot}%{$python_sitelib}
 
 %check
-%pytest
+# Python 3.10 has deviating exception messages -- gh#davidhalter/parso#192
+python310_args=("-k" "not test_python_exception_matches")
+%pytest "${$python_args[@]}"
 
 %files %{python_files}
 %license LICENSE.txt

++++++ parso-0.8.2.tar.gz -> parso-0.8.3.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/.coveragerc new/parso-0.8.3/.coveragerc
--- old/parso-0.8.2/.coveragerc 2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/.coveragerc 2021-11-30 22:04:12.000000000 +0100
@@ -4,6 +4,8 @@
 [report]
 # Regexes for lines to exclude from consideration
 exclude_lines =
+    pragma: no cover
+
     # Don't complain about missing debug-only code:
     def __repr__
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/CHANGELOG.rst 
new/parso-0.8.3/CHANGELOG.rst
--- old/parso-0.8.2/CHANGELOG.rst       2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/CHANGELOG.rst       2021-11-30 22:04:12.000000000 +0100
@@ -6,6 +6,11 @@
 Unreleased
 ++++++++++
 
+0.8.3 (2021-11-30)
+++++++++++++++++++
+
+- Add basic support for Python 3.11 and 3.12
+
 0.8.2 (2021-03-30)
 ++++++++++++++++++
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/PKG-INFO new/parso-0.8.3/PKG-INFO
--- old/parso-0.8.2/PKG-INFO    2021-03-30 22:43:48.816064400 +0200
+++ new/parso-0.8.3/PKG-INFO    2021-11-30 22:05:45.521858000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: parso
-Version: 0.8.2
+Version: 0.8.3
 Summary: A Python Parser
 Home-page: https://github.com/davidhalter/parso
 Author: David Halter
@@ -13,9 +13,9 @@
         ###################################################################
         
         
-        .. image:: https://travis-ci.org/davidhalter/parso.svg?branch=master
-            :target: https://travis-ci.org/davidhalter/parso
-            :alt: Travis CI build status
+        .. image:: 
https://github.com/davidhalter/parso/workflows/Build/badge.svg?branch=master
+            :target: https://github.com/davidhalter/parso/actions
+            :alt: GitHub Actions build status
         
         .. image:: 
https://coveralls.io/repos/github/davidhalter/parso/badge.svg?branch=master
             :target: 
https://coveralls.io/github/davidhalter/parso?branch=master
@@ -113,6 +113,11 @@
         Unreleased
         ++++++++++
         
+        0.8.3 (2021-11-30)
+        ++++++++++++++++++
+        
+        - Add basic support for Python 3.11 and 3.12
+        
         0.8.2 (2021-03-30)
         ++++++++++++++++++
         
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/README.rst new/parso-0.8.3/README.rst
--- old/parso-0.8.2/README.rst  2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/README.rst  2021-11-30 22:04:12.000000000 +0100
@@ -3,9 +3,9 @@
 ###################################################################
 
 
-.. image:: https://travis-ci.org/davidhalter/parso.svg?branch=master
-    :target: https://travis-ci.org/davidhalter/parso
-    :alt: Travis CI build status
+.. image:: 
https://github.com/davidhalter/parso/workflows/Build/badge.svg?branch=master
+    :target: https://github.com/davidhalter/parso/actions
+    :alt: GitHub Actions build status
 
 .. image:: 
https://coveralls.io/repos/github/davidhalter/parso/badge.svg?branch=master
     :target: https://coveralls.io/github/davidhalter/parso?branch=master
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/docs/docs/development.rst 
new/parso-0.8.3/docs/docs/development.rst
--- old/parso-0.8.2/docs/docs/development.rst   2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/docs/docs/development.rst   2021-11-30 22:04:12.000000000 
+0100
@@ -34,5 +34,5 @@
 
     python3.9 -m pytest
 
-Tests are also run automatically on `Travis CI
-<https://travis-ci.org/davidhalter/parso/>`_.
+Tests are also run automatically on `GitHub Actions
+<https://github.com/davidhalter/parso/actions>`_.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/docs/index.rst 
new/parso-0.8.3/docs/index.rst
--- old/parso-0.8.2/docs/index.rst      2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/docs/index.rst      2021-11-30 22:04:12.000000000 +0100
@@ -27,5 +27,5 @@
 ---------
 
 - `Source Code on Github <https://github.com/davidhalter/parso>`_
-- `Travis Testing <https://travis-ci.org/davidhalter/parso>`_
+- `GitHub Actions Testing <https://github.com/davidhalter/parso/actions>`_
 - `Python Package Index <http://pypi.python.org/pypi/parso/>`_
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/__init__.py 
new/parso-0.8.3/parso/__init__.py
--- old/parso-0.8.2/parso/__init__.py   2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/parso/__init__.py   2021-11-30 22:04:12.000000000 +0100
@@ -43,7 +43,7 @@
 from parso.utils import split_lines, python_bytes_to_unicode
 
 
-__version__ = '0.8.2'
+__version__ = '0.8.3'
 
 
 def parse(code=None, **kwargs):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/parser.py 
new/parso-0.8.3/parso/parser.py
--- old/parso-0.8.2/parso/parser.py     2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/parso/parser.py     2021-11-30 22:04:12.000000000 +0100
@@ -23,7 +23,7 @@
 complexity of the ``Parser`` (there's another parser sitting inside
 ``Statement``, which produces ``Array`` and ``Call``).
 """
-from typing import Dict
+from typing import Dict, Type
 
 from parso import tree
 from parso.pgen2.generator import ReservedString
@@ -110,10 +110,10 @@
     When a syntax error occurs, error_recovery() is called.
     """
 
-    node_map: Dict[str, type] = {}
+    node_map: Dict[str, Type[tree.BaseNode]] = {}
     default_node = tree.Node
 
-    leaf_map: Dict[str, type] = {}
+    leaf_map: Dict[str, Type[tree.Leaf]] = {}
     default_leaf = tree.Leaf
 
     def __init__(self, pgen_grammar, start_nonterminal='file_input', 
error_recovery=False):
@@ -156,8 +156,6 @@
             node = self.node_map[nonterminal](children)
         except KeyError:
             node = self.default_node(nonterminal, children)
-        for c in children:
-            c.parent = node
         return node
 
     def convert_leaf(self, type_, value, prefix, start_pos):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/errors.py 
new/parso-0.8.3/parso/python/errors.py
--- old/parso-0.8.2/parso/python/errors.py      2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/errors.py      2021-11-30 22:04:12.000000000 
+0100
@@ -5,7 +5,6 @@
 from contextlib import contextmanager
 
 from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
-from parso.python.tree import search_ancestor
 from parso.python.tokenize import _get_token_collection
 
 _BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
@@ -231,7 +230,7 @@
     elif node.type == "fstring":
         return True
     else:
-        return search_ancestor(node, "fstring")
+        return node.search_ancestor("fstring")
 
 
 class _Context:
@@ -1265,7 +1264,7 @@
         def search_all_comp_ancestors(node):
             has_ancestors = False
             while True:
-                node = search_ancestor(node, 'testlist_comp', 'dictorsetmaker')
+                node = node.search_ancestor('testlist_comp', 'dictorsetmaker')
                 if node is None:
                     break
                 for child in node.children:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/grammar310.txt 
new/parso-0.8.3/parso/python/grammar310.txt
--- old/parso-0.8.2/parso/python/grammar310.txt 2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/grammar310.txt 2021-11-30 22:04:12.000000000 
+0100
@@ -97,9 +97,7 @@
 
 namedexpr_test: test [':=' test]
 test: or_test ['if' or_test 'else' test] | lambdef
-test_nocond: or_test | lambdef_nocond
 lambdef: 'lambda' [varargslist] ':' test
-lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
 or_test: and_test ('or' and_test)*
 and_test: not_test ('and' not_test)*
 not_test: 'not' not_test | comparison
@@ -155,7 +153,7 @@
 comp_iter: comp_for | comp_if
 sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_for: ['async'] sync_comp_for
-comp_if: 'if' test_nocond [comp_iter]
+comp_if: 'if' or_test [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
 encoding_decl: NAME
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/grammar311.txt 
new/parso-0.8.3/parso/python/grammar311.txt
--- old/parso-0.8.2/parso/python/grammar311.txt 1970-01-01 01:00:00.000000000 
+0100
+++ new/parso-0.8.3/parso/python/grammar311.txt 2021-11-30 22:04:12.000000000 
+0100
@@ -0,0 +1,169 @@
+# Grammar for Python
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://devguide.python.org/grammar/
+
+# Start symbols for the grammar:
+#       single_input is a single interactive statement;
+#       file_input is a module or sequence of commands read from an input file;
+#       eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: stmt* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' namedexpr_test NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+
+async_funcdef: 'async' funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] 
(
+        ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]])
+  | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+  | '**' tfpdef [',']]] )
+|  (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]]
+  | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+  | '**' tfpdef [','])
+)
+tfpdef: NAME [':' test]
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef 
['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']
+)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt | NEWLINE
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist_star_expr))*)
+annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal and annotated assignments, additional restrictions enforced by 
the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist_star_expr]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as 
ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | 
funcdef | classdef | decorated | async_stmt
+async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* 
['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+            ['else' ':' suite]
+            ['finally' ':' suite] |
+           'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)*  ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+namedexpr_test: test [':=' test]
+test: or_test ['if' or_test 'else' test] | lambdef
+lambdef: 'lambda' [varargslist] ':' test
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom_expr ['**' factor]
+atom_expr: ['await'] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+       '[' [testlist_comp] ']' |
+       '{' [dictorsetmaker] '}' |
+       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' 
(namedexpr_test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test [':=' test] | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( ((test ':' test | '**' expr)
+                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                  ((test [':=' test] | star_expr)
+                   (comp_for | (',' (test [':=' test] | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)*  [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
+comp_if: 'if' or_test [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist_star_expr
+
+strings: (STRING | fstring)+
+fstring: FSTRING_START fstring_content* FSTRING_END
+fstring_content: FSTRING_STRING | fstring_expr
+fstring_conversion: '!' NAME
+fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ 
fstring_format_spec ] '}'
+fstring_format_spec: ':' fstring_content*
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/grammar312.txt 
new/parso-0.8.3/parso/python/grammar312.txt
--- old/parso-0.8.2/parso/python/grammar312.txt 1970-01-01 01:00:00.000000000 
+0100
+++ new/parso-0.8.3/parso/python/grammar312.txt 2021-11-30 22:04:12.000000000 
+0100
@@ -0,0 +1,169 @@
+# Grammar for Python
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://devguide.python.org/grammar/
+
+# Start symbols for the grammar:
+#       single_input is a single interactive statement;
+#       file_input is a module or sequence of commands read from an input file;
+#       eval_input is the input for the eval() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: stmt* ENDMARKER
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' namedexpr_test NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+
+async_funcdef: 'async' funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+
+parameters: '(' [typedargslist] ')'
+typedargslist: (
+  (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] 
(
+        ',' tfpdef ['=' test])* ([',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]])
+  | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]])
+  | '**' tfpdef [',']]] )
+|  (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [
+        '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+      | '**' tfpdef [',']]]
+  | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]]
+  | '**' tfpdef [','])
+)
+tfpdef: NAME [':' test]
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef 
['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [
+        '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+      | '**' vfpdef [',']]]
+  | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]]
+  | '**' vfpdef [',']
+)
+vfpdef: NAME
+
+stmt: simple_stmt | compound_stmt | NEWLINE
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist_star_expr))*)
+annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal and annotated assignments, additional restrictions enforced by 
the interpreter
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist_star_expr]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+# note below: the ('.' | '...') is necessary because '...' is tokenized as 
ELLIPSIS
+import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: 'global' NAME (',' NAME)*
+nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | 
funcdef | classdef | decorated | async_stmt
+async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* 
['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+            ['else' ':' suite]
+            ['finally' ':' suite] |
+           'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)*  ':' suite
+with_item: test ['as' expr]
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test ['as' NAME]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+namedexpr_test: test [':=' test]
+test: or_test ['if' or_test 'else' test] | lambdef
+lambdef: 'lambda' [varargslist] ':' test
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+# <> isn't actually a valid comparison operator in Python. It's here for the
+# sake of a __future__ import described in PEP 401 (which really works :-)
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom_expr ['**' factor]
+atom_expr: ['await'] atom trailer*
+atom: ('(' [yield_expr|testlist_comp] ')' |
+       '[' [testlist_comp] ']' |
+       '{' [dictorsetmaker] '}' |
+       NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False')
+testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' 
(namedexpr_test|star_expr))* [','] )
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test [':=' test] | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictorsetmaker: ( ((test ':' test | '**' expr)
+                   (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                  ((test [':=' test] | star_expr)
+                   (comp_for | (',' (test [':=' test] | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)*  [',']
+
+# The reason that keywords are test nodes instead of NAME is that using NAME
+# results in an ambiguity. ast.c makes sure it's a NAME.
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
+comp_for: ['async'] sync_comp_for
+comp_if: 'if' or_test [comp_iter]
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist_star_expr
+
+strings: (STRING | fstring)+
+fstring: FSTRING_START fstring_content* FSTRING_END
+fstring_content: FSTRING_STRING | fstring_expr
+fstring_conversion: '!' NAME
+fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ 
fstring_format_spec ] '}'
+fstring_format_spec: ':' fstring_content*
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/grammar39.txt 
new/parso-0.8.3/parso/python/grammar39.txt
--- old/parso-0.8.2/parso/python/grammar39.txt  2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/grammar39.txt  2021-11-30 22:04:12.000000000 
+0100
@@ -97,9 +97,7 @@
 
 namedexpr_test: test [':=' test]
 test: or_test ['if' or_test 'else' test] | lambdef
-test_nocond: or_test | lambdef_nocond
 lambdef: 'lambda' [varargslist] ':' test
-lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
 or_test: and_test ('or' and_test)*
 and_test: not_test ('and' not_test)*
 not_test: 'not' not_test | comparison
@@ -155,7 +153,7 @@
 comp_iter: comp_for | comp_if
 sync_comp_for: 'for' exprlist 'in' or_test [comp_iter]
 comp_for: ['async'] sync_comp_for
-comp_if: 'if' test_nocond [comp_iter]
+comp_if: 'if' or_test [comp_iter]
 
 # not used in grammar, but may appear in "node" passed from Parser to Compiler
 encoding_decl: NAME
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/parser.py 
new/parso-0.8.3/parso/python/parser.py
--- old/parso-0.8.2/parso/python/parser.py      2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/parser.py      2021-11-30 22:04:12.000000000 
+0100
@@ -96,8 +96,6 @@
                 # prefixes. Just ignore them.
                 children = [children[0]] + children[2:-1]
             node = self.default_node(nonterminal, children)
-        for c in children:
-            c.parent = node
         return node
 
     def convert_leaf(self, type, value, prefix, start_pos):
@@ -185,8 +183,6 @@
 
         if all_nodes:
             node = tree.PythonErrorNode(all_nodes)
-            for n in all_nodes:
-                n.parent = node
             self.stack[start_index - 1].nodes.append(node)
 
         self.stack[start_index:] = []
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/pep8.py 
new/parso-0.8.3/parso/python/pep8.py
--- old/parso-0.8.2/parso/python/pep8.py        2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/pep8.py        2021-11-30 22:04:12.000000000 
+0100
@@ -4,7 +4,7 @@
 
 from parso.python.errors import ErrorFinder, ErrorFinderConfig
 from parso.normalizer import Rule
-from parso.python.tree import search_ancestor, Flow, Scope
+from parso.python.tree import Flow, Scope
 
 
 _IMPORT_TYPES = ('import_name', 'import_from')
@@ -74,7 +74,7 @@
         parent_indentation = n.indentation
 
         next_leaf = leaf.get_next_leaf()
-        if '\n' in next_leaf.prefix:
+        if '\n' in next_leaf.prefix or '\r' in next_leaf.prefix:
             # This implies code like:
             # foobarbaz(
             #     a,
@@ -116,7 +116,7 @@
         self.type = IndentationTypes.IMPLICIT
 
         next_leaf = leaf.get_next_leaf()
-        if leaf == ':' and '\n' not in next_leaf.prefix:
+        if leaf == ':' and '\n' not in next_leaf.prefix and '\r' not in 
next_leaf.prefix:
             self.indentation += ' '
 
 
@@ -124,7 +124,7 @@
     type = IndentationTypes.BACKSLASH
 
     def __init__(self, config, parent_indentation, containing_leaf, spacing, 
parent=None):
-        expr_stmt = search_ancestor(containing_leaf, 'expr_stmt')
+        expr_stmt = containing_leaf.search_ancestor('expr_stmt')
         if expr_stmt is not None:
             equals = expr_stmt.children[-2]
 
@@ -216,8 +216,8 @@
             endmarker = node.children[-1]
             prev = endmarker.get_previous_leaf()
             prefix = endmarker.prefix
-            if (not prefix.endswith('\n') and (
-                    prefix or prev is None or prev.value != '\n')):
+            if (not prefix.endswith('\n') and not prefix.endswith('\r') and (
+                    prefix or prev is None or prev.value not in {'\n', '\r\n', 
'\r'})):
                 self.add_issue(endmarker, 292, "No newline at end of file")
 
         if typ in _IMPORT_TYPES:
@@ -465,7 +465,8 @@
                             + self._config.indentation:
                         self.add_issue(part, 129, "Line with same indent as 
next logical block")
                     elif indentation != should_be_indentation:
-                        if not self._check_tabs_spaces(spacing) and part.value 
!= '\n':
+                        if not self._check_tabs_spaces(spacing) and part.value 
not in \
+                                {'\n', '\r\n', '\r'}:
                             if value in '])}':
                                 if node.type == 
IndentationTypes.VERTICAL_BRACKET:
                                     self.add_issue(
@@ -652,7 +653,8 @@
             else:
                 prev_spacing = self._previous_spacing
                 if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
-                        and '\n' not in self._previous_leaf.prefix:
+                        and '\n' not in self._previous_leaf.prefix \
+                        and '\r' not in self._previous_leaf.prefix:
                     message = "Whitespace before operator doesn't match with 
whitespace after"
                     self.add_issue(spacing, 229, message)
 
@@ -724,11 +726,11 @@
 
     def add_issue(self, node, code, message):
         if self._previous_leaf is not None:
-            if search_ancestor(self._previous_leaf, 'error_node') is not None:
+            if self._previous_leaf.search_ancestor('error_node') is not None:
                 return
             if self._previous_leaf.type == 'error_leaf':
                 return
-        if search_ancestor(node, 'error_node') is not None:
+        if node.search_ancestor('error_node') is not None:
             return
         if code in (901, 903):
             # 901 and 903 are raised by the ErrorFinder.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/prefix.py 
new/parso-0.8.3/parso/python/prefix.py
--- old/parso-0.8.2/parso/python/prefix.py      2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/prefix.py      2021-11-30 22:04:12.000000000 
+0100
@@ -18,7 +18,7 @@
 
     @property
     def end_pos(self) -> Tuple[int, int]:
-        if self.value.endswith('\n'):
+        if self.value.endswith('\n') or self.value.endswith('\r'):
             return self.start_pos[0] + 1, 0
         if self.value == unicode_bom:
             # The bom doesn't have a length at the start of a Python file.
@@ -40,10 +40,18 @@
             self.start_pos
         )
 
+    def search_ancestor(self, *node_types):
+        node = self.parent
+        while node is not None:
+            if node.type in node_types:
+                return node
+            node = node.parent
+        return None
+
 
 _comment = r'#[^\n\r\f]*'
-_backslash = r'\\\r?\n'
-_newline = r'\r?\n'
+_backslash = r'\\\r?\n|\\\r'
+_newline = r'\r?\n|\r'
 _form_feed = r'\f'
 _only_spacing = '$'
 _spacing = r'[ \t]*'
@@ -86,7 +94,7 @@
             bom = True
 
         start = match.end(0)
-        if value.endswith('\n'):
+        if value.endswith('\n') or value.endswith('\r'):
             line += 1
             column = -start
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/tokenize.py 
new/parso-0.8.3/parso/python/tokenize.py
--- old/parso-0.8.2/parso/python/tokenize.py    2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/tokenize.py    2021-11-30 22:04:12.000000000 
+0100
@@ -548,7 +548,7 @@
                     additional_prefix = prefix + token
                 new_line = True
             elif initial == '#':  # Comments
-                assert not token.endswith("\n")
+                assert not token.endswith("\n") and not token.endswith("\r")
                 if fstring_stack and fstring_stack[-1].is_in_expr():
                     # `#` is not allowed in f-string expressions
                     yield PythonToken(ERRORTOKEN, initial, spos, prefix)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/python/tree.py 
new/parso-0.8.3/parso/python/tree.py
--- old/parso-0.8.2/parso/python/tree.py        2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/parso/python/tree.py        2021-11-30 22:04:12.000000000 
+0100
@@ -49,8 +49,7 @@
     from collections import Mapping
 from typing import Tuple
 
-from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, \
-    search_ancestor
+from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, 
search_ancestor  # noqa
 from parso.python.prefix import split_prefix
 from parso.utils import split_lines
 
@@ -549,7 +548,11 @@
     def __init__(self, children):
         super().__init__(children)
         parameters = self.children[2]  # After `def foo`
-        parameters.children[1:-1] = _create_params(parameters, 
parameters.children[1:-1])
+        parameters_children = parameters.children[1:-1]
+        # If input parameters list already has Param objects, keep it as is;
+        # otherwise, convert it to a list of Param objects.
+        if not any(isinstance(child, Param) for child in parameters_children):
+            parameters.children[1:-1] = _create_params(parameters, 
parameters_children)
 
     def _get_param_nodes(self):
         return self.children[2].children
@@ -652,7 +655,11 @@
         # We don't want to call the Function constructor, call its parent.
         super(Function, self).__init__(children)
         # Everything between `lambda` and the `:` operator is a parameter.
-        self.children[1:-2] = _create_params(self, self.children[1:-2])
+        parameters_children = self.children[1:-2]
+        # If input children list already has Param objects, keep it as is;
+        # otherwise, convert it to a list of Param objects.
+        if not any(isinstance(child, Param) for child in parameters_children):
+            self.children[1:-2] = _create_params(self, parameters_children)
 
     @property
     def name(self):
@@ -776,7 +783,7 @@
         return names
 
     def get_test_node_from_name(self, name):
-        node = search_ancestor(name, "with_item")
+        node = name.search_ancestor("with_item")
         if node is None:
             raise ValueError('The name is not actually part of a with 
statement.')
         return node.children[0]
@@ -1080,11 +1087,9 @@
     """
     type = 'param'
 
-    def __init__(self, children, parent):
+    def __init__(self, children, parent=None):
         super().__init__(children)
         self.parent = parent
-        for child in children:
-            child.parent = self
 
     @property
     def star_count(self):
@@ -1171,7 +1176,7 @@
         """
         Returns the function/lambda of a parameter.
         """
-        return search_ancestor(self, 'funcdef', 'lambdef')
+        return self.search_ancestor('funcdef', 'lambdef')
 
     def get_code(self, include_prefix=True, include_comma=True):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/tree.py 
new/parso-0.8.3/parso/tree.py
--- old/parso-0.8.2/parso/tree.py       2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/parso/tree.py       2021-11-30 22:04:12.000000000 +0100
@@ -1,33 +1,41 @@
 from abc import abstractmethod, abstractproperty
-from typing import List, Optional, Tuple
+from typing import List, Optional, Tuple, Union
 
 from parso.utils import split_lines
 
 
-def search_ancestor(node, *node_types):
+def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 
'Optional[BaseNode]':
     """
     Recursively looks at the parents of a node and returns the first found node
-    that matches node_types. Returns ``None`` if no matching node is found.
+    that matches ``node_types``. Returns ``None`` if no matching node is found.
+
+    This function is deprecated, use :meth:`NodeOrLeaf.search_ancestor` 
instead.
 
     :param node: The ancestors of this node will be checked.
     :param node_types: type names that are searched for.
-    :type node_types: tuple of str
     """
-    while True:
-        node = node.parent
-        if node is None or node.type in node_types:
-            return node
+    n = node.parent
+    while n is not None:
+        if n.type in node_types:
+            return n
+        n = n.parent
+    return None
 
 
 class NodeOrLeaf:
     """
     The base class for nodes and leaves.
     """
-    __slots__ = ()
+    __slots__ = ('parent',)
     type: str
     '''
     The type is a string that typically matches the types of the grammar file.
     '''
+    parent: 'Optional[BaseNode]'
+    '''
+    The parent :class:`BaseNode` of this node or leaf.
+    None if this is the root node.
+    '''
 
     def get_root_node(self):
         """
@@ -173,13 +181,117 @@
             e.g. a statement.
         """
 
+    def search_ancestor(self, *node_types: str) -> 'Optional[BaseNode]':
+        """
+        Recursively looks at the parents of this node or leaf and returns the
+        first found node that matches ``node_types``. Returns ``None`` if no
+        matching node is found.
+
+        :param node_types: type names that are searched for.
+        """
+        node = self.parent
+        while node is not None:
+            if node.type in node_types:
+                return node
+            node = node.parent
+        return None
+
+    def dump(self, *, indent: Optional[Union[int, str]] = 4) -> str:
+        """
+        Returns a formatted dump of the parser tree rooted at this node or 
leaf. This is
+        mainly useful for debugging purposes.
+
+        The ``indent`` parameter is interpreted in a similar way as 
:py:func:`ast.dump`.
+        If ``indent`` is a non-negative integer or string, then the tree will 
be
+        pretty-printed with that indent level. An indent level of 0, negative, 
or ``""``
+        will only insert newlines. ``None`` selects the single line 
representation.
+        Using a positive integer indent indents that many spaces per level. If
+        ``indent`` is a string (such as ``"\\t"``), that string is used to 
indent each
+        level.
+
+        :param indent: Indentation style as described above. The default 
indentation is
+            4 spaces, which yields a pretty-printed dump.
+
+        >>> import parso
+        >>> print(parso.parse("lambda x, y: x + y").dump())
+        Module([
+            Lambda([
+                Keyword('lambda', (1, 0)),
+                Param([
+                    Name('x', (1, 7), prefix=' '),
+                    Operator(',', (1, 8)),
+                ]),
+                Param([
+                    Name('y', (1, 10), prefix=' '),
+                ]),
+                Operator(':', (1, 11)),
+                PythonNode('arith_expr', [
+                    Name('x', (1, 13), prefix=' '),
+                    Operator('+', (1, 15), prefix=' '),
+                    Name('y', (1, 17), prefix=' '),
+                ]),
+            ]),
+            EndMarker('', (1, 18)),
+        ])
+        """
+        if indent is None:
+            newline = False
+            indent_string = ''
+        elif isinstance(indent, int):
+            newline = True
+            indent_string = ' ' * indent
+        elif isinstance(indent, str):
+            newline = True
+            indent_string = indent
+        else:
+            raise TypeError(f"expect 'indent' to be int, str or None, got 
{indent!r}")
+
+        def _format_dump(node: NodeOrLeaf, indent: str = '', top_level: bool = 
True) -> str:
+            result = ''
+            node_type = type(node).__name__
+            if isinstance(node, Leaf):
+                result += f'{indent}{node_type}('
+                if isinstance(node, ErrorLeaf):
+                    result += f'{node.token_type!r}, '
+                elif isinstance(node, TypedLeaf):
+                    result += f'{node.type!r}, '
+                result += f'{node.value!r}, {node.start_pos!r}'
+                if node.prefix:
+                    result += f', prefix={node.prefix!r}'
+                result += ')'
+            elif isinstance(node, BaseNode):
+                result += f'{indent}{node_type}('
+                if isinstance(node, Node):
+                    result += f'{node.type!r}, '
+                result += '['
+                if newline:
+                    result += '\n'
+                for child in node.children:
+                    result += _format_dump(child, indent=indent + 
indent_string, top_level=False)
+                result += f'{indent}])'
+            else:  # pragma: no cover
+                # We shouldn't ever reach here, unless:
+                # - `NodeOrLeaf` is incorrectly subclassed elsewhere
+                # - or a node's children list contains invalid nodes or leafs
+                # Both are unexpected internal errors.
+                raise TypeError(f'unsupported node encountered: {node!r}')
+            if not top_level:
+                if newline:
+                    result += ',\n'
+                else:
+                    result += ', '
+            return result
+
+        return _format_dump(self)
+
 
 class Leaf(NodeOrLeaf):
     '''
     Leafs are basically tokens with a better API. Leafs exactly know where they
     were defined and what text preceeds them.
     '''
-    __slots__ = ('value', 'parent', 'line', 'column', 'prefix')
+    __slots__ = ('value', 'line', 'column', 'prefix')
+    prefix: str
 
     def __init__(self, value: str, start_pos: Tuple[int, int], prefix: str = 
'') -> None:
         self.value = value
@@ -257,7 +369,7 @@
     The super class for all nodes.
     A node has children, a type and possibly a parent node.
     """
-    __slots__ = ('children', 'parent')
+    __slots__ = ('children',)
 
     def __init__(self, children: List[NodeOrLeaf]) -> None:
         self.children = children
@@ -266,9 +378,11 @@
         """
         self.parent: Optional[BaseNode] = None
         '''
-        The parent :class:`BaseNode` of this leaf.
+        The parent :class:`BaseNode` of this node.
         None if this is the root node.
         '''
+        for child in children:
+            child.parent = self
 
     @property
     def start_pos(self) -> Tuple[int, int]:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso/utils.py 
new/parso-0.8.3/parso/utils.py
--- old/parso-0.8.2/parso/utils.py      2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/parso/utils.py      2021-11-30 22:04:12.000000000 +0100
@@ -92,7 +92,7 @@
             # UTF-8 byte-order mark
             return 'utf-8'
 
-        first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0)
+        first_two_lines = re.match(br'(?:[^\r\n]*(?:\r\n|\r|\n)){0,2}', 
source).group(0)
         possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)",
                                       first_two_lines)
         if possible_encoding:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso.egg-info/PKG-INFO 
new/parso-0.8.3/parso.egg-info/PKG-INFO
--- old/parso-0.8.2/parso.egg-info/PKG-INFO     2021-03-30 22:43:48.000000000 
+0200
+++ new/parso-0.8.3/parso.egg-info/PKG-INFO     2021-11-30 22:05:45.000000000 
+0100
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: parso
-Version: 0.8.2
+Version: 0.8.3
 Summary: A Python Parser
 Home-page: https://github.com/davidhalter/parso
 Author: David Halter
@@ -13,9 +13,9 @@
         ###################################################################
         
         
-        .. image:: https://travis-ci.org/davidhalter/parso.svg?branch=master
-            :target: https://travis-ci.org/davidhalter/parso
-            :alt: Travis CI build status
+        .. image:: 
https://github.com/davidhalter/parso/workflows/Build/badge.svg?branch=master
+            :target: https://github.com/davidhalter/parso/actions
+            :alt: GitHub Actions build status
         
         .. image:: 
https://coveralls.io/repos/github/davidhalter/parso/badge.svg?branch=master
             :target: 
https://coveralls.io/github/davidhalter/parso?branch=master
@@ -113,6 +113,11 @@
         Unreleased
         ++++++++++
         
+        0.8.3 (2021-11-30)
+        ++++++++++++++++++
+        
+        - Add basic support for Python 3.11 and 3.12
+        
         0.8.2 (2021-03-30)
         ++++++++++++++++++
         
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/parso.egg-info/SOURCES.txt 
new/parso-0.8.3/parso.egg-info/SOURCES.txt
--- old/parso-0.8.2/parso.egg-info/SOURCES.txt  2021-03-30 22:43:48.000000000 
+0200
+++ new/parso-0.8.3/parso.egg-info/SOURCES.txt  2021-11-30 22:05:45.000000000 
+0100
@@ -49,6 +49,8 @@
 parso/python/diff.py
 parso/python/errors.py
 parso/python/grammar310.txt
+parso/python/grammar311.txt
+parso/python/grammar312.txt
 parso/python/grammar36.txt
 parso/python/grammar37.txt
 parso/python/grammar38.txt
@@ -64,6 +66,7 @@
 test/fuzz_diff_parser.py
 test/test_cache.py
 test/test_diff_parser.py
+test/test_dump_tree.py
 test/test_error_recovery.py
 test/test_file_python_errors.py
 test/test_fstring.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_cache.py 
new/parso-0.8.3/test/test_cache.py
--- old/parso-0.8.2/test/test_cache.py  2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/test/test_cache.py  2021-11-30 22:04:12.000000000 +0100
@@ -137,7 +137,7 @@
     parse('somecode', cache=True, path=p)
     node_cache_item = next(iter(parser_cache.values()))[p]
     now = time.time()
-    assert node_cache_item.last_used < now
+    assert node_cache_item.last_used <= now
 
     if use_file_io:
         f = _FixedTimeFileIO(p, 'code', node_cache_item.last_used - 10)
@@ -185,6 +185,9 @@
     was_called = False
 
     monkeypatch.setattr(cache, '_save_to_file_system', save)
-    with pytest.warns(Warning):
-        parse(path=__file__, cache=True, diff_cache=True)
-    assert was_called
+    try:
+        with pytest.warns(Warning):
+            parse(path=__file__, cache=True, diff_cache=True)
+        assert was_called
+    finally:
+        parser_cache.clear()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_dump_tree.py 
new/parso-0.8.3/test/test_dump_tree.py
--- old/parso-0.8.2/test/test_dump_tree.py      1970-01-01 01:00:00.000000000 
+0100
+++ new/parso-0.8.3/test/test_dump_tree.py      2021-11-30 22:04:12.000000000 
+0100
@@ -0,0 +1,182 @@
+from textwrap import dedent
+
+import pytest
+
+from parso import parse
+# Using star import for easier eval testing below.
+from parso.python.tree import *  # noqa: F403
+from parso.tree import *  # noqa: F403
+from parso.tree import ErrorLeaf, TypedLeaf
+
+
+@pytest.mark.parametrize(
+    'indent,expected_dump', [
+        (None, "Module(["
+               "Lambda(["
+               "Keyword('lambda', (1, 0)), "
+               "Param(["
+               "Name('x', (1, 7), prefix=' '), "
+               "Operator(',', (1, 8)), "
+               "]), "
+               "Param(["
+               "Name('y', (1, 10), prefix=' '), "
+               "]), "
+               "Operator(':', (1, 11)), "
+               "PythonNode('arith_expr', ["
+               "Name('x', (1, 13), prefix=' '), "
+               "Operator('+', (1, 15), prefix=' '), "
+               "Name('y', (1, 17), prefix=' '), "
+               "]), "
+               "]), "
+               "EndMarker('', (1, 18)), "
+               "])"),
+        (0, dedent('''\
+            Module([
+            Lambda([
+            Keyword('lambda', (1, 0)),
+            Param([
+            Name('x', (1, 7), prefix=' '),
+            Operator(',', (1, 8)),
+            ]),
+            Param([
+            Name('y', (1, 10), prefix=' '),
+            ]),
+            Operator(':', (1, 11)),
+            PythonNode('arith_expr', [
+            Name('x', (1, 13), prefix=' '),
+            Operator('+', (1, 15), prefix=' '),
+            Name('y', (1, 17), prefix=' '),
+            ]),
+            ]),
+            EndMarker('', (1, 18)),
+            ])''')),
+        (4, dedent('''\
+            Module([
+                Lambda([
+                    Keyword('lambda', (1, 0)),
+                    Param([
+                        Name('x', (1, 7), prefix=' '),
+                        Operator(',', (1, 8)),
+                    ]),
+                    Param([
+                        Name('y', (1, 10), prefix=' '),
+                    ]),
+                    Operator(':', (1, 11)),
+                    PythonNode('arith_expr', [
+                        Name('x', (1, 13), prefix=' '),
+                        Operator('+', (1, 15), prefix=' '),
+                        Name('y', (1, 17), prefix=' '),
+                    ]),
+                ]),
+                EndMarker('', (1, 18)),
+            ])''')),
+        ('\t', dedent('''\
+            Module([
+            \tLambda([
+            \t\tKeyword('lambda', (1, 0)),
+            \t\tParam([
+            \t\t\tName('x', (1, 7), prefix=' '),
+            \t\t\tOperator(',', (1, 8)),
+            \t\t]),
+            \t\tParam([
+            \t\t\tName('y', (1, 10), prefix=' '),
+            \t\t]),
+            \t\tOperator(':', (1, 11)),
+            \t\tPythonNode('arith_expr', [
+            \t\t\tName('x', (1, 13), prefix=' '),
+            \t\t\tOperator('+', (1, 15), prefix=' '),
+            \t\t\tName('y', (1, 17), prefix=' '),
+            \t\t]),
+            \t]),
+            \tEndMarker('', (1, 18)),
+            ])''')),
+    ]
+)
+def test_dump_parser_tree(indent, expected_dump):
+    code = "lambda x, y: x + y"
+    module = parse(code)
+    assert module.dump(indent=indent) == expected_dump
+
+    # Check that dumped tree can be eval'd to recover the parser tree and 
original code.
+    recovered_code = eval(expected_dump).get_code()
+    assert recovered_code == code
+
+
+@pytest.mark.parametrize(
+    'node,expected_dump,expected_code', [
+        (  # Dump intermediate node (not top level module)
+            parse("def foo(x, y): return x + y").children[0], dedent('''\
+                Function([
+                    Keyword('def', (1, 0)),
+                    Name('foo', (1, 4), prefix=' '),
+                    PythonNode('parameters', [
+                        Operator('(', (1, 7)),
+                        Param([
+                            Name('x', (1, 8)),
+                            Operator(',', (1, 9)),
+                        ]),
+                        Param([
+                            Name('y', (1, 11), prefix=' '),
+                        ]),
+                        Operator(')', (1, 12)),
+                    ]),
+                    Operator(':', (1, 13)),
+                    ReturnStmt([
+                        Keyword('return', (1, 15), prefix=' '),
+                        PythonNode('arith_expr', [
+                            Name('x', (1, 22), prefix=' '),
+                            Operator('+', (1, 24), prefix=' '),
+                            Name('y', (1, 26), prefix=' '),
+                        ]),
+                    ]),
+                ])'''),
+            "def foo(x, y): return x + y",
+        ),
+        (  # Dump leaf
+            parse("def foo(x, y): return x + y").children[0].children[0],
+            "Keyword('def', (1, 0))",
+            'def',
+        ),
+        (  # Dump ErrorLeaf
+            ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' '),
+            "ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' ')",
+            ' error_code',
+        ),
+        (  # Dump TypedLeaf
+            TypedLeaf('type', 'value', (1, 1)),
+            "TypedLeaf('type', 'value', (1, 1))",
+            'value',
+        ),
+    ]
+)
+def test_dump_parser_tree_not_top_level_module(node, expected_dump, 
expected_code):
+    dump_result = node.dump()
+    assert dump_result == expected_dump
+
+    # Check that dumped tree can be eval'd to recover the parser tree and 
original code.
+    recovered_code = eval(dump_result).get_code()
+    assert recovered_code == expected_code
+
+
+def test_dump_parser_tree_invalid_args():
+    module = parse("lambda x, y: x + y")
+
+    with pytest.raises(TypeError):
+        module.dump(indent=1.1)
+
+
+def test_eval_dump_recovers_parent():
+    module = parse("lambda x, y: x + y")
+    module2 = eval(module.dump())
+    assert module2.parent is None
+    lambda_node = module2.children[0]
+    assert lambda_node.parent is module2
+    assert module2.children[1].parent is module2
+    assert lambda_node.children[0].parent is lambda_node
+    param_node = lambda_node.children[1]
+    assert param_node.parent is lambda_node
+    assert param_node.children[0].parent is param_node
+    assert param_node.children[1].parent is param_node
+    arith_expr_node = lambda_node.children[-1]
+    assert arith_expr_node.parent is lambda_node
+    assert arith_expr_node.children[0].parent is arith_expr_node
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_parser_tree.py 
new/parso-0.8.3/test/test_parser_tree.py
--- old/parso-0.8.2/test/test_parser_tree.py    2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/test/test_parser_tree.py    2021-11-30 22:04:12.000000000 
+0100
@@ -6,6 +6,7 @@
 
 from parso import parse
 from parso.python import tree
+from parso.tree import search_ancestor
 
 
 class TestsFunctionAndLambdaParsing:
@@ -239,3 +240,27 @@
         for name in with_stmt.get_defined_names(include_setitem=True)
     ]
     assert tests == ["A", "B", "C", "D"]
+
+
+sample_module = parse('x + y')
+sample_node = sample_module.children[0]
+sample_leaf = sample_node.children[0]
+
+
+@pytest.mark.parametrize(
+    'node,node_types,expected_ancestor', [
+        (sample_module, ('file_input',), None),
+        (sample_node, ('arith_expr',), None),
+        (sample_node, ('file_input', 'eval_input'), sample_module),
+        (sample_leaf, ('name',), None),
+        (sample_leaf, ('arith_expr',), sample_node),
+        (sample_leaf, ('file_input',), sample_module),
+        (sample_leaf, ('file_input', 'arith_expr'), sample_node),
+        (sample_leaf, ('shift_expr',), None),
+        (sample_leaf, ('name', 'shift_expr',), None),
+        (sample_leaf, (), None),
+    ]
+)
+def test_search_ancestor(node, node_types, expected_ancestor):
+    assert node.search_ancestor(*node_types) is expected_ancestor
+    assert search_ancestor(node, *node_types) is expected_ancestor  # 
deprecated
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_pep8.py 
new/parso-0.8.3/test/test_pep8.py
--- old/parso-0.8.2/test/test_pep8.py   2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/test/test_pep8.py   2021-11-30 22:04:12.000000000 +0100
@@ -15,6 +15,8 @@
         assert issue.code == 292
 
     assert not issues('asdf = 1\n')
+    assert not issues('asdf = 1\r\n')
+    assert not issues('asdf = 1\r')
     assert_issue('asdf = 1')
     assert_issue('asdf = 1\n# foo')
     assert_issue('# foobar')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_pgen2.py 
new/parso-0.8.3/test/test_pgen2.py
--- old/parso-0.8.2/test/test_pgen2.py  2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/test/test_pgen2.py  2021-11-30 22:04:12.000000000 +0100
@@ -339,7 +339,7 @@
 @pytest.mark.parametrize(
     'grammar, error_match', [
         ['foo: bar | baz\nbar: NAME\nbaz: NAME\n',
-         r"foo is ambiguous.*given a PythonTokenTypes\.NAME.*bar or baz"],
+         r"foo is ambiguous.*given a (PythonTokenTypes\.)?NAME.*bar or baz"],
         ['''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''',
          r"foo is ambiguous.*given a ReservedString\(x\).*bar or baz"],
         ['''foo: bar | 'x'\nbar: 'x'\n''',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_prefix.py 
new/parso-0.8.3/test/test_prefix.py
--- old/parso-0.8.2/test/test_prefix.py 2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/test/test_prefix.py 2021-11-30 22:04:12.000000000 +0100
@@ -19,6 +19,7 @@
     (' \f ', ['\f', ' ']),
     (' \f ', ['\f', ' ']),
     (' \r\n', ['\r\n', '']),
+    (' \r', ['\r', '']),
     ('\\\n', ['\\\n', '']),
     ('\\\r\n', ['\\\r\n', '']),
     ('\t\t\n\t', ['\n', '\t']),
@@ -34,7 +35,7 @@
         assert pt.value == expected
 
         # Calculate the estimated end_pos
-        if expected.endswith('\n'):
+        if expected.endswith('\n') or expected.endswith('\r'):
             end_pos = start_pos[0] + 1, 0
         else:
             end_pos = start_pos[0], start_pos[1] + len(expected) + 
len(pt.spacing)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_python_errors.py 
new/parso-0.8.3/test/test_python_errors.py
--- old/parso-0.8.2/test/test_python_errors.py  2021-03-30 22:43:19.000000000 
+0200
+++ new/parso-0.8.3/test/test_python_errors.py  2021-11-30 22:04:12.000000000 
+0100
@@ -57,10 +57,10 @@
         error, = errors
         actual = error.message
     assert actual in wanted
-    if sys.version_info[:2] < (3, 8):
+    if sys.version_info[:2] not in ((3, 8), (3, 9)):
         assert line_nr == error.start_pos[0]
     else:
-        assert line_nr == 0  # For whatever reason this is zero in Python 3.8+
+        assert line_nr == 0  # For whatever reason this is zero in Python 
3.8/3.9
 
 
 @pytest.mark.parametrize(
@@ -140,13 +140,16 @@
 
 
 def test_default_except_error_postition():
-    # For this error the position seemed to be one line off, but that doesn't
-    # really matter.
+    # For this error the position seemed to be one line off in Python < 3.10,
+    # but that doesn't really matter.
     code = 'try: pass\nexcept: pass\nexcept X: pass'
     wanted, line_nr = _get_actual_exception(code)
     error, = _get_error_list(code)
     assert error.message in wanted
-    assert line_nr != error.start_pos[0]
+    if sys.version_info[:2] >= (3, 10):
+        assert line_nr == error.start_pos[0]
+    else:
+        assert line_nr != error.start_pos[0]
     # I think this is the better position.
     assert error.start_pos[0] == 2
 
@@ -494,3 +497,14 @@
 )
 def test_valid_del(code):
     assert not _get_error_list(code)
+
+
+@pytest.mark.parametrize(
+    ('source', 'version', 'no_errors'), [
+        ('[x for x in range(10) if lambda: 1]', '3.8', True),
+        ('[x for x in range(10) if lambda: 1]', '3.9', False),
+        ('[x for x in range(10) if (lambda: 1)]', '3.9', True),
+    ]
+)
+def test_lambda_in_comp_if(source, version, no_errors):
+    assert bool(_get_error_list(source, version=version)) ^ no_errors
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/parso-0.8.2/test/test_utils.py 
new/parso-0.8.3/test/test_utils.py
--- old/parso-0.8.2/test/test_utils.py  2021-03-30 22:43:19.000000000 +0200
+++ new/parso-0.8.3/test/test_utils.py  2021-11-30 22:04:12.000000000 +0100
@@ -74,6 +74,10 @@
     ('code', 'errors'), [
         (b'# coding: wtf-12\nfoo', 'strict'),
         (b'# coding: wtf-12\nfoo', 'replace'),
+        (b'# coding: wtf-12\r\nfoo', 'strict'),
+        (b'# coding: wtf-12\r\nfoo', 'replace'),
+        (b'# coding: wtf-12\rfoo', 'strict'),
+        (b'# coding: wtf-12\rfoo', 'replace'),
     ]
 )
 def test_bytes_to_unicode_failing_encoding(code, errors):

Reply via email to