Author: Matti Picus <[email protected]>
Branch: unicode-utf8
Changeset: r94754:daeb185c5482
Date: 2018-06-10 22:26 -0700
http://bitbucket.org/pypy/pypy/changeset/daeb185c5482/
Log: merge default into branch
diff too long, truncating to 2000 out of 5932 lines
diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py
--- a/lib_pypy/grp.py
+++ b/lib_pypy/grp.py
@@ -4,6 +4,8 @@
from _pwdgrp_cffi import ffi, lib
import _structseq
+import thread
+_lock = thread.allocate_lock()
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
@@ -33,32 +35,35 @@
@builtinify
def getgrgid(gid):
- res = lib.getgrgid(gid)
- if not res:
- # XXX maybe check error eventually
- raise KeyError(gid)
- return _group_from_gstruct(res)
+ with _lock:
+ res = lib.getgrgid(gid)
+ if not res:
+ # XXX maybe check error eventually
+ raise KeyError(gid)
+ return _group_from_gstruct(res)
@builtinify
def getgrnam(name):
if not isinstance(name, basestring):
raise TypeError("expected string")
name = str(name)
- res = lib.getgrnam(name)
- if not res:
- raise KeyError("'getgrnam(): name not found: %s'" % name)
- return _group_from_gstruct(res)
+ with _lock:
+ res = lib.getgrnam(name)
+ if not res:
+ raise KeyError("'getgrnam(): name not found: %s'" % name)
+ return _group_from_gstruct(res)
@builtinify
def getgrall():
- lib.setgrent()
lst = []
- while 1:
- p = lib.getgrent()
- if not p:
- break
- lst.append(_group_from_gstruct(p))
- lib.endgrent()
+ with _lock:
+ lib.setgrent()
+ while 1:
+ p = lib.getgrent()
+ if not p:
+ break
+ lst.append(_group_from_gstruct(p))
+ lib.endgrent()
return lst
__all__ = ('struct_group', 'getgrgid', 'getgrnam', 'getgrall')
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -12,6 +12,8 @@
from _pwdgrp_cffi import ffi, lib
import _structseq
+import thread
+_lock = thread.allocate_lock()
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
@@ -55,10 +57,11 @@
Return the password database entry for the given numeric user ID.
See pwd.__doc__ for more on password database entries.
"""
- pw = lib.getpwuid(uid)
- if not pw:
- raise KeyError("getpwuid(): uid not found: %s" % uid)
- return _mkpwent(pw)
+ with _lock:
+ pw = lib.getpwuid(uid)
+ if not pw:
+ raise KeyError("getpwuid(): uid not found: %s" % uid)
+ return _mkpwent(pw)
@builtinify
def getpwnam(name):
@@ -71,10 +74,11 @@
if not isinstance(name, basestring):
raise TypeError("expected string")
name = str(name)
- pw = lib.getpwnam(name)
- if not pw:
- raise KeyError("getpwname(): name not found: %s" % name)
- return _mkpwent(pw)
+ with _lock:
+ pw = lib.getpwnam(name)
+ if not pw:
+ raise KeyError("getpwname(): name not found: %s" % name)
+ return _mkpwent(pw)
@builtinify
def getpwall():
@@ -84,13 +88,14 @@
See pwd.__doc__ for more on password database entries.
"""
users = []
- lib.setpwent()
- while True:
- pw = lib.getpwent()
- if not pw:
- break
- users.append(_mkpwent(pw))
- lib.endpwent()
+ with _lock:
+ lib.setpwent()
+ while True:
+ pw = lib.getpwent()
+ if not pw:
+ break
+ users.append(_mkpwent(pw))
+ lib.endpwent()
return users
__all__ = ('struct_passwd', 'getpwuid', 'getpwnam', 'getpwall')
diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst
--- a/pypy/doc/install.rst
+++ b/pypy/doc/install.rst
@@ -20,7 +20,7 @@
OS and architecture. You may be able to either use the
`most recent release`_ or one of our `development nightly build`_. These
builds depend on dynamically linked libraries that may not be available on your
-OS. See the section about `Linux binaries` for more info and alternatives that
+OS. See the section about `Linux binaries`_ for more info and alternatives that
may work on your system.
Please note that the nightly builds are not
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -7,9 +7,9 @@
.. branch: cppyy-packaging
-Upgrade to backend 0.6.0, support exception handling from wrapped functions,
-update enum handling, const correctness for data members and associated tests,
-support anonymous enums, support for function pointer arguments
+Upgrade to backend 1.1.0, improved handling of templated methods and
+functions (in particular automatic deduction of types), improved pythonization
+interface, and a range of compatibility fixes for Python3
.. branch: socket_default_timeout_blockingness
@@ -28,9 +28,11 @@
The reverse-debugger branch has been merged. For more information, see
https://bitbucket.org/pypy/revdb
+.. branch: pyparser-improvements-3
+
+Small refactorings in the Python parser.
+
.. branch: unicode-utf8-re
.. branch: utf8-io
Utf8 handling for unicode
-
-
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -29,29 +29,28 @@
``C:\Users\<user name>\AppData\Local\Programs\Common\Microsoft\Visual C++ for
Python``
or in
``C:\Program Files (x86)\Common Files\Microsoft\Visual C++ for Python``.
-A current version of ``setuptools`` will be able to find it there. For
-Windows 10, you must right-click the download, and under ``Properties`` ->
-``Compatibility`` mark it as ``Run run this program in comatibility mode for``
-``Previous version...``. Also, you must download and install the ``.Net
Framework 3.5``,
+A current version of ``setuptools`` will be able to find it there.
+Also, you must download and install the ``.Net Framework 3.5``,
otherwise ``mt.exe`` will silently fail. Installation will begin automatically
by running the mt.exe command by hand from a DOS window (that is how the author
discovered the problem).
.. _Microsoft Visual C++ Compiler for Python 2.7:
https://www.microsoft.com/EN-US/DOWNLOAD/DETAILS.ASPX?ID=44266
-Installing "Build Tools for Visual Studio 2017" (for Python 3)
+Installing "Build Tools for Visual Studio 2015" (for Python 3)
--------------------------------------------------------------
-As documented in the CPython Wiki_, CPython now recommends Visual C++ version
-14.0. A compact version of the compiler suite can be obtained from Microsoft_
-downloads, search the page for "Build Tools for Visual Studio 2017".
+As documented in the CPython Wiki_, CPython recommends Visual C++ version
+14.0 for python version 3.5. A compact version of the compiler suite can be
+obtained from Microsoft_ downloads, search the page for "Microsoft Build Tools
2015".
-You will also need to install the the `Windows SDK`_ in order to use the
-`mt.exe` mainfest compiler.
+You will need to reboot the computer for the installation to successfully
install and
+run the `mt.exe` manifest compiler. The installation will set the
+`VS140COMNTOOLS` environment variable, this is key to distutils/setuptools
+finding the compiler
.. _Wiki: https://wiki.python.org/moin/WindowsCompilers
-.. _Microsoft: https://www.visualstudio.com/downloads
-.. _`Windows SDK`:
https://developer.microsoft.com/en-us/windows/downloads/windows-10-sdk
+.. _Microsoft: https://www.visualstudio.com/vs/older-downloads/
Translating PyPy with Visual Studio
-----------------------------------
@@ -99,6 +98,9 @@
Setting Up Visual Studio 9.0 for building SSL in Python3
--------------------------------------------------------
+**Note: this is old information, left for historical reference. We recommend
+using Visual Studio 2015, which now seems to properly set this all up.**
+
On Python3, the ``ssl`` module is based on ``cffi``, and requires a build step
after
translation. However ``distutils`` does not support the Micorosft-provided
Visual C
compiler, and ``cffi`` depends on ``distutils`` to find the compiler. The
@@ -146,14 +148,14 @@
Installing external packages
----------------------------
-We uses a `repository` parallel to pypy to hold binary compiled versions of the
+We use a subrepository_ inside pypy to hold binary compiled versions of the
build dependencies for windows. As part of the `rpython` setup stage,
environment
variables will be set to use these dependencies. The repository has a README
file on how to replicate, and a branch for each supported platform. You may run
the `get_externals.py` utility to checkout the proper branch for your platform
and PyPy version.
-.. _repository: https://bitbucket.org/pypy/external
+.. _subrepository: https://bitbucket.org/pypy/external
Using the mingw compiler
------------------------
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py
b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -1096,7 +1096,7 @@
s = self.get_first_expr("'hi' ' implicitly' ' extra'")
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap("hi implicitly extra"))
- sentence = u"Die Männer ärgen sich!"
+ sentence = u"Die Männer ärgern sich!"
source = u"# coding: utf-7\nstuff = u'%s'" % (sentence,)
info = pyparse.CompileInfo("<test>", "exec")
tree = self.parser.parse_source(source.encode("utf-7"), info)
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py
b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -27,7 +27,7 @@
generator._resolve_block_targets(blocks)
return generator, blocks
-class TestCompiler:
+class BaseTestCompiler:
"""These tests compile snippets of code and check them by
running them with our own interpreter. These are thus not
completely *unit* tests, but given that our interpreter is
@@ -74,6 +74,9 @@
def error_test(self, source, exc_type):
py.test.raises(exc_type, self.simple_test, source, None, None)
+
+class TestCompiler(BaseTestCompiler):
+
def test_issue_713(self):
func = "def f(_=2): return (_ if _ else _) if False else _"
yield self.st, func, "f()", 2
@@ -953,9 +956,11 @@
yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()",
'repr(x)', '(0.0, -0.0)')
+class TestCompilerRevDB(BaseTestCompiler):
+ spaceconfig = {"translation.reverse_debugger": True}
+
def test_revdb_metavar(self):
from pypy.interpreter.reverse_debugging import dbstate, setup_revdb
- self.space.config.translation.reverse_debugger = True
self.space.reverse_debugging = True
try:
setup_revdb(self.space)
diff --git a/pypy/interpreter/pyparser/future.py
b/pypy/interpreter/pyparser/future.py
--- a/pypy/interpreter/pyparser/future.py
+++ b/pypy/interpreter/pyparser/future.py
@@ -43,7 +43,7 @@
self.tok = self.tokens[index]
def skip(self, n):
- if self.tok[0] == n:
+ if self.tok.token_type == n:
self.next()
return True
else:
@@ -51,7 +51,7 @@
def skip_name(self, name):
from pypy.interpreter.pyparser import pygram
- if self.tok[0] == pygram.tokens.NAME and self.tok[1] == name:
+ if self.tok.token_type == pygram.tokens.NAME and self.tok.value ==
name:
self.next()
return True
else:
@@ -59,8 +59,8 @@
def next_feature_name(self):
from pypy.interpreter.pyparser import pygram
- if self.tok[0] == pygram.tokens.NAME:
- name = self.tok[1]
+ if self.tok.token_type == pygram.tokens.NAME:
+ name = self.tok.value
self.next()
if self.skip_name("as"):
self.skip(pygram.tokens.NAME)
@@ -101,7 +101,7 @@
# somewhere inside the last __future__ import statement
# (at the start would be fine too, but it's easier to grab a
# random position inside)
- last_position = (it.tok[2], it.tok[3])
+ last_position = (it.tok.lineno, it.tok.column)
result |= future_flags.get_compiler_feature(it.next_feature_name())
while it.skip(pygram.tokens.COMMA):
result |= future_flags.get_compiler_feature(it.next_feature_name())
diff --git a/pypy/interpreter/pyparser/parser.py
b/pypy/interpreter/pyparser/parser.py
--- a/pypy/interpreter/pyparser/parser.py
+++ b/pypy/interpreter/pyparser/parser.py
@@ -28,11 +28,24 @@
new.symbol_ids = self.symbol_ids
new.symbols_names = self.symbol_names
new.keyword_ids = self.keyword_ids
+ new.token_to_error_string = self.token_to_error_string
new.dfas = self.dfas
new.labels = self.labels
new.token_ids = self.token_ids
return new
+
+ def classify(self, token):
+ """Find the label for a token."""
+ if token.token_type == self.KEYWORD_TOKEN:
+ label_index = self.keyword_ids.get(token.value, -1)
+ if label_index != -1:
+ return label_index
+ label_index = self.token_ids.get(token.token_type, -1)
+ if label_index == -1:
+ raise ParseError("invalid token", token)
+ return label_index
+
def _freeze_(self):
# Remove some attributes not used in parsing.
try:
@@ -65,6 +78,33 @@
b[pos] |= bit
return str(b)
+
+class Token(object):
+ def __init__(self, token_type, value, lineno, column, line):
+ self.token_type = token_type
+ self.value = value
+ self.lineno = lineno
+ # 0-based offset
+ self.column = column
+ self.line = line
+
+ def __repr__(self):
+ return "Token(%s, %s)" % (self.token_type, self.value)
+
+ def __eq__(self, other):
+ # for tests
+ return (
+ self.token_type == other.token_type and
+ self.value == other.value and
+ self.lineno == other.lineno and
+ self.column == other.column and
+ self.line == other.line
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+
class Node(object):
__slots__ = ("type", )
@@ -105,6 +145,11 @@
self.lineno = lineno
self.column = column
+ @staticmethod
+ def fromtoken(token):
+ return Terminal(
+ token.token_type, token.value, token.lineno, token.column)
+
def __repr__(self):
return "Terminal(type=%s, value=%r)" % (self.type, self.value)
@@ -193,20 +238,14 @@
class ParseError(Exception):
- def __init__(self, msg, token_type, value, lineno, column, line,
- expected=-1, expected_str=None):
+ def __init__(self, msg, token, expected=-1, expected_str=None):
self.msg = msg
- self.token_type = token_type
- self.value = value
- self.lineno = lineno
- # this is a 0-based index
- self.column = column
- self.line = line
+ self.token = token
self.expected = expected
self.expected_str = expected_str
def __str__(self):
- return "ParserError(%s, %r)" % (self.token_type, self.value)
+ return "ParserError(%s)" % (self.token, )
class StackEntry(object):
@@ -249,8 +288,8 @@
self.root = None
self.stack = StackEntry(None, self.grammar.dfas[start - 256], 0)
- def add_token(self, token_type, value, lineno, column, line):
- label_index = self.classify(token_type, value, lineno, column, line)
+ def add_token(self, token):
+ label_index = self.grammar.classify(token)
sym_id = 0 # for the annotator
while True:
dfa = self.stack.dfa
@@ -261,7 +300,7 @@
sym_id = self.grammar.labels[i]
if label_index == i:
# We matched a non-terminal.
- self.shift(next_state, token_type, value, lineno, column)
+ self.shift(next_state, token)
state = states[next_state]
# While the only possible action is to accept, pop nodes
off
# the stack.
@@ -278,8 +317,7 @@
sub_node_dfa = self.grammar.dfas[sym_id - 256]
# Check if this token can start a child node.
if sub_node_dfa.could_match_token(label_index):
- self.push(sub_node_dfa, next_state, sym_id, lineno,
- column)
+ self.push(sub_node_dfa, next_state, sym_id)
break
else:
# We failed to find any arcs to another state, so unless this
@@ -287,8 +325,7 @@
if is_accepting:
self.pop()
if self.stack is None:
- raise ParseError("too much input", token_type, value,
- lineno, column, line)
+ raise ParseError("too much input", token)
else:
# If only one possible input would satisfy, attach it to
the
# error.
@@ -299,28 +336,16 @@
else:
expected = -1
expected_str = None
- raise ParseError("bad input", token_type, value, lineno,
- column, line, expected, expected_str)
+ raise ParseError("bad input", token, expected,
expected_str)
- def classify(self, token_type, value, lineno, column, line):
- """Find the label for a token."""
- if token_type == self.grammar.KEYWORD_TOKEN:
- label_index = self.grammar.keyword_ids.get(value, -1)
- if label_index != -1:
- return label_index
- label_index = self.grammar.token_ids.get(token_type, -1)
- if label_index == -1:
- raise ParseError("invalid token", token_type, value, lineno,
column,
- line)
- return label_index
- def shift(self, next_state, token_type, value, lineno, column):
+ def shift(self, next_state, token):
"""Shift a non-terminal and prepare for the next state."""
- new_node = Terminal(token_type, value, lineno, column)
+ new_node = Terminal.fromtoken(token)
self.stack.node_append_child(new_node)
self.stack.state = next_state
- def push(self, next_dfa, next_state, node_type, lineno, column):
+ def push(self, next_dfa, next_state, node_type):
"""Push a terminal and adjust the current state."""
self.stack.state = next_state
self.stack = self.stack.push(next_dfa, 0)
diff --git a/pypy/interpreter/pyparser/pygram.py
b/pypy/interpreter/pyparser/pygram.py
--- a/pypy/interpreter/pyparser/pygram.py
+++ b/pypy/interpreter/pyparser/pygram.py
@@ -23,6 +23,17 @@
python_grammar_no_print.keyword_ids =
python_grammar_no_print.keyword_ids.copy()
del python_grammar_no_print.keyword_ids["print"]
+python_grammar_revdb = python_grammar.shared_copy()
+python_grammar_no_print_revdb = python_grammar_no_print.shared_copy()
+copied_token_ids = python_grammar.token_ids.copy()
+python_grammar_revdb.token_ids = copied_token_ids
+python_grammar_no_print_revdb.token_ids = copied_token_ids
+
+metavar_token_id = pytoken.python_tokens['REVDBMETAVAR']
+# the following line affects python_grammar_no_print too, since they share the
+# dict
+del python_grammar.token_ids[metavar_token_id]
+
class _Tokens(object):
pass
for tok_name, idx in pytoken.python_tokens.iteritems():
@@ -39,3 +50,16 @@
syms._rev_lookup = rev_lookup # for debugging
del _get_python_grammar, _Tokens, tok_name, sym_name, idx
+
+def choose_grammar(print_function, revdb):
+ if print_function:
+ if revdb:
+ return python_grammar_no_print_revdb
+ else:
+ return python_grammar_no_print
+ else:
+ if revdb:
+ return python_grammar_revdb
+ else:
+ return python_grammar
+
diff --git a/pypy/interpreter/pyparser/pyparse.py
b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -147,38 +147,37 @@
flags &= ~consts.PyCF_DONT_IMPLY_DEDENT
self.prepare(_targets[compile_info.mode])
- tp = 0
try:
try:
# Note: we no longer pass the CO_FUTURE_* to the tokenizer,
# which is expected to work independently of them. It's
# certainly the case for all futures in Python <= 2.7.
tokens = pytokenizer.generate_tokens(source_lines, flags)
-
- newflags, last_future_import = (
- future.add_future_flags(self.future_flags, tokens))
- compile_info.last_future_import = last_future_import
- compile_info.flags |= newflags
-
- if compile_info.flags & consts.CO_FUTURE_PRINT_FUNCTION:
- self.grammar = pygram.python_grammar_no_print
- else:
- self.grammar = pygram.python_grammar
-
- for tp, value, lineno, column, line in tokens:
- if self.add_token(tp, value, lineno, column, line):
- break
except error.TokenError as e:
e.filename = compile_info.filename
raise
except error.TokenIndentationError as e:
e.filename = compile_info.filename
raise
+
+ newflags, last_future_import = (
+ future.add_future_flags(self.future_flags, tokens))
+ compile_info.last_future_import = last_future_import
+ compile_info.flags |= newflags
+
+ self.grammar = pygram.choose_grammar(
+ print_function=compile_info.flags &
consts.CO_FUTURE_PRINT_FUNCTION,
+ revdb=self.space.config.translation.reverse_debugger)
+
+ try:
+ for token in tokens:
+ if self.add_token(token):
+ break
except parser.ParseError as e:
# Catch parse errors, pretty them up and reraise them as a
# SyntaxError.
new_err = error.IndentationError
- if tp == pygram.tokens.INDENT:
+ if token.token_type == pygram.tokens.INDENT:
msg = "unexpected indent"
elif e.expected == pygram.tokens.INDENT:
msg = "expected an indented block"
@@ -190,7 +189,7 @@
# parser.ParseError(...).column is 0-based, but the offsets in
the
# exceptions in the error module are 1-based, hence the '+ 1'
- raise new_err(msg, e.lineno, e.column + 1, e.line,
+ raise new_err(msg, e.token.lineno, e.token.column + 1,
e.token.line,
compile_info.filename)
else:
tree = self.root
diff --git a/pypy/interpreter/pyparser/pytokenize.py
b/pypy/interpreter/pyparser/pytokenize.py
--- a/pypy/interpreter/pyparser/pytokenize.py
+++ b/pypy/interpreter/pyparser/pytokenize.py
@@ -1,9 +1,6 @@
# ______________________________________________________________________
"""Module pytokenize
-THIS FILE WAS COPIED FROM pypy/module/parser/pytokenize.py AND ADAPTED
-TO BE ANNOTABLE (Mainly made lists homogeneous)
-
This is a modified version of Ka-Ping Yee's tokenize module found in the
Python standard library.
@@ -12,7 +9,6 @@
expressions have been replaced with hand built DFA's using the
basil.util.automata module.
-$Id: pytokenize.py,v 1.3 2003/10/03 16:31:53 jriehl Exp $
"""
# ______________________________________________________________________
@@ -65,22 +61,3 @@
single_quoted[t] = t
tabsize = 8
-
-# PYPY MODIFICATION: removed TokenError class as it's not needed here
-
-# PYPY MODIFICATION: removed StopTokenizing class as it's not needed here
-
-# PYPY MODIFICATION: removed printtoken() as it's not needed here
-
-# PYPY MODIFICATION: removed tokenize() as it's not needed here
-
-# PYPY MODIFICATION: removed tokenize_loop() as it's not needed here
-
-# PYPY MODIFICATION: removed generate_tokens() as it was copied / modified
-# in pythonlexer.py
-
-# PYPY MODIFICATION: removed main() as it's not needed here
-
-# ______________________________________________________________________
-# End of pytokenize.py
-
diff --git a/pypy/interpreter/pyparser/pytokenizer.py
b/pypy/interpreter/pyparser/pytokenizer.py
--- a/pypy/interpreter/pyparser/pytokenizer.py
+++ b/pypy/interpreter/pyparser/pytokenizer.py
@@ -1,4 +1,5 @@
from pypy.interpreter.pyparser import automata
+from pypy.interpreter.pyparser.parser import Token
from pypy.interpreter.pyparser.pygram import tokens
from pypy.interpreter.pyparser.pytoken import python_opmap
from pypy.interpreter.pyparser.error import TokenError, TokenIndentationError
@@ -103,7 +104,7 @@
endmatch = endDFA.recognize(line)
if endmatch >= 0:
pos = end = endmatch
- tok = (tokens.STRING, contstr + line[:end], strstart[0],
+ tok = Token(tokens.STRING, contstr + line[:end], strstart[0],
strstart[1], line)
token_list.append(tok)
last_comment = ''
@@ -111,7 +112,7 @@
contline = None
elif (needcont and not line.endswith('\\\n') and
not line.endswith('\\\r\n')):
- tok = (tokens.ERRORTOKEN, contstr + line, strstart[0],
+ tok = Token(tokens.ERRORTOKEN, contstr + line, strstart[0],
strstart[1], line)
token_list.append(tok)
last_comment = ''
@@ -140,11 +141,11 @@
if column > indents[-1]: # count indents or dedents
indents.append(column)
- token_list.append((tokens.INDENT, line[:pos], lnum, 0, line))
+ token_list.append(Token(tokens.INDENT, line[:pos], lnum, 0,
line))
last_comment = ''
while column < indents[-1]:
indents.pop()
- token_list.append((tokens.DEDENT, '', lnum, pos, line))
+ token_list.append(Token(tokens.DEDENT, '', lnum, pos, line))
last_comment = ''
if column != indents[-1]:
err = "unindent does not match any outer indentation level"
@@ -177,11 +178,11 @@
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
- token_list.append((tokens.NUMBER, token, lnum, start,
line))
+ token_list.append(Token(tokens.NUMBER, token, lnum, start,
line))
last_comment = ''
elif initial in '\r\n':
if not parenstack:
- tok = (tokens.NEWLINE, last_comment, lnum, start, line)
+ tok = Token(tokens.NEWLINE, last_comment, lnum, start,
line)
token_list.append(tok)
last_comment = ''
elif initial == '#':
@@ -193,7 +194,7 @@
if endmatch >= 0: # all on one line
pos = endmatch
token = line[start:pos]
- tok = (tokens.STRING, token, lnum, start, line)
+ tok = Token(tokens.STRING, token, lnum, start, line)
token_list.append(tok)
last_comment = ''
else:
@@ -212,16 +213,16 @@
contline = line
break
else: # ordinary string
- tok = (tokens.STRING, token, lnum, start, line)
+ tok = Token(tokens.STRING, token, lnum, start, line)
token_list.append(tok)
last_comment = ''
elif initial in namechars: # ordinary name
- token_list.append((tokens.NAME, token, lnum, start, line))
+ token_list.append(Token(tokens.NAME, token, lnum, start,
line))
last_comment = ''
elif initial == '\\': # continued stmt
continued = 1
elif initial == '$':
- token_list.append((tokens.REVDBMETAVAR, token,
+ token_list.append(Token(tokens.REVDBMETAVAR, token,
lnum, start, line))
last_comment = ''
else:
@@ -246,7 +247,7 @@
punct = python_opmap[token]
else:
punct = tokens.OP
- token_list.append((punct, token, lnum, start, line))
+ token_list.append(Token(punct, token, lnum, start, line))
last_comment = ''
else:
start = whiteSpaceDFA.recognize(line, pos)
@@ -255,22 +256,22 @@
if start<max and line[start] in single_quoted:
raise TokenError("end of line (EOL) while scanning string
literal",
line, lnum, start+1, token_list)
- tok = (tokens.ERRORTOKEN, line[pos], lnum, pos, line)
+ tok = Token(tokens.ERRORTOKEN, line[pos], lnum, pos, line)
token_list.append(tok)
last_comment = ''
pos = pos + 1
lnum -= 1
if not (flags & consts.PyCF_DONT_IMPLY_DEDENT):
- if token_list and token_list[-1][0] != tokens.NEWLINE:
- tok = (tokens.NEWLINE, '', lnum, 0, '\n')
+ if token_list and token_list[-1].token_type != tokens.NEWLINE:
+ tok = Token(tokens.NEWLINE, '', lnum, 0, '\n')
token_list.append(tok)
for indent in indents[1:]: # pop remaining indent levels
- token_list.append((tokens.DEDENT, '', lnum, pos, line))
- tok = (tokens.NEWLINE, '', lnum, 0, '\n')
+ token_list.append(Token(tokens.DEDENT, '', lnum, pos, line))
+ tok = Token(tokens.NEWLINE, '', lnum, 0, '\n')
token_list.append(tok)
- token_list.append((tokens.ENDMARKER, '', lnum, pos, line))
+ token_list.append(Token(tokens.ENDMARKER, '', lnum, pos, line))
return token_list
diff --git a/pypy/interpreter/pyparser/test/test_automata.py
b/pypy/interpreter/pyparser/test/test_automata.py
--- a/pypy/interpreter/pyparser/test/test_automata.py
+++ b/pypy/interpreter/pyparser/test/test_automata.py
@@ -1,4 +1,4 @@
-from pypy.interpreter.pyparser.automata import DFA, DEFAULT
+from pypy.interpreter.pyparser.automata import DFA, NonGreedyDFA, DEFAULT
def test_states():
d = DFA([{"\x00": 1}, {"\x01": 0}], [False, True])
@@ -10,3 +10,20 @@
assert d.states == "\x01\x00"
assert d.defaults == "\xff\x00"
assert d.max_char == 1
+
+def test_recognize():
+ d = DFA([{"a": 1}, {"b": 0}], [False, True])
+ assert d.recognize("ababab") == 5
+ assert d.recognize("c") == -1
+
+ d = DFA([{"a": 1}, {DEFAULT: 0}], [False, True])
+ assert d.recognize("a,a?ab") == 5
+ assert d.recognize("c") == -1
+
+ d = NonGreedyDFA([{"a": 1}, {"b": 0}], [False, True])
+ assert d.recognize("ababab") == 1
+ assert d.recognize("c") == -1
+
+ d = NonGreedyDFA([{"a": 1}, {DEFAULT: 0}], [False, True])
+ assert d.recognize("a,a?ab") == 1
+ assert d.recognize("c") == -1
diff --git a/pypy/interpreter/pyparser/test/test_parser.py
b/pypy/interpreter/pyparser/test/test_parser.py
--- a/pypy/interpreter/pyparser/test/test_parser.py
+++ b/pypy/interpreter/pyparser/test/test_parser.py
@@ -20,7 +20,7 @@
rl = StringIO.StringIO(input + "\n").readline
gen = tokenize.generate_tokens(rl)
for tp, value, begin, end, line in gen:
- if self.add_token(tp, value, begin[0], begin[1], line):
+ if self.add_token(parser.Token(tp, value, begin[0], begin[1],
line)):
py.test.raises(StopIteration, gen.next)
return self.root
diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py
b/pypy/interpreter/pyparser/test/test_pyparse.py
--- a/pypy/interpreter/pyparser/test/test_pyparse.py
+++ b/pypy/interpreter/pyparser/test/test_pyparse.py
@@ -38,7 +38,7 @@
""", info=info)
assert tree.type == syms.file_input
assert info.encoding == "iso-8859-1"
- sentence = u"u'Die Männer ärgen sich!'"
+ sentence = u"u'Die Männer ärgern sich!'"
input = (u"# coding: utf-7\nstuff = %s" % (sentence,)).encode("utf-7")
tree = self.parse(input, info=info)
assert info.encoding == "utf-7"
@@ -168,13 +168,11 @@
assert expected_tree == tree
def test_revdb_dollar_num(self):
- self.parse('$0')
- self.parse('$5')
- self.parse('$42')
- self.parse('2+$42.attrname')
- py.test.raises(SyntaxError, self.parse, '$')
- py.test.raises(SyntaxError, self.parse, '$a')
- py.test.raises(SyntaxError, self.parse, '$.5')
+ assert not self.space.config.translation.reverse_debugger
+ py.test.raises(SyntaxError, self.parse, '$0')
+ py.test.raises(SyntaxError, self.parse, '$0 + 5')
+ py.test.raises(SyntaxError, self.parse,
+ "from __future__ import print_function\nx = ($0, print)")
def test_error_forgotten_chars(self):
info = py.test.raises(SyntaxError, self.parse, "if 1\n print 4")
@@ -183,3 +181,18 @@
assert "(expected ':')" in info.value.msg
info = py.test.raises(SyntaxError, self.parse, "def f:\n print 1")
assert "(expected '(')" in info.value.msg
+
+class TestPythonParserRevDB(TestPythonParser):
+ spaceconfig = {"translation.reverse_debugger": True}
+
+ def test_revdb_dollar_num(self):
+ self.parse('$0')
+ self.parse('$5')
+ self.parse('$42')
+ self.parse('2+$42.attrname')
+ self.parse("from __future__ import print_function\nx = ($0, print)")
+ py.test.raises(SyntaxError, self.parse, '$')
+ py.test.raises(SyntaxError, self.parse, '$a')
+ py.test.raises(SyntaxError, self.parse, '$.5')
+
+
diff --git a/pypy/interpreter/pyparser/test/test_pytokenizer.py
b/pypy/interpreter/pyparser/test/test_pytokenizer.py
--- a/pypy/interpreter/pyparser/test/test_pytokenizer.py
+++ b/pypy/interpreter/pyparser/test/test_pytokenizer.py
@@ -1,5 +1,6 @@
import pytest
from pypy.interpreter.pyparser import pytokenizer
+from pypy.interpreter.pyparser.parser import Token
from pypy.interpreter.pyparser.pygram import tokens
from pypy.interpreter.pyparser.error import TokenError
@@ -22,12 +23,12 @@
line = "a+1"
tks = tokenize(line)
assert tks == [
- (tokens.NAME, 'a', 1, 0, line),
- (tokens.PLUS, '+', 1, 1, line),
- (tokens.NUMBER, '1', 1, 2, line),
- (tokens.NEWLINE, '', 2, 0, '\n'),
- (tokens.NEWLINE, '', 2, 0, '\n'),
- (tokens.ENDMARKER, '', 2, 0, ''),
+ Token(tokens.NAME, 'a', 1, 0, line),
+ Token(tokens.PLUS, '+', 1, 1, line),
+ Token(tokens.NUMBER, '1', 1, 2, line),
+ Token(tokens.NEWLINE, '', 2, 0, '\n'),
+ Token(tokens.NEWLINE, '', 2, 0, '\n'),
+ Token(tokens.ENDMARKER, '', 2, 0, ''),
]
def test_error_parenthesis(self):
diff --git a/pypy/interpreter/test/test_reverse_debugging.py
b/pypy/interpreter/test/test_reverse_debugging.py
--- a/pypy/interpreter/test/test_reverse_debugging.py
+++ b/pypy/interpreter/test/test_reverse_debugging.py
@@ -86,6 +86,9 @@
if msg[0] == revdb.ANSWER_TEXT:
assert got_output is None
got_output = msg[-1]
+ assert msg[1] in (0, 1)
+ if msg[1]:
+ got_output += "\n"
elif msg[0] == revdb.ANSWER_CHBKPT:
assert got_chbkpt is None
assert msg[1] == 5
diff --git a/pypy/module/_cppyy/__init__.py b/pypy/module/_cppyy/__init__.py
--- a/pypy/module/_cppyy/__init__.py
+++ b/pypy/module/_cppyy/__init__.py
@@ -1,7 +1,7 @@
from pypy.interpreter.mixedmodule import MixedModule
class Module(MixedModule):
- "This module brigdes the cppyy frontend with its backend, through PyPy.\n\
+ "This module bridges the cppyy frontend with its backend, through PyPy.\n\
See http://cppyy.readthedocs.io/en/latest for full details."
interpleveldefs = {
@@ -14,17 +14,19 @@
'_set_function_generator': 'interp_cppyy.set_function_generator',
'_register_class' : 'interp_cppyy.register_class',
'_get_nullptr' : 'interp_cppyy.get_nullptr',
- 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance',
+ 'CPPInstance' : 'interp_cppyy.W_CPPInstance',
'addressof' : 'interp_cppyy.addressof',
'_bind_object' : 'interp_cppyy._bind_object',
'bind_object' : 'interp_cppyy.bind_object',
'move' : 'interp_cppyy.move',
+ '_pin_type' : 'interp_cppyy._pin_type',
}
appleveldefs = {
'_post_import_startup' : 'pythonify._post_import_startup',
+ 'Template' : 'pythonify.CPPTemplate',
'add_pythonization' : 'pythonify.add_pythonization',
- 'Template' : 'pythonify.CPPTemplate',
+ 'remove_pythonization' : 'pythonify.remove_pythonization',
}
def __init__(self, space, *args):
diff --git a/pypy/module/_cppyy/capi/__init__.py
b/pypy/module/_cppyy/capi/__init__.py
--- a/pypy/module/_cppyy/capi/__init__.py
+++ b/pypy/module/_cppyy/capi/__init__.py
@@ -11,6 +11,3 @@
assert lltype.typeOf(ptr) == C_OBJECT
address = rffi.cast(rffi.CCHARP, ptr)
return rffi.cast(C_OBJECT, lltype.direct_ptradd(address, offset))
-
-def exchange_address(ptr, cif_descr, index):
- return rffi.ptradd(ptr, cif_descr.exchange_args[index])
diff --git a/pypy/module/_cppyy/capi/loadable_capi.py
b/pypy/module/_cppyy/capi/loadable_capi.py
--- a/pypy/module/_cppyy/capi/loadable_capi.py
+++ b/pypy/module/_cppyy/capi/loadable_capi.py
@@ -69,7 +69,8 @@
space = self.space
cif_descr = self.cif_descr
size = cif_descr.exchange_size
- raw_string = rffi.cast(rffi.CCHARP, 0) # only ever have one in the
CAPI
+ raw_string1 = rffi.cast(rffi.CCHARP, 0)
+ raw_string2 = rffi.cast(rffi.CCHARP, 0) # have max two in any CAPI
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
for i in range(len(args)):
@@ -88,14 +89,18 @@
assert obj._voidp != rffi.cast(rffi.VOIDP, 0)
data = rffi.cast(rffi.VOIDPP, data)
data[0] = obj._voidp
- else: # only other use is sring
+ else: # only other use is string
assert obj.tc == 's'
n = len(obj._string)
- assert raw_string == rffi.cast(rffi.CCHARP, 0)
- # XXX could use rffi.get_nonmovingbuffer_final_null()
- raw_string = rffi.str2charp(obj._string)
data = rffi.cast(rffi.CCHARPP, data)
- data[0] = raw_string
+ if raw_string1 == rffi.cast(rffi.CCHARP, 0):
+ # XXX could use rffi.get_nonmovingbuffer_final_null()
+ raw_string1 = rffi.str2charp(obj._string)
+ data[0] = raw_string1
+ else:
+ assert raw_string2 == rffi.cast(rffi.CCHARP, 0)
+ raw_string2 = rffi.str2charp(obj._string)
+ data[0] = raw_string2
jit_libffi.jit_ffi_call(cif_descr,
rffi.cast(rffi.VOIDP, funcaddr),
@@ -106,8 +111,10 @@
# immediate unwrapping, the round-trip is removed
w_res = self.ctitem.copy_and_convert_to_object(resultdata)
finally:
- if raw_string != rffi.cast(rffi.CCHARP, 0):
- rffi.free_charp(raw_string)
+ if raw_string1 != rffi.cast(rffi.CCHARP, 0):
+ rffi.free_charp(raw_string1)
+ if raw_string2 != rffi.cast(rffi.CCHARP, 0):
+ rffi.free_charp(raw_string2)
lltype.free(buffer, flavor='raw')
return w_res
@@ -183,8 +190,7 @@
'constructor' : ([c_method, c_object, c_int, c_voidp],
c_object),
'call_o' : ([c_method, c_object, c_int, c_voidp, c_type],
c_object),
- 'function_address_from_index' : ([c_scope, c_index],
c_voidp), # TODO: verify
- 'function_address_from_method' : ([c_method],
c_voidp), # id.
+ 'function_address' : ([c_method],
c_voidp), # TODO: verify
# handling of function argument buffer
'allocate_function_args' : ([c_int], c_voidp),
@@ -207,6 +213,8 @@
'num_bases' : ([c_type], c_int),
'base_name' : ([c_type, c_int],
c_ccharp),
'is_subtype' : ([c_type, c_type], c_int),
+ 'smartptr_info' : ([c_ccharp, c_voidp, c_voidp],
c_int),
+ 'add_smartptr_type' : ([c_ccharp], c_void),
'base_offset' : ([c_type, c_type, c_object, c_int],
c_ptrdiff_t),
@@ -214,30 +222,31 @@
'num_methods' : ([c_scope], c_int),
'method_indices_from_name' : ([c_scope, c_ccharp],
c_index_array),
- 'method_name' : ([c_scope, c_index],
c_ccharp),
- 'method_mangled_name' : ([c_scope, c_index],
c_ccharp),
- 'method_result_type' : ([c_scope, c_index],
c_ccharp),
- 'method_num_args' : ([c_scope, c_index], c_int),
- 'method_req_args' : ([c_scope, c_index], c_int),
- 'method_arg_type' : ([c_scope, c_index, c_int],
c_ccharp),
- 'method_arg_default' : ([c_scope, c_index, c_int],
c_ccharp),
- 'method_signature' : ([c_scope, c_index, c_int],
c_ccharp),
- 'method_prototype' : ([c_scope, c_index, c_int],
c_ccharp),
+ 'get_method' : ([c_scope, c_index],
c_method),
+
+ 'method_name' : ([c_method],
c_ccharp),
+ 'method_full_name' : ([c_method],
c_ccharp),
+ 'method_mangled_name' : ([c_method],
c_ccharp),
+ 'method_result_type' : ([c_method],
c_ccharp),
+ 'method_num_args' : ([c_method], c_int),
+ 'method_req_args' : ([c_method], c_int),
+ 'method_arg_type' : ([c_method, c_int],
c_ccharp),
+ 'method_arg_default' : ([c_method, c_int],
c_ccharp),
+ 'method_signature' : ([c_method, c_int],
c_ccharp),
+ 'method_prototype' : ([c_scope, c_method, c_int],
c_ccharp),
'is_const_method' : ([c_method], c_int),
'exists_method_template' : ([c_scope, c_ccharp], c_int),
'method_is_template' : ([c_scope, c_index], c_int),
- 'method_num_template_args' : ([c_scope, c_index], c_int),
- 'method_template_arg_name' : ([c_scope, c_index, c_index],
c_ccharp),
+ 'get_method_template' : ([c_scope, c_ccharp, c_ccharp],
c_method),
- 'get_method' : ([c_scope, c_index],
c_method),
'get_global_operator' : ([c_scope, c_scope, c_scope,
c_ccharp], c_index),
# method properties
- 'is_public_method' : ([c_type, c_index], c_int),
- 'is_constructor' : ([c_type, c_index], c_int),
- 'is_destructor' : ([c_type, c_index], c_int),
- 'is_staticmethod' : ([c_type, c_index], c_int),
+ 'is_public_method' : ([c_method], c_int),
+ 'is_constructor' : ([c_method], c_int),
+ 'is_destructor' : ([c_method], c_int),
+ 'is_staticmethod' : ([c_method], c_int),
# data member reflection information
'num_datamembers' : ([c_scope], c_int),
@@ -415,13 +424,9 @@
args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs),
_ArgH(cppclass.handle)]
return _cdata_to_cobject(space, call_capi(space, 'call_o', args))
-def c_function_address_from_index(space, cppscope, index):
- args = [_ArgH(cppscope.handle), _ArgL(index)]
+def c_function_address(space, cppmethod):
return rffi.cast(C_FUNC_PTR,
- _cdata_to_ptr(space, call_capi(space, 'function_address_from_index',
args)))
-def c_function_address_from_method(space, cppmethod):
- return rffi.cast(C_FUNC_PTR,
- _cdata_to_ptr(space, call_capi(space, 'function_address_from_method',
[_ArgH(cppmethod)])))
+ _cdata_to_ptr(space, call_capi(space, 'function_address',
[_ArgH(cppmethod)])))
# handling of function argument buffer ---------------------------------------
def c_allocate_function_args(space, size):
@@ -479,6 +484,21 @@
if derived == base:
return bool(1)
return space.bool_w(call_capi(space, 'is_subtype', [_ArgH(derived.handle),
_ArgH(base.handle)]))
+def c_smartptr_info(space, name):
+ out_raw = lltype.malloc(rffi.ULONGP.TO, 1, flavor='raw', zero=True)
+ out_deref = lltype.malloc(rffi.ULONGP.TO, 1, flavor='raw', zero=True)
+ try:
+ args = [_ArgS(name),
+ _ArgP(rffi.cast(rffi.VOIDP, out_raw)), _ArgP(rffi.cast(rffi.VOIDP,
out_deref))]
+ result = space.bool_w(call_capi(space, 'smartptr_info', args))
+ raw = rffi.cast(C_TYPE, out_raw[0])
+ deref = rffi.cast(C_METHOD, out_deref[0])
+ finally:
+ lltype.free(out_deref, flavor='raw')
+ lltype.free(out_raw, flavor='raw')
+ return (result, raw, deref)
+def c_add_smartptr_type(space, name):
+ return space.bool_w(call_capi(space, 'add_smartptr_type', [_ArgS(name)]))
def _c_base_offset(space, derived_h, base_h, address, direction):
args = [_ArgH(derived_h), _ArgH(base_h), _ArgH(address), _ArgL(direction)]
@@ -510,30 +530,36 @@
c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below
return py_indices
-def c_method_name(space, cppscope, index):
+def c_get_method(space, cppscope, index):
args = [_ArgH(cppscope.handle), _ArgL(index)]
- return charp2str_free(space, call_capi(space, 'method_name', args))
-def c_method_result_type(space, cppscope, index):
- args = [_ArgH(cppscope.handle), _ArgL(index)]
- return charp2str_free(space, call_capi(space, 'method_result_type', args))
-def c_method_num_args(space, cppscope, index):
- args = [_ArgH(cppscope.handle), _ArgL(index)]
- return space.int_w(call_capi(space, 'method_num_args', args))
-def c_method_req_args(space, cppscope, index):
- args = [_ArgH(cppscope.handle), _ArgL(index)]
- return space.int_w(call_capi(space, 'method_req_args', args))
-def c_method_arg_type(space, cppscope, index, arg_index):
- args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)]
+ return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method',
args)))
+
+def c_method_name(space, cppmeth):
+ return charp2str_free(space, call_capi(space, 'method_name',
[_ArgH(cppmeth)]))
+def c_method_full_name(space, cppmeth):
+ return charp2str_free(space, call_capi(space, 'method_full_name',
[_ArgH(cppmeth)]))
+def c_method_mangled_name(space, cppmeth):
+ return charp2str_free(space, call_capi(space, 'method_mangled_name',
[_ArgH(cppmeth)]))
+def c_method_result_type(space, cppmeth):
+ return charp2str_free(space, call_capi(space, 'method_result_type',
[_ArgH(cppmeth)]))
+def c_method_num_args(space, cppmeth):
+ return space.int_w(call_capi(space, 'method_num_args', [_ArgH(cppmeth)]))
+def c_method_req_args(space, cppmeth):
+ return space.int_w(call_capi(space, 'method_req_args', [_ArgH(cppmeth)]))
+def c_method_arg_type(space, cppmeth, arg_index):
+ args = [_ArgH(cppmeth), _ArgL(arg_index)]
return charp2str_free(space, call_capi(space, 'method_arg_type', args))
-def c_method_arg_default(space, cppscope, index, arg_index):
- args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)]
+def c_method_arg_default(space, cppmeth, arg_index):
+ args = [_ArgH(cppmeth), _ArgL(arg_index)]
return charp2str_free(space, call_capi(space, 'method_arg_default', args))
-def c_method_signature(space, cppscope, index, show_formalargs=True):
- args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(show_formalargs)]
+def c_method_signature(space, cppmeth, show_formalargs=True):
+ args = [_ArgH(cppmeth), _ArgL(show_formalargs)]
return charp2str_free(space, call_capi(space, 'method_signature', args))
-def c_method_prototype(space, cppscope, index, show_formalargs=True):
- args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(show_formalargs)]
+def c_method_prototype(space, cppscope, cppmeth, show_formalargs=True):
+ args = [_ArgH(cppscope.handle), _ArgH(cppmeth), _ArgL(show_formalargs)]
return charp2str_free(space, call_capi(space, 'method_prototype', args))
+def c_is_const_method(space, cppmeth):
+ return space.bool_w(call_capi(space, 'is_const_method', [_ArgH(cppmeth)]))
def c_exists_method_template(space, cppscope, name):
args = [_ArgH(cppscope.handle), _ArgS(name)]
@@ -541,21 +567,10 @@
def c_method_is_template(space, cppscope, index):
args = [_ArgH(cppscope.handle), _ArgL(index)]
return space.bool_w(call_capi(space, 'method_is_template', args))
-def _c_method_num_template_args(space, cppscope, index):
- args = [_ArgH(cppscope.handle), _ArgL(index)]
- return space.int_w(call_capi(space, 'method_num_template_args', args))
-def c_template_args(space, cppscope, index):
- nargs = _c_method_num_template_args(space, cppscope, index)
- arg1 = _ArgH(cppscope.handle)
- arg2 = _ArgL(index)
- args = [c_resolve_name(space, charp2str_free(space,
- call_capi(space, 'method_template_arg_name', [arg1, arg2,
_ArgL(iarg)]))
- ) for iarg in range(nargs)]
- return args
-def c_get_method(space, cppscope, index):
- args = [_ArgH(cppscope.handle), _ArgL(index)]
- return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method',
args)))
+def c_get_method_template(space, cppscope, name, proto):
+ args = [_ArgH(cppscope.handle), _ArgS(name), _ArgS(proto)]
+ return rffi.cast(C_METHOD, space.uint_w(call_capi(space,
'get_method_template', args)))
def c_get_global_operator(space, nss, lc, rc, op):
if nss is not None:
args = [_ArgH(nss.handle), _ArgH(lc.handle), _ArgH(rc.handle),
_ArgS(op)]
@@ -563,18 +578,14 @@
return rffi.cast(WLAVC_INDEX, -1)
# method properties ----------------------------------------------------------
-def c_is_public_method(space, cppclass, index):
- args = [_ArgH(cppclass.handle), _ArgL(index)]
- return space.bool_w(call_capi(space, 'is_public_method', args))
-def c_is_constructor(space, cppclass, index):
- args = [_ArgH(cppclass.handle), _ArgL(index)]
- return space.bool_w(call_capi(space, 'is_constructor', args))
-def c_is_destructor(space, cppclass, index):
- args = [_ArgH(cppclass.handle), _ArgL(index)]
- return space.bool_w(call_capi(space, 'is_destructor', args))
-def c_is_staticmethod(space, cppclass, index):
- args = [_ArgH(cppclass.handle), _ArgL(index)]
- return space.bool_w(call_capi(space, 'is_staticmethod', args))
+def c_is_public_method(space, cppmeth):
+ return space.bool_w(call_capi(space, 'is_public_method', [_ArgH(cppmeth)]))
+def c_is_constructor(space, cppmeth):
+ return space.bool_w(call_capi(space, 'is_constructor', [_ArgH(cppmeth)]))
+def c_is_destructor(space, cppmeth):
+ return space.bool_w(call_capi(space, 'is_destructor', [_ArgH(cppmeth)]))
+def c_is_staticmethod(space, cppmeth):
+ return space.bool_w(call_capi(space, 'is_staticmethod', [_ArgH(cppmeth)]))
# data member reflection information -----------------------------------------
def c_num_datamembers(space, cppscope):
@@ -676,7 +687,7 @@
space.setattr(w_pycppclass, space.newtext(m1),
space.getattr(w_pycppclass, space.newtext(m2)))
-def pythonize(space, name, w_pycppclass):
+def pythonize(space, w_pycppclass, name):
if name == "string":
space.setattr(w_pycppclass, space.newtext("c_str"),
_pythonizations["stdstring_c_str"])
_method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str")
diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py
--- a/pypy/module/_cppyy/converter.py
+++ b/pypy/module/_cppyy/converter.py
@@ -7,7 +7,7 @@
from rpython.rlib import rfloat, rawrefcount
from pypy.module._rawffi.interp_rawffi import letter2tp
-from pypy.module._rawffi.array import W_Array, W_ArrayInstance
+from pypy.module._rawffi.array import W_ArrayInstance
from pypy.module._cppyy import helper, capi, ffitypes
@@ -68,6 +68,8 @@
pass
# array type
try:
+ if hasattr(space, "fake"):
+ raise NotImplementedError
arr = space.interp_w(W_ArrayInstance, w_obj, can_be_None=True)
if arr:
return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space)))
@@ -130,20 +132,6 @@
pass
-class ArrayCache(object):
- def __init__(self, space):
- self.space = space
- def __getattr__(self, name):
- if name.startswith('array_'):
- typecode = name[len('array_'):]
- arr = self.space.interp_w(W_Array, letter2tp(self.space, typecode))
- setattr(self, name, arr)
- return arr
- raise AttributeError(name)
-
- def _freeze_(self):
- return True
-
class ArrayTypeConverterMixin(object):
_mixin_ = True
_immutable_fields_ = ['size']
@@ -162,9 +150,7 @@
# read access, so no copy needed
address_value = self._get_raw_address(space, w_obj, offset)
address = rffi.cast(rffi.ULONG, address_value)
- cache = space.fromcache(ArrayCache)
- arr = getattr(cache, 'array_' + self.typecode)
- return arr.fromaddress(space, address, self.size)
+ return W_ArrayInstance(space, letter2tp(space, self.typecode),
self.size, address)
def to_memory(self, space, w_obj, w_value, offset):
# copy the full array (uses byte copy for now)
@@ -205,17 +191,15 @@
# read access, so no copy needed
address_value = self._get_raw_address(space, w_obj, offset)
address = rffi.cast(rffi.ULONGP, address_value)
- cache = space.fromcache(ArrayCache)
- arr = getattr(cache, 'array_' + self.typecode)
- return arr.fromaddress(space, address[0], self.size)
+ return W_ArrayInstance(space, letter2tp(space, self.typecode),
self.size, address[0])
def to_memory(self, space, w_obj, w_value, offset):
# copy only the pointer value
rawobject = get_rawobject_nonnull(space, w_obj)
- byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject,
offset))
+ byteptr = rffi.cast(rffi.VOIDPP, capi.direct_ptradd(rawobject, offset))
buf = space.getarg_w('s*', w_value)
try:
- byteptr[0] = buf.get_raw_address()
+ byteptr[0] = rffi.cast(rffi.VOIDP, buf.get_raw_address())
except ValueError:
raise oefmt(space.w_TypeError,
"raw buffer interface not supported")
@@ -337,6 +321,10 @@
address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj,
offset))
address[0] = self._unwrap_object(space, w_value)
+
+class UCharConverter(ffitypes.typeid(rffi.UCHAR), CharConverter):
+ pass
+
class FloatConverter(ffitypes.typeid(rffi.FLOAT), FloatTypeConverterMixin,
TypeConverter):
_immutable_fields_ = ['default']
@@ -398,12 +386,12 @@
arg = space.text_w(w_obj)
x[0] = rffi.cast(rffi.LONG, rffi.str2charp(arg))
ba = rffi.cast(rffi.CCHARP, address)
- ba[capi.c_function_arg_typeoffset(space)] = 'o'
+ ba[capi.c_function_arg_typeoffset(space)] = 'p'
def from_memory(self, space, w_obj, w_pycppclass, offset):
address = self._get_raw_address(space, w_obj, offset)
charpptr = rffi.cast(rffi.CCHARPP, address)
- return space.newbytes(rffi.charp2str(charpptr[0]))
+ return space.newtext(rffi.charp2str(charpptr[0]))
def free_argument(self, space, arg, call_local):
lltype.free(rffi.cast(rffi.CCHARPP, arg)[0], flavor='raw')
@@ -420,7 +408,7 @@
strsize = self.size
if charpptr[self.size-1] == '\0':
strsize = self.size-1 # rffi will add \0 back
- return space.newbytes(rffi.charpsize2str(charpptr, strsize))
+ return space.newtext(rffi.charpsize2str(charpptr, strsize))
class VoidPtrConverter(TypeConverter):
@@ -449,12 +437,12 @@
# returned as a long value for the address (INTPTR_T is not proper
# per se, but rffi does not come with a PTRDIFF_T)
address = self._get_raw_address(space, w_obj, offset)
- ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0])
- if ptrval == 0:
+ ptrval = rffi.cast(rffi.ULONGP, address)[0]
+ if ptrval == rffi.cast(rffi.ULONG, 0):
from pypy.module._cppyy import interp_cppyy
return interp_cppyy.get_nullptr(space)
- arr = space.interp_w(W_Array, letter2tp(space, 'P'))
- return arr.fromaddress(space, ptrval, sys.maxint)
+ shape = letter2tp(space, 'P')
+ return W_ArrayInstance(space, shape, sys.maxint/shape.size, ptrval)
def to_memory(self, space, w_obj, w_value, offset):
address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj,
offset))
@@ -504,8 +492,8 @@
def _unwrap_object(self, space, w_obj):
from pypy.module._cppyy.interp_cppyy import W_CPPInstance
if isinstance(w_obj, W_CPPInstance):
- from pypy.module._cppyy.interp_cppyy import
INSTANCE_FLAGS_IS_R_VALUE
- if w_obj.flags & INSTANCE_FLAGS_IS_R_VALUE:
+ from pypy.module._cppyy.interp_cppyy import
INSTANCE_FLAGS_IS_RVALUE
+ if w_obj.flags & INSTANCE_FLAGS_IS_RVALUE:
# reject moves as all are explicit
raise ValueError("lvalue expected")
if capi.c_is_subtype(space, w_obj.clsdecl, self.clsdecl):
@@ -514,7 +502,7 @@
obj_address = capi.direct_ptradd(rawobject, offset)
return rffi.cast(capi.C_OBJECT, obj_address)
raise oefmt(space.w_TypeError,
- "cannot pass %T as %s", w_obj, self.clsdecl.name)
+ "cannot pass %T instance as %s", w_obj, self.clsdecl.name)
def cffi_type(self, space):
state = space.fromcache(ffitypes.State)
@@ -534,11 +522,18 @@
class InstanceMoveConverter(InstanceRefConverter):
def _unwrap_object(self, space, w_obj):
# moving is same as by-ref, but have to check that move is allowed
- from pypy.module._cppyy.interp_cppyy import W_CPPInstance,
INSTANCE_FLAGS_IS_R_VALUE
- if isinstance(w_obj, W_CPPInstance):
- if w_obj.flags & INSTANCE_FLAGS_IS_R_VALUE:
- w_obj.flags &= ~INSTANCE_FLAGS_IS_R_VALUE
- return InstanceRefConverter._unwrap_object(self, space, w_obj)
+ from pypy.module._cppyy.interp_cppyy import W_CPPInstance,
INSTANCE_FLAGS_IS_RVALUE
+ obj = space.interp_w(W_CPPInstance, w_obj)
+ if obj:
+ if obj.flags & INSTANCE_FLAGS_IS_RVALUE:
+ obj.flags &= ~INSTANCE_FLAGS_IS_RVALUE
+ try:
+ return InstanceRefConverter._unwrap_object(self, space,
w_obj)
+ except Exception:
+ # TODO: if the method fails on some other converter, then
the next
+ # overload can not be an rvalue anymore
+ obj.flags |= INSTANCE_FLAGS_IS_RVALUE
+ raise
raise oefmt(space.w_ValueError, "object is not an rvalue")
@@ -629,8 +624,7 @@
address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space,
w_obj, offset))
assign = self.clsdecl.get_overload("__assign__")
from pypy.module._cppyy import interp_cppyy
- assign.call(
- interp_cppyy.wrap_cppinstance(space, address, self.clsdecl,
do_cast=False), [w_value])
+ assign.call_impl(address, [w_value])
except Exception:
InstanceConverter.to_memory(self, space, w_obj, w_value, offset)
@@ -639,7 +633,6 @@
class StdStringRefConverter(InstancePtrConverter):
_immutable_fields_ = ['cppclass', 'typecode']
-
typecode = 'V'
def __init__(self, space, extra):
@@ -702,8 +695,7 @@
m = cppol.functions[i]
if m.signature(False) == self.signature:
x = rffi.cast(rffi.VOIDPP, address)
- x[0] = rffi.cast(rffi.VOIDP,
- capi.c_function_address_from_method(space, m.cppmethod))
+ x[0] = rffi.cast(rffi.VOIDP, capi.c_function_address(space,
m.cppmethod))
address = rffi.cast(capi.C_OBJECT, address)
ba = rffi.cast(rffi.CCHARP, address)
ba[capi.c_function_arg_typeoffset(space)] = 'p'
@@ -714,6 +706,67 @@
"no overload found matching %s", self.signature)
+class SmartPointerConverter(TypeConverter):
+ _immutable_fields = ['typecode', 'smartdecl', 'rawdecl', 'deref']
+ typecode = 'V'
+
+ def __init__(self, space, smartdecl, raw, deref):
+ from pypy.module._cppyy.interp_cppyy import W_CPPClassDecl,
get_pythonized_cppclass
+ self.smartdecl = smartdecl
+ w_raw = get_pythonized_cppclass(space, raw)
+ self.rawdecl = space.interp_w(W_CPPClassDecl,
+ space.findattr(w_raw, space.newtext("__cppdecl__")))
+ self.deref = deref
+
+ def _unwrap_object(self, space, w_obj):
+ from pypy.module._cppyy.interp_cppyy import W_CPPInstance
+ if isinstance(w_obj, W_CPPInstance):
+ # w_obj could carry a 'hidden' smart ptr or be one, cover both
cases
+ have_match = False
+ if w_obj.smartdecl and capi.c_is_subtype(space, w_obj.smartdecl,
self.smartdecl):
+ # hidden case, do not derefence when getting obj address
+ have_match = True
+ rawobject = w_obj._rawobject # TODO: this direct access
if fugly
+ offset = capi.c_base_offset(space, w_obj.smartdecl,
self.smartdecl, rawobject, 1)
+ elif capi.c_is_subtype(space, w_obj.clsdecl, self.smartdecl):
+ # exposed smart pointer
+ have_match = True
+ rawobject = w_obj.get_rawobject()
+ offset = capi.c_base_offset(space, w_obj.clsdecl,
self.smartdecl, rawobject, 1)
+ if have_match:
+ obj_address = capi.direct_ptradd(rawobject, offset)
+ return rffi.cast(capi.C_OBJECT, obj_address)
+
+ raise oefmt(space.w_TypeError,
+ "cannot pass %T instance as %s", w_obj, self.rawdecl.name)
+
+ def convert_argument(self, space, w_obj, address, call_local):
+ x = rffi.cast(rffi.VOIDPP, address)
+ x[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_obj))
+ address = rffi.cast(capi.C_OBJECT, address)
+ ba = rffi.cast(rffi.CCHARP, address)
+ ba[capi.c_function_arg_typeoffset(space)] = self.typecode
+
+ def from_memory(self, space, w_obj, w_pycppclass, offset):
+ address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj,
offset))
+ from pypy.module._cppyy import interp_cppyy
+ return interp_cppyy.wrap_cppinstance(space, address,
+ self.rawdecl, smartdecl=self.smartdecl, deref=self.deref,
do_cast=False)
+
+class SmartPointerPtrConverter(SmartPointerConverter):
+ typecode = 'o'
+
+ def from_memory(self, space, w_obj, w_pycppclass, offset):
+ self._is_abstract(space)
+
+ def to_memory(self, space, w_obj, w_value, offset):
+ self._is_abstract(space)
+
+
+class SmartPointerRefConverter(SmartPointerPtrConverter):
+ typecode = 'V'
+
+
class MacroConverter(TypeConverter):
def from_memory(self, space, w_obj, w_pycppclass, offset):
# TODO: get the actual type info from somewhere ...
@@ -729,44 +782,55 @@
# 1) full, exact match
# 1a) const-removed match
# 2) match of decorated, unqualified type
- # 3) accept ref as pointer (for the stubs, const& can be
- # by value, but that does not work for the ffi path)
- # 4) generalized cases (covers basically all user classes)
- # 5) void* or void converter (which fails on use)
+ # 3) generalized cases (covers basically all user classes)
+ # 3a) smart pointers
+ # 4) void* or void converter (which fails on use)
name = capi.c_resolve_name(space, _name)
- # 1) full, exact match
+ # full, exact match
try:
return _converters[name](space, default)
except KeyError:
pass
- # 1a) const-removed match
+ # const-removed match
try:
return _converters[helper.remove_const(name)](space, default)
except KeyError:
pass
- # 2) match of decorated, unqualified type
+ # match of decorated, unqualified type
compound = helper.compound(name)
clean_name = capi.c_resolve_name(space, helper.clean_type(name))
try:
# array_index may be negative to indicate no size or no size found
array_size = helper.array_size(_name) # uses original arg
+ # TODO: using clean_name here drops const (e.g. const char[] will
+ # never be seen this way)
return _a_converters[clean_name+compound](space, array_size)
except KeyError:
pass
- # 3) TODO: accept ref as pointer
-
- # 4) generalized cases (covers basically all user classes)
+ # generalized cases (covers basically all user classes)
from pypy.module._cppyy import interp_cppyy
scope_decl = interp_cppyy.scope_byname(space, clean_name)
if scope_decl:
- # type check for the benefit of the annotator
from pypy.module._cppyy.interp_cppyy import W_CPPClassDecl
clsdecl = space.interp_w(W_CPPClassDecl, scope_decl, can_be_None=False)
+
+ # check smart pointer type
+ check_smart = capi.c_smartptr_info(space, clean_name)
+ if check_smart[0]:
+ if compound == '':
+ return SmartPointerConverter(space, clsdecl, check_smart[1],
check_smart[2])
+ elif compound == '*':
+ return SmartPointerPtrConverter(space, clsdecl,
check_smart[1], check_smart[2])
+ elif compound == '&':
+ return SmartPointerRefConverter(space, clsdecl,
check_smart[1], check_smart[2])
+ # fall through: can still return smart pointer in non-smart way
+
+ # type check for the benefit of the annotator
if compound == "*":
return InstancePtrConverter(space, clsdecl)
elif compound == "&":
@@ -786,7 +850,7 @@
if pos > 0:
return FunctionPointerConverter(space, name[pos+2:])
- # 5) void* or void converter (which fails on use)
+ # void* or void converter (which fails on use)
if 0 <= compound.find('*'):
return VoidPtrConverter(space, default) # "user knows best"
@@ -797,6 +861,7 @@
_converters["bool"] = BoolConverter
_converters["char"] = CharConverter
+_converters["unsigned char"] = UCharConverter
_converters["float"] = FloatConverter
_converters["const float&"] = ConstFloatRefConverter
_converters["double"] = DoubleConverter
@@ -886,6 +951,7 @@
"NOT_RPYTHON"
array_info = (
('b', rffi.sizeof(rffi.UCHAR), ("bool",)), # is debatable, but
works ...
+ ('B', rffi.sizeof(rffi.UCHAR), ("unsigned char",)),
('h', rffi.sizeof(rffi.SHORT), ("short int", "short")),
('H', rffi.sizeof(rffi.USHORT), ("unsigned short int", "unsigned
short")),
('i', rffi.sizeof(rffi.INT), ("int",)),
@@ -901,9 +967,11 @@
for tcode, tsize, names in array_info:
class ArrayConverter(ArrayTypeConverterMixin, TypeConverter):
+ _immutable_fields_ = ['typecode', 'typesize']
typecode = tcode
typesize = tsize
class PtrConverter(PtrTypeConverterMixin, TypeConverter):
+ _immutable_fields_ = ['typecode', 'typesize']
typecode = tcode
typesize = tsize
for name in names:
@@ -912,6 +980,7 @@
# special case, const char* w/ size and w/o '\0'
_a_converters["const char[]"] = CStringConverterWithSize
+ _a_converters["char[]"] = _a_converters["const char[]"] #
debatable
_build_array_converters()
@@ -919,7 +988,6 @@
def _add_aliased_converters():
"NOT_RPYTHON"
aliases = (
- ("char", "unsigned char"), # TODO: check
("char", "signed char"), # TODO: check
("const char*", "char*"),
diff --git a/pypy/module/_cppyy/executor.py b/pypy/module/_cppyy/executor.py
--- a/pypy/module/_cppyy/executor.py
+++ b/pypy/module/_cppyy/executor.py
@@ -5,7 +5,7 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import jit_libffi
-from pypy.module._rawffi.interp_rawffi import unpack_simple_shape
+from pypy.module._rawffi.interp_rawffi import letter2tp
from pypy.module._rawffi.array import W_Array, W_ArrayInstance
from pypy.module._cppyy import helper, capi, ffitypes
@@ -56,11 +56,11 @@
raise NotImplementedError
lresult = capi.c_call_l(space, cppmethod, cppthis, num_args, args)
ptrval = rffi.cast(rffi.ULONG, lresult)
- arr = space.interp_w(W_Array, unpack_simple_shape(space,
space.newtext(self.typecode)))
- if ptrval == 0:
+ if ptrval == rffi.cast(rffi.ULONG, 0):
from pypy.module._cppyy import interp_cppyy
return interp_cppyy.get_nullptr(space)
- return arr.fromaddress(space, ptrval, sys.maxint)
+ shape = letter2tp(space, self.typecode)
+ return W_ArrayInstance(space, shape, sys.maxint/shape.size, ptrval)
class VoidExecutor(FunctionExecutor):
@@ -125,7 +125,6 @@
class CStringExecutor(FunctionExecutor):
-
def execute(self, space, cppmethod, cppthis, num_args, args):
lresult = capi.c_call_l(space, cppmethod, cppthis, num_args, args)
ccpresult = rffi.cast(rffi.CCHARP, lresult)
@@ -136,7 +135,6 @@
class ConstructorExecutor(FunctionExecutor):
-
def execute(self, space, cppmethod, cpptype, num_args, args):
from pypy.module._cppyy import interp_cppyy
newthis = capi.c_constructor(space, cppmethod, cpptype, num_args, args)
@@ -144,80 +142,77 @@
return space.newlong(rffi.cast(rffi.LONG, newthis)) # really want
ptrdiff_t here
-class InstancePtrExecutor(FunctionExecutor):
- _immutable_fields_ = ['cppclass']
+class InstanceExecutor(FunctionExecutor):
+ # For return of a C++ instance by pointer: MyClass* func()
+ _immutable_fields_ = ['clsdecl']
- def __init__(self, space, cppclass):
- FunctionExecutor.__init__(self, space, cppclass)
- self.cppclass = cppclass
+ def __init__(self, space, clsdecl):
+ FunctionExecutor.__init__(self, space, clsdecl)
+ self.clsdecl = clsdecl
+
+ def _wrap_result(self, space, obj):
+ from pypy.module._cppyy import interp_cppyy
+ return interp_cppyy.wrap_cppinstance(space,
+ obj, self.clsdecl, do_cast=False, python_owns=True, fresh=True)
+
+ def execute(self, space, cppmethod, cppthis, num_args, args):
+ oresult = capi.c_call_o(space, cppmethod, cppthis, num_args, args,
self.clsdecl)
+ return self._wrap_result(space, rffi.cast(capi.C_OBJECT, oresult))
+
+
+class InstancePtrExecutor(InstanceExecutor):
+ # For return of a C++ instance by pointer: MyClass* func()
def cffi_type(self, space):
state = space.fromcache(ffitypes.State)
return state.c_voidp
+ def _wrap_result(self, space, obj):
+ from pypy.module._cppyy import interp_cppyy
+ return interp_cppyy.wrap_cppinstance(space, obj, self.clsdecl)
+
def execute(self, space, cppmethod, cppthis, num_args, args):
- from pypy.module._cppyy import interp_cppyy
- long_result = capi.c_call_l(space, cppmethod, cppthis, num_args, args)
- ptr_result = rffi.cast(capi.C_OBJECT, long_result)
- pyres = interp_cppyy.wrap_cppinstance(space, ptr_result, self.cppclass)
- return pyres
+ lresult = capi.c_call_l(space, cppmethod, cppthis, num_args, args)
+ return self._wrap_result(space, rffi.cast(capi.C_OBJECT, lresult))
def execute_libffi(self, space, cif_descr, funcaddr, buffer):
jit_libffi.jit_ffi_call(cif_descr, funcaddr, buffer)
- result = rffi.ptradd(buffer, cif_descr.exchange_result)
- from pypy.module._cppyy import interp_cppyy
- ptr_result = rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP,
result)[0])
- return interp_cppyy.wrap_cppinstance(space, ptr_result, self.cppclass)
+ presult = rffi.ptradd(buffer, cif_descr.exchange_result)
+ obj = rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, presult)[0])
+ return self._wrap_result(space, obj)
class InstancePtrPtrExecutor(InstancePtrExecutor):
+ # For return of a C++ instance by ptr-to-ptr or ptr-to-ref: MyClass*&
func()
def execute(self, space, cppmethod, cppthis, num_args, args):
- from pypy.module._cppyy import interp_cppyy
- voidp_result = capi.c_call_r(space, cppmethod, cppthis, num_args, args)
- ref_address = rffi.cast(rffi.VOIDPP, voidp_result)
- ptr_result = rffi.cast(capi.C_OBJECT, ref_address[0])
- return interp_cppyy.wrap_cppinstance(space, ptr_result, self.cppclass)
+ presult = capi.c_call_r(space, cppmethod, cppthis, num_args, args)
+ ref = rffi.cast(rffi.VOIDPP, presult)
+ return self._wrap_result(space, rffi.cast(capi.C_OBJECT, ref[0]))
def execute_libffi(self, space, cif_descr, funcaddr, buffer):
from pypy.module._cppyy.interp_cppyy import FastCallNotPossible
raise FastCallNotPossible
-class InstanceExecutor(InstancePtrExecutor):
-
- def execute(self, space, cppmethod, cppthis, num_args, args):
- from pypy.module._cppyy import interp_cppyy
- long_result = capi.c_call_o(space, cppmethod, cppthis, num_args, args,
self.cppclass)
- ptr_result = rffi.cast(capi.C_OBJECT, long_result)
- return interp_cppyy.wrap_cppinstance(space, ptr_result, self.cppclass,
- do_cast=False, python_owns=True,
fresh=True)
-
- def execute_libffi(self, space, cif_descr, funcaddr, buffer):
- from pypy.module._cppyy.interp_cppyy import FastCallNotPossible
- raise FastCallNotPossible
-
class StdStringExecutor(InstancePtrExecutor):
-
def execute(self, space, cppmethod, cppthis, num_args, args):
cstr, cstr_len = capi.c_call_s(space, cppmethod, cppthis, num_args,
args)
pystr = rffi.charpsize2str(cstr, cstr_len)
capi.c_free(space, rffi.cast(rffi.VOIDP, cstr))
- return space.newbytes(pystr)
+ return space.newbytes(pystr)
def execute_libffi(self, space, cif_descr, funcaddr, buffer):
from pypy.module._cppyy.interp_cppyy import FastCallNotPossible
raise FastCallNotPossible
class StdStringRefExecutor(InstancePtrExecutor):
-
- def __init__(self, space, cppclass):
+ def __init__(self, space, clsdecl):
from pypy.module._cppyy import interp_cppyy
- cppclass = interp_cppyy.scope_byname(space, capi.std_string_name)
- InstancePtrExecutor.__init__(self, space, cppclass)
+ clsdecl = interp_cppyy.scope_byname(space, capi.std_string_name)
+ InstancePtrExecutor.__init__(self, space, clsdecl)
class PyObjectExecutor(PtrTypeExecutor):
-
def wrap_result(self, space, lresult):
space.getbuiltinmodule("cpyext")
from pypy.module.cpyext.pyobject import PyObject, from_ref, make_ref,
decref
@@ -241,6 +236,41 @@
return self.wrap_result(space, rffi.cast(rffi.LONGP, result)[0])
+class SmartPointerExecutor(InstanceExecutor):
+ _immutable_fields_ = ['smartdecl', 'deref']
+
+ def __init__(self, space, smartdecl, raw, deref):
+ from pypy.module._cppyy.interp_cppyy import W_CPPClassDecl,
get_pythonized_cppclass
+ w_raw = get_pythonized_cppclass(space, raw)
+ rawdecl = space.interp_w(W_CPPClassDecl, space.findattr(w_raw,
space.newtext("__cppdecl__")))
+ InstanceExecutor.__init__(self, space, rawdecl)
+ self.smartdecl = smartdecl
+ self.deref = deref
+
+ def _wrap_result(self, space, obj):
+ from pypy.module._cppyy import interp_cppyy
+ return interp_cppyy.wrap_cppinstance(space, obj, self.clsdecl,
+ self.smartdecl, self.deref, do_cast=False, python_owns=True,
fresh=True)
+
+class SmartPointerPtrExecutor(InstancePtrExecutor):
+ _immutable_fields_ = ['smartdecl', 'deref']
+
+ def __init__(self, space, smartdecl, raw, deref):
+        # TODO: share this with SmartPointerExecutor through a mixin
+ from pypy.module._cppyy.interp_cppyy import W_CPPClassDecl,
get_pythonized_cppclass
+ w_raw = get_pythonized_cppclass(space, raw)
+ rawdecl = space.interp_w(W_CPPClassDecl, space.findattr(w_raw,
space.newtext("__cppdecl__")))
+ InstancePtrExecutor.__init__(self, space, rawdecl)
+ self.smartdecl = smartdecl
+ self.deref = deref
+
+ def _wrap_result(self, space, obj):
+ from pypy.module._cppyy import interp_cppyy
+        # TODO: this is a pointer to a smart pointer, take ownership of the
smart one?
+ return interp_cppyy.wrap_cppinstance(space, obj, self.clsdecl,
+ self.smartdecl, self.deref, do_cast=False)
+
+
_executors = {}
def get_executor(space, name):
# Matching of 'name' to an executor factory goes through up to four levels:
@@ -253,7 +283,7 @@
name = capi.c_resolve_name(space, name)
- # 1) full, qualified match
+ # full, qualified match
try:
return _executors[name](space, None)
except KeyError:
@@ -262,13 +292,13 @@
compound = helper.compound(name)
clean_name = capi.c_resolve_name(space, helper.clean_type(name))
- # 1a) clean lookup
+ # clean lookup
try:
return _executors[clean_name+compound](space, None)
except KeyError:
pass
- # 2) drop '&': by-ref is pretty much the same as by-value, python-wise
+ # drop '&': by-ref is pretty much the same as by-value, python-wise
if compound and compound[len(compound)-1] == '&':
# TODO: this does not actually work with Reflex (?)
try:
@@ -276,19 +306,29 @@
except KeyError:
pass
- # 3) types/classes, either by ref/ptr or by value
+ # types/classes, either by ref/ptr or by value
from pypy.module._cppyy import interp_cppyy
cppclass = interp_cppyy.scope_byname(space, clean_name)
if cppclass:
# type check for the benefit of the annotator
from pypy.module._cppyy.interp_cppyy import W_CPPClassDecl
- cppclass = space.interp_w(W_CPPClassDecl, cppclass, can_be_None=False)
+ clsdecl = space.interp_w(W_CPPClassDecl, cppclass, can_be_None=False)
+
+ # check smart pointer type
+ check_smart = capi.c_smartptr_info(space, clean_name)
+ if check_smart[0]:
+ if compound == '':
+ return SmartPointerExecutor(space, clsdecl, check_smart[1],
check_smart[2])
+ elif compound == '*' or compound == '&':
+ return SmartPointerPtrExecutor(space, clsdecl, check_smart[1],
check_smart[2])
+ # fall through: can still return smart pointer in non-smart way
+
if compound == '':
- return InstanceExecutor(space, cppclass)
+ return InstanceExecutor(space, clsdecl)
elif compound == '*' or compound == '&':
- return InstancePtrExecutor(space, cppclass)
+ return InstancePtrExecutor(space, clsdecl)
elif compound == '**' or compound == '*&':
- return InstancePtrPtrExecutor(space, cppclass)
+ return InstancePtrPtrExecutor(space, clsdecl)
elif "(anonymous)" in name:
# special case: enum w/o a type name
return _executors["internal_enum_type_t"](space, None)
diff --git a/pypy/module/_cppyy/ffitypes.py b/pypy/module/_cppyy/ffitypes.py
--- a/pypy/module/_cppyy/ffitypes.py
+++ b/pypy/module/_cppyy/ffitypes.py
@@ -74,15 +74,52 @@
# allow int to pass to char and make sure that str is of length 1
if space.isinstance_w(w_value, space.w_int):
ival = space.c_int_w(w_value)
+ if ival < -128 or 127 < ival:
+ raise oefmt(space.w_ValueError, "char arg not in
range(-128,128)")
+
+ value = rffi.cast(rffi.CHAR, space.c_int_w(w_value))
+ else:
+ if space.isinstance_w(w_value, space.w_text):
+ value = space.text_w(w_value)
+ else:
+ value = space.bytes_w(w_value)
+ if len(value) != 1:
+ raise oefmt(space.w_ValueError,
+ "char expected, got string of size %d", len(value))
+
+ value = rffi.cast(rffi.CHAR, value[0])
+ return value # turn it into a "char" to the annotator
+
+ def cffi_type(self, space):
+ state = space.fromcache(State)
+ return state.c_char
+
+class UCharTypeMixin(object):
+ _mixin_ = True
+ _immutable_fields_ = ['c_type', 'c_ptrtype']
+
+ c_type = rffi.UCHAR
+ c_ptrtype = rffi.CCHARP # there's no such thing as rffi.UCHARP
+
+ def _wrap_object(self, space, obj):
+ return space.newbytes(obj)
+
+ def _unwrap_object(self, space, w_value):
+ # allow int to pass to char and make sure that str is of length 1
+ if space.isinstance_w(w_value, space.w_int):
+ ival = space.c_int_w(w_value)
if ival < 0 or 256 <= ival:
raise oefmt(space.w_ValueError, "char arg not in range(256)")
value = rffi.cast(rffi.CHAR, space.c_int_w(w_value))
else:
- value = space.text_w(w_value)
+ if space.isinstance_w(w_value, space.w_text):
+ value = space.text_w(w_value)
+ else:
+ value = space.bytes_w(w_value)
if len(value) != 1:
raise oefmt(space.w_ValueError,
- "char expected, got string of size %d", len(value))
+                        "unsigned char expected, got string of size %d",
len(value))
value = rffi.cast(rffi.CHAR, value[0])
return value # turn it into a "char" to the annotator
@@ -277,6 +314,7 @@
"NOT_RPYTHON"
if c_type == bool: return BoolTypeMixin
if c_type == rffi.CHAR: return CharTypeMixin
+ if c_type == rffi.UCHAR: return UCharTypeMixin
if c_type == rffi.SHORT: return ShortTypeMixin
if c_type == rffi.USHORT: return UShortTypeMixin
if c_type == rffi.INT: return IntTypeMixin
diff --git a/pypy/module/_cppyy/helper.py b/pypy/module/_cppyy/helper.py
--- a/pypy/module/_cppyy/helper.py
+++ b/pypy/module/_cppyy/helper.py
@@ -1,3 +1,4 @@
+import sys
from rpython.rlib import rstring
@@ -116,6 +117,17 @@
# TODO: perhaps absorb or "pythonify" these operators?
return cppname
+if sys.hexversion < 0x3000000:
+ CPPYY__div__ = "__div__"
+ CPPYY__idiv__ = "__idiv__"
+ CPPYY__long__ = "__long__"
+ CPPYY__bool__ = "__nonzero__"
+else:
+ CPPYY__div__ = "__truediv__"
+ CPPYY__idiv__ = "__itruediv__"
+ CPPYY__long__ = "__int__"
+ CPPYY__bool__ = "__bool__"
+
# _operator_mappings["[]"] = "__setitem__" # depends on return type
# _operator_mappings["+"] = "__add__" # depends on # of args (see
__pos__)
# _operator_mappings["-"] = "__sub__" # id. (eq. __neg__)
@@ -123,7 +135,7 @@
# _operator_mappings["[]"] = "__getitem__" # depends on return type
_operator_mappings["()"] = "__call__"
-_operator_mappings["/"] = "__div__" # __truediv__ in p3
+_operator_mappings["/"] = CPPYY__div__
_operator_mappings["%"] = "__mod__"
_operator_mappings["**"] = "__pow__" # not C++
_operator_mappings["<<"] = "__lshift__"
@@ -136,7 +148,7 @@
_operator_mappings["+="] = "__iadd__"
_operator_mappings["-="] = "__isub__"
_operator_mappings["*="] = "__imul__"
-_operator_mappings["/="] = "__idiv__" # __itruediv__ in p3
+_operator_mappings["/="] = CPPYY__idiv__
_operator_mappings["%="] = "__imod__"
_operator_mappings["**="] = "__ipow__"
_operator_mappings["<<="] = "__ilshift__"
@@ -154,7 +166,7 @@
# the following type mappings are "exact"
_operator_mappings["const char*"] = "__str__"
_operator_mappings["int"] = "__int__"
-_operator_mappings["long"] = "__long__" # __int__ in p3
+_operator_mappings["long"] = CPPYY__long__
_operator_mappings["double"] = "__float__"
# the following type mappings are "okay"; the assumption is that they
@@ -163,13 +175,13 @@
_operator_mappings["char*"] = "__str__"
_operator_mappings["short"] = "__int__"
_operator_mappings["unsigned short"] = "__int__"
-_operator_mappings["unsigned int"] = "__long__" # __int__ in p3
-_operator_mappings["unsigned long"] = "__long__" # id.
-_operator_mappings["long long"] = "__long__" # id.
-_operator_mappings["unsigned long long"] = "__long__" # id.
+_operator_mappings["unsigned int"] = CPPYY__long__
+_operator_mappings["unsigned long"] = CPPYY__long__
+_operator_mappings["long long"] = CPPYY__long__
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit