Author: Carl Friedrich Bolz <cfb...@gmx.de>
Branch: guard-compatible
Changeset: r87252:d983a5a6f8ed
Date: 2016-09-20 13:53 +0200
http://bitbucket.org/pypy/pypy/changeset/d983a5a6f8ed/
Log: merge default diff too long, truncating to 2000 out of 5948 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -30,3 +30,6 @@ 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -369,3 +369,109 @@ Roman Podoliaka Dan Loewenherz werat + + Heinrich-Heine University, Germany + Open End AB (formerly AB Strakt), Sweden + merlinux GmbH, Germany + tismerysoft GmbH, Germany + Logilab Paris, France + DFKI GmbH, Germany + Impara, Germany + Change Maker, Sweden + University of California Berkeley, USA + Google Inc. + King's College London + +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. + +License for 'lib-python/2.7' +============================ + +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the terms that you can find here: https://docs.python.org/2/license.html + +License for 'pypy/module/unicodedata/' +====================================== + +The following files are from the website of The Unicode Consortium +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. + + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt + +License for 'dotviewer/font/' +============================= + +Copyright (C) 2008 The Android Open Source Project + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Detailed license information is contained in the NOTICE file in the +directory. + + +Licenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and +acknowledgements for third-party software incorporated in the PyPy +distribution. + +License for 'Tcl/Tk' +-------------------- + +This copy of PyPy contains library code that may, when used, result in +the Tcl/Tk library to be loaded. PyPy also includes code that may be +regarded as being a copy of some parts of the Tcl/Tk header files. +You may see a copy of the License for Tcl/Tk in the file +`lib_pypy/_tkinter/license.terms` included here. + +License for 'bzip2' +------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +bzip2 library. 
You may see a copy of the License for bzip2/libbzip2 at + + http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html + +License for 'openssl' +--------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +openssl library. You may see a copy of the License for OpenSSL at + + https://www.openssl.org/source/license.html + +License for 'gdbm' +------------------ + +The gdbm module includes code from gdbm.h, which is distributed under +the terms of the GPL license version 2 or any later version. Thus the +gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed +under the terms of the GPL license as well. + +License for 'rpython/rlib/rvmprof/src' +-------------------------------------- + +The code is based on gperftools. You may see a copy of the License for it at + + https://github.com/gperftools/gperftools/blob/master/COPYING diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -498,7 +498,10 @@ """ Collector for test methods. """ def collect(self): if hasinit(self.obj): - pytest.skip("class %s.%s with __init__ won't get collected" % ( + # XXX used to be skip(), but silently skipping classes + # XXX just because they have been written long ago is + # XXX imho a very, very, very bad idea + pytest.fail("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -13,6 +13,7 @@ import sys import os import shlex +import imp from distutils.errors import DistutilsPlatformError @@ -62,8 +63,7 @@ """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = ".so" - g['SOABI'] = g['SO'].rsplit('.')[0] + g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -75,8 +75,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = ".pyd" - g['SOABI'] = g['SO'].rsplit('.')[0] + g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] global _config_vars _config_vars = g diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -529,7 +529,7 @@ for suffix, mode, type_ in imp.get_suffixes(): if type_ == imp.C_EXTENSION: _CONFIG_VARS['SOABI'] = suffix.split('.')[1] - break + break if args: vals = [] diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.0 +Version: 1.8.3 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.3" +__version_info__ = (1, 8, 3) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. +*/ +#ifndef _CFFI_USE_EMBEDDING +# include <pyconfig.h> +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include <Python.h> #ifdef __cplusplus extern "C" { diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.3" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -652,7 +652,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. Use a file name ending in '.*' @@ -669,7 +669,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -997,29 +997,43 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = 
self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) return new_cdata typeof = type diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') @@ -1431,7 +1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target=None, **kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -69,16 +69,36 @@ else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. 
+ """ + if 'py_limited_api' not in kwds: + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension - from distutils.command.build_ext import build_ext + # We are a setuptools extension. Need this build_ext for py_limited_api. + from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): diff --git a/pypy/doc/config/translation.profopt.txt b/pypy/doc/config/translation.profopt.txt --- a/pypy/doc/config/translation.profopt.txt +++ b/pypy/doc/config/translation.profopt.txt @@ -3,3 +3,14 @@ RPython program) to gather profile data. Example for pypy-c: "-c 'from richards import main;main(); from test import pystone; pystone.main()'" + +NOTE: be aware of what this does in JIT-enabled executables. What it +does is instrument and later optimize the C code that happens to run in +the example you specify, ignoring any execution of the JIT-generated +assembler. That means that you have to choose the example wisely. If +it is something that will just generate assembler and stay there, there +is little value. If it is something that exercises heavily library +routines that are anyway written in C, then it will optimize that. Most +interesting would be something that causes a lot of JIT-compilation, +like running a medium-sized test suite several times in a row, in order +to optimize the warm-up in general. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -449,6 +449,27 @@ support (see ``multiline_input()``). On the other hand, ``parse_and_bind()`` calls are ignored (issue `#2072`_). +* ``sys.getsizeof()`` always raises ``TypeError``. This is because a + memory profiler using this function is most likely to give results + inconsistent with reality on PyPy. It would be possible to have + ``sys.getsizeof()`` return a number (with enough work), but that may + or may not represent how much memory the object uses. It doesn't even + make really sense to ask how much *one* object uses, in isolation with + the rest of the system. For example, instances have maps, which are + often shared across many instances; in this case the maps would + probably be ignored by an implementation of ``sys.getsizeof()``, but + their overhead is important in some cases if they are many instances + with unique maps. Conversely, equal strings may share their internal + string data even if they are different objects---or empty containers + may share parts of their internals as long as they are empty. Even + stranger, some lists create objects as you read them; if you try to + estimate the size in memory of ``range(10**6)`` as the sum of all + items' size, that operation will by itself create one million integer + objects that never existed in the first place. 
Note that some of + these concerns also exist on CPython, just less so. For this reason + we explicitly don't implement ``sys.getsizeof()``. + + .. _`is ignored in PyPy`: http://bugs.python.org/issue14621 .. _`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html .. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/ diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-pypy2.7-v5.4.1.rst release-pypy2.7-v5.4.0.rst release-pypy2.7-v5.3.1.rst release-pypy2.7-v5.3.0.rst diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst @@ -0,0 +1,64 @@ +========== +PyPy 5.4.1 +========== + +We have released a bugfix for PyPy2.7-v5.4.0, released last week, +due to the following issues: + + * Update list of contributors in documentation and LICENSE file, + this was unfortunately left out of 5.4.0. My apologies to the new + contributors + + * Allow tests run with ``-A`` to find ``libm.so`` even if it is a script not a + dynamically loadable file + + * Bump ``sys.setrecursionlimit()`` when translating PyPy, for translating with CPython + + * Tweak a float comparison with 0 in ``backendopt.inline`` to avoid rounding errors + + * Fix for an issue for translating the sandbox + + * Fix for and issue where ``unicode.decode('utf8', 'custom_replace')`` messed up + the last byte of a unicode string sometimes + + * Update built-in cffi_ to version 1.8.1 + + * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash + translation with a message asking for help porting it + + * Fix a regression where a PyBytesObject was forced (converted to a RPython + object) when not required, reported as issue #2395 + +Thanks to those who reported the issues. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +.. _cffi: https://cffi.readthedocs.io +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -7,3 +7,17 @@ .. branch: rpython-resync Backport rpython changes made directly on the py3k and py3.5 branches. + +.. branch: buffer-interface +Implement PyObject_GetBuffer, PyMemoryView_GET_BUFFER, and handles memoryviews +in numpypy + +.. branch: force-virtual-state +Improve merging of virtual states in the JIT in order to avoid jumping to the +preamble. Accomplished by allocating virtual objects where non-virtuals are +expected. + +.. 
branch: conditional_call_value_3 +JIT residual calls: if the called function starts with a fast-path +like "if x.foo != 0: return x.foo", then inline the check before +doing the CALL. For now, string hashing is about the only case. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -239,6 +239,10 @@ raise Exception("Cannot use the --output option with PyPy " "when --shared is on (it is by default). " "See issue #1971.") + if config.translation.profopt is not None: + raise Exception("Cannot use the --profopt option " + "when --shared is on (it is by default). " + "See issue #2398.") if sys.platform == 'win32': libdir = thisdir.join('..', '..', 'libs') libdir.ensure(dir=1) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1430,6 +1430,9 @@ BUF_FORMAT = 0x0004 BUF_ND = 0x0008 BUF_STRIDES = 0x0010 | BUF_ND + BUF_C_CONTIGUOUS = 0x0020 | BUF_STRIDES + BUF_F_CONTIGUOUS = 0x0040 | BUF_STRIDES + BUF_ANY_CONTIGUOUS = 0x0080 | BUF_STRIDES BUF_INDIRECT = 0x0100 | BUF_STRIDES BUF_CONTIG_RO = BUF_ND @@ -1978,6 +1981,7 @@ 'ZeroDivisionError', 'RuntimeWarning', 'PendingDeprecationWarning', + 'UserWarning', ] if sys.platform.startswith("win"): diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -63,7 +63,7 @@ """x.__iter__() <==> iter(x)""" return self.space.wrap(self) - def descr_send(self, w_arg=None): + def descr_send(self, w_arg): """send(arg) -> send 'arg' into generator, return next yielded value or raise StopIteration.""" return self.send_ex(w_arg) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -264,25 +264,22 @@ try: executioncontext.call_trace(self) # - if operr is not None: - ec = self.space.getexecutioncontext() - next_instr = self.handle_operation_error(ec, operr) - self.last_instr = intmask(next_instr - 1) - else: - # Execution starts just after the last_instr. Initially, - # last_instr is -1. After a generator suspends it points to - # the YIELD_VALUE instruction. - next_instr = r_uint(self.last_instr + 1) - if next_instr != 0: - self.pushvalue(w_inputvalue) - # try: + if operr is not None: + ec = self.space.getexecutioncontext() + next_instr = self.handle_operation_error(ec, operr) + self.last_instr = intmask(next_instr - 1) + else: + # Execution starts just after the last_instr. Initially, + # last_instr is -1. After a generator suspends it points to + # the YIELD_VALUE instruction. 
+ next_instr = r_uint(self.last_instr + 1) + if next_instr != 0: + self.pushvalue(w_inputvalue) w_exitvalue = self.dispatch(self.pycode, next_instr, executioncontext) - except Exception: - executioncontext.return_trace(self, self.space.w_None) - raise - executioncontext.return_trace(self, w_exitvalue) + finally: + executioncontext.return_trace(self, w_exitvalue) # it used to say self.last_exception = None # this is now done by the code in pypyjit module # since we don't want to invalidate the virtualizable diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -57,12 +57,14 @@ def f(): yield 2 g = f() + # two arguments version raises(NameError, g.throw, NameError, "Error") def test_throw2(self): def f(): yield 2 g = f() + # single argument version raises(NameError, g.throw, NameError("Error")) def test_throw3(self): @@ -221,7 +223,8 @@ def f(): yield 1 g = f() - raises(TypeError, g.send, 1) + raises(TypeError, g.send) # one argument required + raises(TypeError, g.send, 1) # not started, must send None def test_generator_explicit_stopiteration(self): def f(): diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -562,3 +562,21 @@ res = f(10).g() sys.settrace(None) assert res == 10 + + def test_throw_trace_bug(self): + import sys + def f(): + yield 5 + gen = f() + assert next(gen) == 5 + seen = [] + def trace_func(frame, event, *args): + seen.append(event) + return trace_func + sys.settrace(trace_func) + try: + gen.throw(ValueError) + except ValueError: + pass + sys.settrace(None) + assert seen == ['call', 'exception', 'return'] diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.0" +VERSION = "1.8.3" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -11,7 +11,7 @@ from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj -from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray, W_CTypePointer from pypy.module._cffi_backend import ctypeprim @@ -22,6 +22,7 @@ is_nonfunc_pointer_or_array = True def __init__(self, space, ctptr, length, arraysize, extra): + assert isinstance(ctptr, W_CTypePointer) W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0, ctptr.ctitem) self.length = length diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -35,8 +35,7 @@ assert isinstance(ellipsis, bool) extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, - could_cast_anything=False) + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult) self.fargs = fargs self.ellipsis = ellipsis self.abi = abi @@ -59,6 +58,16 @@ lltype.free(self.cif_descr, 
flavor='raw') self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) + def is_unichar_ptr_or_array(self): + return False + + def is_char_or_unichar_ptr_or_array(self): + return False + + def string(self, cdataobj, maxlen): + # Can't use ffi.string() on a function pointer + return W_CType.string(self, cdataobj, maxlen) + def new_ctypefunc_completing_argtypes(self, args_w): space = self.space nargs_declared = len(self.fargs) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -19,7 +19,6 @@ # XXX this could be improved with an elidable method get_size() # that raises in case it's still -1... - cast_anything = False is_primitive_integer = False is_nonfunc_pointer_or_array = False is_indirect_arg_for_call_python = False diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -120,7 +120,6 @@ class W_CTypePrimitiveChar(W_CTypePrimitiveCharOrUniChar): _attrs_ = [] - cast_anything = True def cast_to_int(self, cdata): return self.space.wrap(ord(cdata[0])) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -14,12 +14,11 @@ class W_CTypePtrOrArray(W_CType): - _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length'] - _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length'] + _attrs_ = ['ctitem', 'accept_str', 'length'] + _immutable_fields_ = ['ctitem', 'accept_str', 'length'] length = -1 - def __init__(self, space, size, extra, extra_position, ctitem, - could_cast_anything=True): + def __init__(self, space, size, extra, extra_position, ctitem): name, name_position = ctitem.insert_name(extra, extra_position) W_CType.__init__(self, space, size, name, name_position) # this is the "underlying type": @@ -27,10 +26,11 @@ # - for arrays, it is the array item type # - for functions, it is the return type self.ctitem = ctitem - self.can_cast_anything = could_cast_anything and ctitem.cast_anything - self.accept_str = (self.can_cast_anything or - (ctitem.is_primitive_integer and - ctitem.size == rffi.sizeof(lltype.Char))) + self.accept_str = (self.is_nonfunc_pointer_or_array and + (isinstance(ctitem, ctypevoid.W_CTypeVoid) or + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar) or + (ctitem.is_primitive_integer and + ctitem.size == rffi.sizeof(lltype.Char)))) def is_unichar_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar) @@ -137,7 +137,10 @@ class W_CTypePtrBase(W_CTypePtrOrArray): # base class for both pointers and pointers-to-functions - _attrs_ = [] + _attrs_ = ['is_void_ptr', 'is_voidchar_ptr'] + _immutable_fields_ = ['is_void_ptr', 'is_voidchar_ptr'] + is_void_ptr = False + is_voidchar_ptr = False def convert_to_object(self, cdata): ptrdata = rffi.cast(rffi.CCHARPP, cdata)[0] @@ -154,7 +157,16 @@ else: raise self._convert_error("compatible pointer", w_ob) if self is not other: - if not (self.can_cast_anything or other.can_cast_anything): + if self.is_void_ptr or other.is_void_ptr: + pass # cast from or to 'void *' + elif self.is_voidchar_ptr or other.is_voidchar_ptr: + space = self.space + msg = ("implicit cast from '%s' to '%s' " + "will be forbidden in the future (check that the types " + "are as you expect; use an explicit ffi.cast() if they " + "are 
correct)" % (other.name, self.name)) + space.warn(space.wrap(msg), space.w_UserWarning, stacklevel=1) + else: raise self._convert_error("compatible pointer", w_ob) rffi.cast(rffi.CCHARPP, cdata)[0] = w_ob.unsafe_escaping_ptr() @@ -165,8 +177,8 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] - _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', '_array_types'] + _immutable_fields_ = ['is_file', 'cache_array_type?'] kind = "pointer" cache_array_type = None is_nonfunc_pointer_or_array = True @@ -181,6 +193,8 @@ self.is_file = (ctitem.name == "struct _IO_FILE" or ctitem.name == "FILE") self.is_void_ptr = isinstance(ctitem, ctypevoid.W_CTypeVoid) + self.is_voidchar_ptr = (self.is_void_ptr or + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar)) W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem) def newp(self, w_init, allocator): diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -105,9 +105,6 @@ return True return False - def _check_only_one_argument_for_union(self, w_ob): - pass - def convert_from_object(self, cdata, w_ob): if not self._copy_from_same(cdata, w_ob): self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) @@ -117,19 +114,24 @@ ) def convert_struct_from_object(self, cdata, w_ob, optvarsize): self.force_lazy_struct() - self._check_only_one_argument_for_union(w_ob) space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) - if len(lst_w) > len(self._fields_list): - raise oefmt(space.w_ValueError, - "too many initializers for '%s' (got %d)", - self.name, len(lst_w)) - for i in range(len(lst_w)): - optvarsize = self._fields_list[i].write_v(cdata, lst_w[i], + j = 0 + for w_obj in lst_w: + try: + while (self._fields_list[j].flags & + W_CField.BF_IGNORE_IN_CTOR): + j += 1 + except IndexError: + raise oefmt(space.w_ValueError, + "too many initializers for '%s' (got %d)", + self.name, len(lst_w)) + optvarsize = self._fields_list[j].write_v(cdata, w_obj, optvarsize) + j += 1 return optvarsize elif space.isinstance_w(w_ob, space.w_dict): @@ -185,14 +187,6 @@ class W_CTypeUnion(W_CTypeStructOrUnion): kind = "union" - def _check_only_one_argument_for_union(self, w_ob): - space = self.space - n = space.int_w(space.len(w_ob)) - if n > 1: - raise oefmt(space.w_ValueError, - "initializer for '%s': %d items given, but only one " - "supported (use a dict if needed)", self.name, n) - class W_CField(W_Root): _immutable_ = True @@ -200,18 +194,21 @@ BS_REGULAR = -1 BS_EMPTY_ARRAY = -2 - def __init__(self, ctype, offset, bitshift, bitsize): + BF_IGNORE_IN_CTOR = 0x01 + + def __init__(self, ctype, offset, bitshift, bitsize, flags): self.ctype = ctype self.offset = offset self.bitshift = bitshift # >= 0: bitshift; or BS_REGULAR/BS_EMPTY_ARRAY self.bitsize = bitsize + self.flags = flags # BF_xxx def is_bitfield(self): return self.bitshift >= 0 - def make_shifted(self, offset): + def make_shifted(self, offset, fflags): return W_CField(self.ctype, offset + self.offset, - self.bitshift, self.bitsize) + self.bitshift, self.bitsize, self.flags | fflags) def read(self, cdata): cdata = rffi.ptradd(cdata, self.offset) @@ -341,5 +338,6 @@ offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), bitsize = 
interp_attrproperty('bitsize', W_CField), + flags = interp_attrproperty('flags', W_CField), ) W_CField.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/ctypevoid.py b/pypy/module/_cffi_backend/ctypevoid.py --- a/pypy/module/_cffi_backend/ctypevoid.py +++ b/pypy/module/_cffi_backend/ctypevoid.py @@ -7,7 +7,6 @@ class W_CTypeVoid(W_CType): _attrs_ = [] - cast_anything = True kind = "void" def __init__(self, space): diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -32,8 +32,8 @@ @unwrap_spec(w_cdata=cdataobj.W_CData) def from_handle(space, w_cdata): ctype = w_cdata.ctype - if (not isinstance(ctype, ctypeptr.W_CTypePtrOrArray) or - not ctype.can_cast_anything): + if (not isinstance(ctype, ctypeptr.W_CTypePointer) or + not ctype.is_voidchar_ptr): raise oefmt(space.w_TypeError, "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -345,6 +345,11 @@ if alignment < falign and do_align: alignment = falign # + if is_union and i > 0: + fflags = ctypestruct.W_CField.BF_IGNORE_IN_CTOR + else: + fflags = 0 + # if fbitsize < 0: # not a bitfield: common case @@ -372,7 +377,7 @@ for name, srcfld in ftype._fields_dict.items(): srcfield2names[srcfld] = name for srcfld in ftype._fields_list: - fld = srcfld.make_shifted(boffset // 8) + fld = srcfld.make_shifted(boffset // 8, fflags) fields_list.append(fld) try: fields_dict[srcfield2names[srcfld]] = fld @@ -382,7 +387,8 @@ w_ctype._custom_field_pos = True else: # a regular field - fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1) + fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1, + fflags) fields_list.append(fld) fields_dict[fname] = fld @@ -489,7 +495,7 @@ bitshift = 8 * ftype.size - fbitsize- bitshift fld = ctypestruct.W_CField(ftype, field_offset_bytes, - bitshift, fbitsize) + bitshift, fbitsize, fflags) fields_list.append(fld) fields_dict[fname] = fld diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.3", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -2525,6 +2525,25 @@ assert d[2][1].bitshift == -1 assert d[2][1].bitsize == -1 +def test_nested_anonymous_struct_2(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BInnerUnion = new_union_type("union bar") + complete_struct_or_union(BInnerUnion, [('a1', BInt, -1), + ('a2', BInt, -1)]) + complete_struct_or_union(BStruct, [('b1', BInt, -1), + ('', BInnerUnion, -1), + ('b2', BInt, -1)]) + assert sizeof(BInnerUnion) == sizeof(BInt) + assert sizeof(BStruct) == sizeof(BInt) * 3 + fields = [(name, fld.offset, fld.flags) for (name, fld) in BStruct.fields] + assert fields == [ + ('b1', 0 * sizeof(BInt), 0), + ('a1', 1 * sizeof(BInt), 0), + ('a2', 1 * sizeof(BInt), 1), + ('b2', 2 * sizeof(BInt), 0), + ] + def 
test_sizeof_union(): # a union has the largest alignment of its members, and a total size # that is the largest of its items *possibly further aligned* if @@ -3646,3 +3665,27 @@ check_dir(pp, []) check_dir(pp[0], ['a1', 'a2']) check_dir(pp[0][0], ['a1', 'a2']) + +def test_char_pointer_conversion(): + import warnings + assert __version__.startswith(("1.8", "1.9")), ( + "consider turning the warning into an error") + BCharP = new_pointer_type(new_primitive_type("char")) + BIntP = new_pointer_type(new_primitive_type("int")) + BVoidP = new_pointer_type(new_void_type()) + z1 = cast(BCharP, 0) + z2 = cast(BIntP, 0) + z3 = cast(BVoidP, 0) + with warnings.catch_warnings(record=True) as w: + newp(new_pointer_type(BIntP), z1) # warn + assert len(w) == 1 + newp(new_pointer_type(BVoidP), z1) # fine + assert len(w) == 1 + newp(new_pointer_type(BCharP), z2) # warn + assert len(w) == 2 + newp(new_pointer_type(BVoidP), z2) # fine + assert len(w) == 2 + newp(new_pointer_type(BCharP), z3) # fine + assert len(w) == 2 + newp(new_pointer_type(BIntP), z3) # fine + assert len(w) == 2 diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -503,3 +503,10 @@ assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" p = ffi.new("int[]", [-123456789]) assert ffi.unpack(p, 1) == [-123456789] + + def test_bug_1(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + q = ffi.new("char[]", "abcd") + p = ffi.cast("char(*)(void)", q) + raises(TypeError, ffi.string, p) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -450,7 +450,12 @@ # For compatibility assert exc.value.errno == _ssl.SSL_ERROR_WANT_READ finally: - c.shutdown() + try: + c.shutdown() + except _ssl.SSLError: + # If the expected exception was raised, the SSLContext + # can't be shut down yet + pass finally: s.close() diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -30,16 +30,19 @@ raise oefmt(space.w_TypeError, "array.array() does not take keyword arguments") + w_initializer_type = None + w_initializer = None + if len(__args__.arguments_w) > 0: + w_initializer = __args__.arguments_w[0] + w_initializer_type = space.type(w_initializer) for tc in unroll_typecodes: if typecode == tc: a = space.allocate_instance(types[tc].w_class, w_cls) a.__init__(space) - - if len(__args__.arguments_w) > 0: - w_initializer = __args__.arguments_w[0] - if space.type(w_initializer) is space.w_str: + if w_initializer is not None: + if w_initializer_type is space.w_str: a.descr_fromstring(space, w_initializer) - elif space.type(w_initializer) is space.w_list: + elif w_initializer_type is space.w_list: a.descr_fromlist(space, w_initializer) else: a.extend(w_initializer, True) @@ -597,6 +600,18 @@ def getlength(self): return self.array.len * self.array.itemsize + def getformat(self): + return self.array.typecode + + def getitemsize(self): + return self.array.itemsize + + def getndim(self): + return 1 + + def getstrides(self): + return [self.getitemsize()] + def getitem(self, index): array = self.array data = array._charbuf_start() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -119,10 +119,10 @@ 
constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER -METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE +METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE Py_MAX_FMT METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -645,6 +645,9 @@ ('format', rffi.CCHARP), ('shape', Py_ssize_tP), ('strides', Py_ssize_tP), + ('_format', rffi.CFixedArray(rffi.UCHAR, Py_MAX_FMT)), + ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), + ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('suboffsets', Py_ssize_tP), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP) diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,7 +1,6 @@ -from pypy.interpreter.error import oefmt -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import rffi from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER) + cpython_api, CANNOT_FAIL, Py_TPFLAGS_HAVE_NEWBUFFER) from pypy.module.cpyext.pyobject import PyObject @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) @@ -13,33 +12,4 @@ return 1 return 0 -@cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], - rffi.INT_real, error=-1) -def PyObject_GetBuffer(space, w_obj, view, flags): - """Export obj into a Py_buffer, view. These arguments must - never be NULL. The flags argument is a bit field indicating what - kind of buffer the caller is prepared to deal with and therefore what - kind of buffer the exporter is allowed to return. The buffer interface - allows for complicated memory sharing possibilities, but some caller may - not be able to handle all the complexity but may want to see if the - exporter will let them take a simpler view to its memory. - - Some exporters may not be able to share memory in every possible way and - may need to raise errors to signal to some consumers that something is - just not possible. These errors should be a BufferError unless - there is another error that is actually causing the problem. The - exporter can use flags information to simplify how much of the - Py_buffer structure is filled in with non-default values and/or - raise an error if the object can't support a simpler view of its memory. - - 0 is returned on success and -1 on error.""" - raise oefmt(space.w_TypeError, - "PyPy does not yet implement the new buffer interface") - -@cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): - """Return 1 if the memory defined by the view is C-style (fortran is - 'C') or Fortran-style (fortran is 'F') contiguous or either one - (fortran is 'A'). Return 0 otherwise.""" - # PyPy only supports contiguous Py_buffers for now. 
- return 1 + diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -14,45 +14,33 @@ ## Implementation of PyBytesObject ## ================================ ## -## The problem -## ----------- +## PyBytesObject has its own ob_sval buffer, so we have two copies of a string; +## one in the PyBytesObject returned from various C-API functions and another +## in the corresponding RPython object. ## -## PyString_AsString() must return a (non-movable) pointer to the underlying -## ob_sval, whereas pypy strings are movable. C code may temporarily store -## this address and use it, as long as it owns a reference to the PyObject. -## There is no "release" function to specify that the pointer is not needed -## any more. +## The following calls can create a PyBytesObject without a correspoinding +## RPython object: ## -## Also, the pointer may be used to fill the initial value of string. This is -## valid only when the string was just allocated, and is not used elsewhere. +## PyBytes_FromStringAndSize(NULL, n) / PyString_FromStringAndSize(NULL, n) ## -## Solution -## -------- +## In the PyBytesObject returned, the ob_sval buffer may be modified as +## long as the freshly allocated PyBytesObject is not "forced" via a call +## to any of the more sophisticated C-API functions. ## -## PyBytesObject contains two additional members: the ob_size and a pointer to a -## char ob_sval; it may be NULL. -## -## - A string allocated by pypy will be converted into a PyBytesObject with a -## NULL buffer. The first time PyString_AsString() is called, memory is -## allocated (with flavor='raw') and content is copied. -## -## - A string allocated with PyString_FromStringAndSize(NULL, size) will -## allocate a PyBytesObject structure, and a buffer with the specified -## size+1, but the reference won't be stored in the global map; there is no -## corresponding object in pypy. When from_ref() or Py_INCREF() is called, -## the pypy string is created, and added to the global map of tracked -## objects. The buffer is then supposed to be immutable. -## -##- A buffer obtained from PyString_AS_STRING() could be mutable iff -## there is no corresponding pypy object for the string -## -## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a -## similar object. -## -## - PyString_Size() doesn't need to force the object. +## Care has been taken in implementing the functions below, so that +## if they are called with a non-forced PyBytesObject, they will not +## unintentionally force the creation of a RPython object. As long as only these +## are used, the ob_sval buffer is still modifiable: +## +## PyBytes_AsString / PyString_AsString +## PyBytes_AS_STRING / PyString_AS_STRING +## PyBytes_AsStringAndSize / PyString_AsStringAndSize +## PyBytes_Size / PyString_Size +## PyBytes_Resize / PyString_Resize +## _PyBytes_Resize / _PyString_Resize (raises if called with a forced object) ## ## - There could be an (expensive!) check in from_ref() that the buffer still -## corresponds to the pypy gc-managed string. +## corresponds to the pypy gc-managed string, ## PyBytesObjectStruct = lltype.ForwardReference() @@ -156,9 +144,6 @@ "expected string or Unicode object, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) - if not pyobj_has_w_obj(ref): - # XXX Force the ref? 
- bytes_realize(space, ref) return ref_str.c_ob_sval @cpython_api([rffi.VOIDP], rffi.CCHARP, error=0) @@ -182,9 +167,6 @@ raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", from_ref(space, ref)) - if not pyobj_has_w_obj(ref): - # force the ref - bytes_realize(space, ref) ref_str = rffi.cast(PyBytesObject, ref) data[0] = ref_str.c_ob_sval if length: diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -142,7 +142,9 @@ typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *); typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **); -/* Py3k buffer interface */ +/* Py3k buffer interface, adapted for PyPy */ +#define Py_MAX_NDIMS 32 +#define Py_MAX_FMT 5 typedef struct bufferinfo { void *buf; PyObject *obj; /* owned reference */ @@ -156,12 +158,14 @@ char *format; Py_ssize_t *shape; Py_ssize_t *strides; - Py_ssize_t *suboffsets; - + Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/ + unsigned char _format[Py_MAX_FMT]; + Py_ssize_t _strides[Py_MAX_NDIMS]; + Py_ssize_t _shape[Py_MAX_NDIMS]; /* static store for shape and strides of mono-dimensional buffers. */ /* Py_ssize_t smalltable[2]; */ - void *internal; + void *internal; /* always NULL for app-level objects */ } Py_buffer; diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.4.1-alpha0" -#define PYPY_VERSION_NUM 0x05040100 +#define PYPY_VERSION "5.5.0-alpha0" +#define PYPY_VERSION_NUM 0x05050000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -6,7 +6,6 @@ from pypy.interpreter.error import OperationError from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask from rpython.rlib.rbigint import rbigint -from rpython.rlib.rarithmetic import intmask PyLong_Check, PyLong_CheckExact = build_type_checkers("Long") @@ -28,25 +27,25 @@ """Return a new PyLongObject object from a C size_t, or NULL on failure. """ - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([rffi.LONGLONG], PyObject) def PyLong_FromLongLong(space, val): """Return a new PyLongObject object from a C long long, or NULL on failure.""" - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([rffi.ULONG], PyObject) def PyLong_FromUnsignedLong(space, val): """Return a new PyLongObject object from a C unsigned long, or NULL on failure.""" - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([rffi.ULONGLONG], PyObject) def PyLong_FromUnsignedLongLong(space, val): """Return a new PyLongObject object from a C unsigned long long, or NULL on failure.""" - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([PyObject], rffi.ULONG, error=-1) def PyLong_AsUnsignedLong(space, w_long): @@ -203,7 +202,10 @@ can be retrieved from the resulting value using PyLong_AsVoidPtr(). 
If the integer is larger than LONG_MAX, a positive long integer is returned.""" - return space.wrap(rffi.cast(ADDR, p)) + value = rffi.cast(ADDR, p) # signed integer + if value < 0: + return space.newlong_from_rarith_int(rffi.cast(lltype.Unsigned, p)) + return space.wrap(value) @cpython_api([PyObject], rffi.VOIDP, error=lltype.nullptr(rffi.VOIDP.TO)) def PyLong_AsVoidPtr(space, w_long): diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,10 +1,122 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - build_type_checkers) -from pypy.module.cpyext.pyobject import PyObject -from rpython.rtyper.lltypesystem import lltype + Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rarithmetic import widen +from pypy.objspace.std.memoryobject import W_MemoryView PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview") +@cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], + rffi.INT_real, error=-1) +def PyObject_GetBuffer(space, w_obj, view, flags): + """Export obj into a Py_buffer, view. These arguments must + never be NULL. The flags argument is a bit field indicating what + kind of buffer the caller is prepared to deal with and therefore what + kind of buffer the exporter is allowed to return. The buffer interface + allows for complicated memory sharing possibilities, but some caller may + not be able to handle all the complexity but may want to see if the + exporter will let them take a simpler view to its memory. + + Some exporters may not be able to share memory in every possible way and + may need to raise errors to signal to some consumers that something is + just not possible. These errors should be a BufferError unless + there is another error that is actually causing the problem. The + exporter can use flags information to simplify how much of the + Py_buffer structure is filled in with non-default values and/or + raise an error if the object can't support a simpler view of its memory. + + 0 is returned on success and -1 on error.""" + flags = widen(flags) + buf = space.buffer_w(w_obj, flags) + try: + view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) + except ValueError: + raise BufferError("could not create buffer from object") + view.c_obj = make_ref(space, w_obj) + return fill_Py_buffer(space, buf, view) + +def fill_Py_buffer(space, buf, view): + # c_buf, c_obj have been filled in + ndim = buf.getndim() + view.c_len = buf.getlength() + view.c_itemsize = buf.getitemsize() + rffi.setintfield(view, 'c_ndim', ndim) + view.c_format = rffi.cast(rffi.CCHARP, view.c__format) + view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) + view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides) + fmt = buf.getformat() + n = Py_MAX_FMT - 1 # NULL terminated buffer + if len(fmt) > n: + ### WARN? 
+ pass + else: + n = len(fmt) + for i in range(n): + view.c_format[i] = fmt[i] + view.c_format[n] = '\x00' + shape = buf.getshape() + strides = buf.getstrides() + for i in range(ndim): + view.c_shape[i] = shape[i] + view.c_strides[i] = strides[i] + view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) + view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + return 0 + +def _IsFortranContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(view.c_ndim): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + +def _IsCContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(ndim - 1, -1, -1): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + +@cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) +def PyBuffer_IsContiguous(space, view, fort): + """Return 1 if the memory defined by the view is C-style (fortran is + 'C') or Fortran-style (fortran is 'F') contiguous or either one + (fortran is 'A'). Return 0 otherwise.""" + # traverse the strides, checking for consistent stride increases from + # right-to-left (c) or left-to-right (fortran). Copied from cpython + if not view.c_suboffsets: + return 0 + if (fort == 'C'): + return _IsCContiguous(view) + elif (fort == 'F'): + return _IsFortranContiguous(view) + elif (fort == 'A'): + return (_IsCContiguous(view) or _IsFortranContiguous(view)) + return 0 + @cpython_api([PyObject], PyObject) def PyMemoryView_FromObject(space, w_obj): return space.call_method(space.builtin, "memoryview", w_obj) @@ -12,6 +124,7 @@ @cpython_api([PyObject], PyObject) def PyMemoryView_GET_BASE(space, w_obj): # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER + # XXX needed for numpy on py3k raise NotImplementedError('PyMemoryView_GET_BUFFER') @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) @@ -20,21 +133,22 @@ object. The object must be a memoryview instance; this macro doesn't check its type, you must do it yourself or you will risk crashes.""" view = lltype.malloc(Py_buffer, flavor='raw', zero=True) - # TODO - fill in fields - ''' - view.c_buf = buf - view.c_len = length - view.c_obj = obj - Py_IncRef(space, obj) - view.c_itemsize = 1 - rffi.setintfield(view, 'c_readonly', readonly) - rffi.setintfield(view, 'c_ndim', 0) - view.c_format = lltype.nullptr(rffi.CCHARP.TO) - view.c_shape = lltype.nullptr(Py_ssize_tP.TO) - view.c_strides = lltype.nullptr(Py_ssize_tP.TO) - view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) - view.c_internal = lltype.nullptr(rffi.VOIDP.TO) - ''' + if not isinstance(w_obj, W_MemoryView): + return view + ndim = w_obj.buf.getndim() + if ndim >= Py_MAX_NDIMS: + # XXX warn? 
+ return view + try: + view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) + view.c_obj = make_ref(space, w_obj) + rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + isstr = False + except ValueError: + w_s = w_obj.descr_tobytes(space) + view.c_obj = make_ref(space, w_s) + rffi.setintfield(view, 'c_readonly', 1) + isstr = True + fill_Py_buffer(space, w_obj.buf, view) return view - diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -508,10 +508,9 @@ @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) def PyBuffer_Release(space, view): """ - Releases a Py_buffer obtained from getbuffer ParseTuple's s*. - - This is not a complete re-implementation of the CPython API; it only - provides a subset of CPython's behavior. + Release the buffer view. This should be called when the buffer is + no longer being used as it may free memory from it """ Py_DecRef(space, view.c_obj) view.c_obj = lltype.nullptr(PyObject.TO) + # XXX do other fields leak memory? diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py --- a/pypy/module/cpyext/pytraceback.py +++ b/pypy/module/cpyext/pytraceback.py @@ -5,7 +5,6 @@ from pypy.module.cpyext.pyobject import ( PyObject, make_ref, from_ref, Py_DecRef, make_typedescr) from pypy.module.cpyext.frameobject import PyFrameObject -from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter import pycode diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -43,16 +43,20 @@ def PySequence_Fast(space, w_obj, m): """Returns the sequence o as a tuple, unless it is already a tuple or list, in which case o is returned. Use PySequence_Fast_GET_ITEM() to access the - members of the result. Returns NULL on failure. If the object is not a - sequence, raises TypeError with m as the message text.""" + members of the result. Returns NULL on failure. If the object cannot be + converted to a sequence, and raises a TypeError, raise a new TypeError with + m as the message text. 
If the conversion otherwise fails, reraise the + original exception.""" if isinstance(w_obj, W_ListObject): # make sure we can return a borrowed obj from PySequence_Fast_GET_ITEM w_obj.convert_to_cpy_strategy(space) return w_obj try: return W_ListObject.newlist_cpyext(space, space.listview(w_obj)) - except OperationError: - raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) + except OperationError as e: + if e.match(space, space.w_TypeError): + raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) + raise e @cpython_api([rffi.VOIDP, Py_ssize_t], PyObject, result_borrowed=True) def PySequence_Fast_GET_ITEM(space, w_obj, index): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -335,9 +335,15 @@ def getshape(self): return self.shape + def getstrides(self): + return self.strides + def getitemsize(self): return self.itemsize + def getndim(self): + return self.ndim + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -107,14 +107,11 @@ PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags) { PyMyArray* self = (PyMyArray*)obj; - fprintf(stdout, "in PyMyArray_getbuffer\n"); if (view == NULL) { - fprintf(stdout, "view is NULL\n"); PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); return -1; } if (flags == 0) { - fprintf(stdout, "flags is 0\n"); PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer"); return -1; } @@ -188,7 +185,131 @@ (initproc)PyMyArray_init, /* tp_init */ }; +static PyObject* +test_buffer(PyObject* self, PyObject* args) +{ + Py_buffer* view = NULL; + PyObject* obj = PyTuple_GetItem(args, 0); + PyObject* memoryview = PyMemoryView_FromObject(obj); + if (memoryview == NULL) + return PyInt_FromLong(-1); + view = PyMemoryView_GET_BUFFER(memoryview); + Py_DECREF(memoryview); + return PyInt_FromLong(view->len); +} + +/* Copied from numpy tests */ +/* + * Create python string from a FLAG and or the corresponding PyBuf flag + * for the use in get_buffer_info. + */ +#define GET_PYBUF_FLAG(FLAG) \ + buf_flag = PyUnicode_FromString(#FLAG); \ + flag_matches = PyObject_RichCompareBool(buf_flag, tmp, Py_EQ); \ + Py_DECREF(buf_flag); \ + if (flag_matches == 1) { \ + Py_DECREF(tmp); \ + flags |= PyBUF_##FLAG; \ + continue; \ + } \ + else if (flag_matches == -1) { \ + Py_DECREF(tmp); \ + return NULL; \ + } + + +/* + * Get information for a buffer through PyBuf_GetBuffer with the + * corresponding flags or'ed. Note that the python caller has to + * make sure that or'ing those flags actually makes sense. + * More information should probably be returned for future tests.
+ */ +static PyObject * +get_buffer_info(PyObject *self, PyObject *args) +{ + PyObject *buffer_obj, *pyflags; + PyObject *tmp, *buf_flag; + Py_buffer buffer; + PyObject *shape, *strides; + Py_ssize_t i, n; + int flag_matches; + int flags = 0; + + if (!PyArg_ParseTuple(args, "OO", &buffer_obj, &pyflags)) { + return NULL; + } + + n = PySequence_Length(pyflags); + if (n < 0) { + return NULL; + } + + for (i=0; i < n; i++) { + tmp = PySequence_GetItem(pyflags, i); + if (tmp == NULL) { + return NULL; + } + + GET_PYBUF_FLAG(SIMPLE); + GET_PYBUF_FLAG(WRITABLE); + GET_PYBUF_FLAG(STRIDES); + GET_PYBUF_FLAG(ND); + GET_PYBUF_FLAG(C_CONTIGUOUS); + GET_PYBUF_FLAG(F_CONTIGUOUS); + GET_PYBUF_FLAG(ANY_CONTIGUOUS); + GET_PYBUF_FLAG(INDIRECT); + GET_PYBUF_FLAG(FORMAT); + GET_PYBUF_FLAG(STRIDED); + GET_PYBUF_FLAG(STRIDED_RO); + GET_PYBUF_FLAG(RECORDS); + GET_PYBUF_FLAG(RECORDS_RO); + GET_PYBUF_FLAG(FULL); + GET_PYBUF_FLAG(FULL_RO); + GET_PYBUF_FLAG(CONTIG); + GET_PYBUF_FLAG(CONTIG_RO); + + Py_DECREF(tmp); + + /* One of the flags must match */ + PyErr_SetString(PyExc_ValueError, "invalid flag used."); + return NULL; + } + + if (PyObject_GetBuffer(buffer_obj, &buffer, flags) < 0) { + return NULL; + } + + if (buffer.shape == NULL) { + Py_INCREF(Py_None); + shape = Py_None; + } + else { + shape = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(shape, i, PyLong_FromSsize_t(buffer.shape[i])); + } + } + + if (buffer.strides == NULL) { + Py_INCREF(Py_None); + strides = Py_None; + } + else { + strides = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(strides, i, PyLong_FromSsize_t(buffer.strides[i])); + } + } + + PyBuffer_Release(&buffer); + return Py_BuildValue("(NN)", shape, strides); +} + + + static PyMethodDef buffer_functions[] = { + {"test_buffer", (PyCFunction)test_buffer, METH_VARARGS, NULL}, + {"get_buffer_info", (PyCFunction)get_buffer_info, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c --- a/pypy/module/cpyext/test/foo3.c +++ b/pypy/module/cpyext/test/foo3.c @@ -4,9 +4,7 @@ PyObject* foo3type_tp_new(PyTypeObject* metatype, PyObject* args, PyObject* kwds) { PyObject* newType; - /*printf("in foo3type_tp_new, preprocessing...\n"); */ newType = PyType_Type.tp_new(metatype, args, kwds); - /*printf("in foo3type_tp_new, postprocessing...\n"); */ return newType; } @@ -81,4 +79,5 @@ return; if (PyDict_SetItemString(d, "footype", (PyObject *)&footype) < 0) return; + Py_INCREF(&footype); } diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -1,5 +1,5 @@ import py, pytest -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype from pypy.interpreter.baseobjspace import W_Root from pypy.module.cpyext.state import State from pypy.module.cpyext import api diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -1,4 +1,4 @@ -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.api import PyObject diff --git a/pypy/module/cpyext/test/test_bytesobject.py 
b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -183,8 +183,27 @@ Py_INCREF(Py_None); return Py_None; """), + ("c_only", "METH_NOARGS", + """ + int ret; + char * buf2; + PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024); + if (!obj) + return NULL; + buf2 = PyBytes_AsString(obj); + if (!buf2) + return NULL; + /* buf should not have been forced, issue #2395 */ + ret = _PyBytes_Resize(&obj, 512); + if (ret < 0) + return NULL; + Py_DECREF(obj); + Py_INCREF(Py_None); + return Py_None; + """), ]) module.getbytes() + module.c_only() def test_py_string_as_string_Unicode(self): module = self.import_extension('foo', [ diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -18,6 +18,8 @@ from .support import c_compile +only_pypy ="config.option.runappdirect and '__pypy__' not in sys.builtin_module_names" + @api.cpython_api([], api.PyObject) def PyPy_Crash1(space): 1/0 @@ -53,7 +55,48 @@ libraries=libraries) return soname -def compile_extension_module(space, modname, include_dirs=[], +class SystemCompilationInfo(object): + """Bundles all the generic information required to compile extensions. + + Note: here, 'system' means OS + target interpreter + test config + ... + """ + def __init__(self, include_extra=None, compile_extra=None, link_extra=None, + extra_libs=None, ext=None): + self.include_extra = include_extra or [] + self.compile_extra = compile_extra + self.link_extra = link_extra + self.extra_libs = extra_libs + self.ext = ext + +def get_cpyext_info(space): + from pypy.module.imp.importing import get_so_extension + state = space.fromcache(State) + api_library = state.api_lib + if sys.platform == 'win32': + libraries = [api_library] + # '%s' undefined; assuming extern returning int + compile_extra = ["/we4013"] + # prevent linking with PythonXX.lib + w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2] + link_extra = ["/NODEFAULTLIB:Python%d%d.lib" % + (space.int_w(w_maj), space.int_w(w_min))] + else: + libraries = [] + if sys.platform.startswith('linux'): + compile_extra = [ + "-Werror", "-g", "-O0", "-Wp,-U_FORTIFY_SOURCE", "-fPIC"] + link_extra = ["-g"] + else: + compile_extra = link_extra = None + return SystemCompilationInfo( + include_extra=api.include_dirs, + compile_extra=compile_extra, + link_extra=link_extra, + extra_libs=libraries, + ext=get_so_extension(space)) + + +def compile_extension_module(sys_info, modname, include_dirs=[], source_files=None, source_strings=None): """ Build an extension module and return the filename of the resulting native @@ -65,35 +108,15 @@ Any extra keyword arguments are passed on to ExternalCompilationInfo to build the module (so specify your source with one of those). _______________________________________________ pypy-commit mailing list pypy-commit@python.org https://mail.python.org/mailman/listinfo/pypy-commit
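As a usage sketch for the PyObject_GetBuffer/PyBuffer_Release pair implemented above: a minimal, hypothetical C helper (the name buffer_nbytes and its registration are illustrative, not part of the patch) that relies only on the documented CPython 2.7 buffer API could look like this:

#include <Python.h>

/* Hypothetical helper, not part of the patch: return the byte length of
   any object that supports the buffer interface. */
static PyObject *
buffer_nbytes(PyObject *self, PyObject *obj)
{
    Py_buffer view;
    Py_ssize_t nbytes;

    /* PyBUF_SIMPLE requests the least demanding view: contiguous bytes,
       no shape, strides or format information. */
    if (PyObject_GetBuffer(obj, &view, PyBUF_SIMPLE) < 0)
        return NULL;                /* TypeError/BufferError already set */
    nbytes = view.len;
    PyBuffer_Release(&view);        /* drops the reference held in view.obj */
    return PyInt_FromLong((long)nbytes);
}

Registered with METH_O and called on a str, bytearray or memoryview, this should exercise the same fill-and-release path that the new get_buffer_info test drives with explicit flag lists.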
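Along the same lines, the C/F/A semantics that PyBuffer_IsContiguous is documented to provide can be probed with a small sketch (again hypothetical code, assuming the exporter honours PyBUF_STRIDES):

#include <Python.h>

/* Sketch: classify the layout of an object's buffer. Returns a static
   string, or NULL with an exception set. */
static const char *
classify_layout(PyObject *obj)
{
    Py_buffer view;
    const char *kind = "not contiguous";

    if (PyObject_GetBuffer(obj, &view, PyBUF_STRIDES) < 0)
        return NULL;
    if (PyBuffer_IsContiguous(&view, 'C'))
        kind = "C-contiguous";
    else if (PyBuffer_IsContiguous(&view, 'F'))
        kind = "Fortran-contiguous";
    /* PyBuffer_IsContiguous(&view, 'A') accepts either ordering. */
    PyBuffer_Release(&view);
    return kind;
}

For a one-dimensional bytes-like object both orders are trivially contiguous, which is also what the _IsCContiguous/_IsFortranContiguous helpers report for ndim <= 1.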
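Finally, the error behaviour spelled out in the amended PySequence_Fast docstring is what callers typically rely on; a purely illustrative C consumer (sum_items and its message text are made up for the example) would be:

#include <Python.h>

/* Illustrative only: sum a sequence of ints via PySequence_Fast. A
   non-sequence argument becomes a TypeError carrying the message below;
   any other failure during conversion propagates unchanged. */
static PyObject *
sum_items(PyObject *self, PyObject *arg)
{
    PyObject *fast;
    Py_ssize_t i, n;
    long total = 0;

    fast = PySequence_Fast(arg, "sum_items expects a sequence");
    if (fast == NULL)
        return NULL;
    n = PySequence_Fast_GET_SIZE(fast);
    for (i = 0; i < n; i++) {
        /* borrowed reference, as guaranteed by PySequence_Fast_GET_ITEM */
        long v = PyInt_AsLong(PySequence_Fast_GET_ITEM(fast, i));
        if (v == -1 && PyErr_Occurred()) {
            Py_DECREF(fast);
            return NULL;
        }
        total += v;
    }
    Py_DECREF(fast);
    return PyInt_FromLong(total);
}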