Author: Manuel Jacob <[email protected]>
Branch: llvm-translation-backend
Changeset: r75422:67a005ec73ed
Date: 2015-01-08 22:43 +0100
http://bitbucket.org/pypy/pypy/changeset/67a005ec73ed/
Log: hg merge default
diff too long, truncating to 2000 out of 35007 lines
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -28,7 +28,7 @@
DEALINGS IN THE SOFTWARE.
-PyPy Copyright holders 2003-2014
+PyPy Copyright holders 2003-2015
-----------------------------------
Except when otherwise stated (look for LICENSE files or information at
diff --git a/README.rst b/README.rst
--- a/README.rst
+++ b/README.rst
@@ -37,4 +37,4 @@
to use virtualenv with the resulting pypy-c as the interpreter; you can
find more details about various installation schemes here:
-http://doc.pypy.org/en/latest/getting-started.html#installing-pypy
+ http://doc.pypy.org/en/latest/install.html
diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py
--- a/lib-python/2.7/distutils/unixccompiler.py
+++ b/lib-python/2.7/distutils/unixccompiler.py
@@ -58,7 +58,7 @@
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
- 'compiler_cxx' : ["cc"],
+ 'compiler_cxx' : ["c++"], # pypy: changed, 'cc' is bogus
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
diff --git a/lib-python/2.7/sqlite3/test/dbapi.py b/lib-python/2.7/sqlite3/test/dbapi.py
--- a/lib-python/2.7/sqlite3/test/dbapi.py
+++ b/lib-python/2.7/sqlite3/test/dbapi.py
@@ -478,6 +478,29 @@
except TypeError:
pass
+ def CheckCurDescription(self):
+ self.cu.execute("select * from test")
+
+ actual = self.cu.description
+ expected = [
+ ('id', None, None, None, None, None, None),
+ ('name', None, None, None, None, None, None),
+ ('income', None, None, None, None, None, None),
+ ]
+ self.assertEqual(expected, actual)
+
+ def CheckCurDescriptionVoidStatement(self):
+ self.cu.execute("insert into test(name) values (?)", ("foo",))
+ self.assertIsNone(self.cu.description)
+
+ def CheckCurDescriptionWithoutStatement(self):
+ cu = self.cx.cursor()
+ try:
+ self.assertIsNone(cu.description)
+ finally:
+ cu.close()
+
+
@unittest.skipUnless(threading, 'This test requires threading.')
class ThreadTests(unittest.TestCase):
def setUp(self):
diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py
--- a/lib-python/2.7/subprocess.py
+++ b/lib-python/2.7/subprocess.py
@@ -655,6 +655,21 @@
"""Create new Popen instance."""
_cleanup()
+ # --- PyPy hack, see _pypy_install_libs_after_virtualenv() ---
+ # match arguments passed by different versions of virtualenv
+ if args[1:] in (
+ ['-c', 'import sys; print(sys.prefix)'], # 1.6 10ba3f3c
+ ['-c', "\nimport sys\nprefix = sys.prefix\n" # 1.7 0e9342ce
+ "if sys.version_info[0] == 3:\n"
+ " prefix = prefix.encode('utf8')\n"
+ "if hasattr(sys.stdout, 'detach'):\n"
+ " sys.stdout = sys.stdout.detach()\n"
+ "elif hasattr(sys.stdout, 'buffer'):\n"
+ " sys.stdout = sys.stdout.buffer\nsys.stdout.write(prefix)\n"],
+ ['-c', 'import sys;out=sys.stdout;getattr(out, "buffer"'
+ ', out).write(sys.prefix.encode("utf-8"))']): # 1.7.2 a9454bce
+ _pypy_install_libs_after_virtualenv(args[0])
+
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
@@ -1560,6 +1575,27 @@
self.send_signal(signal.SIGKILL)
+def _pypy_install_libs_after_virtualenv(target_executable):
+ # https://bitbucket.org/pypy/pypy/issue/1922/future-proofing-virtualenv
+ #
+ # PyPy 2.4.1 turned --shared on by default. This means the pypy binary
+ # depends on the 'libpypy-c.so' shared library to be able to run.
+ # The virtualenv code existing at the time did not account for this
+ # and would break. Try to detect that we're running under such a
+ # virtualenv in the "Testing executable with" phase and copy the
+ # library ourselves.
+ caller = sys._getframe(2)
+ if ('virtualenv_version' in caller.f_globals and
+ 'copyfile' in caller.f_globals):
+ dest_dir = sys.pypy_resolvedirof(target_executable)
+ src_dir = sys.pypy_resolvedirof(sys.executable)
+ for libname in ['libpypy-c.so', 'libpypy-c.dylib']:
+ dest_library = os.path.join(dest_dir, libname)
+ src_library = os.path.join(src_dir, libname)
+ if os.path.exists(src_library):
+ caller.f_globals['copyfile'](src_library, dest_library)
+
+
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
diff --git a/lib-python/2.7/test/test_collections.py b/lib-python/2.7/test/test_collections.py
--- a/lib-python/2.7/test/test_collections.py
+++ b/lib-python/2.7/test/test_collections.py
@@ -1108,6 +1108,16 @@
od.popitem()
self.assertEqual(len(od), 0)
+ def test_popitem_first(self):
+ pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+ shuffle(pairs)
+ od = OrderedDict(pairs)
+ while pairs:
+ self.assertEqual(od.popitem(last=False), pairs.pop(0))
+ with self.assertRaises(KeyError):
+ od.popitem(last=False)
+ self.assertEqual(len(od), 0)
+
def test_pop(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
@@ -1179,7 +1189,11 @@
od = OrderedDict(pairs)
# yaml.dump(od) -->
        # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
- self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
+
+ # PyPy bug fix: added [0] at the end of this line, because the
+ # test is really about the 2-tuples that need to be 2-lists
+ # inside the list of 6 of them
+        self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1][0]))
def test_reduce_not_too_fat(self):
# do not save instance dictionary if not needed
@@ -1189,6 +1203,16 @@
od.x = 10
self.assertEqual(len(od.__reduce__()), 3)
+ def test_reduce_exact_output(self):
+ # PyPy: test that __reduce__() produces the exact same answer as
+ # CPython does, even though in the 'all_ordered_dicts' branch we
+ # have to emulate it.
+ pairs = [['c', 1], ['b', 2], ['d', 4]]
+ od = OrderedDict(pairs)
+ self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,)))
+ od.x = 10
+ self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10}))
+
def test_repr(self):
od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5),
('f', 6)])
self.assertEqual(repr(od),
diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py
--- a/lib-python/2.7/test/test_xml_etree.py
+++ b/lib-python/2.7/test/test_xml_etree.py
@@ -225,9 +225,9 @@
>>> element.remove(subelement)
>>> serialize(element) # 5
'<tag key="value" />'
- >>> element.remove(subelement)
+ >>> element.remove(subelement) # doctest: +ELLIPSIS
Traceback (most recent call last):
- ValueError: list.remove(x): x not in list
+ ValueError: list.remove(...
>>> serialize(element) # 6
'<tag key="value" />'
>>> element[0:0] = [subelement, subelement, subelement]
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -59,7 +59,7 @@
def __init__(self, basename, core=False, compiler=None, usemodules='',
skip=None):
self.basename = basename
-        self._usemodules = usemodules.split() + ['signal', 'rctime', 'itertools', '_socket']
+        self._usemodules = usemodules.split() + ['signal', 'time', 'itertools', '_socket']
self._compiler = compiler
self.core = core
self.skip = skip
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -7,7 +7,7 @@
1. check out the branch vendor/stdlib
2. upgrade the files there
-3. update stdlib-versions.txt with the output of hg -id from the cpython repo
+3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
5. update to default/py3k
6. create a integration branch for the new stdlib
diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py
--- a/lib_pypy/_functools.py
+++ b/lib_pypy/_functools.py
@@ -9,7 +9,10 @@
of the given arguments and keywords.
"""
- def __init__(self, func, *args, **keywords):
+ def __init__(self, *args, **keywords):
+ if not args:
+ raise TypeError('__init__() takes at least 2 arguments (1 given)')
+ func, args = args[0], args[1:]
if not callable(func):
raise TypeError("the first argument must be callable")
self._func = func
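
A quick sketch of why the constructor above now pulls ``func`` out of ``*args``: presumably, as in CPython's C implementation of ``functools.partial``, a keyword argument literally named ``func`` should be forwarded to the wrapped callable instead of clashing with the first parameter. Plain-Python illustration (hypothetical example, not part of the diff)::

    from functools import partial

    def report(**kw):
        return sorted(kw.items())

    # With the old `def __init__(self, func, *args, **keywords)` signature this
    # raised "got multiple values for keyword argument 'func'"; with the new
    # one, 'func' is simply passed on to the wrapped callable.
    p = partial(report, func=1)
    assert p(other=2) == [('func', 1), ('other', 2)]
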
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -1175,8 +1175,9 @@
try:
return self.__description
except AttributeError:
- self.__description = self.__statement._get_description()
- return self.__description
+ if self.__statement:
+ self.__description = self.__statement._get_description()
+ return self.__description
description = property(__get_description)
def __get_lastrowid(self):
diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py
--- a/lib_pypy/grp.py
+++ b/lib_pypy/grp.py
@@ -66,11 +66,12 @@
@builtinify
def getgrnam(name):
- if not isinstance(name, str):
+ if not isinstance(name, basestring):
raise TypeError("expected string")
+ name = str(name)
res = libc.getgrnam(name)
if not res:
- raise KeyError(name)
+ raise KeyError("'getgrnam(): name not found: %s'" % name)
return _group_from_gstruct(res)
@builtinify
diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py
--- a/lib_pypy/readline.py
+++ b/lib_pypy/readline.py
@@ -6,4 +6,11 @@
are only stubs at the moment.
"""
-from pyrepl.readline import *
+try:
+ from pyrepl.readline import *
+except ImportError:
+ import sys
+ if sys.platform == 'win32':
+ raise ImportError("the 'readline' module is not available on Windows"
+ " (on either PyPy or CPython)")
+ raise
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -29,7 +29,7 @@
# --allworkingmodules
working_modules = default_modules.copy()
working_modules.update([
- "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "rctime" ,
+ "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "time" ,
"select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios",
"zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses",
"cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array",
@@ -40,7 +40,7 @@
translation_modules = default_modules.copy()
translation_modules.update([
- "fcntl", "rctime", "select", "signal", "_rawffi", "zlib", "struct", "_md5",
+ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5",
"cStringIO", "array", "binascii",
# the following are needed for pyrepl (and hence for the
# interactive prompt/pdb)
@@ -64,19 +64,15 @@
default_modules.add("_locale")
if sys.platform == "sunos5":
-    working_modules.remove('mmap')   # depend on ctypes, can't get at c-level 'errono'
-    working_modules.remove('rctime') # depend on ctypes, missing tm_zone/tm_gmtoff
-    working_modules.remove('signal') # depend on ctypes, can't get at c-level 'errono'
working_modules.remove('fcntl') # LOCK_NB not defined
working_modules.remove("_minimal_curses")
working_modules.remove("termios")
- working_modules.remove("_multiprocessing") # depends on rctime
if "cppyy" in working_modules:
working_modules.remove("cppyy") # depends on ctypes
module_dependencies = {
- '_multiprocessing': [('objspace.usemodules.rctime', True),
+ '_multiprocessing': [('objspace.usemodules.time', True),
('objspace.usemodules.thread', True)],
'cpyext': [('objspace.usemodules.array', True)],
'cppyy': [('objspace.usemodules.cpyext', True)],
@@ -86,9 +82,10 @@
# itself needs the interp-level struct module
# because 'P' is missing from the app-level one
"_rawffi": [("objspace.usemodules.struct", True)],
- "cpyext": [("translation.secondaryentrypoints", "cpyext,main"),
- ("translation.shared", sys.platform == "win32")],
+ "cpyext": [("translation.secondaryentrypoints", "cpyext,main")],
}
+if sys.platform == "win32":
+ module_suggests["cpyext"].append(("translation.shared", True))
module_import_dependencies = {
# no _rawffi if importing rpython.rlib.clibffi raises ImportError
@@ -255,10 +252,6 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("builtinshortcut",
- "a shortcut for operations between built-in types. XXX: "
- "deprecated, not really a shortcut any more.",
- default=False),
BoolOption("getattributeshortcut",
"track types that override __getattribute__",
default=False,
@@ -270,9 +263,6 @@
# weakrefs needed, because of get_subclasses()
requires=[("translation.rweakref", True)]),
- ChoiceOption("multimethods", "the multimethod implementation to use",
- ["doubledispatch", "mrd"],
- default="mrd"),
BoolOption("withidentitydict",
"track types that override __hash__, __eq__ or __cmp__ and
use a special dict strategy for those which do not",
default=False,
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -64,7 +64,7 @@
def check_file_exists(fn):
assert configdocdir.join(fn).check()
- from pypy.doc.config.confrest import all_optiondescrs
+ from pypy.doc.config.generate import all_optiondescrs
configdocdir = thisdir.dirpath().dirpath().join("doc", "config")
for descr in all_optiondescrs:
prefix = descr._name
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -119,6 +119,9 @@
pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py
+(You can use ``python`` instead of ``pypy`` here, which will take longer
+but works too.)
+
If everything works correctly this will create an executable ``pypy-c`` in the
current directory. The executable behaves mostly like a normal Python
interpreter (see :doc:`cpython_differences`).
diff --git a/pypy/doc/config/objspace.std.builtinshortcut.txt b/pypy/doc/config/objspace.std.builtinshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.builtinshortcut.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-A shortcut speeding up primitive operations between built-in types.
-
-This is a space-time trade-off: at the moment, this option makes a
-translated pypy-c executable bigger by about 1.7 MB. (This can probably
-be improved with careful analysis.)
diff --git a/pypy/doc/config/objspace.std.multimethods.txt b/pypy/doc/config/objspace.std.multimethods.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.multimethods.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Choose the multimethod implementation.
-
-* ``doubledispatch`` turns
- a multimethod call into a sequence of normal method calls.
-
-* ``mrd`` uses a technique known as Multiple Row Displacement
- which precomputes a few compact tables of numbers and
- function pointers.
diff --git a/pypy/doc/config/objspace.usemodules.rctime.txt b/pypy/doc/config/objspace.usemodules.rctime.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.usemodules.rctime.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Use the 'rctime' module.
-
-'rctime' is our `rffi`_ based implementation of the builtin 'time' module.
-It supersedes the less complete :config:`objspace.usemodules.time`,
-at least for C-like targets (the C and LLVM backends).
-
-.. _`rffi`: ../rffi.html
diff --git a/pypy/doc/config/objspace.usemodules.time.txt b/pypy/doc/config/objspace.usemodules.time.txt
--- a/pypy/doc/config/objspace.usemodules.time.txt
+++ b/pypy/doc/config/objspace.usemodules.time.txt
@@ -1,5 +1,1 @@
Use the 'time' module.
-
-Obsolete; use :config:`objspace.usemodules.rctime` for our up-to-date version
-of the application-level 'time' module, at least for C-like targets (the C
-and LLVM backends).
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -205,23 +205,28 @@
The above is true both in CPython and in PyPy. Differences
can occur about whether a built-in function or method will
call an overridden method of *another* object than ``self``.
-In PyPy, they are generally always called, whereas not in
-CPython. For example, in PyPy, ``dict1.update(dict2)``
-considers that ``dict2`` is just a general mapping object, and
-will thus call overridden ``keys()`` and ``__getitem__()``
-methods on it. So the following code prints ``42`` on PyPy
-but ``foo`` on CPython::
+In PyPy, they are often called in cases where CPython would not.
+Two examples::
- >>>> class D(dict):
- .... def __getitem__(self, key):
- .... return 42
- ....
- >>>>
- >>>> d1 = {}
- >>>> d2 = D(a='foo')
- >>>> d1.update(d2)
- >>>> print d1['a']
- 42
+ class D(dict):
+ def __getitem__(self, key):
+ return "%r from D" % (key,)
+
+ class A(object):
+ pass
+
+ a = A()
+ a.__dict__ = D()
+ a.foo = "a's own foo"
+ print a.foo
+ # CPython => a's own foo
+ # PyPy => 'foo' from D
+
+ glob = D(foo="base item")
+ loc = {}
+ exec "print foo" in glob, loc
+ # CPython => base item
+ # PyPy => 'foo' from D
Mutating classes of objects which are already used as dictionary keys
@@ -292,6 +297,9 @@
above types will return a value that is computed from the argument, and can
thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long).
+Notably missing from the list above are ``str`` and ``unicode``. If your
+code relies on comparing strings with ``is``, then it might break in PyPy.
+
Miscellaneous
-------------
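
To make the ``str``/``is`` caveat added above concrete, a small Python 2 sketch (illustrative only; the exact result of ``is`` is implementation-dependent)::

    x = "hello world!"                    # not an identifier-like string
    y = "".join(["hello", " world!"])     # equal contents, built at runtime
    print x == y    # True on both CPython and PyPy
    print x is y    # unspecified: interning differs between implementations,
                    # so never rely on 'is' for string comparison
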
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -6,6 +6,10 @@
C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_
project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API.
+**NOTE**: As of 1st of December, PyPy comes with ``--shared`` by default
+on linux, linux64 and windows. We will make it the default on all platforms
+by the time of the next release.
+
The first thing that you need is to compile PyPy yourself with the option
``--shared``. We plan to make ``--shared`` the default in the future. Consult
the `how to compile PyPy`_ doc for details. This will result in ``libpypy.so``
@@ -93,12 +97,18 @@
return res;
}
-If we save it as ``x.c`` now, compile it and run it with::
+If we save it as ``x.c`` now, compile it and run it (on linux) with::
fijal@hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L.
fijal@hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x
hello from pypy
+on OSX it is necessary to set the rpath of the binary if one wants to link to it::
+
+ gcc -o x x.c -lpypy-c -L. -Wl,-rpath -Wl,@executable_path
+ ./x
+ hello from pypy
+
Worked!
.. note:: If the compilation fails because of missing PyPy.h header file,
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -35,6 +35,13 @@
PyPy's bytearray type is very inefficient. It would be an interesting
task to look into possible optimizations on this.
+Implement AF_XXX packet types for PyPy
+--------------------------------------
+
+PyPy is missing AF_XXX types of sockets. Implementing them is an easy-to-medium
+task. `bug report`_
+
+.. _`bug report`: https://bitbucket.org/pypy/pypy/issue/1942/support-for-af_xxx-sockets#more
Implement copy-on-write list slicing
------------------------------------
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -35,3 +35,19 @@
Split RPython documentation from PyPy documentation and clean up. There now is
a clearer separation between documentation for users, developers and people
interested in background information.
+
+.. branch: kill-multimethod
+
+Kill multimethod machinery, all multimethods were removed earlier.
+
+.. branch: nditer-external_loop
+
+Implement `external_loop` argument to numpy's nditer
+
+.. branch: kill-rctime
+
+Rename pypy/module/rctime to pypy/module/time, since it contains the implementation of the 'time' module.
+
+.. branch: ssa-flow
+
+Use SSA form for flow graphs inside build_flow() and part of simplify_graph()
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -208,23 +208,6 @@
from pypy.config.pypyoption import set_pypy_opt_level
set_pypy_opt_level(config, translateconfig.opt)
-        # as of revision 27081, multimethod.py uses the InstallerVersion1 by default
-        # because it is much faster both to initialize and run on top of CPython.
- # The InstallerVersion2 is optimized for making a translator-friendly
- # structure for low level backends. However, InstallerVersion1 is still
- # preferable for high level backends, so we patch here.
-
- from pypy.objspace.std import multimethod
- if config.objspace.std.multimethods == 'mrd':
- assert multimethod.InstallerVersion1.instance_counter == 0,\
- 'The wrong Installer version has already been instatiated'
- multimethod.Installer = multimethod.InstallerVersion2
- elif config.objspace.std.multimethods == 'doubledispatch':
- # don't rely on the default, set again here
- assert multimethod.InstallerVersion2.instance_counter == 0,\
- 'The wrong Installer version has already been instatiated'
- multimethod.Installer = multimethod.InstallerVersion1
-
def print_help(self, config):
self.opt_parser(config).print_help()
@@ -251,8 +234,7 @@
enable_translationmodules(config)
config.translation.suggest(check_str_without_nul=True)
- if sys.platform.startswith('linux'):
- config.translation.suggest(shared=True)
+ config.translation.suggest(shared=True)
if config.translation.thread:
config.objspace.usemodules.thread = True
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -1,15 +1,13 @@
-"""
-Python control flow graph generation and bytecode assembly.
-"""
+"""Python control flow graph generation and bytecode assembly."""
-from pypy.interpreter.astcompiler import ast, symtable
-from pypy.interpreter import pycode
+from rpython.rlib import rfloat
+from rpython.rlib.objectmodel import we_are_translated
+
+from pypy.interpreter.astcompiler import ast, misc, symtable
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.pycode import PyCode
from pypy.tool import stdlib_opcode as ops
-from pypy.interpreter.error import OperationError
-from rpython.rlib.objectmodel import we_are_translated
-from rpython.rlib import rfloat
-
class Instruction(object):
"""Represents a single opcode."""
@@ -21,14 +19,12 @@
self.has_jump = False
def size(self):
- """Return the size of bytes of this instruction when it is encoded."""
+ """Return the size of bytes of this instruction when it is
+ encoded.
+ """
if self.opcode >= ops.HAVE_ARGUMENT:
- if self.arg > 0xFFFF:
- return 6
- else:
- return 3
- else:
- return 1
+ return (6 if self.arg > 0xFFFF else 3)
+ return 1
def jump_to(self, target, absolute=False):
"""Indicate the target this jump instruction.
@@ -54,9 +50,9 @@
class Block(object):
"""A basic control flow block.
- It has one entry point and several possible exit points. Its instructions
- may be jumps to other blocks, or if control flow reaches the end of the
- block, it continues to next_block.
+ It has one entry point and several possible exit points. Its
+ instructions may be jumps to other blocks, or if control flow
+ reaches the end of the block, it continues to next_block.
"""
def __init__(self):
@@ -71,10 +67,10 @@
stack.append(nextblock)
def post_order(self):
- """Return this block and its children in post order.
- This means that the graph of blocks is first cleaned up to
- ignore back-edges, thus turning it into a DAG. Then the DAG
- is linearized. For example:
+ """Return this block and its children in post order. This means
+ that the graph of blocks is first cleaned up to ignore
+ back-edges, thus turning it into a DAG. Then the DAG is
+ linearized. For example:
A --> B -\ => [A, D, B, C]
\-> D ---> C
@@ -105,7 +101,9 @@
return resultblocks
def code_size(self):
- """Return the encoded size of all the instructions in this block."""
+ """Return the encoded size of all the instructions in this
+ block.
+ """
i = 0
for instr in self.instructions:
i += instr.size()
@@ -141,6 +139,7 @@
i += 1
return result
+
def _list_to_dict(l, offset=0):
result = {}
index = offset
@@ -300,11 +299,11 @@
def _resolve_block_targets(self, blocks):
"""Compute the arguments of jump instructions."""
last_extended_arg_count = 0
- # The reason for this loop is extended jumps. EXTENDED_ARG extends the
-        # bytecode size, so it might invalidate the offsets we've already given.
-        # Thus we have to loop until the number of extended args is stable. Any
- # extended jump at all is extremely rare, so performance is not too
- # concerning.
+ # The reason for this loop is extended jumps. EXTENDED_ARG
+ # extends the bytecode size, so it might invalidate the offsets
+ # we've already given. Thus we have to loop until the number of
+ # extended args is stable. Any extended jump at all is
+ # extremely rare, so performance is not too concerning.
while True:
extended_arg_count = 0
offset = 0
@@ -330,7 +329,8 @@
instr.opcode = ops.JUMP_ABSOLUTE
absolute = True
elif target_op == ops.RETURN_VALUE:
-                            # Replace JUMP_* to a RETURN into just a RETURN
+ # Replace JUMP_* to a RETURN into
+ # just a RETURN
instr.opcode = ops.RETURN_VALUE
instr.arg = 0
instr.has_jump = False
@@ -345,7 +345,8 @@
instr.arg = jump_arg
if jump_arg > 0xFFFF:
extended_arg_count += 1
-            if extended_arg_count == last_extended_arg_count and not force_redo:
+ if (extended_arg_count == last_extended_arg_count and
+ not force_redo):
break
else:
last_extended_arg_count = extended_arg_count
@@ -360,12 +361,14 @@
while True:
try:
w_key = space.next(w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
break
w_index = space.getitem(w_consts, w_key)
- consts_w[space.int_w(w_index)] = space.getitem(w_key, first)
+ w_constant = space.getitem(w_key, first)
+ w_constant = misc.intern_if_common_string(space, w_constant)
+ consts_w[space.int_w(w_index)] = w_constant
return consts_w
def _get_code_flags(self):
@@ -433,15 +436,16 @@
continue
addr = offset - current_off
# Python assumes that lineno always increases with
- # increasing bytecode address (lnotab is unsigned char).
-            # increasing bytecode address (lnotab is unsigned char).
-            # Depending on when SET_LINENO instructions are emitted this
- # is not always true. Consider the code:
+ # increasing bytecode address (lnotab is unsigned
+ # char). Depending on when SET_LINENO instructions
+ # are emitted this is not always true. Consider the
+ # code:
# a = (1,
# b)
-            # In the bytecode stream, the assignment to "a" occurs after
-            # the loading of "b". This works with the C Python compiler
-            # because it only generates a SET_LINENO instruction for the
- # assignment.
+ # In the bytecode stream, the assignment to "a"
+ # occurs after the loading of "b". This works with
+ # the C Python compiler because it only generates a
+ # SET_LINENO instruction for the assignment.
if line or addr:
while addr > 255:
push(chr(255))
@@ -484,22 +488,22 @@
free_names = _list_from_dict(self.free_vars, len(cell_names))
flags = self._get_code_flags() | self.compile_info.flags
bytecode = ''.join([block.get_code() for block in blocks])
- return pycode.PyCode(self.space,
- self.argcount,
- len(self.var_names),
- stack_depth,
- flags,
- bytecode,
- list(consts_w),
- names,
- var_names,
- self.compile_info.filename,
- self.name,
- self.first_lineno,
- lnotab,
- free_names,
- cell_names,
- self.compile_info.hidden_applevel)
+ return PyCode(self.space,
+ self.argcount,
+ len(self.var_names),
+ stack_depth,
+ flags,
+ bytecode,
+ list(consts_w),
+ names,
+ var_names,
+ self.compile_info.filename,
+ self.name,
+ self.first_lineno,
+ lnotab,
+ free_names,
+ cell_names,
+ self.compile_info.hidden_applevel)
def _list_from_dict(d, offset=0):
@@ -510,134 +514,134 @@
_static_opcode_stack_effects = {
- ops.NOP : 0,
- ops.STOP_CODE : 0,
+ ops.NOP: 0,
+ ops.STOP_CODE: 0,
- ops.POP_TOP : -1,
- ops.ROT_TWO : 0,
- ops.ROT_THREE : 0,
- ops.ROT_FOUR : 0,
- ops.DUP_TOP : 1,
+ ops.POP_TOP: -1,
+ ops.ROT_TWO: 0,
+ ops.ROT_THREE: 0,
+ ops.ROT_FOUR: 0,
+ ops.DUP_TOP: 1,
- ops.UNARY_POSITIVE : 0,
- ops.UNARY_NEGATIVE : 0,
- ops.UNARY_NOT : 0,
- ops.UNARY_CONVERT : 0,
- ops.UNARY_INVERT : 0,
+ ops.UNARY_POSITIVE: 0,
+ ops.UNARY_NEGATIVE: 0,
+ ops.UNARY_NOT: 0,
+ ops.UNARY_CONVERT: 0,
+ ops.UNARY_INVERT: 0,
- ops.LIST_APPEND : -1,
- ops.SET_ADD : -1,
- ops.MAP_ADD : -2,
- ops.STORE_MAP : -2,
+ ops.LIST_APPEND: -1,
+ ops.SET_ADD: -1,
+ ops.MAP_ADD: -2,
+ ops.STORE_MAP: -2,
- ops.BINARY_POWER : -1,
- ops.BINARY_MULTIPLY : -1,
- ops.BINARY_DIVIDE : -1,
- ops.BINARY_MODULO : -1,
- ops.BINARY_ADD : -1,
- ops.BINARY_SUBTRACT : -1,
- ops.BINARY_SUBSCR : -1,
- ops.BINARY_FLOOR_DIVIDE : -1,
- ops.BINARY_TRUE_DIVIDE : -1,
- ops.BINARY_LSHIFT : -1,
- ops.BINARY_RSHIFT : -1,
- ops.BINARY_AND : -1,
- ops.BINARY_OR : -1,
- ops.BINARY_XOR : -1,
+ ops.BINARY_POWER: -1,
+ ops.BINARY_MULTIPLY: -1,
+ ops.BINARY_DIVIDE: -1,
+ ops.BINARY_MODULO: -1,
+ ops.BINARY_ADD: -1,
+ ops.BINARY_SUBTRACT: -1,
+ ops.BINARY_SUBSCR: -1,
+ ops.BINARY_FLOOR_DIVIDE: -1,
+ ops.BINARY_TRUE_DIVIDE: -1,
+ ops.BINARY_LSHIFT: -1,
+ ops.BINARY_RSHIFT: -1,
+ ops.BINARY_AND: -1,
+ ops.BINARY_OR: -1,
+ ops.BINARY_XOR: -1,
- ops.INPLACE_FLOOR_DIVIDE : -1,
- ops.INPLACE_TRUE_DIVIDE : -1,
- ops.INPLACE_ADD : -1,
- ops.INPLACE_SUBTRACT : -1,
- ops.INPLACE_MULTIPLY : -1,
- ops.INPLACE_DIVIDE : -1,
- ops.INPLACE_MODULO : -1,
- ops.INPLACE_POWER : -1,
- ops.INPLACE_LSHIFT : -1,
- ops.INPLACE_RSHIFT : -1,
- ops.INPLACE_AND : -1,
- ops.INPLACE_OR : -1,
- ops.INPLACE_XOR : -1,
+ ops.INPLACE_FLOOR_DIVIDE: -1,
+ ops.INPLACE_TRUE_DIVIDE: -1,
+ ops.INPLACE_ADD: -1,
+ ops.INPLACE_SUBTRACT: -1,
+ ops.INPLACE_MULTIPLY: -1,
+ ops.INPLACE_DIVIDE: -1,
+ ops.INPLACE_MODULO: -1,
+ ops.INPLACE_POWER: -1,
+ ops.INPLACE_LSHIFT: -1,
+ ops.INPLACE_RSHIFT: -1,
+ ops.INPLACE_AND: -1,
+ ops.INPLACE_OR: -1,
+ ops.INPLACE_XOR: -1,
- ops.SLICE+0 : 1,
- ops.SLICE+1 : 0,
- ops.SLICE+2 : 0,
- ops.SLICE+3 : -1,
- ops.STORE_SLICE+0 : -2,
- ops.STORE_SLICE+1 : -3,
- ops.STORE_SLICE+2 : -3,
- ops.STORE_SLICE+3 : -4,
- ops.DELETE_SLICE+0 : -1,
- ops.DELETE_SLICE+1 : -2,
- ops.DELETE_SLICE+2 : -2,
- ops.DELETE_SLICE+3 : -3,
+ ops.SLICE+0: 1,
+ ops.SLICE+1: 0,
+ ops.SLICE+2: 0,
+ ops.SLICE+3: -1,
+ ops.STORE_SLICE+0: -2,
+ ops.STORE_SLICE+1: -3,
+ ops.STORE_SLICE+2: -3,
+ ops.STORE_SLICE+3: -4,
+ ops.DELETE_SLICE+0: -1,
+ ops.DELETE_SLICE+1: -2,
+ ops.DELETE_SLICE+2: -2,
+ ops.DELETE_SLICE+3: -3,
- ops.STORE_SUBSCR : -2,
- ops.DELETE_SUBSCR : -2,
+ ops.STORE_SUBSCR: -2,
+ ops.DELETE_SUBSCR: -2,
- ops.GET_ITER : 0,
- ops.FOR_ITER : 1,
- ops.BREAK_LOOP : 0,
- ops.CONTINUE_LOOP : 0,
- ops.SETUP_LOOP : 0,
+ ops.GET_ITER: 0,
+ ops.FOR_ITER: 1,
+ ops.BREAK_LOOP: 0,
+ ops.CONTINUE_LOOP: 0,
+ ops.SETUP_LOOP: 0,
- ops.PRINT_EXPR : -1,
- ops.PRINT_ITEM : -1,
- ops.PRINT_NEWLINE : 0,
- ops.PRINT_ITEM_TO : -2,
- ops.PRINT_NEWLINE_TO : -1,
+ ops.PRINT_EXPR: -1,
+ ops.PRINT_ITEM: -1,
+ ops.PRINT_NEWLINE: 0,
+ ops.PRINT_ITEM_TO: -2,
+ ops.PRINT_NEWLINE_TO: -1,
- ops.WITH_CLEANUP : -1,
- ops.POP_BLOCK : 0,
- ops.END_FINALLY : -1,
- ops.SETUP_WITH : 1,
- ops.SETUP_FINALLY : 0,
- ops.SETUP_EXCEPT : 0,
+ ops.WITH_CLEANUP: -1,
+ ops.POP_BLOCK: 0,
+ ops.END_FINALLY: -1,
+ ops.SETUP_WITH: 1,
+ ops.SETUP_FINALLY: 0,
+ ops.SETUP_EXCEPT: 0,
- ops.LOAD_LOCALS : 1,
- ops.RETURN_VALUE : -1,
- ops.EXEC_STMT : -3,
- ops.YIELD_VALUE : 0,
- ops.BUILD_CLASS : -2,
- ops.BUILD_MAP : 1,
- ops.BUILD_SET : 1,
- ops.COMPARE_OP : -1,
+ ops.LOAD_LOCALS: 1,
+ ops.RETURN_VALUE: -1,
+ ops.EXEC_STMT: -3,
+ ops.YIELD_VALUE: 0,
+ ops.BUILD_CLASS: -2,
+ ops.BUILD_MAP: 1,
+ ops.BUILD_SET: 1,
+ ops.COMPARE_OP: -1,
- ops.LOOKUP_METHOD : 1,
+ ops.LOOKUP_METHOD: 1,
- ops.LOAD_NAME : 1,
- ops.STORE_NAME : -1,
- ops.DELETE_NAME : 0,
+ ops.LOAD_NAME: 1,
+ ops.STORE_NAME: -1,
+ ops.DELETE_NAME: 0,
- ops.LOAD_FAST : 1,
- ops.STORE_FAST : -1,
- ops.DELETE_FAST : 0,
+ ops.LOAD_FAST: 1,
+ ops.STORE_FAST: -1,
+ ops.DELETE_FAST: 0,
- ops.LOAD_ATTR : 0,
- ops.STORE_ATTR : -2,
- ops.DELETE_ATTR : -1,
+ ops.LOAD_ATTR: 0,
+ ops.STORE_ATTR: -2,
+ ops.DELETE_ATTR: -1,
- ops.LOAD_GLOBAL : 1,
- ops.STORE_GLOBAL : -1,
- ops.DELETE_GLOBAL : 0,
+ ops.LOAD_GLOBAL: 1,
+ ops.STORE_GLOBAL: -1,
+ ops.DELETE_GLOBAL: 0,
- ops.LOAD_CLOSURE : 1,
- ops.LOAD_DEREF : 1,
- ops.STORE_DEREF : -1,
+ ops.LOAD_CLOSURE: 1,
+ ops.LOAD_DEREF: 1,
+ ops.STORE_DEREF: -1,
- ops.LOAD_CONST : 1,
+ ops.LOAD_CONST: 1,
- ops.IMPORT_STAR : -1,
- ops.IMPORT_NAME : -1,
- ops.IMPORT_FROM : 1,
+ ops.IMPORT_STAR: -1,
+ ops.IMPORT_NAME: -1,
+ ops.IMPORT_FROM: 1,
- ops.JUMP_FORWARD : 0,
- ops.JUMP_ABSOLUTE : 0,
- ops.JUMP_IF_TRUE_OR_POP : 0,
- ops.JUMP_IF_FALSE_OR_POP : 0,
- ops.POP_JUMP_IF_TRUE : -1,
- ops.POP_JUMP_IF_FALSE : -1,
- ops.JUMP_IF_NOT_DEBUG : 0,
+ ops.JUMP_FORWARD: 0,
+ ops.JUMP_ABSOLUTE: 0,
+ ops.JUMP_IF_TRUE_OR_POP: 0,
+ ops.JUMP_IF_FALSE_OR_POP: 0,
+ ops.POP_JUMP_IF_TRUE: -1,
+ ops.POP_JUMP_IF_FALSE: -1,
+ ops.JUMP_IF_NOT_DEBUG: 0,
ops.BUILD_LIST_FROM_ARG: 1,
}
diff --git a/pypy/interpreter/astcompiler/misc.py b/pypy/interpreter/astcompiler/misc.py
--- a/pypy/interpreter/astcompiler/misc.py
+++ b/pypy/interpreter/astcompiler/misc.py
@@ -106,3 +106,13 @@
except IndexError:
return name
return "_%s%s" % (klass[i:], name)
+
+
+def intern_if_common_string(space, w_const):
+ # only intern identifier-like strings
+ if not space.is_w(space.type(w_const), space.w_str):
+ return w_const
+ for c in space.str_w(w_const):
+ if not (c.isalnum() or c == '_'):
+ return w_const
+ return space.new_interned_w_str(w_const)
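
The helper above only interns constants that look like identifiers. A stand-alone sketch of that predicate (plain Python, hypothetical function name, outside the interpreter machinery)::

    def is_identifier_like(s):
        # same test as intern_if_common_string: every character must be
        # alphanumeric or an underscore
        return all(c.isalnum() or c == '_' for c in s)

    assert is_identifier_like("foo_bar42")        # would be interned
    assert not is_identifier_like("hello world")  # contains a space: left alone
    assert not is_identifier_like("a-b")          # '-' is not allowed either
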
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -83,17 +83,16 @@
class __extend__(ast.BoolOp):
- def _accept_jump_if_any_is(self, gen, condition, target):
- self.values[0].accept_jump_if(gen, condition, target)
- for i in range(1, len(self.values)):
+ def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0):
+ for i in range(len(self.values) - skip_last):
self.values[i].accept_jump_if(gen, condition, target)
def accept_jump_if(self, gen, condition, target):
if condition and self.op == ast.And or \
(not condition and self.op == ast.Or):
end = gen.new_block()
- self._accept_jump_if_any_is(gen, not condition, end)
- gen.emit_jump(ops.JUMP_FORWARD, target)
+ self._accept_jump_if_any_is(gen, not condition, end, skip_last=1)
+ self.values[-1].accept_jump_if(gen, condition, target)
gen.use_next_block(end)
else:
self._accept_jump_if_any_is(gen, condition, target)
@@ -272,6 +271,11 @@
if w_const is None:
return tup
consts_w[i] = w_const
+ # intern the string constants packed into the tuple here,
+ # because assemble.py will see the result as just a tuple constant
+ for i in range(len(consts_w)):
+ consts_w[i] = misc.intern_if_common_string(
+ self.space, consts_w[i])
else:
consts_w = []
w_consts = self.space.newtuple(consts_w)
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -14,7 +14,7 @@
UserDelAction)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
-from pypy.interpreter.miscutils import ThreadLocals
+from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
__all__ = ['ObjSpace', 'OperationError', 'W_Root']
@@ -384,7 +384,7 @@
self.builtin_modules = {}
self.reloading_modules = {}
- self.interned_strings = {}
+ self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
self.user_del_action = UserDelAction(self)
@@ -522,11 +522,6 @@
if name not in modules:
modules.append(name)
- # a bit of custom logic: rctime take precedence over time
- # XXX this could probably be done as a "requires" in the config
- if 'rctime' in modules and 'time' in modules:
- modules.remove('time')
-
self._builtinmodule_list = modules
return self._builtinmodule_list
@@ -782,25 +777,30 @@
return self.w_False
def new_interned_w_str(self, w_s):
+ assert isinstance(w_s, W_Root) # and is not None
s = self.str_w(w_s)
if not we_are_translated():
assert type(s) is str
- try:
- return self.interned_strings[s]
- except KeyError:
- pass
- self.interned_strings[s] = w_s
- return w_s
+ w_s1 = self.interned_strings.get(s)
+ if w_s1 is None:
+ w_s1 = w_s
+ self.interned_strings.set(s, w_s1)
+ return w_s1
def new_interned_str(self, s):
if not we_are_translated():
assert type(s) is str
- try:
- return self.interned_strings[s]
- except KeyError:
- pass
- w_s = self.interned_strings[s] = self.wrap(s)
- return w_s
+ w_s1 = self.interned_strings.get(s)
+ if w_s1 is None:
+ w_s1 = self.wrap(s)
+ self.interned_strings.set(s, w_s1)
+ return w_s1
+
+ def is_interned_str(self, s):
+ # interface for marshal_impl
+ if not we_are_translated():
+ assert type(s) is str
+ return self.interned_strings.get(s) is not None
def descr_self_interp_w(self, RequiredClass, w_obj):
if not isinstance(w_obj, RequiredClass):
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -32,6 +32,17 @@
self.compiler = space.createcompiler()
self.profilefunc = None
self.w_profilefuncarg = None
+        self.thread_disappeared = False   # might be set to True after os.fork()
+
+ @staticmethod
+ def _mark_thread_disappeared(space):
+ # Called in the child process after os.fork() by interp_posix.py.
+ # Marks all ExecutionContexts except the current one
+ # with 'thread_disappeared = True'.
+ me = space.getexecutioncontext()
+ for ec in space.threadlocals.getallvalues().values():
+ if ec is not me:
+ ec.thread_disappeared = True
def gettopframe(self):
return self.topframeref()
diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
--- a/pypy/interpreter/miscutils.py
+++ b/pypy/interpreter/miscutils.py
@@ -31,3 +31,19 @@
def getallvalues(self):
return {0: self._value}
+
+
+def make_weak_value_dictionary(space, keytype, valuetype):
+ "NOT_RPYTHON"
+ if space.config.translation.rweakref:
+ from rpython.rlib.rweakref import RWeakValueDictionary
+ return RWeakValueDictionary(keytype, valuetype)
+ else:
+ class FakeWeakValueDict(object):
+ def __init__(self):
+ self._dict = {}
+ def get(self, key):
+ return self._dict.get(key, None)
+ def set(self, key, value):
+ self._dict[key] = value
+ return FakeWeakValueDict()
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -125,13 +125,14 @@
else:
return self.space.builtin
+ _NO_CELLS = []
+
@jit.unroll_safe
def initialize_frame_scopes(self, outer_func, code):
# regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
# class bodies only have CO_NEWLOCALS.
# CO_NEWLOCALS: make a locals dict unless optimized is also set
# CO_OPTIMIZED: no locals dict needed at all
- # NB: this method is overridden in nestedscope.py
flags = code.co_flags
if not (flags & pycode.CO_OPTIMIZED):
if flags & pycode.CO_NEWLOCALS:
@@ -144,7 +145,7 @@
nfreevars = len(code.co_freevars)
if not nfreevars:
if not ncellvars:
- self.cells = []
+ self.cells = self._NO_CELLS
return # no self.cells needed - fast path
elif outer_func is None:
space = self.space
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -970,7 +970,12 @@
sys.stdout = out
output = s.getvalue()
assert "CALL_METHOD" in output
-
+
+ def test_interned_strings(self):
+ source = """x = ('foo_bar42', 5); y = 'foo_bar42'; z = x[0]"""
+ exec source
+ assert y is z
+
class AppTestExceptions:
def test_indentation_error(self):
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -378,3 +378,41 @@
assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar'
space.startup()
assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar'
+
+ def test_interned_strings_are_weak(self):
+ import weakref, gc, random
+ space = self.space
+ assert space.config.translation.rweakref
+ w1 = space.new_interned_str("abcdef")
+ w2 = space.new_interned_str("abcdef")
+ assert w2 is w1
+ #
+ # check that 'w1' goes away if we don't hold a reference to it
+ rw1 = weakref.ref(w1)
+ del w1, w2
+ i = 10
+ while rw1() is not None:
+ i -= 1
+ assert i >= 0
+ gc.collect()
+ #
+ s = "foobar%r" % random.random()
+ w0 = space.wrap(s)
+ w1 = space.new_interned_w_str(w0)
+ assert w1 is w0
+ w2 = space.new_interned_w_str(w0)
+ assert w2 is w0
+ w3 = space.wrap(s)
+ assert w3 is not w0
+ w4 = space.new_interned_w_str(w3)
+ assert w4 is w0
+ #
+ # check that 'w0' goes away if we don't hold a reference to it
+ # (even if we hold a reference to 'w3')
+ rw0 = weakref.ref(w0)
+ del w0, w1, w2, w4
+ i = 10
+ while rw0() is not None:
+ i -= 1
+ assert i >= 0
+ gc.collect()
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -618,6 +618,7 @@
from pypy.interpreter.nestedscope import Cell
from pypy.interpreter.special import NotImplemented, Ellipsis
+
def descr_get_dict(space, w_obj):
w_dict = w_obj.getdict(space)
if w_dict is None:
@@ -638,6 +639,11 @@
return space.w_None
return lifeline.get_any_weakref(space)
+dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict,
+                            doc="dictionary for instance variables (if defined)")
+dict_descr.name = '__dict__'
+
+
def generic_ne(space, w_obj1, w_obj2):
if space.eq_w(w_obj1, w_obj2):
return space.w_False
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -34,6 +34,7 @@
'newp_handle': 'handle.newp_handle',
'from_handle': 'handle.from_handle',
'_get_types': 'func._get_types',
+ 'from_buffer': 'func.from_buffer',
'string': 'func.string',
'buffer': 'cbuffer.buffer',
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -45,8 +45,9 @@
#
cif_descr = self.getfunctype().cif_descr
if not cif_descr:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("callbacks with '...'"))
+ raise oefmt(space.w_NotImplementedError,
+ "%s: callback with unsupported argument or "
+ "return type or with '...'", self.getfunctype().name)
res = clibffi.c_ffi_prep_closure(self.get_closure(), cif_descr.cif,
invoke_callback,
rffi.cast(rffi.VOIDP, self.unique_id))
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -440,6 +440,25 @@
return "handle to %s" % (self.space.str_w(w_repr),)
+class W_CDataFromBuffer(W_CData):
+ _attrs_ = ['buf', 'length', 'w_keepalive']
+ _immutable_fields_ = ['buf', 'length', 'w_keepalive']
+
+ def __init__(self, space, cdata, ctype, buf, w_object):
+ W_CData.__init__(self, space, cdata, ctype)
+ self.buf = buf
+ self.length = buf.getlength()
+ self.w_keepalive = w_object
+
+ def get_array_length(self):
+ return self.length
+
+ def _repr_extra(self):
+ w_repr = self.space.repr(self.w_keepalive)
+ return "buffer len %d from '%s' object" % (
+ self.length, self.space.type(self.w_keepalive).name)
+
+
W_CData.typedef = TypeDef(
'_cffi_backend.CData',
__module__ = '_cffi_backend',
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -34,6 +34,7 @@
could_cast_anything=False)
self.fargs = fargs
self.ellipsis = bool(ellipsis)
+ self.cif_descr = lltype.nullptr(CIF_DESCRIPTION)
# fresult is stored in self.ctitem
if not ellipsis:
@@ -41,7 +42,14 @@
# at all. The cif is computed on every call from the actual
# types passed in. For all other functions, the cif_descr
# is computed here.
- CifDescrBuilder(fargs, fresult).rawallocate(self)
+ builder = CifDescrBuilder(fargs, fresult)
+ try:
+ builder.rawallocate(self)
+ except OperationError, e:
+ if not e.match(space, space.w_NotImplementedError):
+ raise
+ # else, eat the NotImplementedError. We will get the
+ # exception if we see an actual call
def new_ctypefunc_completing_argtypes(self, args_w):
space = self.space
@@ -178,8 +186,6 @@
# ____________________________________________________________
-W_CTypeFunc.cif_descr = lltype.nullptr(CIF_DESCRIPTION) # default value
-
BIG_ENDIAN = sys.byteorder == 'big'
USE_C_LIBFFI_MSVC = getattr(clibffi, 'USE_C_LIBFFI_MSVC', False)
@@ -295,18 +301,18 @@
nflat = 0
for i, cf in enumerate(ctype.fields_list):
if cf.is_bitfield():
- raise OperationError(space.w_NotImplementedError,
- space.wrap("cannot pass as argument or return value "
- "a struct with bit fields"))
+ raise oefmt(space.w_NotImplementedError,
+ "ctype '%s' not supported as argument or return value"
+ " (it is a struct with bit fields)", ctype.name)
flat = 1
ct = cf.ctype
while isinstance(ct, ctypearray.W_CTypeArray):
flat *= ct.length
ct = ct.ctitem
if flat <= 0:
- raise OperationError(space.w_NotImplementedError,
- space.wrap("cannot pass as argument or return value "
- "a struct with a zero-length array"))
+ raise oefmt(space.w_NotImplementedError,
+ "ctype '%s' not supported as argument or return value"
+ " (it is a struct with a zero-length array)", ctype.name)
nflat += flat
if USE_C_LIBFFI_MSVC and is_result_type:
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -158,21 +158,14 @@
class W_CTypePrimitiveSigned(W_CTypePrimitive):
- _attrs_ = ['value_fits_long', 'vmin', 'vrangemax']
- _immutable_fields_ = ['value_fits_long', 'vmin', 'vrangemax']
+ _attrs_ = ['value_fits_long', 'value_smaller_than_long']
+ _immutable_fields_ = ['value_fits_long', 'value_smaller_than_long']
is_primitive_integer = True
def __init__(self, *args):
W_CTypePrimitive.__init__(self, *args)
self.value_fits_long = self.size <= rffi.sizeof(lltype.Signed)
- if self.size < rffi.sizeof(lltype.Signed):
- assert self.value_fits_long
- sh = self.size * 8
- self.vmin = r_uint(-1) << (sh - 1)
- self.vrangemax = (r_uint(1) << sh) - 1
- else:
- self.vmin = r_uint(0)
- self.vrangemax = r_uint(-1)
+ self.value_smaller_than_long = self.size < rffi.sizeof(lltype.Signed)
def cast_to_int(self, cdata):
return self.convert_to_object(cdata)
@@ -192,8 +185,17 @@
def convert_from_object(self, cdata, w_ob):
if self.value_fits_long:
value = misc.as_long(self.space, w_ob)
- if self.size < rffi.sizeof(lltype.Signed):
- if r_uint(value) - self.vmin > self.vrangemax:
+ if self.value_smaller_than_long:
+ size = self.size
+ if size == 1:
+ signextended = misc.signext(value, 1)
+ elif size == 2:
+ signextended = misc.signext(value, 2)
+ elif size == 4:
+ signextended = misc.signext(value, 4)
+ else:
+ raise AssertionError("unsupported size")
+ if value != signextended:
self._overflow(w_ob)
misc.write_raw_signed_data(cdata, value, self.size)
else:
@@ -221,7 +223,7 @@
length = w_cdata.get_array_length()
populate_list_from_raw_array(res, buf, length)
return res
- elif self.value_fits_long:
+ elif self.value_smaller_than_long:
res = [0] * w_cdata.get_array_length()
misc.unpack_list_from_raw_array(res, w_cdata._cdata, self.size)
return res
@@ -235,8 +237,8 @@
cdata = rffi.cast(rffi.LONGP, cdata)
copy_list_to_raw_array(int_list, cdata)
else:
- overflowed = misc.pack_list_to_raw_array_bounds(
- int_list, cdata, self.size, self.vmin, self.vrangemax)
+ overflowed = misc.pack_list_to_raw_array_bounds_signed(
+ int_list, cdata, self.size)
if overflowed != 0:
self._overflow(self.space.wrap(overflowed))
return True
@@ -314,8 +316,8 @@
def pack_list_of_items(self, cdata, w_ob):
int_list = self.space.listview_int(w_ob)
if int_list is not None:
- overflowed = misc.pack_list_to_raw_array_bounds(
- int_list, cdata, self.size, r_uint(0), self.vrangemax)
+ overflowed = misc.pack_list_to_raw_array_bounds_unsigned(
+ int_list, cdata, self.size, self.vrangemax)
if overflowed != 0:
self._overflow(self.space.wrap(overflowed))
return True
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -76,3 +76,32 @@
def _get_types(space):
return space.newtuple([space.gettypefor(cdataobj.W_CData),
space.gettypefor(ctypeobj.W_CType)])
+
+# ____________________________________________________________
+
+@unwrap_spec(w_ctype=ctypeobj.W_CType)
+def from_buffer(space, w_ctype, w_x):
+ from pypy.module._cffi_backend import ctypearray, ctypeprim
+ #
+ if (not isinstance(w_ctype, ctypearray.W_CTypeArray) or
+ not isinstance(w_ctype.ctptr.ctitem, ctypeprim.W_CTypePrimitiveChar)):
+ raise oefmt(space.w_TypeError,
+ "needs 'char[]', got '%s'", w_ctype.name)
+ #
+ # xxx do we really need to implement the same mess as in CPython 2.7
+ # w.r.t. buffers and memoryviews??
+ try:
+ buf = space.readbuf_w(w_x)
+ except OperationError, e:
+ if not e.match(space, space.w_TypeError):
+ raise
+ buf = space.buffer_w(w_x, space.BUF_SIMPLE)
+ try:
+ _cdata = buf.get_raw_address()
+ except ValueError:
+ raise oefmt(space.w_TypeError,
+ "from_buffer() got a '%T' object, which supports the "
+ "buffer interface but cannot be rendered as a plain "
+ "raw address on PyPy", w_x)
+ #
+ return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py
--- a/pypy/module/_cffi_backend/misc.py
+++ b/pypy/module/_cffi_backend/misc.py
@@ -216,6 +216,19 @@
neg_msg = "can't convert negative number to unsigned"
ovf_msg = "long too big to convert"
[email protected](1)
+def signext(value, size):
+ # 'value' is sign-extended from 'size' bytes to a full integer.
+ # 'size' should be a constant smaller than a full integer size.
+ if size == rffi.sizeof(rffi.SIGNEDCHAR):
+ return rffi.cast(lltype.Signed, rffi.cast(rffi.SIGNEDCHAR, value))
+ elif size == rffi.sizeof(rffi.SHORT):
+ return rffi.cast(lltype.Signed, rffi.cast(rffi.SHORT, value))
+ elif size == rffi.sizeof(rffi.INT):
+ return rffi.cast(lltype.Signed, rffi.cast(rffi.INT, value))
+ else:
+ raise AssertionError("unsupported size")
+
# ____________________________________________________________
class _NotStandardObject(Exception):
@@ -334,13 +347,26 @@
# ____________________________________________________________
-def pack_list_to_raw_array_bounds(int_list, target, size, vmin, vrangemax):
+def pack_list_to_raw_array_bounds_signed(int_list, target, size):
for TP, TPP in _prim_signed_types:
if size == rffi.sizeof(TP):
ptr = rffi.cast(TPP, target)
for i in range(len(int_list)):
x = int_list[i]
- if r_uint(x) - vmin > vrangemax:
+ y = rffi.cast(TP, x)
+ if x != rffi.cast(lltype.Signed, y):
+ return x # overflow
+ ptr[i] = y
+ return 0
+ raise NotImplementedError("bad integer size")
+
+def pack_list_to_raw_array_bounds_unsigned(int_list, target, size, vrangemax):
+ for TP, TPP in _prim_signed_types:
+ if size == rffi.sizeof(TP):
+ ptr = rffi.cast(TPP, target)
+ for i in range(len(int_list)):
+ x = int_list[i]
+ if r_uint(x) > vrangemax:
return x # overflow
ptr[i] = rffi.cast(TP, x)
return 0
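
The new overflow check replaces the old vmin/vrangemax window: a value fits in N bytes exactly when sign-extending its low N bytes gives the value back. A plain-Python sketch of the idea (illustrative only, not the RPython helpers above)::

    def fits_signed(value, size):
        # reinterpret the low `size` bytes of `value` as a signed integer and
        # check that nothing was lost -- this mirrors what signext() computes
        bits = size * 8
        low = value & ((1 << bits) - 1)
        if low >= (1 << (bits - 1)):
            low -= 1 << bits
        return low == value

    assert fits_signed(127, 1)          # fits in a signed char
    assert not fits_signed(200, 1)      # sign-extends to -56: overflow
    assert not fits_signed(-40000, 2)   # does not fit in a short
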
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -62,10 +62,54 @@
eptype("intptr_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned)
eptype("uintptr_t", rffi.UINTPTR_T, ctypeprim.W_CTypePrimitiveUnsigned)
-eptype("ptrdiff_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned) # <-xxx
eptype("size_t", rffi.SIZE_T, ctypeprim.W_CTypePrimitiveUnsigned)
eptype("ssize_t", rffi.SSIZE_T, ctypeprim.W_CTypePrimitiveSigned)
+_WCTSigned = ctypeprim.W_CTypePrimitiveSigned
+_WCTUnsign = ctypeprim.W_CTypePrimitiveUnsigned
+
+eptype("ptrdiff_t", getattr(rffi, 'PTRDIFF_T', rffi.INTPTR_T), _WCTSigned)
+eptype("intmax_t", getattr(rffi, 'INTMAX_T', rffi.LONGLONG), _WCTSigned)
+eptype("uintmax_t", getattr(rffi, 'UINTMAX_T', rffi.LONGLONG), _WCTUnsign)
+
+if hasattr(rffi, 'INT_LEAST8_T'):
+ eptype("int_least8_t", rffi.INT_LEAST8_T, _WCTSigned)
+ eptype("int_least16_t", rffi.INT_LEAST16_T, _WCTSigned)
+ eptype("int_least32_t", rffi.INT_LEAST32_T, _WCTSigned)
+ eptype("int_least64_t", rffi.INT_LEAST64_T, _WCTSigned)
+ eptype("uint_least8_t", rffi.UINT_LEAST8_T, _WCTUnsign)
+ eptype("uint_least16_t",rffi.UINT_LEAST16_T, _WCTUnsign)
+ eptype("uint_least32_t",rffi.UINT_LEAST32_T, _WCTUnsign)
+ eptype("uint_least64_t",rffi.UINT_LEAST64_T, _WCTUnsign)
+else:
+ eptypesize("int_least8_t", 1, _WCTSigned)
+ eptypesize("uint_least8_t", 1, _WCTUnsign)
+ eptypesize("int_least16_t", 2, _WCTSigned)
+ eptypesize("uint_least16_t", 2, _WCTUnsign)
+ eptypesize("int_least32_t", 4, _WCTSigned)
+ eptypesize("uint_least32_t", 4, _WCTUnsign)
+ eptypesize("int_least64_t", 8, _WCTSigned)
+ eptypesize("uint_least64_t", 8, _WCTUnsign)
+
+if hasattr(rffi, 'INT_FAST8_T'):
+ eptype("int_fast8_t", rffi.INT_FAST8_T, _WCTSigned)
+ eptype("int_fast16_t", rffi.INT_FAST16_T, _WCTSigned)
+ eptype("int_fast32_t", rffi.INT_FAST32_T, _WCTSigned)
+ eptype("int_fast64_t", rffi.INT_FAST64_T, _WCTSigned)
+ eptype("uint_fast8_t", rffi.UINT_FAST8_T, _WCTUnsign)
+ eptype("uint_fast16_t",rffi.UINT_FAST16_T, _WCTUnsign)
+ eptype("uint_fast32_t",rffi.UINT_FAST32_T, _WCTUnsign)
+ eptype("uint_fast64_t",rffi.UINT_FAST64_T, _WCTUnsign)
+else:
+ eptypesize("int_fast8_t", 1, _WCTSigned)
+ eptypesize("uint_fast8_t", 1, _WCTUnsign)
+ eptypesize("int_fast16_t", 2, _WCTSigned)
+ eptypesize("uint_fast16_t", 2, _WCTUnsign)
+ eptypesize("int_fast32_t", 4, _WCTSigned)
+ eptypesize("uint_fast32_t", 4, _WCTUnsign)
+ eptypesize("int_fast64_t", 8, _WCTSigned)
+ eptypesize("uint_fast64_t", 8, _WCTUnsign)
+
@unwrap_spec(name=str)
def new_primitive_type(space, name):
try:
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -397,7 +397,7 @@
def test_invalid_indexing():
p = new_primitive_type("int")
x = cast(p, 42)
- py.test.raises(TypeError, "p[0]")
+ py.test.raises(TypeError, "x[0]")
def test_default_str():
BChar = new_primitive_type("char")
@@ -1030,11 +1030,12 @@
BInt = new_primitive_type("int")
BArray0 = new_array_type(new_pointer_type(BInt), 0)
BStruct = new_struct_type("struct foo")
+ BStructP = new_pointer_type(BStruct)
complete_struct_or_union(BStruct, [('a', BArray0)])
- py.test.raises(NotImplementedError, new_function_type,
- (BStruct,), BInt, False)
- py.test.raises(NotImplementedError, new_function_type,
- (BInt,), BStruct, False)
+ BFunc = new_function_type((BStruct,), BInt, False)
+ py.test.raises(NotImplementedError, cast(BFunc, 123), cast(BStructP, 123))
+ BFunc2 = new_function_type((BInt,), BStruct, False)
+ py.test.raises(NotImplementedError, cast(BFunc2, 123), 123)
def test_call_function_9():
BInt = new_primitive_type("int")
@@ -1805,7 +1806,8 @@
new_function_type((), new_pointer_type(BFunc))
BUnion = new_union_type("union foo_u")
complete_struct_or_union(BUnion, [])
- py.test.raises(NotImplementedError, new_function_type, (), BUnion)
+ BFunc = new_function_type((), BUnion)
+ py.test.raises(NotImplementedError, cast(BFunc, 123))
py.test.raises(TypeError, new_function_type, (), BArray)
def test_struct_return_in_func():
@@ -2718,7 +2720,16 @@
def test_nonstandard_integer_types():
for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t',
'uint32_t', 'int64_t', 'uint64_t', 'intptr_t',
- 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t']:
+ 'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t',
+ 'int_least8_t', 'uint_least8_t',
+ 'int_least16_t', 'uint_least16_t',
+ 'int_least32_t', 'uint_least32_t',
+ 'int_least64_t', 'uint_least64_t',
+ 'int_fast8_t', 'uint_fast8_t',
+ 'int_fast16_t', 'uint_fast16_t',
+ 'int_fast32_t', 'uint_fast32_t',
+ 'int_fast64_t', 'uint_fast64_t',
+ 'intmax_t', 'uintmax_t']:
new_primitive_type(typename) # works
def test_cannot_convert_unicode_to_charp():
@@ -3186,6 +3197,20 @@
('a2', BChar, 5)],
None, -1, -1, SF_PACKED)
+def test_from_buffer():
+ import array
+ a = array.array('H', [10000, 20000, 30000])
+ BChar = new_primitive_type("char")
+ BCharP = new_pointer_type(BChar)
+ BCharA = new_array_type(BCharP, None)
+ c = from_buffer(BCharA, a)
+ assert typeof(c) is BCharA
+ assert len(c) == 6
+ assert repr(c) == "<cdata 'char[]' buffer len 6 from 'array.array' object>"
+ p = new_pointer_type(new_primitive_type("unsigned short"))
+ cast(p, c)[1] += 500
+ assert list(a) == [10000, 20500, 30000]
+
def test_version():
# this test is here mostly for PyPy
assert __version__ == "0.8.6"
diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py
--- a/pypy/module/_cffi_backend/test/test_c.py
+++ b/pypy/module/_cffi_backend/test/test_c.py
@@ -30,7 +30,7 @@
class AppTestC(object):
"""Populated below, hack hack hack."""
- spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO'))
+ spaceconfig = dict(usemodules=('_cffi_backend', 'cStringIO', 'array'))
def setup_class(cls):
testfuncs_w = []
diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py
--- a/pypy/module/_file/interp_stream.py
+++ b/pypy/module/_file/interp_stream.py
@@ -34,8 +34,12 @@
# this function runs with the GIL acquired so there is no race
# condition in the creation of the lock
me = self.space.getexecutioncontext() # used as thread ident
- if self.slockowner is me:
- return False # already acquired by the current thread
+ if self.slockowner is not None:
+ if self.slockowner is me:
+ return False # already acquired by the current thread
+ if self.slockowner.thread_disappeared:
+ self.slockowner = None
+ self.slock = None
try:
if self.slock is None:
self.slock = self.space.allocate_lock()
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -275,6 +275,24 @@
finally:
f.close()
+ def test_ignore_ioerror_in_readall_if_nonempty_result(self):
+ # this is the behavior of regular files in CPython 2.7, as
+ # well as of _io.FileIO at least in CPython 3.3. This is
+ # *not* the behavior of _io.FileIO in CPython 3.4 or 3.5;
+ # see CPython's issue #21090.
+ try:
+ from posix import openpty, fdopen, write, close
+ except ImportError:
+ skip('no openpty on this platform')
+ read_fd, write_fd = openpty()
+ write(write_fd, 'Abc\n')
+ close(write_fd)
+ f = fdopen(read_fd)
+ s = f.read()
+ assert s == 'Abc\r\n'
+ raises(IOError, f.read)
+ f.close()
+
class AppTestNonblocking(object):
def setup_class(cls):
@@ -286,7 +304,7 @@
py.test.skip("works with internals of _file impl on py.py")
state = [0]
def read(fd, n=None):
- if fd != 42:
+ if fd != 424242:
return cls.old_read(fd, n)
if state[0] == 0:
state[0] += 1
@@ -297,7 +315,7 @@
return ''
os.read = read
stdin = W_File(cls.space)
- stdin.file_fdopen(42, 'rb', 1)
+ stdin.file_fdopen(424242, 'rb', 1)
stdin.name = '<stdin>'
cls.w_stream = stdin
diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py
--- a/pypy/module/_file/test/test_file_extra.py
+++ b/pypy/module/_file/test/test_file_extra.py
@@ -221,7 +221,7 @@
expected_filename = str(udir.join('sample'))
expected_mode = 'rb'
extra_args = ()
- spaceconfig = {'usemodules': ['binascii', 'rctime', 'struct']}
+ spaceconfig = {'usemodules': ['binascii', 'time', 'struct']}
def setup_method(self, method):
space = self.space
@@ -281,7 +281,7 @@
expected_filename = '<fdopen>'
expected_mode = 'rb'
extra_args = ()
- spaceconfig = {'usemodules': ['binascii', 'rctime', 'struct']}
+ spaceconfig = {'usemodules': ['binascii', 'time', 'struct']}
def setup_method(self, method):
space = self.space
@@ -359,7 +359,7 @@
# A few extra tests
class AppTestAFewExtra:
- spaceconfig = {'usemodules': ['_socket', 'array', 'binascii', 'rctime',
+ spaceconfig = {'usemodules': ['_socket', 'array', 'binascii', 'time',
'struct']}
def setup_method(self, method):
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -565,7 +565,7 @@
# Flush the write buffer if necessary
if self.writable:
- self._writer_flush_unlocked(space)
+ self._flush_and_rewind_unlocked(space)
self._reader_reset_buf()
# Read whole blocks, and don't buffer them
@@ -812,11 +812,6 @@
self._check_closed(space, "flush of closed file")
with self.lock:
self._flush_and_rewind_unlocked(space)
- if self.readable:
- # Rewind the raw stream so that its position corresponds to
- # the current logical position.
- self._raw_seek(space, -self._raw_offset(), 1)
- self._reader_reset_buf()
def _flush_and_rewind_unlocked(self, space):
self._writer_flush_unlocked(space)
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -24,8 +24,7 @@
try:
w_value = error.get_w_value(space)
w_errno = space.getattr(w_value, space.wrap("errno"))
- return space.is_true(
- space.eq(w_errno, space.wrap(EINTR)))
+ return space.eq_w(w_errno, space.wrap(EINTR))
except OperationError:
return False
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -352,3 +352,42 @@
assert mod == 'io'
else:
assert mod == '_io'
+
+ def test_issue1902(self):
+ import _io
+ with _io.open(self.tmpfile, 'w+b', 4096) as f:
+ f.write(b'\xff' * 13569)
+ f.flush()
+ f.seek(0, 0)
+ f.read(1)
+ f.seek(-1, 1)
+ f.write(b'')
+
+ def test_issue1902_2(self):
+ import _io
+ with _io.open(self.tmpfile, 'w+b', 4096) as f:
+ f.write(b'\xff' * 13569)
+ f.flush()
+ f.seek(0, 0)
+
+ f.read(1)
+ f.seek(-1, 1)
+ f.write(b'\xff')
+ f.seek(1, 0)
+ f.read(4123)
+ f.seek(-4123, 1)
+
+ def test_issue1902_3(self):
+ import _io
+ buffer_size = 4096
+ with _io.open(self.tmpfile, 'w+b', buffer_size) as f:
+ f.write(b'\xff' * buffer_size * 3)
+ f.flush()
+ f.seek(0, 0)
+
+ f.read(1)
+ f.seek(-1, 1)
+ f.write(b'\xff')
+ f.seek(1, 0)
+ f.read(buffer_size * 2)
+ assert f.tell() == 1 + buffer_size * 2
diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py
--- a/pypy/module/_lsprof/test/test_cprofile.py
+++ b/pypy/module/_lsprof/test/test_cprofile.py
@@ -1,6 +1,6 @@
class AppTestCProfile(object):
spaceconfig = {
- "usemodules": ['_lsprof', 'rctime'],
+ "usemodules": ['_lsprof', 'time'],
}
def setup_class(cls):
diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py
--- a/pypy/module/_md5/test/test_md5.py
+++ b/pypy/module/_md5/test/test_md5.py
@@ -5,7 +5,7 @@
class AppTestMD5(object):
spaceconfig = {
- 'usemodules': ['_md5', 'binascii', 'rctime', 'struct'],
+ 'usemodules': ['_md5', 'binascii', 'time', 'struct'],
}
def setup_class(cls):
diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h
--- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h
+++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h
@@ -97,24 +97,24 @@
Py_UNICODE *outbuf_start, *outbuf, *outbuf_end;
};
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d,
char *inbuf, Py_ssize_t inlen);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
void pypy_cjk_dec_free(struct pypy_cjk_dec_s *);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d);
-RPY_EXPORTED_FOR_TESTS
+RPY_EXTERN
Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d,
Py_UNICODE *, Py_ssize_t, Py_ssize_t);
@@ -125,35 +125,35 @@
unsigned char *outbuf_start, *outbuf, *outbuf_end;
};
-RPY_EXPORTED_FOR_TESTS