Author: Wim Lavrijsen <[email protected]>
Branch: cppyy-packaging
Changeset: r94402:b74ad9bd1274
Date: 2018-01-26 14:27 -0800
http://bitbucket.org/pypy/pypy/changeset/b74ad9bd1274/
Log: merge default into branch
diff too long, truncating to 2000 out of 93932 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -44,3 +44,10 @@
d72f9800a42b46a8056951b1da2426d2c2d8d502 release-pypy3.5-v5.9.0
03d614975835870da65ff0481e1edad68ebbcb8d release-pypy2.7-v5.9.0
84a2f3e6a7f88f2fe698e473998755b3bd1a12e2 release-pypy2.7-v5.9.0
+0e7ea4fe15e82d5124e805e2e4a37cae1a402d4b release-pypy2.7-v5.10.0
+a91df6163fb76df245091f741dbf6a23ddc72374 release-pypy3.5-v5.10.0
+a91df6163fb76df245091f741dbf6a23ddc72374 release-pypy3.5-v5.10.0
+0000000000000000000000000000000000000000 release-pypy3.5-v5.10.0
+0000000000000000000000000000000000000000 release-pypy3.5-v5.10.0
+09f9160b643e3f02ccb8c843b2fbb4e5cbf54082 release-pypy3.5-v5.10.0
+3f6eaa010fce78cc7973bdc1dfdb95970f08fed2 release-pypy3.5-v5.10.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -30,7 +30,7 @@
DEALINGS IN THE SOFTWARE.
-PyPy Copyright holders 2003-2017
+PyPy Copyright holders 2003-2018
-----------------------------------
Except when otherwise stated (look for LICENSE files or information at
@@ -339,8 +339,10 @@
Stanisław Halik
Julien Phalip
Roman Podoliaka
+ Steve Papanik
Eli Stevens
Boglarka Vezer
+ gabrielg
PavloKapyshin
Tomer Chachamu
Christopher Groskopf
@@ -363,11 +365,13 @@
Konrad Delong
Dinu Gherman
pizi
+ Tomáš Pružina
James Robert
Armin Ronacher
Diana Popa
Mads Kiilerich
Brett Cannon
+ Caleb Hattingh
aliceinwire
Zooko Wilcox-O Hearn
James Lan
@@ -388,6 +392,7 @@
Jason Madden
Yaroslav Fedevych
Even Wiik Thomassen
+ [email protected]
Stefan Marr
Heinrich-Heine University, Germany
diff --git a/_pytest/terminal.py b/_pytest/terminal.py
--- a/_pytest/terminal.py
+++ b/_pytest/terminal.py
@@ -366,11 +366,11 @@
EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED)
if exitstatus in summary_exit_codes:
- self.config.hook.pytest_terminal_summary(terminalreporter=self)
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
+ self.config.hook.pytest_terminal_summary(terminalreporter=self)
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
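As a side illustration (not part of this changeset): with the hook call moved below the built-in
summaries, anything written by a ``pytest_terminal_summary`` hook now appears after the
errors/failures/warnings/passes sections. A hypothetical ``conftest.py`` sketch using pytest's
standard hook and reporter API::

    # conftest.py -- illustrative only
    def pytest_terminal_summary(terminalreporter):
        # printed after the built-in summaries thanks to the reordering above
        terminalreporter.section("custom summary")
        terminalreporter.write_line("all tests accounted for")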
diff --git a/extra_tests/requirements.txt b/extra_tests/requirements.txt
new file mode 100644
--- /dev/null
+++ b/extra_tests/requirements.txt
@@ -0,0 +1,3 @@
+pytest
+hypothesis
+vmprof
diff --git a/extra_tests/test_bytes.py b/extra_tests/test_bytes.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_bytes.py
@@ -0,0 +1,84 @@
+from hypothesis import strategies as st
+from hypothesis import given, example
+
+st_bytestring = st.binary() | st.binary().map(bytearray)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_find(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.find(u) <= len(prefix)
+ assert s.find(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_index(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.index(u) <= len(prefix)
+ assert s.index(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_rfind(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rfind(u) >= len(prefix)
+ assert s.rfind(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_rindex(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rindex(u) >= len(prefix)
+ assert s.rindex(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+def adjust_indices(u, start, end):
+ if end < 0:
+ end = max(end + len(u), 0)
+ else:
+ end = min(end, len(u))
+ if start < 0:
+ start = max(start + len(u), 0)
+ return start, end
+
+@given(st_bytestring, st_bytestring)
+def test_startswith_basic(u, v):
+ assert u.startswith(v) is (u[:len(v)] == v)
+
+@example(b'x', b'', 1)
+@example(b'x', b'', 2)
+@given(st_bytestring, st_bytestring, st.integers())
+def test_startswith_start(u, v, start):
+ expected = u[start:].startswith(v) if v else (start <= len(u))
+ assert u.startswith(v, start) is expected
+
+@example(b'x', b'', 1, 0)
+@example(b'xx', b'', -1, 0)
+@given(st_bytestring, st_bytestring, st.integers(), st.integers())
+def test_startswith_3(u, v, start, end):
+ if v:
+ expected = u[start:end].startswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.startswith(v, start, end) is expected
+
+@given(st_bytestring, st_bytestring)
+def test_endswith_basic(u, v):
+ if len(v) > len(u):
+ assert u.endswith(v) is False
+ else:
+ assert u.endswith(v) is (u[len(u) - len(v):] == v)
+
+@example(b'x', b'', 1)
+@example(b'x', b'', 2)
+@given(st_bytestring, st_bytestring, st.integers())
+def test_endswith_2(u, v, start):
+ expected = u[start:].endswith(v) if v else (start <= len(u))
+ assert u.endswith(v, start) is expected
+
+@example(b'x', b'', 1, 0)
+@example(b'xx', b'', -1, 0)
+@given(st_bytestring, st_bytestring, st.integers(), st.integers())
+def test_endswith_3(u, v, start, end):
+ if v:
+ expected = u[start:end].endswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.endswith(v, start, end) is expected
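For readers unfamiliar with the empty-``v`` special case handled above, the behaviour being
worked around looks like this (illustration only, stdlib/CPython semantics)::

    # an empty needle: slicing clamps an out-of-range start, startswith() does not
    assert b'x'[2:].startswith(b'') is True
    assert b'x'.startswith(b'', 2) is False   # start > len(s), so False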
diff --git a/extra_tests/test_json.py b/extra_tests/test_json.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_json.py
@@ -0,0 +1,33 @@
+import pytest
+import json
+from hypothesis import given, strategies
+
+def is_(x, y):
+ return type(x) is type(y) and x == y
+
+def test_no_ensure_ascii():
+ assert is_(json.dumps(u"\u1234", ensure_ascii=False), u'"\u1234"')
+ assert is_(json.dumps("\xc0", ensure_ascii=False), '"\xc0"')
+ with pytest.raises(UnicodeDecodeError) as excinfo:
+ json.dumps((u"\u1234", "\xc0"), ensure_ascii=False)
+ assert str(excinfo.value).startswith(
+ "'ascii' codec can't decode byte 0xc0 ")
+ with pytest.raises(UnicodeDecodeError) as excinfo:
+ json.dumps(("\xc0", u"\u1234"), ensure_ascii=False)
+ assert str(excinfo.value).startswith(
+ "'ascii' codec can't decode byte 0xc0 ")
+
+def test_issue2191():
+ assert is_(json.dumps(u"xxx", ensure_ascii=False), u'"xxx"')
+
+jsondata = strategies.recursive(
+ strategies.none() |
+ strategies.booleans() |
+ strategies.floats(allow_nan=False) |
+ strategies.text(),
+ lambda children: strategies.lists(children) |
+ strategies.dictionaries(strategies.text(), children))
+
+@given(jsondata)
+def test_roundtrip(d):
+ assert json.loads(json.dumps(d)) == d
diff --git a/extra_tests/test_textio.py b/extra_tests/test_textio.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_textio.py
@@ -0,0 +1,48 @@
+from hypothesis import given, strategies as st
+
+from io import BytesIO, TextIOWrapper
+import os
+
+def translate_newlines(text):
+ text = text.replace('\r\n', '\n')
+ text = text.replace('\r', '\n')
+ return text.replace('\n', os.linesep)
+
[email protected]
+def st_readline_universal(
+ draw, st_nlines=st.integers(min_value=0, max_value=10)):
+ n_lines = draw(st_nlines)
+ lines = draw(st.lists(
+ st.text(st.characters(blacklist_characters='\r\n')),
+ min_size=n_lines, max_size=n_lines))
+ limits = []
+ for line in lines:
+ limit = draw(st.integers(min_value=0, max_value=len(line) + 5))
+ limits.append(limit)
+ limits.append(-1)
+ endings = draw(st.lists(
+ st.sampled_from(['\n', '\r', '\r\n']),
+ min_size=n_lines, max_size=n_lines))
+ return (
+ ''.join(line + ending for line, ending in zip(lines, endings)),
+ limits)
+
+@given(data=st_readline_universal(),
+ mode=st.sampled_from(['\r', '\n', '\r\n', '', None]))
+def test_readline(data, mode):
+ txt, limits = data
+ textio = TextIOWrapper(
+ BytesIO(txt.encode('utf-8', 'surrogatepass')),
+ encoding='utf-8', errors='surrogatepass', newline=mode)
+ lines = []
+ for limit in limits:
+ line = textio.readline(limit)
+ if limit >= 0:
+ assert len(line) <= limit
+ if line:
+ lines.append(line)
+ elif limit:
+ break
+ if mode is None:
+ txt = translate_newlines(txt)
+ assert txt.startswith(u''.join(lines))
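A concrete illustration (not from the changeset) of what the test above exercises, using only
the stdlib::

    from io import BytesIO, TextIOWrapper
    t = TextIOWrapper(BytesIO(b'one\r\ntwo\rthree\n'), encoding='utf-8',
                      newline=None)        # universal-newlines mode
    assert t.readline(2) == 'on'           # readline(limit) returns at most 2 chars
    assert t.readline() == 'e\n'           # '\r\n' translated to '\n'
    assert t.readline() == 'two\n'         # a lone '\r' as well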
diff --git a/extra_tests/test_unicode.py b/extra_tests/test_unicode.py
--- a/extra_tests/test_unicode.py
+++ b/extra_tests/test_unicode.py
@@ -1,3 +1,4 @@
+import sys
import pytest
from hypothesis import strategies as st
from hypothesis import given, settings, example
@@ -32,3 +33,89 @@
@given(s=st.text())
def test_composition(s, norm1, norm2, norm3):
assert normalize(norm2, normalize(norm1, s)) == normalize(norm3, s)
+
+@given(st.text(), st.text(), st.text())
+def test_find(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.find(u) <= len(prefix)
+ assert s.find(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st.text(), st.text(), st.text())
+def test_index(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.index(u) <= len(prefix)
+ assert s.index(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st.text(), st.text(), st.text())
+def test_rfind(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rfind(u) >= len(prefix)
+ assert s.rfind(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st.text(), st.text(), st.text())
+def test_rindex(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rindex(u) >= len(prefix)
+ assert s.rindex(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+def adjust_indices(u, start, end):
+ if end < 0:
+ end = max(end + len(u), 0)
+ else:
+ end = min(end, len(u))
+ if start < 0:
+ start = max(start + len(u), 0)
+ return start, end
+
+@given(st.text(), st.text())
+def test_startswith_basic(u, v):
+ assert u.startswith(v) is (u[:len(v)] == v)
+
+@example(u'x', u'', 1)
+@example(u'x', u'', 2)
+@given(st.text(), st.text(), st.integers())
+def test_startswith_2(u, v, start):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:].startswith(v)
+ else: # CPython leaks implementation details in this case
+ expected = start <= len(u)
+ assert u.startswith(v, start) is expected
+
+@example(u'x', u'', 1, 0)
+@example(u'xx', u'', -1, 0)
+@given(st.text(), st.text(), st.integers(), st.integers())
+def test_startswith_3(u, v, start, end):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:end].startswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.startswith(v, start, end) is expected
+
+@given(st.text(), st.text())
+def test_endswith_basic(u, v):
+ if len(v) > len(u):
+ assert u.endswith(v) is False
+ else:
+ assert u.endswith(v) is (u[len(u) - len(v):] == v)
+
+@example(u'x', u'', 1)
+@example(u'x', u'', 2)
+@given(st.text(), st.text(), st.integers())
+def test_endswith_2(u, v, start):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:].endswith(v)
+ else: # CPython leaks implementation details in this case
+ expected = start <= len(u)
+ assert u.endswith(v, start) is expected
+
+@example(u'x', u'', 1, 0)
+@example(u'xx', u'', -1, 0)
+@given(st.text(), st.text(), st.integers(), st.integers())
+def test_endswith_3(u, v, start, end):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:end].endswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.endswith(v, start, end) is expected
diff --git a/extra_tests/test_vmprof_greenlet.py b/extra_tests/test_vmprof_greenlet.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_vmprof_greenlet.py
@@ -0,0 +1,28 @@
+import time
+import pytest
+import greenlet
+vmprof = pytest.importorskip('vmprof')
+
+def count_samples(filename):
+ stats = vmprof.read_profile(filename)
+ return len(stats.profiles)
+
+def cpuburn(duration):
+ end = time.time() + duration
+ while time.time() < end:
+ pass
+
+def test_sampling_inside_callback(tmpdir):
+ # see also test_sampling_inside_callback inside
+ # pypy/module/_continuation/test/test_stacklet.py
+ #
+ G = greenlet.greenlet(cpuburn)
+ fname = tmpdir.join('log.vmprof')
+ with fname.open('w+b') as f:
+ vmprof.enable(f.fileno(), 1/250.0)
+ G.switch(0.1)
+ vmprof.disable()
+
+ samples = count_samples(str(fname))
+ # 0.1 seconds at 250Hz should be 25 samples
+ assert 23 < samples < 27
diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py
--- a/lib-python/2.7/ctypes/__init__.py
+++ b/lib-python/2.7/ctypes/__init__.py
@@ -360,14 +360,15 @@
self._FuncPtr = _FuncPtr
if handle is None:
- if flags & _FUNCFLAG_CDECL:
- pypy_dll = _ffi.CDLL(name, mode)
- else:
- pypy_dll = _ffi.WinDLL(name, mode)
- self.__pypy_dll__ = pypy_dll
- handle = int(pypy_dll)
- if _sys.maxint > 2 ** 32:
- handle = int(handle) # long -> int
+ handle = 0
+ if flags & _FUNCFLAG_CDECL:
+ pypy_dll = _ffi.CDLL(name, mode, handle)
+ else:
+ pypy_dll = _ffi.WinDLL(name, mode, handle)
+ self.__pypy_dll__ = pypy_dll
+ handle = int(pypy_dll)
+ if _sys.maxint > 2 ** 32:
+ handle = int(handle) # long -> int
self._handle = handle
def __repr__(self):
diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py
--- a/lib-python/2.7/inspect.py
+++ b/lib-python/2.7/inspect.py
@@ -40,6 +40,10 @@
import linecache
from operator import attrgetter
from collections import namedtuple
+try:
+ from cpyext import is_cpyext_function as _is_cpyext_function
+except ImportError:
+ _is_cpyext_function = lambda obj: False
# These constants are from Include/code.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
@@ -230,7 +234,7 @@
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
- return isinstance(object, types.BuiltinFunctionType)
+    return isinstance(object, types.BuiltinFunctionType) or _is_cpyext_function(object)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py
--- a/lib-python/2.7/subprocess.py
+++ b/lib-python/2.7/subprocess.py
@@ -1296,7 +1296,7 @@
'copyfile' in caller.f_globals):
dest_dir = sys.pypy_resolvedirof(target_executable)
src_dir = sys.pypy_resolvedirof(sys.executable)
- for libname in ['libpypy-c.so', 'libpypy-c.dylib']:
+ for libname in ['libpypy-c.so', 'libpypy-c.dylib', 'libpypy-c.dll']:
dest_library = os.path.join(dest_dir, libname)
src_library = os.path.join(src_dir, libname)
if os.path.exists(src_library):
diff --git a/lib-python/2.7/test/test_urllib2net.py b/lib-python/2.7/test/test_urllib2net.py
--- a/lib-python/2.7/test/test_urllib2net.py
+++ b/lib-python/2.7/test/test_urllib2net.py
@@ -286,7 +286,7 @@
self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)
u.close()
- FTP_HOST = 'ftp://ftp.debian.org/debian/'
+ FTP_HOST = 'ftp://www.pythontest.net/'
def test_ftp_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
diff --git a/lib-python/2.7/warnings.py b/lib-python/2.7/warnings.py
--- a/lib-python/2.7/warnings.py
+++ b/lib-python/2.7/warnings.py
@@ -43,11 +43,12 @@
unicodetype = unicode
except NameError:
unicodetype = ()
+ template = "%s: %s: %s\n"
try:
message = str(message)
except UnicodeEncodeError:
- pass
- s = "%s: %s: %s\n" % (lineno, category.__name__, message)
+ template = unicode(template)
+ s = template % (lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
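The point of switching the template (an illustration that mirrors the logic above; the failure
only exists on Python 2)::

    lineno, category_name = 3, 'UserWarning'
    message = u'\u1234 non-ascii warning text'
    template = "%s: %s: %s\n"
    try:
        message = str(message)        # fails for non-ascii unicode on Python 2
    except UnicodeEncodeError:
        template = unicode(template)  # promote the template instead of dropping the error
    s = template % (lineno, category_name, message)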
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py
--- a/lib_pypy/_ctypes/array.py
+++ b/lib_pypy/_ctypes/array.py
@@ -12,7 +12,8 @@
if cls == (_CData,): # this is the Array class defined below
res._ffiarray = None
return res
- if not hasattr(res, '_length_') or not isinstance(res._length_, int):
+ if not hasattr(res, '_length_') or not isinstance(res._length_,
+ (int, long)):
raise AttributeError(
"class must define a '_length_' attribute, "
"which must be a positive integer")
diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py
--- a/lib_pypy/_ctypes_test.py
+++ b/lib_pypy/_ctypes_test.py
@@ -21,5 +21,11 @@
with fp:
imp.load_module('_ctypes_test', fp, filename, description)
except ImportError:
+ if os.name == 'nt':
+ # hack around finding compilers on win32
+ try:
+ import setuptools
+ except ImportError:
+ pass
print('could not find _ctypes_test in %s' % output_dir)
_pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test',
output_dir)
diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py
--- a/lib_pypy/_testcapi.py
+++ b/lib_pypy/_testcapi.py
@@ -16,4 +16,10 @@
with fp:
imp.load_module('_testcapi', fp, filename, description)
except ImportError:
+ if os.name == 'nt':
+ # hack around finding compilers on win32
+ try:
+ import setuptools
+ except ImportError:
+ pass
_pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir)
diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py
--- a/lib_pypy/_tkinter/app.py
+++ b/lib_pypy/_tkinter/app.py
@@ -180,6 +180,9 @@
if err == tklib.TCL_ERROR:
self.raiseTclError()
+ def interpaddr(self):
+ return int(tkffi.cast('size_t', self.interp))
+
def _var_invoke(self, func, *args, **kwargs):
if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread():
# The current thread is not the interpreter thread.
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,11 +1,12 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.11.2
+Version: 1.11.4
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
Author-email: [email protected]
License: MIT
+Description-Content-Type: UNKNOWN
Description:
CFFI
====
@@ -27,5 +28,7 @@
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
-__version__ = "1.11.2"
-__version_info__ = (1, 11, 2)
+__version__ = "1.11.4"
+__version_info__ = (1, 11, 4)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -7,6 +7,16 @@
we can learn about Py_DEBUG from pyconfig.h, but it is unclear if
the same works for the other two macros. Py_DEBUG implies them,
but not the other way around.
+
+ Issue #350 is still open: on Windows, the code here causes it to link
+ with PYTHON36.DLL (for example) instead of PYTHON3.DLL. A fix was
+ attempted in 164e526a5515 and 14ce6985e1c3, but reverted: virtualenv
+ does not make PYTHON3.DLL available, and so the "correctly" compiled
+ version would not run inside a virtualenv. We will re-apply the fix
+ after virtualenv has been fixed for some time. For explanation, see
+ issue #355. For a workaround if you want PYTHON3.DLL and don't worry
+ about virtualenv, see issue #350. See also 'py_limited_api' in
+ setuptools_ext.py.
*/
#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API)
# include <pyconfig.h>
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -247,7 +247,7 @@
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.11.2"
+ "\ncompiled with cffi version: 1.11.4"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -295,8 +295,9 @@
base_module_name = self.module_name.split('.')[-1]
if self.ffi._embedding is not None:
prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,))
- prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' %
- (self._string_literal(self.ffi._embedding),))
+ prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {')
+ self._print_string_literal_in_array(self.ffi._embedding)
+ prnt('0 };')
prnt('#ifdef PYPY_VERSION')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % (
base_module_name,))
@@ -1271,17 +1272,18 @@
_generate_cpy_extern_python_plus_c_ctx = \
_generate_cpy_extern_python_ctx
- def _string_literal(self, s):
- def _char_repr(c):
- # escape with a '\' the characters '\', '"' or (for trigraphs) '?'
- if c in '\\"?': return '\\' + c
- if ' ' <= c < '\x7F': return c
- if c == '\n': return '\\n'
- return '\\%03o' % ord(c)
- lines = []
- for line in s.splitlines(True) or ['']:
- lines.append('"%s"' % ''.join([_char_repr(c) for c in line]))
- return ' \\\n'.join(lines)
+ def _print_string_literal_in_array(self, s):
+ prnt = self._prnt
+ prnt('// # NB. this is not a string because of a size limit in MSVC')
+ for line in s.splitlines(True):
+ prnt(('// ' + line).rstrip())
+ printed_line = ''
+ for c in line:
+ if len(printed_line) >= 76:
+ prnt(printed_line)
+ printed_line = ''
+ printed_line += '%d,' % (ord(c),)
+ prnt(printed_line)
# ----------
# emitting the opcodes for individual types
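To visualize what ``_print_string_literal_in_array`` emits (a rough sketch, not the real
recompiler code; ``as_c_byte_array`` is a hypothetical helper): each character of the embedded
source becomes a decimal byte in a C ``char`` array, because MSVC limits the length of a single
string literal::

    def as_c_byte_array(s, width=76):
        # mimic the output format: one decimal ordinal per character, comma-separated
        lines, cur = [], ''
        for c in s:
            if len(cur) >= width:
                lines.append(cur)
                cur = ''
            cur += '%d,' % ord(c)
        lines.append(cur)
        return lines

    print(as_c_byte_array("import site\n"))
    # ['105,109,112,111,114,116,32,115,105,116,101,10,']
    # the generated code then terminates the array with a trailing 0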
diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py
--- a/lib_pypy/cffi/verifier.py
+++ b/lib_pypy/cffi/verifier.py
@@ -301,7 +301,6 @@
return suffixes
def _ensure_dir(filename):
- try:
- os.makedirs(os.path.dirname(filename))
- except OSError:
- pass
+ dirname = os.path.dirname(filename)
+ if dirname and not os.path.isdir(dirname):
+ os.makedirs(dirname)
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -17,10 +17,13 @@
"""
from __future__ import division
-import time as _time
+import time as _timemodule
import math as _math
import struct as _struct
+# for cpyext, use these as base classes
+from __pypy__._pypydatetime import dateinterop, deltainterop, timeinterop
+
_SENTINEL = object()
def _cmp(x, y):
@@ -179,7 +182,7 @@
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
- return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
+ return _timemodule.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
@@ -247,7 +250,7 @@
else:
push(ch)
newformat = "".join(newformat)
- return _time.strftime(newformat, timetuple)
+ return _timemodule.strftime(newformat, timetuple)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
@@ -433,7 +436,7 @@
raise TypeError("unsupported type for timedelta %s component: %s" %
(tag, type(num)))
-class timedelta(object):
+class timedelta(deltainterop):
"""Represent the difference between two datetime objects.
Supported operators:
@@ -489,7 +492,7 @@
if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS:
raise OverflowError("days=%d; must have magnitude <= %d" % (d,
_MAX_DELTA_DAYS))
- self = object.__new__(cls)
+ self = deltainterop.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
@@ -667,7 +670,7 @@
timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1)
timedelta.resolution = timedelta(microseconds=1)
-class date(object):
+class date(dateinterop):
"""Concrete date type.
Constructors:
@@ -707,12 +710,12 @@
if month is None and isinstance(year, bytes) and len(year) == 4 and \
1 <= ord(year[2]) <= 12:
# Pickle support
- self = object.__new__(cls)
+ self = dateinterop.__new__(cls)
self.__setstate(year)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
- self = object.__new__(cls)
+ self = dateinterop.__new__(cls)
self._year = year
self._month = month
self._day = day
@@ -724,13 +727,13 @@
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
- y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
+ y, m, d, hh, mm, ss, weekday, jday, dst = _timemodule.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
- t = _time.time()
+ t = _timemodule.time()
return cls.fromtimestamp(t)
@classmethod
@@ -1061,7 +1064,7 @@
_tzinfo_class = tzinfo
-class time(object):
+class time(timeinterop):
"""Time with time zone.
Constructors:
@@ -1097,14 +1100,14 @@
"""
if isinstance(hour, bytes) and len(hour) == 6 and ord(hour[0]) < 24:
# Pickle support
- self = object.__new__(cls)
+ self = timeinterop.__new__(cls)
self.__setstate(hour, minute or None)
self._hashcode = -1
return self
hour, minute, second, microsecond = _check_time_fields(
hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
- self = object.__new__(cls)
+ self = timeinterop.__new__(cls)
self._hour = hour
self._minute = minute
self._second = second
@@ -1408,7 +1411,7 @@
if isinstance(year, bytes) and len(year) == 10 and \
1 <= ord(year[2]) <= 12:
# Pickle support
- self = object.__new__(cls)
+ self = dateinterop.__new__(cls)
self.__setstate(year, month)
self._hashcode = -1
return self
@@ -1416,7 +1419,7 @@
hour, minute, second, microsecond = _check_time_fields(
hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
- self = object.__new__(cls)
+ self = dateinterop.__new__(cls)
self._year = year
self._month = month
self._day = day
@@ -1461,7 +1464,7 @@
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
- converter = _time.localtime if tz is None else _time.gmtime
+ converter = _timemodule.localtime if tz is None else _timemodule.gmtime
self = cls._from_timestamp(converter, timestamp, tz)
if tz is not None:
self = tz.fromutc(self)
@@ -1470,7 +1473,7 @@
@classmethod
def utcfromtimestamp(cls, t):
"Construct a UTC datetime from a POSIX timestamp (like time.time())."
- return cls._from_timestamp(_time.gmtime, t, None)
+ return cls._from_timestamp(_timemodule.gmtime, t, None)
@classmethod
def _from_timestamp(cls, converter, timestamp, tzinfo):
@@ -1493,13 +1496,13 @@
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
- t = _time.time()
+ t = _timemodule.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
- t = _time.time()
+ t = _timemodule.time()
return cls.utcfromtimestamp(t)
@classmethod
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -20,6 +20,7 @@
or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
__metaclass__ = _structseq.structseqtype
+ name = "resource.struct_rusage"
ru_utime = _structseq.structseqfield(0, "user time used")
ru_stime = _structseq.structseqfield(1, "system time used")
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -149,7 +149,7 @@
xz-devel # For lzma on PyPy3.
(XXX plus the SLES11 version of libgdbm-dev and tk-dev)
-On Mac OS X::
+On Mac OS X:
Most of these build-time dependencies are installed alongside
the Developer Tools. However, note that in order for the installation to
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -59,7 +59,7 @@
# General information about the project.
project = u'PyPy'
-copyright = u'2017, The PyPy Project'
+copyright = u'2018, The PyPy Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -217,6 +217,7 @@
Alejandro J. Cura
Vladimir Kryachko
Gabriel
+ Thomas Hisch
Mark Williams
Kunal Grover
Nathan Taylor
@@ -306,8 +307,10 @@
Stanisław Halik
Julien Phalip
Roman Podoliaka
+ Steve Papanik
Eli Stevens
Boglarka Vezer
+ gabrielg
PavloKapyshin
Tomer Chachamu
Christopher Groskopf
@@ -330,11 +333,13 @@
Konrad Delong
Dinu Gherman
pizi
+ Tomáš Pružina
James Robert
Armin Ronacher
Diana Popa
Mads Kiilerich
Brett Cannon
+ Caleb Hattingh
aliceinwire
Zooko Wilcox-O Hearn
James Lan
@@ -355,4 +360,5 @@
Jason Madden
Yaroslav Fedevych
Even Wiik Thomassen
+ [email protected]
Stefan Marr
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -355,7 +355,11 @@
containers (as list items or in sets for example), the exact rule of
equality used is "``if x is y or x == y``" (on both CPython and PyPy);
as a consequence, because all ``nans`` are identical in PyPy, you
-cannot have several of them in a set, unlike in CPython. (Issue `#1974`__)
+cannot have several of them in a set, unlike in CPython. (Issue `#1974`__).
+Another consequence is that ``cmp(float('nan'), float('nan')) == 0``, because
+``cmp`` checks with ``is`` first whether the arguments are identical (there is
+no good value to return from this call to ``cmp``, because ``cmp`` pretends
+that there is a total order on floats, but that is wrong for NaNs).
.. __:
https://bitbucket.org/pypy/pypy/issue/1974/different-behaviour-for-collections-of
@@ -541,6 +545,15 @@
``del foo.bar`` where ``foo`` is a module (or class) that contains the
function ``bar``, is significantly slower than CPython.
+* Various built-in functions in CPython accept only positional arguments
+ and not keyword arguments. That can be considered a long-running
+ historical detail: newer functions tend to accept keyword arguments
+ and older function are occasionally fixed to do so as well. In PyPy,
+ most built-in functions accept keyword arguments (``help()`` shows the
+ argument names). But don't rely on it too much because future
+ versions of PyPy may have to rename the arguments if CPython starts
+ accepting them too.
+
.. _`is ignored in PyPy`: http://bugs.python.org/issue14621
.. _`little point`:
http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html
.. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/
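A short illustration of the NaN rules described above (not part of the diff; Python 2 syntax,
since ``cmp`` only exists there)::

    nan = float('nan')
    assert cmp(nan, nan) == 0       # identical objects compare equal
    assert len({nan, nan}) == 1     # "x is y or x == y" keeps a single copy
    # With two *distinct* NaN objects the set has two elements on CPython;
    # on PyPy all NaNs are identical, so it still has only one.
    s = set([float('nan'), float('nan')])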
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -62,7 +62,7 @@
* go to pypy/tool/release and run
``force-builds.py <release branch>``
The following JIT binaries should be built, however, we need more buildbots
- windows, linux-32, linux-64, osx64, armhf-raring, armhf-raspberrian, armel,
+ windows, linux-32, linux-64, osx64, armhf-raspberrian, armel,
freebsd64
* wait for builds to complete, make sure there are no failures
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,8 @@
.. toctree::
+ release-v5.10.1.rst
+ release-v5.10.0.rst
release-v5.9.0.rst
release-v5.8.0.rst
release-v5.7.1.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
.. toctree::
whatsnew-head.rst
+ whatsnew-pypy2-5.10.0.rst
whatsnew-pypy2-5.9.0.rst
whatsnew-pypy2-5.8.0.rst
whatsnew-pypy2-5.7.0.rst
diff --git a/pypy/doc/release-v5.10.0.rst b/pypy/doc/release-v5.10.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-v5.10.0.rst
@@ -0,0 +1,100 @@
+======================================
+PyPy2.7 and PyPy3.5 v5.10 dual release
+======================================
+
+The PyPy team is proud to release both PyPy2.7 v5.10 (an interpreter supporting
+Python 2.7 syntax), and a final PyPy3.5 v5.10 (an interpreter for Python
+3.5 syntax). The two releases are both based on much the same codebase, thus
+the dual release.
+
+This release is an incremental release with very few new features, the main
+feature being the final PyPy3.5 release that works on linux and OS X with beta
+windows support. It also includes fixes for `vmprof`_ cooperation with greenlets.
+
+Compared to 5.9, the 5.10 release contains mostly bugfixes and small improvements.
+We have in the pipeline big new features coming for PyPy 6.0 that did not make
+the release cut and should be available within the next couple months.
+
+As always, this release is 100% compatible with the previous one and fixed
+several issues and bugs raised by the growing community of PyPy users.
+As always, we strongly recommend updating.
+
+There are quite a few important changes that are in the pipeline that did not
+make it into the 5.10 release. Most important are speed improvements to cpyext
+(which will make numpy and pandas a bit faster) and utf8 branch that changes
+internal representation of unicode to utf8, which should help especially the
+Python 3.5 version of PyPy.
+
+This release concludes the Mozilla Open Source `grant`_ for having a compatible
+PyPy 3.5 release and we're very grateful for that. Of course, we will continue
+to improve PyPy 3.5 and probably move to 3.6 during the course of 2018.
+
+You can download the v5.10 releases here:
+
+ http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project.
+
+We would also like to thank our contributors and
+encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_
+with making RPython's JIT even better.
+
+.. _vmprof: http://vmprof.readthedocs.io
+.. _grant: https://morepypy.blogspot.com/2016/08/pypy-gets-funding-from-mozilla-for.html
+.. _`PyPy`: index.html
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`modules`: project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: project-ideas.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7 and CPython 3.5. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other `dynamic languages`_ to see what RPython
+can do for them.
+
+The PyPy release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD)
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://rpython.readthedocs.io/en/latest/examples.html
+
+Changelog
+=========
+
+* improve ssl handling on windows for pypy3 (makes pip work)
+* improve unicode handling in various error reporters
+* fix vmprof cooperation with greenlets
+* fix some things in cpyext
+* test and document the cmp(nan, nan) == 0 behaviour
+* don't crash when calling sleep with inf or nan
+* fix bugs in _io module
+* inspect.isbuiltin() now returns True for functions implemented in C
+* allow the sequences future-import, docstring, future-import for CPython bug compatibility
+* Issue #2699: non-ascii messages in warnings
+* posix.lockf
+* fixes for FreeBSD platform
+* add .debug files, so builds contain debugging info, instead of being stripped
+* improvements to cppyy
+* issue #2677 copy pure c PyBuffer_{From,To}Contiguous from cpython
+* issue #2682, split firstword on any whitespace in sqlite3
+* ctypes: allow ptr[0] = foo when ptr is a pointer to struct
+* matplotlib will work with tkagg backend once `matplotlib pr #9356`_ is merged
+* improvements to utf32 surrogate handling
+* cffi version bump to 1.11.2
+
+.. _`matplotlib pr #9356`: https://github.com/matplotlib/matplotlib/pull/9356
diff --git a/pypy/doc/release-v5.10.1.rst b/pypy/doc/release-v5.10.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-v5.10.1.rst
@@ -0,0 +1,63 @@
+===========
+PyPy 5.10.1
+===========
+
+We have released a bugfix PyPy3.5-v5.10.1
+due to the following issues:
+
+ * Fix ``time.sleep(float('nan'))`` which would hang on windows
+
+ * Fix missing ``errno`` constants on windows
+
+ * Fix issue 2718_ for the REPL on linux
+
+ * Fix an overflow in converting 3 secs to nanosecs (issue 2717_ )
+
+ * Flag kwarg to ``os.setxattr`` had no effect
+
+ * Fix the winreg module for unicode entries in the registry on windows
+
+Note that many of these fixes are for our new beta version of PyPy3.5 on
+windows. There may be more unicode problems in the windows beta version
+especially around the subject of directory- and file-names with non-ascii
+characters.
+
+Our downloads are available now. On macos, we recommend you wait for the
+Homebrew_ package.
+
+Thanks to those who reported the issues.
+
+.. _2718: https://bitbucket.org/pypy/pypy/issues/2718
+.. _2717: https://bitbucket.org/pypy/pypy/issues/2717
+.. _Homebrew: http://brewformulas.org/Pypy
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7 and CPython 3.5. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other `dynamic languages`_ to see what RPython
+can do for them.
+
+This PyPy 3.5 release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD)
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://rpython.readthedocs.io/en/latest/examples.html
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
--- a/pypy/doc/tool/makecontributor.py
+++ b/pypy/doc/tool/makecontributor.py
@@ -81,6 +81,7 @@
'Yasir Suhail':['yasirs'],
'Squeaky': ['squeaky'],
"Amaury Forgeot d'Arc": ['[email protected]'],
+ "Dodan Mihai": ['[email protected]'],
}
alias_map = {}
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -1,12 +1,16 @@
-===========================
-What's new in PyPy2.7 5.10+
-===========================
-
-.. this is a revision shortly after release-pypy2.7-v5.9.0
-.. startrev:d56dadcef996
-
-.. branch: cppyy-packaging
-Cleanup and improve cppyy packaging
-
-.. branch: docs-osx-brew-openssl
-
+===========================
+What's new in PyPy2.7 5.10+
+===========================
+
+.. this is a revision shortly after release-pypy2.7-v5.10.0
+.. startrev: 6b024edd9d12
+
+.. branch: cpyext-avoid-roundtrip
+
+Big refactoring of some cpyext code, which avoids a lot of nonsense when
+calling C from Python and vice-versa: the result is a big speedup in
+function/method calls, up to 6 times faster.
+
+.. branch: cpyext-datetime2
+
+Support ``tzinfo`` field on C-API datetime objects, fixes latest pandas HEAD
diff --git a/pypy/doc/whatsnew-pypy2-5.10.0.rst b/pypy/doc/whatsnew-pypy2-5.10.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-pypy2-5.10.0.rst
@@ -0,0 +1,42 @@
+==========================
+What's new in PyPy2.7 5.10
+==========================
+
+.. this is a revision shortly after release-pypy2.7-v5.9.0
+.. startrev:d56dadcef996
+
+
+.. branch: cppyy-packaging
+
+Cleanup and improve cppyy packaging
+
+.. branch: docs-osx-brew-openssl
+
+.. branch: keep-debug-symbols
+
+Add a smartstrip tool, which can optionally keep the debug symbols in a
+separate file, instead of just stripping them away. Use it in packaging
+
+.. branch: bsd-patches
+
+Fix failures on FreeBSD, contributed by David Naylor as patches on the issue
+tracker (issues 2694, 2695, 2696, 2697)
+
+.. branch: run-extra-tests
+
+Run extra_tests/ in buildbot
+
+.. branch: vmprof-0.4.10
+
+Upgrade the _vmprof backend to vmprof 0.4.10
+
+.. branch: fix-vmprof-stacklet-switch
+.. branch: fix-vmprof-stacklet-switch-2
+Fix a vmprof+continulets (i.e. greenlets, eventlet, gevent, ...)
+
+.. branch: win32-vcvars
+
+.. branch: rdict-fast-hash
+
+Make it possible to declare that the hash function of an r_dict is fast in RPython.
+
diff --git a/pypy/doc/whatsnew-pypy2-5.6.0.rst b/pypy/doc/whatsnew-pypy2-5.6.0.rst
--- a/pypy/doc/whatsnew-pypy2-5.6.0.rst
+++ b/pypy/doc/whatsnew-pypy2-5.6.0.rst
@@ -101,7 +101,7 @@
.. branch: newinitwarn
-Match CPython's stricter handling of __new/init__ arguments
+Match CPython's stricter handling of ``__new__``/``__init__`` arguments
.. branch: openssl-1.1
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -11,7 +11,7 @@
To build pypy-c you need a working python environment, and a C compiler.
It is possible to translate with a CPython 2.6 or later, but this is not
-the preferred way, because it will take a lot longer to run � depending
+the preferred way, because it will take a lot longer to run – depending
on your architecture, between two and three times as long. So head to
`our downloads`_ and get the latest stable version.
@@ -25,8 +25,10 @@
This compiler, while the standard one for Python 2.7, is deprecated. Microsoft has
made it available as the `Microsoft Visual C++ Compiler for Python 2.7`_ (the link
-was checked in Nov 2016). Note that the compiler suite will be installed in
-``C:\Users\<user name>\AppData\Local\Programs\Common\Microsoft\Visual C++ for Python``.
+was checked in Nov 2016). Note that the compiler suite may be installed in
+``C:\Users\<user name>\AppData\Local\Programs\Common\Microsoft\Visual C++ for Python``
+or in
+``C:\Program Files (x86)\Common Files\Microsoft\Visual C++ for Python``.
A current version of ``setuptools`` will be able to find it there. For
Windows 10, you must right-click the download, and under ``Properties`` ->
``Compatibility`` mark it as ``Run run this program in comatibility mode for``
@@ -41,7 +43,6 @@
-----------------------------------
We routinely test translation using v9, also known as Visual Studio 2008.
-Our buildbot is still using the Express Edition, not the compiler noted above.
Other configurations may work as well.
The translation scripts will set up the appropriate environment variables
@@ -81,6 +82,31 @@
.. _build instructions: http://pypy.org/download.html#building-from-source
+Setting Up Visual Studio for building SSL in Python3
+----------------------------------------------------
+
+On Python3, the ``ssl`` module is based on ``cffi``, and requires a build step after
+translation. However ``distutils`` does not support the Microsoft-provided Visual C
+compiler, and ``cffi`` depends on ``distutils`` to find the compiler. The
+traditional solution to this problem is to install the ``setuptools`` module
+via running ``-m ensurepip`` which installs ``pip`` and ``setuptools``. However
+``pip`` requires ``ssl``. So we have a chicken-and-egg problem: ``ssl`` depends on
+``cffi`` which depends on ``setuptools``, which depends on ``ensurepip``, which
+depends on ``ssl``.
+
+In order to solve this, the buildbot sets an environment variable that helps
+``distutils`` find the compiler without ``setuptools``::
+
+    set VS90COMNTOOLS=C:\Program Files (x86)\Common Files\Microsoft\Visual C++ for Python\9.0\VC\bin
+
+or whatever is appropriate for your machine. Note that this is not enough, you
+must also copy the ``vcvarsall.bat`` file from the ``...\9.0`` directory to the
+``...\9.0\VC`` directory, and edit it, changing the lines that set
+``VCINSTALLDIR`` and ``WindowsSdkDir``::
+
+ set VCINSTALLDIR=%~dp0\
+ set WindowsSdkDir=%~dp0\..\WinSDK\
+
Preparing Windows for the large build
-------------------------------------
diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py
--- a/pypy/goal/getnightly.py
+++ b/pypy/goal/getnightly.py
@@ -15,7 +15,7 @@
arch = 'linux'
cmd = 'wget "%s"'
TAR_OPTIONS += ' --wildcards'
- binfiles = "'*/bin/pypy' '*/bin/libpypy-c.so'"
+ binfiles = "'*/bin/pypy*' '*/bin/libpypy-c.so*'"
if os.uname()[-1].startswith('arm'):
arch += '-armhf-raspbian'
elif sys.platform.startswith('darwin'):
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -1,7 +1,7 @@
"""Python control flow graph generation and bytecode assembly."""
+import math
import os
-from rpython.rlib import rfloat
from rpython.rlib.objectmodel import we_are_translated
from pypy.interpreter.astcompiler import ast, misc, symtable
@@ -266,7 +266,7 @@
w_type = space.type(obj)
if space.is_w(w_type, space.w_float):
val = space.float_w(obj)
- if val == 0.0 and rfloat.copysign(1., val) < 0:
+ if val == 0.0 and math.copysign(1., val) < 0:
w_key = space.newtuple([obj, space.w_float, space.w_None])
else:
w_key = space.newtuple([obj, space.w_float])
@@ -276,9 +276,9 @@
real = space.float_w(w_real)
imag = space.float_w(w_imag)
real_negzero = (real == 0.0 and
- rfloat.copysign(1., real) < 0)
+ math.copysign(1., real) < 0)
imag_negzero = (imag == 0.0 and
- rfloat.copysign(1., imag) < 0)
+ math.copysign(1., imag) < 0)
if real_negzero and imag_negzero:
tup = [obj, space.w_complex, space.w_None, space.w_None,
space.w_None]
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -1246,3 +1246,7 @@
exc = py.test.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
" bytes in position 0-1: truncated \\xXX escape")
+ input = "u'\\x1'"
+ exc = py.test.raises(SyntaxError, self.get_ast, input).value
+ assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
+ " bytes in position 0-2: truncated \\xXX escape")
diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py
--- a/pypy/interpreter/pyparser/future.py
+++ b/pypy/interpreter/pyparser/future.py
@@ -85,13 +85,17 @@
# permissive parsing of the given list of tokens; it relies on
# the real parsing done afterwards to give errors.
it.skip_newlines()
- it.skip_name("r") or it.skip_name("u") or it.skip_name("ru")
- if it.skip(pygram.tokens.STRING):
- it.skip_newlines()
- while (it.skip_name("from") and
+ docstring_possible = True
+ while True:
+ it.skip_name("r") or it.skip_name("u") or it.skip_name("ru")
+ if docstring_possible and it.skip(pygram.tokens.STRING):
+ it.skip_newlines()
+ docstring_possible = False
+ if not (it.skip_name("from") and
it.skip_name("__future__") and
it.skip_name("import")):
+ break
it.skip(pygram.tokens.LPAR) # optionally
# return in 'last_position' any line-column pair that points
# somewhere inside the last __future__ import statement
diff --git a/pypy/interpreter/pyparser/test/test_future.py b/pypy/interpreter/pyparser/test/test_future.py
--- a/pypy/interpreter/pyparser/test/test_future.py
+++ b/pypy/interpreter/pyparser/test/test_future.py
@@ -208,3 +208,13 @@
'from __future__ import with_statement;')
f = run(s, (2, 23))
assert f == fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_WITH_STATEMENT
+
+def test_future_doc_future():
+ # for some reason people do this :-[
+ s = '''
+from __future__ import generators
+"Docstring"
+from __future__ import division
+ '''
+ f = run(s, (4, 24))
+ assert f == fut.CO_FUTURE_DIVISION | fut.CO_GENERATOR_ALLOWED
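The newly accepted pattern, for reference (illustration only; CPython already compiles this,
which is the "bug compatibility" being matched)::

    src = ('from __future__ import generators\n'
           '"Docstring"\n'
           'from __future__ import division\n')
    compile(src, '<example>', 'exec')   # no SyntaxError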
diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py
--- a/pypy/interpreter/test/test_unicodehelper.py
+++ b/pypy/interpreter/test/test_unicodehelper.py
@@ -1,4 +1,7 @@
-from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8
+import pytest
+import struct
+from pypy.interpreter.unicodehelper import (
+ encode_utf8, decode_utf8, unicode_encode_utf_32_be)
class FakeSpace:
pass
@@ -24,3 +27,23 @@
assert map(ord, got) == [0xd800, 0xdc00]
got = decode_utf8(space, "\xf0\x90\x80\x80")
assert map(ord, got) == [0x10000]
+
[email protected]('unich', [u"\ud800", u"\udc80"])
+def test_utf32_surrogates(unich):
+ assert (unicode_encode_utf_32_be(unich, 1, None) ==
+ struct.pack('>i', ord(unich)))
+ with pytest.raises(UnicodeEncodeError):
+ unicode_encode_utf_32_be(unich, 1, None, allow_surrogates=False)
+
+ def replace_with(ru, rs):
+ def errorhandler(errors, enc, msg, u, startingpos, endingpos):
+ if errors == 'strict':
+ raise UnicodeEncodeError(enc, u, startingpos, endingpos, msg)
+ return ru, rs, endingpos
+ return unicode_encode_utf_32_be(
+ u"<%s>" % unich, 3, None,
+ errorhandler, allow_surrogates=False)
+
+ assert replace_with(u'rep', None) == u'<rep>'.encode('utf-32-be')
+ assert (replace_with(None, '\xca\xfe\xca\xfe') ==
+ '\x00\x00\x00<\xca\xfe\xca\xfe\x00\x00\x00>')
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -1,7 +1,11 @@
+from rpython.rlib.objectmodel import specialize
+from rpython.rlib.rarithmetic import intmask
+from rpython.rlib.rstring import StringBuilder, UnicodeBuilder
+from rpython.rlib import runicode
+from rpython.rlib.runicode import (
+ default_unicode_error_encode, default_unicode_error_decode,
+ MAXUNICODE, BYTEORDER, BYTEORDER2, UNICHR)
from pypy.interpreter.error import OperationError
-from rpython.rlib.objectmodel import specialize
-from rpython.rlib import runicode
-from pypy.module._codecs import interp_codecs
@specialize.memo()
def decode_error_handler(space):
@@ -37,6 +41,7 @@
# These functions take and return unwrapped rpython strings and unicodes
def decode_unicode_escape(space, string):
+ from pypy.module._codecs import interp_codecs
state = space.fromcache(interp_codecs.CodecState)
unicodedata_handler = state.get_unicodedata_handler(space)
result, consumed = runicode.str_decode_unicode_escape(
@@ -71,3 +76,229 @@
uni, len(uni), "strict",
errorhandler=None,
allow_surrogates=True)
+
+# ____________________________________________________________
+# utf-32
+
+def str_decode_utf_32(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "native")
+ return result, length
+
+def str_decode_utf_32_be(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "big")
+ return result, length
+
+def str_decode_utf_32_le(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "little")
+ return result, length
+
+def py3k_str_decode_utf_32(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "native", 'utf-32-' + BYTEORDER2)
+ return result, length
+
+def py3k_str_decode_utf_32_be(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "big", 'utf-32-be')
+ return result, length
+
+def py3k_str_decode_utf_32_le(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "little", 'utf-32-le')
+ return result, length
+
+BOM32_DIRECT = intmask(0x0000FEFF)
+BOM32_REVERSE = intmask(0xFFFE0000)
+
+def str_decode_utf_32_helper(s, size, errors, final=True,
+ errorhandler=None,
+ byteorder="native",
+ public_encoding_name='utf32'):
+ if errorhandler is None:
+ errorhandler = default_unicode_error_decode
+ bo = 0
+
+ if BYTEORDER == 'little':
+ iorder = [0, 1, 2, 3]
+ else:
+ iorder = [3, 2, 1, 0]
+
+ # Check for BOM marks (U+FEFF) in the input and adjust current
+ # byte order setting accordingly. In native mode, the leading BOM
+ # mark is skipped, in all other modes, it is copied to the output
+ # stream as-is (giving a ZWNBSP character).
+ pos = 0
+ if byteorder == 'native':
+ if size >= 4:
+ bom = intmask(
+ (ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) |
+ (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]]))
+ if BYTEORDER == 'little':
+ if bom == BOM32_DIRECT:
+ pos += 4
+ bo = -1
+ elif bom == BOM32_REVERSE:
+ pos += 4
+ bo = 1
+ else:
+ if bom == BOM32_DIRECT:
+ pos += 4
+ bo = 1
+ elif bom == BOM32_REVERSE:
+ pos += 4
+ bo = -1
+ elif byteorder == 'little':
+ bo = -1
+ else:
+ bo = 1
+ if size == 0:
+ return u'', 0, bo
+ if bo == -1:
+ # force little endian
+ iorder = [0, 1, 2, 3]
+ elif bo == 1:
+ # force big endian
+ iorder = [3, 2, 1, 0]
+
+ result = UnicodeBuilder(size // 4)
+
+ while pos < size:
+ # remaining bytes at the end? (size should be divisible by 4)
+ if len(s) - pos < 4:
+ if not final:
+ break
+ r, pos = errorhandler(errors, public_encoding_name,
+ "truncated data",
+ s, pos, len(s))
+ result.append(r)
+ if len(s) - pos < 4:
+ break
+ continue
+        ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) |
+ (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]]))
+ if ch >= 0x110000:
+ r, pos = errorhandler(errors, public_encoding_name,
+ "codepoint not in range(0x110000)",
+ s, pos, len(s))
+ result.append(r)
+ continue
+
+ if MAXUNICODE < 65536 and ch >= 0x10000:
+ ch -= 0x10000L
+ result.append(unichr(0xD800 + (ch >> 10)))
+ result.append(unichr(0xDC00 + (ch & 0x03FF)))
+ else:
+ result.append(UNICHR(ch))
+ pos += 4
+ return result.build(), pos, bo
+
+def _STORECHAR32(result, CH, byteorder):
+ c0 = chr(((CH) >> 24) & 0xff)
+ c1 = chr(((CH) >> 16) & 0xff)
+ c2 = chr(((CH) >> 8) & 0xff)
+ c3 = chr((CH) & 0xff)
+ if byteorder == 'little':
+ result.append(c3)
+ result.append(c2)
+ result.append(c1)
+ result.append(c0)
+ else:
+ result.append(c0)
+ result.append(c1)
+ result.append(c2)
+ result.append(c3)
+
+def unicode_encode_utf_32_helper(s, size, errors,
+ errorhandler=None,
+ allow_surrogates=True,
+ byteorder='little',
+ public_encoding_name='utf32'):
+ if errorhandler is None:
+ errorhandler = default_unicode_error_encode
+ if size == 0:
+ if byteorder == 'native':
+ result = StringBuilder(4)
+ _STORECHAR32(result, 0xFEFF, BYTEORDER)
+ return result.build()
+ return ""
+
+ result = StringBuilder(size * 4 + 4)
+ if byteorder == 'native':
+ _STORECHAR32(result, 0xFEFF, BYTEORDER)
+ byteorder = BYTEORDER
+
+ pos = 0
+ while pos < size:
+ ch = ord(s[pos])
+ pos += 1
+ ch2 = 0
+ if not allow_surrogates and 0xD800 <= ch < 0xE000:
+ ru, rs, pos = errorhandler(
+ errors, public_encoding_name, 'surrogates not allowed',
+ s, pos - 1, pos)
+ if rs is not None:
+ # py3k only
+ if len(rs) % 4 != 0:
+ errorhandler(
+                        'strict', public_encoding_name, 'surrogates not allowed',
+ s, pos - 1, pos)
+ result.append(rs)
+ continue
+ for ch in ru:
+ if ord(ch) < 0xD800:
+ _STORECHAR32(result, ord(ch), byteorder)
+ else:
+ errorhandler(
+ 'strict', public_encoding_name,
+ 'surrogates not allowed', s, pos - 1, pos)
+ continue
+ if 0xD800 <= ch < 0xDC00 and MAXUNICODE < 65536 and pos < size:
+ ch2 = ord(s[pos])
+ if 0xDC00 <= ch2 < 0xE000:
+ ch = (((ch & 0x3FF) << 10) | (ch2 & 0x3FF)) + 0x10000
+ pos += 1
+ _STORECHAR32(result, ch, byteorder)
+
+ return result.build()
+
+def unicode_encode_utf_32(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "native")
+
+def unicode_encode_utf_32_be(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "big")
+
+def unicode_encode_utf_32_le(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "little")
+
+def py3k_unicode_encode_utf_32(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "native",
+ 'utf-32-' + BYTEORDER2)
+
+def py3k_unicode_encode_utf_32_be(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "big",
+ 'utf-32-be')
+
+def py3k_unicode_encode_utf_32_le(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "little",
+ 'utf-32-le')
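(A standalone sketch, not part of the changeset: the byte-order handling that
_STORECHAR32 implements above, rewritten in plain Python 2. The struct call is
only a cross-check and is an assumption of this example, not something the
RPython helper uses.)

    import struct

    def storechar32(result, ch, byteorder):
        # Split the code point into four bytes, most significant first.
        c0 = chr((ch >> 24) & 0xff)
        c1 = chr((ch >> 16) & 0xff)
        c2 = chr((ch >> 8) & 0xff)
        c3 = chr(ch & 0xff)
        if byteorder == 'little':
            result.extend([c3, c2, c1, c0])   # least significant byte first
        else:
            result.extend([c0, c1, c2, c3])   # most significant byte first

    out = []
    storechar32(out, 0xFEFF, 'little')        # the UTF-32-LE byte order mark
    assert ''.join(out) == struct.pack('<I', 0xFEFF)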
diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py
--- a/pypy/module/__builtin__/operation.py
+++ b/pypy/module/__builtin__/operation.py
@@ -2,11 +2,13 @@
Interp-level implementation of the basic space operations.
"""
+import math
+
from pypy.interpreter import gateway
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from rpython.rlib.runicode import UNICHR
-from rpython.rlib.rfloat import isfinite, isinf, round_double, round_away
+from rpython.rlib.rfloat import isfinite, round_double, round_away
from rpython.rlib import rfloat
import __builtin__
@@ -151,7 +153,7 @@
else:
# finite x, and ndigits is not unreasonably large
z = round_double(number, ndigits)
- if isinf(z):
+ if math.isinf(z):
raise oefmt(space.w_OverflowError,
"rounded value too large to represent")
return space.newfloat(z)
diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py
--- a/pypy/module/__builtin__/test/test_builtin.py
+++ b/pypy/module/__builtin__/test/test_builtin.py
@@ -404,6 +404,7 @@
def test_cmp(self):
+ assert cmp(float('nan'), float('nan')) == 0
assert cmp(9,9) == 0
assert cmp(0,9) < 0
assert cmp(9,0) > 0
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -58,6 +58,14 @@
}
+class PyPyDateTime(MixedModule):
+ appleveldefs = {}
+ interpleveldefs = {
+ 'dateinterop': 'interp_pypydatetime.W_DateTime_Date',
+ 'timeinterop' : 'interp_pypydatetime.W_DateTime_Time',
+ 'deltainterop' : 'interp_pypydatetime.W_DateTime_Delta',
+ }
+
class Module(MixedModule):
""" PyPy specific "magic" functions. A lot of them are experimental and
subject to change, many are internal. """
@@ -108,6 +116,7 @@
"thread": ThreadModule,
"intop": IntOpModule,
"os": OsModule,
+ '_pypydatetime': PyPyDateTime,
}
def setup_after_space_initialization(self):
diff --git a/pypy/module/__pypy__/interp_pypydatetime.py b/pypy/module/__pypy__/interp_pypydatetime.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/interp_pypydatetime.py
@@ -0,0 +1,24 @@
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.typedef import TypeDef
+from pypy.interpreter.gateway import interp2app
+from rpython.tool.sourcetools import func_with_new_name
+
+def create_class(name):
+ class W_Class(W_Root):
+        'builtin base class for datetime.%s to allow interop with cpyext' % name
+ def descr_new__(space, w_type):
+ return space.allocate_instance(W_Class, w_type)
+
+ W_Class.typedef = TypeDef(name,
+ __new__ = interp2app(func_with_new_name(
+ W_Class.descr_new__.im_func,
+ '%s_new' % (name,))),
+ )
+ W_Class.typedef.acceptable_as_base_class = True
+ return W_Class
+
+W_DateTime_Time = create_class('pypydatetime_time')
+W_DateTime_Date = create_class('pypydatetime_date')
+W_DateTime_Delta = create_class('pypydatetime_delta')
+
+
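(A hedged illustration of the class-factory pattern used by create_class above,
in plain Python; the names below are invented for the example and are not the
PyPy classes.)

    def make_base(name):
        # Build one distinct base class per datetime kind, carrying its own
        # name and docstring (type() lets the docstring be computed).
        return type(name, (object,),
                    {'__doc__': 'builtin base class for datetime.%s' % name})

    TimeBase = make_base('pypydatetime_time')
    DateBase = make_base('pypydatetime_date')
    assert TimeBase is not DateBase
    assert TimeBase.__name__ == 'pypydatetime_time'
    assert 'pypydatetime_time' in TimeBase.__doc__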
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.11.2"
+VERSION = "1.11.4"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.11.2", ("This test_c.py file is for testing a version"
+assert __version__ == "1.11.4", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -8,7 +8,8 @@
@unwrap_spec(cdef='text', module_name='text', source='text', packed=int)
def prepare(space, cdef, module_name, source, w_includes=None,
- w_extra_source=None, w_min_version=None, packed=False):
+ w_extra_source=None, w_min_version=None, packed=False,
+ w_extra_compile_args=None):
try:
import cffi
from cffi import FFI # <== the system one, which
@@ -55,10 +56,14 @@
sources = []
if w_extra_source is not None:
sources.append(space.str_w(w_extra_source))
+ kwargs = {}
+ if w_extra_compile_args is not None:
+ kwargs['extra_compile_args'] = space.unwrap(w_extra_compile_args)
ext = ffiplatform.get_extension(c_file, module_name,
include_dirs=[str(rdir)],
export_symbols=['_cffi_pypyinit_' + base_module_name],
- sources=sources)
+ sources=sources,
+ **kwargs)
ffiplatform.compile(str(rdir), ext)
for extension in ['so', 'pyd', 'dylib']:
@@ -2054,3 +2059,51 @@
"Such structs are only supported as return value if the function is
"
"'API mode' and non-variadic (i.e. declared inside
ffibuilder.cdef()"
"+ffibuilder.set_source() and not taking a final '...' argument)")
+
+ def test_gcc_visibility_hidden(self):
+ import sys
+ if sys.platform == 'win32':
+ skip("test for gcc/clang")
+ ffi, lib = self.prepare("""
+ int f(int);
+ """, "test_gcc_visibility_hidden", """
+ int f(int a) { return a + 40; }
+ """, extra_compile_args=['-fvisibility=hidden'])
+ assert lib.f(2) == 42
+
+ def test_override_default_definition(self):
+ ffi, lib = self.prepare("""
+ typedef long int16_t, char16_t;
+ """, "test_override_default_definition", """
+ """)
+        assert ffi.typeof("int16_t") is ffi.typeof("char16_t") is ffi.typeof("long")
+
+ def test_char16_char32_plain_c(self):
+ ffi, lib = self.prepare("""
+ char16_t foo_2bytes(char16_t);
+ char32_t foo_4bytes(char32_t);
+ """, "test_char16_char32_type_nocpp", """
+            #if !defined(__cplusplus) || (!defined(_LIBCPP_VERSION) && __cplusplus < 201103L)
+ typedef uint_least16_t char16_t;
+ typedef uint_least32_t char32_t;
+ #endif
+
+ char16_t foo_2bytes(char16_t a) { return (char16_t)(a + 42); }
+ char32_t foo_4bytes(char32_t a) { return (char32_t)(a + 42); }
+ """, min_version=(1, 11, 0))
+ assert lib.foo_2bytes(u'\u1234') == u'\u125e'
+ assert lib.foo_4bytes(u'\u1234') == u'\u125e'
+ assert lib.foo_4bytes(u'\U00012345') == u'\U0001236f'
+ raises(TypeError, lib.foo_2bytes, u'\U00012345')
+ raises(TypeError, lib.foo_2bytes, 1234)
+ raises(TypeError, lib.foo_4bytes, 1234)
+
+ def test_loader_spec(self):
+ import sys
+ ffi, lib = self.prepare("", "test_loader_spec", "")
+ if sys.version_info < (3,):
+ assert not hasattr(lib, '__loader__')
+ assert not hasattr(lib, '__spec__')
+ else:
+ assert lib.__loader__ is None
+ assert lib.__spec__ is None
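(The prepare() change above only forwards extra_compile_args when the test asked
for it; a minimal standalone sketch of that conditional-kwargs pattern.
make_ext below is a stand-in for an Extension factory, not the ffiplatform/cffi
API.)

    def make_ext(name, sources, include_dirs=(), extra_compile_args=()):
        # Stand-in for a setuptools/ffiplatform-style Extension factory.
        return dict(name=name, sources=list(sources),
                    include_dirs=list(include_dirs),
                    extra_compile_args=list(extra_compile_args))

    def build(name, sources, extra_compile_args=None):
        kwargs = {}
        # Forward the option only when the caller supplied it, so the
        # factory's own default stays in effect otherwise.
        if extra_compile_args is not None:
            kwargs['extra_compile_args'] = extra_compile_args
        return make_ext(name, sources, include_dirs=['.'], **kwargs)

    assert build('demo', ['demo.c'])['extra_compile_args'] == []
    assert build('demo', ['demo.c'],
                 extra_compile_args=['-fvisibility=hidden']
                 )['extra_compile_args'] == ['-fvisibility=hidden']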
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -1,10 +1,12 @@
from rpython.rlib import jit
from rpython.rlib.objectmodel import we_are_translated, not_rpython
from rpython.rlib.rstring import UnicodeBuilder
+from rpython.rlib import runicode
from rpython.rlib.runicode import code_to_unichr, MAXUNICODE
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
+from pypy.interpreter import unicodehelper
class VersionTag(object):
@@ -210,7 +212,8 @@
def xmlcharrefreplace_errors(space, w_exc):
check_exception(space, w_exc)
if space.isinstance_w(w_exc, space.w_UnicodeEncodeError):
-        obj = space.realunicode_w(space.getattr(w_exc, space.newtext('object')))
+ w_obj = space.getattr(w_exc, space.newtext('object'))
+ obj = space.realunicode_w(w_obj)
start = space.int_w(space.getattr(w_exc, space.newtext('start')))
w_end = space.getattr(w_exc, space.newtext('end'))
end = space.int_w(w_end)
@@ -236,7 +239,8 @@
def backslashreplace_errors(space, w_exc):
check_exception(space, w_exc)
if space.isinstance_w(w_exc, space.w_UnicodeEncodeError):
-        obj = space.realunicode_w(space.getattr(w_exc, space.newtext('object')))
+ w_obj = space.getattr(w_exc, space.newtext('object'))
+ obj = space.realunicode_w(w_obj)
start = space.int_w(space.getattr(w_exc, space.newtext('start')))
w_end = space.getattr(w_exc, space.newtext('end'))
end = space.int_w(w_end)
@@ -363,19 +367,23 @@
raise oefmt(space.w_TypeError, "handler must be callable")
# ____________________________________________________________
-# delegation to runicode
+# delegation to runicode/unicodehelper
-from rpython.rlib import runicode
+def _find_implementation(impl_name):
+ try:
+ func = getattr(unicodehelper, impl_name)
+ except AttributeError:
+ func = getattr(runicode, impl_name)
+ return func
def make_encoder_wrapper(name):
rname = "unicode_encode_%s" % (name.replace("_encode", ""), )
- assert hasattr(runicode, rname)
+ func = _find_implementation(rname)
@unwrap_spec(uni=unicode, errors='text_or_none')
def wrap_encoder(space, uni, errors="strict"):
if errors is None:
errors = 'strict'
state = space.fromcache(CodecState)
- func = getattr(runicode, rname)
result = func(uni, len(uni), errors, state.encode_error_handler)
return space.newtuple([space.newbytes(result), space.newint(len(uni))])
wrap_encoder.func_name = rname
@@ -383,7 +391,7 @@
def make_decoder_wrapper(name):
rname = "str_decode_%s" % (name.replace("_decode", ""), )
- assert hasattr(runicode, rname)
+ func = _find_implementation(rname)
@unwrap_spec(string='bufferstr', errors='text_or_none',
w_final=WrappedDefault(False))
def wrap_decoder(space, string, errors="strict", w_final=None):
@@ -391,7 +399,6 @@
errors = 'strict'
final = space.is_true(w_final)
state = space.fromcache(CodecState)
- func = getattr(runicode, rname)
result, consumed = func(string, len(string), errors,
final, state.decode_error_handler)
         return space.newtuple([space.newunicode(result), space.newint(consumed)])
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -115,10 +115,10 @@