Author: Ronan Lamy <[email protected]>
Branch: refactor-slots
Changeset: r93657:c4f4a2bf8eac
Date: 2018-01-12 17:19 +0000
http://bitbucket.org/pypy/pypy/changeset/c4f4a2bf8eac/
Log: hg merge default
diff too long, truncating to 2000 out of 102699 lines
diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -71,6 +71,8 @@
^lib_pypy/.+.c$
^lib_pypy/.+.o$
^lib_pypy/.+.so$
+^lib_pypy/.+.pyd$
+^lib_pypy/Release/
^pypy/doc/discussion/.+\.html$
^include/.+\.h$
^include/.+\.inl$
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -40,3 +40,14 @@
2875f328eae2216a87f3d6f335092832eb031f56 release-pypy3.5-v5.7.1
c925e73810367cd960a32592dd7f728f436c125c release-pypy2.7-v5.8.0
a37ecfe5f142bc971a86d17305cc5d1d70abec64 release-pypy3.5-v5.8.0
+03d614975835870da65ff0481e1edad68ebbcb8d release-pypy2.7-v5.9.0
+d72f9800a42b46a8056951b1da2426d2c2d8d502 release-pypy3.5-v5.9.0
+03d614975835870da65ff0481e1edad68ebbcb8d release-pypy2.7-v5.9.0
+84a2f3e6a7f88f2fe698e473998755b3bd1a12e2 release-pypy2.7-v5.9.0
+0e7ea4fe15e82d5124e805e2e4a37cae1a402d4b release-pypy2.7-v5.10.0
+a91df6163fb76df245091f741dbf6a23ddc72374 release-pypy3.5-v5.10.0
+a91df6163fb76df245091f741dbf6a23ddc72374 release-pypy3.5-v5.10.0
+0000000000000000000000000000000000000000 release-pypy3.5-v5.10.0
+0000000000000000000000000000000000000000 release-pypy3.5-v5.10.0
+09f9160b643e3f02ccb8c843b2fbb4e5cbf54082 release-pypy3.5-v5.10.0
+3f6eaa010fce78cc7973bdc1dfdb95970f08fed2 release-pypy3.5-v5.10.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -30,7 +30,7 @@
DEALINGS IN THE SOFTWARE.
-PyPy Copyright holders 2003-2017
+PyPy Copyright holders 2003-2018
-----------------------------------
Except when otherwise stated (look for LICENSE files or information at
@@ -339,8 +339,10 @@
Stanisław Halik
Julien Phalip
Roman Podoliaka
+ Steve Papanik
Eli Stevens
Boglarka Vezer
+ gabrielg
PavloKapyshin
Tomer Chachamu
Christopher Groskopf
@@ -363,11 +365,13 @@
Konrad Delong
Dinu Gherman
pizi
+ Tomáš Pružina
James Robert
Armin Ronacher
Diana Popa
Mads Kiilerich
Brett Cannon
+ Caleb Hattingh
aliceinwire
Zooko Wilcox-O Hearn
James Lan
@@ -388,6 +392,7 @@
Jason Madden
Yaroslav Fedevych
Even Wiik Thomassen
+ [email protected]
Stefan Marr
Heinrich-Heine University, Germany
diff --git a/_pytest/terminal.py b/_pytest/terminal.py
--- a/_pytest/terminal.py
+++ b/_pytest/terminal.py
@@ -366,11 +366,11 @@
EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED)
if exitstatus in summary_exit_codes:
- self.config.hook.pytest_terminal_summary(terminalreporter=self)
self.summary_errors()
self.summary_failures()
self.summary_warnings()
self.summary_passes()
+ self.config.hook.pytest_terminal_summary(terminalreporter=self)
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
diff --git a/extra_tests/requirements.txt b/extra_tests/requirements.txt
new file mode 100644
--- /dev/null
+++ b/extra_tests/requirements.txt
@@ -0,0 +1,3 @@
+pytest
+hypothesis
+vmprof
diff --git a/extra_tests/test_bytes.py b/extra_tests/test_bytes.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_bytes.py
@@ -0,0 +1,84 @@
+from hypothesis import strategies as st
+from hypothesis import given, example
+
+st_bytestring = st.binary() | st.binary().map(bytearray)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_find(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.find(u) <= len(prefix)
+ assert s.find(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_index(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.index(u) <= len(prefix)
+ assert s.index(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_rfind(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rfind(u) >= len(prefix)
+ assert s.rfind(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st_bytestring, st_bytestring, st_bytestring)
+def test_rindex(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rindex(u) >= len(prefix)
+ assert s.rindex(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+def adjust_indices(u, start, end):
+ if end < 0:
+ end = max(end + len(u), 0)
+ else:
+ end = min(end, len(u))
+ if start < 0:
+ start = max(start + len(u), 0)
+ return start, end
+
+@given(st_bytestring, st_bytestring)
+def test_startswith_basic(u, v):
+ assert u.startswith(v) is (u[:len(v)] == v)
+
+@example(b'x', b'', 1)
+@example(b'x', b'', 2)
+@given(st_bytestring, st_bytestring, st.integers())
+def test_startswith_start(u, v, start):
+ expected = u[start:].startswith(v) if v else (start <= len(u))
+ assert u.startswith(v, start) is expected
+
+@example(b'x', b'', 1, 0)
+@example(b'xx', b'', -1, 0)
+@given(st_bytestring, st_bytestring, st.integers(), st.integers())
+def test_startswith_3(u, v, start, end):
+ if v:
+ expected = u[start:end].startswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.startswith(v, start, end) is expected
+
+@given(st_bytestring, st_bytestring)
+def test_endswith_basic(u, v):
+ if len(v) > len(u):
+ assert u.endswith(v) is False
+ else:
+ assert u.endswith(v) is (u[len(u) - len(v):] == v)
+
+@example(b'x', b'', 1)
+@example(b'x', b'', 2)
+@given(st_bytestring, st_bytestring, st.integers())
+def test_endswith_2(u, v, start):
+ expected = u[start:].endswith(v) if v else (start <= len(u))
+ assert u.endswith(v, start) is expected
+
+@example(b'x', b'', 1, 0)
+@example(b'xx', b'', -1, 0)
+@given(st_bytestring, st_bytestring, st.integers(), st.integers())
+def test_endswith_3(u, v, start, end):
+ if v:
+ expected = u[start:end].endswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.endswith(v, start, end) is expected
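A side note on the ``@example(b'x', b'', 2)`` cases above: the "CPython leaks implementation details" comments refer to the empty-needle corner case, where CPython consults the adjusted start index rather than the sliced string. A minimal illustration, not part of the diff, runnable on Python 2 or 3:

    u = b'x'
    print(u[2:].startswith(b''))   # True  -- slicing first: b''.startswith(b'')
    print(u.startswith(b'', 2))    # False -- start index lies past the end of u

This is exactly the ``expected = start <= len(u)`` branch used in the tests.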
diff --git a/pypy/module/test_lib_pypy/test_json_extra.py b/extra_tests/test_json.py
rename from pypy/module/test_lib_pypy/test_json_extra.py
rename to extra_tests/test_json.py
--- a/pypy/module/test_lib_pypy/test_json_extra.py
+++ b/extra_tests/test_json.py
@@ -1,4 +1,6 @@
-import py, json
+import pytest
+import json
+from hypothesis import given, strategies
def is_(x, y):
return type(x) is type(y) and x == y
@@ -6,12 +8,26 @@
def test_no_ensure_ascii():
assert is_(json.dumps(u"\u1234", ensure_ascii=False), u'"\u1234"')
assert is_(json.dumps("\xc0", ensure_ascii=False), '"\xc0"')
- e = py.test.raises(UnicodeDecodeError, json.dumps,
- (u"\u1234", "\xc0"), ensure_ascii=False)
- assert str(e.value).startswith("'ascii' codec can't decode byte 0xc0 ")
- e = py.test.raises(UnicodeDecodeError, json.dumps,
- ("\xc0", u"\u1234"), ensure_ascii=False)
- assert str(e.value).startswith("'ascii' codec can't decode byte 0xc0 ")
+ with pytest.raises(UnicodeDecodeError) as excinfo:
+ json.dumps((u"\u1234", "\xc0"), ensure_ascii=False)
+ assert str(excinfo.value).startswith(
+ "'ascii' codec can't decode byte 0xc0 ")
+ with pytest.raises(UnicodeDecodeError) as excinfo:
+ json.dumps(("\xc0", u"\u1234"), ensure_ascii=False)
+ assert str(excinfo.value).startswith(
+ "'ascii' codec can't decode byte 0xc0 ")
def test_issue2191():
assert is_(json.dumps(u"xxx", ensure_ascii=False), u'"xxx"')
+
+jsondata = strategies.recursive(
+ strategies.none() |
+ strategies.booleans() |
+ strategies.floats(allow_nan=False) |
+ strategies.text(),
+ lambda children: strategies.lists(children) |
+ strategies.dictionaries(strategies.text(), children))
+
+@given(jsondata)
+def test_roundtrip(d):
+ assert json.loads(json.dumps(d)) == d
diff --git a/extra_tests/test_textio.py b/extra_tests/test_textio.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_textio.py
@@ -0,0 +1,48 @@
+from hypothesis import given, strategies as st
+
+from io import BytesIO, TextIOWrapper
+import os
+
+def translate_newlines(text):
+ text = text.replace('\r\n', '\n')
+ text = text.replace('\r', '\n')
+ return text.replace('\n', os.linesep)
+
[email protected]
+def st_readline_universal(
+ draw, st_nlines=st.integers(min_value=0, max_value=10)):
+ n_lines = draw(st_nlines)
+ lines = draw(st.lists(
+ st.text(st.characters(blacklist_characters='\r\n')),
+ min_size=n_lines, max_size=n_lines))
+ limits = []
+ for line in lines:
+ limit = draw(st.integers(min_value=0, max_value=len(line) + 5))
+ limits.append(limit)
+ limits.append(-1)
+ endings = draw(st.lists(
+ st.sampled_from(['\n', '\r', '\r\n']),
+ min_size=n_lines, max_size=n_lines))
+ return (
+ ''.join(line + ending for line, ending in zip(lines, endings)),
+ limits)
+
+@given(data=st_readline_universal(),
+ mode=st.sampled_from(['\r', '\n', '\r\n', '', None]))
+def test_readline(data, mode):
+ txt, limits = data
+ textio = TextIOWrapper(
+ BytesIO(txt.encode('utf-8', 'surrogatepass')),
+ encoding='utf-8', errors='surrogatepass', newline=mode)
+ lines = []
+ for limit in limits:
+ line = textio.readline(limit)
+ if limit >= 0:
+ assert len(line) <= limit
+ if line:
+ lines.append(line)
+ elif limit:
+ break
+ if mode is None:
+ txt = translate_newlines(txt)
+ assert txt.startswith(u''.join(lines))
diff --git a/extra_tests/test_unicode.py b/extra_tests/test_unicode.py
--- a/extra_tests/test_unicode.py
+++ b/extra_tests/test_unicode.py
@@ -1,3 +1,4 @@
+import sys
import pytest
from hypothesis import strategies as st
from hypothesis import given, settings, example
@@ -32,3 +33,89 @@
@given(s=st.text())
def test_composition(s, norm1, norm2, norm3):
assert normalize(norm2, normalize(norm1, s)) == normalize(norm3, s)
+
+@given(st.text(), st.text(), st.text())
+def test_find(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.find(u) <= len(prefix)
+ assert s.find(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st.text(), st.text(), st.text())
+def test_index(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert 0 <= s.index(u) <= len(prefix)
+ assert s.index(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st.text(), st.text(), st.text())
+def test_rfind(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rfind(u) >= len(prefix)
+ assert s.rfind(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+@given(st.text(), st.text(), st.text())
+def test_rindex(u, prefix, suffix):
+ s = prefix + u + suffix
+ assert s.rindex(u) >= len(prefix)
+ assert s.rindex(u, len(prefix), len(s) - len(suffix)) == len(prefix)
+
+def adjust_indices(u, start, end):
+ if end < 0:
+ end = max(end + len(u), 0)
+ else:
+ end = min(end, len(u))
+ if start < 0:
+ start = max(start + len(u), 0)
+ return start, end
+
+@given(st.text(), st.text())
+def test_startswith_basic(u, v):
+ assert u.startswith(v) is (u[:len(v)] == v)
+
+@example(u'x', u'', 1)
+@example(u'x', u'', 2)
+@given(st.text(), st.text(), st.integers())
+def test_startswith_2(u, v, start):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:].startswith(v)
+ else: # CPython leaks implementation details in this case
+ expected = start <= len(u)
+ assert u.startswith(v, start) is expected
+
+@example(u'x', u'', 1, 0)
+@example(u'xx', u'', -1, 0)
+@given(st.text(), st.text(), st.integers(), st.integers())
+def test_startswith_3(u, v, start, end):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:end].startswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.startswith(v, start, end) is expected
+
+@given(st.text(), st.text())
+def test_endswith_basic(u, v):
+ if len(v) > len(u):
+ assert u.endswith(v) is False
+ else:
+ assert u.endswith(v) is (u[len(u) - len(v):] == v)
+
+@example(u'x', u'', 1)
+@example(u'x', u'', 2)
+@given(st.text(), st.text(), st.integers())
+def test_endswith_2(u, v, start):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:].endswith(v)
+ else: # CPython leaks implementation details in this case
+ expected = start <= len(u)
+ assert u.endswith(v, start) is expected
+
+@example(u'x', u'', 1, 0)
+@example(u'xx', u'', -1, 0)
+@given(st.text(), st.text(), st.integers(), st.integers())
+def test_endswith_3(u, v, start, end):
+ if v or sys.version_info[0] == 2:
+ expected = u[start:end].endswith(v)
+ else: # CPython leaks implementation details in this case
+ start0, end0 = adjust_indices(u, start, end)
+ expected = start0 <= len(u) and start0 <= end0
+ assert u.endswith(v, start, end) is expected
diff --git a/extra_tests/test_vmprof_greenlet.py b/extra_tests/test_vmprof_greenlet.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_vmprof_greenlet.py
@@ -0,0 +1,28 @@
+import time
+import pytest
+import greenlet
+vmprof = pytest.importorskip('vmprof')
+
+def count_samples(filename):
+ stats = vmprof.read_profile(filename)
+ return len(stats.profiles)
+
+def cpuburn(duration):
+ end = time.time() + duration
+ while time.time() < end:
+ pass
+
+def test_sampling_inside_callback(tmpdir):
+ # see also test_sampling_inside_callback inside
+ # pypy/module/_continuation/test/test_stacklet.py
+ #
+ G = greenlet.greenlet(cpuburn)
+ fname = tmpdir.join('log.vmprof')
+ with fname.open('w+b') as f:
+ vmprof.enable(f.fileno(), 1/250.0)
+ G.switch(0.1)
+ vmprof.disable()
+
+ samples = count_samples(str(fname))
+ # 0.1 seconds at 250Hz should be 25 samples
+ assert 23 < samples < 27
diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py
--- a/lib-python/2.7/ctypes/__init__.py
+++ b/lib-python/2.7/ctypes/__init__.py
@@ -360,14 +360,15 @@
self._FuncPtr = _FuncPtr
if handle is None:
- if flags & _FUNCFLAG_CDECL:
- pypy_dll = _ffi.CDLL(name, mode)
- else:
- pypy_dll = _ffi.WinDLL(name, mode)
- self.__pypy_dll__ = pypy_dll
- handle = int(pypy_dll)
- if _sys.maxint > 2 ** 32:
- handle = int(handle) # long -> int
+ handle = 0
+ if flags & _FUNCFLAG_CDECL:
+ pypy_dll = _ffi.CDLL(name, mode, handle)
+ else:
+ pypy_dll = _ffi.WinDLL(name, mode, handle)
+ self.__pypy_dll__ = pypy_dll
+ handle = int(pypy_dll)
+ if _sys.maxint > 2 ** 32:
+ handle = int(handle) # long -> int
self._handle = handle
def __repr__(self):
diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py
--- a/lib-python/2.7/inspect.py
+++ b/lib-python/2.7/inspect.py
@@ -40,6 +40,10 @@
import linecache
from operator import attrgetter
from collections import namedtuple
+try:
+ from cpyext import is_cpyext_function as _is_cpyext_function
+except ImportError:
+ _is_cpyext_function = lambda obj: False
# These constants are from Include/code.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
@@ -230,7 +234,7 @@
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
- return isinstance(object, types.BuiltinFunctionType)
+ return isinstance(object, types.BuiltinFunctionType) or _is_cpyext_function(object)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py
--- a/lib-python/2.7/subprocess.py
+++ b/lib-python/2.7/subprocess.py
@@ -1296,7 +1296,7 @@
'copyfile' in caller.f_globals):
dest_dir = sys.pypy_resolvedirof(target_executable)
src_dir = sys.pypy_resolvedirof(sys.executable)
- for libname in ['libpypy-c.so', 'libpypy-c.dylib']:
+ for libname in ['libpypy-c.so', 'libpypy-c.dylib', 'libpypy-c.dll']:
dest_library = os.path.join(dest_dir, libname)
src_library = os.path.join(src_dir, libname)
if os.path.exists(src_library):
diff --git a/lib-python/2.7/test/test_urllib2net.py b/lib-python/2.7/test/test_urllib2net.py
--- a/lib-python/2.7/test/test_urllib2net.py
+++ b/lib-python/2.7/test/test_urllib2net.py
@@ -286,7 +286,7 @@
self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)
u.close()
- FTP_HOST = 'ftp://ftp.debian.org/debian/'
+ FTP_HOST = 'ftp://www.pythontest.net/'
def test_ftp_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
diff --git a/lib-python/2.7/warnings.py b/lib-python/2.7/warnings.py
--- a/lib-python/2.7/warnings.py
+++ b/lib-python/2.7/warnings.py
@@ -43,11 +43,12 @@
unicodetype = unicode
except NameError:
unicodetype = ()
+ template = "%s: %s: %s\n"
try:
message = str(message)
except UnicodeEncodeError:
- pass
- s = "%s: %s: %s\n" % (lineno, category.__name__, message)
+ template = unicode(template)
+ s = template % (lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py
--- a/lib_pypy/_ctypes/array.py
+++ b/lib_pypy/_ctypes/array.py
@@ -8,60 +8,64 @@
class ArrayMeta(_CDataMeta):
def __new__(self, name, cls, typedict):
res = type.__new__(self, name, cls, typedict)
- if '_type_' in typedict:
- ffiarray = _rawffi.Array(typedict['_type_']._ffishape_)
- res._ffiarray = ffiarray
- subletter = getattr(typedict['_type_'], '_type_', None)
- if subletter == 'c':
- def getvalue(self):
- return _rawffi.charp2string(self._buffer.buffer,
- self._length_)
- def setvalue(self, val):
- # we don't want to have buffers here
- if len(val) > self._length_:
- raise ValueError("%r too long" % (val,))
- if isinstance(val, str):
- _rawffi.rawstring2charp(self._buffer.buffer, val)
- else:
- for i in range(len(val)):
- self[i] = val[i]
- if len(val) < self._length_:
- self._buffer[len(val)] = '\x00'
- res.value = property(getvalue, setvalue)
- def getraw(self):
- return _rawffi.charp2rawstring(self._buffer.buffer,
- self._length_)
+ if cls == (_CData,): # this is the Array class defined below
+ res._ffiarray = None
+ return res
+ if not hasattr(res, '_length_') or not isinstance(res._length_,
+ (int, long)):
+ raise AttributeError(
+ "class must define a '_length_' attribute, "
+ "which must be a positive integer")
+ ffiarray = res._ffiarray = _rawffi.Array(res._type_._ffishape_)
+ subletter = getattr(res._type_, '_type_', None)
+ if subletter == 'c':
+ def getvalue(self):
+ return _rawffi.charp2string(self._buffer.buffer,
+ self._length_)
+ def setvalue(self, val):
+ # we don't want to have buffers here
+ if len(val) > self._length_:
+ raise ValueError("%r too long" % (val,))
+ if isinstance(val, str):
+ _rawffi.rawstring2charp(self._buffer.buffer, val)
+ else:
+ for i in range(len(val)):
+ self[i] = val[i]
+ if len(val) < self._length_:
+ self._buffer[len(val)] = b'\x00'
+ res.value = property(getvalue, setvalue)
- def setraw(self, buffer):
- if len(buffer) > self._length_:
- raise ValueError("%r too long" % (buffer,))
- _rawffi.rawstring2charp(self._buffer.buffer, buffer)
- res.raw = property(getraw, setraw)
- elif subletter == 'u':
- def getvalue(self):
- return _rawffi.wcharp2unicode(self._buffer.buffer,
- self._length_)
+ def getraw(self):
+ return _rawffi.charp2rawstring(self._buffer.buffer,
+ self._length_)
- def setvalue(self, val):
- # we don't want to have buffers here
- if len(val) > self._length_:
- raise ValueError("%r too long" % (val,))
- if isinstance(val, unicode):
- target = self._buffer
- else:
- target = self
- for i in range(len(val)):
- target[i] = val[i]
- if len(val) < self._length_:
- target[len(val)] = u'\x00'
- res.value = property(getvalue, setvalue)
-
- if '_length_' in typedict:
- res._ffishape_ = (ffiarray, typedict['_length_'])
- res._fficompositesize_ = res._sizeofinstances()
- else:
- res._ffiarray = None
+ def setraw(self, buffer):
+ if len(buffer) > self._length_:
+ raise ValueError("%r too long" % (buffer,))
+ _rawffi.rawstring2charp(self._buffer.buffer, buffer)
+ res.raw = property(getraw, setraw)
+ elif subletter == 'u':
+ def getvalue(self):
+ return _rawffi.wcharp2unicode(self._buffer.buffer,
+ self._length_)
+
+ def setvalue(self, val):
+ # we don't want to have buffers here
+ if len(val) > self._length_:
+ raise ValueError("%r too long" % (val,))
+ if isinstance(val, unicode):
+ target = self._buffer
+ else:
+ target = self
+ for i in range(len(val)):
+ target[i] = val[i]
+ if len(val) < self._length_:
+ target[len(val)] = u'\x00'
+ res.value = property(getvalue, setvalue)
+
+ res._ffishape_ = (ffiarray, res._length_)
+ res._fficompositesize_ = res._sizeofinstances()
return res
from_address = cdata_from_address
@@ -156,7 +160,7 @@
l = [self[i] for i in range(start, stop, step)]
letter = getattr(self._type_, '_type_', None)
if letter == 'c':
- return "".join(l)
+ return b"".join(l)
if letter == 'u':
return u"".join(l)
return l
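For orientation, the refactored ``ArrayMeta.__new__`` above now rejects array classes that lack an integer ``_length_`` at class-creation time, using the error message shown in the diff. A small sketch against the standard ``ctypes`` API (the class names here are invented for illustration):

    import ctypes

    class IntArray4(ctypes.Array):      # well-formed: both _type_ and _length_
        _type_ = ctypes.c_int
        _length_ = 4

    try:
        class Broken(ctypes.Array):     # missing _length_ -> rejected up front
            _type_ = ctypes.c_int
    except AttributeError as e:
        print(e)  # "class must define a '_length_' attribute, which must be a positive integer"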
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -176,6 +176,10 @@
def _get_buffer_value(self):
return self._buffer[0]
+ def _copy_to(self, addr):
+ target = type(self).from_address(addr)._buffer
+ target[0] = self._get_buffer_value()
+
def _to_ffi_param(self):
if self.__class__._is_pointer_like():
return self._get_buffer_value()
diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py
--- a/lib_pypy/_ctypes/pointer.py
+++ b/lib_pypy/_ctypes/pointer.py
@@ -114,7 +114,9 @@
cobj = self._type_.from_param(value)
if ensure_objects(cobj) is not None:
store_reference(self, index, cobj._objects)
- self._subarray(index)[0] = cobj._get_buffer_value()
+ address = self._buffer[0]
+ address += index * sizeof(self._type_)
+ cobj._copy_to(address)
def __nonzero__(self):
return self._buffer[0] != 0
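The new ``_copy_to()`` call above is what makes item assignment through a pointer copy the whole referenced object rather than only the first word of its buffer (see the changelog entry "ctypes: allow ptr[0] = foo when ptr is a pointer to struct" later in this merge). A minimal sketch of that use case with the standard ``ctypes`` API; ``Point`` is an invented example type:

    from ctypes import Structure, c_int, pointer

    class Point(Structure):
        _fields_ = [("x", c_int), ("y", c_int)]

    p = Point(1, 2)
    ptr = pointer(p)
    ptr[0] = Point(10, 20)   # must copy the entire struct into p's buffer
    assert (p.x, p.y) == (10, 20)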
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -291,6 +291,11 @@
def _get_buffer_value(self):
return self._buffer.buffer
+ def _copy_to(self, addr):
+ from ctypes import memmove
+ origin = self._get_buffer_value()
+ memmove(addr, origin, self._fficompositesize_)
+
def _to_ffi_param(self):
return self._buffer
diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py
--- a/lib_pypy/_ctypes_test.py
+++ b/lib_pypy/_ctypes_test.py
@@ -21,5 +21,11 @@
with fp:
imp.load_module('_ctypes_test', fp, filename, description)
except ImportError:
+ if os.name == 'nt':
+ # hack around finding compilers on win32
+ try:
+ import setuptools
+ except ImportError:
+ pass
print('could not find _ctypes_test in %s' % output_dir)
_pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test',
output_dir)
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -1027,21 +1027,25 @@
if '\0' in sql:
raise ValueError("the query contains a null character")
- first_word = sql.lstrip().split(" ")[0].upper()
- if first_word == "":
+
+ if sql:
+ first_word = sql.lstrip().split()[0].upper()
+ if first_word == '':
+ self._type = _STMT_TYPE_INVALID
+ if first_word == "SELECT":
+ self._type = _STMT_TYPE_SELECT
+ elif first_word == "INSERT":
+ self._type = _STMT_TYPE_INSERT
+ elif first_word == "UPDATE":
+ self._type = _STMT_TYPE_UPDATE
+ elif first_word == "DELETE":
+ self._type = _STMT_TYPE_DELETE
+ elif first_word == "REPLACE":
+ self._type = _STMT_TYPE_REPLACE
+ else:
+ self._type = _STMT_TYPE_OTHER
+ else:
self._type = _STMT_TYPE_INVALID
- elif first_word == "SELECT":
- self._type = _STMT_TYPE_SELECT
- elif first_word == "INSERT":
- self._type = _STMT_TYPE_INSERT
- elif first_word == "UPDATE":
- self._type = _STMT_TYPE_UPDATE
- elif first_word == "DELETE":
- self._type = _STMT_TYPE_DELETE
- elif first_word == "REPLACE":
- self._type = _STMT_TYPE_REPLACE
- else:
- self._type = _STMT_TYPE_OTHER
if isinstance(sql, unicode):
sql = sql.encode('utf-8')
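Besides restructuring the branches, the hunk above switches from ``split(" ")`` to ``split()``, which is what the changelog entry "split firstword on any whitespace in sqlite3" refers to. A quick illustration in plain Python, independent of ``_sqlite3``:

    sql = "SELECT\t* FROM t"
    print(repr(sql.lstrip().split(" ")[0].upper()))  # 'SELECT\t*' -- old: tab is not a separator
    print(repr(sql.lstrip().split()[0].upper()))     # 'SELECT'    -- new: any whitespace splits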
diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py
--- a/lib_pypy/_testcapi.py
+++ b/lib_pypy/_testcapi.py
@@ -16,4 +16,10 @@
with fp:
imp.load_module('_testcapi', fp, filename, description)
except ImportError:
+ if os.name == 'nt':
+ # hack around finding compilers on win32
+ try:
+ import setuptools
+ except ImportError:
+ pass
_pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir)
diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py
--- a/lib_pypy/_tkinter/app.py
+++ b/lib_pypy/_tkinter/app.py
@@ -119,7 +119,7 @@
tklib.TCL_GLOBAL_ONLY)
# This is used to get the application class for Tk 4.1 and up
- argv0 = className.lower()
+ argv0 = className.lower().encode('ascii')
tklib.Tcl_SetVar(self.interp, "argv0", argv0,
tklib.TCL_GLOBAL_ONLY)
@@ -180,6 +180,9 @@
if err == tklib.TCL_ERROR:
self.raiseTclError()
+ def interpaddr(self):
+ return int(tkffi.cast('size_t', self.interp))
+
def _var_invoke(self, func, *args, **kwargs):
if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread():
# The current thread is not the interpreter thread.
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.11.1
+Version: 1.11.3
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
-__version__ = "1.11.1"
-__version_info__ = (1, 11, 1)
+__version__ = "1.11.3"
+__version_info__ = (1, 11, 3)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -7,11 +7,38 @@
we can learn about Py_DEBUG from pyconfig.h, but it is unclear if
the same works for the other two macros. Py_DEBUG implies them,
but not the other way around.
+
+ Issue #350: more mess: on Windows, with _MSC_VER, we have to define
+ Py_LIMITED_API even before including pyconfig.h. In that case, we
+ guess what pyconfig.h will do to the macros above, and check our
+ guess after the #include.
*/
#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API)
-# include <pyconfig.h>
-# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG)
-# define Py_LIMITED_API
+# ifdef _MSC_VER
+# if !defined(_DEBUG) && !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG)
+# define Py_LIMITED_API
+# endif
+# include <pyconfig.h>
+ /* sanity-check: Py_LIMITED_API will cause crashes if any of these
+ are also defined. Normally, the Python file PC/pyconfig.h does not
+ cause any of these to be defined, with the exception that _DEBUG
+ causes Py_DEBUG. Double-check that. */
+# ifdef Py_LIMITED_API
+# if defined(Py_DEBUG)
+# error "pyconfig.h unexpectedly defines Py_DEBUG but _DEBUG is not set"
+# endif
+# if defined(Py_TRACE_REFS)
+# error "pyconfig.h unexpectedly defines Py_TRACE_REFS"
+# endif
+# if defined(Py_REF_DEBUG)
+# error "pyconfig.h unexpectedly defines Py_REF_DEBUG"
+# endif
+# endif
+# else
+# include <pyconfig.h>
+# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG)
+# define Py_LIMITED_API
+# endif
# endif
#endif
@@ -238,9 +265,9 @@
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x)
{
if (sizeof(_cffi_wchar_t) == 2)
- return _cffi_from_c_wchar_t(x);
+ return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
- return _cffi_from_c_wchar3216_t(x);
+ return _cffi_from_c_wchar3216_t((int)x);
}
_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o)
@@ -254,7 +281,7 @@
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(int x)
{
if (sizeof(_cffi_wchar_t) == 4)
- return _cffi_from_c_wchar_t(x);
+ return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
return _cffi_from_c_wchar3216_t(x);
}
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -247,7 +247,7 @@
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.11.1"
+ "\ncompiled with cffi version: 1.11.3"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -295,8 +295,9 @@
base_module_name = self.module_name.split('.')[-1]
if self.ffi._embedding is not None:
prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,))
- prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' %
- (self._string_literal(self.ffi._embedding),))
+ prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {')
+ self._print_string_literal_in_array(self.ffi._embedding)
+ prnt('0 };')
prnt('#ifdef PYPY_VERSION')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % (
base_module_name,))
@@ -1271,17 +1272,18 @@
_generate_cpy_extern_python_plus_c_ctx = \
_generate_cpy_extern_python_ctx
- def _string_literal(self, s):
- def _char_repr(c):
- # escape with a '\' the characters '\', '"' or (for trigraphs) '?'
- if c in '\\"?': return '\\' + c
- if ' ' <= c < '\x7F': return c
- if c == '\n': return '\\n'
- return '\\%03o' % ord(c)
- lines = []
- for line in s.splitlines(True) or ['']:
- lines.append('"%s"' % ''.join([_char_repr(c) for c in line]))
- return ' \\\n'.join(lines)
+ def _print_string_literal_in_array(self, s):
+ prnt = self._prnt
+ prnt('// # NB. this is not a string because of a size limit in MSVC')
+ for line in s.splitlines(True):
+ prnt(('// ' + line).rstrip())
+ printed_line = ''
+ for c in line:
+ if len(printed_line) >= 76:
+ prnt(printed_line)
+ printed_line = ''
+ printed_line += '%d,' % (ord(c),)
+ prnt(printed_line)
# ----------
# emitting the opcodes for individual types
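A rough, hand-written approximation (not taken from the diff) of what ``_print_string_literal_in_array`` emits: instead of one huge C string literal, which can hit MSVC's literal-size limit, every character of the embedded source becomes a decimal byte in a ``char[]`` initializer. The real method also echoes each source line as a ``//`` comment and writes through ``prnt()``:

    def as_c_array_lines(s, width=76):
        # sketch only: chunk "NN," byte values into lines of roughly `width` chars
        out, line = [], ''
        for c in s:
            if len(line) >= width:
                out.append(line)
                line = ''
            line += '%d,' % ord(c)
        out.append(line)
        return out

    print('\n'.join(as_c_array_lines('print("hi")\n')))
    # -> 112,114,105,110,116,40,34,104,105,34,41,10,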
diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py
--- a/lib_pypy/cffi/verifier.py
+++ b/lib_pypy/cffi/verifier.py
@@ -301,7 +301,6 @@
return suffixes
def _ensure_dir(filename):
- try:
- os.makedirs(os.path.dirname(filename))
- except OSError:
- pass
+ dirname = os.path.dirname(filename)
+ if dirname and not os.path.isdir(dirname):
+ os.makedirs(dirname)
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -20,6 +20,7 @@
or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
__metaclass__ = _structseq.structseqtype
+ name = "resource.struct_rusage"
ru_utime = _structseq.structseqfield(0, "user time used")
ru_stime = _structseq.structseqfield(1, "system time used")
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -119,7 +119,7 @@
To run untranslated tests, you need the Boehm garbage collector libgc.
-On recent Debian and Ubuntu (like 17.04), this is the command to install
+On recent Debian and Ubuntu (16.04 onwards), this is the command to install
all build-time dependencies::
apt-get install gcc make libffi-dev pkg-config zlib1g-dev libbz2-dev \
@@ -127,7 +127,7 @@
tk-dev libgc-dev python-cffi \
liblzma-dev libncursesw5-dev # these two only needed on PyPy3
-On older Debian and Ubuntu (12.04 to 16.04)::
+On older Debian and Ubuntu (12.04-14.04)::
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
@@ -149,12 +149,23 @@
xz-devel # For lzma on PyPy3.
(XXX plus the SLES11 version of libgdbm-dev and tk-dev)
-On Mac OS X, most of these build-time dependencies are installed alongside
+On Mac OS X:
+
+Most of these build-time dependencies are installed alongside
the Developer Tools. However, note that in order for the installation to
find them you may need to run::
xcode-select --install
+An exception is OpenSSL, which is no longer provided with the operating
+system. It can be obtained via Homebrew (with ``$ brew install openssl``),
+but it will not be available on the system path by default. The easiest
+way to enable it for building pypy is to set an environment variable::
+
+ export PKG_CONFIG_PATH=$(brew --prefix)/opt/openssl/lib/pkgconfig
+
+After setting this, translation (described next) will find the OpenSSL libs
+as expected.
Run the translation
-------------------
@@ -187,18 +198,18 @@
entire pypy interpreter. This step is currently singe threaded, and RAM
hungry. As part of this step, the chain creates a large number of C code
files and a Makefile to compile them in a
- directory controlled by the ``PYPY_USESSION_DIR`` environment variable.
+ directory controlled by the ``PYPY_USESSION_DIR`` environment variable.
2. Create an executable ``pypy-c`` by running the Makefile. This step can
- utilize all possible cores on the machine.
-3. Copy the needed binaries to the current directory.
-4. Generate c-extension modules for any cffi-based stdlib modules.
+ utilize all possible cores on the machine.
+3. Copy the needed binaries to the current directory.
+4. Generate c-extension modules for any cffi-based stdlib modules.
The resulting executable behaves mostly like a normal Python
interpreter (see :doc:`cpython_differences`), and is ready for testing, for
use as a base interpreter for a new virtualenv, or for packaging into a binary
suitable for installation on another machine running the same OS as the build
-machine.
+machine.
Note that step 4 is merely done as a convenience, any of the steps may be rerun
without rerunning the previous steps.
@@ -255,7 +266,7 @@
* PyPy 2.5.1 or earlier: normal users would see permission errors.
Installers need to run ``pypy -c "import gdbm"`` and other similar
- commands at install time; the exact list is in
+ commands at install time; the exact list is in
:source:`pypy/tool/release/package.py <package.py>`. Users
seeing a broken installation of PyPy can fix it after-the-fact if they
have sudo rights, by running once e.g. ``sudo pypy -c "import gdbm``.
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -59,7 +59,7 @@
# General information about the project.
project = u'PyPy'
-copyright = u'2017, The PyPy Project'
+copyright = u'2018, The PyPy Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -217,6 +217,7 @@
Alejandro J. Cura
Vladimir Kryachko
Gabriel
+ Thomas Hisch
Mark Williams
Kunal Grover
Nathan Taylor
@@ -306,8 +307,10 @@
Stanisław Halik
Julien Phalip
Roman Podoliaka
+ Steve Papanik
Eli Stevens
Boglarka Vezer
+ gabrielg
PavloKapyshin
Tomer Chachamu
Christopher Groskopf
@@ -330,11 +333,13 @@
Konrad Delong
Dinu Gherman
pizi
+ Tomáš Pružina
James Robert
Armin Ronacher
Diana Popa
Mads Kiilerich
Brett Cannon
+ Caleb Hattingh
aliceinwire
Zooko Wilcox-O Hearn
James Lan
@@ -355,4 +360,5 @@
Jason Madden
Yaroslav Fedevych
Even Wiik Thomassen
+ [email protected]
Stefan Marr
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -355,7 +355,11 @@
containers (as list items or in sets for example), the exact rule of
equality used is "``if x is y or x == y``" (on both CPython and PyPy);
as a consequence, because all ``nans`` are identical in PyPy, you
-cannot have several of them in a set, unlike in CPython. (Issue `#1974`__)
+cannot have several of them in a set, unlike in CPython. (Issue `#1974`__).
+Another consequence is that ``cmp(float('nan'), float('nan')) == 0``, because
+``cmp`` checks with ``is`` first whether the arguments are identical (there is
+no good value to return from this call to ``cmp``, because ``cmp`` pretends
+that there is a total order on floats, but that is wrong for NaNs).
.. __:
https://bitbucket.org/pypy/pypy/issue/1974/different-behaviour-for-collections-of
@@ -541,6 +545,15 @@
``del foo.bar`` where ``foo`` is a module (or class) that contains the
function ``bar``, is significantly slower than CPython.
+* Various built-in functions in CPython accept only positional arguments
+ and not keyword arguments. That can be considered a long-running
+ historical detail: newer functions tend to accept keyword arguments
+ and older function are occasionally fixed to do so as well. In PyPy,
+ most built-in functions accept keyword arguments (``help()`` shows the
+ argument names). But don't rely on it too much because future
+ versions of PyPy may have to rename the arguments if CPython starts
+ accepting them too.
+
.. _`is ignored in PyPy`: http://bugs.python.org/issue14621
.. _`little point`:
http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html
.. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/
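The NaN paragraph added above can be compressed into a short Python 2 session; the values in the comments are the PyPy semantics the text describes (CPython keeps both NaNs in the set):

    nan1 = float('nan')
    nan2 = float('nan')
    print(len({nan1, nan2}))      # PyPy: 1 -- all such NaNs are identical there
    print(nan1 in [nan1])         # True on both: containers test "x is y or x == y"
    print(cmp(nan1, nan2) == 0)   # PyPy: True -- cmp() checks identity first (Python 2 only)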
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -182,6 +182,57 @@
technical difficulties.
+What about numpy, numpypy, micronumpy?
+--------------------------------------
+
+Way back in 2011, the PyPy team `started to reimplement`_ numpy in PyPy. It
+has two pieces:
+
+ * the builtin module :source:`pypy/module/micronumpy`: this is written in
+ RPython and roughly covers the content of the ``numpy.core.multiarray``
+ module. Confusingly enough, this is available in PyPy under the name
+ ``_numpypy``. It is included by default in all the official releases of
+ PyPy (but it might be dropped in the future).
+
+ * a fork_ of the official numpy repository maintained by us and informally
+ called ``numpypy``: even more confusing, the name of the repo on bitbucket
+ is ``numpy``. The main difference with the upstream numpy, is that it is
+ based on the micronumpy module written in RPython, instead of of
+ ``numpy.core.multiarray`` which is written in C.
+
+Moreover, it is also possible to install the upstream version of ``numpy``:
+its core is written in C and it runs on PyPy under the cpyext compatibility
+layer. This is what you get if you do ``pypy -m pip install numpy``.
+
+
+Should I install numpy or numpypy?
+-----------------------------------
+
+TL;DR version: you should use numpy. You can install it by doing ``pypy -m pip
+install numpy``. You might also be interested in using the experimental `PyPy
+binary wheels`_ to save compilation time.
+
+The upstream ``numpy`` is written in C, and runs under the cpyext
+compatibility layer. Nowadays, cpyext is mature enough that you can simply
+use the upstream ``numpy``, since it passes 99.9% of the test suite. At the
+moment of writing (October 2017) the main drawback of ``numpy`` is that cpyext
+is infamously slow, and thus it has worse performance compared to
+``numpypy``. However, we are actively working on improving it, as we expect to
+reach the same speed, eventually.
+
+On the other hand, ``numpypy`` is more JIT-friendly and very fast to call,
+since it is written in RPython: but it is a reimplementation, and it's hard to
+be completely compatible: over the years the project slowly matured and
+eventually it was able to call out to the LAPACK and BLAS libraries to speed
+matrix calculations, and reached around an 80% parity with the upstream
+numpy. However, 80% is far from 100%. Since cpyext/numpy compatibility is
+progressing fast, we have discontinued support for ``numpypy``.
+
+.. _`started to reimplement`: https://morepypy.blogspot.co.il/2011/05/numpy-in-pypy-status-and-roadmap.html
+.. _fork: https://bitbucket.org/pypy/numpy
+.. _`PyPy binary wheels`: https://github.com/antocuni/pypy-wheels
+
+
Is PyPy more clever than CPython about Tail Calls?
--------------------------------------------------
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -62,7 +62,7 @@
* go to pypy/tool/release and run
``force-builds.py <release branch>``
The following JIT binaries should be built, however, we need more buildbots
- windows, linux-32, linux-64, osx64, armhf-raring, armhf-raspberrian, armel,
+ windows, linux-32, linux-64, osx64, armhf-raspberrian, armel,
freebsd64
* wait for builds to complete, make sure there are no failures
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,8 @@
.. toctree::
+ release-v5.10.1.rst
+ release-v5.10.0.rst
release-v5.9.0.rst
release-v5.8.0.rst
release-v5.7.1.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
.. toctree::
whatsnew-head.rst
+ whatsnew-pypy2-5.10.0.rst
whatsnew-pypy2-5.9.0.rst
whatsnew-pypy2-5.8.0.rst
whatsnew-pypy2-5.7.0.rst
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -240,9 +240,12 @@
**matplotlib** https://github.com/matplotlib/matplotlib
- TODO: the tkagg backend does not work, which makes tests fail on downstream
- projects like Pandas, SciPy. It uses id(obj) as a c-pointer to obj in
- tkagg.py, which requires refactoring
+ Status: using the matplotlib branch of PyPy and the tkagg-cffi branch of
+ matplotlib from https://github.com/mattip/matplotlib/tree/tkagg-cffi, the
+ tkagg backend can function.
+
+ TODO: the matplotlib branch passes numpy arrays by value (copying all the
+ data), this proof-of-concept needs help to become completely compliant
**wxPython** https://bitbucket.org/amauryfa/wxpython-cffi
diff --git a/pypy/doc/release-v5.10.0.rst b/pypy/doc/release-v5.10.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-v5.10.0.rst
@@ -0,0 +1,100 @@
+======================================
+PyPy2.7 and PyPy3.5 v5.10 dual release
+======================================
+
+The PyPy team is proud to release both PyPy2.7 v5.10 (an interpreter supporting
+Python 2.7 syntax), and a final PyPy3.5 v5.10 (an interpreter for Python
+3.5 syntax). The two releases are both based on much the same codebase, thus
+the dual release.
+
+This release is an incremental release with very few new features, the main
+feature being the final PyPy3.5 release that works on linux and OS X with beta
+windows support. It also includes fixes for `vmprof`_ cooperation with greenlets.
+
+Compared to 5.9, the 5.10 release contains mostly bugfixes and small improvements.
+We have in the pipeline big new features coming for PyPy 6.0 that did not make
+the release cut and should be available within the next couple months.
+
+As always, this release is 100% compatible with the previous one and fixed
+several issues and bugs raised by the growing community of PyPy users.
+As always, we strongly recommend updating.
+
+There are quite a few important changes that are in the pipeline that did not
+make it into the 5.10 release. Most important are speed improvements to cpyext
+(which will make numpy and pandas a bit faster) and utf8 branch that changes
+internal representation of unicode to utf8, which should help especially the
+Python 3.5 version of PyPy.
+
+This release concludes the Mozilla Open Source `grant`_ for having a compatible
+PyPy 3.5 release and we're very grateful for that. Of course, we will continue
+to improve PyPy 3.5 and probably move to 3.6 during the course of 2018.
+
+You can download the v5.10 releases here:
+
+ http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project.
+
+We would also like to thank our contributors and
+encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_
+with making RPython's JIT even better.
+
+.. _vmprof: http://vmprof.readthedocs.io
+.. _grant: https://morepypy.blogspot.com/2016/08/pypy-gets-funding-from-mozilla-for.html
+.. _`PyPy`: index.html
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`modules`: project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: project-ideas.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7 and CPython 3.5. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other `dynamic languages`_ to see what RPython
+can do for them.
+
+The PyPy release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD)
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://rpython.readthedocs.io/en/latest/examples.html
+
+Changelog
+=========
+
+* improve ssl handling on windows for pypy3 (makes pip work)
+* improve unicode handling in various error reporters
+* fix vmprof cooperation with greenlets
+* fix some things in cpyext
+* test and document the cmp(nan, nan) == 0 behaviour
+* don't crash when calling sleep with inf or nan
+* fix bugs in _io module
+* inspect.isbuiltin() now returns True for functions implemented in C
+* allow the sequences future-import, docstring, future-import for CPython bug compatibility
+* Issue #2699: non-ascii messages in warnings
+* posix.lockf
+* fixes for FreeBSD platform
+* add .debug files, so builds contain debugging info, instead of being stripped
+* improvements to cppyy
+* issue #2677 copy pure c PyBuffer_{From,To}Contiguous from cpython
+* issue #2682, split firstword on any whitespace in sqlite3
+* ctypes: allow ptr[0] = foo when ptr is a pointer to struct
+* matplotlib will work with tkagg backend once `matplotlib pr #9356`_ is merged
+* improvements to utf32 surrogate handling
+* cffi version bump to 1.11.2
+
+.. _`matplotlib pr #9356`: https://github.com/matplotlib/matplotlib/pull/9356
diff --git a/pypy/doc/release-v5.10.1.rst b/pypy/doc/release-v5.10.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-v5.10.1.rst
@@ -0,0 +1,63 @@
+===========
+PyPy 5.10.1
+===========
+
+We have released a bugfix PyPy3.5-v5.10.1
+due to the following issues:
+
+ * Fix ``time.sleep(float('nan')`` which would hang on windows
+
+ * Fix missing ``errno`` constants on windows
+
+ * Fix issue 2718_ for the REPL on linux
+
+ * Fix an overflow in converting 3 secs to nanosecs (issue 2717_ )
+
+ * Flag kwarg to ``os.setxattr`` had no effect
+
+ * Fix the winreg module for unicode entries in the registry on windows
+
+Note that many of these fixes are for our new beta verison of PyPy3.5 on
+windows. There may be more unicode problems in the windows beta version
+especially around the subject of directory- and file-names with non-ascii
+characters.
+
+Our downloads are available now. On macos, we recommend you wait for the
+Homebrew_ package.
+
+Thanks to those who reported the issues.
+
+.. _2718: https://bitbucket.org/pypy/pypy/issues/2718
+.. _2717: https://bitbucket.org/pypy/pypy/issues/2717
+.. _Homebrew: http://brewformulas.org/Pypy
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7 and CPython 3.5. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other `dynamic languages`_ to see what RPython
+can do for them.
+
+This PyPy 3.5 release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD)
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://rpython.readthedocs.io/en/latest/examples.html
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
--- a/pypy/doc/tool/makecontributor.py
+++ b/pypy/doc/tool/makecontributor.py
@@ -81,6 +81,7 @@
'Yasir Suhail':['yasirs'],
'Squeaky': ['squeaky'],
"Amaury Forgeot d'Arc": ['[email protected]'],
+ "Dodan Mihai": ['[email protected]'],
}
alias_map = {}
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -1,15 +1,12 @@
-===========================
-What's new in PyPy2.7 5.10+
-===========================
-
-.. this is a revision shortly after release-pypy2.7-v5.9.0
-.. startrev:899e5245de1e
-
-.. branch: cpyext-jit
-
-Differentiate the code to call METH_NOARGS, METH_O and METH_VARARGS in cpyext:
-this allows to write specialized code which is much faster than previous
-completely generic version. Moreover, let the JIT to look inside the cpyext
-module: the net result is that cpyext calls are up to 7x faster. However, this
-is true only for very simple situations: in all real life code, we are still
-much slower than CPython (more optimizations to come)
+===========================
+What's new in PyPy2.7 5.10+
+===========================
+
+.. this is a revision shortly after release-pypy2.7-v5.10.0
+.. startrev: 6b024edd9d12
+
+.. branch: cpyext-avoid-roundtrip
+
+Big refactoring of some cpyext code, which avoids a lot of nonsense when
+calling C from Python and vice-versa: the result is a big speedup in
+function/method calls, up to 6 times faster.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-pypy2-5.10.0.rst
copy from pypy/doc/whatsnew-head.rst
copy to pypy/doc/whatsnew-pypy2-5.10.0.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-pypy2-5.10.0.rst
@@ -1,15 +1,42 @@
-===========================
-What's new in PyPy2.7 5.10+
-===========================
-
-.. this is a revision shortly after release-pypy2.7-v5.9.0
-.. startrev:899e5245de1e
-
-.. branch: cpyext-jit
-
-Differentiate the code to call METH_NOARGS, METH_O and METH_VARARGS in cpyext:
-this allows to write specialized code which is much faster than previous
-completely generic version. Moreover, let the JIT to look inside the cpyext
-module: the net result is that cpyext calls are up to 7x faster. However, this
-is true only for very simple situations: in all real life code, we are still
-much slower than CPython (more optimizations to come)
+==========================
+What's new in PyPy2.7 5.10
+==========================
+
+.. this is a revision shortly after release-pypy2.7-v5.9.0
+.. startrev:d56dadcef996
+
+
+.. branch: cppyy-packaging
+
+Cleanup and improve cppyy packaging
+
+.. branch: docs-osx-brew-openssl
+
+.. branch: keep-debug-symbols
+
+Add a smartstrip tool, which can optionally keep the debug symbols in a
+separate file, instead of just stripping them away. Use it in packaging
+
+.. branch: bsd-patches
+
+Fix failures on FreeBSD, contributed by David Naylor as patches on the issue
+tracker (issues 2694, 2695, 2696, 2697)
+
+.. branch: run-extra-tests
+
+Run extra_tests/ in buildbot
+
+.. branch: vmprof-0.4.10
+
+Upgrade the _vmprof backend to vmprof 0.4.10
+
+.. branch: fix-vmprof-stacklet-switch
+.. branch: fix-vmprof-stacklet-switch-2
+Fix a vmprof+continulets (i.e. greenelts, eventlet, gevent, ...)
+
+.. branch: win32-vcvars
+
+.. branch: rdict-fast-hash
+
+Make it possible to declare that the hash function of an r_dict is fast in RPython.
+
diff --git a/pypy/doc/whatsnew-pypy2-5.6.0.rst b/pypy/doc/whatsnew-pypy2-5.6.0.rst
--- a/pypy/doc/whatsnew-pypy2-5.6.0.rst
+++ b/pypy/doc/whatsnew-pypy2-5.6.0.rst
@@ -101,7 +101,7 @@
.. branch: newinitwarn
-Match CPython's stricter handling of __new/init__ arguments
+Match CPython's stricter handling of ``__new__``/``__init__`` arguments
.. branch: openssl-1.1
diff --git a/pypy/doc/whatsnew-pypy2-5.9.0.rst b/pypy/doc/whatsnew-pypy2-5.9.0.rst
--- a/pypy/doc/whatsnew-pypy2-5.9.0.rst
+++ b/pypy/doc/whatsnew-pypy2-5.9.0.rst
@@ -85,3 +85,12 @@
.. branch: py_ssize_t
Explicitly use Py_ssize_t as the Signed type in pypy c-api
+
+.. branch: cpyext-jit
+
+Differentiate the code to call METH_NOARGS, METH_O and METH_VARARGS in cpyext:
+this allows to write specialized code which is much faster than previous
+completely generic version. Moreover, let the JIT to look inside the cpyext
+module: the net result is that cpyext calls are up to 7x faster. However, this
+is true only for very simple situations: in all real life code, we are still
+much slower than CPython (more optimizations to come)
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -11,7 +11,7 @@
To build pypy-c you need a working python environment, and a C compiler.
It is possible to translate with a CPython 2.6 or later, but this is not
-the preferred way, because it will take a lot longer to run � depending
+the preferred way, because it will take a lot longer to run – depending
on your architecture, between two and three times as long. So head to
`our downloads`_ and get the latest stable version.
@@ -25,8 +25,10 @@
This compiler, while the standard one for Python 2.7, is deprecated. Microsoft has
made it available as the `Microsoft Visual C++ Compiler for Python 2.7`_ (the link
-was checked in Nov 2016). Note that the compiler suite will be installed in
-``C:\Users\<user name>\AppData\Local\Programs\Common\Microsoft\Visual C++ for Python``.
+was checked in Nov 2016). Note that the compiler suite may be installed in
+``C:\Users\<user name>\AppData\Local\Programs\Common\Microsoft\Visual C++ for Python``
+or in
+``C:\Program Files (x86)\Common Files\Microsoft\Visual C++ for Python``.
A current version of ``setuptools`` will be able to find it there. For
Windows 10, you must right-click the download, and under ``Properties`` ->
``Compatibility`` mark it as ``Run run this program in comatibility mode for``
@@ -41,7 +43,6 @@
-----------------------------------
We routinely test translation using v9, also known as Visual Studio 2008.
-Our buildbot is still using the Express Edition, not the compiler noted above.
Other configurations may work as well.
The translation scripts will set up the appropriate environment variables
@@ -81,6 +82,31 @@
.. _build instructions: http://pypy.org/download.html#building-from-source
+Setting Up Visual Studio for building SSL in Python3
+----------------------------------------------------
+
+On Python3, the ``ssl`` module is based on ``cffi``, and requires a build step after
+translation. However ``distutils`` does not support the Micorosft-provided Visual C
+compiler, and ``cffi`` depends on ``distutils`` to find the compiler. The
+traditional solution to this problem is to install the ``setuptools`` module
+via running ``-m ensurepip`` which installs ``pip`` and ``setuptools``. However
+``pip`` requires ``ssl``. So we have a chicken-and-egg problem: ``ssl`` depends on
+``cffi`` which depends on ``setuptools``, which depends on ``ensurepip``, which
+depends on ``ssl``.
+
+In order to solve this, the buildbot sets an environment varaible that helps
+``distutils`` find the compiler without ``setuptools``::
+
+ set VS90COMNTOOLS=C:\Program Files (x86)\Common Files\Microsoft\Visual C++ for Python\9.0\VC\bin
+
+or whatever is appropriate for your machine. Note that this is not enough, you
+must also copy the ``vcvarsall.bat`` file fron the ``...\9.0`` directory to the
+``...\9.0\VC`` directory, and edit it, changing the lines that set
+``VCINSTALLDIR`` and ``WindowsSdkDir``::
+
+ set VCINSTALLDIR=%~dp0\
+ set WindowsSdkDir=%~dp0\..\WinSDK\
+
Preparing Windows for the large build
-------------------------------------
diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py
--- a/pypy/goal/getnightly.py
+++ b/pypy/goal/getnightly.py
@@ -15,7 +15,7 @@
arch = 'linux'
cmd = 'wget "%s"'
TAR_OPTIONS += ' --wildcards'
- binfiles = "'*/bin/pypy' '*/bin/libpypy-c.so'"
+ binfiles = "'*/bin/pypy*' '*/bin/libpypy-c.so*'"
if os.uname()[-1].startswith('arm'):
arch += '-armhf-raspbian'
elif sys.platform.startswith('darwin'):
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -1246,3 +1246,7 @@
exc = py.test.raises(SyntaxError, self.get_ast, input).value
assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
" bytes in position 0-1: truncated \\xXX escape")
+ input = "u'\\x1'"
+ exc = py.test.raises(SyntaxError, self.get_ast, input).value
+ assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
+ " bytes in position 0-2: truncated \\xXX escape")
diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py
--- a/pypy/interpreter/pyparser/future.py
+++ b/pypy/interpreter/pyparser/future.py
@@ -85,13 +85,17 @@
# permissive parsing of the given list of tokens; it relies on
# the real parsing done afterwards to give errors.
it.skip_newlines()
- it.skip_name("r") or it.skip_name("u") or it.skip_name("ru")
- if it.skip(pygram.tokens.STRING):
- it.skip_newlines()
- while (it.skip_name("from") and
+ docstring_possible = True
+ while True:
+ it.skip_name("r") or it.skip_name("u") or it.skip_name("ru")
+ if docstring_possible and it.skip(pygram.tokens.STRING):
+ it.skip_newlines()
+ docstring_possible = False
+ if not (it.skip_name("from") and
it.skip_name("__future__") and
it.skip_name("import")):
+ break
it.skip(pygram.tokens.LPAR) # optionally
# return in 'last_position' any line-column pair that points
# somewhere inside the last __future__ import statement
diff --git a/pypy/interpreter/pyparser/test/test_future.py b/pypy/interpreter/pyparser/test/test_future.py
--- a/pypy/interpreter/pyparser/test/test_future.py
+++ b/pypy/interpreter/pyparser/test/test_future.py
@@ -208,3 +208,13 @@
'from __future__ import with_statement;')
f = run(s, (2, 23))
assert f == fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_WITH_STATEMENT
+
+def test_future_doc_future():
+ # for some reason people do this :-[
+ s = '''
+from __future__ import generators
+"Docstring"
+from __future__ import division
+ '''
+ f = run(s, (4, 24))
+ assert f == fut.CO_FUTURE_DIVISION | fut.CO_GENERATOR_ALLOWED
diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py
--- a/pypy/interpreter/test/test_unicodehelper.py
+++ b/pypy/interpreter/test/test_unicodehelper.py
@@ -1,4 +1,7 @@
-from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8
+import pytest
+import struct
+from pypy.interpreter.unicodehelper import (
+ encode_utf8, decode_utf8, unicode_encode_utf_32_be)
class FakeSpace:
pass
@@ -24,3 +27,23 @@
assert map(ord, got) == [0xd800, 0xdc00]
got = decode_utf8(space, "\xf0\x90\x80\x80")
assert map(ord, got) == [0x10000]
+
+@pytest.mark.parametrize('unich', [u"\ud800", u"\udc80"])
+def test_utf32_surrogates(unich):
+ assert (unicode_encode_utf_32_be(unich, 1, None) ==
+ struct.pack('>i', ord(unich)))
+ with pytest.raises(UnicodeEncodeError):
+ unicode_encode_utf_32_be(unich, 1, None, allow_surrogates=False)
+
+ def replace_with(ru, rs):
+ def errorhandler(errors, enc, msg, u, startingpos, endingpos):
+ if errors == 'strict':
+ raise UnicodeEncodeError(enc, u, startingpos, endingpos, msg)
+ return ru, rs, endingpos
+ return unicode_encode_utf_32_be(
+ u"<%s>" % unich, 3, None,
+ errorhandler, allow_surrogates=False)
+
+ assert replace_with(u'rep', None) == u'<rep>'.encode('utf-32-be')
+ assert (replace_with(None, '\xca\xfe\xca\xfe') ==
+ '\x00\x00\x00<\xca\xfe\xca\xfe\x00\x00\x00>')
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -1,7 +1,11 @@
+from rpython.rlib.objectmodel import specialize
+from rpython.rlib.rarithmetic import intmask
+from rpython.rlib.rstring import StringBuilder, UnicodeBuilder
+from rpython.rlib import runicode
+from rpython.rlib.runicode import (
+ default_unicode_error_encode, default_unicode_error_decode,
+ MAXUNICODE, BYTEORDER, BYTEORDER2, UNICHR)
from pypy.interpreter.error import OperationError
-from rpython.rlib.objectmodel import specialize
-from rpython.rlib import runicode
-from pypy.module._codecs import interp_codecs
@specialize.memo()
def decode_error_handler(space):
@@ -37,6 +41,7 @@
# These functions take and return unwrapped rpython strings and unicodes
def decode_unicode_escape(space, string):
+ from pypy.module._codecs import interp_codecs
state = space.fromcache(interp_codecs.CodecState)
unicodedata_handler = state.get_unicodedata_handler(space)
result, consumed = runicode.str_decode_unicode_escape(
@@ -71,3 +76,229 @@
uni, len(uni), "strict",
errorhandler=None,
allow_surrogates=True)
+
+# ____________________________________________________________
+# utf-32
+
+def str_decode_utf_32(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "native")
+ return result, length
+
+def str_decode_utf_32_be(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "big")
+ return result, length
+
+def str_decode_utf_32_le(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "little")
+ return result, length
+
+def py3k_str_decode_utf_32(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "native", 'utf-32-' + BYTEORDER2)
+ return result, length
+
+def py3k_str_decode_utf_32_be(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "big", 'utf-32-be')
+ return result, length
+
+def py3k_str_decode_utf_32_le(s, size, errors, final=True,
+ errorhandler=None):
+ result, length, byteorder = str_decode_utf_32_helper(
+ s, size, errors, final, errorhandler, "little", 'utf-32-le')
+ return result, length
+
+BOM32_DIRECT = intmask(0x0000FEFF)
+BOM32_REVERSE = intmask(0xFFFE0000)
+
+def str_decode_utf_32_helper(s, size, errors, final=True,
+ errorhandler=None,
+ byteorder="native",
+ public_encoding_name='utf32'):
+ if errorhandler is None:
+ errorhandler = default_unicode_error_decode
+ bo = 0
+
+ if BYTEORDER == 'little':
+ iorder = [0, 1, 2, 3]
+ else:
+ iorder = [3, 2, 1, 0]
+
+ # Check for BOM marks (U+FEFF) in the input and adjust current
+ # byte order setting accordingly. In native mode, the leading BOM
+ # mark is skipped, in all other modes, it is copied to the output
+ # stream as-is (giving a ZWNBSP character).
+ pos = 0
+ if byteorder == 'native':
+ if size >= 4:
+ bom = intmask(
+ (ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) |
+ (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]]))
+ if BYTEORDER == 'little':
+ if bom == BOM32_DIRECT:
+ pos += 4
+ bo = -1
+ elif bom == BOM32_REVERSE:
+ pos += 4
+ bo = 1
+ else:
+ if bom == BOM32_DIRECT:
+ pos += 4
+ bo = 1
+ elif bom == BOM32_REVERSE:
+ pos += 4
+ bo = -1
+ elif byteorder == 'little':
+ bo = -1
+ else:
+ bo = 1
+ if size == 0:
+ return u'', 0, bo
+ if bo == -1:
+ # force little endian
+ iorder = [0, 1, 2, 3]
+ elif bo == 1:
+ # force big endian
+ iorder = [3, 2, 1, 0]
+
+ result = UnicodeBuilder(size // 4)
+
+ while pos < size:
+ # remaining bytes at the end? (size should be divisible by 4)
+ if len(s) - pos < 4:
+ if not final:
+ break
+ r, pos = errorhandler(errors, public_encoding_name,
+ "truncated data",
+ s, pos, len(s))
+ result.append(r)
+ if len(s) - pos < 4:
+ break
+ continue
+ ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) |
+ (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]]))
+ if ch >= 0x110000:
+ r, pos = errorhandler(errors, public_encoding_name,
+ "codepoint not in range(0x110000)",
+ s, pos, len(s))
+ result.append(r)
+ continue
+
+ if MAXUNICODE < 65536 and ch >= 0x10000:
+ ch -= 0x10000L
+ result.append(unichr(0xD800 + (ch >> 10)))
+ result.append(unichr(0xDC00 + (ch & 0x03FF)))
+ else:
+ result.append(UNICHR(ch))
+ pos += 4
+ return result.build(), pos, bo
+
+def _STORECHAR32(result, CH, byteorder):
+ c0 = chr(((CH) >> 24) & 0xff)
+ c1 = chr(((CH) >> 16) & 0xff)
+ c2 = chr(((CH) >> 8) & 0xff)
+ c3 = chr((CH) & 0xff)
+ if byteorder == 'little':
+ result.append(c3)
+ result.append(c2)
+ result.append(c1)
+ result.append(c0)
+ else:
+ result.append(c0)
+ result.append(c1)
+ result.append(c2)
+ result.append(c3)
+
+def unicode_encode_utf_32_helper(s, size, errors,
+ errorhandler=None,
+ allow_surrogates=True,
+ byteorder='little',
+ public_encoding_name='utf32'):
+ if errorhandler is None:
+ errorhandler = default_unicode_error_encode
+ if size == 0:
+ if byteorder == 'native':
+ result = StringBuilder(4)
+ _STORECHAR32(result, 0xFEFF, BYTEORDER)
+ return result.build()
+ return ""
+
+ result = StringBuilder(size * 4 + 4)
+ if byteorder == 'native':
+ _STORECHAR32(result, 0xFEFF, BYTEORDER)
+ byteorder = BYTEORDER
+
+ pos = 0
+ while pos < size:
+ ch = ord(s[pos])
+ pos += 1
+ ch2 = 0
+ if not allow_surrogates and 0xD800 <= ch < 0xE000:
+ ru, rs, pos = errorhandler(
+ errors, public_encoding_name, 'surrogates not allowed',
+ s, pos - 1, pos)
+ if rs is not None:
+ # py3k only
+ if len(rs) % 4 != 0:
+ errorhandler(
+ 'strict', public_encoding_name, 'surrogates not allowed',
+ s, pos - 1, pos)
+ result.append(rs)
+ continue
+ for ch in ru:
+ if ord(ch) < 0xD800:
+ _STORECHAR32(result, ord(ch), byteorder)
+ else:
+ errorhandler(
+ 'strict', public_encoding_name,
+ 'surrogates not allowed', s, pos - 1, pos)
+ continue
+ if 0xD800 <= ch < 0xDC00 and MAXUNICODE < 65536 and pos < size:
+ ch2 = ord(s[pos])
+ if 0xDC00 <= ch2 < 0xE000:
+ ch = (((ch & 0x3FF) << 10) | (ch2 & 0x3FF)) + 0x10000
+ pos += 1
+ _STORECHAR32(result, ch, byteorder)
+
+ return result.build()
+
+def unicode_encode_utf_32(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "native")
+
+def unicode_encode_utf_32_be(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "big")
+
+def unicode_encode_utf_32_le(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "little")
+
+def py3k_unicode_encode_utf_32(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "native",
+ 'utf-32-' + BYTEORDER2)
+
+def py3k_unicode_encode_utf_32_be(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "big",
+ 'utf-32-be')
+
+def py3k_unicode_encode_utf_32_le(s, size, errors,
+ errorhandler=None, allow_surrogates=True):
+ return unicode_encode_utf_32_helper(s, size, errors, errorhandler,
+ allow_surrogates, "little",
+ 'utf-32-le')
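
These helpers follow the same byte-order and BOM conventions as the standard UTF-32
codec, which can be sanity-checked against the built-in codecs (a sketch using the
stdlib codec, not the RPython functions in this diff; the native-order BOM shown in
the comments is what a little-endian host produces)::

    u = u"\u20ac"                        # EURO SIGN
    print(repr(u.encode("utf-32-be")))   # '\x00\x00 \xac'  (no BOM, big-endian)
    print(repr(u.encode("utf-32-le")))   # '\xac \x00\x00'  (no BOM, little-endian)
    data = u.encode("utf-32")            # "native" order prepends a U+FEFF BOM
    print(repr(data[:4]))                # '\xff\xfe\x00\x00' on a little-endian host
    print(repr(data.decode("utf-32")))   # the decoder consumes the BOM: u'\u20ac'
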
diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py
--- a/pypy/module/__builtin__/test/test_builtin.py
+++ b/pypy/module/__builtin__/test/test_builtin.py
@@ -404,6 +404,7 @@
def test_cmp(self):
+ assert cmp(float('nan'), float('nan')) == 0
assert cmp(9,9) == 0
assert cmp(0,9) < 0
assert cmp(9,0) > 0
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.11.1"
+VERSION = "1.11.3"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.11.1", ("This test_c.py file is for testing a version"
+assert __version__ == "1.11.3", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -8,7 +8,8 @@
@unwrap_spec(cdef='text', module_name='text', source='text', packed=int)
def prepare(space, cdef, module_name, source, w_includes=None,
- w_extra_source=None, w_min_version=None, packed=False):
+ w_extra_source=None, w_min_version=None, packed=False,
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit