Author: Amaury Forgeot d'Arc <[email protected]>
Branch: py3k
Changeset: r57530:87147c4713f0
Date: 2012-09-23 18:11 +0200
http://bitbucket.org/pypy/pypy/changeset/87147c4713f0/
Log: hg merge default
diff --git a/lib_pypy/_csv.py b/lib_pypy/_csv.py
--- a/lib_pypy/_csv.py
+++ b/lib_pypy/_csv.py
@@ -363,9 +363,7 @@
(self.dialect.delimiter, self.dialect.quotechar))
elif self.state == self.EAT_CRNL:
- if c in '\r\n':
- pass
- else:
+ if c not in '\r\n':
raise Error("new-line character seen in unquoted field - "
"do you need to open the file "
"in universal-newline mode?")
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -65,24 +65,44 @@
self.marked = False
self.have_return = False
- def _post_order(self, blocks):
- if self.marked:
- return
- self.marked = True
- if self.next_block is not None:
- self.next_block._post_order(blocks)
- for instr in self.instructions:
- if instr.has_jump:
- instr.jump[0]._post_order(blocks)
- blocks.append(self)
- self.marked = True
+ def _post_order_see(self, stack, nextblock):
+ if nextblock.marked == 0:
+ nextblock.marked = 1
+ stack.append(nextblock)
def post_order(self):
- """Return this block and its children in post order."""
- blocks = []
- self._post_order(blocks)
- blocks.reverse()
- return blocks
+ """Return this block and its children in post order.
+ This means that the graph of blocks is first cleaned up to
+ ignore back-edges, thus turning it into a DAG. Then the DAG
+ is linearized. For example:
+
+ A --> B -\ => [A, D, B, C]
+ \-> D ---> C
+ """
+ resultblocks = []
+ stack = [self]
+ self.marked = 1
+ while stack:
+ current = stack[-1]
+ if current.marked == 1:
+ current.marked = 2
+ if current.next_block is not None:
+ self._post_order_see(stack, current.next_block)
+ else:
+ i = current.marked - 2
+ assert i >= 0
+ while i < len(current.instructions):
+ instr = current.instructions[i]
+ i += 1
+ if instr.has_jump:
+ current.marked = i + 2
+ self._post_order_see(stack, instr.jump[0])
+ break
+ else:
+ resultblocks.append(current)
+ stack.pop()
+ resultblocks.reverse()
+ return resultblocks
def code_size(self):
"""Return the encoded size of all the instructions in this block."""
@@ -354,20 +374,26 @@
def _stacksize(self, blocks):
"""Compute co_stacksize."""
for block in blocks:
- block.marked = False
- block.initial_depth = -1000
- return self._recursive_stack_depth_walk(blocks[0], 0, 0)
+ block.initial_depth = 0
+ # Assumes that it is sufficient to walk the blocks in 'post-order'.
+ # This means we ignore all back-edges, but apart from that, we only
+ # look into a block when all the previous blocks have been done.
+ self._max_depth = 0
+ for block in blocks:
+ self._do_stack_depth_walk(block)
+ return self._max_depth
- def _recursive_stack_depth_walk(self, block, depth, max_depth):
- if block.marked or block.initial_depth >= depth:
- return max_depth
- block.marked = True
- block.initial_depth = depth
+ def _next_stack_depth_walk(self, nextblock, depth):
+ if depth > nextblock.initial_depth:
+ nextblock.initial_depth = depth
+
+ def _do_stack_depth_walk(self, block):
+ depth = block.initial_depth
done = False
for instr in block.instructions:
depth += _opcode_stack_effect(instr.opcode, instr.arg)
- if depth >= max_depth:
- max_depth = depth
+ if depth >= self._max_depth:
+ self._max_depth = depth
if instr.has_jump:
target_depth = depth
jump_op = instr.opcode
@@ -377,20 +403,15 @@
jump_op == ops.SETUP_EXCEPT or
jump_op == ops.SETUP_WITH):
target_depth += 3
- if target_depth > max_depth:
- max_depth = target_depth
- max_depth = self._recursive_stack_depth_walk(instr.jump[0],
- target_depth,
- max_depth)
+ if target_depth > self._max_depth:
+ self._max_depth = target_depth
+ self._next_stack_depth_walk(instr.jump[0], target_depth)
if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
# Nothing more can occur.
done = True
break
if block.next_block and not done:
- max_depth = self._recursive_stack_depth_walk(block.next_block,
- depth, max_depth)
- block.marked = False
- return max_depth
+ max_depth = self._next_stack_depth_walk(block.next_block, depth)
def _build_lnotab(self, blocks):
"""Build the line number table for tracebacks and tracing."""
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -808,6 +808,10 @@
return y"""
yield self.st, test, "f()", 4
+ def test_lots_of_loops(self):
+ source = "for x in y: pass\n" * 1000
+ compile_with_astcompiler(source, 'exec', self.space)
+
def test_raise_from(self):
test = """if 1:
def f():
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
--- a/pypy/jit/metainterp/compile.py
+++ b/pypy/jit/metainterp/compile.py
@@ -106,7 +106,8 @@
def compile_loop(metainterp, greenkey, start,
inputargs, jumpargs,
- resume_at_jump_descr, full_preamble_needed=True):
+ resume_at_jump_descr, full_preamble_needed=True,
+ try_disabling_unroll=False):
"""Try to compile a new procedure by closing the current history back
to the first operation.
"""
@@ -116,6 +117,13 @@
jitdriver_sd = metainterp.jitdriver_sd
history = metainterp.history
+ enable_opts = jitdriver_sd.warmstate.enable_opts
+ if try_disabling_unroll:
+ if 'unroll' not in enable_opts:
+ return None
+ enable_opts = enable_opts.copy()
+ del enable_opts['unroll']
+
jitcell_token = make_jitcell_token(jitdriver_sd)
part = create_empty_loop(metainterp)
part.inputargs = inputargs[:]
@@ -126,7 +134,7 @@
[ResOperation(rop.LABEL, jumpargs, None,
descr=jitcell_token)]
try:
- optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts)
+ optimize_trace(metainterp_sd, part, enable_opts)
except InvalidLoop:
return None
target_token = part.operations[0].getdescr()
@@ -153,7 +161,7 @@
jumpargs = part.operations[-1].getarglist()
try:
-        optimize_trace(metainterp_sd, part,
-                       jitdriver_sd.warmstate.enable_opts)
+ optimize_trace(metainterp_sd, part, enable_opts)
except InvalidLoop:
return None
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -2039,8 +2039,9 @@
memmgr = self.staticdata.warmrunnerdesc.memory_manager
if memmgr:
if self.cancel_count > memmgr.max_unroll_loops:
- self.staticdata.log('cancelled too many times!')
- raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP)
+ self.compile_loop_or_abort(original_boxes,
+ live_arg_boxes,
+ start, resumedescr)
self.staticdata.log('cancelled, tracing more...')
# Otherwise, no loop found so far, so continue tracing.
@@ -2140,7 +2141,8 @@
return None
return token
-    def compile_loop(self, original_boxes, live_arg_boxes, start,
-                     resume_at_jump_descr):
+ def compile_loop(self, original_boxes, live_arg_boxes, start,
+ resume_at_jump_descr, try_disabling_unroll=False):
num_green_args = self.jitdriver_sd.num_green_args
greenkey = original_boxes[:num_green_args]
if not self.partial_trace:
@@ -2156,7 +2158,8 @@
target_token = compile.compile_loop(self, greenkey, start,
original_boxes[num_green_args:],
live_arg_boxes[num_green_args:],
- resume_at_jump_descr)
+ resume_at_jump_descr,
+ try_disabling_unroll=try_disabling_unroll)
if target_token is not None:
assert isinstance(target_token, TargetToken)
self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey,
target_token.targeting_jitcell_token)
@@ -2168,6 +2171,18 @@
jitcell_token = target_token.targeting_jitcell_token
self.raise_continue_running_normally(live_arg_boxes, jitcell_token)
+ def compile_loop_or_abort(self, original_boxes, live_arg_boxes,
+ start, resume_at_jump_descr):
+ """Called after we aborted more than 'max_unroll_loops' times.
+ As a last attempt, try to compile the loop with unrolling disabled.
+ """
+ if not self.partial_trace:
+ self.compile_loop(original_boxes, live_arg_boxes, start,
+ resume_at_jump_descr, try_disabling_unroll=True)
+ #
+ self.staticdata.log('cancelled too many times!')
+ raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP)
+
def compile_trace(self, live_arg_boxes, resume_at_jump_descr):
num_green_args = self.jitdriver_sd.num_green_args
greenkey = live_arg_boxes[:num_green_args]
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -2734,6 +2734,35 @@
finally:
optimizeopt.optimize_trace = old_optimize_trace
+ def test_max_unroll_loops_retry_without_unroll(self):
+ from pypy.jit.metainterp.optimize import InvalidLoop
+ from pypy.jit.metainterp import optimizeopt
+ myjitdriver = JitDriver(greens = [], reds = ['n', 'i'])
+ #
+ def f(n, limit):
+ set_param(myjitdriver, 'threshold', 5)
+ set_param(myjitdriver, 'max_unroll_loops', limit)
+ i = 0
+ while i < n:
+ myjitdriver.jit_merge_point(n=n, i=i)
+ print i
+ i += 1
+ return i
+ #
+ seen = []
+ def my_optimize_trace(metainterp_sd, loop, enable_opts, *args, **kwds):
+ seen.append('unroll' in enable_opts)
+ raise InvalidLoop
+ old_optimize_trace = optimizeopt.optimize_trace
+ optimizeopt.optimize_trace = my_optimize_trace
+ try:
+ res = self.meta_interp(f, [23, 4])
+ assert res == 23
+ assert False in seen
+ assert True in seen
+ finally:
+ optimizeopt.optimize_trace = old_optimize_trace
+
def test_retrace_limit_with_extra_guards(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a',
'node'])
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -70,7 +70,8 @@
for i in range(len(lst_w)):
ctitem.convert_from_object(cdata, lst_w[i])
cdata = rffi.ptradd(cdata, ctitem.size)
- elif isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveChar):
+ elif (self.ctitem.is_primitive_integer and
+ self.ctitem.size == rffi.sizeof(lltype.Char)):
try:
s = space.str_w(w_ob)
except OperationError, e:
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -28,7 +28,7 @@
self.handle = dlopen(ll_libname, mode)
except DLOpenError, e:
raise operationerrfmt(space.w_OSError,
- "cannot load '%s': %s",
+ "cannot load library %s: %s",
filename, e.msg)
self.name = filename
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -2161,3 +2161,31 @@
#
d = rawaddressof(BCharP, s, 1)
assert d == cast(BCharP, p) + 1
+
+def test_newp_signed_unsigned_char():
+ BCharArray = new_array_type(
+ new_pointer_type(new_primitive_type("char")), None)
+ p = newp(BCharArray, b"foo")
+ assert len(p) == 4
+ assert list(p) == [b"f", b"o", b"o", b"\x00"]
+ #
+ BUCharArray = new_array_type(
+ new_pointer_type(new_primitive_type("unsigned char")), None)
+ p = newp(BUCharArray, b"fo\xff")
+ assert len(p) == 4
+ assert list(p) == [ord("f"), ord("o"), 0xff, 0]
+ #
+ BSCharArray = new_array_type(
+ new_pointer_type(new_primitive_type("signed char")), None)
+ p = newp(BSCharArray, b"fo\xff")
+ assert len(p) == 4
+ assert list(p) == [ord("f"), ord("o"), -1, 0]
+
+def test_newp_from_bytearray_doesnt_work():
+ BCharArray = new_array_type(
+ new_pointer_type(new_primitive_type("char")), None)
+ py.test.raises(TypeError, newp, BCharArray, bytearray(b"foo"))
+ p = newp(BCharArray, 4)
+ buffer(p)[:] = bytearray(b"foo\x00")
+ assert len(p) == 4
+ assert list(p) == [b"f", b"o", b"o", b"\x00"]
diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py
--- a/pypy/rlib/jit.py
+++ b/pypy/rlib/jit.py
@@ -426,7 +426,7 @@
'loop_longevity': 1000,
'retrace_limit': 5,
'max_retrace_guards': 15,
- 'max_unroll_loops': 4,
+ 'max_unroll_loops': 0,
'enable_opts': 'all',
}
unroll_parameters = unrolling_iterable(PARAMETERS.items())
diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py
--- a/pypy/rlib/runicode.py
+++ b/pypy/rlib/runicode.py
@@ -1210,74 +1210,82 @@
return builder.build(), pos
-def unicode_encode_unicode_escape(s, size, errors, errorhandler=None,
-                                  quotes=False):
- # errorhandler is not used: this function cannot cause Unicode errors
- result = StringBuilder(size)
+def make_unicode_escape_function():
+ # Python3 has two similar escape functions: One to implement
+ # encode('unicode_escape') and which outputs bytes, and unicode.__repr__
+ # which outputs unicode. They cannot share RPython code, so we generate
+ # them with the template below.
+ # Python2 does not really need this, but it reduces diffs between branches.
+ def unicode_escape(s, size, errors, errorhandler=None, quotes=False):
+ # errorhandler is not used: this function cannot cause Unicode errors
+ result = StringBuilder(size)
- if quotes:
- if s.find(u'\'') != -1 and s.find(u'\"') == -1:
- quote = ord('\"')
- result.append('"')
+ if quotes:
+ if s.find(u'\'') != -1 and s.find(u'\"') == -1:
+ quote = ord('\"')
+ result.append('"')
+ else:
+ quote = ord('\'')
+ result.append('\'')
else:
- quote = ord('\'')
- result.append('\'')
- else:
- quote = 0
+ quote = 0
- if size == 0:
- return ''
+ if size == 0:
+ return ''
- pos = 0
- while pos < size:
- ch = s[pos]
- oc = ord(ch)
+ pos = 0
+ while pos < size:
+ ch = s[pos]
+ oc = ord(ch)
- # Escape quotes
- if quotes and (oc == quote or ch == '\\'):
- result.append('\\')
- result.append(chr(oc))
- pos += 1
- continue
-
- # The following logic is enabled only if MAXUNICODE == 0xffff, or
- # for testing on top of a host CPython where sys.maxunicode == 0xffff
- if ((MAXUNICODE < 65536 or
- (not we_are_translated() and sys.maxunicode < 65536))
- and 0xD800 <= oc < 0xDC00 and pos + 1 < size):
- # Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes
- pos += 1
- oc2 = ord(s[pos])
-
- if 0xDC00 <= oc2 <= 0xDFFF:
- ucs = (((oc & 0x03FF) << 10) | (oc2 & 0x03FF)) + 0x00010000
- raw_unicode_escape_helper(result, ucs)
+ # Escape quotes
+ if quotes and (oc == quote or ch == '\\'):
+ result.append('\\')
+ result.append(chr(oc))
pos += 1
continue
- # Fall through: isolated surrogates are copied as-is
- pos -= 1
- # Map special whitespace to '\t', \n', '\r'
- if ch == '\t':
- result.append('\\t')
- elif ch == '\n':
- result.append('\\n')
- elif ch == '\r':
- result.append('\\r')
- elif ch == '\\':
- result.append('\\\\')
+ # The following logic is enabled only if MAXUNICODE == 0xffff, or
+        # for testing on top of a host Python where sys.maxunicode == 0xffff
+ if ((MAXUNICODE < 65536 or
+ (not we_are_translated() and sys.maxunicode < 65536))
+ and 0xD800 <= oc < 0xDC00 and pos + 1 < size):
+ # Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes
+ pos += 1
+ oc2 = ord(s[pos])
- # Map non-printable or non-ascii to '\xhh' or '\uhhhh'
- elif oc < 32 or oc >= 0x7F:
- raw_unicode_escape_helper(result, oc)
+ if 0xDC00 <= oc2 <= 0xDFFF:
+ ucs = (((oc & 0x03FF) << 10) | (oc2 & 0x03FF)) + 0x00010000
+ raw_unicode_escape_helper(result, ucs)
+ pos += 1
+ continue
+ # Fall through: isolated surrogates are copied as-is
+ pos -= 1
- # Copy everything else as-is
- else:
- result.append(chr(oc))
- pos += 1
+ # Map special whitespace to '\t', \n', '\r'
+ if ch == '\t':
+ result.append('\\t')
+ elif ch == '\n':
+ result.append('\\n')
+ elif ch == '\r':
+ result.append('\\r')
+ elif ch == '\\':
+ result.append('\\\\')
- if quotes:
- result.append(chr(quote))
- return result.build()
+ # Map non-printable or non-ascii to '\xhh' or '\uhhhh'
+ elif oc < 32 or oc >= 0x7F:
+ raw_unicode_escape_helper(result, oc)
+
+ # Copy everything else as-is
+ else:
+ result.append(chr(oc))
+ pos += 1
+
+ if quotes:
+ result.append(chr(quote))
+ return result.build()
+ return unicode_escape
+unicode_encode_unicode_escape = make_unicode_escape_function()
# ____________________________________________________________
# Raw unicode escape
_______________________________________________
pypy-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pypy-commit