Author: Maciej Fijalkowski <[email protected]>
Branch: optresult
Changeset: r74991:8e03e2425205
Date: 2014-12-17 19:32 +0200
http://bitbucket.org/pypy/pypy/changeset/8e03e2425205/

Log:    merge default

diff too long, truncating to 2000 out of 2146 lines

diff --git a/lib-python/2.7/test/test_collections.py 
b/lib-python/2.7/test/test_collections.py
--- a/lib-python/2.7/test/test_collections.py
+++ b/lib-python/2.7/test/test_collections.py
@@ -1108,6 +1108,16 @@
             od.popitem()
         self.assertEqual(len(od), 0)
 
+    def test_popitem_first(self):
+        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
+        shuffle(pairs)
+        od = OrderedDict(pairs)
+        while pairs:
+            self.assertEqual(od.popitem(last=False), pairs.pop(0))
+        with self.assertRaises(KeyError):
+            od.popitem(last=False)
+        self.assertEqual(len(od), 0)
+
     def test_pop(self):
         pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
         shuffle(pairs)
@@ -1179,7 +1189,11 @@
         od = OrderedDict(pairs)
         # yaml.dump(od) -->
         # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n  - [b, 
2]\n'
-        self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
+
+        # PyPy bug fix: added [0] at the end of this line, because the
+        # test is really about the 2-tuples that need to be 2-lists
+        # inside the list of 6 of them
+        self.assertTrue(all(type(pair)==list for pair in 
od.__reduce__()[1][0]))
 
     def test_reduce_not_too_fat(self):
         # do not save instance dictionary if not needed
@@ -1189,6 +1203,16 @@
         od.x = 10
         self.assertEqual(len(od.__reduce__()), 3)
 
+    def test_reduce_exact_output(self):
+        # PyPy: test that __reduce__() produces the exact same answer as
+        # CPython does, even though in the 'all_ordered_dicts' branch we
+        # have to emulate it.
+        pairs = [['c', 1], ['b', 2], ['d', 4]]
+        od = OrderedDict(pairs)
+        self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,)))
+        od.x = 10
+        self.assertEqual(od.__reduce__(), (OrderedDict, (pairs,), {'x': 10}))
+
     def test_repr(self):
         od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), 
('f', 6)])
         self.assertEqual(repr(od),
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -7,7 +7,7 @@
 
 1. check out the branch vendor/stdlib
 2. upgrade the files there
-3. update stdlib-versions.txt with the output of hg -id from the cpython repo
+3. update stdlib-version.txt with the output of hg -id from the cpython repo
 4. commit
 5. update to default/py3k
 6. create a integration branch for the new stdlib
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -1175,11 +1175,9 @@
         try:
             return self.__description
         except AttributeError:
-            try:
+            if self.__statement:
                 self.__description = self.__statement._get_description()
                 return self.__description
-            except AttributeError:
-                return None
     description = property(__get_description)
 
     def __get_lastrowid(self):
diff --git a/pypy/module/test_lib_pypy/test_datetime.py 
b/pypy/module/test_lib_pypy/test_datetime.py
--- a/pypy/module/test_lib_pypy/test_datetime.py
+++ b/pypy/module/test_lib_pypy/test_datetime.py
@@ -3,6 +3,7 @@
 from __future__ import absolute_import
 import py
 
+
 class BaseTestDatetime:
     def test_repr(self):
         print datetime
@@ -210,11 +211,13 @@
             naive == aware
         assert str(e.value) == "can't compare offset-naive and offset-aware 
times"
 
-class TestDatetimeCPython(BaseTestDatetime):
+
+class TestDatetimeHost(BaseTestDatetime):
     def setup_class(cls):
         global datetime
         import datetime
 
+
 class TestDatetimePyPy(BaseTestDatetime):
     def setup_class(cls):
         global datetime
diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py 
b/pypy/module/test_lib_pypy/test_sqlite3.py
--- a/pypy/module/test_lib_pypy/test_sqlite3.py
+++ b/pypy/module/test_lib_pypy/test_sqlite3.py
@@ -1,267 +1,285 @@
 # -*- coding: utf-8 -*-
 """Tests for _sqlite3.py"""
 
-import pytest, sys
+from __future__ import absolute_import
+import pytest
+import sys
 
-if sys.version_info < (2, 7):
-    pytest.skip("_sqlite3 requires Python 2.7")
-try:
-    import _cffi_backend
-except ImportError:
-    # On CPython, "pip install cffi".  On old PyPy's, no chance
-    pytest.skip("_sqlite3 requires _cffi_backend to be installed")
-
-from lib_pypy import _sqlite3
 
 def pytest_funcarg__con(request):
     con = _sqlite3.connect(':memory:')
     request.addfinalizer(lambda: con.close())
     return con
 
-def test_list_ddl(con):
-    """From issue996.  Mostly just looking for lack of exceptions."""
-    cursor = con.cursor()
-    cursor.execute('CREATE TABLE foo (bar INTEGER)')
-    result = list(cursor)
-    assert result == []
-    cursor.execute('INSERT INTO foo (bar) VALUES (42)')
-    result = list(cursor)
-    assert result == []
-    cursor.execute('SELECT * FROM foo')
-    result = list(cursor)
-    assert result == [(42,)]
 
-def test_connect_takes_same_positional_args_as_Connection(con):
-    from inspect import getargspec
-    clsargs = getargspec(_sqlite3.Connection.__init__).args[1:]  # ignore self
-    conargs = getargspec(_sqlite3.connect).args
-    assert clsargs == conargs
+class BaseTestSQLite:
+    def test_list_ddl(self, con):
+        """From issue996.  Mostly just looking for lack of exceptions."""
+        cursor = con.cursor()
+        cursor.execute('CREATE TABLE foo (bar INTEGER)')
+        result = list(cursor)
+        assert result == []
+        cursor.execute('INSERT INTO foo (bar) VALUES (42)')
+        result = list(cursor)
+        assert result == []
+        cursor.execute('SELECT * FROM foo')
+        result = list(cursor)
+        assert result == [(42,)]
 
-def test_total_changes_after_close(con):
-    con.close()
-    pytest.raises(_sqlite3.ProgrammingError, "con.total_changes")
+    def test_connect_takes_same_positional_args_as_Connection(self, con):
+        if not hasattr(_sqlite3, '_ffi'):
+            pytest.skip("only works for lib_pypy _sqlite3")
+        from inspect import getargspec
+        clsargs = getargspec(_sqlite3.Connection.__init__).args[1:]  # ignore 
self
+        conargs = getargspec(_sqlite3.connect).args
+        assert clsargs == conargs
 
-def test_connection_check_init():
-    class Connection(_sqlite3.Connection):
-        def __init__(self, name):
+    def test_total_changes_after_close(self, con):
+        con.close()
+        pytest.raises(_sqlite3.ProgrammingError, "con.total_changes")
+
+    def test_connection_check_init(self):
+        class Connection(_sqlite3.Connection):
+            def __init__(self, name):
+                pass
+
+        con = Connection(":memory:")
+        e = pytest.raises(_sqlite3.ProgrammingError, "con.cursor()")
+        assert '__init__' in e.value.message
+
+    def test_cursor_check_init(self, con):
+        class Cursor(_sqlite3.Cursor):
+            def __init__(self, name):
+                pass
+
+        cur = Cursor(con)
+        e = pytest.raises(_sqlite3.ProgrammingError, "cur.execute('select 1')")
+        assert '__init__' in e.value.message
+
+    def test_connection_after_close(self, con):
+        pytest.raises(TypeError, "con()")
+        con.close()
+        # raises ProgrammingError because should check closed before check args
+        pytest.raises(_sqlite3.ProgrammingError, "con()")
+
+    def test_cursor_iter(self, con):
+        cur = con.cursor()
+        with pytest.raises(StopIteration):
+            next(cur)
+
+        cur.execute('select 1')
+        next(cur)
+        with pytest.raises(StopIteration):
+            next(cur)
+
+        cur.execute('select 1')
+        con.commit()
+        next(cur)
+        with pytest.raises(StopIteration):
+            next(cur)
+
+        with pytest.raises(_sqlite3.ProgrammingError):
+            cur.executemany('select 1', [])
+        with pytest.raises(StopIteration):
+            next(cur)
+
+        cur.execute('select 1')
+        cur.execute('create table test(ing)')
+        with pytest.raises(StopIteration):
+            next(cur)
+
+        cur.execute('select 1')
+        cur.execute('insert into test values(1)')
+        con.commit()
+        with pytest.raises(StopIteration):
+            next(cur)
+
+    def test_cursor_after_close(self, con):
+        cur = con.execute('select 1')
+        cur.close()
+        con.close()
+        pytest.raises(_sqlite3.ProgrammingError, "cur.close()")
+        # raises ProgrammingError because should check closed before check args
+        pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)")
+        pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)")
+
+    @pytest.mark.skipif("not hasattr(sys, 'pypy_translation_info')")
+    def test_connection_del(self, tmpdir):
+        """For issue1325."""
+        import os
+        import gc
+        try:
+            import resource
+        except ImportError:
+            pytest.skip("needs resource module")
+
+        limit = resource.getrlimit(resource.RLIMIT_NOFILE)
+        try:
+            fds = 0
+            while True:
+                fds += 1
+                resource.setrlimit(resource.RLIMIT_NOFILE, (fds, limit[1]))
+                try:
+                    for p in os.pipe(): os.close(p)
+                except OSError:
+                    assert fds < 100
+                else:
+                    break
+
+            def open_many(cleanup):
+                con = []
+                for i in range(3):
+                    con.append(_sqlite3.connect(str(tmpdir.join('test.db'))))
+                    if cleanup:
+                        con[i] = None
+                        gc.collect(); gc.collect()
+
+            pytest.raises(_sqlite3.OperationalError, open_many, False)
+            gc.collect(); gc.collect()
+            open_many(True)
+        finally:
+            resource.setrlimit(resource.RLIMIT_NOFILE, limit)
+
+    def test_on_conflict_rollback_executemany(self, con):
+        major, minor, micro = _sqlite3.sqlite_version.split('.')[:3]
+        if (int(major), int(minor), int(micro)) < (3, 2, 2):
+            pytest.skip("requires sqlite3 version >= 3.2.2")
+        con.execute("create table foo(x, unique(x) on conflict rollback)")
+        con.execute("insert into foo(x) values (1)")
+        try:
+            con.executemany("insert into foo(x) values (?)", [[1]])
+        except _sqlite3.DatabaseError:
             pass
+        con.execute("insert into foo(x) values (2)")
+        try:
+            con.commit()
+        except _sqlite3.OperationalError:
+            pytest.fail("_sqlite3 knew nothing about the implicit ROLLBACK")
 
-    con = Connection(":memory:")
-    e = pytest.raises(_sqlite3.ProgrammingError, "con.cursor()")
-    assert '__init__' in e.value.message
+    def test_statement_arg_checking(self, con):
+        with pytest.raises(_sqlite3.Warning) as e:
+            con(123)
+        assert str(e.value) == 'SQL is of wrong type. Must be string or 
unicode.'
+        with pytest.raises(ValueError) as e:
+            con.execute(123)
+        assert str(e.value) == 'operation parameter must be str or unicode'
+        with pytest.raises(ValueError) as e:
+            con.executemany(123, 123)
+        assert str(e.value) == 'operation parameter must be str or unicode'
+        with pytest.raises(ValueError) as e:
+            con.executescript(123)
+        assert str(e.value) == 'script argument must be unicode or string.'
 
-def test_cursor_check_init(con):
-    class Cursor(_sqlite3.Cursor):
-        def __init__(self, name):
-            pass
+    def test_statement_param_checking(self, con):
+        con.execute('create table foo(x)')
+        con.execute('insert into foo(x) values (?)', [2])
+        con.execute('insert into foo(x) values (?)', (2,))
+        class seq(object):
+            def __len__(self):
+                return 1
+            def __getitem__(self, key):
+                return 2
+        con.execute('insert into foo(x) values (?)', seq())
+        del seq.__len__
+        with pytest.raises(_sqlite3.ProgrammingError):
+            con.execute('insert into foo(x) values (?)', seq())
+        with pytest.raises(_sqlite3.ProgrammingError):
+            con.execute('insert into foo(x) values (?)', {2:2})
+        with pytest.raises(ValueError) as e:
+            con.execute('insert into foo(x) values (?)', 2)
+        assert str(e.value) == 'parameters are of unsupported type'
 
-    cur = Cursor(con)
-    e = pytest.raises(_sqlite3.ProgrammingError, "cur.execute('select 1')")
-    assert '__init__' in e.value.message
+    def test_explicit_begin(self, con):
+        con.execute('BEGIN')
+        con.execute('BEGIN ')
+        con.execute('BEGIN')
+        con.commit()
+        con.execute('BEGIN')
+        con.commit()
 
-def test_connection_after_close(con):
-    pytest.raises(TypeError, "con()")
-    con.close()
-    # raises ProgrammingError because should check closed before check args
-    pytest.raises(_sqlite3.ProgrammingError, "con()")
+    def test_row_factory_use(self, con):
+        con.row_factory = 42
+        con.execute('select 1')
 
-def test_cursor_iter(con):
-    cur = con.cursor()
-    with pytest.raises(StopIteration):
-        next(cur)
+    def test_returning_blob_must_own_memory(self, con):
+        import gc
+        con.create_function("returnblob", 0, lambda: buffer("blob"))
+        cur = con.execute("select returnblob()")
+        val = cur.fetchone()[0]
+        for i in range(5):
+            gc.collect()
+            got = (val[0], val[1], val[2], val[3])
+            assert got == ('b', 'l', 'o', 'b')
+        # in theory 'val' should be a read-write buffer
+        # but it's not right now
+        if not hasattr(_sqlite3, '_ffi'):
+            val[1] = 'X'
+            got = (val[0], val[1], val[2], val[3])
+            assert got == ('b', 'X', 'o', 'b')
 
-    cur.execute('select 1')
-    next(cur)
-    with pytest.raises(StopIteration):
-        next(cur)
+    def test_description_after_fetchall(self, con):
+        cur = con.cursor()
+        assert cur.description is None
+        cur.execute("select 42").fetchall()
+        assert cur.description is not None
 
-    cur.execute('select 1')
-    con.commit()
-    next(cur)
-    with pytest.raises(StopIteration):
-        next(cur)
+    def test_executemany_lastrowid(self, con):
+        cur = con.cursor()
+        cur.execute("create table test(a)")
+        cur.executemany("insert into test values (?)", [[1], [2], [3]])
+        assert cur.lastrowid is None
 
-    with pytest.raises(_sqlite3.ProgrammingError):
-        cur.executemany('select 1', [])
-    with pytest.raises(StopIteration):
-        next(cur)
+    def test_authorizer_bad_value(self, con):
+        def authorizer_cb(action, arg1, arg2, dbname, source):
+            return 42
+        con.set_authorizer(authorizer_cb)
+        with pytest.raises(_sqlite3.OperationalError) as e:
+            con.execute('select 123')
+        major, minor, micro = _sqlite3.sqlite_version.split('.')[:3]
+        if (int(major), int(minor), int(micro)) >= (3, 6, 14):
+            assert str(e.value) == 'authorizer malfunction'
+        else:
+            assert str(e.value) == \
+                ("illegal return value (1) from the authorization function - "
+                 "should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY")
 
-    cur.execute('select 1')
-    cur.execute('create table test(ing)')
-    with pytest.raises(StopIteration):
-        next(cur)
+    def test_issue1573(self, con):
+        cur = con.cursor()
+        cur.execute(u'SELECT 1 as méil')
+        assert cur.description[0][0] == u"méil".encode('utf-8')
 
-    cur.execute('select 1')
-    cur.execute('insert into test values(1)')
-    con.commit()
-    with pytest.raises(StopIteration):
-        next(cur)
+    def test_adapter_exception(self, con):
+        def cast(obj):
+            raise ZeroDivisionError
 
-def test_cursor_after_close(con):
-    cur = con.execute('select 1')
-    cur.close()
-    con.close()
-    pytest.raises(_sqlite3.ProgrammingError, "cur.close()")
-    # raises ProgrammingError because should check closed before check args
-    pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)")
-    pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)")
+        _sqlite3.register_adapter(int, cast)
+        try:
+            cur = con.cursor()
+            cur.execute("select ?", (4,))
+            val = cur.fetchone()[0]
+            # Adapter error is ignored, and parameter is passed as is.
+            assert val == 4
+            assert type(val) is int
+        finally:
+            del _sqlite3.adapters[(int, _sqlite3.PrepareProtocol)]
 
-@pytest.mark.skipif("not hasattr(sys, 'pypy_translation_info')")
-def test_connection_del(tmpdir):
-    """For issue1325."""
-    import os
-    import gc
-    try:
-        import resource
-    except ImportError:
-        pytest.skip("needs resource module")
 
-    limit = resource.getrlimit(resource.RLIMIT_NOFILE)
-    try:
-        fds = 0
-        while True:
-            fds += 1
-            resource.setrlimit(resource.RLIMIT_NOFILE, (fds, limit[1]))
-            try:
-                for p in os.pipe(): os.close(p)
-            except OSError:
-                assert fds < 100
-            else:
-                break
-        def open_many(cleanup):
-            con = []
-            for i in range(3):
-                con.append(_sqlite3.connect(str(tmpdir.join('test.db'))))
-                if cleanup:
-                    con[i] = None
-                    gc.collect(); gc.collect()
+class TestSQLiteHost(BaseTestSQLite):
+    def setup_class(cls):
+        global _sqlite3
+        import _sqlite3
 
-        pytest.raises(_sqlite3.OperationalError, open_many, False)
-        gc.collect(); gc.collect()
-        open_many(True)
-    finally:
-        resource.setrlimit(resource.RLIMIT_NOFILE, limit)
 
-def test_on_conflict_rollback_executemany(con):
-    major, minor, micro = _sqlite3.sqlite_version.split('.')[:3]
-    if (int(major), int(minor), int(micro)) < (3, 2, 2):
-        pytest.skip("requires sqlite3 version >= 3.2.2")
-    con.execute("create table foo(x, unique(x) on conflict rollback)")
-    con.execute("insert into foo(x) values (1)")
-    try:
-        con.executemany("insert into foo(x) values (?)", [[1]])
-    except _sqlite3.DatabaseError:
-        pass
-    con.execute("insert into foo(x) values (2)")
-    try:
-        con.commit()
-    except _sqlite3.OperationalError:
-        pytest.fail("_sqlite3 knew nothing about the implicit ROLLBACK")
+class TestSQLitePyPy(BaseTestSQLite):
+    def setup_class(cls):
+        if sys.version_info < (2, 7):
+            pytest.skip("_sqlite3 requires Python 2.7")
 
-def test_statement_arg_checking(con):
-    with pytest.raises(_sqlite3.Warning) as e:
-        con(123)
-    assert str(e.value) == 'SQL is of wrong type. Must be string or unicode.'
-    with pytest.raises(ValueError) as e:
-        con.execute(123)
-    assert str(e.value) == 'operation parameter must be str or unicode'
-    with pytest.raises(ValueError) as e:
-        con.executemany(123, 123)
-    assert str(e.value) == 'operation parameter must be str or unicode'
-    with pytest.raises(ValueError) as e:
-        con.executescript(123)
-    assert str(e.value) == 'script argument must be unicode or string.'
+        try:
+            import _cffi_backend
+        except ImportError:
+            # On CPython, "pip install cffi".  On old PyPy's, no chance
+            pytest.skip("_sqlite3 requires _cffi_backend to be installed")
 
-def test_statement_param_checking(con):
-    con.execute('create table foo(x)')
-    con.execute('insert into foo(x) values (?)', [2])
-    con.execute('insert into foo(x) values (?)', (2,))
-    class seq(object):
-        def __len__(self):
-            return 1
-        def __getitem__(self, key):
-            return 2
-    con.execute('insert into foo(x) values (?)', seq())
-    del seq.__len__
-    with pytest.raises(_sqlite3.ProgrammingError):
-        con.execute('insert into foo(x) values (?)', seq())
-    with pytest.raises(_sqlite3.ProgrammingError):
-        con.execute('insert into foo(x) values (?)', {2:2})
-    with pytest.raises(ValueError) as e:
-        con.execute('insert into foo(x) values (?)', 2)
-    assert str(e.value) == 'parameters are of unsupported type'
-
-def test_explicit_begin(con):
-    con.execute('BEGIN')
-    con.execute('BEGIN ')
-    con.execute('BEGIN')
-    con.commit()
-    con.execute('BEGIN')
-    con.commit()
-
-def test_row_factory_use(con):
-    con.row_factory = 42
-    con.execute('select 1')
-
-def test_returning_blob_must_own_memory(con):
-    import gc
-    con.create_function("returnblob", 0, lambda: buffer("blob"))
-    cur = con.execute("select returnblob()")
-    val = cur.fetchone()[0]
-    for i in range(5):
-        gc.collect()
-        got = (val[0], val[1], val[2], val[3])
-        assert got == ('b', 'l', 'o', 'b')
-    # in theory 'val' should be a read-write buffer
-    # but it's not right now
-    pytest.skip("in theory 'val' should be a read-write buffer")
-    val[1] = 'X'
-    got = (val[0], val[1], val[2], val[3])
-    assert got == ('b', 'X', 'o', 'b')
-
-def test_description_after_fetchall(con):
-    cur = con.cursor()
-    cur.execute("select 42").fetchall()
-    assert cur.description is not None
-
-def test_executemany_lastrowid(con):
-    cur = con.cursor()
-    cur.execute("create table test(a)")
-    cur.executemany("insert into test values (?)", [[1], [2], [3]])
-    assert cur.lastrowid is None
-
-
-def test_authorizer_bad_value(con):
-    def authorizer_cb(action, arg1, arg2, dbname, source):
-        return 42
-    con.set_authorizer(authorizer_cb)
-    with pytest.raises(_sqlite3.OperationalError) as e:
-        con.execute('select 123')
-    major, minor, micro = _sqlite3.sqlite_version.split('.')[:3]
-    if (int(major), int(minor), int(micro)) >= (3, 6, 14):
-        assert str(e.value) == 'authorizer malfunction'
-    else:
-        assert str(e.value) == \
-            ("illegal return value (1) from the authorization function - "
-             "should be SQLITE_OK, SQLITE_IGNORE, or SQLITE_DENY")
-
-
-def test_issue1573(con):
-    cur = con.cursor()
-    cur.execute(u'SELECT 1 as méil')
-    assert cur.description[0][0] == u"méil".encode('utf-8')
-
-def test_adapter_exception(con):
-    def cast(obj):
-        raise ZeroDivisionError
-
-    _sqlite3.register_adapter(int, cast)
-    try:
-        cur = con.cursor()
-        cur.execute("select ?", (4,))
-        val = cur.fetchone()[0]
-        # Adapter error is ignored, and parameter is passed as is.
-        assert val == 4
-        assert type(val) is int
-    finally:
-        del _sqlite3.adapters[(int, _sqlite3.PrepareProtocol)]
+        global _sqlite3
+        from lib_pypy import _sqlite3
diff --git a/rpython/jit/metainterp/compile.py 
b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -142,7 +142,8 @@
     part.operations.append(ResOperation(rop.LABEL, jumpargs, 
descr=jitcell_token))
 
     try:
-        start_state = optimize_trace(metainterp_sd, part, enable_opts)
+        start_state = optimize_trace(metainterp_sd, part, enable_opts,
+                                     export_state=True)
     except InvalidLoop:
         return None
     target_token = part.operations[0].getdescr()
@@ -170,7 +171,7 @@
 
         try:
             optimize_trace(metainterp_sd, part, enable_opts,
-                           start_state=start_state)
+                           start_state=start_state, export_state=False)
         except InvalidLoop:
             return None
 
@@ -224,7 +225,7 @@
     assert label.getopnum() == rop.LABEL
     try:
         optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts,
-                       start_state=start_state)
+                       start_state=start_state, export_state=False)
     except InvalidLoop:
         # Fall back on jumping to preamble
         target_token = label.getdescr()
@@ -235,7 +236,8 @@
         try:
             optimize_trace(metainterp_sd, part,
                            jitdriver_sd.warmstate.enable_opts,
-                           inline_short_preamble=False, 
start_state=start_state)
+                           inline_short_preamble=False, 
start_state=start_state,
+                           export_state=False)
         except InvalidLoop:
             return None
     assert part.operations[-1].getopnum() != rop.LABEL
@@ -807,7 +809,7 @@
         inline_short_preamble = True
     try:
         state = optimize_trace(metainterp_sd, new_trace, state.enable_opts,
-                               inline_short_preamble)
+                               inline_short_preamble, export_state=True)
     except InvalidLoop:
         debug_print("compile_new_bridge: got an InvalidLoop")
         # XXX I am fairly convinced that optimize_bridge cannot actually raise
diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py 
b/rpython/jit/metainterp/optimizeopt/__init__.py
--- a/rpython/jit/metainterp/optimizeopt/__init__.py
+++ b/rpython/jit/metainterp/optimizeopt/__init__.py
@@ -48,7 +48,8 @@
     return optimizations, unroll
 
 def optimize_trace(metainterp_sd, loop, enable_opts,
-                   inline_short_preamble=True, start_state=None):
+                   inline_short_preamble=True, start_state=None,
+                   export_state=True):
     """Optimize loop.operations to remove internal overheadish operations.
     """
 
@@ -59,7 +60,8 @@
         optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts)
         if unroll:
             return optimize_unroll(metainterp_sd, loop, optimizations,
-                                   inline_short_preamble, start_state)
+                                   inline_short_preamble, start_state,
+                                   export_state)
         else:
             optimizer = Optimizer(metainterp_sd, loop, optimizations)
             optimizer.propagate_all_forward()
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py 
b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py
@@ -45,7 +45,8 @@
             part.operations = operations
 
             self.add_guard_future_condition(part)
-            state = self._do_optimize_loop(part, None, state)
+            state = self._do_optimize_loop(part, None, state,
+                                           export_state=True)
             if part.operations[-1].getopnum() == rop.LABEL:
                 last_label = [part.operations.pop()]
             else:
@@ -497,7 +498,8 @@
 
 class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel):
 
-    def _do_optimize_loop(self, loop, call_pure_results, state):
+    def _do_optimize_loop(self, loop, call_pure_results, state,
+                          export_state=False):
         from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll
         from rpython.jit.metainterp.optimizeopt.util import args_dict
         from rpython.jit.metainterp.optimizeopt.pure import OptPure
@@ -505,7 +507,7 @@
         self.loop = loop
         loop.call_pure_results = args_dict()
         metainterp_sd = FakeMetaInterpStaticData(self.cpu)
-        return optimize_unroll(metainterp_sd, loop, [OptRewrite(), 
OptRenameStrlen(), OptHeap(), OptPure()], True, state)
+        return optimize_unroll(metainterp_sd, loop, [OptRewrite(), 
OptRenameStrlen(), OptHeap(), OptPure()], True, state, export_state)
 
     def test_optimizer_renaming_boxes1(self):
         ops = """
@@ -543,8 +545,8 @@
         self.optimize_loop(ops, expected)
 
 
-class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin):
+class XxxTestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin):
     pass
 
-class TestOptimizerRenamingBoxesLLtype(BaseTestOptimizerRenamingBoxes, 
LLtypeMixin):
+class XxxTestOptimizerRenamingBoxesLLtype(BaseTestOptimizerRenamingBoxes, 
LLtypeMixin):
     pass
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -105,7 +105,7 @@
         if loop.operations[-1].getopnum() == rop.JUMP:
             loop.operations[-1].setdescr(token)
         expected = convert_old_style_to_targets(self.parse(optops), jump=True)
-        self._do_optimize_loop(loop, call_pure_results)
+        self._do_optimize_loop(loop, call_pure_results, export_state=False)
         #print '\n'.join([str(o) for o in loop.operations])
         self.assert_equal(loop, expected)
 
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -2414,16 +2414,16 @@
         guard_true(i3) []
         i4 = int_neg(i2)
         setfield_gc(p1, i2, descr=valuedescr)
-        i7 = same_as_i(i2) # This same_as should be killed by backend
-        i6 = same_as_i(i4)
-        jump(p1, i1, i2, i4, i6)
-        """
-        expected = """
-        [p1, i1, i2, i4, i5]
+        #i7 = same_as(i2) # This same_as should be killed by backend
+        #i6 = same_as(i4)
+        jump(p1, i1, i2, i4)
+        """
+        expected = """
+        [p1, i1, i2, i4]
         setfield_gc(p1, i1, descr=valuedescr)
         guard_true(i4) []
         setfield_gc(p1, i2, descr=valuedescr)
-        jump(p1, i1, i2, i5, i5)
+        jump(p1, i1, i2, 1)
         """
         self.optimize_loop(ops, expected, preamble)
 
@@ -2446,15 +2446,15 @@
         i4 = int_neg(i2)
         setfield_gc(p1, NULL, descr=nextdescr)
         escape_n()
-        i5 = same_as(i4)
-        jump(p1, i2, i4, i5)
-        """
-        expected = """
-        [p1, i2, i4, i5]
+        #i5 = same_as(i4)
+        jump(p1, i2, i4)
+        """
+        expected = """
+        [p1, i2, i4]
         guard_true(i4) [p1]
         setfield_gc(p1, NULL, descr=nextdescr)
         escape_n()
-        jump(p1, i2, i5, i5)
+        jump(p1, i2, 1)
         """
         self.optimize_loop(ops, expected, preamble)
 
@@ -2476,15 +2476,15 @@
         i4 = int_neg(i2)
         setfield_gc(p1, NULL, descr=nextdescr)
         escape_n()
-        i5 = same_as(i4)
-        jump(p1, i2, i4, i5)
-        """
-        expected = """
-        [p1, i2, i4, i5]
+        #i5 = same_as(i4)
+        jump(p1, i2, i4)
+        """
+        expected = """
+        [p1, i2, i4]
         guard_true(i4) [i2, p1]
         setfield_gc(p1, NULL, descr=nextdescr)
         escape_n()
-        jump(p1, i2, i5, i5)
+        jump(p1, i2, 1)
         """
         self.optimize_loop(ops, expected)
 
@@ -2507,17 +2507,17 @@
         guard_true(i5) []
         i4 = int_neg(i2)
         setfield_gc(p1, i2, descr=valuedescr)
-        i8 = same_as(i2) # This same_as should be killed by backend
-        i7 = same_as(i4)
-        jump(p1, i1, i2, i4, i7)
-        """
-        expected = """
-        [p1, i1, i2, i4, i7]
+        #i8 = same_as(i2) # This same_as should be killed by backend
+        #i7 = same_as(i4)
+        jump(p1, i1, i2, i4)
+        """
+        expected = """
+        [p1, i1, i2, i4]
         setfield_gc(p1, i1, descr=valuedescr)
         i5 = int_eq(i4, 5)
         guard_true(i5) []
         setfield_gc(p1, i2, descr=valuedescr)
-        jump(p1, i1, i2, i7, i7)
+        jump(p1, i1, i2, 5)
         """
         self.optimize_loop(ops, expected, preamble)
 
@@ -2735,16 +2735,16 @@
         p2 = new_with_vtable(ConstClass(node_vtable))
         setfield_gc(p2, p4, descr=nextdescr)
         setfield_gc(p1, p2, descr=nextdescr)
-        i101 = same_as(i4)
-        jump(p1, i2, i4, p4, i101)
-        """
-        expected = """
-        [p1, i2, i4, p4, i5]
+        #i101 = same_as(i4)
+        jump(p1, i2, i4, p4)
+        """
+        expected = """
+        [p1, i2, i4, p4]
         guard_true(i4) [p1, p4]
         p2 = new_with_vtable(ConstClass(node_vtable))
         setfield_gc(p2, p4, descr=nextdescr)
         setfield_gc(p1, p2, descr=nextdescr)
-        jump(p1, i2, i5, p4, i5)
+        jump(p1, i2, 1, p4)
         """
         self.optimize_loop(ops, expected, preamble)
 
@@ -3620,7 +3620,7 @@
         setfield_gc(p1, i1, descr=valuedescr)
         i3 = call_assembler(i1, descr=asmdescr)
         setfield_gc(p1, i3, descr=valuedescr)
-        i143 = same_as(i3) # Should be killed by backend
+        #i143 = same_as(i3) # Should be killed by backend
         jump(p1, i4, i3)
         '''
         self.optimize_loop(ops, ops, preamble)
@@ -3645,18 +3645,18 @@
         jump(p1, i4, i3)
         '''
         expected = '''
-        [p1, i4, i3, i5]
-        setfield_gc(p1, i5, descr=valuedescr)
-        jump(p1, i3, i5, i5)
+        [p1, i4, i3]
+        setfield_gc(p1, i3, descr=valuedescr)
+        jump(p1, i3, i3)
         '''
         preamble = '''
         [p1, i1, i4]
         setfield_gc(p1, i1, descr=valuedescr)
         i3 = call(p1, descr=plaincalldescr)
         setfield_gc(p1, i3, descr=valuedescr)
-        i148 = same_as(i3)
-        i147 = same_as(i3)
-        jump(p1, i4, i3, i148)
+        #i148 = same_as(i3)
+        #i147 = same_as(i3)
+        jump(p1, i4, i3)
         '''
         self.optimize_loop(ops, expected, preamble)
 
@@ -3670,17 +3670,17 @@
         jump(p1, i4, i3)
         '''
         expected = '''
-        [p1, i4, i3, i5]
+        [p1, i4, i3]
         setfield_gc(p1, i4, descr=valuedescr)
-        jump(p1, i3, i5, i5)
+        jump(p1, i3, i3)
         '''
         preamble = '''
         [p1, i1, i4]
         setfield_gc(p1, i1, descr=valuedescr)
         i3 = call(p1, descr=plaincalldescr)
         setfield_gc(p1, i1, descr=valuedescr)
-        i151 = same_as(i3)
-        jump(p1, i4, i3, i151)
+        #i151 = same_as(i3)
+        jump(p1, i4, i3)
         '''
         self.optimize_loop(ops, expected, preamble)
 
@@ -3700,14 +3700,14 @@
         escape_n(i1)
         escape_n(i2)
         i4 = call(123456, 4, i0, 6, descr=plaincalldescr)
-        i153 = same_as(i4)
-        jump(i0, i4, i153)
+        #i153 = same_as(i4)
+        jump(i0, i4)
         '''
         expected = '''
-        [i0, i4, i5]
+        [i0, i4]
         escape_n(42)
         escape_n(i4)
-        jump(i0, i5, i5)
+        jump(i0, i4)
         '''
         self.optimize_loop(ops, expected, preamble, call_pure_results)
 
@@ -3731,14 +3731,14 @@
         escape_n(i2)
         i4 = call(123456, 4, i0, 6, descr=plaincalldescr)
         guard_no_exception() []
-        i155 = same_as(i4)
-        jump(i0, i4, i155)
+        #i155 = same_as(i4)
+        jump(i0, i4)
         '''
         expected = '''
-        [i0, i2, i3]
+        [i0, i2]
         escape_n(42)
         escape_n(i2)
-        jump(i0, i3, i3)
+        jump(i0, i2)
         '''
         self.optimize_loop(ops, expected, preamble, call_pure_results)
 
@@ -5864,11 +5864,12 @@
         [p0]
         p1 = getfield_gc(p0, descr=valuedescr)
         setfield_gc(p0, p0, descr=valuedescr)
-        p4450 = same_as(p0) # Should be killed by backend
+        #p4450 = same_as(p0) # Should be killed by backend
         jump(p0)
         """
         expected = """
         [p0]
+        p1 = getfield_gc(p0, descr=valuedescr)
         setfield_gc(p0, p0, descr=valuedescr)
         jump(p0)
         """
@@ -6264,13 +6265,13 @@
         [p1, i1, i2, i3]
         escape(i3)
         i4 = int_sub(i2, i1)
-        i5 = same_as(i4)
-        jump(p1, i1, i2, i4, i5)
-        """
-        expected = """
-        [p1, i1, i2, i3, i4]
+        #i5 = same_as(i4)
+        jump(p1, i1, i2, i4)
+        """
+        expected = """
+        [p1, i1, i2, i3]
         escape(i3)
-        jump(p1, i1, i2, i4, i4)
+        jump(p1, i1, i2, i3)
         """
         self.optimize_strunicode_loop(ops, expected, preamble)
 
@@ -6290,14 +6291,15 @@
         escape(i5)
         i4 = int_sub(i2, i1)
         setfield_gc(p2, i4, descr=valuedescr)
-        i8 = same_as(i4)
-        jump(p1, i1, i2, p2, i8, i4)
-        """
-        expected = """
-        [p1, i1, i2, p2, i5, i6]
-        escape(i5)
-        setfield_gc(p2, i6, descr=valuedescr)
-        jump(p1, i1, i2, p2, i6, i6)
+        #i8 = same_as(i4)
+        jump(p1, i1, i2, p2, i4)
+        """
+        expected = """
+        [p1, i1, i2, p2, i5]
+        i311 = getfield_gc(p2, descr=valuedescr)
+        escape(i311)
+        setfield_gc(p2, i5, descr=valuedescr)
+        jump(p1, i1, i2, p2, i5)
         """
         self.optimize_strunicode_loop(ops, expected, preamble)
 
@@ -7104,19 +7106,18 @@
         p188 = getarrayitem_gc(p187, 42, descr=<GcPtrArrayDescr>)
         guard_value(p188, ConstPtr(myptr)) []
         p25 = getfield_gc(ConstPtr(myptr), descr=otherdescr)
-        p26 = same_as(p25)
-        jump(p25, p187, i184, p26)
+        jump(p25, p187, i184)
         """
         short = """
         [p1, p187, i184]
         p188 = getarrayitem_gc(p187, 42, descr=<GcPtrArrayDescr>)
         guard_value(p188, ConstPtr(myptr)) []
         p25 = getfield_gc(ConstPtr(myptr), descr=otherdescr)
-        jump(p1, p187, i184, p25)
-        """
-        expected = """
-        [p25, p187, i184, p189]
-        jump(p189, p187, i184, p189)
+        jump(p25, p187, i184)
+        """
+        expected = """
+        [p25, p187, i184]
+        jump(p25, p187, i184)
         """
         self.optimize_loop(ops, expected, preamble, expected_short=short)
 
@@ -7225,14 +7226,16 @@
         i3 = int_add(i1, i2)
         setfield_gc(p0, ii, descr=valuedescr)
         setfield_gc(p1, ii, descr=otherdescr)
-        jump(p0, p1, ii2, ii, ii, ii)
-        """
-        expected = """
-        [p0, p1, ii, ii2, i1, i2]
+        jump(p0, p1, ii2, ii)
+        """
+        expected = """
+        [p0, p1, ii, ii2]
+        i1 = getfield_gc(p0, descr=valuedescr)
+        i2 = getfield_gc(p1, descr=otherdescr)
         i3 = int_add(i1, i2)
         setfield_gc(p0, ii, descr=valuedescr)
         setfield_gc(p1, ii, descr=otherdescr)
-        jump(p0, p1, ii2, ii, ii, ii)
+        jump(p0, p1, ii2, ii)
         """
         self.optimize_loop(ops, expected)
 
@@ -7723,10 +7726,10 @@
         jump(p1)
         """
         expected = """
-        [p0, p1]
+        [p0]
         call(p0, descr=nonwritedescr)
-        call(p1, descr=writeadescr)
-        jump(p1, p1)
+        call(p0, descr=writeadescr)
+        jump(p0)
         """
         self.optimize_loop(ops, expected)
 
@@ -7837,10 +7840,11 @@
         jump(p5, p6)
         """
         expected = """
-        [p5, p6, i10, i11]
+        [p5, p6, i10]
+        i11 = getfield_gc(p6, descr=nextdescr)
         call(i10, i11, descr=nonwritedescr)
         setfield_gc(p6, i10, descr=nextdescr)
-        jump(p5, p6, i10, i10)
+        jump(p5, p6, i10)
         """
         self.optimize_loop(ops, expected)
 
@@ -7856,11 +7860,12 @@
         jump(p5, p6)
         """
         expected = """
-        [p5, p6, i14, i12, i10]
-        i13 = int_add(i14, 7)
+        [p5, p6, i12, i10]
+        i11 = getfield_gc(p6, descr=nextdescr)
+        i13 = int_add(i11, 7)
         call(i12, i13, descr=nonwritedescr)
         setfield_gc(p6, i10, descr=nextdescr)
-        jump(p5, p6, i10, i12, i10)
+        jump(p5, p6, i12, i10)
         """
         self.optimize_loop(ops, expected)
 
@@ -7980,9 +7985,9 @@
         jump(i1, i3)
         """
         expected = """
-        [i1, i2, i6, i3]
+        [i1, i2, i6]
         call(i6, descr=nonwritedescr)
-        jump(i1, i3, i6, i3)
+        jump(i1, i2, i6)
         """
         short = """
         [i1, i2]
@@ -7990,7 +7995,7 @@
         i4 = int_add(i3, i3)
         i5 = int_add(i4, i4)
         i6 = int_add(i5, i5)
-        jump(i1, i2, i6, i3)
+        jump(i1, i3, i6)
         """
         self.optimize_loop(ops, expected, expected_short=short)
 
@@ -8145,19 +8150,18 @@
         call(i2, descr=nonwritedescr)
         setfield_gc(p22, i1, descr=valuedescr)
         guard_nonnull_class(p18, ConstClass(node_vtable)) []
-        i10 = same_as(i1)
-        jump(p22, p18, i1, i10)
+        jump(p22, p18, i1)
         """
         short = """
         [p22, p18, i1]
-        i2 = getfield_gc(p22, descr=valuedescr)
-        jump(p22, p18, i1, i2)
-        """
-        expected = """
-        [p22, p18, i1, i2]
-        call(i2, descr=nonwritedescr)
-        setfield_gc(p22, i1, descr=valuedescr)
-        jump(p22, p18, i1, i1)
+        jump(p22, p18, i1)
+        """
+        expected = """
+        [p22, p18, i2]
+        i1 = getfield_gc(p22, descr=valuedescr)
+        call(i1, descr=nonwritedescr)
+        setfield_gc(p22, i2, descr=valuedescr)
+        jump(p22, p18, i2)
         """
         self.optimize_loop(ops, expected, preamble, expected_short=short)
 
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py 
b/rpython/jit/metainterp/optimizeopt/test/test_util.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_util.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py
@@ -390,7 +390,8 @@
         assert equaloplists(optimized.operations,
                             expected.operations, False, remap, text_right)
 
-    def _do_optimize_loop(self, loop, call_pure_results, start_state=None):
+    def _do_optimize_loop(self, loop, call_pure_results, start_state=None,
+                          export_state=False):
         from rpython.jit.metainterp.optimizeopt import optimize_trace
         from rpython.jit.metainterp.optimizeopt.util import args_dict
 
@@ -406,7 +407,8 @@
             metainterp_sd.callinfocollection = self.callinfocollection
         #
         return optimize_trace(metainterp_sd, loop, self.enable_opts,
-                              start_state=start_state)
+                              start_state=start_state,
+                              export_state=export_state)
 
     def unroll_and_optimize(self, loop, call_pure_results=None):
         metainterp_sd = FakeMetaInterpStaticData(self.cpu)
@@ -431,9 +433,8 @@
         preamble.operations = [ResOperation(rop.LABEL, inputargs, 
descr=TargetToken(token))] + \
                               operations +  \
                               [ResOperation(rop.LABEL, jump_args, descr=token)]
-        start_state = self._do_optimize_loop(preamble, call_pure_results)
-        import pdb
-        pdb.set_trace()
+        start_state = self._do_optimize_loop(preamble, call_pure_results,
+                                             export_state=True)
 
         assert preamble.operations[-1].getopnum() == rop.LABEL
 
@@ -447,7 +448,8 @@
         assert loop.operations[0].getopnum() == rop.LABEL
         loop.inputargs = loop.operations[0].getarglist()
 
-        self._do_optimize_loop(loop, call_pure_results, start_state)
+        self._do_optimize_loop(loop, call_pure_results, start_state,
+                               export_state=False)
         extra_same_as = []
         while loop.operations[0].getopnum() != rop.LABEL:
             extra_same_as.append(loop.operations[0])
diff --git 
a/rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py 
b/rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
rename from 
rpython/jit/metainterp/optimizeopt/test/test_disable_optimizations.py
rename to rpython/jit/metainterp/optimizeopt/test/test_zdisable_opts.py
diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py 
b/rpython/jit/metainterp/optimizeopt/unroll.py
--- a/rpython/jit/metainterp/optimizeopt/unroll.py
+++ b/rpython/jit/metainterp/optimizeopt/unroll.py
@@ -17,10 +17,11 @@
 # FIXME: Introduce some VirtualOptimizer super class instead
 
 def optimize_unroll(metainterp_sd, loop, optimizations,
-                    inline_short_preamble=True, start_state=None):
+                    inline_short_preamble=True, start_state=None,
+                    export_state=True):
     opt = UnrollOptimizer(metainterp_sd, loop, optimizations)
     opt.inline_short_preamble = inline_short_preamble
-    return opt.propagate_all_forward(start_state)
+    return opt.propagate_all_forward(start_state, export_state)
 
 
 class UnrollableOptimizer(Optimizer):
@@ -71,7 +72,7 @@
         prev = self.fix_snapshot(jump_args, snapshot.prev)
         return Snapshot(prev, new_snapshot_args)
 
-    def propagate_all_forward(self, starting_state):
+    def propagate_all_forward(self, starting_state, export_state=True):
         loop = self.optimizer.loop
         self.optimizer.clear_newoperations()
 
@@ -146,11 +147,14 @@
             self.close_bridge(start_label)
 
         self.optimizer.flush()
-        KillHugeIntBounds(self.optimizer).apply()
+        if export_state:
+            KillHugeIntBounds(self.optimizer).apply()
 
         loop.operations = self.optimizer.get_newoperations()
-        final_state = self.export_state(stop_label)
-        final_state.dump(self.optimizer.metainterp_sd)
+        if export_state:
+            final_state = self.export_state(stop_label)
+        else:
+            final_state = None
         loop.operations.append(stop_label)
         return final_state
 
@@ -179,8 +183,7 @@
             for box in self.inputargs:
                 self.boxes_created_this_iteration[box] = None
 
-        short_boxes = ShortBoxes(self.optimizer, inputargs,
-                                 self.boxes_created_this_iteration)
+        short_boxes = ShortBoxes(self.optimizer, inputargs)
 
         self.optimizer.clear_newoperations()
         for i in range(len(original_jump_args)):
@@ -317,7 +320,9 @@
         jumpop.initarglist(jumpargs)
 
         # Inline the short preamble at the end of the loop
-        jmp_to_short_args = virtual_state.make_inputargs(values, 
self.optimizer, keyboxes=True)
+        jmp_to_short_args = virtual_state.make_inputargs(values,
+                                                         self.optimizer,
+                                                         keyboxes=True)
         assert len(short_inputargs) == len(jmp_to_short_args)
         args = {}
         for i in range(len(short_inputargs)):
diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py 
b/rpython/jit/metainterp/optimizeopt/virtualstate.py
--- a/rpython/jit/metainterp/optimizeopt/virtualstate.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py
@@ -587,12 +587,11 @@
 
 
 class ShortBoxes(object):
-    def __init__(self, optimizer, surviving_boxes, available_boxes=None):
+    def __init__(self, optimizer, surviving_boxes):
         self.potential_ops = {}
         self.alternatives = {}
         self.synthetic = {}
         self.optimizer = optimizer
-        self.available_boxes = available_boxes
         self.assumed_classes = {}
 
         assert surviving_boxes is not None
@@ -679,8 +678,6 @@
             return
         if op in self.short_boxes_in_production:
             raise BoxNotProducable
-        if self.available_boxes is not None and op not in self.available_boxes:
-            raise BoxNotProducable
         self.short_boxes_in_production[op] = None
 
         if op in self.potential_ops:
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -330,6 +330,20 @@
     keepalive_until_here(newp)
     return newp
 
[email protected]_look_inside
[email protected]()
+def ll_arrayclear(p):
+    # Equivalent to memset(array, 0).  Only for GcArray(primitive-type) for 
now.
+    from rpython.rlib.objectmodel import keepalive_until_here
+
+    length = len(p)
+    ARRAY = lltype.typeOf(p).TO
+    offset = llmemory.itemoffsetof(ARRAY, 0)
+    dest_addr = llmemory.cast_ptr_to_adr(p) + offset
+    llmemory.raw_memclear(dest_addr, llmemory.sizeof(ARRAY.OF) * length)
+    keepalive_until_here(p)
+
+
 def no_release_gil(func):
     func._dont_inline_ = True
     func._no_release_gil_ = True
diff --git a/rpython/rlib/test/test_rgc.py b/rpython/rlib/test/test_rgc.py
--- a/rpython/rlib/test/test_rgc.py
+++ b/rpython/rlib/test/test_rgc.py
@@ -158,6 +158,16 @@
     assert a2[2].x == 3
     assert a2[2].y == 15
 
+def test_ll_arrayclear():
+    TYPE = lltype.GcArray(lltype.Signed)
+    a1 = lltype.malloc(TYPE, 10)
+    for i in range(10):
+        a1[i] = 100 + i
+    rgc.ll_arrayclear(a1)
+    assert len(a1) == 10
+    for i in range(10):
+        assert a1[i] == 0
+
 def test__contains_gcptr():
     assert not rgc._contains_gcptr(lltype.Signed)
     assert not rgc._contains_gcptr(
diff --git a/rpython/rtyper/lltypesystem/rdict.py 
b/rpython/rtyper/lltypesystem/rdict.py
--- a/rpython/rtyper/lltypesystem/rdict.py
+++ b/rpython/rtyper/lltypesystem/rdict.py
@@ -669,6 +669,7 @@
     d.num_items = 0
     d.resize_counter = DICT_INITSIZE * 2
     return d
+DictRepr.ll_newdict = staticmethod(ll_newdict)
 
 def ll_newdict_size(DICT, length_estimate):
     length_estimate = (length_estimate // 2) * 3
@@ -692,26 +693,6 @@
     pass
 
 
-def rtype_r_dict(hop, i_force_non_null=None):
-    r_dict = hop.r_result
-    if not r_dict.custom_eq_hash:
-        raise TyperError("r_dict() call does not return an r_dict instance")
-    v_eqfn = hop.inputarg(r_dict.r_rdict_eqfn, arg=0)
-    v_hashfn = hop.inputarg(r_dict.r_rdict_hashfn, arg=1)
-    if i_force_non_null is not None:
-        assert i_force_non_null == 2
-        hop.inputarg(lltype.Void, arg=2)
-    cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
-    hop.exception_cannot_occur()
-    v_result = hop.gendirectcall(ll_newdict, cDICT)
-    if r_dict.r_rdict_eqfn.lowleveltype != lltype.Void:
-        cname = hop.inputconst(lltype.Void, 'fnkeyeq')
-        hop.genop('setfield', [v_result, cname, v_eqfn])
-    if r_dict.r_rdict_hashfn.lowleveltype != lltype.Void:
-        cname = hop.inputconst(lltype.Void, 'fnkeyhash')
-        hop.genop('setfield', [v_result, cname, v_hashfn])
-    return v_result
-
 # ____________________________________________________________
 #
 #  Iteration.
diff --git a/rpython/rtyper/lltypesystem/rordereddict.py 
b/rpython/rtyper/lltypesystem/rordereddict.py
--- a/rpython/rtyper/lltypesystem/rordereddict.py
+++ b/rpython/rtyper/lltypesystem/rordereddict.py
@@ -28,8 +28,8 @@
 #    }
 #
 #    struct dicttable {
-#        int num_items;
-#        int num_used_items;
+#        int num_live_items;
+#        int num_ever_used_items;
 #        int resize_counter;
 #        {byte, short, int, long} *indexes;
 #        dictentry *entries;
@@ -72,6 +72,8 @@
         'must_clear_value': (isinstance(DICTVALUE, lltype.Ptr)
                              and DICTVALUE._needsgc()),
         }
+    if getattr(ll_eq_function, 'no_direct_compare', False):
+        entrymeths['no_direct_compare'] = True
 
     # * the key
     entryfields.append(("key", DICTKEY))
@@ -113,8 +115,8 @@
     DICTENTRY = lltype.Struct("odictentry", *entryfields)
     DICTENTRYARRAY = lltype.GcArray(DICTENTRY,
                                     adtmeths=entrymeths)
-    fields =          [ ("num_items", lltype.Signed),
-                        ("num_used_items", lltype.Signed),
+    fields =          [ ("num_live_items", lltype.Signed),
+                        ("num_ever_used_items", lltype.Signed),
                         ("resize_counter", lltype.Signed),
                         ("indexes", llmemory.GCREF),
                         ("lookup_function_no", lltype.Signed),
@@ -156,7 +158,7 @@
 
     def __init__(self, rtyper, key_repr, value_repr, dictkey, dictvalue,
                  custom_eq_hash=None, force_non_null=False):
-        assert not force_non_null
+        #assert not force_non_null
         self.rtyper = rtyper
         self.finalized = False
         self.DICT = lltype.GcForwardReference()
@@ -217,7 +219,7 @@
         #dictobj = getattr(dictobj, '__self__', dictobj)
         if dictobj is None:
             return lltype.nullptr(self.DICT)
-        if not isinstance(dictobj, (dict, objectmodel.r_ordereddict)):
+        if not isinstance(dictobj, (dict, objectmodel.r_dict)):
             raise TypeError("expected a dict: %r" % (dictobj,))
         try:
             key = Constant(dictobj)
@@ -231,8 +233,8 @@
             if r_key.lowleveltype == llmemory.Address:
                 raise TypeError("No prebuilt dicts of address keys")
             r_value = self.value_repr
-            if isinstance(dictobj, objectmodel.r_ordereddict):
-                
+            if isinstance(dictobj, objectmodel.r_dict):
+
                 if self.r_rdict_eqfn.lowleveltype != lltype.Void:
                     l_fn = self.r_rdict_eqfn.convert_const(dictobj.key_eq)
                     l_dict.fnkeyeq = l_fn
@@ -416,6 +418,7 @@
 TYPE_LONG  = lltype.Unsigned
 
 def ll_malloc_indexes_and_choose_lookup(d, n):
+    # keep in sync with ll_clear_indexes() below
     if n <= 256:
         d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF,
                                            lltype.malloc(DICTINDEX_BYTE.TO, n,
@@ -437,6 +440,16 @@
                                                          zero=True))
         d.lookup_function_no = FUNC_LONG
 
+def ll_clear_indexes(d, n):
+    if n <= 256:
+        rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_BYTE, d.indexes))
+    elif n <= 65536:
+        rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_SHORT, d.indexes))
+    elif IS_64BIT and n <= 2 ** 32:
+        rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_INT, d.indexes))
+    else:
+        rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes))
+
 def ll_call_insert_clean_function(d, hash, i):
     DICT = lltype.typeOf(d).TO
     if d.lookup_function_no == FUNC_BYTE:
@@ -492,11 +505,11 @@
     return objectmodel.hlinvoke(DICT.r_rdict_eqfn, d.fnkeyeq, key1, key2)
 
 def ll_dict_len(d):
-    return d.num_items
+    return d.num_live_items
 
 def ll_dict_bool(d):
     # check if a dict is True, allowing for None
-    return bool(d) and d.num_items != 0
+    return bool(d) and d.num_live_items != 0
 
 def ll_dict_getitem(d, key):
     index = d.lookup_function(d, key, d.keyhash(key), FLAG_LOOKUP)
@@ -519,43 +532,64 @@
         entry = d.entries[i]
         entry.value = value
     else:
-        if len(d.entries) == d.num_used_items:
-            if ll_dict_grow(d):
-                ll_call_insert_clean_function(d, hash, d.num_used_items)
-        entry = d.entries[d.num_used_items]
+        reindexed = False
+        if len(d.entries) == d.num_ever_used_items:
+            try:
+                reindexed = ll_dict_grow(d)
+            except:
+                _ll_dict_rescue(d)
+                raise
+        rc = d.resize_counter - 3
+        if rc <= 0:
+            try:
+                ll_dict_resize(d)
+                reindexed = True
+            except:
+                _ll_dict_rescue(d)
+                raise
+            rc = d.resize_counter - 3
+            ll_assert(rc > 0, "ll_dict_resize failed?")
+        if reindexed:
+            ll_call_insert_clean_function(d, hash, d.num_ever_used_items)
+        #
+        d.resize_counter = rc
+        entry = d.entries[d.num_ever_used_items]
         entry.key = key
         entry.value = value
         if hasattr(ENTRY, 'f_hash'):
             entry.f_hash = hash
         if hasattr(ENTRY, 'f_valid'):
             entry.f_valid = True
-        d.num_used_items += 1
-        d.num_items += 1
-        rc = d.resize_counter - 3
-        if rc <= 0:
-            ll_dict_resize(d)
-            rc = d.resize_counter - 3
-            ll_assert(rc > 0, "ll_dict_resize failed?")
-        d.resize_counter = rc
+        d.num_ever_used_items += 1
+        d.num_live_items += 1
+
[email protected]_look_inside
+def _ll_dict_rescue(d):
+    # MemoryError situation!  The 'indexes' contains an invalid entry
+    # at this point.  But we can call ll_dict_reindex() with the
+    # following arguments, ensuring no further malloc occurs.
+    ll_dict_reindex(d, _ll_len_of_d_indexes(d))
+_ll_dict_rescue._dont_inline_ = True
 
 def _ll_dict_insertclean(d, key, value, hash):
     ENTRY = lltype.typeOf(d.entries).TO.OF
-    ll_call_insert_clean_function(d, hash, d.num_used_items)
-    entry = d.entries[d.num_used_items]
+    ll_call_insert_clean_function(d, hash, d.num_ever_used_items)
+    entry = d.entries[d.num_ever_used_items]
     entry.key = key
     entry.value = value
     if hasattr(ENTRY, 'f_hash'):
         entry.f_hash = hash
     if hasattr(ENTRY, 'f_valid'):
         entry.f_valid = True
-    d.num_used_items += 1
-    d.num_items += 1
+    d.num_ever_used_items += 1
+    d.num_live_items += 1
     rc = d.resize_counter - 3
     d.resize_counter = rc
 
 def _ll_len_of_d_indexes(d):
     # xxx Haaaack: returns len(d.indexes).  Works independently of
     # the exact type pointed to by d, using a forced cast...
+    # Must only be called by @jit.dont_look_inside functions.
     return len(rffi.cast(DICTINDEX_BYTE, d.indexes))
 
 def _overallocate_entries_len(baselen):
@@ -575,7 +609,11 @@
 
 @jit.dont_look_inside
 def ll_dict_grow(d):
-    if d.num_items < d.num_used_items // 4:
+    if d.num_live_items < d.num_ever_used_items // 2:
+        # At least 50% of the allocated entries are dead, so perform a
+        # compaction. If ll_dict_remove_deleted_items detects that over
+        # 75% of allocated entries are dead, then it will also shrink the
+        # memory allocated at the same time as doing a compaction.
         ll_dict_remove_deleted_items(d)
         return True
 
@@ -594,16 +632,25 @@
     return False
 
 def ll_dict_remove_deleted_items(d):
-    new_allocated = _overallocate_entries_len(d.num_items)
-    if new_allocated < len(d.entries) // 2:
+    if d.num_live_items < len(d.entries) // 4:
+        # At least 75% of the allocated entries are dead, so shrink the memory
+        # allocated as well as doing a compaction.
+        new_allocated = _overallocate_entries_len(d.num_live_items)
         newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated)
     else:
         newitems = d.entries
+        # The loop below does a lot of writes into 'newitems'.  It's a better
+        # idea to do a single gc_writebarrier rather than activating the
+        # card-by-card logic (worth 11% in microbenchmarks).
+        from rpython.rtyper.lltypesystem.lloperation import llop
+        llop.gc_writebarrier(lltype.Void, newitems)
     #
-    ENTRY = lltype.typeOf(d).TO.entries.TO.OF
+    ENTRIES = lltype.typeOf(d).TO.entries.TO
+    ENTRY = ENTRIES.OF
     isrc = 0
     idst = 0
-    while isrc < len(d.entries):
+    isrclimit = d.num_ever_used_items
+    while isrc < isrclimit:
         if d.entries.valid(isrc):
             src = d.entries[isrc]
             dst = newitems[idst]
@@ -616,9 +663,21 @@
                 dst.f_valid = True
             idst += 1
         isrc += 1
-    d.entries = newitems
-    assert d.num_items == idst
-    d.num_used_items = idst
+    assert d.num_live_items == idst
+    d.num_ever_used_items = idst
+    if ((ENTRIES.must_clear_key or ENTRIES.must_clear_value) and
+            d.entries == newitems):
+        # must clear the extra entries: they may contain valid pointers
+        # which would create a temporary memory leak
+        while idst < isrclimit:
+            entry = newitems[idst]
+            if ENTRIES.must_clear_key:
+                entry.key = lltype.nullptr(ENTRY.key.TO)
+            if ENTRIES.must_clear_value:
+                entry.value = lltype.nullptr(ENTRY.value.TO)
+            idst += 1
+    else:
+        d.entries = newitems
 
     ll_dict_reindex(d, _ll_len_of_d_indexes(d))
 
@@ -632,7 +691,7 @@
 @jit.look_inside_iff(lambda d, i: jit.isvirtual(d) and jit.isconstant(i))
 def _ll_dict_del(d, index):
     d.entries.mark_deleted(index)
-    d.num_items -= 1
+    d.num_live_items -= 1
     # clear the key and the value if they are GC pointers
     ENTRIES = lltype.typeOf(d.entries).TO
     ENTRY = ENTRIES.OF
@@ -641,32 +700,37 @@
         entry.key = lltype.nullptr(ENTRY.key.TO)
     if ENTRIES.must_clear_value:
         entry.value = lltype.nullptr(ENTRY.value.TO)
-    #
-    # The rest is commented out: like CPython we no longer shrink the
-    # dictionary here.  It may shrink later if we try to append a number
-    # of new items to it.  Unsure if this behavior was designed in
-    # CPython or is accidental.  A design reason would be that if you
-    # delete all items in a dictionary (e.g. with a series of
-    # popitem()), then CPython avoids shrinking the table several times.
-    #num_entries = len(d.entries)
-    #if num_entries > DICT_INITSIZE and d.num_items <= num_entries / 4:
-    #    ll_dict_resize(d)
-    # A previous xxx: move the size checking and resize into a single
-    # call which is opaque to the JIT when the dict isn't virtual, to
-    # avoid extra branches.
+
+    if index == d.num_ever_used_items - 1:
+        # The last element of the ordereddict has been deleted. Instead of
+        # simply marking the item as dead, we can safely reuse it. Since it's
+        # also possible that there are more dead items immediately behind the
+        # last one, we reclaim all the dead items at the end of the ordereditem
+        # at the same point.
+        i = d.num_ever_used_items - 2
+        while i >= 0 and not d.entries.valid(i):
+            i -= 1
+        j = i + 1
+        assert j >= 0
+        d.num_ever_used_items = j
+
+    # If the dictionary is at least 87.5% dead items, then consider shrinking
+    # it.
+    if d.num_live_items + DICT_INITSIZE <= len(d.entries) / 8:
+        ll_dict_resize(d)
 
 def ll_dict_resize(d):
     # make a 'new_size' estimate and shrink it if there are many
     # deleted entry markers.  See CPython for why it is a good idea to
     # quadruple the dictionary size as long as it's not too big.
-    # (Quadrupling comes from '(d.num_items + d.num_items + 1) * 2'
-    # as long as num_items is not too large.)
-    num_extra = min(d.num_items + 1, 30000)
+    # (Quadrupling comes from '(d.num_live_items + d.num_live_items + 1) * 2'
+    # as long as num_live_items is not too large.)
+    num_extra = min(d.num_live_items + 1, 30000)
     _ll_dict_resize_to(d, num_extra)
 ll_dict_resize.oopspec = 'odict.resize(d)'
 
 def _ll_dict_resize_to(d, num_extra):
-    new_estimate = (d.num_items + num_extra) * 2
+    new_estimate = (d.num_live_items + num_extra) * 2
     new_size = DICT_INITSIZE
     while new_size <= new_estimate:
         new_size *= 2
@@ -677,13 +741,17 @@
         ll_dict_reindex(d, new_size)
 
 def ll_dict_reindex(d, new_size):
-    ll_malloc_indexes_and_choose_lookup(d, new_size)
-    d.resize_counter = new_size * 2 - d.num_items * 3
+    if bool(d.indexes) and _ll_len_of_d_indexes(d) == new_size:
+        ll_clear_indexes(d, new_size)   # hack: we can reuse the same array
+    else:
+        ll_malloc_indexes_and_choose_lookup(d, new_size)
+    d.resize_counter = new_size * 2 - d.num_live_items * 3
     assert d.resize_counter > 0
     #
     entries = d.entries
     i = 0
-    while i < d.num_used_items:
+    ibound = d.num_ever_used_items
+    while i < ibound:
         if entries.valid(i):
             hash = entries.hash(i)
             ll_call_insert_clean_function(d, hash, i)
@@ -758,7 +826,7 @@
     else:
         # pristine entry -- lookup failed
         if store_flag == FLAG_STORE:
-            indexes[i] = rffi.cast(T, d.num_used_items + VALID_OFFSET)
+            indexes[i] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET)
         elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD:
             return ll_kill_something(d, T)
         return -1
@@ -775,7 +843,7 @@
             if store_flag == FLAG_STORE:
                 if deletedslot == -1:
                     deletedslot = intmask(i)
-                indexes[deletedslot] = rffi.cast(T, d.num_used_items +
+                indexes[deletedslot] = rffi.cast(T, d.num_ever_used_items +
                                                  VALID_OFFSET)
             elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD:
                 return ll_kill_something(d, T)
@@ -835,10 +903,11 @@
     d = DICT.allocate()
     d.entries = _ll_empty_array(DICT)
     ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE)
-    d.num_items = 0
-    d.num_used_items = 0
+    d.num_live_items = 0
+    d.num_ever_used_items = 0
     d.resize_counter = DICT_INITSIZE * 2
     return d
+OrderedDictRepr.ll_newdict = staticmethod(ll_newdict)
 
 def ll_newdict_size(DICT, orig_length_estimate):
     length_estimate = (orig_length_estimate // 2) * 3
@@ -848,8 +917,8 @@
     d = DICT.allocate()
     d.entries = DICT.entries.TO.allocate(orig_length_estimate)
     ll_malloc_indexes_and_choose_lookup(d, n)
-    d.num_items = 0
-    d.num_used_items = 0
+    d.num_live_items = 0
+    d.num_ever_used_items = 0
     d.resize_counter = n * 2
     return d
 
@@ -864,23 +933,6 @@
     pass
 
 
-def rtype_r_dict(hop):
-    r_dict = hop.r_result
-    if not r_dict.custom_eq_hash:
-        raise TyperError("r_dict() call does not return an r_dict instance")
-    v_eqfn = hop.inputarg(r_dict.r_rdict_eqfn, arg=0)
-    v_hashfn = hop.inputarg(r_dict.r_rdict_hashfn, arg=1)
-    cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
-    hop.exception_cannot_occur()
-    v_result = hop.gendirectcall(ll_newdict, cDICT)
-    if r_dict.r_rdict_eqfn.lowleveltype != lltype.Void:
-        cname = hop.inputconst(lltype.Void, 'fnkeyeq')
-        hop.genop('setfield', [v_result, cname, v_eqfn])
-    if r_dict.r_rdict_hashfn.lowleveltype != lltype.Void:
-        cname = hop.inputconst(lltype.Void, 'fnkeyhash')
-        hop.genop('setfield', [v_result, cname, v_hashfn])
-    return v_result
-
 # ____________________________________________________________
 #
 #  Iteration.
@@ -916,7 +968,7 @@
         entries = dict.entries
         index = iter.index
         assert index >= 0
-        entries_len = dict.num_used_items
+        entries_len = dict.num_ever_used_items
         while index < entries_len:
             nextindex = index + 1
             if entries.valid(index):
@@ -951,15 +1003,15 @@
     newdict = DICT.allocate()
     newdict.entries = DICT.entries.TO.allocate(len(dict.entries))
 
-    newdict.num_items = dict.num_items
-    newdict.num_used_items = dict.num_used_items
+    newdict.num_live_items = dict.num_live_items
+    newdict.num_ever_used_items = dict.num_ever_used_items
     if hasattr(DICT, 'fnkeyeq'):
         newdict.fnkeyeq = dict.fnkeyeq
     if hasattr(DICT, 'fnkeyhash'):
         newdict.fnkeyhash = dict.fnkeyhash
 
     i = 0
-    while i < newdict.num_used_items:
+    while i < newdict.num_ever_used_items:
         d_entry = newdict.entries[i]
         entry = dict.entries[i]
         ENTRY = lltype.typeOf(newdict.entries).TO.OF
@@ -976,14 +1028,14 @@
 ll_dict_copy.oopspec = 'odict.copy(dict)'
 
 def ll_dict_clear(d):
-    if d.num_used_items == 0:
+    if d.num_ever_used_items == 0:
         return
     DICT = lltype.typeOf(d).TO
     old_entries = d.entries
     d.entries = _ll_empty_array(DICT)
     ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE)
-    d.num_items = 0
-    d.num_used_items = 0
+    d.num_live_items = 0
+    d.num_ever_used_items = 0
     d.resize_counter = DICT_INITSIZE * 2
     # old_entries.delete() XXX
 ll_dict_clear.oopspec = 'odict.clear(d)'
@@ -991,9 +1043,9 @@
 def ll_dict_update(dic1, dic2):
     if dic1 == dic2:
         return
-    ll_prepare_dict_update(dic1, dic2.num_items)
+    ll_prepare_dict_update(dic1, dic2.num_live_items)
     i = 0
-    while i < dic2.num_used_items:
+    while i < dic2.num_ever_used_items:
         entries = dic2.entries
         if entries.valid(i):
             entry = entries[i]
@@ -1012,7 +1064,13 @@
     #      (d.resize_counter - 1) // 3 = room left in d
     #  so, if num_extra == 1, we need d.resize_counter > 3
     #      if num_extra == 2, we need d.resize_counter > 6  etc.
-    jit.conditional_call(d.resize_counter <= num_extra * 3,
+    # Note however a further hack: if num_extra <= d.num_live_items,
+    # we avoid calling _ll_dict_resize_to here.  This is to handle
+    # the case where dict.update() actually has a lot of collisions.
+    # If num_extra is much greater than d.num_live_items the conditional_call
+    # will trigger anyway, which is really the goal.
+    x = num_extra - d.num_live_items
+    jit.conditional_call(d.resize_counter <= x * 3,
                          _ll_dict_resize_to, d, num_extra)
 
 # this is an implementation of keys(), values() and items()
@@ -1028,9 +1086,9 @@
 
 def _make_ll_keys_values_items(kind):
     def ll_kvi(LIST, dic):
-        res = LIST.ll_newlist(dic.num_items)
+        res = LIST.ll_newlist(dic.num_live_items)
         entries = dic.entries
-        dlen = dic.num_used_items
+        dlen = dic.num_ever_used_items
         items = res.ll_items()
         i = 0
         p = 0
@@ -1064,24 +1122,24 @@
     return i != -1
 
 def _ll_getnextitem(dic):
-    if dic.num_items == 0:
+    if dic.num_live_items == 0:
         raise KeyError
 
     entries = dic.entries
 
     while True:
-        i = dic.num_used_items - 1
+        i = dic.num_ever_used_items - 1
         if entries.valid(i):
             break
-        dic.num_used_items -= 1
+        dic.num_ever_used_items -= 1
 
     key = entries[i].key
     index = dic.lookup_function(dic, key, entries.hash(i),
                                 FLAG_DELETE_TRY_HARD)
     # if the lookup function returned me a random strange thing,
     # don't care about deleting the item
-    if index == dic.num_used_items - 1:
-        dic.num_used_items -= 1
+    if index == dic.num_ever_used_items - 1:
+        dic.num_ever_used_items -= 1
     else:
         assert index != -1
     return index
diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py
--- a/rpython/rtyper/rbuiltin.py
+++ b/rpython/rtyper/rbuiltin.py
@@ -724,15 +724,16 @@
     raise TyperError("hasattr is only suported on a constant")
 
 @typer_for(annmodel.SomeOrderedDict.knowntype)
+@typer_for(objectmodel.r_dict)
 @typer_for(objectmodel.r_ordereddict)
-def rtype_ordered_dict(hop):
-    from rpython.rtyper.lltypesystem.rordereddict import ll_newdict
-
+def rtype_dict_constructor(hop, i_force_non_null=None):
+    # 'i_force_non_null' is ignored here; if it has any effect, it
+    # has already been applied to 'hop.r_result'
     hop.exception_cannot_occur()
     r_dict = hop.r_result
     cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
-    v_result = hop.gendirectcall(ll_newdict, cDICT)
-    if hasattr(r_dict, 'r_dict_eqfn'):
+    v_result = hop.gendirectcall(r_dict.ll_newdict, cDICT)
+    if r_dict.custom_eq_hash:
         v_eqfn = hop.inputarg(r_dict.r_rdict_eqfn, arg=0)
         v_hashfn = hop.inputarg(r_dict.r_rdict_hashfn, arg=1)
         if r_dict.r_rdict_eqfn.lowleveltype != lltype.Void:
@@ -743,9 +744,6 @@
             hop.genop('setfield', [v_result, cname, v_hashfn])
     return v_result
 
-from rpython.rtyper.lltypesystem.rdict import rtype_r_dict
-typer_for(objectmodel.r_dict)(rtype_r_dict)
-
 # _________________________________________________________________
 # weakrefs
 
diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py
--- a/rpython/rtyper/rdict.py
+++ b/rpython/rtyper/rdict.py
@@ -57,11 +57,10 @@
 
 
 def rtype_newdict(hop):
-    from rpython.rtyper.lltypesystem.rdict import ll_newdict
     hop.inputargs()    # no arguments expected
     r_dict = hop.r_result
     cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
-    v_result = hop.gendirectcall(ll_newdict, cDICT)
+    v_result = hop.gendirectcall(r_dict.ll_newdict, cDICT)
     return v_result
 
 
diff --git a/rpython/rtyper/test/test_rordereddict.py 
b/rpython/rtyper/test/test_rordereddict.py
--- a/rpython/rtyper/test/test_rordereddict.py
+++ b/rpython/rtyper/test/test_rordereddict.py
@@ -71,7 +71,7 @@
             for j in range(i):
                 assert rordereddict.ll_dict_getitem(ll_d, llstr(str(j))) == j
             rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
-        assert ll_d.num_items == 20
+        assert ll_d.num_live_items == 20
         for i in range(20):
             assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
 
@@ -84,7 +84,7 @@
             rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
             if i % 2 != 0:
                 rordereddict.ll_dict_delitem(ll_d, llstr(str(i)))
-        assert ll_d.num_items == 10
+        assert ll_d.num_live_items == 10
         for i in range(0, 20, 2):
             assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
 
@@ -129,7 +129,7 @@
         for i in range(40):
             rordereddict.ll_dict_setitem(ll_d, lls, i)
             rordereddict.ll_dict_delitem(ll_d, lls)
-        assert ll_d.num_used_items <= 10
+        assert ll_d.num_ever_used_items <= 10
 
     def test_dict_iteration(self):
         DICT = self._get_str_dict()
@@ -190,7 +190,7 @@
         rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
         rordereddict.ll_dict_setitem(ll_d, llstr("l"), 1)
         rordereddict.ll_dict_clear(ll_d)
-        assert ll_d.num_items == 0
+        assert ll_d.num_live_items == 0
 
     def test_get(self):
         DICT = self._get_str_dict()
@@ -251,6 +251,16 @@
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to