Author: Maciej Fijalkowski <fij...@gmail.com>
Branch: optresult-unroll
Changeset: r78903:18a9d18296d2
Date: 2015-08-07 14:36 +0200
http://bitbucket.org/pypy/pypy/changeset/18a9d18296d2/

Log:    implement a proper fix for heapcache dealing with getfield_gc_pure
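
The heap.py hunk below stops clean_caches() from discarding cached fields whose
descr is always pure (i.e. immutable fields), since such a field can never be
written again once set. What follows is a minimal, self-contained sketch of that
idea only, not the PyPy code itself: the FieldDescr and HeapCache names and
methods here are illustrative assumptions.

# Illustrative sketch of the heapcache idea; not PyPy's actual classes.

class FieldDescr(object):
    def __init__(self, name, immutable=False):
        self.name = name
        self._is_pure = immutable

    def is_always_pure(self):
        # True for immutable fields, which can never be rewritten.
        return self._is_pure

class HeapCache(object):
    def __init__(self):
        self.cached_fields = {}   # descr -> cached value

    def remember(self, descr, value):
        self.cached_fields[descr] = value

    def clean_caches(self):
        # Drop everything that a call (or similar) could have changed;
        # always-pure (immutable) fields survive the flush.
        for descr in list(self.cached_fields):
            if not descr.is_always_pure():
                del self.cached_fields[descr]

# Usage: after clean_caches() only the immutable field stays cached.
cache = HeapCache()
value_descr = FieldDescr('value', immutable=True)
next_descr = FieldDescr('next', immutable=False)
cache.remember(value_descr, 42)
cache.remember(next_descr, object())
cache.clean_caches()
assert value_descr in cache.cached_fields
assert next_descr not in cache.cached_fields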

diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -116,6 +116,10 @@
         self.fieldname = fieldname
         self.FIELD = getattr(S, fieldname)
         self.index = heaptracker.get_fielddescr_index_in(S, fieldname)
+        self._is_pure = S._immutable_field(fieldname)
+
+    def is_always_pure(self):
+        return self._is_pure
 
     def get_parent_descr(self):
         return self.parent_descr
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -287,11 +287,13 @@
     def clean_caches(self):
         del self._lazy_setfields_and_arrayitems[:]
         for descr, cf in self.cached_fields.items():
-            cf.invalidate(descr)
-        for submap in self.cached_arrayitems.itervalues():
-            for index, cf in submap.iteritems():
-                cf.invalidate(None)
-        self.cached_arrayitems.clear()
+            if not descr.is_always_pure():
+                cf.invalidate(descr)
+        for descr, submap in self.cached_arrayitems.iteritems():
+            if not descr.is_always_pure():
+                for index, cf in submap.iteritems():
+                    cf.invalidate(None)
+        #self.cached_arrayitems.clear()
         self.cached_dict_reads.clear()
 
     def field_cache(self, descr):
diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
--- a/rpython/jit/metainterp/optimizeopt/optimizer.py
+++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
@@ -268,6 +268,9 @@
     def force_op_from_preamble(self, op):
         return op
 
+    def notice_guard_future_condition(self, op):
+        self.patchguardop = op
+
     def replace_guard(self, op, value):
         assert isinstance(value, info.NonNullPtrInfo)
         if value.last_guard_pos == -1:
diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py
--- a/rpython/jit/metainterp/optimizeopt/rewrite.py
+++ b/rpython/jit/metainterp/optimizeopt/rewrite.py
@@ -593,7 +593,7 @@
         self.emit_operation(op)
 
     def optimize_GUARD_FUTURE_CONDITION(self, op):
-        pass # just remove it
+        self.optimizer.notice_guard_future_condition(op)
 
     def optimize_INT_FLOORDIV(self, op):
         arg0 = op.getarg(0)
diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py
--- a/rpython/jit/metainterp/optimizeopt/simplify.py
+++ b/rpython/jit/metainterp/optimizeopt/simplify.py
@@ -70,7 +70,7 @@
     #     self.emit_operation(op)
 
     def optimize_GUARD_FUTURE_CONDITION(self, op):
-        pass
+        self.optimizer.notice_guard_future_condition(op)
 
 dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_',
         default=OptSimplify.emit_operation)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -2949,7 +2949,7 @@
         """
         exc = self.raises(InvalidLoop, self.optimize_loop, ops, "crash!")
         if exc:
-            assert "node" in exc.msg
+            assert "promote of a virtual" in exc.msg
 
     def test_merge_guard_class_guard_value(self):
         ops = """
@@ -3171,8 +3171,6 @@
         [p1, p2]
         i1 = ptr_eq(p1, p2)
         i3 = int_add(i1, 1)
-        i3b = int_is_true(i3)
-        guard_true(i3b) []
         escape_n(i3)
         escape_n(i3)
         guard_true(i1) []
@@ -7438,17 +7436,17 @@
 
         ops = """
         [p0]
-        p1 = new_with_vtable(descr=nodesize)
-        setfield_gc(p1, p0, descr=valuedescr)
+        p1 = new_with_vtable(descr=nodesize3)
+        setfield_gc(p1, p0, descr=valuedescr3)
         escape_n(p1)
-        p2 = getfield_gc_pure_r(p1, descr=valuedescr)
+        p2 = getfield_gc_pure_r(p1, descr=valuedescr3)
         escape_n(p2)
         jump(p0)
         """
         expected = """
         [p0]
-        p1 = new_with_vtable(descr=nodesize)
-        setfield_gc(p1, p0, descr=valuedescr)
+        p1 = new_with_vtable(descr=nodesize3)
+        setfield_gc(p1, p0, descr=valuedescr3)
         escape_n(p1)
         escape_n(p0)
         jump(p0)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_util.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py
@@ -88,6 +88,9 @@
     node_vtable2 = lltype.malloc(OBJECT_VTABLE, immortal=True)
     node_vtable2.name = rclass.alloc_array_name('node2')
     node_vtable_adr2 = llmemory.cast_ptr_to_adr(node_vtable2)
+    node_vtable3 = lltype.malloc(OBJECT_VTABLE, immortal=True)
+    node_vtable3.name = rclass.alloc_array_name('node3')
+    node_vtable_adr3 = llmemory.cast_ptr_to_adr(node_vtable3)
     cpu = runner.LLGraphCPU(None)
 
     NODE = lltype.GcForwardReference()
@@ -98,6 +101,13 @@
                                         ('next', lltype.Ptr(NODE))))
     NODE2 = lltype.GcStruct('NODE2', ('parent', NODE),
                                      ('other', lltype.Ptr(NODE)))
+
+    NODE3 = lltype.GcForwardReference()
+    NODE3.become(lltype.GcStruct('NODE3', ('parent', OBJECT),
+                            ('value', lltype.Signed),
+                            ('next', lltype.Ptr(NODE3)),
+                            hints={'immutable': True}))
+    
     node = lltype.malloc(NODE)
     node.value = 5
     node.parent.typeptr = node_vtable
@@ -111,11 +121,16 @@
     #nodebox2 = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node2))
     nodesize = cpu.sizeof(NODE, True)
     nodesize2 = cpu.sizeof(NODE2, True)
+    nodesize3 = cpu.sizeof(NODE3, True)
     valuedescr = cpu.fielddescrof(NODE, 'value')
     floatdescr = cpu.fielddescrof(NODE, 'floatval')
     chardescr = cpu.fielddescrof(NODE, 'charval')
     nextdescr = cpu.fielddescrof(NODE, 'next')
     otherdescr = cpu.fielddescrof(NODE2, 'other')
+    valuedescr3 = cpu.fielddescrof(NODE3, 'value')
+    nextdescr3 = cpu.fielddescrof(NODE3, 'next')
+    assert valuedescr3.is_always_pure()
+    assert nextdescr3.is_always_pure()
 
     accessor = FieldListAccessor()
     accessor.initialize(None, {'inst_field': IR_QUASIIMMUTABLE})
@@ -312,6 +327,7 @@
 
     register_known_gctype(cpu, node_vtable,  NODE)
     register_known_gctype(cpu, node_vtable2, NODE2)
+    register_known_gctype(cpu, node_vtable3, NODE3)
     register_known_gctype(cpu, u_vtable,     U)
     register_known_gctype(cpu, jit_virtual_ref_vtable,vrefinfo.JIT_VIRTUAL_REF)
     register_known_gctype(cpu, intobj_noimmut_vtable, INTOBJ_NOIMMUT)
@@ -433,6 +449,7 @@
         return call_pure_results
 
     def unroll_and_optimize(self, loop, call_pure_results=None):
+        self.add_guard_future_condition(loop)
         jump_op = loop.operations[-1]
         assert jump_op.getopnum() == rop.JUMP
         ops = loop.operations[:-1]
diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py
--- a/rpython/jit/metainterp/optimizeopt/unroll.py
+++ b/rpython/jit/metainterp/optimizeopt/unroll.py
@@ -125,7 +125,12 @@
         for i in range(len(jump_args)):
             sb.short_inputargs[i].set_forwarded(None)
             self.make_equal_to(sb.short_inputargs[i], jump_args[i])
+        patchguardop = self.optimizer.patchguardop
         for op in sb.short:
+            if op.is_guard():
+                op = self.replace_op_with(op, op.getopnum())
+                op.rd_snapshot = patchguardop.rd_snapshot
+                op.rd_frame_info_list = patchguardop.rd_frame_info_list
             self.optimizer.send_extra_operation(op)
         res = [self.optimizer.get_box_replacement(op) for op in
                 sb.short_preamble_jump]
diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py
--- a/rpython/jit/metainterp/optimizeopt/virtualize.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualize.py
@@ -170,17 +170,6 @@
 
     def optimize_GETFIELD_GC_I(self, op):
         opinfo = self.getptrinfo(op.getarg(0))
-        # XXX dealt with by heapcache
-        # If this is an immutable field (as indicated by op.is_always_pure())
-        # then it's safe to reuse the virtual's field, even if it has been
-        # forced, because it should never be written to again.
-        #if op.is_always_pure():
-        #    
-        #    if value.is_forced_virtual() and op.is_always_pure():
-        #        fieldvalue = value.getfield(op.getdescr(), None)
-        #        if fieldvalue is not None:
-        #            self.make_equal_to(op, fieldvalue)
-        #            return
         if opinfo and opinfo.is_virtual():
             fieldop = opinfo.getfield(op.getdescr())
             if fieldop is None: