For each variable its live range is calculated precisely as described
in Wimmer's master thesis "Linear Scan Register Allocation" in 5.6.3
"Build Intervals".

This patch reduces register allocator stress by generating shorter,
more precise live ranges and therefore reduces the number of interval spills.

This patch also introduces a distinction between even and odd use
positions. Even use positions represent an instruction's inputs and odd
positions represent its outputs. This allows for better register
utilization. Example:

mov r1, r2
add r2, r3

after allocation:

mov ebx, ebx  ; this can be optimized out in the future
add ebx, ebx

Signed-off-by: Tomek Grabiec <tgrab...@gmail.com>
---
 arch/x86/include/arch/instruction.h |    5 --
 arch/x86/instruction.c              |    5 ++
 arch/x86/use-def.c                  |   28 +++++++++++
 include/jit/instruction.h           |    6 +++
 include/jit/use-position.h          |    6 +++
 include/jit/vars.h                  |   15 ++----
 jit/interval.c                      |   38 +++++++++++++--
 jit/linear-scan.c                   |   21 +++++++--
 jit/liveness.c                      |   44 +++++++++++-------
 jit/spill-reload.c                  |   85 ++++++++++++++++++++++++----------
 test/jit/liveness-test.c            |   30 ++++++------
 test/jit/spill-reload-test.c        |   14 +++---
 12 files changed, 209 insertions(+), 88 deletions(-)

diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index 5e04d92..be321de 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -214,11 +214,6 @@ struct insn {
 
 void insn_sanity_check(void);
 
-static inline unsigned long lir_position(struct use_position *reg)
-{
-       return reg->insn->lir_pos;
-}
-
 struct insn *insn(enum insn_type);
 struct insn *memlocal_reg_insn(enum insn_type, struct stack_slot *, struct 
var_info *);
 struct insn *membase_reg_insn(enum insn_type, struct var_info *, long, struct 
var_info *);
diff --git a/arch/x86/instruction.c b/arch/x86/instruction.c
index 8213e8b..0b1e145 100644
--- a/arch/x86/instruction.c
+++ b/arch/x86/instruction.c
@@ -107,6 +107,7 @@ static void init_membase_operand(struct insn *insn, 
unsigned long idx,
        operand->disp = disp;
 
        init_register(&operand->base_reg, insn, base_reg->interval);
+       operand->base_reg.kind = USE_KIND_INPUT;
 }
 
 static void init_memdisp_operand(struct insn *insn, unsigned long idx,
@@ -131,6 +132,9 @@ static void init_memindex_operand(struct insn *insn, 
unsigned long idx,
 
        init_register(&operand->base_reg, insn, base_reg->interval);
        init_register(&operand->index_reg, insn, index_reg->interval);
+
+       operand->base_reg.kind  = USE_KIND_INPUT;
+       operand->index_reg.kind = USE_KIND_INPUT;
 }
 
 static void init_memlocal_operand(struct insn *insn, unsigned long idx,
@@ -152,6 +156,7 @@ static void init_reg_operand(struct insn *insn, unsigned 
long idx,
        operand->type = OPERAND_REG;
 
        init_register(&operand->reg, insn, reg->interval);
+       operand->reg.kind = insn_operand_use_kind(insn, idx);
 }
 
 static void init_rel_operand(struct insn *insn, unsigned long idx,
diff --git a/arch/x86/use-def.c b/arch/x86/use-def.c
index 59e1f2a..0730a07 100644
--- a/arch/x86/use-def.c
+++ b/arch/x86/use-def.c
@@ -248,3 +248,31 @@ int insn_uses(struct insn *insn, struct var_info **uses)
 
        return nr;
 }
+
+int insn_operand_use_kind(struct insn *insn, int idx)
+{
+       struct insn_info *info;
+       int use_mask;
+       int def_mask;
+       int kind_mask;
+
+       info = get_info(insn);
+
+       if (idx == 0) {
+               use_mask = USE_SRC;
+               def_mask = DEF_SRC;
+       } else {
+               assert(idx == 1);
+               use_mask = USE_DST;
+               def_mask = DEF_DST;
+       }
+
+       kind_mask = 0;
+       if (info->flags & use_mask)
+               kind_mask |= USE_KIND_INPUT;
+
+       if (info->flags & def_mask)
+               kind_mask |= USE_KIND_OUTPUT;
+
+       return kind_mask;
+}
diff --git a/include/jit/instruction.h b/include/jit/instruction.h
index cc303fe..d360c82 100644
--- a/include/jit/instruction.h
+++ b/include/jit/instruction.h
@@ -9,11 +9,17 @@ static inline struct insn *next_insn(struct insn *insn)
        return list_entry(insn->insn_list_node.next, struct insn, 
insn_list_node);
 }
 
+static inline struct insn *prev_insn(struct insn *insn)
+{
+       return list_entry(insn->insn_list_node.prev, struct insn, 
insn_list_node);
+}
+
 struct insn *alloc_insn(enum insn_type);
 void free_insn(struct insn *);
 
 int insn_defs(struct compilation_unit *, struct insn *, struct var_info **);
 int insn_uses(struct insn *, struct var_info **);
+int insn_operand_use_kind(struct insn *, int);
 
 #define for_each_insn(insn, insn_list) list_for_each_entry(insn, insn_list, 
insn_list_node)
 
diff --git a/include/jit/use-position.h b/include/jit/use-position.h
index ee968d0..c2f215a 100644
--- a/include/jit/use-position.h
+++ b/include/jit/use-position.h
@@ -6,6 +6,9 @@
 
 struct insn;
 
+#define USE_KIND_INPUT         0x1
+#define USE_KIND_OUTPUT                0x2
+
 /**
  * struct use_position - register use position
  *
@@ -22,6 +25,7 @@ struct use_position {
        struct insn             *insn;
        struct live_interval    *interval;
        struct list_head        use_pos_list;   /* node in interval use 
position list */
+       int                     kind;
 };
 
 static inline void register_set_insn(struct use_position *reg, struct insn 
*insn)
@@ -60,4 +64,6 @@ static inline bool is_vreg(struct use_position *reg, unsigned 
long vreg)
        return reg->interval->var_info->vreg == vreg;
 }
 
+int get_lir_positions(struct use_position *reg, unsigned long *pos);
+
 #endif /* __JIT_USE_POSITION_H */
diff --git a/include/jit/vars.h b/include/jit/vars.h
index f00c5f9..2471b17 100644
--- a/include/jit/vars.h
+++ b/include/jit/vars.h
@@ -149,16 +149,6 @@ static inline unsigned long interval_end(struct 
live_interval *it)
        return node_to_range(it->range_list.prev)->end;
 }
 
-static inline unsigned long interval_last_insn_pos(struct live_interval *it)
-{
-       return (interval_end(it) - 1) & ~1ul;
-}
-
-static inline unsigned long interval_first_insn_pos(struct live_interval *it)
-{
-       return interval_start(it) & ~1ul;
-}
-
 static inline bool interval_is_empty(struct live_interval *it)
 {
        return list_is_empty(&it->range_list);
@@ -184,4 +174,9 @@ int interval_add_range(struct live_interval *, unsigned 
long, unsigned long);
 struct live_range *interval_range_at(struct live_interval *, unsigned long);
 void interval_update_current_range(struct live_interval *, unsigned long);
 
+static inline unsigned long first_use_pos(struct live_interval *it)
+{
+       return next_use_pos(it, 0);
+}
+
 #endif /* __JIT_VARS_H */
diff --git a/jit/interval.c b/jit/interval.c
index 9e22c0c..23703a1 100644
--- a/jit/interval.c
+++ b/jit/interval.c
@@ -149,7 +149,11 @@ struct live_interval *split_interval_at(struct 
live_interval *interval,
                new->reg = interval->reg;
 
        list_for_each_entry_safe(this, next, &interval->use_positions, 
use_pos_list) {
-               if (lir_position(this) < pos)
+               unsigned long use_pos[2];
+
+               get_lir_positions(this, use_pos);
+
+               if (use_pos[0] < pos)
                        continue;
 
                list_move(&this->use_pos_list, &new->use_positions);
@@ -169,11 +173,18 @@ unsigned long next_use_pos(struct live_interval *it, 
unsigned long pos)
        unsigned long min = LONG_MAX;
 
        list_for_each_entry(this, &it->use_positions, use_pos_list) {
-               if (lir_position(this) < pos)
-                       continue;
+               unsigned long use_pos[2];
+               int nr_use_pos;
+               int i;
+
+               nr_use_pos = get_lir_positions(this, use_pos);
+               for (i = 0; i < nr_use_pos; i++) {
+                       if (use_pos[i] < pos)
+                               continue;
 
-               if (lir_position(this) < min)
-                       min = lir_position(this);
+                       if (use_pos[i] < min)
+                               min = use_pos[i];
+               }
        }
 
        return min;
@@ -357,3 +368,20 @@ int interval_add_range(struct live_interval *it, unsigned 
long start,
        list_add_tail(&new->range_list_node, &it->range_list);
        return 0;
 }
+
+int get_lir_positions(struct use_position *reg, unsigned long *pos)
+{
+       int nr_pos;
+
+       nr_pos = 0;
+
+       assert(reg->kind);
+
+       if (reg->kind & USE_KIND_INPUT)
+               pos[nr_pos++] = reg->insn->lir_pos;
+
+       if (reg->kind & USE_KIND_OUTPUT)
+               pos[nr_pos++] = reg->insn->lir_pos + 1;
+
+       return nr_pos;
+}
diff --git a/jit/linear-scan.c b/jit/linear-scan.c
index 765cea9..018daaa 100644
--- a/jit/linear-scan.c
+++ b/jit/linear-scan.c
@@ -123,8 +123,18 @@ static void spill_interval(struct live_interval *it, 
unsigned long pos,
                if (next_pos > interval_start(new))
                        new = split_interval_at(new, next_pos);
 
+               /*
+                * When next use position is a write then we must not
+                * reload the new interval. One reason for this is
+                * that it's unnecessary. Another one is that we won't
+                * be able to insert a reload instruction in the
+                * middle of instruction when new interval starts at odd
+                * position.
+                */
+               if ((next_pos & 1) == 0)
+                       mark_need_reload(new, it);
+
                it->need_spill = true;
-               mark_need_reload(new, it);
                pqueue_insert(unhandled, new);
        }
 }
@@ -134,16 +144,19 @@ static void __spill_interval_intersecting(struct 
live_interval *current,
                                          struct live_interval *it,
                                          struct pqueue *unhandled)
 {
+       unsigned long start;
+
        if (it->reg != reg)
                return;
 
        if (!intervals_intersect(it, current))
                return;
 
-       if (interval_start(current) == interval_start(it))
+       start = interval_intersection_start(current, it);
+       if (start == interval_start(it))
                return;
 
-       spill_interval(it, interval_start(current), unhandled);
+       spill_interval(it, start, unhandled);
 }
 
 static void spill_all_intervals_intersecting(struct live_interval *current,
@@ -236,7 +249,7 @@ static void allocate_blocked_reg(struct live_interval 
*current,
                 */
                current->reg = reg;
 
-               if (block_pos[reg] < interval_start(current))
+               if (block_pos[reg] < interval_end(current))
                        spill_interval(current, block_pos[reg], unhandled);
 
                spill_all_intervals_intersecting(current, reg, active,
diff --git a/jit/liveness.c b/jit/liveness.c
index 5c8c303..4089298 100644
--- a/jit/liveness.c
+++ b/jit/liveness.c
@@ -36,18 +36,32 @@
 #include <errno.h>
 #include <stdlib.h>
 
-static void __update_live_range(struct live_interval *it, unsigned long pos)
+static void __update_live_ranges(struct compilation_unit *cu, struct 
basic_block *bb)
 {
-       if (interval_is_empty(it))
-               interval_add_range(it, pos, pos + 1);
-       else {
-               struct live_range *r = interval_first_range(it);
+       struct var_info *uses[MAX_REG_OPERANDS];
+       struct var_info *defs[MAX_REG_OPERANDS];
+       struct insn *insn;
+       int nr_uses;
+       int nr_defs;
+       int i;
+
+       for_each_insn_reverse(insn, &bb->insn_list) {
+               nr_defs = insn_defs(bb->b_parent, insn, defs);
+               for (i = 0; i < nr_defs; i++) {
+                       if (interval_is_empty(defs[i]->interval)) {
+                               interval_add_range(defs[i]->interval, 
insn->lir_pos + 1, bb->end_insn);
+                               continue;
+                       }
 
-               if (r->start > pos)
-                       r->start = pos;
+                       struct live_range *r
+                               = interval_first_range(defs[i]->interval);
 
-               if (r->end < (pos + 1))
-                       r->end = pos + 1;
+                       r->start = insn->lir_pos + 1;
+               }
+
+               nr_uses = insn_uses(insn, uses);
+               for (i = 0; i < nr_uses; i++)
+                       interval_add_range(uses[i]->interval, bb->start_insn, 
insn->lir_pos + 1);
        }
 }
 
@@ -55,16 +69,15 @@ static void update_live_ranges(struct compilation_unit *cu)
 {
        struct basic_block *this;
 
-       for_each_basic_block(this, &cu->bb_list) {
+       for_each_basic_block_reverse(this, &cu->bb_list) {
                struct var_info *var;
 
                for_each_variable(var, cu->var_infos) {
-                       if (test_bit(this->live_in_set->bits, var->vreg))
-                               __update_live_range(var->interval, 
this->start_insn);
-
                        if (test_bit(this->live_out_set->bits, var->vreg))
-                               __update_live_range(var->interval, 
this->end_insn);
+                               interval_add_range(var->interval, 
this->start_insn, this->end_insn);
                }
+
+               __update_live_ranges(cu, this);
        }
 }
 
@@ -140,8 +153,6 @@ static void __analyze_use_def(struct basic_block *bb, 
struct insn *insn)
        for (i = 0; i < nr_uses; i++) {
                struct var_info *var = uses[i];
 
-               __update_live_range(var->interval, insn->lir_pos);
-
                /*
                 * It's in the use set if and only if it has not
                 * _already_ been defined by insn basic block.
@@ -154,7 +165,6 @@ static void __analyze_use_def(struct basic_block *bb, 
struct insn *insn)
        for (i = 0; i < nr_defs; i++) {
                struct var_info *var = defs[i];
 
-               __update_live_range(var->interval, insn->lir_pos);
                set_bit(bb->def_set->bits, var->vreg);
        }
 }
diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index f434547..622966b 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -46,32 +46,65 @@ struct live_interval_mapping {
        struct live_interval *from, *to;
 };
 
-static struct insn *first_insn(struct compilation_unit *cu, struct 
live_interval *interval)
+static struct insn *
+get_reload_before_insn(struct compilation_unit *cu, struct live_interval 
*interval)
 {
        struct insn *ret;
 
-       ret = radix_tree_lookup(cu->lir_insn_map, 
interval_first_insn_pos(interval));
-       assert(ret != NULL);
+       unsigned long start = interval_start(interval);
+
+       ret = radix_tree_lookup(cu->lir_insn_map, start);
+
+       if (start & 1) {
+               /*
+                * If interval starts at odd position and has a use
+                * position there then it means that its value is
+                * being defined. In this case, there is no need to
+                * reload anything. Otherwise, if interval starts at
+                * odd position and has no use at this position, we
+                * should reload after that instruction.
+                */
+               if (first_use_pos(interval) == interval_start(interval))
+                       error("interval begins with a def-use and is marked for 
reload");
+
+               ret = next_insn(ret);
+       }
 
+       assert(ret != NULL);
        return ret;
 }
 
-static struct insn *last_insn(struct compilation_unit *cu, struct 
live_interval *interval)
+static struct insn *
+get_spill_after_insn(struct compilation_unit *cu, struct live_interval 
*interval)
 {
        struct insn *ret;
 
-       ret = radix_tree_lookup(cu->lir_insn_map, 
interval_last_insn_pos(interval));
+       /*
+        * If interval ends at even position then it is not written to
+        * at last instruction and we can safely spill before the last
+        * insn. If interval ends at odd position then we must spill
+        * after last instruction.
+        */
+       unsigned long last_pos = interval_end(interval) - 1;
+
+       if (last_pos & 1) {
+               ret = radix_tree_lookup(cu->lir_insn_map, last_pos - 1);
+       } else {
+               ret = radix_tree_lookup(cu->lir_insn_map, last_pos);
+               ret = prev_insn(ret);
+       }
+
        assert(ret != NULL);
 
        return ret;
 }
 
 /**
- * Returns the node before which spill instructions should be inserted
+ * Returns the node after which spill instructions should be inserted
  * when they are supposed to be executed just before control leaves
  * given basic block. When basic block is ended with a branch
- * instruction it returns node of that branch; otherwise it returns
- * the next node.
+ * instruction it returns node before that branch; otherwise it returns
+ * the last node.
  */
 static struct list_head *bb_last_spill_node(struct basic_block *bb)
 {
@@ -90,15 +123,15 @@ static struct list_head *bb_last_spill_node(struct 
basic_block *bb)
        assert(last);
 
        if (insn_is_branch(last))
-               return &last->insn_list_node;
+               return last->insn_list_node.prev;
 
-       return last->insn_list_node.next;
+       return &last->insn_list_node;
 }
 
 static struct stack_slot *
 spill_interval(struct live_interval *interval,
               struct compilation_unit *cu,
-              struct list_head *spill_before,
+              struct list_head *spill_after,
               unsigned long bc_offset)
 {
        struct stack_slot *slot;
@@ -114,21 +147,19 @@ spill_interval(struct live_interval *interval,
 
        spill->bytecode_offset = bc_offset;
 
-       list_add_tail(&spill->insn_list_node, spill_before);
+       list_add(&spill->insn_list_node, spill_after);
        return slot;
 }
 
 static int
 insert_spill_insn(struct live_interval *interval, struct compilation_unit *cu)
 {
-       struct insn *last;
+       struct insn *spill_after;
 
-       last = last_insn(cu, interval);
-       if (!insn_is_branch(last))
-               last = next_insn(last);
-
-       interval->spill_slot = spill_interval(interval, cu, 
&last->insn_list_node,
-                                             last->bytecode_offset);
+       spill_after = get_spill_after_insn(cu, interval);
+       interval->spill_slot = spill_interval(interval, cu,
+                                             &spill_after->insn_list_node,
+                                             spill_after->bytecode_offset);
        if (!interval->spill_slot)
                return warn("out of memory"), -ENOMEM;
 
@@ -198,11 +229,11 @@ static int __insert_spill_reload_insn(struct 
live_interval *interval, struct com
                 * can't insert a reload instruction in the middle of
                 * instruction.
                 */
-               assert((interval_start(interval) & 1) == 0);
+               /* odd interval starts are handled by get_reload_before_insn() */
 
                err = insert_reload_insn(interval, cu,
                                interval->spill_parent->spill_slot,
-                               first_insn(cu, interval));
+                               get_reload_before_insn(cu, interval));
                if (err)
                        goto out;
        }
@@ -224,21 +255,25 @@ static void insert_mov_insns(struct compilation_unit *cu,
 {
        struct live_interval *from_it, *to_it;
        struct stack_slot *slots[nr_mapped];
-       struct list_head *spill_before;
+       struct list_head *spill_after;
+       struct list_head *push_before;
        unsigned long bc_offset;
        int i;
 
-       spill_before = bb_last_spill_node(from_bb);
+       spill_after = bb_last_spill_node(from_bb);
+       push_before = spill_after->next;
        bc_offset = from_bb->end - 1;
 
        /* Spill all intervals that have to be resolved */
        for (i = 0; i < nr_mapped; i++) {
                from_it         = mappings[i].from;
+               if (!from_it)
+                       continue;
 
                if (from_it->need_spill && interval_end(from_it) < 
from_bb->end_insn) {
                        slots[i] = from_it->spill_slot;
                } else {
-                       slots[i] = spill_interval(from_it, cu, spill_before, 
bc_offset);
+                       slots[i] = spill_interval(from_it, cu, spill_after, 
bc_offset);
                }
        }
 
@@ -249,7 +284,7 @@ static void insert_mov_insns(struct compilation_unit *cu,
                if (to_it->need_reload && interval_start(to_it) >= 
to_bb->start_insn) {
                        insert_copy_slot_insn(mappings[i].to, cu, slots[i],
                                              to_it->spill_parent->spill_slot,
-                                             spill_before, bc_offset);
+                                             push_before, bc_offset);
                        continue;
                }
 
diff --git a/test/jit/liveness-test.c b/test/jit/liveness-test.c
index cb85e42..56a4fd8 100644
--- a/test/jit/liveness-test.c
+++ b/test/jit/liveness-test.c
@@ -67,15 +67,15 @@ void test_variable_range_limited_to_basic_block(void)
        assert_defines(bb, r1);
        assert_defines(bb, r2);
 
-       assert_live_range(r1->interval, 0, 5);
-       assert_live_range(r2->interval, 2, 5);
+       assert_live_range(r1->interval, 1, 5);
+       assert_live_range(r2->interval, 3, 6);
 
-       assert_insn_at_equals(insn[0], cu, r1->interval, 0);
-       assert_insn_at_equals(insn[1], cu, r1->interval, 2);
-       assert_insn_at_equals(insn[2], cu, r1->interval, 4);
+       assert_insn_at_equals(insn[0], cu, r1->interval, -1);
+       assert_insn_at_equals(insn[1], cu, r1->interval, 1);
+       assert_insn_at_equals(insn[2], cu, r1->interval, 3);
 
-       assert_insn_at_equals(insn[1], cu, r2->interval, 0);
-       assert_insn_at_equals(insn[2], cu, r2->interval, 2);
+       assert_insn_at_equals(insn[1], cu, r2->interval, -1);
+       assert_insn_at_equals(insn[2], cu, r2->interval, 1);
 
        free_compilation_unit(cu);
 }
@@ -114,16 +114,16 @@ void test_variable_range_spans_two_basic_blocks(void)
        assert_defines(bb2, r2);
        assert_uses(bb2, r1);
 
-       assert_live_range(r1->interval, 0, 7);
-       assert_live_range(r2->interval, 4, 7);
+       assert_live_range(r1->interval, 1, 7);
+       assert_live_range(r2->interval, 5, 8);
 
-       assert_insn_at_equals(insn[0], cu, r1->interval, 0);
-       assert_insn_at_equals(insn[1], cu, r1->interval, 2);
-       assert_insn_at_equals(insn[2], cu, r1->interval, 4);
-       assert_insn_at_equals(insn[3], cu, r1->interval, 6);
+       assert_insn_at_equals(insn[0], cu, r1->interval, -1);
+       assert_insn_at_equals(insn[1], cu, r1->interval, 1);
+       assert_insn_at_equals(insn[2], cu, r1->interval, 3);
+       assert_insn_at_equals(insn[3], cu, r1->interval, 5);
 
-       assert_insn_at_equals(insn[2], cu, r2->interval, 0);
-       assert_insn_at_equals(insn[3], cu, r2->interval, 2);
+       assert_insn_at_equals(insn[2], cu, r2->interval, -1);
+       assert_insn_at_equals(insn[3], cu, r2->interval, 1);
 
        free_compilation_unit(cu);
 }
diff --git a/test/jit/spill-reload-test.c b/test/jit/spill-reload-test.c
index 5dadefe..9842e1c 100644
--- a/test/jit/spill-reload-test.c
+++ b/test/jit/spill-reload-test.c
@@ -49,7 +49,7 @@ static void assert_ld_insn(enum insn_type type, enum 
machine_reg reg, struct sta
 }
 
 
-void test_spill_insn_is_inserted_at_the_end_of_the_interval_if_necessary(void)
+void test_spill_insn_is_inserted_before_last_read_if_necessary(void)
 {
         struct compilation_unit *cu;
         struct insn *insn_array[2];
@@ -75,22 +75,22 @@ void 
test_spill_insn_is_inserted_at_the_end_of_the_interval_if_necessary(void)
        insert_spill_reload_insns(cu);
 
        /*
-        * First instruction stays the same. 
+        * First instruction stays the same.
         */
        insn = list_first_entry(&bb->insn_list, struct insn, insn_list_node);
        assert_ptr_equals(insn_array[0], insn);
 
        /*
-        * Last instruction stays the same.
+        * A spill instruction is inserted before last read-use position
         */
        insn = list_next_entry(&insn->insn_list_node, struct insn, 
insn_list_node);
-       assert_ptr_equals(insn_array[1], insn);
+       assert_st_insn(INSN_ST_LOCAL, r1->interval->spill_slot, 
r1->interval->reg, insn);
 
        /*
-        * A spill instruction is inserted at the end of the interval.
-        */ 
+        * Last instruction stays the same.
+        */
        insn = list_next_entry(&insn->insn_list_node, struct insn, 
insn_list_node);
-       assert_st_insn(INSN_ST_LOCAL, r1->interval->spill_slot, 
r1->interval->reg, insn);
+       assert_ptr_equals(insn_array[1], insn);
 
        free_compilation_unit(cu);
 }
-- 
1.6.3.3


------------------------------------------------------------------------------
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
_______________________________________________
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel

Reply via email to