This is needed for precise modeling of live ranges.
Signed-off-by: Tomek Grabiec <[email protected]>
---
include/jit/vars.h | 83 +++++++++++++---
jit/interval.c | 228 +++++++++++++++++++++++++++++++++++++++++-
jit/linear-scan.c | 51 ++++++----
jit/liveness.c | 24 +++--
jit/spill-reload.c | 12 +-
jit/trace-jit.c | 26 +++--
test/jit/linear-scan-test.c | 18 +---
test/jit/live-range-test.c | 50 ++++++++++
test/jit/liveness-test.c | 8 +-
9 files changed, 421 insertions(+), 79 deletions(-)
diff --git a/include/jit/vars.h b/include/jit/vars.h
index 6afb16b..f00c5f9 100644
--- a/include/jit/vars.h
+++ b/include/jit/vars.h
@@ -10,18 +10,9 @@
struct live_range {
unsigned long start, end; /* end is exclusive */
+ struct list_head range_list_node;
};
-static inline unsigned long range_last_insn_pos(struct live_range *range)
-{
- return (range->end - 1) & ~1;
-}
-
-static inline unsigned long range_first_insn_pos(struct live_range *range)
-{
- return range->start & ~1;
-}
-
static inline bool in_range(struct live_range *range, unsigned long offset)
{
return (offset >= range->start) && (offset < range->end);
@@ -69,8 +60,21 @@ struct live_interval {
/* Parent variable of this interval. */
struct var_info *var_info;
- /* Live range of this interval. */
- struct live_range range;
+ /* Live ranges of this interval. List of non-overlapping and
+ non-adjacent ranges sorted in ascending order. */
+ struct list_head range_list;
+
+ /*
+ * Points to a range from range_list which should be
+ * considered as interval's starting range in operations:
+ * intervals_intersect(), interval_intersection_start(),
+ * interval_range_at(). It's used to speedup register
+ * allocation. Intervals can have a lot of live ranges. Linear
+ * scan algorithm goes through intervals in ascending order by
+ * interval start. We can take advantage of this and don't
+ * browse ranges past current position in some operations.
+ */
+ struct live_range *current_range;
/* Linked list of child intervals. */
struct live_interval *next_child, *prev_child;
@@ -118,11 +122,66 @@ mark_need_reload(struct live_interval *it, struct
live_interval *parent)
it->spill_parent = parent;
}
+static inline struct live_range *node_to_range(struct list_head *node)
+{
+ return list_entry(node, struct live_range, range_list_node);
+}
+
+static inline struct live_range *
+next_range(struct list_head *list, struct live_range *range)
+{
+ if (range->range_list_node.next == list)
+ return NULL;
+
+ return list_entry(range->range_list_node.next, struct live_range,
+ range_list_node);
+}
+
+static inline unsigned long interval_start(struct live_interval *it)
+{
+ assert(!list_is_empty(&it->range_list));
+ return node_to_range(it->range_list.next)->start;
+}
+
+static inline unsigned long interval_end(struct live_interval *it)
+{
+ assert(!list_is_empty(&it->range_list));
+ return node_to_range(it->range_list.prev)->end;
+}
+
+static inline unsigned long interval_last_insn_pos(struct live_interval *it)
+{
+ return (interval_end(it) - 1) & ~1ul;
+}
+
+static inline unsigned long interval_first_insn_pos(struct live_interval *it)
+{
+ return interval_start(it) & ~1ul;
+}
+
+static inline bool interval_is_empty(struct live_interval *it)
+{
+ return list_is_empty(&it->range_list);
+}
+
+static inline struct live_range *interval_first_range(struct live_interval *it)
+{
+ assert(!interval_is_empty(it));
+ return list_first_entry(&it->range_list, struct live_range,
+ range_list_node);
+}
+
struct live_interval *alloc_interval(struct var_info *);
void free_interval(struct live_interval *);
struct live_interval *split_interval_at(struct live_interval *, unsigned long
pos);
unsigned long next_use_pos(struct live_interval *, unsigned long);
struct live_interval *vreg_start_interval(struct compilation_unit *, unsigned
long);
struct live_interval *interval_child_at(struct live_interval *, unsigned long);
+bool intervals_intersect(struct live_interval *, struct live_interval *);
+unsigned long interval_intersection_start(struct live_interval *, struct
live_interval *);
+bool interval_covers(struct live_interval *, unsigned long);
+int interval_add_range(struct live_interval *, unsigned long, unsigned long);
+struct live_range *interval_range_at(struct live_interval *, unsigned long);
+void interval_update_current_range(struct live_interval *, unsigned long);
#endif /* __JIT_VARS_H */
diff --git a/jit/interval.c b/jit/interval.c
index 9ad9d97..9e22c0c 100644
--- a/jit/interval.c
+++ b/jit/interval.c
@@ -35,6 +35,65 @@
#include <string.h>
#include <errno.h>
+static struct live_range *alloc_live_range(unsigned long start, unsigned long
end)
+{
+ struct live_range *range;
+
+ range = malloc(sizeof *range);
+ if (!range)
+ return NULL;
+
+ range->start = start;
+ range->end = end;
+ INIT_LIST_HEAD(&range->range_list_node);
+ return range;
+}
+
+static int split_ranges(struct live_interval *new, struct live_interval *it,
+ unsigned long pos)
+{
+ struct live_range *this;
+
+ list_for_each_entry(this, &it->range_list, range_list_node) {
+ struct live_range *next;
+
+ if (!in_range(this, pos))
+ continue;
+
+ next = next_range(&it->range_list, this);
+
+ if (this->start == pos) {
+ list_move(&this->range_list_node, &new->range_list);
+ } else {
+ struct live_range *range;
+
+ range = alloc_live_range(pos, this->end);
+ if (!range)
+ return -ENOMEM;
+
+ this->end = pos;
+ list_add(&range->range_list_node, &new->range_list);
+ }
+
+ this = next;
+ if (!this)
+ return 0;
+
+ while (&this->range_list_node != &it->range_list) {
+ struct list_head *next;
+
+ next = this->range_list_node.next;
+
+ list_move(&this->range_list_node, new->range_list.prev);
+ this = node_to_range(next);
+ }
+
+ return 0;
+ }
+
+ error("pos is not covered by any live range of the interval");
+}
+
struct live_interval *alloc_interval(struct var_info *var)
{
struct live_interval *interval = zalloc(sizeof *interval);
@@ -42,11 +101,10 @@ struct live_interval *alloc_interval(struct var_info *var)
interval->var_info = var;
interval->reg = MACH_REG_UNASSIGNED;
interval->fixed_reg = false;
- interval->range.start = ~0UL;
- interval->range.end = 0UL;
interval->spill_reload_reg.interval = interval;
INIT_LIST_HEAD(&interval->interval_node);
INIT_LIST_HEAD(&interval->use_positions);
+ INIT_LIST_HEAD(&interval->range_list);
}
return interval;
}
@@ -56,6 +114,11 @@ void free_interval(struct live_interval *interval)
if (interval->next_child)
free_interval(interval->next_child);
+ struct live_range *this, *next;
+ list_for_each_entry_safe(this, next, &interval->range_list,
range_list_node) {
+ free(this);
+ }
+
free(interval);
}
@@ -65,14 +128,21 @@ struct live_interval *split_interval_at(struct
live_interval *interval,
struct use_position *this, *next;
struct live_interval *new;
+ assert(pos > interval_start(interval));
+ assert(pos < interval_end(interval));
+
new = alloc_interval(interval->var_info);
if (!new)
return NULL;
new->reg = MACH_REG_UNASSIGNED;
- new->range.start = pos;
- new->range.end = interval->range.end;
- interval->range.end = pos;
+
+ if (split_ranges(new, interval, pos)) {
+ free_interval(new);
+ return NULL;
+ }
+
+ new->current_range = interval_first_range(new);
new->fixed_reg = interval->fixed_reg;
if (new->fixed_reg)
@@ -128,12 +198,58 @@ struct live_interval *vreg_start_interval(struct
compilation_unit *cu, unsigned
return var->interval;
}
+/**
+ * Advances @it->current_range to the last range which covers @pos or
+ * is before @pos.
+ */
+void interval_update_current_range(struct live_interval *it, unsigned long pos)
+{
+ if (pos < interval_start(it) || pos >= interval_end(it))
+ return;
+
+ assert (pos >= it->current_range->start);
+
+ while (!in_range(it->current_range, pos)) {
+ struct live_range *next;
+
+ next = next_range(&it->range_list, it->current_range);
+ if (pos < next->start)
+ break;
+
+ it->current_range = next;
+ }
+}
+
+struct live_range *interval_range_at(struct live_interval *it, unsigned long
pos)
+{
+ struct live_range *range;
+
+ if (pos < interval_start(it) || pos >= interval_end(it))
+ return NULL;
+
+ range = it->current_range;
+ if (!range || pos < range->start)
+ range = interval_first_range(it);
+
+ while (range && pos >= range->start) {
+ if (in_range(range, pos))
+ return range;
+
+ range = next_range(&it->range_list, range);
+ }
+
+ return NULL;
+}
+
struct live_interval *interval_child_at(struct live_interval *parent, unsigned
long pos)
{
struct live_interval *it = parent;
while (it) {
- if (in_range(&it->range, pos))
+ struct live_range *range;
+
+ range = interval_range_at(it, pos);
+ if (range)
return it;
it = it->next_child;
@@ -141,3 +257,103 @@ struct live_interval *interval_child_at(struct
live_interval *parent, unsigned l
return NULL;
}
+
+bool intervals_intersect(struct live_interval *it1, struct live_interval *it2)
+{
+ struct live_range *r1, *r2;
+
+ if (interval_is_empty(it1) || interval_is_empty(it2))
+ return false;
+
+ if (interval_start(it1) >= interval_end(it2) ||
+ interval_start(it2) >= interval_end(it1))
+ return false;
+
+ r1 = it1->current_range;
+ r2 = it2->current_range;
+
+ while (r1 && r2 && r1->end <= r2->start)
+ r1 = next_range(&it1->range_list, r1);
+
+ while (r1 && r2 && r2->end <= r1->start)
+ r2 = next_range(&it2->range_list, r2);
+
+ while (r1 && r2) {
+ if (ranges_intersect(r1, r2))
+ return true;
+
+ if (r1->start < r2->start)
+ r1 = next_range(&it1->range_list, r1);
+ else
+ r2 = next_range(&it2->range_list, r2);
+ }
+
+ return false;
+}
+
+unsigned long
+interval_intersection_start(struct live_interval *it1, struct live_interval
*it2)
+{
+ struct live_range *r1, *r2;
+
+ assert(!interval_is_empty(it1) && !interval_is_empty(it2));
+
+ r1 = it1->current_range;
+ r2 = it2->current_range;
+
+ while (r1 && r2 && r1->end <= r2->start)
+ r1 = next_range(&it1->range_list, r1);
+
+ while (r1 && r2 && r2->end <= r1->start)
+ r2 = next_range(&it2->range_list, r2);
+
+ while (r1 && r2) {
+ if (ranges_intersect(r1, r2))
+ return range_intersection_start(r1, r2);
+
+ if (r1->start < r2->start)
+ r1 = next_range(&it1->range_list, r1);
+ else
+ r2 = next_range(&it2->range_list, r2);
+ }
+
+ error("intervals do not overlap");
+}
+
+bool interval_covers(struct live_interval *it, unsigned long pos)
+{
+ return interval_range_at(it, pos) != NULL;
+}
+
+int interval_add_range(struct live_interval *it, unsigned long start,
+ unsigned long end)
+{
+ struct live_range *range, *next, *new;
+
+ new = alloc_live_range(start, end);
+ if (!new)
+ return -ENOMEM;
+
+ list_for_each_entry_safe(range, next, &it->range_list, range_list_node)
{
+ if (range->start > end)
+ break;
+
+ if (range->start <= new->end && new->start <= range->end) {
+ new->start = min(new->start, range->start);
+ new->end = max(new->end, range->end);
+ list_del(&range->range_list_node);
+ free(range);
+ }
+ }
+
+ list_for_each_entry_safe(range, next, &it->range_list, range_list_node)
{
+ if (new->start > range->start)
+ continue;
+
+ list_add_tail(&new->range_list_node, &range->range_list_node);
+ return 0;
+ }
+
+ list_add_tail(&new->range_list_node, &it->range_list);
+ return 0;
+}
diff --git a/jit/linear-scan.c b/jit/linear-scan.c
index 8baa914..765cea9 100644
--- a/jit/linear-scan.c
+++ b/jit/linear-scan.c
@@ -120,7 +120,7 @@ static void spill_interval(struct live_interval *it,
unsigned long pos,
unsigned long next_pos = next_use_pos(new, 0);
/* Trim interval if it does not start with a use position. */
- if (next_pos > new->range.start)
+ if (next_pos > interval_start(new))
new = split_interval_at(new, next_pos);
it->need_spill = true;
@@ -137,13 +137,13 @@ static void __spill_interval_intersecting(struct
live_interval *current,
if (it->reg != reg)
return;
- if (!ranges_intersect(&it->range, ¤t->range))
+ if (!intervals_intersect(it, current))
return;
- if (current->range.start == it->range.start)
+ if (interval_start(current) == interval_start(it))
return;
- spill_interval(it, current->range.start, unhandled);
+ spill_interval(it, interval_start(current), unhandled);
}
static void spill_all_intervals_intersecting(struct live_interval *current,
@@ -185,7 +185,7 @@ static void allocate_blocked_reg(struct live_interval
*current,
if (it->fixed_reg)
continue;
- pos = next_use_pos(it, current->range.start);
+ pos = next_use_pos(it, interval_start(current));
set_use_pos(use_pos, it->reg, pos);
}
@@ -195,8 +195,8 @@ static void allocate_blocked_reg(struct live_interval
*current,
if (it->fixed_reg)
continue;
- if (ranges_intersect(&it->range, ¤t->range)) {
- pos = next_use_pos(it, current->range.start);
+ if (intervals_intersect(it, current)) {
+ pos = next_use_pos(it, interval_start(current));
set_use_pos(use_pos, it->reg, pos);
}
}
@@ -212,10 +212,10 @@ static void allocate_blocked_reg(struct live_interval
*current,
if (!it->fixed_reg)
continue;
- if (ranges_intersect(&it->range, ¤t->range)) {
+ if (intervals_intersect(it, current)) {
unsigned long pos;
- pos = range_intersection_start(&it->range,
¤t->range);
+ pos = interval_intersection_start(it, current);
set_block_pos(block_pos, use_pos, it->reg, pos);
}
}
@@ -228,7 +228,7 @@ static void allocate_blocked_reg(struct live_interval
*current,
* All active and inactive intervals are used before current,
* so it is best to spill current itself
*/
- pos = next_use_pos(current, current->range.start);
+ pos = next_use_pos(current, interval_start(current));
spill_interval(current, pos, unhandled);
} else {
/*
@@ -236,7 +236,7 @@ static void allocate_blocked_reg(struct live_interval
*current,
*/
current->reg = reg;
- if (block_pos[reg] < current->range.end)
+ if (block_pos[reg] < interval_end(current))
spill_interval(current, block_pos[reg], unhandled);
spill_all_intervals_intersecting(current, reg, active,
@@ -262,10 +262,10 @@ static void try_to_allocate_free_reg(struct live_interval
*current,
}
list_for_each_entry(it, inactive, interval_node) {
- if (ranges_intersect(&it->range, ¤t->range)) {
+ if (intervals_intersect(it, current)) {
unsigned long pos;
- pos = range_intersection_start(&it->range,
¤t->range);
+ pos = interval_intersection_start(it, current);
set_free_pos(free_until_pos, it->reg, pos);
}
}
@@ -278,7 +278,7 @@ static void try_to_allocate_free_reg(struct live_interval
*current,
return;
}
- if (current->range.end <= free_until_pos[reg]) {
+ if (interval_end(current) <= free_until_pos[reg]) {
/*
* Register available for the full interval.
*/
@@ -297,7 +297,7 @@ static int interval_compare(void *a, void *b)
struct live_interval *x = a;
struct live_interval *y = b;
- return (int)(y->range.start - x->range.start);
+ return (int)(interval_start(y) - interval_start(x));
}
int allocate_registers(struct compilation_unit *cu)
@@ -327,6 +327,11 @@ int allocate_registers(struct compilation_unit *cu)
* non-fixed interval.
*/
for_each_variable(var, cu->var_infos) {
+ if (interval_is_empty(var->interval))
+ continue;
+
+ var->interval->current_range =
interval_first_range(var->interval);
+
if (var->interval->fixed_reg)
list_add(&var->interval->interval_node, &inactive);
else
@@ -338,29 +343,35 @@ int allocate_registers(struct compilation_unit *cu)
unsigned long position;
current = pqueue_remove_top(unhandled);
- position = current->range.start;
+ position = interval_start(current);
list_for_each_entry_safe(it, prev, &active, interval_node) {
- if (it->range.end <= position) {
+ if (interval_end(it) <= position) {
/*
* Remove handled interval from active list.
*/
list_del(&it->interval_node);
continue;
}
- if (!in_range(&it->range, position))
+
+ interval_update_current_range(it, position);
+
+ if (!interval_covers(it, position))
list_move(&it->interval_node, &inactive);
}
list_for_each_entry_safe(it, prev, &inactive, interval_node) {
- if (it->range.end <= position) {
+ if (interval_end(it) <= position) {
/*
* Remove handled interval from inactive list.
*/
list_del(&it->interval_node);
continue;
}
- if (in_range(&it->range, position))
+
+ interval_update_current_range(it, position);
+
+ if (interval_covers(it, position))
list_move(&it->interval_node, &active);
}
diff --git a/jit/liveness.c b/jit/liveness.c
index 3e0f586..ee01989 100644
--- a/jit/liveness.c
+++ b/jit/liveness.c
@@ -36,13 +36,19 @@
#include <errno.h>
#include <stdlib.h>
-static void __update_live_range(struct live_range *range, unsigned long pos)
+static void __update_live_range(struct live_interval *it, unsigned long pos)
{
- if (range->start > pos)
- range->start = pos;
+ if (interval_is_empty(it))
+ interval_add_range(it, pos, pos + 1);
+ else {
+ struct live_range *r = interval_first_range(it);
- if (range->end < (pos + 1))
- range->end = pos + 1;
+ if (r->start > pos)
+ r->start = pos;
+
+ if (r->end < (pos + 1))
+ r->end = pos + 1;
+ }
}
static void update_live_ranges(struct compilation_unit *cu)
@@ -54,10 +60,10 @@ static void update_live_ranges(struct compilation_unit *cu)
for_each_variable(var, cu->var_infos) {
if (test_bit(this->live_in_set->bits, var->vreg))
- __update_live_range(&var->interval->range,
this->start_insn);
+ __update_live_range(var->interval,
this->start_insn);
if (test_bit(this->live_out_set->bits, var->vreg))
- __update_live_range(&var->interval->range,
this->end_insn);
+ __update_live_range(var->interval,
this->end_insn);
}
}
}
@@ -134,7 +140,7 @@ static void __analyze_use_def(struct basic_block *bb,
struct insn *insn)
for (i = 0; i < nr_uses; i++) {
struct var_info *var = uses[i];
- __update_live_range(&var->interval->range, insn->lir_pos);
+ __update_live_range(var->interval, insn->lir_pos);
/*
* It's in the use set if and only if it has not
@@ -148,7 +154,7 @@ static void __analyze_use_def(struct basic_block *bb,
struct insn *insn)
for (i = 0; i < nr_defs; i++) {
struct var_info *var = defs[i];
- __update_live_range(&var->interval->range, insn->lir_pos);
+ __update_live_range(var->interval, insn->lir_pos);
set_bit(bb->def_set->bits, var->vreg);
}
}
diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index 5964682..f434547 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -50,7 +50,7 @@ static struct insn *first_insn(struct compilation_unit *cu,
struct live_interval
{
struct insn *ret;
- ret = radix_tree_lookup(cu->lir_insn_map,
range_first_insn_pos(&interval->range));
+ ret = radix_tree_lookup(cu->lir_insn_map,
interval_first_insn_pos(interval));
assert(ret != NULL);
return ret;
@@ -60,7 +60,7 @@ static struct insn *last_insn(struct compilation_unit *cu,
struct live_interval
{
struct insn *ret;
- ret = radix_tree_lookup(cu->lir_insn_map,
range_last_insn_pos(&interval->range));
+ ret = radix_tree_lookup(cu->lir_insn_map,
interval_last_insn_pos(interval));
assert(ret != NULL);
return ret;
@@ -186,7 +186,7 @@ static int __insert_spill_reload_insn(struct live_interval
*interval, struct com
{
int err = 0;
- if (range_is_empty(&interval->range))
+ if (interval_is_empty(interval))
goto out;
if (interval->need_reload) {
@@ -198,7 +198,7 @@ static int __insert_spill_reload_insn(struct live_interval
*interval, struct com
* can't insert a reload instruction in the middle of
* instruction.
*/
- assert((interval->range.start & 1) == 0);
+ assert((interval_start(interval) & 1) == 0);
err = insert_reload_insn(interval, cu,
interval->spill_parent->spill_slot,
@@ -235,7 +235,7 @@ static void insert_mov_insns(struct compilation_unit *cu,
for (i = 0; i < nr_mapped; i++) {
from_it = mappings[i].from;
- if (from_it->need_spill && from_it->range.end <
from_bb->end_insn) {
+ if (from_it->need_spill && interval_end(from_it) <
from_bb->end_insn) {
slots[i] = from_it->spill_slot;
} else {
slots[i] = spill_interval(from_it, cu, spill_before,
bc_offset);
@@ -246,7 +246,7 @@ static void insert_mov_insns(struct compilation_unit *cu,
for (i = 0; i < nr_mapped; i++) {
to_it = mappings[i].to;
- if (to_it->need_reload && to_it->range.start >=
to_bb->start_insn) {
+ if (to_it->need_reload && interval_start(to_it) >=
to_bb->start_insn) {
insert_copy_slot_insn(mappings[i].to, cu, slots[i],
to_it->spill_parent->spill_slot,
spill_before, bc_offset);
diff --git a/jit/trace-jit.c b/jit/trace-jit.c
index 3b2823e..c648a40 100644
--- a/jit/trace-jit.c
+++ b/jit/trace-jit.c
@@ -238,14 +238,14 @@ print_var_liveness(struct compilation_unit *cu, struct
var_info *var)
interval = var->interval;
offset = 0;
- if (range_is_empty(&interval->range))
+ if (interval_is_empty(interval))
goto skip;
- for (; offset < interval->range.start; offset++)
+ for (; offset < interval_start(interval); offset++)
trace_printf(" ");
- for (; offset < interval->range.end; offset++) {
- if (in_range(&interval->range, offset)) {
+ for (; offset < interval_end(interval); offset++) {
+ if (interval_covers(interval, offset)) {
if (next_use_pos(var->interval, offset) == offset) {
/* In use */
trace_printf("UUU");
@@ -263,10 +263,10 @@ print_var_liveness(struct compilation_unit *cu, struct
var_info *var)
for (; offset < cu->last_insn; offset++)
trace_printf(" ");
- if (!range_is_empty(&interval->range))
+ if (!interval_is_empty(interval))
trace_printf(" (start: %2lu, end: %2lu)\n",
- interval->range.start,
- interval->range.end);
+ interval_start(interval),
+ interval_end(interval));
else
trace_printf(" (empty)\n");
}
@@ -349,7 +349,15 @@ void trace_regalloc(struct compilation_unit *cu)
struct live_interval *interval;
for (interval = var->interval; interval != NULL; interval =
interval->next_child) {
- trace_printf(" %2lu (pos: %2ld-%2lu):", var->vreg,
(signed long)interval->range.start, interval->range.end);
+ trace_printf(" %2lu ", var->vreg);
+
+ if (interval_is_empty(interval))
+ trace_printf("(empty) :");
+ else
+ trace_printf("(pos: %3lu-%3lu):",
+ interval_start(interval),
+ interval_end(interval));
+
trace_printf("\t%s", reg_name(interval->reg));
trace_printf("\t%s", interval->fixed_reg ? "fixed\t" :
"non-fixed");
if (interval->need_spill) {
@@ -409,7 +417,7 @@ static void print_gc_map(struct compilation_unit *cu,
struct insn *insn)
if (!test_bit(live_vars->bits, i++))
continue;
- if (in_range(&var->interval->range, insn->lir_pos)) {
+ if (interval_covers(var->interval, insn->lir_pos)) {
trace_printf("%d (%s), ",
var->vreg, reg_name(var->interval->reg));
} else if (var->interval->need_spill) {
diff --git a/test/jit/linear-scan-test.c b/test/jit/linear-scan-test.c
index b742084..433c503 100644
--- a/test/jit/linear-scan-test.c
+++ b/test/jit/linear-scan-test.c
@@ -19,12 +19,10 @@ void
test_allocates_different_registers_for_overlapping_intervals(void)
cu = compilation_unit_alloc(&method);
v1 = get_var(cu, J_INT);
- v1->interval->range.start = 0;
- v1->interval->range.end = 2;
+ interval_add_range(v1->interval, 0, 2);
v2 = get_var(cu, J_INT);
- v2->interval->range.start = 1;
- v2->interval->range.end = 2;
+ interval_add_range(v2->interval, 1, 2);
allocate_registers(cu);
@@ -41,12 +39,10 @@ void
test_reuses_registers_for_non_overlapping_intervals(void)
cu = compilation_unit_alloc(&method);
v1 = get_var(cu, J_INT);
- v1->interval->range.start = 0;
- v1->interval->range.end = 2;
+ interval_add_range(v1->interval, 0, 2);
v2 = get_var(cu, J_INT);
- v2->interval->range.start = 2;
- v2->interval->range.end = 4;
+ interval_add_range(v2->interval, 2, 4);
allocate_registers(cu);
@@ -63,12 +59,10 @@ void
test_honors_fixed_interval_register_constraint_for_overlapping_intervals(vo
cu = compilation_unit_alloc(&method);
v1 = get_fixed_var(cu, R0);
- v1->interval->range.start = 0;
- v1->interval->range.end = 2;
+ interval_add_range(v1->interval, 0, 2);
v2 = get_var(cu, J_INT);
- v2->interval->range.start = 0;
- v2->interval->range.end = 2;
+ interval_add_range(v2->interval, 0, 2);
allocate_registers(cu);
diff --git a/test/jit/live-range-test.c b/test/jit/live-range-test.c
index 0f0f626..a078218 100644
--- a/test/jit/live-range-test.c
+++ b/test/jit/live-range-test.c
@@ -84,3 +84,53 @@ void test_ranges_that_do_not_intersect(void)
assert_false(ranges_intersect(&range1, &range2));
assert_false(ranges_intersect(&range2, &range1));
}
+
+void test_interval_add_range(void)
+{
+ struct live_interval it;
+ struct live_range *r;
+
+ INIT_LIST_HEAD(&it.range_list);
+
+ interval_add_range(&it, 1, 3);
+ r = interval_first_range(&it);
+ assert_int_equals(1, r->start);
+ assert_int_equals(3, r->end);
+ assert_ptr_equals(NULL, next_range(&it.range_list, r));
+
+ interval_add_range(&it, 5, 7);
+ r = interval_first_range(&it);
+ assert_int_equals(1, r->start);
+ assert_int_equals(3, r->end);
+ r = next_range(&it.range_list, r);
+ assert_int_equals(5, r->start);
+ assert_int_equals(7, r->end);
+ assert_ptr_equals(NULL, next_range(&it.range_list, r));
+
+ interval_add_range(&it, 3, 5);
+ r = interval_first_range(&it);
+ assert_int_equals(1, r->start);
+ assert_int_equals(7, r->end);
+ assert_ptr_equals(NULL, next_range(&it.range_list, r));
+
+ interval_add_range(&it, 7, 8);
+ r = interval_first_range(&it);
+ assert_int_equals(1, r->start);
+ assert_int_equals(8, r->end);
+ assert_ptr_equals(NULL, next_range(&it.range_list, r));
+
+ interval_add_range(&it, 10, 13);
+ r = interval_first_range(&it);
+ assert_int_equals(1, r->start);
+ assert_int_equals(8, r->end);
+ r = next_range(&it.range_list, r);
+ assert_int_equals(10, r->start);
+ assert_int_equals(13, r->end);
+ assert_ptr_equals(NULL, next_range(&it.range_list, r));
+
+ interval_add_range(&it, 0, 14);
+ r = interval_first_range(&it);
+ assert_int_equals(0, r->start);
+ assert_int_equals(14, r->end);
+ assert_ptr_equals(NULL, next_range(&it.range_list, r));
+}
diff --git a/test/jit/liveness-test.c b/test/jit/liveness-test.c
index 19f01d0..cb85e42 100644
--- a/test/jit/liveness-test.c
+++ b/test/jit/liveness-test.c
@@ -15,8 +15,8 @@ struct vm_method method;
static void assert_live_range(struct live_interval *interval, unsigned long
expected_start, unsigned long expected_end)
{
- assert_int_equals(expected_start, interval->range.start);
- assert_int_equals(expected_end, interval->range.end);
+ assert_int_equals(expected_start, interval_start(interval));
+ assert_int_equals(expected_end, interval_end(interval));
}
static void assert_uses(struct basic_block *bb, struct var_info *var)
@@ -35,9 +35,7 @@ static void assert_insn_at_equals(struct insn *insn, struct
compilation_unit *cu
{
struct insn *insn2;
- insn2 = radix_tree_lookup(cu->lir_insn_map,
- interval->range.start + offset);
-
+ insn2 = radix_tree_lookup(cu->lir_insn_map, interval_start(interval) +
offset);
assert_ptr_equals(insn, insn2);
}
--
1.6.3.3
------------------------------------------------------------------------------
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day
trial. Simplify your report design, integration and deployment - and focus on
what you do best, core application coding. Discover what's new with
Crystal Reports now. http://p.sf.net/sfu/bobj-july
_______________________________________________
Jatovm-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/jatovm-devel