[RFC][PATCH] gc: rewrite GC stop-the-world algorithm.

2009-12-16 Thread Tomek Grabiec
Garbage collection is done in a separate thread.

Before the main GC work is started, thread creation and destruction
must be disabled and all mutator threads must be suspended. The
SIGUSR2 signal is sent to all threads. The signal handler determines
whether a thread is running JIT code or native code. In the former
case the handler returns to let the thread reach a safepoint. In the
latter case it is put into a safepoint immediately.  When a thread is
suspended in a safepoint it waits for another SIGUSR2 signal which is
sent by the GC thread to wake threads up.

We must not use mutexes from signal handlers because
pthread_mutex_lock() is not reentrant. Using it from a signal handler
may result in a deadlock. This approach uses semaphores to notify GC
when a thread enters or leaves a safepoint.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/include/arch/memory.h|   13 +++
 include/vm/gc.h   |4 +-
 include/vm/thread.h   |9 ++-
 regression/jvm/GcTortureTest.java |2 +-
 vm/gc.c   |  216 ++---
 vm/signal.c   |9 +--
 vm/thread.c   |   85 +++
 7 files changed, 194 insertions(+), 144 deletions(-)

diff --git a/arch/x86/include/arch/memory.h b/arch/x86/include/arch/memory.h
index e27c31b..30f8c0f 100644
--- a/arch/x86/include/arch/memory.h
+++ b/arch/x86/include/arch/memory.h
@@ -3,6 +3,19 @@
 
 #include 
 
+/*
+ * Memory barriers.
+ */
+#ifdef CONFIG_X86_32
+#define mb() asm volatile("lock; addl $0,0(%esp)")
+#define rmb() asm volatile("lock; addl $0,0(%esp)")
+#define wmb() asm volatile("lock; addl $0,0(%esp)")
+#else
+#define mb()   asm volatile("mfence":::"memory")
+#define rmb()  asm volatile("lfence":::"memory")
+#define wmb()  asm volatile("sfence" ::: "memory")
+#endif
+
 static inline void cpu_write_u32(unsigned char *p, uint32_t val)
 {
*((uint32_t*)p) = val;
diff --git a/include/vm/gc.h b/include/vm/gc.h
index 06bebc6..7d23f5b 100644
--- a/include/vm/gc.h
+++ b/include/vm/gc.h
@@ -13,9 +13,7 @@ void gc_init(void);
 
 void *gc_alloc(size_t size);
 
-void gc_attach_thread(void);
-void gc_detach_thread(void);
-
 void gc_safepoint(struct register_state *);
+void suspend_handler(int, siginfo_t *, void *);
 
 #endif
diff --git a/include/vm/thread.h b/include/vm/thread.h
index f6e6551..97f672e 100644
--- a/include/vm/thread.h
+++ b/include/vm/thread.h
@@ -36,7 +36,7 @@ struct vm_exec_env {
struct vm_thread *thread;
 };
 
-unsigned int vm_nr_threads_running(void);
+unsigned int vm_nr_threads(void);
 
 extern __thread struct vm_exec_env current_exec_env;
 
@@ -59,5 +59,12 @@ char *vm_thread_get_name(struct vm_thread *thread);
 bool vm_thread_is_interrupted(struct vm_thread *thread);
 bool vm_thread_interrupted(struct vm_thread *thread);
 void vm_thread_interrupt(struct vm_thread *thread);
+void vm_lock_thread_count(void);
+void vm_unlock_thread_count(void);
+
+extern struct list_head thread_list;
+extern pthread_mutex_t threads_mutex;
+
+#define vm_thread_for_each(this) list_for_each_entry(this, &thread_list, 
list_node)
 
 #endif
diff --git a/regression/jvm/GcTortureTest.java 
b/regression/jvm/GcTortureTest.java
index 33eef15..9f00232 100644
--- a/regression/jvm/GcTortureTest.java
+++ b/regression/jvm/GcTortureTest.java
@@ -7,7 +7,7 @@ public class GcTortureTest {
 for (int i = 0; i < threads.length; i++) {
 threads[i] = new Thread(new Runnable() {
 public void run() {
-for (int i = 0; i < 100; i++)
+for (int i = 0; i < 10; i++)
   new Object();
 }
 });
diff --git a/vm/gc.c b/vm/gc.c
index 0815d59..3de8f21 100644
--- a/vm/gc.c
+++ b/vm/gc.c
@@ -1,45 +1,37 @@
 #include 
+#include 
+#include 
 #include 
 #include 
-#include 
 
+#include "arch/memory.h"
 #include "arch/registers.h"
+#include "arch/signal.h"
 #include "lib/guard-page.h"
 #include "vm/thread.h"
 #include "vm/stdlib.h"
 #include "vm/die.h"
 #include "vm/gc.h"
+#include "vm/trace.h"
 
 void *gc_safepoint_page;
 
-static pthread_mutex_t safepoint_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Protected by safepoint_mutex */
-static pthread_cond_t everyone_in_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t everyone_out_cond = PTHREAD_COND_INITIALIZER;
-static unsigned int nr_exiting_safepoint;
-static unsigned int nr_in_safepoint;
+static pthread_mutex_t gc_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t gc_cond = PTHREAD_COND_INITIALIZER;
 
-static pthread_cond_t can_continue_cond = PTHREAD_COND_INITIALIZER;
-static bool can_continue = true;
+/* Protected by gc_mutex */
+static unsigned int nr_threads;
+static bool gc_started;
 
-bool verbose_gc;
-bool

[PATCH 3/3] gc: rewrite GC stop-the-world algorithm.

2009-12-15 Thread Tomek Grabiec
Garbage collection is done in a separate thread to assure a
predictable amount of stack space.

Before the main GC work is started all mutator threads must be
suspended. The SIGUSR2 signal is sent to all threads.  The signal
handler determines whether a thread is running JIT code or native
code. In the former case the handler returns to let the thread reach a
safepoint. In the latter case it is put into a safepoint immediately.
When a thread is suspended in a safepoint it waits for another SIGUSR2
signal which is sent by the GC thread to wake threads up.

We must not use mutexes from signal handlers because
pthread_mutex_lock() is not reentrant. Using it from a signal handler
may result in a deadlock. This approach uses semaphores to notify GC
when a thread enters or leaves a safepoint. According to the manual
sem_post() is the only async-signal safe pthreads method.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/include/arch/memory.h|8 ++
 include/vm/gc.h   |4 +-
 include/vm/thread.h   |7 +-
 regression/jvm/GcTortureTest.java |2 +-
 vm/gc.c   |  212 ++---
 vm/signal.c   |9 +--
 vm/thread.c   |   50 ++---
 7 files changed, 154 insertions(+), 138 deletions(-)

diff --git a/arch/x86/include/arch/memory.h b/arch/x86/include/arch/memory.h
index e27c31b..89232d2 100644
--- a/arch/x86/include/arch/memory.h
+++ b/arch/x86/include/arch/memory.h
@@ -3,6 +3,14 @@
 
 #include 
 
+static inline void mbarrier(void)
+{
+   __asm__ volatile (
+ "lock \n"
+ "addl $0,0(%%esp)\n"
+ ::: "memory");
+}
+
 static inline void cpu_write_u32(unsigned char *p, uint32_t val)
 {
*((uint32_t*)p) = val;
diff --git a/include/vm/gc.h b/include/vm/gc.h
index 06bebc6..7d23f5b 100644
--- a/include/vm/gc.h
+++ b/include/vm/gc.h
@@ -13,9 +13,7 @@ void gc_init(void);
 
 void *gc_alloc(size_t size);
 
-void gc_attach_thread(void);
-void gc_detach_thread(void);
-
 void gc_safepoint(struct register_state *);
+void suspend_handler(int, siginfo_t *, void *);
 
 #endif
diff --git a/include/vm/thread.h b/include/vm/thread.h
index e4ac546..97f672e 100644
--- a/include/vm/thread.h
+++ b/include/vm/thread.h
@@ -36,7 +36,7 @@ struct vm_exec_env {
struct vm_thread *thread;
 };
 
-unsigned int vm_nr_threads_running(void);
+unsigned int vm_nr_threads(void);
 
 extern __thread struct vm_exec_env current_exec_env;
 
@@ -62,4 +62,9 @@ void vm_thread_interrupt(struct vm_thread *thread);
 void vm_lock_thread_count(void);
 void vm_unlock_thread_count(void);
 
+extern struct list_head thread_list;
+extern pthread_mutex_t threads_mutex;
+
+#define vm_thread_for_each(this) list_for_each_entry(this, &thread_list, 
list_node)
+
 #endif
diff --git a/regression/jvm/GcTortureTest.java 
b/regression/jvm/GcTortureTest.java
index 33eef15..9f00232 100644
--- a/regression/jvm/GcTortureTest.java
+++ b/regression/jvm/GcTortureTest.java
@@ -7,7 +7,7 @@ public class GcTortureTest {
 for (int i = 0; i < threads.length; i++) {
 threads[i] = new Thread(new Runnable() {
 public void run() {
-for (int i = 0; i < 100; i++)
+for (int i = 0; i < 10; i++)
   new Object();
 }
 });
diff --git a/vm/gc.c b/vm/gc.c
index 0815d59..305470f 100644
--- a/vm/gc.c
+++ b/vm/gc.c
@@ -1,45 +1,37 @@
 #include 
+#include 
+#include 
 #include 
 #include 
-#include 
 
+#include "arch/memory.h"
 #include "arch/registers.h"
+#include "arch/signal.h"
 #include "lib/guard-page.h"
 #include "vm/thread.h"
 #include "vm/stdlib.h"
 #include "vm/die.h"
 #include "vm/gc.h"
+#include "vm/trace.h"
 
 void *gc_safepoint_page;
 
-static pthread_mutex_t safepoint_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Protected by safepoint_mutex */
-static pthread_cond_t everyone_in_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t everyone_out_cond = PTHREAD_COND_INITIALIZER;
-static unsigned int nr_exiting_safepoint;
-static unsigned int nr_in_safepoint;
+static pthread_mutex_t gc_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t gc_cond = PTHREAD_COND_INITIALIZER;
 
-static pthread_cond_t can_continue_cond = PTHREAD_COND_INITIALIZER;
-static bool can_continue = true;
+/* Protected by gc_mutex */
+static unsigned int nr_threads;
+static bool gc_started;
 
-bool verbose_gc;
-bool gc_enabled;
+static sem_t safepoint_sem;
 
-void gc_init(void)
-{
-   gc_safepoint_page = alloc_guard_page(false);
-   if (!gc_safepoint_page)
-   die("Couldn't allocate GC safepoint guard page");
-}
+static volatile sig_atomic_t can_continue;
+static __thread sig_atomic_t in_safepoint;
 
-void gc_attach_thread(void)
-{
-}
+static pthread_

[PATCH 2/3] vm: support for disabling thread creation and deletion

2009-12-15 Thread Tomek Grabiec
We need this to implement GC during which threads
should not be created nor deleted.

Signed-off-by: Tomek Grabiec 
---
 include/vm/thread.h |2 ++
 vm/thread.c |   37 ++---
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/include/vm/thread.h b/include/vm/thread.h
index f6e6551..e4ac546 100644
--- a/include/vm/thread.h
+++ b/include/vm/thread.h
@@ -59,5 +59,7 @@ char *vm_thread_get_name(struct vm_thread *thread);
 bool vm_thread_is_interrupted(struct vm_thread *thread);
 bool vm_thread_interrupted(struct vm_thread *thread);
 void vm_thread_interrupt(struct vm_thread *thread);
+void vm_lock_thread_count(void);
+void vm_unlock_thread_count(void);
 
 #endif
diff --git a/vm/thread.c b/vm/thread.c
index a0770b1..76e4f2e 100644
--- a/vm/thread.c
+++ b/vm/thread.c
@@ -58,6 +58,9 @@ static struct list_head thread_list;
 
 #define thread_for_each(this) list_for_each_entry(this, &thread_list, 
list_node)
 
+static bool thread_count_locked;
+static pthread_cond_t thread_count_lock_cond = PTHREAD_COND_INITIALIZER;
+
 static void vm_thread_free(struct vm_thread *thread)
 {
free(thread);
@@ -150,29 +153,23 @@ void vm_thread_set_state(struct vm_thread *thread, enum 
vm_thread_state state)
 
 static void vm_thread_attach_thread(struct vm_thread *thread)
 {
-   pthread_mutex_lock(&threads_mutex);
list_add(&thread->list_node, &thread_list);
nr_threads_running++;
-   pthread_mutex_unlock(&threads_mutex);
-
gc_attach_thread();
 }
 
+/* The caller must hold threads_mutex */
 static void vm_thread_detach_thread(struct vm_thread *thread)
 {
vm_thread_set_state(thread, VM_THREAD_STATE_TERMINATED);
 
gc_detach_thread();
-
-   pthread_mutex_lock(&threads_mutex);
-
list_del(&thread->list_node);
 
if (!vm_thread_is_daemon(thread))
nr_non_daemons--;
 
pthread_cond_broadcast(&thread_terminate_cond);
-   pthread_mutex_unlock(&threads_mutex);
 }
 
 int init_threading(void)
@@ -254,7 +251,12 @@ static void *vm_thread_entry(void *arg)
if (exception_occurred())
vm_print_exception(exception_occurred());
 
+   pthread_mutex_lock(&threads_mutex);
+   while (thread_count_locked)
+   pthread_cond_wait(&thread_count_lock_cond, &threads_mutex);
+
vm_thread_detach_thread(vm_thread_self());
+   pthread_mutex_unlock(&threads_mutex);
 
return NULL;
 }
@@ -284,13 +286,19 @@ int vm_thread_start(struct vm_object *vmthread)
pthread_mutex_unlock(&threads_mutex);
}
 
+   pthread_mutex_lock(&threads_mutex);
+   while (thread_count_locked)
+   pthread_cond_wait(&thread_count_lock_cond, &threads_mutex);
+
vm_thread_attach_thread(thread);
 
if (pthread_create(&thread->posix_id, NULL, &vm_thread_entry, thread)) {
vm_thread_detach_thread(thread);
+   pthread_mutex_unlock(&threads_mutex);
return -1;
}
 
+   pthread_mutex_unlock(&threads_mutex);
return 0;
 }
 
@@ -359,3 +367,18 @@ void vm_thread_interrupt(struct vm_thread *thread)
pthread_cond_broadcast(&mon->cond);
pthread_mutex_unlock(&mon->mutex);
 }
+
+void vm_lock_thread_count(void)
+{
+   pthread_mutex_lock(&threads_mutex);
+   thread_count_locked = true;
+   pthread_mutex_unlock(&threads_mutex);
+}
+
+void vm_unlock_thread_count(void)
+{
+   pthread_mutex_lock(&threads_mutex);
+   thread_count_locked = false;
+   pthread_cond_broadcast(&thread_count_lock_cond);
+   pthread_mutex_unlock(&threads_mutex);
+}
-- 
1.6.0.4


--
Return on Information:
Google Enterprise Search pays you back
Get the facts.
http://p.sf.net/sfu/google-dev2dev
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/3] x86: make all safepoint instructions do a safepoint poll before.

2009-12-15 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |   39 ++-
 1 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 630af2f..5dea125 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -916,7 +916,7 @@ reg:EXPR_CLASS_FIELD 1
 
if (running_on_valgrind) {
select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned 
long)vmc));
-   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned 
long)vm_class_ensure_init));
+   select_safepoint_insn(s, tree, rel_insn(INSN_CALL_REL, 
(unsigned long)vm_class_ensure_init));
method_args_cleanup(s, tree, 1);
 
mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_REG,
@@ -1011,7 +1011,7 @@ freg: EXPR_FLOAT_CLASS_FIELD 1
 
if (running_on_valgrind) {
select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned 
long)vmc));
-   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned 
long)vm_class_ensure_init));
+   select_safepoint_insn(s, tree, rel_insn(INSN_CALL_REL, 
(unsigned long)vm_class_ensure_init));
method_args_cleanup(s, tree, 1);
 
if (expr->vm_type == J_FLOAT)
@@ -2734,7 +2734,7 @@ stmt: STMT_MONITOR_ENTER(reg)
ref = state->left->reg1;
 
select_insn(s, tree, reg_insn(INSN_PUSH_REG, ref));
-   select_insn(s, tree, rel_insn(INSN_CALL_REL,
+   select_safepoint_insn(s, tree, rel_insn(INSN_CALL_REL,
(unsigned long) vm_object_lock));
 
method_args_cleanup(s, tree, 1);
@@ -2747,7 +2747,7 @@ stmt: STMT_MONITOR_EXIT(reg)
ref = state->left->reg1;
 
select_insn(s, tree, reg_insn(INSN_PUSH_REG, ref));
-   select_insn(s, tree, rel_insn(INSN_CALL_REL,
+   select_safepoint_insn(s, tree, rel_insn(INSN_CALL_REL,
(unsigned long) vm_object_unlock));
 
method_args_cleanup(s, tree, 1);
@@ -2809,11 +2809,21 @@ select_insn(struct basic_block *bb, struct tree_node 
*tree, struct insn *insn)
bb_add_insn(bb, insn);
 }
 
+static void select_poll_safepoint(struct basic_block *s, struct tree_node 
*tree)
+{
+   struct insn *insn;
+
+   assert(gc_safepoint_page);
+   insn = imm_memdisp_insn(INSN_TEST_IMM_MEMDISP, 0, (unsigned long) 
gc_safepoint_page);
+   select_insn(s, tree, insn);
+}
+
 static void
 select_safepoint_insn(struct basic_block *bb, struct tree_node *tree,
  struct insn *insn)
 {
insn->flags |= INSN_FLAG_SAFEPOINT;
+   select_poll_safepoint(bb, tree);
select_insn(bb, tree, insn);
 }
 
@@ -3230,7 +3240,7 @@ static void select_vm_native_call(struct basic_block *s, 
struct tree_node *tree,
field = tr_addr + offsetof(struct vm_native_stack_entry, target);
select_set_target(s, tree, target, offset_reg, field);
 
-   select_insn(s, tree, call_insn);
+   select_safepoint_insn(s, tree, call_insn);
 
save_invoke_result(s, tree, method, stmt);
 
@@ -3242,15 +3252,6 @@ static void select_vm_native_call(struct basic_block *s, 
struct tree_node *tree,
 offset_reg, offset_tls));
 }
 
-static void select_poll_safepoint(struct basic_block *s, struct tree_node 
*tree)
-{
-   struct insn *insn;
-
-   assert(gc_safepoint_page);
-   insn = imm_memdisp_insn(INSN_TEST_IMM_MEMDISP, 0, (unsigned long) 
gc_safepoint_page);
-   select_safepoint_insn(s, tree, insn);
-}
-
 static void invoke(struct basic_block *s, struct tree_node *tree)
 {
struct compilation_unit *cu;
@@ -3285,14 +3286,12 @@ static void invoke(struct basic_block *s, struct 
tree_node *tree)
pthread_mutex_unlock(&cu->mutex);
}
 
-   select_poll_safepoint(s, tree);
-
call_insn = rel_insn(INSN_CALL_REL, (unsigned long) target);
 
if (vm_method_is_vm_native(method))
select_vm_native_call(s, tree, method, stmt, call_insn, 
vm_method_native_ptr(method));
else {
-   select_insn(s, tree, call_insn);
+   select_safepoint_insn(s, tree, call_insn);
save_invoke_result(s, tree, method, stmt);
}
 
@@ -3342,12 +3341,10 @@ static void invokevirtual(struct _MBState *state, 
struct basic_block *s, struct
/* native ptr */
select_insn(s, tree, imm_reg_insn(INSN_ADD_IMM_REG, method_offset, 
call_target));
 
-   select_poll_safepoint(s, tree);
-
/* invoke method */
call_insn = reg_insn(INSN_CALL_REG, call_target);
 
-   select_insn(s, tree, call_insn);
+   select_safepoint_insn(s, tree, call_insn);
save_invoke_result(s, tree, method, stmt);
 
nr_stack_args = get_stack_args_count(method);
@@ -3396,7 +3393,7 @@ static void invokeinterface(struct _MBState *state, 
struct basic_block *s, struc
/* invoke me

[PATCH] vm: dump state of all threads on SIGUSR2

2009-12-11 Thread Tomek Grabiec
When jato receives SIGUSR2 all threads print their register
content and backtrace:

pkill -SIGUSR2 jato

Signed-off-by: Tomek Grabiec 
---
 arch/x86/backtrace.c   |   23 +++-
 include/vm/backtrace.h |1 +
 vm/signal.c|   52 
 3 files changed, 70 insertions(+), 6 deletions(-)

diff --git a/arch/x86/backtrace.c b/arch/x86/backtrace.c
index f6d77e4..6e48eff 100644
--- a/arch/x86/backtrace.c
+++ b/arch/x86/backtrace.c
@@ -222,7 +222,7 @@ static void show_registers(gregset_t gregs)
 }
 #endif
 
-void print_backtrace_and_die(int sig, siginfo_t *info, void *secret)
+void print_backtrace_from_signal(int sig, siginfo_t *info, void *secret)
 {
ucontext_t *uc = secret;
unsigned long eip, ebp, addr;
@@ -231,6 +231,21 @@ void print_backtrace_and_die(int sig, siginfo_t *info, 
void *secret)
ebp = uc->uc_mcontext.gregs[BP_REG];
addr= (unsigned long) info->si_addr;
 
+   show_registers(uc->uc_mcontext.gregs);
+
+   print_trace_from(eip, (void *) ebp);
+
+   trace_flush();
+}
+
+void print_backtrace_and_die(int sig, siginfo_t *info, void *secret)
+{
+   ucontext_t *uc = secret;
+   unsigned long eip, addr;
+
+   eip = uc->uc_mcontext.gregs[IP_REG];
+   addr= (unsigned long) info->si_addr;
+
switch (sig) {
case SIGSEGV:
trace_printf("SIGSEGV at %s %08lx while accessing memory 
address %08lx.\n",
@@ -240,11 +255,7 @@ void print_backtrace_and_die(int sig, siginfo_t *info, 
void *secret)
trace_printf("Signal %d at %s %08lx\n", sig, IP_REG_NAME, eip);
break;
};
-   show_registers(uc->uc_mcontext.gregs);
-
-   print_trace_from(eip, (void *) ebp);
-
-   trace_flush();
 
+   print_backtrace_from_signal(sig, info, secret);
exit(1);
 }
diff --git a/include/vm/backtrace.h b/include/vm/backtrace.h
index 6cb2406..449d82b 100644
--- a/include/vm/backtrace.h
+++ b/include/vm/backtrace.h
@@ -6,6 +6,7 @@
 
 struct string;
 
+extern void print_backtrace_from_signal(int, siginfo_t *, void *);
 extern void print_backtrace_and_die(int, siginfo_t *, void *);
 extern void print_trace(void);
 extern void print_trace_from(unsigned long, void*);
diff --git a/vm/signal.c b/vm/signal.c
index 63fe0d9..dfab59a 100644
--- a/vm/signal.c
+++ b/vm/signal.c
@@ -35,6 +35,7 @@
 #include "vm/preload.h"
 #include "vm/signal.h"
 #include "vm/stack-trace.h"
+#include "vm/trace.h"
 
 #include "arch/signal.h"
 
@@ -44,6 +45,9 @@
 #include 
 
 static __thread struct register_state thread_register_state;
+static pthread_mutex_t sigusr2_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t sigusr2_cond = PTHREAD_COND_INITIALIZER;
+static bool sigusr2_sent;
 
 static unsigned long throw_arithmetic_exception(unsigned long src_addr)
 {
@@ -171,6 +175,51 @@ static void signal_handler(int sig, siginfo_t *si, void 
*ctx)
print_backtrace_and_die(sig, si, ctx);
 }
 
+static void print_state(int sig, siginfo_t *si, void *ctx)
+{
+   struct vm_thread *self;
+   char * name;
+
+   self = vm_thread_self();
+   name = vm_thread_get_name(self);
+   trace_printf("\n[%s] :\n\n", name ? name : "(null)");
+
+   print_backtrace_from_signal(sig, si, ctx);
+   free(name);
+}
+
+static void sigusr2_handler(int sig, siginfo_t *si, void *ctx)
+{
+   pthread_mutex_lock(&sigusr2_mutex);
+
+   if (sigusr2_sent) {
+   print_state(sig, si, ctx);
+   pthread_mutex_unlock(&sigusr2_mutex);
+   pthread_cond_broadcast(&sigusr2_cond);
+   return;
+   }
+
+   sigusr2_sent = true;
+
+   struct vm_thread *thread;
+
+   vm_thread_for_each(thread) {
+   if (thread->posix_id == pthread_self()) {
+   print_state(sig, si, ctx);
+   continue;
+   }
+
+   if (pthread_kill(thread->posix_id, SIGUSR2) != 0)
+   die("pthread_kill");
+
+   pthread_cond_wait(&sigusr2_cond, &sigusr2_mutex);
+   }
+
+   sigusr2_sent = false;
+
+   pthread_mutex_unlock(&sigusr2_mutex);
+}
+
 void setup_signal_handlers(void)
 {
struct sigaction sa;
@@ -186,4 +235,7 @@ void setup_signal_handlers(void)
 
sa.sa_sigaction = signal_handler;
sigaction(SIGUSR1, &sa, NULL);
+
+   sa.sa_sigaction = sigusr2_handler;
+   sigaction(SIGUSR2, &sa, NULL);
 }
-- 
1.6.0.4


--
Return on Information:
Google Enterprise Search pays you back
Get the facts.
http://p.sf.net/sfu/google-dev2dev
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] vm: fix init_stack_trace_elem_current()

2009-11-16 Thread Tomek Grabiec
The function was setting elem to the init_stack_trace_elem_current()'s
frame, which is error prone because after the function returns its stack
frame might be overridden by other function calls done from the
caller.

A fix for that is to make init_stack_trace_elem_current() set elem to
the caller's frame. We must make it a macro because we can't rely on
__builtin_frame_address(1) because of gcc optimizations.

Signed-off-by: Tomek Grabiec 
---
 include/vm/stack-trace.h   |8 +++-
 test/vm/stack-trace-stub.c |4 
 vm/stack-trace.c   |7 ---
 3 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/include/vm/stack-trace.h b/include/vm/stack-trace.h
index a913f18..34af859 100644
--- a/include/vm/stack-trace.h
+++ b/include/vm/stack-trace.h
@@ -88,6 +88,13 @@ static inline void *vm_native_stack_get_frame(void)
__builtin_frame_address(0); \
} while (0)
 
+#define init_stack_trace_elem_current(elem) do {   \
+   __lab:  \
+   init_stack_trace_elem(elem, \
+ (unsigned long)&&__lab,   \
+ __builtin_frame_address(0));  \
+   } while (0)
+
 enum stack_trace_elem_type {
STACK_TRACE_ELEM_TYPE_JIT,
STACK_TRACE_ELEM_TYPE_JNI,
@@ -131,7 +138,6 @@ stack_trace_elem_type_is_java(enum stack_trace_elem_type 
type)
 void init_stack_trace_printing(void);
 void init_stack_trace_elem(struct stack_trace_elem *elem, unsigned long addr,
   void *frame);
-void init_stack_trace_elem_current(struct stack_trace_elem *elem);
 int stack_trace_elem_next(struct stack_trace_elem *elem);
 int stack_trace_elem_next_java(struct stack_trace_elem *elem);
 int skip_frames_from_class(struct stack_trace_elem *elem, struct vm_class 
*class);
diff --git a/test/vm/stack-trace-stub.c b/test/vm/stack-trace-stub.c
index 38e3df7..c2ef8af 100644
--- a/test/vm/stack-trace-stub.c
+++ b/test/vm/stack-trace-stub.c
@@ -44,10 +44,6 @@ void init_stack_trace_elem(struct stack_trace_elem *elem, 
unsigned long addr,
 {
 }
 
-void init_stack_trace_elem_current(struct stack_trace_elem *elem)
-{
-}
-
 int stack_trace_elem_next(struct stack_trace_elem *elem)
 {
return -1;
diff --git a/vm/stack-trace.c b/vm/stack-trace.c
index d02d9d0..0d12c4e 100644
--- a/vm/stack-trace.c
+++ b/vm/stack-trace.c
@@ -310,13 +310,6 @@ void init_stack_trace_elem(struct stack_trace_elem *elem, 
unsigned long addr,
}
 }
 
-void init_stack_trace_elem_current(struct stack_trace_elem *elem)
-{
-   init_stack_trace_elem(elem,
- (unsigned long)&init_stack_trace_elem_current,
- __builtin_frame_address(0));
-}
-
 struct compilation_unit *stack_trace_elem_get_cu(struct stack_trace_elem *elem)
 {
if (elem->type == STACK_TRACE_ELEM_TYPE_OTHER)
-- 
1.6.0.4


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] x86: fix is_sse_insn()

2009-10-31 Thread Tomek Grabiec
SSE instructions have two prefixes depending on operand size:
0xf3 for 32-bit XMM and 0xf2 for 64-bit XMM.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/emit-code.c  |2 +-
 regression/jvm/PutstaticPatchingTest.java |   36 -
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c
index c7e759e..fe30b7c 100644
--- a/arch/x86/emit-code.c
+++ b/arch/x86/emit-code.c
@@ -472,7 +472,7 @@ static inline bool is_rex_prefix(unsigned char opc)
 
 static inline bool is_sse_insn(unsigned char *opc)
 {
-   return opc[0] == 0xf3 && opc[1] == 0x0f;
+   return (opc[0] & 0xfe) == 0xf2 && opc[1] == 0x0f;
 }
 
 void fixup_static(struct vm_class *vmc)
diff --git a/regression/jvm/PutstaticPatchingTest.java 
b/regression/jvm/PutstaticPatchingTest.java
index 25740e1..7e0937c 100644
--- a/regression/jvm/PutstaticPatchingTest.java
+++ b/regression/jvm/PutstaticPatchingTest.java
@@ -14,7 +14,7 @@ public class PutstaticPatchingTest extends TestCase {
 }
 }
 
-public static void main(String[] args) {
+private static void testClassInitOnPutstatic() {
 int i = 0;
 
 assertFalse(clinit_run);
@@ -28,4 +28,38 @@ public class PutstaticPatchingTest extends TestCase {
 X.y = i;
 assertFalse(clinit_run);
 }
+
+private static class DoubleFieldClass {
+public static double x;
+};
+
+private static void testDoublePutstaticPatching() {
+DoubleFieldClass.x = 1.0;
+assertEquals(DoubleFieldClass.x, 1.0);
+}
+
+private static class FloatFieldClass {
+public static float x;
+};
+
+private static void testFloatPutstaticPatching() {
+FloatFieldClass.x = 1.0f;
+assertEquals(FloatFieldClass.x, 1.0f);
+}
+
+private static class IntFieldClass {
+public static int x;
+};
+
+private static void testIntPutstaticPatching() {
+IntFieldClass.x = 1;
+assertEquals(IntFieldClass.x, 1);
+}
+
+public static void main(String[] args) {
+testClassInitOnPutstatic();
+testDoublePutstaticPatching();
+testFloatPutstaticPatching();
+testIntPutstaticPatching();
+}
 }
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/5] jit: fix get_reload_before_insn()

2009-10-24 Thread Tomek Grabiec
LIR position passed to radix_tree_lookup() should be always even.

Signed-off-by: Tomek Grabiec 
---
 jit/spill-reload.c |5 +++--
 1 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index 3dfb0b3..eaa2790 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -53,8 +53,6 @@ get_reload_before_insn(struct compilation_unit *cu, struct 
live_interval *interv
 
unsigned long start = interval_start(interval);
 
-   ret = radix_tree_lookup(cu->lir_insn_map, start);
-
if (start & 1) {
/*
 * If interval starts at odd position and has a use
@@ -67,7 +65,10 @@ get_reload_before_insn(struct compilation_unit *cu, struct 
live_interval *interv
if (first_use_pos(interval) == interval_start(interval))
error("interval begins with a def-use and is marked for 
reload");
 
+   ret = radix_tree_lookup(cu->lir_insn_map, start - 1);
ret = next_insn(ret);
+   } else {
+   ret = radix_tree_lookup(cu->lir_insn_map, start);
}
 
assert(ret != NULL);
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/5] jit: fix dead code in __insert_spill_reload_insn()

2009-10-24 Thread Tomek Grabiec
Reported-by: Pekka Enberg 
Signed-off-by: Tomek Grabiec 
---
 jit/spill-reload.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index eaa2790..c7b3b51 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -225,7 +225,7 @@ static int __insert_spill_reload_insn(struct live_interval 
*interval, struct com
 * can't insert a reload instruction in the middle of
 * instruction.
 */
-   if ((interval_start(interval) & 1) == 0);
+   assert((interval_start(interval) & 1) == 0);
 
err = insert_reload_insn(interval, cu,
interval->spill_parent->spill_slot,
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 4/5] jit: set bytecode offset of spill/reload instructions properly

2009-10-24 Thread Tomek Grabiec
Functions get_spill_after_insn() and get_reload_before_insn() not
always return a pointer to struct insn (the API will be fixed in
following patches). This can happen when we must insert spill
instruction at the beginning of basic block, or a reload instruction
at the end of basic block. In those cases a container of insn list
head is returned. Therefore we can't get the bytecode offset by
dereferencing the result. A valid bytecode offset value is returned by
those function by setting a variable to which pointer is passed as an
argument.

Signed-off-by: Tomek Grabiec 
---
 jit/spill-reload.c |   21 +++--
 1 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index ba8ac27..36c25f8 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -47,7 +47,8 @@ struct live_interval_mapping {
 };
 
 static struct insn *
-get_reload_before_insn(struct compilation_unit *cu, struct live_interval 
*interval)
+get_reload_before_insn(struct compilation_unit *cu, struct live_interval 
*interval,
+  unsigned long *bc_offset)
 {
struct insn *ret;
 
@@ -66,9 +67,11 @@ get_reload_before_insn(struct compilation_unit *cu, struct 
live_interval *interv
error("interval begins with a def-use and is marked for 
reload");
 
ret = radix_tree_lookup(cu->lir_insn_map, start - 1);
+   *bc_offset = ret->bytecode_offset;
ret = next_insn(ret);
} else {
ret = radix_tree_lookup(cu->lir_insn_map, start);
+   *bc_offset = ret->bytecode_offset;
}
 
assert(ret != NULL);
@@ -76,7 +79,9 @@ get_reload_before_insn(struct compilation_unit *cu, struct 
live_interval *interv
 }
 
 static struct insn *
-get_spill_after_insn(struct compilation_unit *cu, struct live_interval 
*interval)
+get_spill_after_insn(struct compilation_unit *cu,
+struct live_interval *interval,
+unsigned long *bc_offset)
 {
struct insn *ret;
 
@@ -90,8 +95,10 @@ get_spill_after_insn(struct compilation_unit *cu, struct 
live_interval *interval
 
if (last_pos & 1) {
ret = radix_tree_lookup(cu->lir_insn_map, last_pos - 1);
+   *bc_offset = ret->bytecode_offset;
} else {
ret = radix_tree_lookup(cu->lir_insn_map, last_pos);
+   *bc_offset = ret->bytecode_offset;
ret = prev_insn(ret);
}
 
@@ -157,11 +164,12 @@ static int
 insert_spill_insn(struct live_interval *interval, struct compilation_unit *cu)
 {
struct insn *spill_after;
+   unsigned long bc_offset;
 
-   spill_after = get_spill_after_insn(cu, interval);
+   spill_after = get_spill_after_insn(cu, interval, &bc_offset);
interval->spill_slot = spill_interval(interval, cu,
  &spill_after->insn_list_node,
- spill_after->bytecode_offset);
+ bc_offset);
if (!interval->spill_slot)
return warn("out of memory"), -ENOMEM;
 
@@ -172,6 +180,7 @@ static int
 insert_reload_insn(struct live_interval *interval, struct compilation_unit *cu)
 {
struct insn *reload_before;
+   unsigned long bc_offset;
struct insn *reload;
 
reload = reload_insn(interval->spill_parent->spill_slot,
@@ -179,8 +188,8 @@ insert_reload_insn(struct live_interval *interval, struct 
compilation_unit *cu)
if (!reload)
return warn("out of memory"), -ENOMEM;
 
-   reload_before = get_reload_before_insn(cu, interval);
-   reload->bytecode_offset = reload_before->bytecode_offset;
+   reload_before = get_reload_before_insn(cu, interval, &bc_offset);
+   reload->bytecode_offset = bc_offset;
list_add_tail(&reload->insn_list_node, &reload_before->insn_list_node);
 
return 0;
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 5/5] jit: cleanup in spill-reload.c

2009-10-24 Thread Tomek Grabiec
This changes the API of get_spill_after_insn() and
get_reload_before_insn() and renames them, so that they no longer
return a pointer to struct insn — what they returned was not always
actually a pointer to struct insn.

Signed-off-by: Tomek Grabiec 
---
 jit/spill-reload.c |   57 ---
 1 files changed, 27 insertions(+), 30 deletions(-)

diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index 36c25f8..5da7164 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -46,13 +46,15 @@ struct live_interval_mapping {
struct live_interval *from, *to;
 };
 
-static struct insn *
-get_reload_before_insn(struct compilation_unit *cu, struct live_interval 
*interval,
+static struct list_head *
+get_reload_before_node(struct compilation_unit *cu,
+  struct live_interval *interval,
   unsigned long *bc_offset)
 {
-   struct insn *ret;
+   unsigned long start;
+   struct insn *insn;
 
-   unsigned long start = interval_start(interval);
+   start = interval_start(interval);
 
if (start & 1) {
/*
@@ -66,24 +68,22 @@ get_reload_before_insn(struct compilation_unit *cu, struct 
live_interval *interv
if (first_use_pos(interval) == interval_start(interval))
error("interval begins with a def-use and is marked for 
reload");
 
-   ret = radix_tree_lookup(cu->lir_insn_map, start - 1);
-   *bc_offset = ret->bytecode_offset;
-   ret = next_insn(ret);
-   } else {
-   ret = radix_tree_lookup(cu->lir_insn_map, start);
-   *bc_offset = ret->bytecode_offset;
+   insn = radix_tree_lookup(cu->lir_insn_map, start - 1);
+   *bc_offset = insn->bytecode_offset;
+   return insn->insn_list_node.next;
}
 
-   assert(ret != NULL);
-   return ret;
+   insn = radix_tree_lookup(cu->lir_insn_map, start);
+   *bc_offset = insn->bytecode_offset;
+   return &insn->insn_list_node;
 }
 
-static struct insn *
-get_spill_after_insn(struct compilation_unit *cu,
+static struct list_head *
+get_spill_after_node(struct compilation_unit *cu,
 struct live_interval *interval,
 unsigned long *bc_offset)
 {
-   struct insn *ret;
+   struct insn *insn;
 
/*
 * If interval ends at even position then it is not written to
@@ -94,17 +94,14 @@ get_spill_after_insn(struct compilation_unit *cu,
unsigned long last_pos = interval_end(interval) - 1;
 
if (last_pos & 1) {
-   ret = radix_tree_lookup(cu->lir_insn_map, last_pos - 1);
-   *bc_offset = ret->bytecode_offset;
-   } else {
-   ret = radix_tree_lookup(cu->lir_insn_map, last_pos);
-   *bc_offset = ret->bytecode_offset;
-   ret = prev_insn(ret);
+   insn = radix_tree_lookup(cu->lir_insn_map, last_pos - 1);
+   *bc_offset = insn->bytecode_offset;
+   return &insn->insn_list_node;
}
 
-   assert(ret != NULL);
-
-   return ret;
+   insn = radix_tree_lookup(cu->lir_insn_map, last_pos);
+   *bc_offset = insn->bytecode_offset;
+   return insn->insn_list_node.prev;
 }
 
 /**
@@ -163,12 +160,12 @@ spill_interval(struct live_interval *interval,
 static int
 insert_spill_insn(struct live_interval *interval, struct compilation_unit *cu)
 {
-   struct insn *spill_after;
+   struct list_head *spill_after;
unsigned long bc_offset;
 
-   spill_after = get_spill_after_insn(cu, interval, &bc_offset);
+   spill_after = get_spill_after_node(cu, interval, &bc_offset);
interval->spill_slot = spill_interval(interval, cu,
- &spill_after->insn_list_node,
+ spill_after,
  bc_offset);
if (!interval->spill_slot)
return warn("out of memory"), -ENOMEM;
@@ -179,7 +176,7 @@ insert_spill_insn(struct live_interval *interval, struct 
compilation_unit *cu)
 static int
 insert_reload_insn(struct live_interval *interval, struct compilation_unit *cu)
 {
-   struct insn *reload_before;
+   struct list_head *reload_before;
unsigned long bc_offset;
struct insn *reload;
 
@@ -188,9 +185,9 @@ insert_reload_insn(struct live_interval *interval, struct 
compilation_unit *cu)
if (!reload)
return warn("out of memory"), -ENOMEM;
 
-   reload_before = get_reload_before_insn(cu, interval, &bc_offset);
+   reload_before = get_reload_before_node(cu, interval, &bc_offset);
reload->bytecode_offset = bc_offset;
-   list_add_tail(&reload->i

[PATCH 3/5] jit: make API to insert_reload_insn() the same as for insert_spill_insn()

2009-10-24 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 jit/spill-reload.c |   20 +---
 1 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index c7b3b51..ba8ac27 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -168,20 +168,20 @@ insert_spill_insn(struct live_interval *interval, struct 
compilation_unit *cu)
return 0;
 }
 
-static int insert_reload_insn(struct live_interval *interval,
- struct compilation_unit *cu,
- struct stack_slot *from,
- struct insn *first)
+static int
+insert_reload_insn(struct live_interval *interval, struct compilation_unit *cu)
 {
+   struct insn *reload_before;
struct insn *reload;
 
-   reload = reload_insn(from, &interval->spill_reload_reg);
+   reload = reload_insn(interval->spill_parent->spill_slot,
+&interval->spill_reload_reg);
if (!reload)
return warn("out of memory"), -ENOMEM;
 
-   reload->bytecode_offset = first->bytecode_offset;
-
-   list_add_tail(&reload->insn_list_node, &first->insn_list_node);
+   reload_before = get_reload_before_insn(cu, interval);
+   reload->bytecode_offset = reload_before->bytecode_offset;
+   list_add_tail(&reload->insn_list_node, &reload_before->insn_list_node);
 
return 0;
 }
@@ -227,9 +227,7 @@ static int __insert_spill_reload_insn(struct live_interval 
*interval, struct com
 */
assert((interval_start(interval) & 1) == 0);
 
-   err = insert_reload_insn(interval, cu,
-   interval->spill_parent->spill_slot,
-   get_reload_before_insn(cu, interval));
+   err = insert_reload_insn(interval, cu);
if (err)
goto out;
}
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/2] jit: fix dead code in __insert_spill_reload_insn()

2009-10-24 Thread Tomek Grabiec
The if statement should be an assertion.

Reported-by: Pekka Enberg 
Signed-off-by: Tomek Grabiec 
---
 jit/spill-reload.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index 7624bd6..f8fd582 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -224,7 +224,7 @@ static int __insert_spill_reload_insn(struct live_interval 
*interval, struct com
 * can't insert a reload instruction in the middle of
 * instruction.
 */
-   if ((interval_start(interval) & 1) == 0);
+   assert((interval_start(interval) & 1) == 0);
 
err = insert_reload_insn(interval, cu,
interval->spill_parent->spill_slot,
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/2] jit: fix bug in get_spill_after_insn() and get_reload_after_insn()

2009-10-24 Thread Tomek Grabiec
When an interval ends just before the next basic block, prev_insn()
will be called on that next basic block's first instruction and will
return an invalid instruction.  We should not use prev_insn() here
because it does not work across basic block boundaries.

The same applies to next_insn() in get_reload_before_insn().

Signed-off-by: Tomek Grabiec 
---
 jit/spill-reload.c |   14 +++---
 1 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index 3dfb0b3..7624bd6 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -49,11 +49,10 @@ struct live_interval_mapping {
 static struct insn *
 get_reload_before_insn(struct compilation_unit *cu, struct live_interval 
*interval)
 {
+   unsigned long start;
struct insn *ret;
 
-   unsigned long start = interval_start(interval);
-
-   ret = radix_tree_lookup(cu->lir_insn_map, start);
+   start = interval_start(interval);
 
if (start & 1) {
/*
@@ -64,10 +63,12 @@ get_reload_before_insn(struct compilation_unit *cu, struct 
live_interval *interv
 * odd position and has no use at this position, we
 * should reload after that instruction.
 */
-   if (first_use_pos(interval) == interval_start(interval))
+   if (first_use_pos(interval) == start)
error("interval begins with a def-use and is marked for 
reload");
 
-   ret = next_insn(ret);
+   ret = radix_tree_lookup(cu->lir_insn_map, start + 1);
+   } else {
+   ret = radix_tree_lookup(cu->lir_insn_map, start);
}
 
assert(ret != NULL);
@@ -90,8 +91,7 @@ get_spill_after_insn(struct compilation_unit *cu, struct 
live_interval *interval
if (last_pos & 1) {
ret = radix_tree_lookup(cu->lir_insn_map, last_pos - 1);
} else {
-   ret = radix_tree_lookup(cu->lir_insn_map, last_pos);
-   ret = prev_insn(ret);
+   ret = radix_tree_lookup(cu->lir_insn_map, last_pos - 2);
}
 
assert(ret != NULL);
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/2] x86: Add missing result conversion after native method invocations.

2009-10-18 Thread Tomek Grabiec
This bugfix is related to the one in commit
a6cc64dea7ea078e173fe0bea89ad54e54038826 "vm: do not return jboolean
from VM natives.". It implements the other approach described in the
changelog of that commit. We must put conversion instructions after
native invocations because JNI methods do not perform the conversion.

Therefore it is now safe to use jbyte, jboolean, jchar and jshort as
VM native implementation return type.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |   32 
 1 files changed, 32 insertions(+), 0 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 443fa2e..3b1f102 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -3090,6 +3090,38 @@ void save_invoke_result(struct basic_block *s, struct 
tree_node *tree,
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, edx, 
tmp->tmp_high));
break;
case J_INT:
+   eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
+
+   if (vm_method_is_native(method)) {
+   /*
+* Native methods (especially JNI methods) might
+* not cast return value to int for byte, boolean,
+* char and short so we must do this here.
+*/
+   switch (method->return_type.vm_type) {
+   case J_BYTE:
+   /* Fall through */
+   case J_BOOLEAN:
+   select_insn(s, tree, 
reg_reg_insn(INSN_MOVSX_8_REG_REG, eax, tmp->tmp_low));
+   break;
+   case J_CHAR:
+   select_insn(s, tree, 
reg_reg_insn(INSN_MOVZX_16_REG_REG, eax, tmp->tmp_low));
+   break;
+   case J_SHORT:
+   select_insn(s, tree, 
reg_reg_insn(INSN_MOVSX_16_REG_REG, eax, tmp->tmp_low));
+   break;
+   case J_INT:
+   select_insn(s, tree, 
reg_reg_insn(INSN_MOV_REG_REG, eax, tmp->tmp_low));
+   break;
+   default:
+   error("unexpected return type");
+   }
+
+   break;
+   }
+
+   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, 
tmp->tmp_low));
+   break;
case J_REFERENCE:
eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, 
tmp->tmp_low));
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/2] mmix: fix compilation error on make check

2009-10-18 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/mmix/call.c |6 +-
 1 files changed, 1 insertions(+), 5 deletions(-)

diff --git a/arch/mmix/call.c b/arch/mmix/call.c
index 1dc640a..9c3f244 100644
--- a/arch/mmix/call.c
+++ b/arch/mmix/call.c
@@ -27,10 +27,6 @@
 #include "vm/method.h"
 #include "vm/call.h"
 
-void native_call(struct vm_method *method, const void *target, unsigned long 
*args, union jvalue *result)
-{
-}
-
-void vm_native_call(struct vm_method *method, const void *target, unsigned 
long *args, union jvalue *result)
+void native_call(struct vm_method *method, void *target, unsigned long *args, 
union jvalue *result)
 {
 }
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] jit: fix conversion of dup_x2 when second value is J_LONG or J_DOUBLE

2009-10-17 Thread Tomek Grabiec
The converter for dup_x2 didn't check the slot size of the second
stack operand, which caused the first value to be put one level
deeper when the second value was J_LONG or J_DOUBLE.

Signed-off-by: Tomek Grabiec 
---
 jit/ostack-bc.c  |4 
 regression/jvm/DupTest.j |   37 +
 2 files changed, 41 insertions(+), 0 deletions(-)

diff --git a/jit/ostack-bc.c b/jit/ostack-bc.c
index 54f8dd1..c31ed3c 100644
--- a/jit/ostack-bc.c
+++ b/jit/ostack-bc.c
@@ -105,6 +105,10 @@ int convert_dup_x2(struct parse_context *ctx)
 
value1 = stack_pop(ctx->bb->mimic_stack);
value2 = stack_pop(ctx->bb->mimic_stack);
+
+   if (vm_type_slot_size(value2->vm_type) == 2)
+   return __convert_dup_x1(ctx, value1, value2);
+
value3 = stack_pop(ctx->bb->mimic_stack);
 
return __convert_dup_x2(ctx, value1, value2, value3);
diff --git a/regression/jvm/DupTest.j b/regression/jvm/DupTest.j
index 4c7122f..644f1a5 100644
--- a/regression/jvm/DupTest.j
+++ b/regression/jvm/DupTest.j
@@ -123,6 +123,42 @@
 invokestatic jvm/TestCase/assertEquals(II)V
 .end method
 
+; Test 'dup_x2' instruction when value2 has computational type of category 2
+.method public static testDup_x2_long()V
+.limit locals 5
+
+iconst_0
+putstatic jvm/DupTest/counter I
+
+dconst_0
+invokestatic jvm/DupTest/advanceCounter()I
+dup_x2
+
+iconst_1
+iadd
+istore_1
+
+dconst_1
+dadd
+dstore_2
+
+iconst_3
+iadd
+istore 4
+
+iconst_2
+iload_1
+invokestatic jvm/TestCase/assertEquals(II)V
+
+dconst_1
+dload_2
+invokestatic jvm/TestCase/assertEquals(DD)V
+
+bipush 4
+iload 4
+invokestatic jvm/TestCase/assertEquals(II)V
+.end method
+
 ; Test 'dup2' instruction
 .method public static testDup2()V
 .limit locals 5
@@ -287,6 +323,7 @@
 invokestatic jvm/DupTest/testDup()V
 invokestatic jvm/DupTest/testDup_x1()V
 invokestatic jvm/DupTest/testDup_x2()V
+invokestatic jvm/DupTest/testDup_x2_long()V
 invokestatic jvm/DupTest/testDup2()V
 invokestatic jvm/DupTest/testDup2_x1()V
 invokestatic jvm/DupTest/testDup2_x2()V
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/2] x86, vm: implement support for invoking methods with J_LONG return type in reflection.

2009-10-15 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/call.c  |   50 ++
 include/vm/types.h   |5 +++
 regression/java/lang/reflect/MethodTest.java |5 +++
 runtime/reflection.c |4 ++-
 4 files changed, 63 insertions(+), 1 deletions(-)

diff --git a/arch/x86/call.c b/arch/x86/call.c
index 389b1a6..064bfdf 100644
--- a/arch/x86/call.c
+++ b/arch/x86/call.c
@@ -83,6 +83,54 @@ static void native_call_eax(struct vm_method *method, void 
*target,
 : "%ecx", "%edi", "cc", "memory");
 }
 
+static void native_call_long(struct vm_method *method, void *target,
+unsigned long *args, union jvalue *result)
+{
+   __asm__ volatile
+   (
+"movl %%ebx, %%ecx \n"
+"shl $2, %%ebx \n"
+"subl %%ebx, %%esp \n"
+"movl %%esp, %%edi \n"
+"cld \n"
+"rep movsd \n"
+"mov %%ebx, %%esi \n"
+
+"test %4, %4 \n"
+"jz 1f \n"
+
+"pushl %%esp \n"
+"pushl %2 \n"
+"call vm_enter_vm_native \n"
+"addl $8, %%esp \n"
+"test %%eax, %%eax \n"
+"jnz 2f \n"
+
+"call *%2 \n"
+"movl %3, %%edi \n"
+"movl %%eax, (%%edi) \n"
+"movl %%edx, 4(%%edi) \n"
+
+"call vm_leave_vm_native \n"
+"jmp 2f \n"
+"1: \n"
+"call *%2 \n"
+"movl %3, %%edi \n"
+"movl %%eax, (%%edi) \n"
+"movl %%edx, 4(%%edi) \n"
+
+"2: \n"
+
+"addl %%esi, %%esp \n"
+:
+: "b" (method->args_count),
+  "S" (args),
+  "m" (target),
+  "m" (result),
+  "r" (vm_method_is_vm_native(method))
+: "%ecx", "%edi", "cc", "memory");
+}
+
 /**
  * This calls a function with call arguments copied from @args
  * array. The array contains @args_count elements of machine word
@@ -121,6 +169,8 @@ void native_call(struct vm_method *method, void *target,
result->i = (jint) result->z;
break;
case J_LONG:
+   native_call_long(method, target, args, result);
+   break;
case J_DOUBLE:
case J_FLOAT:
error("not implemented");
diff --git a/include/vm/types.h b/include/vm/types.h
index 1f3350b..2b21e2a 100644
--- a/include/vm/types.h
+++ b/include/vm/types.h
@@ -81,4 +81,9 @@ static inline enum vm_type mimic_stack_type(enum vm_type type)
}
 }
 
+static inline int get_arg_size(enum vm_type type)
+{
+   return get_vmtype_size(type) / sizeof(unsigned long);
+}
+
 #endif
diff --git a/regression/java/lang/reflect/MethodTest.java 
b/regression/java/lang/reflect/MethodTest.java
index 14dbaa5..2bfbb55 100644
--- a/regression/java/lang/reflect/MethodTest.java
+++ b/regression/java/lang/reflect/MethodTest.java
@@ -51,6 +51,10 @@ public class MethodTest extends TestCase {
   public static int intIncrement(int x) {
   return x + 1;
   }
+
+  public static long longIncrement(long x) {
+  return x + 1;
+  }
 }
 
 public static Object invoke(String name, Class arg_class, Object arg) {
@@ -64,6 +68,7 @@ public class MethodTest extends TestCase {
 
 public static void testMethodReflectionInvoke() {
 assertObjectEquals(Integer.valueOf(2), invoke("intIncrement", 
int.class, Integer.valueOf(1)));
+assertObjectEquals(Long.valueOf(0xdeadbeefcafebabfl), 
invoke("longIncrement", long.class, Long.valueOf(0xdeadbeefcafebabel)));
 }
 
 public static void main(String[] args) throws Exception {
diff --git a/runtime/reflection.c b/runtime/reflection.c
index 3d67aba..7aa927b 100644
--- a/runtime/reflection.c
+++ b/runtime/reflection.c
@@ -747,8 +747,10 @@ static int marshall_call_arguments(struct vm_method *vmm, 
unsigned long *args,
struct vm_object *arg_obj;
 
arg_obj = array_get_field_ptr(args_array, args_array_idx++);
-   if (unwrap(&args[idx++], arg->type_info.vm_type, arg_obj))
+   if (unwrap(&args[idx], arg->type_info.vm_type, arg_obj))
return -1;
+
+   idx += get_arg_size(arg->type_info.vm_type);
}
 
return 0;
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/2] x86: cleanup in call.c

2009-10-15 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/call.c   |  180 ++---
 include/vm/call.h |6 +--
 vm/call.c |6 --
 3 files changed, 62 insertions(+), 130 deletions(-)

diff --git a/arch/x86/call.c b/arch/x86/call.c
index 0b96020..389b1a6 100644
--- a/arch/x86/call.c
+++ b/arch/x86/call.c
@@ -33,150 +33,92 @@
 
 #include "vm/call.h"
 #include "vm/method.h"
+#include "vm/stack-trace.h"
 
 #ifdef CONFIG_X86_32
 
-/**
- * Calls @method which address is obtained from a memory
- * pointed by @target. Function returns call result which
- * is supposed to be saved to %eax.
- */
-static unsigned long native_call_gp(struct vm_method *method,
-   const void *target,
-   unsigned long *args)
-{
-   unsigned long result;
-
-   __asm__ volatile (
-   "movl %%ebx, %%ecx \n"
-   "shl $2, %%ebx \n"
-   "subl %%ebx, %%esp \n"
-   "movl %%esp, %%edi \n"
-   "cld \n"
-   "rep movsd \n"
-   "movl %%ebx, %%esi \n"
-   "call *%3 \n"
-   "addl %%esi, %%esp \n"
-   : "=a" (result)
-   : "b" (method->args_count), "S"(args), "m"(target)
-   : "%ecx", "%edi", "cc"
-   );
-
-   return result;
-}
-
-static unsigned long vm_native_call_gp(struct vm_method *method,
-  const void *target,
-  unsigned long *args)
+static void native_call_eax(struct vm_method *method, void *target,
+   unsigned long *args, union jvalue *result)
 {
-   unsigned long result;
-
-   __asm__ volatile (
-   "movl %%ebx, %%ecx \n"
-   "shl $2, %%ebx \n"
-   "subl %%ebx, %%esp \n"
-   "movl %%esp, %%edi \n"
-   "cld \n"
-   "rep movsd \n"
-   "movl %%ebx, %%esi \n"
-
-   "pushl %%esp \n"
-   "pushl %3 \n"
-   "call vm_enter_vm_native \n"
-   "addl $8, %%esp \n"
-   "test %%eax, %%eax \n"
-   "jnz 1f \n"
-
-   "call * -8(%%esp)\n"
-   "movl %%eax, %0 \n"
-
-   "call vm_leave_vm_native \n"
-
-   "1: addl %%esi, %%esp \n"
-   : "=r" (result)
-   : "b" (method->args_count), "S"(args), "r"(target)
-   : "%ecx", "%edi", "cc"
-   );
-
-   return result;
+   __asm__ volatile
+   (
+"movl %%ebx, %%ecx \n"
+"shl $2, %%ebx \n"
+"subl %%ebx, %%esp \n"
+"movl %%esp, %%edi \n"
+"cld \n"
+"rep movsd \n"
+"mov %%ebx, %%esi \n"
+
+"test %4, %4 \n"
+"jz 1f \n"
+
+"pushl %%esp \n"
+"pushl %2 \n"
+"call vm_enter_vm_native \n"
+"addl $8, %%esp \n"
+"test %%eax, %%eax \n"
+"jnz 2f \n"
+
+"call *%2 \n"
+"movl %3, %%edi \n"
+"movl %%eax, (%%edi) \n"
+
+"call vm_leave_vm_native \n"
+"jmp 2f \n"
+"1: \n"
+"call *%2 \n"
+"movl %3, %%edi \n"
+"movl %%eax, (%%edi) \n"
+
+"2: \n"
+
+"addl %%esi, %%esp \n"
+:
+: "b" (method->args_count),
+  "S" (args),
+  "m" (target),
+  "m" (result),
+  "r" (vm_method_is_vm_native(method))
+: "%ecx", "%edi", "cc", "memory");
 }
 
 /**
  * This calls a function with call arguments copied from @args
  * array. The array contains @args_count elements of machine word
- * size. The @target must be a variable holding a function
- * pointer. Call result will be stored in @result.
- */
-void native_call(struct vm_method *method,
-const void *target,
-unsigned long *args,
-union jvalue *result)
-{
-   switch (method->return_type.vm_type) {
-   case J_VOID:
-   native_call_gp(method, target, args);
-   break;
-   case J_REFERENCE:
-   result->l = (jobject) native_call_gp(method, target, args);
- 

[PATCH 3/3] vm: implement VMClassLoader.defineClass()

2009-10-14 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/runtime/classloader.h |1 +
 include/vm/class.h|1 +
 include/vm/classloader.h  |1 +
 include/vm/preload.h  |1 +
 runtime/classloader.c |   40 +++-
 test/vm/preload-stub.c|1 +
 vm/class.c|   35 ++-
 vm/classloader.c  |   28 
 vm/jato.c |1 +
 vm/preload.c  |2 ++
 10 files changed, 109 insertions(+), 2 deletions(-)

diff --git a/include/runtime/classloader.h b/include/runtime/classloader.h
index 2f034ae..7cc602b 100644
--- a/include/runtime/classloader.h
+++ b/include/runtime/classloader.h
@@ -6,5 +6,6 @@
 jobject native_vmclassloader_getprimitiveclass(jint type);
 jobject native_vmclassloader_findloadedclass(jobject classloader, jobject 
name);
 jobject native_vmclassloader_loadclass(jobject name, jboolean resolve);
+jobject native_vmclassloader_defineclass(jobject classloader, jobject name, 
jobject data, jint offset, jint len, jobject pd);
 
 #endif /* RUNTIME_CLASSLOADER_H */
diff --git a/include/vm/class.h b/include/vm/class.h
index 4451680..bdb17b3 100644
--- a/include/vm/class.h
+++ b/include/vm/class.h
@@ -185,6 +185,7 @@ struct vm_class *vm_class_get_array_element_class(const 
struct vm_class *array_c
 enum vm_type vm_class_get_storage_vmtype(const struct vm_class *class);
 struct vm_class *vm_class_get_class_from_class_object(struct vm_object *clazz);
 struct vm_class *vm_class_get_array_class(struct vm_class *element_class);
+struct vm_class *vm_class_define(const char *name, uint8_t *data, unsigned 
long len);
 
 #define DECLARE_STATIC_FIELD_GETTER(type)  \
 static inline j ## type
\
diff --git a/include/vm/classloader.h b/include/vm/classloader.h
index 12873b4..3c5409c 100644
--- a/include/vm/classloader.h
+++ b/include/vm/classloader.h
@@ -17,5 +17,6 @@ struct vm_class *classloader_load(struct vm_object *loader,
  const char *class_name);
 struct vm_class *classloader_load_primitive(const char *class_name);
 struct vm_class *classloader_find_class(struct vm_object *loader, const char 
*name);
+int classloader_add_to_cache(struct vm_object *loader, struct vm_class *class);
 
 #endif
diff --git a/include/vm/preload.h b/include/vm/preload.h
index d09f6b7..cd060d3 100644
--- a/include/vm/preload.h
+++ b/include/vm/preload.h
@@ -50,6 +50,7 @@ extern struct vm_class *vm_java_lang_IllegalArgumentException;
 extern struct vm_class *vm_java_lang_ClassLoader;
 extern struct vm_class *vm_java_lang_Number;
 extern struct vm_class *vm_java_lang_InterruptedException;
+extern struct vm_class *vm_java_lang_ClassFormatError;
 extern struct vm_class *vm_boolean_class;
 extern struct vm_class *vm_char_class;
 extern struct vm_class *vm_float_class;
diff --git a/runtime/classloader.c b/runtime/classloader.c
index 9258c40..029f562 100644
--- a/runtime/classloader.c
+++ b/runtime/classloader.c
@@ -29,8 +29,9 @@
 #include "runtime/classloader.h"
 
 #include "vm/class.h"
-#include "vm/object.h"
 #include "vm/classloader.h"
+#include "vm/object.h"
+#include "vm/preload.h"
 
 #include 
 #include 
@@ -92,3 +93,40 @@ jobject native_vmclassloader_loadclass(jobject name, 
jboolean resolve)
 
return vmc->object;
 }
+
+jobject native_vmclassloader_defineclass(jobject classloader, jobject name,
+   jobject data, jint offset, jint len, jobject pd)
+{
+   struct vm_class *class;
+   char *c_name;
+   uint8_t *buf;
+
+   buf = malloc(len);
+   if (!buf) {
+   signal_new_exception(vm_java_lang_OutOfMemoryError, NULL);
+   return NULL;
+   }
+
+   for (jint i = 0; i < len; i++)
+   buf[i] = array_get_field_byte(data, offset + i);
+
+   if (name)
+   c_name = vm_string_to_cstr(name);
+   else
+   c_name = strdup("unknown");
+
+   class = vm_class_define(c_name, buf, len);
+   free(buf);
+
+   if (!class)
+   return NULL;
+
+   class->classloader = classloader;
+   if (classloader_add_to_cache(classloader, class)) {
+   signal_new_exception(vm_java_lang_OutOfMemoryError, NULL);
+   return NULL;
+   }
+
+   vm_class_ensure_object(class);
+   return class->object;
+}
diff --git a/test/vm/preload-stub.c b/test/vm/preload-stub.c
index ca3e77f..aec88ec 100644
--- a/test/vm/preload-stub.c
+++ b/test/vm/preload-stub.c
@@ -54,6 +54,7 @@ struct vm_class *vm_java_lang_VMThread;
 struct vm_class *vm_java_lang_IllegalMonitorStateException;
 struct vm_class *vm_java_lang_OutOfMemoryError;
 struct vm_class *vm_java_lang_InterruptedException;
+struct vm_class *vm_java_lang_ClassFor

[PATCH 2/3] vm: make class cache recognize different classloaders.

2009-10-14 Thread Tomek Grabiec
Two different classloaders may load a class of the same name. We must
therefore make the classloader reference part of the class cache key.

Signed-off-by: Tomek Grabiec 
---
 include/lib/hash-map.h   |1 +
 include/vm/classloader.h |2 +-
 lib/hash-map.c   |5 +++
 runtime/classloader.c|5 +--
 vm/classloader.c |   69 ++---
 5 files changed, 66 insertions(+), 16 deletions(-)

diff --git a/include/lib/hash-map.h b/include/lib/hash-map.h
index 42c243f..7e78cf9 100644
--- a/include/lib/hash-map.h
+++ b/include/lib/hash-map.h
@@ -32,6 +32,7 @@ bool hash_map_contains(struct hash_map *map, const void *key);
list_for_each_entry(this, &(hashmap)->table[__i], list_node)
 
 hash_fn string_hash;
+hash_fn ptr_hash;
 compare_fn string_compare;
 
 #endif
diff --git a/include/vm/classloader.h b/include/vm/classloader.h
index fe2556f..12873b4 100644
--- a/include/vm/classloader.h
+++ b/include/vm/classloader.h
@@ -16,6 +16,6 @@ void classloader_init(void);
 struct vm_class *classloader_load(struct vm_object *loader,
  const char *class_name);
 struct vm_class *classloader_load_primitive(const char *class_name);
-struct vm_class *classloader_find_class(const char *name);
+struct vm_class *classloader_find_class(struct vm_object *loader, const char 
*name);
 
 #endif
diff --git a/lib/hash-map.c b/lib/hash-map.c
index 28df5d8..7082f81 100644
--- a/lib/hash-map.c
+++ b/lib/hash-map.c
@@ -142,6 +142,11 @@ unsigned long string_hash(const void *key, unsigned long 
size)
return hash % size;
 }
 
+unsigned long ptr_hash(const void *key, unsigned long size)
+{
+   return ((unsigned long) key) % size;
+}
+
 int string_compare(const void *key1, const void *key2)
 {
return strcmp(key1, key2);
diff --git a/runtime/classloader.c b/runtime/classloader.c
index 5ca54cf..9258c40 100644
--- a/runtime/classloader.c
+++ b/runtime/classloader.c
@@ -62,15 +62,12 @@ jobject native_vmclassloader_findloadedclass(jobject 
classloader, jobject name)
if (!c_name)
return NULL;
 
-   vmc = classloader_find_class(c_name);
+   vmc = classloader_find_class(classloader, c_name);
free(c_name);
 
if (!vmc)
return NULL;
 
-   if (vmc->classloader != classloader)
-   return NULL;
-
vm_class_ensure_init(vmc);
return vmc->object;
 }
diff --git a/vm/classloader.c b/vm/classloader.c
index e86aba4..ce173b5 100644
--- a/vm/classloader.c
+++ b/vm/classloader.c
@@ -194,6 +194,11 @@ enum class_load_status {
CLASS_NOT_FOUND,
 };
 
+struct classes_key {
+   char *class_name;
+   struct vm_object *classloader;
+};
+
 struct classloader_class {
enum class_load_status status;
struct vm_class *class;
@@ -201,27 +206,65 @@ struct classloader_class {
/* number of threads waiting for a class. */
unsigned long nr_waiting;
struct vm_thread *loading_thread;
+   struct vm_object *classloader;
+
+   struct classes_key key;
 };
 
 static struct hash_map *classes;
 
+static unsigned long classes_key_hash(const void *key, unsigned long size)
+{
+   const struct classes_key *classes_key = key;
+
+   return (string_hash(classes_key->class_name, size) ^
+   ptr_hash(classes_key->classloader, size)) % size;
+}
+
+static int classes_key_compare(const void *key1, const void *key2)
+{
+   const struct classes_key *classes_key1 = key1;
+   const struct classes_key *classes_key2 = key2;
+
+   if (!strcmp(classes_key1->class_name, classes_key2->class_name) &&
+   classes_key1->classloader == classes_key2->classloader)
+   return 0;
+
+   return -1;
+}
+
 void classloader_init(void)
 {
-   classes = alloc_hash_map(1, string_hash, string_compare);
+   classes = alloc_hash_map(1, classes_key_hash, classes_key_compare);
if (!classes)
error("failed to initialize class loader");
 }
 
-static struct classloader_class *lookup_class(const char *class_name)
+static struct classloader_class *
+lookup_class(struct vm_object *loader, const char *class_name)
 {
void *class;
+   struct classes_key key;
+
+   key.class_name  = (char *) class_name;
+   key.classloader = loader;
 
-   if (hash_map_get(classes, class_name, &class))
+   if (hash_map_get(classes, &key, &class))
return NULL;
 
return class;
 }
 
+static void remove_class(struct vm_object *loader, const char *class_name)
+{
+   struct classes_key key;
+
+   key.class_name  = (char *) class_name;
+   key.classloader = loader;
+
+   hash_map_remove(classes, &key);
+}
+
 static char *dots_to_slash(const char *name)
 {
char *result = strdup(name);
@@ -515,11 +558,12 @@ out_filename:
return result;
 }
 
-static stru

[PATCH 1/3] vm: fix loading of array classes.

2009-10-14 Thread Tomek Grabiec
Array classes have their classloader set to the classloader of the
element class. For primitive arrays, the classloader is always the
bootstrap classloader.

This is a bug fix which reveals itself after introducing a
classloader-aware cache. Loading of array classes [I and [[I can be
initiated with different classloaders. The former is always loaded
with bootstrap classloader (primitive array) while loading of the
latter can be initiated with any classloader. Class [[I must be
eventually loaded with bootstrap classloader too.

Signed-off-by: Tomek Grabiec 
---
 include/vm/types.h |6 ++
 vm/classloader.c   |   16 +---
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/include/vm/types.h b/include/vm/types.h
index 4695d9c..1f3350b 100644
--- a/include/vm/types.h
+++ b/include/vm/types.h
@@ -3,6 +3,7 @@
 
 #include 
 #include 
+#include 
 
 #include "lib/list.h"
 
@@ -62,6 +63,11 @@ static inline int vm_type_slot_size(enum vm_type type)
return 1;
 }
 
+static inline bool is_primitive_array(const char *name)
+{
+   return name[0] == '[' && name[strlen(name) - 1] != ';';
+}
+
 static inline enum vm_type mimic_stack_type(enum vm_type type)
 {
switch (type) {
diff --git a/vm/classloader.c b/vm/classloader.c
index 66682a7..e86aba4 100644
--- a/vm/classloader.c
+++ b/vm/classloader.c
@@ -400,6 +400,7 @@ struct vm_class *classloader_load_primitive(const char 
*class_name)
return NULL;
}
 
+   class->classloader = NULL;
class->primitive_vm_type = str_to_type(class_name);
 
if (vm_class_link_primitive_class(class, class_name)) {
@@ -439,6 +440,8 @@ load_array_class(struct vm_object *loader, const char 
*class_name)
else
elem_class = classloader_load(loader, elem_class_name);
 
+   array_class->classloader = elem_class->classloader;
+
if (vm_class_link_array_class(array_class, elem_class, class_name)) {
signal_new_exception(vm_java_lang_OutOfMemoryError, NULL);
return NULL;
@@ -505,8 +508,10 @@ static struct vm_class *load_class(struct vm_object 
*loader,
}
 
 out_filename:
-   free(filename);
+   if (result)
+   result->classloader = NULL;
 
+   free(filename);
return result;
 }
 
@@ -569,6 +574,13 @@ classloader_load(struct vm_object *loader, const char 
*class_name)
 
vmc = NULL;
 
+   /*
+* Array classes have classloader set to the classloader of its 
elements.
+* Primitive types are always loaded with bootstrap classloader.
+*/
+   if (is_primitive_array(class_name))
+   loader = NULL;
+
pthread_mutex_lock(&classloader_mutex);
 
class = find_class(slash_class_name);
@@ -619,8 +631,6 @@ classloader_load(struct vm_object *loader, const char 
*class_name)
 
pthread_mutex_lock(&classloader_mutex);
 
-   vmc->classloader = loader;
-
class->class = vmc;
class->status = CLASS_LOADED;
 
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] runtime: move VMClassLoader natives to runtime/classloader.c

2009-10-14 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 Makefile  |1 +
 include/runtime/classloader.h |   10 
 runtime/classloader.c |   97 +
 vm/jato.c |   66 +---
 4 files changed, 109 insertions(+), 65 deletions(-)
 create mode 100644 include/runtime/classloader.h
 create mode 100644 runtime/classloader.c

diff --git a/Makefile b/Makefile
index 24deef4..9aad5f9 100644
--- a/Makefile
+++ b/Makefile
@@ -99,6 +99,7 @@ JIT_OBJS = \
 
 VM_OBJS = \
runtime/class.o \
+   runtime/classloader.o   \
runtime/reflection.o\
runtime/runtime.o   \
runtime/stack-walker.o  \
diff --git a/include/runtime/classloader.h b/include/runtime/classloader.h
new file mode 100644
index 000..2f034ae
--- /dev/null
+++ b/include/runtime/classloader.h
@@ -0,0 +1,10 @@
+#ifndef RUNTIME_CLASSLOADER_H
+#define RUNTIME_CLASSLOADER_H
+
+#include "vm/jni.h"
+
+jobject native_vmclassloader_getprimitiveclass(jint type);
+jobject native_vmclassloader_findloadedclass(jobject classloader, jobject 
name);
+jobject native_vmclassloader_loadclass(jobject name, jboolean resolve);
+
+#endif /* RUNTIME_CLASSLOADER_H */
diff --git a/runtime/classloader.c b/runtime/classloader.c
new file mode 100644
index 000..5ca54cf
--- /dev/null
+++ b/runtime/classloader.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2009 Tomasz Grabiec
+ *
+ * This file is released under the GPL version 2 with the following
+ * clarification and special exception:
+ *
+ * Linking this library statically or dynamically with other modules is
+ * making a combined work based on this library. Thus, the terms and
+ * conditions of the GNU General Public License cover the whole
+ * combination.
+ *
+ * As a special exception, the copyright holders of this library give you
+ * permission to link this library with independent modules to produce an
+ * executable, regardless of the license terms of these independent
+ * modules, and to copy and distribute the resulting executable under terms
+ * of your choice, provided that you also meet, for each linked independent
+ * module, the terms and conditions of the license of that module. An
+ * independent module is a module which is not derived from or based on
+ * this library. If you modify this library, you may extend this exception
+ * to your version of the library, but you are not obligated to do so. If
+ * you do not wish to do so, delete this exception statement from your
+ * version.
+ *
+ * Please refer to the file LICENSE for details.
+ */
+
+#include "jit/exception.h"
+
+#include "runtime/classloader.h"
+
+#include "vm/class.h"
+#include "vm/object.h"
+#include "vm/classloader.h"
+
+#include 
+#include 
+
+jobject native_vmclassloader_getprimitiveclass(jint type)
+{
+   char primitive_class_name[] = { "X" };
+   struct vm_class *class;
+
+   primitive_class_name[0] = (char)type;
+
+   class = classloader_load_primitive(primitive_class_name);
+   if (!class)
+   return NULL;
+
+   vm_class_ensure_init(class);
+   if (exception_occurred())
+   return NULL;
+
+   return class->object;
+}
+
+jobject native_vmclassloader_findloadedclass(jobject classloader, jobject name)
+{
+   struct vm_class *vmc;
+   char *c_name;
+
+   c_name = vm_string_to_cstr(name);
+   if (!c_name)
+   return NULL;
+
+   vmc = classloader_find_class(c_name);
+   free(c_name);
+
+   if (!vmc)
+   return NULL;
+
+   if (vmc->classloader != classloader)
+   return NULL;
+
+   vm_class_ensure_init(vmc);
+   return vmc->object;
+}
+
+/* TODO: respect the @resolve parameter. */
+jobject native_vmclassloader_loadclass(jobject name, jboolean resolve)
+{
+   struct vm_class *vmc;
+   char *c_name;
+
+   c_name = vm_string_to_cstr(name);
+   if (!c_name)
+   return NULL;
+
+   vmc = classloader_load(NULL, c_name);
+   free(c_name);
+   if (!vmc)
+   return NULL;
+
+   if (vm_class_ensure_object(vmc))
+   return NULL;
+
+   return vmc->object;
+}
diff --git a/vm/jato.c b/vm/jato.c
index 73dca95..8980d73 100644
--- a/vm/jato.c
+++ b/vm/jato.c
@@ -53,6 +53,7 @@
 #include "runtime/runtime.h"
 #include "runtime/unsafe.h"
 #include "runtime/class.h"
+#include "runtime/classloader.h"
 
 #include "jit/compiler.h"
 #include "jit/cu-mapping.h"
@@ -400,25 +401,6 @@ native_vmobject_getclass(struct vm_object *object)
return object->class->object;
 }
 
-static struct vm_object *
-native_vmclassloader_getprimitiveclass(int type)
-{
-   char primitive_class_name[] = { "X" };
-   struct vm_class *class;

[PATCH 2/3] jit: handle exception handlers covering subroutines in subroutine inlining.

2009-10-14 Thread Tomek Grabiec
The code didn't handle the case where exception handler range covers
the whole subroutine and more.

Signed-off-by: Tomek Grabiec 
---
 jit/subroutine.c |  180 ++
 1 files changed, 127 insertions(+), 53 deletions(-)

diff --git a/jit/subroutine.c b/jit/subroutine.c
index 5ac8633..8a5181a 100644
--- a/jit/subroutine.c
+++ b/jit/subroutine.c
@@ -768,6 +768,42 @@ eh_split(struct inlining_context *ctx, int i)
return &new_table[i + 1];
 }
 
+static inline bool
+eh_outside_subroutine(struct cafebabe_code_attribute_exception *eh,
+ struct subroutine *s)
+{
+   return eh->start_pc >= s->end_pc + s->epilog_size ||
+   eh->end_pc <= s->start_pc;
+}
+
+static inline bool
+eh_inside_subroutine(struct cafebabe_code_attribute_exception *eh,
+struct subroutine *s)
+{
+   return eh->start_pc >= s->start_pc &&
+   eh->end_pc <= s->end_pc + s->epilog_size;
+}
+
+static inline bool
+eh_covers_subroutine(struct cafebabe_code_attribute_exception *eh,
+struct subroutine *s)
+{
+   return eh->start_pc <= s->start_pc &&
+   eh->end_pc >= s->end_pc + s->epilog_size;
+}
+
+static inline bool
+eh_target_inside_subroutine(struct cafebabe_code_attribute_exception *eh,
+   struct subroutine *s)
+{
+   return eh->handler_pc >= s->start_pc &&
+   eh->handler_pc < s->end_pc + s->epilog_size;
+}
+
+/**
+ * Duplicates exception handler entry so that it covers all instances
+ * of inlined subroutines it belongs to.
+ */
 static int
 copy_exception_handler(struct inlining_context *ctx, struct subroutine *s,
   int *eh_index, struct pc_map *pc_map)
@@ -775,12 +811,14 @@ copy_exception_handler(struct inlining_context *ctx, 
struct subroutine *s,
struct cafebabe_code_attribute_exception *eh;
unsigned long eh_start_pc_offset;
unsigned long eh_end_pc_offset;
-   unsigned long eh_handler_pc_offset;
+   unsigned long eh_handler_pc;
unsigned long body_start_pc;
unsigned long body_size;
 
eh = &ctx->exception_table[*eh_index];
 
+   eh_handler_pc = eh->handler_pc;
+
body_start_pc = s->start_pc + s->prolog_size;
body_size = subroutine_get_body_size(s);
 
@@ -794,8 +832,6 @@ copy_exception_handler(struct inlining_context *ctx, struct 
subroutine *s,
else
eh_end_pc_offset = eh->end_pc - body_start_pc;
 
-   eh_handler_pc_offset = eh->handler_pc - body_start_pc;
-
for (int i = 0; i < s->nr_call_sites; i++) {
struct cafebabe_code_attribute_exception *new_eh;
unsigned long sub_start;
@@ -804,26 +840,84 @@ copy_exception_handler(struct inlining_context *ctx, 
struct subroutine *s,
if (pc_map_get_unique(pc_map, &sub_start))
return warn("no or ambiguous mapping"), -EINVAL;
 
-   if (i == s->nr_call_sites - 1)
-   new_eh = &ctx->exception_table[*eh_index];
-   else {
-   new_eh = eh_split(ctx, *eh_index);
-   if (!new_eh)
-   return warn("out of memory"), -ENOMEM;
-   }
+   new_eh = eh_split(ctx, *eh_index);
+   if (!new_eh)
+   return warn("out of memory"), -ENOMEM;
 
new_eh->start_pc = sub_start + eh_start_pc_offset;
new_eh->end_pc = sub_start + eh_end_pc_offset;
-   new_eh->handler_pc = sub_start + eh_handler_pc_offset;
new_eh->catch_type = ctx->exception_table[*eh_index].catch_type;
+
+   if (eh_target_inside_subroutine(eh, s)) {
+   new_eh->handler_pc = sub_start + eh_handler_pc - 
body_start_pc;
+   } else {
+   unsigned long pc = eh_handler_pc;
+   if (pc_map_get_unique(pc_map, &pc))
+   return -EINVAL;
+
+   new_eh->handler_pc = pc;
+   }
}
 
-   *eh_index = *eh_index + s->nr_call_sites - 1;
+   *eh_index = *eh_index + s->nr_call_sites;
 
return 0;
 }
 
 static int
+update_exception_handler(struct cafebabe_code_attribute_exception *eh,
+struct pc_map *pc_map)
+{
+   unsigned long start_pc, end_pc, handler_pc;
+   int err;
+
+   start_pc = eh->start_pc;
+   end_pc = eh->end_pc;
+   handler_pc = eh->handler_pc;
+
+   err = pc_map_get_unique(pc_map, &start_pc);
+   if (err)
+   return warn("no or ambiguous mapping"), -EINVAL;
+
+   err = pc_map_get_unique

[PATCH 3/3] vm: do not return jboolean from VM natives.

2009-10-14 Thread Tomek Grabiec
We should return jint instead of jboolean in VM native implementations
because in the latter case only least significant byte is valid and
JIT code treats the native's return value as jint. As a result, even
if a native function returns false as jboolean, the JIT code might
see it as a jint value != 0.
boolean, char and short push J_INT onto mimic stack (according to JVM
spec). To be ok with the specification and code we can do one of the
following:

   (1) add conversion code to JIT after native calls

   (2) set jint as a return value for all VM native functions
   returning types of size less than jint.

This patch provides solution nr (2).

This caused the following error to happen for clojure, because
native_vmthread_interrupted() returned != 0 even if
vm_thread_interrupted() returned false:

Exception in thread "main" java.lang.Exception: Transaction failed after 
reaching retry limit
   at clojure.lang.LockingTransaction.run(LockingTransaction.java:386)
   at 
clojure.lang.LockingTransaction.runInTransaction(LockingTransaction.java:236)
   at clojure.core$load_one__5814.invoke(core.clj:3731)
   at clojure.core$load_lib__5835.doInvoke(core.clj:3763)
   at clojure.lang.RestFn.applyTo(RestFn.java:147)
   at clojure.core$apply__3977.doInvoke(core.clj:402)
   at clojure.lang.RestFn.invoke(RestFn.java:443)
   at clojure.core$load_libs__5847.doInvoke(core.clj:3789)
   at clojure.lang.RestFn.applyTo(RestFn.java:142)
   at clojure.core$apply__3977.doInvoke(core.clj:402)
   at clojure.lang.RestFn.invoke(RestFn.java:443)
   at clojure.core$require__5853.doInvoke(core.clj:3857)
   at clojure.lang.RestFn.invoke(RestFn.java:413)
   at clojure.lang.Var.invoke(Var.java:359)
   at clojure.main.main(main.java:36)

Signed-off-by: Tomek Grabiec 
---
 include/runtime/class.h |6 +++---
 runtime/class.c |6 +++---
 vm/jato.c   |6 +++---
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/runtime/class.h b/include/runtime/class.h
index 50d1ec0..7691ac9 100644
--- a/include/runtime/class.h
+++ b/include/runtime/class.h
@@ -11,14 +11,14 @@ struct vm_object *native_vmclass_forname(struct vm_object 
*name,
 struct vm_object *loader);
 struct vm_object *native_vmclass_getname(struct vm_object *object);
 int32_t native_vmclass_is_anonymous_class(struct vm_object *object);
-jboolean native_vmclass_is_assignable_from(struct vm_object *clazz_1,
+jint native_vmclass_is_assignable_from(struct vm_object *clazz_1,
   struct vm_object *clazz_2);
 int32_t native_vmclass_isarray(struct vm_object *object);
 int32_t native_vmclass_isprimitive(struct vm_object *object);
 jint native_vmclass_getmodifiers(struct vm_object *clazz);
 struct vm_object *native_vmclass_getcomponenttype(struct vm_object *object);
-jboolean native_vmclass_isinstance(struct vm_object *clazz,
+jint native_vmclass_isinstance(struct vm_object *clazz,
   struct vm_object *object);
-jboolean native_vmclass_isinterface(struct vm_object *clazz);
+jint native_vmclass_isinterface(struct vm_object *clazz);
 
 #endif /* RUNTIME_CLASS_H */
diff --git a/runtime/class.c b/runtime/class.c
index 9fe690d..636ff11 100644
--- a/runtime/class.c
+++ b/runtime/class.c
@@ -108,7 +108,7 @@ int32_t native_vmclass_is_anonymous_class(struct vm_object 
*object)
return vm_class_is_anonymous(class);
 }
 
-jboolean native_vmclass_is_assignable_from(struct vm_object *clazz_1,
+jint native_vmclass_is_assignable_from(struct vm_object *clazz_1,
   struct vm_object *clazz_2)
 {
struct vm_class *vmc_1 = vm_object_to_vm_class(clazz_1);
@@ -164,7 +164,7 @@ struct vm_object *native_vmclass_getcomponenttype(struct 
vm_object *object)
return vm_class_get_array_element_class(class)->object;
 }
 
-jboolean native_vmclass_isinstance(struct vm_object *clazz,
+jint native_vmclass_isinstance(struct vm_object *clazz,
   struct vm_object *object)
 {
struct vm_class *class = vm_object_to_vm_class(clazz);
@@ -175,7 +175,7 @@ jboolean native_vmclass_isinstance(struct vm_object *clazz,
return vm_class_is_assignable_from(class, object->class);
 }
 
-jboolean native_vmclass_isinterface(struct vm_object *clazz)
+jint native_vmclass_isinterface(struct vm_object *clazz)
 {
struct vm_class *class = vm_object_to_vm_class(clazz);
if (!class)
diff --git a/vm/jato.c b/vm/jato.c
index 9e1c9ac..73dca95 100644
--- a/vm/jato.c
+++ b/vm/jato.c
@@ -461,12 +461,12 @@ static void native_vmthread_start(struct vm_object 
*vmthread, jlong stacksize)
vm_thread_start(vmthread);
 }
 
-static jboolean native_vmthread_interrupted(void)
+static jint native_vmthread_interrupted(void)
 {
return vm_thread_interrupted(vm_thread_self());
 }
 
-static jboolean native_vmthread

[PATCH 1/3] jit: fix print_exception_table()

2009-10-14 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 jit/exception.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/jit/exception.c b/jit/exception.c
index b412e25..8274eaa 100644
--- a/jit/exception.c
+++ b/jit/exception.c
@@ -352,7 +352,7 @@ print_exception_table(const struct vm_method *method,
 
if (!eh->catch_type) {
trace_printf("all\n");
-   return;
+   continue;
}
 
const struct vm_class *catch_class;
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] vm: fix bug in native_field_get()

2009-10-11 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 vm/reflection.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/vm/reflection.c b/vm/reflection.c
index d97fec9..faf27bd 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -621,7 +621,7 @@ struct vm_object *native_field_get(struct vm_object *this, 
struct vm_object *o)
value_p = &o->fields[vmf->offset];
}
 
-   return wrap((union jvalue *) &value_p, type);
+   return wrap((union jvalue *) value_p, type);
 }
 
 jint native_field_get_modifiers_internal(struct vm_object *this)
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 3/4] x86: make native_call() hard crash for not implemented types.

2009-10-11 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/call.c |8 
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/call.c b/arch/x86/call.c
index 717d53d..0b96020 100644
--- a/arch/x86/call.c
+++ b/arch/x86/call.c
@@ -137,7 +137,7 @@ void native_call(struct vm_method *method,
case J_LONG:
case J_DOUBLE:
case J_FLOAT:
-   NOT_IMPLEMENTED;
+   error("not implemented");
break;
case J_RETURN_ADDRESS:
case VM_TYPE_MAX:
@@ -181,7 +181,7 @@ void vm_native_call(struct vm_method *method,
case J_LONG:
case J_DOUBLE:
case J_FLOAT:
-   NOT_IMPLEMENTED;
+   error("not implemented");
break;
case J_RETURN_ADDRESS:
case VM_TYPE_MAX:
@@ -294,7 +294,7 @@ void native_call(struct vm_method *method,
break;
case J_DOUBLE:
case J_FLOAT:
-   NOT_IMPLEMENTED;
+   error("not implemented");
break;
case J_RETURN_ADDRESS:
case VM_TYPE_MAX:
@@ -340,7 +340,7 @@ void vm_native_call(struct vm_method *method,
break;
case J_DOUBLE:
case J_FLOAT:
-   NOT_IMPLEMENTED;
+   error("not implemented");
break;
case J_RETURN_ADDRESS:
case VM_TYPE_MAX:
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/4] regression: fix TestCase.assertObjectEquals()

2009-10-11 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 regression/jvm/TestCase.java |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/regression/jvm/TestCase.java b/regression/jvm/TestCase.java
index fd00063..2a53a8b 100644
--- a/regression/jvm/TestCase.java
+++ b/regression/jvm/TestCase.java
@@ -90,7 +90,7 @@ public class TestCase {
 assertTrue(o.getClass().getName().equals(className));
 }
 
-protected static void assertObjectEquals(String a, String b) {
+protected static void assertObjectEquals(Object a, Object b) {
 if (a == null && b == null)
 return;
 
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/4] vm: fix native_vmclass_get_declared_methods()

2009-10-11 Thread Tomek Grabiec
We should also perform the vm_method_is_special() check when
public_only == false.

Signed-off-by: Tomek Grabiec 
---
 vm/reflection.c |   23 ++-
 1 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/vm/reflection.c b/vm/reflection.c
index 6d03a57..d97fec9 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -173,6 +173,7 @@ native_vmclass_get_declared_methods(struct vm_object *clazz,
jboolean public_only)
 {
struct vm_class *vmc;
+   int count;
 
vmc = vm_object_to_vm_class(clazz);
if (!vmc)
@@ -181,25 +182,21 @@ native_vmclass_get_declared_methods(struct vm_object 
*clazz,
if (vm_class_is_primitive_class(vmc) || vm_class_is_array_class(vmc))
return 
vm_object_alloc_array(vm_array_of_java_lang_reflect_Field, 0);
 
-   int count;
+   count = 0;
+   for (int i = 0; i < vmc->class->methods_count; i++) {
+   struct vm_method *vmm = &vmc->methods[i];
 
-   if (public_only) {
-   count = 0;
+   if (public_only && !vm_method_is_public(vmm))
+   continue;
 
-   for (int i = 0; i < vmc->class->methods_count; i++) {
-   struct vm_method *vmm = &vmc->methods[i];
+   if (vm_method_is_special(vmm))
+   continue;
 
-   if (vm_method_is_public(vmm) &&
-   !vm_method_is_special(vmm))
-   count ++;
-   }
-   } else {
-   count = vmc->class->methods_count;
+   count ++;
}
 
struct vm_object *array
-   = vm_object_alloc_array(vm_array_of_java_lang_reflect_Method,
-   count);
+   = vm_object_alloc_array(vm_array_of_java_lang_reflect_Method, 
count);
if (!array) {
NOT_IMPLEMENTED;
return NULL;
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 4/4] regression: add regression test for method invocation via reflection

2009-10-11 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 regression/jvm/MethodTest.java |   20 +++-
 1 files changed, 19 insertions(+), 1 deletions(-)

diff --git a/regression/jvm/MethodTest.java b/regression/jvm/MethodTest.java
index c633f8c..b805da4 100644
--- a/regression/jvm/MethodTest.java
+++ b/regression/jvm/MethodTest.java
@@ -36,7 +36,7 @@ public class MethodTest extends TestCase {
 assertEquals(Modifier.STATIC | Modifier.PUBLIC, 
modifiers("publicClassMethod"));
 assertEquals(Modifier.PUBLIC, modifiers("publicInstanceMethod"));
 }
- 
+
 private static int modifiers(String name) throws Exception {
   return Klass.class.getMethod(name, new Class[] { }).getModifiers();
 }
@@ -45,9 +45,27 @@ public class MethodTest extends TestCase {
   public final void publicFinalInstanceMethod() { }
   public static void publicClassMethod() { }
   public void publicInstanceMethod() { }
+
+  public static int intIncrement(int x) {
+  return x + 1;
+  }
+}
+
+public static Object invoke(String name, Class arg_class, Object arg) {
+try {
+return Klass.class.getMethod(name, new Class[] { arg_class 
}).invoke(null, new Object[] { arg });
+} catch (Exception e) {
+fail();
+return null;
+}
+}
+
+public static void testMethodReflectionInvoke() {
+assertObjectEquals(Integer.valueOf(2), invoke("intIncrement", 
int.class, Integer.valueOf(1)));
 }
 
 public static void main(String[] args) throws Exception {
 testMethodModifiers();
+testMethodReflectionInvoke();
 }
 }
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 3/4] vm: rename encapsulate_value() to wrap()

2009-10-11 Thread Tomek Grabiec
While at it, make it more robust by using valueOf() functions.

Signed-off-by: Tomek Grabiec 
---
 include/vm/preload.h |8 +++
 vm/preload.c |   56 ++
 vm/reflection.c  |  153 --
 3 files changed, 124 insertions(+), 93 deletions(-)

diff --git a/include/vm/preload.h b/include/vm/preload.h
index a99fa89..d09f6b7 100644
--- a/include/vm/preload.h
+++ b/include/vm/preload.h
@@ -101,13 +101,21 @@ extern struct vm_method *vm_java_lang_VMThread_init;
 extern struct vm_method *vm_java_lang_VMThread_run;
 extern struct vm_method *vm_java_lang_System_exit;
 extern struct vm_method *vm_java_lang_Boolean_init;
+extern struct vm_method *vm_java_lang_Boolean_valueOf;
 extern struct vm_method *vm_java_lang_Byte_init;
+extern struct vm_method *vm_java_lang_Byte_valueOf;
 extern struct vm_method *vm_java_lang_Character_init;
+extern struct vm_method *vm_java_lang_Character_valueOf;
 extern struct vm_method *vm_java_lang_Double_init;
+extern struct vm_method *vm_java_lang_Double_valueOf;
 extern struct vm_method *vm_java_lang_Float_init;
+extern struct vm_method *vm_java_lang_Float_valueOf;
 extern struct vm_method *vm_java_lang_Integer_init;
+extern struct vm_method *vm_java_lang_Integer_valueOf;
 extern struct vm_method *vm_java_lang_Long_init;
+extern struct vm_method *vm_java_lang_Long_valueOf;
 extern struct vm_method *vm_java_lang_Short_init;
+extern struct vm_method *vm_java_lang_Short_valueOf;
 extern struct vm_method *vm_java_lang_ClassLoader_loadClass;
 extern struct vm_method *vm_java_lang_ClassLoader_getSystemClassLoader;
 extern struct vm_method *vm_java_lang_Number_intValue;
diff --git a/vm/preload.c b/vm/preload.c
index 23516af..ba2d50f 100644
--- a/vm/preload.c
+++ b/vm/preload.c
@@ -247,13 +247,21 @@ struct vm_method *vm_java_lang_VMThread_init;
 struct vm_method *vm_java_lang_VMThread_run;
 struct vm_method *vm_java_lang_System_exit;
 struct vm_method *vm_java_lang_Boolean_init;
+struct vm_method *vm_java_lang_Boolean_valueOf;
 struct vm_method *vm_java_lang_Byte_init;
+struct vm_method *vm_java_lang_Byte_valueOf;
 struct vm_method *vm_java_lang_Character_init;
+struct vm_method *vm_java_lang_Character_valueOf;
 struct vm_method *vm_java_lang_Double_init;
+struct vm_method *vm_java_lang_Double_valueOf;
 struct vm_method *vm_java_lang_Float_init;
+struct vm_method *vm_java_lang_Float_valueOf;
 struct vm_method *vm_java_lang_Integer_init;
+struct vm_method *vm_java_lang_Integer_valueOf;
 struct vm_method *vm_java_lang_Long_init;
+struct vm_method *vm_java_lang_Long_valueOf;
 struct vm_method *vm_java_lang_Short_init;
+struct vm_method *vm_java_lang_Short_valueOf;
 struct vm_method *vm_java_lang_ClassLoader_loadClass;
 struct vm_method *vm_java_lang_ClassLoader_getSystemClassLoader;
 struct vm_method *vm_java_lang_VMString_intern;
@@ -360,48 +368,96 @@ static const struct method_preload_entry 
method_preload_entries[] = {
&vm_java_lang_Boolean_init,
},
{
+   &vm_java_lang_Boolean,
+   "valueOf",
+   "(Z)Ljava/lang/Boolean;",
+   &vm_java_lang_Boolean_valueOf,
+   },
+   {
&vm_java_lang_Byte,
"",
"(B)V",
&vm_java_lang_Byte_init,
},
{
+   &vm_java_lang_Byte,
+   "valueOf",
+   "(B)Ljava/lang/Byte;",
+   &vm_java_lang_Byte_valueOf,
+   },
+   {
&vm_java_lang_Character,
"",
"(C)V",
&vm_java_lang_Character_init,
},
{
+   &vm_java_lang_Character,
+   "valueOf",
+   "(C)Ljava/lang/Character;",
+   &vm_java_lang_Character_valueOf,
+   },
+   {
&vm_java_lang_Double,
"",
"(D)V",
&vm_java_lang_Double_init,
},
{
+   &vm_java_lang_Double,
+   "valueOf",
+   "(D)Ljava/lang/Double;",
+   &vm_java_lang_Double_valueOf,
+   },
+   {
&vm_java_lang_Long,
"",
"(J)V",
&vm_java_lang_Long_init,
},
{
+   &vm_java_lang_Long,
+   "valueOf",
+   "(J)Ljava/lang/Long;",
+   &vm_java_lang_Long_valueOf,
+   },
+   {
&vm_java_lang_Short,
"",
"(S)V",
&vm_java_lang_Short_init,
},
{
+   &vm_java_lang_Short,
+   "valueOf",
+

[PATCH 4/4] vm: fix call_static_method() and call_virtual_method()

2009-10-11 Thread Tomek Grabiec
Those functions should return values wrapped in appropriate objects,
not the value itself. This caused a SIGSEGV when running clojure
because an integer value was accessed like an object reference.

Signed-off-by: Tomek Grabiec 
---
 vm/reflection.c |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/vm/reflection.c b/vm/reflection.c
index 027bfb4..6d03a57 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -721,7 +721,7 @@ call_virtual_method(struct vm_method *vmm, struct vm_object 
*o,
return NULL;
 
vm_call_method_this_a(vmm, o, args, &result);
-   return result.l;
+   return wrap(&result, vmm->return_type.vm_type);
 }
 
 static struct vm_object *
@@ -734,7 +734,7 @@ call_static_method(struct vm_method *vmm, struct vm_object 
*args_array)
return NULL;
 
vm_call_method_a(vmm, args, &result);
-   return result.l;
+   return wrap(&result, vmm->return_type.vm_type);
 }
 
 jint native_method_get_modifiers_internal(struct vm_object *this)
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/4] vm: change call result return method for vm_call_method_*() so that it can handle any type

2009-10-11 Thread Tomek Grabiec
Before that, all vm_call_method_*() functions returned unsigned long
which was not a good idea because there was no way to handle jlong or
jdouble this way on x86.

This patch changes the API, so that vm_call_method_*() functions are
given a pointer to a union jvalue to which the result should be
saved. This way we can use vm_call_method_*() functions in a machine
and type independent way.

While at it, implement JNI function families:
 CallStatic*Method()
 CallStatic*MethodV()
 Call*Method()

Signed-off-by: Tomek Grabiec 
---
 arch/x86/call.c|  222 +++-
 include/vm/call.h  |   85 +
 vm/call.c  |   40 
 vm/jni-interface.c |  265 +---
 vm/reflection.c|   21 +++--
 5 files changed, 385 insertions(+), 248 deletions(-)

diff --git a/arch/x86/call.c b/arch/x86/call.c
index 08b7ac9..717d53d 100644
--- a/arch/x86/call.c
+++ b/arch/x86/call.c
@@ -37,14 +37,13 @@
 #ifdef CONFIG_X86_32
 
 /**
- * This calls a function with call arguments copied from @args
- * array. The array contains @args_count elements of machine word
- * size. The @target must be a variable holding a function
- * pointer. Call result will be stored in @result.
+ * Calls @method which address is obtained from a memory
+ * pointed by @target. Function returns call result which
+ * is supposed to be saved to %eax.
  */
-unsigned long native_call(struct vm_method *method,
- const void *target,
- unsigned long *args)
+static unsigned long native_call_gp(struct vm_method *method,
+   const void *target,
+   unsigned long *args)
 {
unsigned long result;
 
@@ -66,15 +65,9 @@ unsigned long native_call(struct vm_method *method,
return result;
 }
 
-/**
- * This calls a VM native function with call arguments copied from
- * @args array. The array contains @args_count elements of machine
- * word size. The @target must be a pointer to a VM function. Call
- * result will be stored in @result.
- */
-unsigned long vm_native_call(struct vm_method *method,
-const void *target,
-unsigned long *args)
+static unsigned long vm_native_call_gp(struct vm_method *method,
+  const void *target,
+  unsigned long *args)
 {
unsigned long result;
 
@@ -108,11 +101,104 @@ unsigned long vm_native_call(struct vm_method *method,
return result;
 }
 
+/**
+ * This calls a function with call arguments copied from @args
+ * array. The array contains @args_count elements of machine word
+ * size. The @target must be a variable holding a function
+ * pointer. Call result will be stored in @result.
+ */
+void native_call(struct vm_method *method,
+const void *target,
+unsigned long *args,
+union jvalue *result)
+{
+   switch (method->return_type.vm_type) {
+   case J_VOID:
+   native_call_gp(method, target, args);
+   break;
+   case J_REFERENCE:
+   result->l = (jobject) native_call_gp(method, target, args);
+   break;
+   case J_INT:
+   result->i = (jint) native_call_gp(method, target, args);
+   break;
+   case J_CHAR:
+   result->c = (jchar) native_call_gp(method, target, args);
+   break;
+   case J_BYTE:
+   result->b = (jbyte) native_call_gp(method, target, args);
+   break;
+   case J_SHORT:
+   result->s = (jshort) native_call_gp(method, target, args);
+   break;
+   case J_BOOLEAN:
+   result->z = (jboolean) native_call_gp(method, target, args);
+   break;
+   case J_LONG:
+   case J_DOUBLE:
+   case J_FLOAT:
+   NOT_IMPLEMENTED;
+   break;
+   case J_RETURN_ADDRESS:
+   case VM_TYPE_MAX:
+   die("unexpected type");
+   }
+}
+
+/**
+ * This calls a VM native function with call arguments copied from
+ * @args array. The array contains @args_count elements of machine
+ * word size. The @target must be a pointer to a VM function. Call
+ * result will be stored in @result.
+ */
+void vm_native_call(struct vm_method *method,
+   const void *target,
+   unsigned long *args,
+   union jvalue *result)
+{
+   switch (method->return_type.vm_type) {
+   case J_VOID:
+   vm_native_call_gp(method, target, args);
+   break;
+   case J_REFERENCE:
+   result->l = (jobject) vm_native_call_gp(method, target, args);
+   break;
+   case J_INT:
+   result->i = (jint) vm_native_call_gp(method, target, args);
+   break;
+ 

[PATCH 2/4] vm: implement unwrap() for J_LONG and J_DOUBLE

2009-10-11 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/vm/preload.h |2 ++
 vm/preload.c |   14 ++
 vm/reflection.c  |7 ++-
 3 files changed, 22 insertions(+), 1 deletions(-)

diff --git a/include/vm/preload.h b/include/vm/preload.h
index c71d8f9..a99fa89 100644
--- a/include/vm/preload.h
+++ b/include/vm/preload.h
@@ -112,6 +112,8 @@ extern struct vm_method *vm_java_lang_ClassLoader_loadClass;
 extern struct vm_method *vm_java_lang_ClassLoader_getSystemClassLoader;
 extern struct vm_method *vm_java_lang_Number_intValue;
 extern struct vm_method *vm_java_lang_Number_floatValue;
+extern struct vm_method *vm_java_lang_Number_longValue;
+extern struct vm_method *vm_java_lang_Number_doubleValue;
 
 int preload_vm_classes(void);
 
diff --git a/vm/preload.c b/vm/preload.c
index b2804da..23516af 100644
--- a/vm/preload.c
+++ b/vm/preload.c
@@ -259,6 +259,8 @@ struct vm_method 
*vm_java_lang_ClassLoader_getSystemClassLoader;
 struct vm_method *vm_java_lang_VMString_intern;
 struct vm_method *vm_java_lang_Number_intValue;
 struct vm_method *vm_java_lang_Number_floatValue;
+struct vm_method *vm_java_lang_Number_longValue;
+struct vm_method *vm_java_lang_Number_doubleValue;
 
 static const struct method_preload_entry method_preload_entries[] = {
{
@@ -429,6 +431,18 @@ static const struct method_preload_entry 
method_preload_entries[] = {
"()F",
&vm_java_lang_Number_floatValue,
},
+   {
+   &vm_java_lang_Number,
+   "longValue",
+   "()J",
+   &vm_java_lang_Number_longValue,
+   },
+   {
+   &vm_java_lang_Number,
+   "doubleValue",
+   "()D",
+   &vm_java_lang_Number_doubleValue,
+   },
 };
 
 /*
diff --git a/vm/reflection.c b/vm/reflection.c
index 790dce8..e5dea46 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -655,8 +655,13 @@ static int unwrap(void *field_ptr, enum vm_type type,
*(jfloat *) field_ptr = result.f;
return 0;
case J_LONG:
+   vm_call_method_this_a(vm_java_lang_Number_longValue, value, 
args, &result);
+   *(jlong *) field_ptr = result.j;
+   return 0;
case J_DOUBLE:
-   error("not implemented");
+   vm_call_method_this_a(vm_java_lang_Number_doubleValue, value, 
args, &result);
+   *(jdouble *) field_ptr = result.d;
+   return 0;
case J_VOID:
case J_RETURN_ADDRESS:
case VM_TYPE_MAX:
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] jit: skip conversion of unreachable basic blocks

2009-10-11 Thread Tomek Grabiec
Basic blocks which are unreachable in CFG and are not
exception handlers will not be converted.

This is a workaround for a problem encountered in bytecode generated
by clojure. Clojure generates unreachable basic blocks which leave
empty mimic stack and jump to a basic block which expects a value on
mimic stack. This breaks mimic stack resolution because we have
different stack depths on different paths (bb1->bb3 and bb2->bb3):

...

--- bb1 (unreachable)
[main][ 354 ]  0xa7  goto   359

--- bb2
[main][ 357 ]  0x57  pop
[main][ 358 ]  0x01  aconst_null

--- bb3
[main][ 359 ]  0xb0  areturn

Signed-off-by: Tomek Grabiec 
---
 jit/bytecode-to-ir.c |3 +++
 1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/jit/bytecode-to-ir.c b/jit/bytecode-to-ir.c
index a93a2ca..5f66587 100644
--- a/jit/bytecode-to-ir.c
+++ b/jit/bytecode-to-ir.c
@@ -434,6 +434,9 @@ int convert_to_ir(struct compilation_unit *cu)
 * really converted all basic blocks.
 */
for_each_basic_block(bb, &cu->bb_list) {
+   if (!bb->is_eh)
+   continue;
+
err = convert_bb_to_ir(bb);
if (err)
break;
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/3] vm: do not return <init> and <clinit> methods in native_vmclass_get_declared_methods()

2009-10-10 Thread Tomek Grabiec
The API spec says we shouldn't do so.

Signed-off-by: Tomek Grabiec 
---
 include/vm/method.h |5 +
 vm/reflection.c |6 +-
 2 files changed, 10 insertions(+), 1 deletions(-)

diff --git a/include/vm/method.h b/include/vm/method.h
index ccf1db8..7c9e0d7 100644
--- a/include/vm/method.h
+++ b/include/vm/method.h
@@ -119,6 +119,11 @@ static inline bool vm_method_is_vm_native(struct vm_method 
*vmm)
&& vmm->is_vm_native;
 }
 
+static inline bool vm_method_is_special(struct vm_method *vmm)
+{
+   return vmm->name[0] == '<';
+}
+
 static inline enum vm_type method_return_type(struct vm_method *method)
 {
char *return_type = index(method->type, ')') + 1;
diff --git a/vm/reflection.c b/vm/reflection.c
index d0bff58..676a332 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -189,7 +189,8 @@ native_vmclass_get_declared_methods(struct vm_object *clazz,
for (int i = 0; i < vmc->class->methods_count; i++) {
struct vm_method *vmm = &vmc->methods[i];
 
-   if (vm_method_is_public(vmm))
+   if (vm_method_is_public(vmm) &&
+   !vm_method_is_special(vmm))
count ++;
}
} else {
@@ -212,6 +213,9 @@ native_vmclass_get_declared_methods(struct vm_object *clazz,
if (public_only && !vm_method_is_public(vmm))
continue;
 
+   if (vm_method_is_special(vmm))
+   continue;
+
struct vm_object *method
= vm_object_alloc(vm_java_lang_reflect_Method);
 
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/3] vm: rename unwrap_and_set_field() to unwrap()

2009-10-10 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 vm/reflection.c |8 
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/vm/reflection.c b/vm/reflection.c
index c7fc9e5..d0bff58 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -624,7 +624,7 @@ jint native_field_get_modifiers_internal(struct vm_object 
*this)
return vmf->field->access_flags;
 }
 
-static int unwrap_and_set_field(void *field_ptr, enum vm_type type,
+static int unwrap(void *field_ptr, enum vm_type type,
struct vm_object *value)
 {
unsigned long args[] = { (unsigned long) value };
@@ -685,7 +685,7 @@ void native_field_set(struct vm_object *this, struct 
vm_object *o,
enum vm_type type = vm_field_type(vmf);
 
if (vm_field_is_static(vmf)) {
-   unwrap_and_set_field(vmf->class->static_values + vmf->offset,
+   unwrap(vmf->class->static_values + vmf->offset,
 type, value_obj);
} else {
/*
@@ -704,7 +704,7 @@ void native_field_set(struct vm_object *this, struct 
vm_object *o,
return;
}
 
-   unwrap_and_set_field(&o->fields[vmf->offset], type, value_obj);
+   unwrap(&o->fields[vmf->offset], type, value_obj);
}
 }
 
@@ -725,7 +725,7 @@ static int marshall_call_arguments(struct vm_method *vmm, 
unsigned long *args,
struct vm_object *arg_obj;
 
arg_obj = array_get_field_ptr(args_array, args_array_idx++);
-   if (unwrap_and_set_field(&args[idx++], arg->type_info.vm_type, 
arg_obj))
+   if (unwrap(&args[idx++], arg->type_info.vm_type, arg_obj))
return -1;
}
 
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 3/3] jit: fix bug in spill_interval()

2009-10-10 Thread Tomek Grabiec
The function did not handle properly the case when we are splitting
an interval which has child intervals that need reloading from it.
We should change the spill parent of those child intervals to the
new interval, so that they are reloaded correctly.

This bug caused execution of invalid methods due to register
corruption, which prevented java.awt.image.Raster.createWritableRaster()
from correct working.

Signed-off-by: Tomek Grabiec 
---
 jit/linear-scan.c |   14 ++
 1 files changed, 14 insertions(+), 0 deletions(-)

diff --git a/jit/linear-scan.c b/jit/linear-scan.c
index 05ff33d..9940c80 100644
--- a/jit/linear-scan.c
+++ b/jit/linear-scan.c
@@ -126,6 +126,20 @@ static void spill_interval(struct live_interval *it, 
unsigned long pos,
new = split_interval_at(new, next_pos);
 
/*
+* If any child interval of @it must be reloaded from
+* @it then we have to update its spill parent to @new.
+*/
+   struct live_interval *child = new->next_child;
+   while (child) {
+   if (child->need_reload && child->spill_parent == it)
+   child->spill_parent = new;
+
+   child = child->next_child;
+   }
+
+   new->need_spill = it->need_spill;
+
+   /*
 * When next use position is a write then we must not
 * reload the new interval. One reason for this is
 * that it's unnecessary. Another one is that we won't
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] vm: fix unwrap_and_set_field()

2009-10-09 Thread Tomek Grabiec
'This' pointer was not passed to vm_call_method_this_a() in args which
caused a SIGSEGV inside that function.

Signed-off-by: Tomek Grabiec 
---
 vm/call.c   |1 +
 vm/reflection.c |6 --
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/vm/call.c b/vm/call.c
index a33e6a7..cc2f84d 100644
--- a/vm/call.c
+++ b/vm/call.c
@@ -83,6 +83,7 @@ unsigned long vm_call_method_this_a(struct vm_method *method,
void *target;
 
target = this->class->vtable.native_ptr[method->virtual_index];
+   assert(args != NULL);
assert(args[0] == (unsigned long) this);
 
return call_method_a(method, target, args);
diff --git a/vm/reflection.c b/vm/reflection.c
index 5ce258d..c7fc9e5 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -627,6 +627,8 @@ jint native_field_get_modifiers_internal(struct vm_object 
*this)
 static int unwrap_and_set_field(void *field_ptr, enum vm_type type,
struct vm_object *value)
 {
+   unsigned long args[] = { (unsigned long) value };
+
switch (type) {
case J_REFERENCE:
*(jobject *) field_ptr = value;
@@ -641,11 +643,11 @@ static int unwrap_and_set_field(void *field_ptr, enum 
vm_type type,
 * returned by ireturn anyway.
 */
*(long *) field_ptr = 
vm_call_method_this_a(vm_java_lang_Number_intValue,
-   value, NULL);
+   value, args);
return 0;
case J_FLOAT:
*(jfloat *) field_ptr = (jfloat) 
vm_call_method_this_a(vm_java_lang_Number_floatValue,
-  value, 
NULL);
+  value, 
args);
return 0;
case J_LONG:
case J_DOUBLE:
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] vm: fix call_virtual_method()

2009-10-09 Thread Tomek Grabiec
'This' pointer must be added to args manually. vm_call_method_this_a()
expects that it is already in args to avoid unnecessary copying.

Signed-off-by: Tomek Grabiec 
---
 vm/call.c   |6 +-
 vm/reflection.c |3 ++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/vm/call.c b/vm/call.c
index eaf2b4f..a33e6a7 100644
--- a/vm/call.c
+++ b/vm/call.c
@@ -80,7 +80,11 @@ unsigned long vm_call_method_this_a(struct vm_method *method,
struct vm_object *this,
unsigned long *args)
 {
-   void *target = this->class->vtable.native_ptr[method->virtual_index];
+   void *target;
+
+   target = this->class->vtable.native_ptr[method->virtual_index];
+   assert(args[0] == (unsigned long) this);
+
return call_method_a(method, target, args);
 }
 
diff --git a/vm/reflection.c b/vm/reflection.c
index 80db700..5ce258d 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -736,7 +736,8 @@ call_virtual_method(struct vm_method *vmm, struct vm_object 
*o,
 {
unsigned long args[vmm->args_count];
 
-   if (marshall_call_arguments(vmm, args, args_array))
+   args[0] = (unsigned long) o;
+   if (marshall_call_arguments(vmm, args + 1, args_array))
return NULL;
 
return (struct vm_object *) vm_call_method_this_a(vmm, o, args);
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] cafebabe: fix bug in cafebabe_stream_close()

2009-10-09 Thread Tomek Grabiec
The bracket was misplaced causing close() to use wrong file descriptor.
This led to the following error message for tetris:
.: Fatal IO error 9 (Bad file descriptor) on X server :0.0.

CC: Vegard Nossum 
Signed-off-by: Tomek Grabiec 
---
 cafebabe/src/cafebabe/stream.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/cafebabe/src/cafebabe/stream.c b/cafebabe/src/cafebabe/stream.c
index 2d45484..79b1f9c 100644
--- a/cafebabe/src/cafebabe/stream.c
+++ b/cafebabe/src/cafebabe/stream.c
@@ -103,7 +103,7 @@ cafebabe_stream_close(struct cafebabe_stream *s)
 
/* We try not to leak file descriptors. */
do {
-   if (close(s->fd == -1)) {
+   if (close(s->fd) == -1) {
if (errno == EINTR)
continue;
 
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 3/5] vm: implement all GetStatic*Field() JNI functions

2009-10-08 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 vm/jni-interface.c |   52 ++--
 1 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/vm/jni-interface.c b/vm/jni-interface.c
index b3b205e..ac8188f 100644
--- a/vm/jni-interface.c
+++ b/vm/jni-interface.c
@@ -727,24 +727,24 @@ vm_jni_get_static_field_id(struct vm_jni_env *env, jclass 
clazz,
return fb;
 }
 
-static jdouble
-vm_jni_get_static_double_field(struct vm_jni_env *env, jobject object,
-  jfieldID field)
-{
-   enter_vm_from_jni();
-
-   if (!object) {
-   signal_new_exception(vm_java_lang_NullPointerException, NULL);
-   return 0;
-   }
-
-   if (vm_field_type(field) != J_DOUBLE || !vm_field_is_static(field)) {
-   NOT_IMPLEMENTED;
-   return 0;
-   }
+#define DEFINE_GET_STATIC_FIELD(func, type, get)   \
+   static type \
+   func(struct vm_jni_env *env, jobject object, jfieldID field)\
+   {   \
+   enter_vm_from_jni();\
+   \
+   return get(field);  \
+   }   \
 
-   return static_field_get_double(field);
-}
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_object_field, jobject, 
static_field_get_object);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_boolean_field, jboolean, 
static_field_get_boolean);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_byte_field, jbyte, 
static_field_get_byte);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_char_field, jchar, 
static_field_get_char);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_short_field, jshort, 
static_field_get_short);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_int_field, jint, 
static_field_get_int);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_long_field, jlong, 
static_field_get_long);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_float_field, jfloat, 
static_field_get_float);
+DEFINE_GET_STATIC_FIELD(vm_jni_get_static_double_field, jdouble, 
static_field_get_double);
 
 #define DEFINE_SET_STATIC_FIELD(func, type, set)   \
static void \
@@ -1224,19 +1224,19 @@ void *vm_jni_native_interface[] = {
vm_jni_call_static_void_method,
vm_jni_call_static_void_method_v,
NULL, /* CallStaticVoidMethodA */
-   vm_jni_get_static_field_id, /* GetStaticFieldID */
+   vm_jni_get_static_field_id,
 
/* 145 */
-   NULL, /* GetStaticObjectField */
-   NULL, /* GetStaticBooleanField */
-   NULL, /* GetStaticByteField */
-   NULL, /* GetStaticCharField */
-   NULL, /* GetStaticShortField */
+   vm_jni_get_static_object_field,
+   vm_jni_get_static_boolean_field,
+   vm_jni_get_static_byte_field,
+   vm_jni_get_static_char_field,
+   vm_jni_get_static_short_field,
 
/* 150 */
-   NULL, /* GetStaticIntField */
-   NULL, /* GetStaticLongField */
-   NULL, /* GetStaticFloatField */
+   vm_jni_get_static_int_field,
+   vm_jni_get_static_long_field,
+   vm_jni_get_static_float_field,
vm_jni_get_static_double_field,
vm_jni_set_static_object_field,
 
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 5/5] vm: fix pack_args()

2009-10-08 Thread Tomek Grabiec
The condition should be negated.

Signed-off-by: Tomek Grabiec 
---
 vm/jni-interface.c |6 +++---
 1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/vm/jni-interface.c b/vm/jni-interface.c
index cc1e7ca..dca174a 100644
--- a/vm/jni-interface.c
+++ b/vm/jni-interface.c
@@ -912,12 +912,12 @@ static inline void pack_args(struct vm_method *vmm, 
unsigned long *packed_args,
idx = 0;
 
list_for_each_entry(arg, &vmm->args, list_node) {
-   if (arg->type_info.vm_type != J_LONG &&
-   arg->type_info.vm_type != J_DOUBLE) {
+   if (arg->type_info.vm_type == J_LONG ||
+   arg->type_info.vm_type == J_DOUBLE) {
packed_args[packed_idx++] = low_64(args[idx]);
packed_args[packed_idx++] = high_64(args[idx++]);
} else {
-   packed_args[packed_idx++] = args[idx++] & ~0ul;
+   packed_args[packed_idx++] = low_64(args[idx++]);
}
}
 #else
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/5] vm: fix vm_jni_set_static_*()

2009-10-08 Thread Tomek Grabiec
Those functions were using non-static field setters on a class object,
which is incorrect.

Signed-off-by: Tomek Grabiec 
---
 vm/jni-interface.c |   20 ++--
 1 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/vm/jni-interface.c b/vm/jni-interface.c
index 7e502b7..b3b205e 100644
--- a/vm/jni-interface.c
+++ b/vm/jni-interface.c
@@ -753,18 +753,18 @@ vm_jni_get_static_double_field(struct vm_jni_env *env, 
jobject object,
{   \
enter_vm_from_jni();\
\
-   set(object, field, value);  \
+   set(field, value);  \
}   \
 
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_object_field, jobject, 
field_set_object);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_boolean_field, jboolean, 
field_set_boolean);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_byte_field, jbyte, field_set_byte);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_char_field, jchar, field_set_char);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_short_field, jshort, 
field_set_short);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_int_field, jint, field_set_int);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_long_field, jlong, field_set_long);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_float_field, jfloat, 
field_set_float);
-DEFINE_SET_STATIC_FIELD(vm_jni_set_static_double_field, jdouble, 
field_set_double);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_object_field, jobject, 
static_field_set_object);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_boolean_field, jboolean, 
static_field_set_boolean);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_byte_field, jbyte, 
static_field_set_byte);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_char_field, jchar, 
static_field_set_char);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_short_field, jshort, 
static_field_set_short);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_int_field, jint, 
static_field_set_int);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_long_field, jlong, 
static_field_set_long);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_float_field, jfloat, 
static_field_set_float);
+DEFINE_SET_STATIC_FIELD(vm_jni_set_static_double_field, jdouble, 
static_field_set_double);
 
 static jboolean
 vm_jni_call_static_boolean_method(struct vm_jni_env *env, jclass clazz,
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 4/5] vm: fix vm_jni_new_object_a()

2009-10-08 Thread Tomek Grabiec
We should call the constructor virtually not statically.

Signed-off-by: Tomek Grabiec 
---
 vm/jni-interface.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/vm/jni-interface.c b/vm/jni-interface.c
index ac8188f..cc1e7ca 100644
--- a/vm/jni-interface.c
+++ b/vm/jni-interface.c
@@ -945,7 +945,7 @@ vm_jni_new_object_a(struct vm_jni_env *env, jclass clazz, 
jmethodID method,
packed_args[0] = (unsigned long) result;
pack_args(method, packed_args + 1, args);
 
-   vm_call_method_a(method, packed_args);
+   vm_call_method_this_a(method, result, packed_args);
 
return result;
 }
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/5] vm: fix vm_jni_get_static_double_field()

2009-10-08 Thread Tomek Grabiec
The function incorrectly used non-static field getter which was
tracked down with help of valgrind:

==6229== Invalid read of size 4
==6229==at 0x8075E8C: vm_jni_get_static_double_field (object.h:154)
==6229==by 0x639A3B6: Java_java_lang_VMDouble_initIDs (in 
/usr/lib/classpath/libjavalang.so.0.0.0)
==6229==by 0x89DCC34: ???
==6229==by 0x218BB927: ???
==6229==by 0x806B6B6: jit_magic_trampoline (class.h:99)
==6229==by 0x89DCA3C: ???
==6229==by 0x89DCB2D: ???
==6229==by 0x1EEF47A7: ???
==6229==by 0x1EEF47A7: ???
==6229==by 0x1EEF47A7: ???
==6229==by 0x1EEF47A7: ???
==6229==by 0x1EEF47A7: ???
==6229==  Address 0x1f0d51f8 is not stack'd, malloc'd or (recently) free'd

Signed-off-by: Tomek Grabiec 
---
 vm/jni-interface.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/vm/jni-interface.c b/vm/jni-interface.c
index 73cfba2..7e502b7 100644
--- a/vm/jni-interface.c
+++ b/vm/jni-interface.c
@@ -743,7 +743,7 @@ vm_jni_get_static_double_field(struct vm_jni_env *env, 
jobject object,
return 0;
}
 
-   return field_get_double(object, field);
+   return static_field_get_double(field);
 }
 
 #define DEFINE_SET_STATIC_FIELD(func, type, set)   \
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 4/5] vm: implement thread interruption operations.

2009-10-07 Thread Tomek Grabiec
The following natives are implemented:
java/lang/VMThread.isInterrupted()
java/lang/VMThread.interrupted()
java/lang/VMThread.interrupt()

Signed-off-by: Tomek Grabiec 
---
 Makefile|1 +
 include/vm/preload.h|1 +
 include/vm/thread.h |7 
 regression/jvm/MonitorTest.java |   65 +++
 test/vm/preload-stub.c  |1 +
 vm/jato.c   |   24 ++
 vm/monitor.c|   18 +--
 vm/preload.c|2 +
 vm/thread.c |   47 
 9 files changed, 163 insertions(+), 3 deletions(-)
 create mode 100644 regression/jvm/MonitorTest.java

diff --git a/Makefile b/Makefile
index 6dcd736..93088c9 100644
--- a/Makefile
+++ b/Makefile
@@ -293,6 +293,7 @@ REGRESSION_TEST_SUITE_CLASSES = \
regression/jvm/LongArithmeticTest.java \
regression/jvm/MethodInvocationAndReturnTest.java \
regression/jvm/MethodInvocationExceptionsTest.java \
+   regression/jvm/MonitorTest.java \
regression/jvm/MultithreadingTest.java \
regression/jvm/ObjectArrayTest.java \
regression/jvm/ObjectCreationAndManipulationExceptionsTest.java \
diff --git a/include/vm/preload.h b/include/vm/preload.h
index b65b938..c71d8f9 100644
--- a/include/vm/preload.h
+++ b/include/vm/preload.h
@@ -49,6 +49,7 @@ extern struct vm_class *vm_java_lang_Short;
 extern struct vm_class *vm_java_lang_IllegalArgumentException;
 extern struct vm_class *vm_java_lang_ClassLoader;
 extern struct vm_class *vm_java_lang_Number;
+extern struct vm_class *vm_java_lang_InterruptedException;
 extern struct vm_class *vm_boolean_class;
 extern struct vm_class *vm_char_class;
 extern struct vm_class *vm_float_class;
diff --git a/include/vm/thread.h b/include/vm/thread.h
index afb5ede..f6e6551 100644
--- a/include/vm/thread.h
+++ b/include/vm/thread.h
@@ -3,6 +3,8 @@
 
 #include "lib/list.h"
 
+#include "vm/object.h"
+
 #include  /* for NOT_IMPLEMENTED */
 #include 
 
@@ -26,6 +28,8 @@ struct vm_thread {
pthread_t posix_id;
enum vm_thread_state state;
struct list_head list_node;
+   bool interrupted;
+   struct vm_monitor *wait_mon;
 };
 
 struct vm_exec_env {
@@ -52,5 +56,8 @@ void vm_thread_wait_for_non_daemons(void);
 void vm_thread_set_state(struct vm_thread *thread, enum vm_thread_state state);
 struct vm_object *vm_thread_get_java_thread(struct vm_thread *thread);
 char *vm_thread_get_name(struct vm_thread *thread);
+bool vm_thread_is_interrupted(struct vm_thread *thread);
+bool vm_thread_interrupted(struct vm_thread *thread);
+void vm_thread_interrupt(struct vm_thread *thread);
 
 #endif
diff --git a/regression/jvm/MonitorTest.java b/regression/jvm/MonitorTest.java
new file mode 100644
index 0000000..61ad5ef
--- /dev/null
+++ b/regression/jvm/MonitorTest.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2009 Tomasz Grabiec
+ *
+ * This file is released under the GPL version 2 with the following
+ * clarification and special exception:
+ *
+ * Linking this library statically or dynamically with other modules is
+ * making a combined work based on this library. Thus, the terms and
+ * conditions of the GNU General Public License cover the whole
+ * combination.
+ *
+ * As a special exception, the copyright holders of this library give you
+ * permission to link this library with independent modules to produce an
+ * executable, regardless of the license terms of these independent
+ * modules, and to copy and distribute the resulting executable under terms
+ * of your choice, provided that you also meet, for each linked independent
+ * module, the terms and conditions of the license of that module. An
+ * independent module is a module which is not derived from or based on
+ * this library. If you modify this library, you may extend this exception
+ * to your version of the library, but you are not obligated to do so. If
+ * you do not wish to do so, delete this exception statement from your
+ * version.
+ *
+ * Please refer to the file LICENSE for details.
+ */
+package jvm;
+
+/**
+ * @author Tomasz Grabiec
+ */
+public class MonitorTest extends TestCase {
+public static void testInterruptedWait() {
+Thread t = new Thread() {
+public void run() {
+boolean caught = false;
+
+synchronized (this) {
+notify();
+try {
+wait();
+} catch (InterruptedException e) {
+caught = true;
+}
+
+assertTrue(caught);
+}
+}
+};
+
+try {
+synchronized (t) {
+t.start();
+t.wait();
+   

[PATCH 5/5] x86: introduce valgrind workaround for exception guards.

2009-10-07 Thread Tomek Grabiec
Installing signal bottom half doesn't work well in valgrind
environment (it causes an error and application is killed). This
introduces a workaround for this situation when caused by exception
guards. Exception guards are used to catch signalled exceptions via
SIGSEGV handler and transfer control to the exception handler or
unwind. The workaround is to inline exception check and don't use
signal handlers when run on valgrind.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |9 +++--
 arch/x86/unwind_32.S   |   27 +++
 include/jit/exception.h|5 -
 jit/exception.c|   10 ++
 4 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 5383368..c2d06c3 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -2778,6 +2778,11 @@ static void select_exception_test(struct basic_block *bb,
unsigned long exception_guard_offset;
struct var_info *reg;
 
+   if (running_on_valgrind) {
+   select_insn(bb, tree, rel_insn(INSN_CALL_REL, (unsigned long) 
exception_check));
+   return;
+   }
+
reg = get_var(bb->b_parent, GPR_VM_TYPE);
 
exception_guard_offset = get_thread_local_offset(&exception_guard);
@@ -2970,11 +2975,11 @@ emulate_op_64(struct _MBState *state, struct 
basic_block *s,
else
method_args_cleanup(s, tree, 3);
 
-   select_exception_test(s, tree);
-
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, state->reg1));
if (edx)
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, edx, 
state->reg2));
+
+   select_exception_test(s, tree);
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/unwind_32.S b/arch/x86/unwind_32.S
index 6b78563..e35ae46 100644
--- a/arch/x86/unwind_32.S
+++ b/arch/x86/unwind_32.S
@@ -1,4 +1,5 @@
 .global unwind
+.global exception_check
 .text
 
 /*
@@ -39,3 +40,29 @@ unwind:
 
pushl %eax
ret
+
+/*
+ * exception_check - is a part of valgrind workaround for exception guards.
+ * it checks whether an exception has occurred, and if it has, control
+ * is transferred directly to the exception handler (possibly unwind block).
+ */
+exception_check:
+   /* push return address - 1 */
+   pushl   (%esp)
+   decl(%esp)
+
+   pushl   %ebp
+
+   pushl   4(%esp) # return address
+   calljit_lookup_cu
+   add $4, %esp
+   push%eax
+
+   call throw_from_jit_checked
+   addl $12, %esp
+   test %eax, %eax
+   jz 1f
+   pushl %eax
+   ret
+1:
+   ret
\ No newline at end of file
diff --git a/include/jit/exception.h b/include/jit/exception.h
index aeb6843..cadb6a1 100644
--- a/include/jit/exception.h
+++ b/include/jit/exception.h
@@ -50,12 +50,15 @@ lookup_eh_entry(struct vm_method *method, unsigned long 
target);
 unsigned char *throw_from_jit(struct compilation_unit *cu,
  struct jit_stack_frame *frame,
  unsigned char *native_ptr);
-
+unsigned char *throw_from_jit_checked(struct compilation_unit *cu,
+ struct jit_stack_frame *frame,
+ unsigned char *native_ptr);
 int insert_exception_spill_insns(struct compilation_unit *cu);
 unsigned char *throw_exception(struct compilation_unit *cu,
   struct vm_object *exception);
 void throw_from_trampoline(void *ctx, struct vm_object *exception);
 void unwind(void);
+void exception_check(void);
 void signal_exception(struct vm_object *obj);
 void signal_new_exception(struct vm_class *vmc, const char *msg);
 void signal_new_exception_with_cause(struct vm_class *vmc,
diff --git a/jit/exception.c b/jit/exception.c
index 0171ca8..b412e25 100644
--- a/jit/exception.c
+++ b/jit/exception.c
@@ -362,3 +362,13 @@ print_exception_table(const struct vm_method *method,
trace_printf("Class %s\n", catch_class->name);
}
 }
+
+unsigned char *
+throw_from_jit_checked(struct compilation_unit *cu, struct jit_stack_frame 
*frame,
+  unsigned char *native_ptr)
+{
+   if (exception_occurred())
+   return throw_from_jit(cu, frame, native_ptr);
+
+   return NULL;
+}
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/5] vm: fix parse_method_type()

2009-10-07 Thread Tomek Grabiec
Call arguments were incorrectly inserted to args list causing the list
to represent the reverse order of arguments.

Signed-off-by: Tomek Grabiec 
---
 vm/types.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/vm/types.c b/vm/types.c
index e20aecf..f4fe84d 100644
--- a/vm/types.c
+++ b/vm/types.c
@@ -273,7 +273,7 @@ int parse_method_type(struct vm_method *vmm)
if (parse_type(&type_str, &arg->type_info))
return -1;
 
-   list_add(&arg->list_node, &vmm->args);
+   list_add_tail(&arg->list_node, &vmm->args);
}
 
type_str++;
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 3/5] vm: unify code in vm_monitor_wait() and vm_monitor_timedwait()

2009-10-07 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 vm/monitor.c |   78 +
 1 files changed, 34 insertions(+), 44 deletions(-)

diff --git a/vm/monitor.c b/vm/monitor.c
index 4a22584..5705954 100644
--- a/vm/monitor.c
+++ b/vm/monitor.c
@@ -141,10 +141,9 @@ int vm_monitor_unlock(struct vm_monitor *mon)
return err;
 }
 
-int vm_monitor_timed_wait(struct vm_monitor *mon, long long ms, int ns)
+static int vm_monitor_do_wait(struct vm_monitor *mon, struct timespec 
*timespec)
 {
struct vm_thread *self;
-   struct timespec timespec;
int old_lock_count;
int err;
 
@@ -154,33 +153,28 @@ int vm_monitor_timed_wait(struct vm_monitor *mon, long 
long ms, int ns)
return -1;
}
 
-   /*
-* XXX: we must use CLOCK_REALTIME here because
-* pthread_cond_timedwait() uses this clock.
-*/
-   clock_gettime(CLOCK_REALTIME, &timespec);
-
-   timespec.tv_sec += ms / 1000;
-   timespec.tv_nsec += (long)ns + (long)(ms % 1000) * 1000000l;
-
-   if (timespec.tv_nsec >= 1000000000l) {
-   timespec.tv_sec++;
-   timespec.tv_nsec -= 1000000000l;
-   }
-
old_lock_count = mon->lock_count;
-
mon->lock_count = 0;
vm_monitor_set_owner(mon, NULL);
 
self = vm_thread_self();
 
-   vm_thread_set_state(self, VM_THREAD_STATE_TIMED_WAITING);
-   err = pthread_cond_timedwait(&mon->cond, &mon->mutex, &timespec);
-   vm_thread_set_state(self, VM_THREAD_STATE_RUNNABLE);
+   pthread_mutex_lock(&self->mutex);
+   if (timespec != NULL)
+   self->state = VM_THREAD_STATE_TIMED_WAITING;
+   else
+   self->state = VM_THREAD_STATE_WAITING;
+
+   pthread_mutex_unlock(&self->mutex);
 
-   if (err == ETIMEDOUT)
-   err = 0;
+   if (timespec) {
+   err = pthread_cond_timedwait(&mon->cond, &mon->mutex, timespec);
+   if (err == ETIMEDOUT)
+   err = 0;
+   } else
+   err = pthread_cond_wait(&mon->cond, &mon->mutex);
+
+   vm_thread_set_state(self, VM_THREAD_STATE_RUNNABLE);
 
vm_monitor_set_owner(mon, self);
mon->lock_count = old_lock_count;
@@ -189,34 +183,30 @@ int vm_monitor_timed_wait(struct vm_monitor *mon, long 
long ms, int ns)
return err;
 }
 
-int vm_monitor_wait(struct vm_monitor *mon)
+int vm_monitor_timed_wait(struct vm_monitor *mon, long long ms, int ns)
 {
-   struct vm_thread *self;
-   int old_lock_count;
-   int err;
-
-   self = vm_thread_self();
-
-   if (vm_monitor_get_owner(mon) != vm_thread_self()) {
-   signal_new_exception(vm_java_lang_IllegalMonitorStateException,
-NULL);
-   return -1;
-   }
+   struct timespec timespec;
 
-   old_lock_count = mon->lock_count;
+   /*
+* XXX: we must use CLOCK_REALTIME here because
+* pthread_cond_timedwait() uses this clock.
+*/
+   clock_gettime(CLOCK_REALTIME, &timespec);
 
-   mon->lock_count = 0;
-   vm_monitor_set_owner(mon, NULL);
+   timespec.tv_sec += ms / 1000;
+   timespec.tv_nsec += (long)ns + (long)(ms % 1000) * 1000000l;
 
-   vm_thread_set_state(self, VM_THREAD_STATE_WAITING);
-   err = pthread_cond_wait(&mon->cond, &mon->mutex);
-   vm_thread_set_state(self, VM_THREAD_STATE_RUNNABLE);
+   if (timespec.tv_nsec >= 1000000000l) {
+   timespec.tv_sec++;
+   timespec.tv_nsec -= 1000000000l;
+   }
 
-   vm_monitor_set_owner(mon, self);
-   mon->lock_count = old_lock_count;
+   return vm_monitor_do_wait(mon, &timespec);
+}
 
-   /* TODO: check if thread has been interrupted. */
-   return err;
+int vm_monitor_wait(struct vm_monitor *mon)
+{
+   return vm_monitor_do_wait(mon, NULL);
 }
 
 int vm_monitor_notify(struct vm_monitor *mon)
-- 
1.6.0.4


--
Come build with us! The BlackBerry(R) Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9 - 12, 2009. Register now!
http://p.sf.net/sfu/devconference
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/5] vm: move monitor operations to vm/monitor.c

2009-10-07 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 Makefile   |1 +
 include/vm/object.h|1 +
 test/arch-x86/Makefile |1 +
 vm/jato.c  |1 +
 vm/monitor.c   |  242 
 vm/object.c|  191 --
 6 files changed, 246 insertions(+), 191 deletions(-)
 create mode 100644 vm/monitor.c

diff --git a/Makefile b/Makefile
index 00c5522..6dcd736 100644
--- a/Makefile
+++ b/Makefile
@@ -114,6 +114,7 @@ VM_OBJS = \
vm/jni-interface.o  \
vm/jni.o\
vm/method.o \
+   vm/monitor.o\
vm/natives.o\
vm/object.o \
vm/preload.o\
diff --git a/include/vm/object.h b/include/vm/object.h
index 0d4299b..fa918b9 100644
--- a/include/vm/object.h
+++ b/include/vm/object.h
@@ -53,6 +53,7 @@ struct vm_object {
 /* XXX: BUILD_BUG_ON(offsetof(vm_object, class) != 0); */
 
 int init_vm_objects(void);
+int init_vm_monitors(void);
 
 struct vm_object *vm_object_alloc(struct vm_class *class);
 struct vm_object *vm_object_alloc_primitive_array(int type, int count);
diff --git a/test/arch-x86/Makefile b/test/arch-x86/Makefile
index e5cbb63..484d4c7 100644
--- a/test/arch-x86/Makefile
+++ b/test/arch-x86/Makefile
@@ -68,6 +68,7 @@ TOPLEVEL_OBJS := \
vm/itable.o \
vm/jni-interface.o \
vm/method.o \
+   vm/monitor.o \
vm/object.o \
vm/static.o \
vm/string.o \
diff --git a/vm/jato.c b/vm/jato.c
index 1c84ccf..5d0b815 100644
--- a/vm/jato.c
+++ b/vm/jato.c
@@ -1321,6 +1321,7 @@ main(int argc, char *argv[])
classloader_init();
 
init_vm_objects();
+   init_vm_monitors();
 
jit_text_init();
 
diff --git a/vm/monitor.c b/vm/monitor.c
new file mode 100644
index 0000000..4a22584
--- /dev/null
+++ b/vm/monitor.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2009 Tomasz Grabiec
+ * 
+ * This file is released under the GPL version 2 with the following
+ * clarification and special exception:
+ *
+ * Linking this library statically or dynamically with other modules is
+ * making a combined work based on this library. Thus, the terms and
+ * conditions of the GNU General Public License cover the whole
+ * combination.
+ *
+ * As a special exception, the copyright holders of this library give you
+ * permission to link this library with independent modules to produce an
+ * executable, regardless of the license terms of these independent
+ * modules, and to copy and distribute the resulting executable under terms
+ * of your choice, provided that you also meet, for each linked independent
+ * module, the terms and conditions of the license of that module. An
+ * independent module is a module which is not derived from or based on
+ * this library. If you modify this library, you may extend this exception
+ * to your version of the library, but you are not obligated to do so. If
+ * you do not wish to do so, delete this exception statement from your
+ * version.
+ *
+ * Please refer to the file LICENSE for details.
+ */
+
+#include 
+#include 
+
+#include "jit/exception.h"
+
+#include "vm/object.h"
+#include "vm/preload.h"
+
+static pthread_mutexattr_t monitor_mutexattr;
+
+int init_vm_monitors(void)
+{
+   int err;
+
+   err = pthread_mutexattr_init(&monitor_mutexattr);
+   if (err)
+   return -err;
+
+   err = pthread_mutexattr_settype(&monitor_mutexattr,
+   PTHREAD_MUTEX_RECURSIVE);
+   if (err)
+   return -err;
+
+   return 0;
+}
+
+int vm_monitor_init(struct vm_monitor *mon)
+{
+   if (pthread_mutex_init(&mon->owner_mutex, NULL)) {
+   NOT_IMPLEMENTED;
+   return -1;
+   }
+
+   if (pthread_mutex_init(&mon->mutex, &monitor_mutexattr)) {
+   NOT_IMPLEMENTED;
+   return -1;
+   }
+
+   if (pthread_cond_init(&mon->cond, NULL)) {
+   NOT_IMPLEMENTED;
+   return -1;
+   }
+
+   mon->owner = NULL;
+   mon->lock_count = 0;
+
+   return 0;
+}
+
+struct vm_thread *vm_monitor_get_owner(struct vm_monitor *mon)
+{
+   struct vm_thread *owner;
+
+   pthread_mutex_lock(&mon->owner_mutex);
+   owner = mon->owner;
+   pthread_mutex_unlock(&mon->owner_mutex);
+
+   return owner;
+}
+
+void vm_monitor_set_owner(struct vm_monitor *mon, struct vm_thread *owner)
+{
+   pthread_mutex_lock(&mon->owner_mutex);
+   mon->owner = owner;
+   pthread_mutex_unlock(&mon->owner_mutex);
+}
+
+int vm_monitor_lock(struct vm_monitor *mon)
+{
+   struct vm_thread *self;
+   int err;
+
+   self = vm_thread_self();
+   err = 0;
+
+   if (pthread_mutex_trylock(&mon->mutex)) {
+ 

[PATCH 1/2] vm: parse method and field type on initialization

2009-10-06 Thread Tomek Grabiec
struct vm_type_info is introduced to fully describe a java type of a
call argument, return type, field type, etc. Method's and field's type
strings are parsed on initialization and type information is put to
appropriate struct vm_type_infos. Type information for method
arguments is encapsulated in struct vm_method_arg and linked in a list
pointed by struct vm_method.args.

This change removes plenty of calls to type string parsing which
should not be done at run-time but rather on method or field
initialization.

Signed-off-by: Tomek Grabiec 
---
 include/jit/expression.h |   13 ---
 include/vm/field.h   |4 +-
 include/vm/method.h  |7 ++
 include/vm/types.h   |   32 +++-
 jit/invoke-bc.c  |3 +-
 jit/trace-jit.c  |   14 ++--
 test/jit/invoke-bc-test.c|   11 ++-
 test/jit/object-bc-test.c|8 +-
 test/jit/tree-printer-test.c |4 +-
 vm/class.c   |4 +-
 vm/field.c   |   18 ++--
 vm/jni-interface.c   |9 +-
 vm/method.c  |   12 +++-
 vm/reflection.c  |   51 -
 vm/types.c   |  173 ++
 15 files changed, 224 insertions(+), 139 deletions(-)

diff --git a/include/jit/expression.h b/include/jit/expression.h
index d89ff92..6fc87e8 100644
--- a/include/jit/expression.h
+++ b/include/jit/expression.h
@@ -349,17 +349,4 @@ unsigned long nr_args(struct expression *);
 int expr_nr_kids(struct expression *);
 int expr_is_pure(struct expression *);
 
-static inline enum vm_type mimic_stack_type(enum vm_type type)
-{
-   switch (type) {
-   case J_BOOLEAN:
-   case J_BYTE:
-   case J_CHAR:
-   case J_SHORT:
-   return J_INT;
-   default:
-   return type;
-   }
-}
-
 #endif
diff --git a/include/vm/field.h b/include/vm/field.h
index aa4a25d..0c9d150 100644
--- a/include/vm/field.h
+++ b/include/vm/field.h
@@ -20,6 +20,8 @@ struct vm_field {
char *name;
char *type;
 
+   struct vm_type_info type_info;
+
unsigned int offset;
 };
 
@@ -45,7 +47,7 @@ static inline bool vm_field_is_public(const struct vm_field 
*vmf)
 
 static inline enum vm_type vm_field_type(const struct vm_field *vmf)
 {
-   return str_to_type(vmf->type);
+   return vmf->type_info.vm_type;
 }
 
 #endif
diff --git a/include/vm/method.h b/include/vm/method.h
index 54c2b94..ccf1db8 100644
--- a/include/vm/method.h
+++ b/include/vm/method.h
@@ -26,6 +26,11 @@ struct vm_args_map {
 };
 #endif
 
+struct vm_method_arg {
+   struct vm_type_info type_info;
+   struct list_head list_node;
+};
+
 struct vm_method {
struct vm_class *class;
unsigned int method_index;
@@ -40,6 +45,8 @@ struct vm_method {
struct vm_args_map *args_map;
int reg_args_count;
 #endif
+   struct list_head args;
+   struct vm_type_info return_type;
 
struct cafebabe_code_attribute code_attribute;
struct cafebabe_line_number_table_attribute line_number_table_attribute;
diff --git a/include/vm/types.h b/include/vm/types.h
index ae32f69..4695d9c 100644
--- a/include/vm/types.h
+++ b/include/vm/types.h
@@ -4,6 +4,11 @@
 #include 
 #include 
 
+#include "lib/list.h"
+
+struct vm_method;
+struct vm_field;
+
 enum vm_type {
J_VOID,
J_REFERENCE,
@@ -25,19 +30,25 @@ enum vm_type {
 #  define J_NATIVE_PTR J_LONG
 #endif
 
+struct vm_type_info {
+   enum vm_type vm_type;
+   char *class_name;
+};
+
 extern enum vm_type str_to_type(const char *);
 extern enum vm_type get_method_return_type(char *);
 extern unsigned int vm_type_size(enum vm_type);
 
 int skip_type(const char **type);
-int count_arguments(const char *);
+int count_arguments(const struct vm_method *);
 enum vm_type bytecode_type_to_vmtype(int);
 int vmtype_to_bytecode_type(enum vm_type);
 int get_vmtype_size(enum vm_type);
 const char *get_vm_type_name(enum vm_type);
-const char *parse_method_args(const char *, enum vm_type *, char **);
-const char *parse_type(const char *, enum vm_type *, char **);
-unsigned int count_java_arguments(const char *);
+int parse_type(char **, struct vm_type_info *);
+unsigned int count_java_arguments(const struct vm_method *);
+int parse_method_type(struct vm_method *);
+int parse_field_type(struct vm_field *);
 
 static inline bool vm_type_is_float(enum vm_type type)
 {
@@ -51,4 +62,17 @@ static inline int vm_type_slot_size(enum vm_type type)
return 1;
 }
 
+static inline enum vm_type mimic_stack_type(enum vm_type type)
+{
+   switch (type) {
+   case J_BOOLEAN:
+   case J_BYTE:
+   case J_CHAR:
+   case J_SHORT:
+   return J_INT;
+   default:
+   return type;
+   }
+}
+
 #endif
diff --git a/jit/invoke-bc.c b/jit/invoke-bc.c
index 698dd36..45a51f1 100644
--- a/jit/invoke-bc.c
+++ b/jit/invoke-bc.c
@@ -56,8 +56,7 @@ static unsigned int 

[PATCH 2/2] vm: implement java/lang/reflect/Method.getReturnType()

2009-10-06 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/vm/reflection.h |1 +
 vm/jato.c   |1 +
 vm/reflection.c |   18 ++
 3 files changed, 20 insertions(+), 0 deletions(-)

diff --git a/include/vm/reflection.h b/include/vm/reflection.h
index f9c709a..c1e3aa4 100644
--- a/include/vm/reflection.h
+++ b/include/vm/reflection.h
@@ -39,5 +39,6 @@ native_method_invokenative(struct vm_object *method, struct 
vm_object *o,
   struct vm_object *declaringClass,
   jint slot);
 void native_field_set(struct vm_object *this, struct vm_object *o, struct 
vm_object *value_obj);
+struct vm_object *native_method_getreturntype(struct vm_object *method);
 
 #endif /* __JATO_VM_REFLECTION_H */
diff --git a/vm/jato.c b/vm/jato.c
index 332b3fb..1c84ccf 100644
--- a/vm/jato.c
+++ b/vm/jato.c
@@ -877,6 +877,7 @@ static struct vm_native natives[] = {
DEFINE_NATIVE("java/lang/reflect/Field", "getType", 
&native_field_gettype),
DEFINE_NATIVE("java/lang/reflect/Method", "getParameterTypes", 
&native_method_get_parameter_types),
DEFINE_NATIVE("java/lang/reflect/Method", "invokeNative", 
&native_method_invokenative),
+   DEFINE_NATIVE("java/lang/reflect/Method", "getReturnType", 
&native_method_getreturntype),
DEFINE_NATIVE("jato/internal/VM", "enableFault", 
&native_vm_enable_fault),
DEFINE_NATIVE("jato/internal/VM", "disableFault", 
&native_vm_disable_fault),
DEFINE_NATIVE("sun/misc/Unsafe", "compareAndSwapInt", 
native_unsafe_compare_and_swap_int),
diff --git a/vm/reflection.c b/vm/reflection.c
index d180634..80db700 100644
--- a/vm/reflection.c
+++ b/vm/reflection.c
@@ -810,3 +810,21 @@ struct vm_object *native_field_gettype(struct vm_object 
*this)
 
return vmc->object;
 }
+
+struct vm_object *native_method_getreturntype(struct vm_object *method)
+{
+   struct vm_method *vmm = vm_object_to_vm_method(method);
+   if (!vmm)
+   return NULL;
+
+   struct vm_class *vmc;
+
+   vmc = vm_type_to_class(vmm->class->classloader, &vmm->return_type);
+   if (vmc)
+   vm_class_ensure_init(vmc);
+
+   if (!vmc || exception_occurred())
+   return NULL;
+
+   return vmc->object;
+}
-- 
1.6.0.4


--
Come build with us! The BlackBerry® Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9-12, 2009. Register now!
http://p.sf.net/sfu/devconf
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] README: fix package dependencies for Ubuntu

2009-10-04 Thread Tomek Grabiec
We need valgrind headers to compile jato, since valgrind workarounds
were introduced.

Signed-off-by: Tomek Grabiec 
---
 README |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/README b/README
index 03985d6..97a9467 100644
--- a/README
+++ b/README
@@ -13,7 +13,7 @@ How can I try it out?
 
 For Ubuntu:
 
-$ sudo apt-get install ecj classpath libffi-dev binutils-dev libzip-dev 
libglib2.0-dev
+$ sudo apt-get install ecj classpath libffi-dev binutils-dev libzip-dev 
libglib2.0-dev valgrind
 
 For Fedora:
 
-- 
1.6.0.4


--
Come build with us! The BlackBerry® Developer Conference in SF, CA
is the only developer event you need to attend this year. Jumpstart your
developing skills, take BlackBerry mobile applications to market and stay 
ahead of the curve. Join us from November 9-12, 2009. Register now!
http://p.sf.net/sfu/devconf
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] jit: fix race condition in fixup_direct_calls()

2009-09-07 Thread Tomek Grabiec
This fixes incorrect lock order reported by helgrind:
==7951== Thread #1: lock order "0x5277F84 before 0x55D7D64" violated
==7951==at 0x4026125: pthread_mutex_lock (in 
/usr/lib/valgrind/x86-linux/vgpreload_helgrind.so)
==7951==by 0x80563BE: fixup_direct_calls (emit-code.c:355)
==7951==by 0x8082A83: jit_magic_trampoline (trampoline.c:161)
==7951==by 0x88D741C: ???
==7951==by 0x88D79F9: ???
==7951==   Required order was established by acquisition of lock at 0x5277F84
==7951==at 0x4026125: pthread_mutex_lock (in 
/usr/lib/valgrind/x86-linux/vgpreload_helgrind.so)
==7951==by 0x80829E0: jit_magic_trampoline (trampoline.c:125)
==7951==by 0x88D00CC: ???
==7951==by 0x88D73CC: ???
==7951==   followed by a later acquisition of lock at 0x55D7D64
==7951==at 0x4026125: pthread_mutex_lock (in 
/usr/lib/valgrind/x86-linux/vgpreload_helgrind.so)
==7951==by 0x8076801: trampoline_add_fixup_site (fixup-site.c:53)
==7951==by 0x805DA8B: invoke (insn-selector.c:754)
==7951==by 0x8063A08: mono_burg_emit (insn-selector.c:3094)
==7951==by 0x805E139: emit_code (insn-selector.c:869)
==7951==by 0x805E37E: insn_select (insn-selector.c:920)
==7951==by 0x805E4D4: select_instructions (insn-selector.c:970)
==7951==by 0x8073303: compile (compiler.c:80)
==7951==by 0x80828A3: jit_java_trampoline (trampoline.c:97)
==7951==by 0x8082A25: jit_magic_trampoline (trampoline.c:135)
==7951==by 0x88D00CC: ???
==7951==by 0x88D73CC: ???

Signed-off-by: Tomek Grabiec 
---
 arch/x86/emit-code.c   |   12 +++-
 arch/x86/insn-selector.brg |6 +++---
 include/jit/compilation-unit.h |1 +
 include/jit/compiler.h |   11 +--
 jit/compilation-unit.c |   18 ++
 jit/emit.c |   12 
 jit/fixup-site.c   |   27 +++
 test/jit/Makefile  |1 +
 8 files changed, 70 insertions(+), 18 deletions(-)

diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c
index ea96a03..f7b2e2a 100644
--- a/arch/x86/emit-code.c
+++ b/arch/x86/emit-code.c
@@ -338,11 +338,9 @@ void fixup_direct_calls(struct jit_trampoline *t, unsigned 
long target)
 
pthread_mutex_lock(&t->mutex);
 
-   list_for_each_entry_safe(this, next, &t->fixup_site_list,
-fixup_list_node) {
+   list_for_each_entry_safe(this, next, &t->fixup_site_list, 
trampoline_node) {
unsigned char *site_addr;
uint32_t new_target;
-   bool is_compiled;
 
/*
 * It is possible that we're fixing calls to
@@ -352,18 +350,14 @@ void fixup_direct_calls(struct jit_trampoline *t, 
unsigned long target)
 * be set yet. We should skip fixing callsites coming
 * from not yet compiled methods.  .
 */
-   pthread_mutex_lock(&this->cu->mutex);
-   is_compiled = this->cu->is_compiled;
-   pthread_mutex_unlock(&this->cu->mutex);
-
-   if (!is_compiled)
+   if (!fixup_site_is_ready(this))
continue;
 
site_addr = fixup_site_addr(this);
new_target = target - ((unsigned long) site_addr + 
CALL_INSN_SIZE);
cpu_write_u32(site_addr+1, new_target);
 
-   list_del(&this->fixup_list_node);
+   list_del(&this->trampoline_node);
free_fixup_site(this);
}
 
diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 8eead10..5383368 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -3207,9 +3207,9 @@ static void invoke(struct basic_block *s, struct 
tree_node *tree)
if (!is_compiled) {
struct fixup_site *fixup;
 
-   fixup = alloc_fixup_site();
-   fixup->cu = s->b_parent;
-   fixup->relcall_insn = call_insn;
+   fixup = alloc_fixup_site(s->b_parent, call_insn);
+   if (!fixup)
+   error("out of memory");
 
trampoline_add_fixup_site(method->trampoline, fixup);
}
diff --git a/include/jit/compilation-unit.h b/include/jit/compilation-unit.h
index c3f55b1..4bfd9d8 100644
--- a/include/jit/compilation-unit.h
+++ b/include/jit/compilation-unit.h
@@ -62,6 +62,7 @@ struct compilation_unit {
unsigned char *unwind_bb_ptr;
 
struct list_head static_fixup_site_list;
+   struct list_head call_fixup_site_list;
struct list_head tableswitch_list;
struct list_head lookupswitch_list;
 
diff --git a/include/jit/compiler.h b/include/jit/compiler.h
index 6e1196a..7e5796b 100644
--- a/include/jit/compiler.h
+++ b/include/jit/compiler.h
@@ -19,12 +19,17 @@ struct statement;
 struct buffer;
 
 struct fi

Re: [PATCH 15/19] x86-64: move received parameters to non-fixed registers

2009-09-06 Thread Tomek Grabiec
Hi,

2009/9/5 Eduard - Gabriel Munteanu :
> We must not keep the parameters we received in fixed registers because
> they can be clobbered by other insn-selector rules that use them (e.g.
> method invocation).
>
> Signed-off-by: Eduard - Gabriel Munteanu 
> ---
>  arch/x86/insn-selector.brg     |   38 +++---
>  include/jit/compilation-unit.h |    4 
>  2 files changed, 35 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
> index f9995fc..17b68a1 100644
> --- a/arch/x86/insn-selector.brg
> +++ b/arch/x86/insn-selector.brg
> @@ -2280,7 +2280,7 @@ stmt:  STMT_STORE(EXPR_TEMPORARY, EXPR_LOCAL) 1
>                        select_insn(s, tree, 
> memlocal_reg_insn(INSN_MOV_MEMLOCAL_REG,
>                                                               slot, dest));
>                } else {
> -                       src = get_fixed_var(s->b_parent, reg);
> +                       src = 
> s->b_parent->non_fixed_args[exprsrc->local_index];
>                        select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, 
> src, dest));
>                }
>        } else {
> @@ -3388,15 +3388,39 @@ static void setup_caller_saved_regs(struct 
> compilation_unit *cu)
>  #else /* CONFIG_X86_32 */
>  static void setup_caller_saved_regs(struct compilation_unit *cu)
>  {
> +       struct var_info **map;
> +       struct var_info *rdi, *rsi, *rdx, *rcx, *r8, *r9;
> +       struct basic_block *bb = cu->entry_bb;
> +
>        get_fixed_var(cu, MACH_REG_RAX);
> -       get_fixed_var(cu, MACH_REG_RDI);
> -       get_fixed_var(cu, MACH_REG_RSI);
> -       get_fixed_var(cu, MACH_REG_RDX);
> -       get_fixed_var(cu, MACH_REG_RCX);
> -       get_fixed_var(cu, MACH_REG_R8);
> -       get_fixed_var(cu, MACH_REG_R9);
>        get_fixed_var(cu, MACH_REG_R10);
>        get_fixed_var(cu, MACH_REG_R11);
> +
> +       cu->non_fixed_args = malloc(6 * sizeof(struct var_info *));
> +       if (!cu->non_fixed_args)
> +               abort();
> +       map = cu->non_fixed_args;
> +
> +       rdi = get_fixed_var(cu, MACH_REG_RDI);
> +       rsi = get_fixed_var(cu, MACH_REG_RSI);
> +       rdx = get_fixed_var(cu, MACH_REG_RDX);
> +       rcx = get_fixed_var(cu, MACH_REG_RCX);
> +       r8 = get_fixed_var(cu, MACH_REG_R8);
> +       r9 = get_fixed_var(cu, MACH_REG_R9);
> +
> +       map[0] = get_var(cu, J_LONG);
> +       map[1] = get_var(cu, J_LONG);
> +       map[2] = get_var(cu, J_LONG);
> +       map[3] = get_var(cu, J_LONG);
> +       map[4] = get_var(cu, J_LONG);
> +       map[5] = get_var(cu, J_LONG);
> +
> +       eh_add_insn(bb, reg_reg_insn(INSN_MOV_REG_REG, rdi, map[0]));
> +       eh_add_insn(bb, reg_reg_insn(INSN_MOV_REG_REG, rsi, map[1]));
> +       eh_add_insn(bb, reg_reg_insn(INSN_MOV_REG_REG, rdx, map[2]));
> +       eh_add_insn(bb, reg_reg_insn(INSN_MOV_REG_REG, rcx, map[3]));
> +       eh_add_insn(bb, reg_reg_insn(INSN_MOV_REG_REG, r8, map[4]));
> +       eh_add_insn(bb, reg_reg_insn(INSN_MOV_REG_REG, r9, map[5]));
>  }
>  #endif /* CONFIG_X86_32 */

It is incorrect to allocate virtual registers of J_LONG type for all
call arguments, regardless of their type.
That's because for garbage collection to work we must track which
virtual registers hold a reference and which don't. We also can't mark
all registers as J_REFERENCE because it could cause that integers
would be interpreted as pointers. I think the proper solution would be
to iterate over method's call argument types, and for argument type T
we should allocate register of type mimic_stack_type(T). I also think
that we only have to emit movs for registers which are actually
holding a call argument, not for all of them, but that's an
optimization.

-- 
Tomek Grabiec

--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] jit: fix conversion of invoke* instructions

2009-09-04 Thread Tomek Grabiec
It is incorrect to convert invocations into expressions which are
pushed onto mimic stack because pushed expressions can be evaluated
any time later after they are pushed. This can result in breaking the
execution sequence, where methods are not invoked in the order they
appear in bytecode.

Invocations are now handled as statements and their result is pushed
onto mimic stack.

Signed-off-by: Tomek Grabiec 
---
 Makefile |5 +-
 arch/x86/insn-selector.brg   |  236 ++
 include/jit/expression.h |   48 +-
 include/jit/statement.h  |   21 
 jit/expression.c |   67 
 jit/invoke-bc.c  |  142 ++
 jit/ostack-bc.c  |   14 +---
 jit/statement.c  |   14 +++
 jit/tree-printer.c   |  123 ++
 regression/jvm/InvokeTest.j  |   33 ++
 regression/run-suite.sh  |1 +
 test/jit/bc-test-utils.c |   15 ++-
 test/jit/bc-test-utils.h |3 +-
 test/jit/invoke-bc-test.c|  214 ++
 test/jit/tree-printer-test.c |   49 -
 15 files changed, 374 insertions(+), 611 deletions(-)
 create mode 100644 regression/jvm/InvokeTest.j

diff --git a/Makefile b/Makefile
index be74fb6..8d57627 100644
--- a/Makefile
+++ b/Makefile
@@ -307,10 +307,11 @@ REGRESSION_TEST_SUITE_CLASSES = \
 
 JASMIN_REGRESSION_TEST_SUITE_CLASSES = \
regression/jvm/DupTest.j \
+   regression/jvm/InvokeResultTest.j \
+   regression/jvm/InvokeTest.j \
regression/jvm/PopTest.j \
regression/jvm/SubroutineTest.j \
-   regression/jvm/WideTest.j \
-   regression/jvm/InvokeResultTest.j
+   regression/jvm/WideTest.j
 
 java-regression: FORCE
$(E) "  JAVAC   " $(REGRESSION_TEST_SUITE_CLASSES)
diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index e587c96..43f6960 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -58,7 +58,7 @@ struct _MBState;
 
 static void select_insn(struct basic_block *bb, struct tree_node *tree, struct 
insn *instruction);
 static void select_exception_test(struct basic_block *bb, struct tree_node 
*tree);
-void finvoke_return_value(struct _MBState *state, struct basic_block *s, 
struct tree_node *tree, enum vm_type ret_vm_type);
+void save_invoke_result(struct basic_block *s, struct tree_node *tree, struct 
vm_method *method, struct statement *stmt);
 
 static unsigned char size_to_scale(int size)
 {
@@ -697,130 +697,6 @@ reg:  OP_XOR(reg, reg) 1
binop_reg_reg_high(state, s, tree, INSN_XOR_REG_REG);
 }
 
-reg:   EXPR_INVOKE(arg) 1
-{
-   struct var_info *eax, *edx = NULL;
-   struct compilation_unit *cu;
-   struct vm_method *method;
-   struct expression *expr;
-
-   expr= to_expr(tree);
-   method  = expr->target_method;
-   cu  = method->compilation_unit;
-
-   eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
-   state->reg1 = get_var(s->b_parent, J_INT);
-
-   if (get_method_return_type(method->type) == J_LONG) {
-   edx = get_fixed_var(s->b_parent, MACH_REG_xDX);
-   state->reg2 = get_var(s->b_parent, J_INT);
-   }
-
-   invoke(s, tree, cu, method);
-
-   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, state->reg1));
-   if (edx != NULL)
-   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, edx, 
state->reg2));
-}
-
-freg:  EXPR_FINVOKEINTERFACE(arg) 1
-{
-   enum vm_type ret_vm_type;
-   struct expression *expr;
-   struct vm_method *method;
-
-   expr= to_expr(tree);
-   method  = expr->target_method;
-
-   ret_vm_type = method_return_type(method);
-   state->reg1 = get_var(s->b_parent, ret_vm_type);
-
-   invokeinterface(state, s, tree);
-   finvoke_return_value(state, s, tree, ret_vm_type);
-}
-
-reg:   EXPR_INVOKEINTERFACE(arg) 1
-{
-   struct var_info *eax, *edx = NULL;
-   struct expression *expr;
-   struct vm_method *method;
-
-   expr= to_expr(tree);
-   method  = expr->target_method;
-
-   eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
-   state->reg1 = get_var(s->b_parent, J_INT);
-
-   if (get_method_return_type(method->type) == J_LONG) {
-   edx = get_fixed_var(s->b_parent, MACH_REG_xDX);
-   state->reg2 = get_var(s->b_parent, J_INT);
-   }
-
-   invokeinterface(state, s, tree);
-
-   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, state->reg1));
-   if (edx != NULL)
-   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, edx, 
state->reg2));
-}
-
-freg:  EXPR_FINVOKE(arg) 1
-{
-   struct compilation_unit *cu;
-   enum vm_type ret_vm_type;
-   struct vm_method *method;
-   struct expression *expr;
-
-   expr= to_expr(tree);

[PATCH 4/4] jit: fix mimic stack content propagation

2009-09-04 Thread Tomek Grabiec
The mimic stack propagation procedure was pushing elements onto
successors' stacks on every CFG edge. This caused that the successor's
stack was larger than it should be. Suppose we have basic block X,
which has N predecessors. For each X's predecessor, Q elements were
pushed onto X's mimic stack, so finally it had N*Q elements instead of
Q. The X's mimic stack was then also propagated, along with extra
elements. This led to generation of many unnecessary register moves.

In some CFG configurations this may lead to degenerative mimic stack
resolution, where STMT_STORE is inserted with src temporary that is
assigned only on some paths. This leads to incorrect liveness analysis
of the temporary register, because it is seen to be used before it is
defined. The register appears to be always live before its use
position, which is wrong, and could cause problems with GC.

Signed-off-by: Tomek Grabiec 
---
 include/jit/basic-block.h |9 
 include/lib/stack.h   |   11 +-
 jit/basic-block.c |1 +
 jit/bytecode-to-ir.c  |   50 
 lib/stack.c   |   17 +++
 5 files changed, 73 insertions(+), 15 deletions(-)

diff --git a/include/jit/basic-block.h b/include/jit/basic-block.h
index b3e3cc1..e175eba 100644
--- a/include/jit/basic-block.h
+++ b/include/jit/basic-block.h
@@ -46,6 +46,10 @@ struct basic_block {
   Adl-Tabatabai et al (1998) for more in-depth explanation.  */
struct stack *mimic_stack;
 
+   /* Holds the size of mimic stack at basic block entry. If
+  mimic stack has not yet been resolved it's set to -1. */
+   long entry_mimic_stack_size;
+
/* Is this basic block an exception handler? */
bool is_eh;
 
@@ -74,6 +78,11 @@ static inline struct basic_block *bb_entry(struct list_head 
*head)
return list_entry(head, struct basic_block, bb_list_node);
 }
 
+static inline bool bb_entry_mimic_stack_set(struct basic_block *bb)
+{
+   return bb->entry_mimic_stack_size != -1;
+}
+
 struct basic_block *alloc_basic_block(struct compilation_unit *, unsigned 
long, unsigned long);
 struct basic_block *get_basic_block(struct compilation_unit *, unsigned long, 
unsigned long);
 void free_basic_block(struct basic_block *);
diff --git a/include/lib/stack.h b/include/lib/stack.h
index e4e79d9..2a5f6bc 100644
--- a/include/lib/stack.h
+++ b/include/lib/stack.h
@@ -1,9 +1,11 @@
 #ifndef LIB_STACK_H
 #define LIB_STACK_H
 
+#include 
+#include 
+#include 
 #include 
 #include 
-#include 
 
 struct stack {
unsigned long   nr_elements;
@@ -37,4 +39,11 @@ static inline bool stack_is_empty(struct stack *stack)
return !stack->nr_elements;
 }
 
+static inline unsigned long stack_size(struct stack *stack)
+{
+   return stack->nr_elements;
+}
+
+void stack_copy(struct stack *src, struct stack *dst);
+
 #endif
diff --git a/jit/basic-block.c b/jit/basic-block.c
index d478315..ff8c11c 100644
--- a/jit/basic-block.c
+++ b/jit/basic-block.c
@@ -41,6 +41,7 @@ struct basic_block *alloc_basic_block(struct compilation_unit 
*b_parent, unsigne
bb->b_parent = b_parent;
bb->start = start;
bb->end = end;
+   bb->entry_mimic_stack_size = -1;
 
return bb;
 }
diff --git a/jit/bytecode-to-ir.c b/jit/bytecode-to-ir.c
index 2923890..b9d08a4 100644
--- a/jit/bytecode-to-ir.c
+++ b/jit/bytecode-to-ir.c
@@ -181,8 +181,14 @@ static int do_convert_bb_to_ir(struct basic_block *bb)
buffer.buffer = cu->method->code_attribute.code;
buffer.pos = bb->start;
 
-   if (bb->is_eh)
+   if (bb->is_eh) {
+   assert(!bb_entry_mimic_stack_set(bb));
stack_push(bb->mimic_stack, exception_ref_expr());
+   bb->entry_mimic_stack_size = 1;
+   }
+
+   if (!bb_entry_mimic_stack_set(bb))
+   bb->entry_mimic_stack_size = 0;
 
while (buffer.pos < bb->end) {
ctx.offset = ctx.buffer->pos;   /* this is fragile */
@@ -194,6 +200,32 @@ static int do_convert_bb_to_ir(struct basic_block *bb)
return err;
 }
 
+static int reload_mimic_stack(struct basic_block *bb, struct stack *reload)
+{
+   unsigned int i;
+
+   for (i = 0; i < reload->nr_elements; i++)
+   bb_add_mimic_stack_expr(bb, reload->elements[i]);
+
+   if (bb_entry_mimic_stack_set(bb)) {
+   if (stack_size(reload) == (unsigned long) 
bb->entry_mimic_stack_size)
+   return 0;
+
+   return warn("stack size differs on different paths"), -EINVAL;
+   }
+
+   for (i = 0; i < reload->nr_elements; i++) {
+   struct expression *elem;
+
+   elem = reload->elements[reload->nr_elements - i - 1];
+   expr_get(elem);
+   stack_push(bb->mimic_stack, elem);
+   }

[PATCH 1/4] jit: make clear_mimic_stack() work on stack instead of basic block

2009-09-04 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/jit/basic-block.h |2 +-
 jit/basic-block.c |6 +++---
 jit/exception-bc.c|2 +-
 jit/invoke-bc.c   |4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/jit/basic-block.h b/include/jit/basic-block.h
index d5452a4..b3e3cc1 100644
--- a/include/jit/basic-block.h
+++ b/include/jit/basic-block.h
@@ -88,7 +88,7 @@ unsigned char *bb_native_ptr(struct basic_block *bb);
 void resolution_block_init(struct resolution_block *block);
 bool branch_needs_resolution_block(struct basic_block *from, int idx);
 int bb_lookup_successor_index(struct basic_block *from, struct basic_block 
*to);
-void clear_mimic_stack(struct basic_block *bb);
+void clear_mimic_stack(struct stack *);
 
 #define for_each_basic_block(bb, bb_list) list_for_each_entry(bb, bb_list, 
bb_list_node)
 #define for_each_basic_block_reverse(bb, bb_list) 
list_for_each_entry_reverse(bb, bb_list, bb_list_node)
diff --git a/jit/basic-block.c b/jit/basic-block.c
index e19ee7e..401377c 100644
--- a/jit/basic-block.c
+++ b/jit/basic-block.c
@@ -235,12 +235,12 @@ bool branch_needs_resolution_block(struct basic_block 
*from, int idx)
return !list_is_empty(&from->resolution_blocks[idx].insns);
 }
 
-void clear_mimic_stack(struct basic_block *bb)
+void clear_mimic_stack(struct stack *stack)
 {
struct expression *expr;
 
-   while (!stack_is_empty(bb->mimic_stack)) {
-   expr = stack_pop(bb->mimic_stack);
+   while (!stack_is_empty(stack)) {
+   expr = stack_pop(stack);
expr_put(expr);
}
 }
diff --git a/jit/exception-bc.c b/jit/exception-bc.c
index 94de121..c76772f 100644
--- a/jit/exception-bc.c
+++ b/jit/exception-bc.c
@@ -61,7 +61,7 @@ int convert_athrow(struct parse_context *ctx)
 * reference is not transferred to exception handlers in
 * BC2IR layer.
 */
-   clear_mimic_stack(ctx->bb);
+   clear_mimic_stack(ctx->bb->mimic_stack);
 
convert_statement(ctx, stmt);
 
diff --git a/jit/invoke-bc.c b/jit/invoke-bc.c
index cac01a9..b0a993a 100644
--- a/jit/invoke-bc.c
+++ b/jit/invoke-bc.c
@@ -35,7 +35,7 @@ int convert_xreturn(struct parse_context *ctx)
expr = stack_pop(ctx->bb->mimic_stack);
return_stmt->return_value = &expr->node;
convert_statement(ctx, return_stmt);
-   clear_mimic_stack(ctx->bb);
+   clear_mimic_stack(ctx->bb->mimic_stack);
return 0;
 }
 
@@ -48,7 +48,7 @@ int convert_return(struct parse_context *ctx)
return_stmt->return_value = NULL;
 
convert_statement(ctx, return_stmt);
-   clear_mimic_stack(ctx->bb);
+   clear_mimic_stack(ctx->bb->mimic_stack);
return 0;
 }
 
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/4] jit: split tableswitch/lookupswitch basic block at the end.

2009-09-04 Thread Tomek Grabiec
This way, the order of basic blocks matches the values of .start and .end
It also makes created basic blocks have empty bytecode range (start == end).

Signed-off-by: Tomek Grabiec 
---
 jit/basic-block.c   |2 +-
 jit/switch-bc.c |6 +++---
 test/jit/basic-block-test.c |2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/jit/basic-block.c b/jit/basic-block.c
index 401377c..d478315 100644
--- a/jit/basic-block.c
+++ b/jit/basic-block.c
@@ -104,7 +104,7 @@ struct basic_block *bb_split(struct basic_block *orig_bb, 
unsigned long offset)
 {
struct basic_block *new_bb;
 
-   if (offset < orig_bb->start || offset >= orig_bb->end)
+   if (offset < orig_bb->start || offset > orig_bb->end)
return NULL;
 
new_bb = alloc_basic_block(orig_bb->b_parent, offset, orig_bb->end);
diff --git a/jit/switch-bc.c b/jit/switch-bc.c
index 32cb7fc..b1ece1a 100644
--- a/jit/switch-bc.c
+++ b/jit/switch-bc.c
@@ -93,8 +93,8 @@ int convert_tableswitch(struct parse_context *ctx)
 
master_bb = ctx->bb;
 
-   b1 = bb_split(master_bb, ctx->offset);
-   b2 = bb_split(b1, ctx->offset);
+   b1 = bb_split(master_bb, master_bb->end);
+   b2 = bb_split(b1, master_bb->end);
 
assert(b1 && b2);
 
@@ -179,7 +179,7 @@ int convert_lookupswitch(struct parse_context *ctx)
 
master_bb = ctx->bb;
 
-   b1 = bb_split(master_bb, ctx->offset);
+   b1 = bb_split(master_bb, master_bb->end);
 
assert(b1);
 
diff --git a/test/jit/basic-block-test.c b/test/jit/basic-block-test.c
index 7195487..a22d3bf 100644
--- a/test/jit/basic-block-test.c
+++ b/test/jit/basic-block-test.c
@@ -24,7 +24,7 @@ void test_split_with_out_of_range_offset(void)
bb = get_basic_block(cu, 1, 2);
 
assert_ptr_equals(NULL, bb_split(bb, 0));
-   assert_ptr_equals(NULL, bb_split(bb, 2));
+   assert_ptr_equals(NULL, bb_split(bb, 3));
 
free_compilation_unit(cu);
 }
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 3/4] jit: set .has_branch flag for created basic blocks for tableswitch/lookupswitch.

2009-09-04 Thread Tomek Grabiec
We must set that flag so that mimic stack resolution will put moves
before branch instructions.

Signed-off-by: Tomek Grabiec 
---
 jit/switch-bc.c |7 ---
 1 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/jit/switch-bc.c b/jit/switch-bc.c
index b1ece1a..fb3fef2 100644
--- a/jit/switch-bc.c
+++ b/jit/switch-bc.c
@@ -98,8 +98,9 @@ int convert_tableswitch(struct parse_context *ctx)
 
assert(b1 && b2);
 
-   b1->is_converted = true;
-   b2->is_converted = true;
+   master_bb->has_branch = true;
+   b1->has_branch = true;
+   b2->has_branch = true;
 
bb_add_successor(master_bb, default_bb );
bb_add_successor(master_bb, b1);
@@ -183,7 +184,7 @@ int convert_lookupswitch(struct parse_context *ctx)
 
assert(b1);
 
-   b1->is_converted = true;
+   b1->has_branch = true;
 
bb_add_successor(master_bb, default_bb );
bb_add_successor(master_bb, b1);
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/2][SQUASH TO 4/4] lib: fix compilation error

2009-09-03 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/lib/stack.h |2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/include/lib/stack.h b/include/lib/stack.h
index 7ec599d..aa60109 100644
--- a/include/lib/stack.h
+++ b/include/lib/stack.h
@@ -7,6 +7,8 @@
 #include 
 #include 
 
+#include "vm/die.h"
+
 struct stack {
unsigned long   nr_elements;
void**elements;
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/2][SQUASH to 2/4] test: fix 'make check' breakage

2009-09-03 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 test/jit/basic-block-test.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/test/jit/basic-block-test.c b/test/jit/basic-block-test.c
index 7195487..a22d3bf 100644
--- a/test/jit/basic-block-test.c
+++ b/test/jit/basic-block-test.c
@@ -24,7 +24,7 @@ void test_split_with_out_of_range_offset(void)
bb = get_basic_block(cu, 1, 2);
 
assert_ptr_equals(NULL, bb_split(bb, 0));
-   assert_ptr_equals(NULL, bb_split(bb, 2));
+   assert_ptr_equals(NULL, bb_split(bb, 3));
 
free_compilation_unit(cu);
 }
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 3/4] jit: set .has_branch flag for created basic blocks for tableswitch/lookupswitch.

2009-09-03 Thread Tomek Grabiec
We must set that flag so that mimic stack resolution will put moves
before branch instructions.

Signed-off-by: Tomek Grabiec 
---
 jit/switch-bc.c |7 ---
 1 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/jit/switch-bc.c b/jit/switch-bc.c
index b1ece1a..fb3fef2 100644
--- a/jit/switch-bc.c
+++ b/jit/switch-bc.c
@@ -98,8 +98,9 @@ int convert_tableswitch(struct parse_context *ctx)
 
assert(b1 && b2);
 
-   b1->is_converted = true;
-   b2->is_converted = true;
+   master_bb->has_branch = true;
+   b1->has_branch = true;
+   b2->has_branch = true;
 
bb_add_successor(master_bb, default_bb );
bb_add_successor(master_bb, b1);
@@ -183,7 +184,7 @@ int convert_lookupswitch(struct parse_context *ctx)
 
assert(b1);
 
-   b1->is_converted = true;
+   b1->has_branch = true;
 
bb_add_successor(master_bb, default_bb );
bb_add_successor(master_bb, b1);
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 4/4] jit: fix mimic stack content propagation

2009-09-03 Thread Tomek Grabiec
The mimic stack propagation procedure was pushing elements onto
successors' stacks on every CFG edge. This caused that the successor's
stack was larger than it should be. The successor's stack was then
also propagated, along with extra elements. This leads to generation
of many unnecessary register moves.

In some CFG configurations this may lead to degenerative mimic stack
resolution, where STMT_STORE is inserted with src temporary that is
assigned only on some paths. This leads to incorrect liveness analysis
of the temporary register, because it is seen to be used before it is
defined. The register appears to be always live before its use
position, which is wrong, and could cause problems with GC.

Signed-off-by: Tomek Grabiec 
---
 include/jit/basic-block.h |3 ++
 include/lib/stack.h   |   24 +-
 jit/bytecode-to-ir.c  |   49 
 3 files changed, 61 insertions(+), 15 deletions(-)

diff --git a/include/jit/basic-block.h b/include/jit/basic-block.h
index b3e3cc1..c4b74ec 100644
--- a/include/jit/basic-block.h
+++ b/include/jit/basic-block.h
@@ -46,6 +46,9 @@ struct basic_block {
   Adl-Tabatabai et al (1998) for more in-depth explanation.  */
struct stack *mimic_stack;
 
+   unsigned long entry_mimic_stack_size;
+   bool entry_mimic_stack_set;
+
/* Is this basic block an exception handler? */
bool is_eh;
 
diff --git a/include/lib/stack.h b/include/lib/stack.h
index e4e79d9..7ec599d 100644
--- a/include/lib/stack.h
+++ b/include/lib/stack.h
@@ -1,9 +1,11 @@
 #ifndef LIB_STACK_H
 #define LIB_STACK_H
 
+#include 
+#include 
+#include 
 #include 
 #include 
-#include 
 
 struct stack {
unsigned long   nr_elements;
@@ -37,4 +39,24 @@ static inline bool stack_is_empty(struct stack *stack)
return !stack->nr_elements;
 }
 
+static inline unsigned long stack_size(struct stack *stack)
+{
+   return stack->nr_elements;
+}
+
+static inline void stack_copy(struct stack *src, struct stack *dst)
+{
+   void **new_elements;
+   unsigned long size;
+
+   size = src->nr_elements * sizeof(void*);
+   new_elements = realloc(dst->elements, size);
+   if (!new_elements)
+   error("out of memory");
+
+   dst->elements = new_elements;
+   dst->nr_elements = src->nr_elements;
+   memcpy(new_elements, src->elements, size);
+}
+
 #endif
diff --git a/jit/bytecode-to-ir.c b/jit/bytecode-to-ir.c
index 2923890..4c059e0 100644
--- a/jit/bytecode-to-ir.c
+++ b/jit/bytecode-to-ir.c
@@ -181,8 +181,17 @@ static int do_convert_bb_to_ir(struct basic_block *bb)
buffer.buffer = cu->method->code_attribute.code;
buffer.pos = bb->start;
 
-   if (bb->is_eh)
+   if (bb->is_eh) {
+   assert(!bb->entry_mimic_stack_set);
stack_push(bb->mimic_stack, exception_ref_expr());
+   bb->entry_mimic_stack_size = 1;
+   bb->entry_mimic_stack_set = true;
+   }
+
+   if (!bb->entry_mimic_stack_set) {
+   bb->entry_mimic_stack_size = 0;
+   bb->entry_mimic_stack_set = true;
+   }
 
while (buffer.pos < bb->end) {
ctx.offset = ctx.buffer->pos;   /* this is fragile */
@@ -194,6 +203,28 @@ static int do_convert_bb_to_ir(struct basic_block *bb)
return err;
 }
 
+static int reload_mimic_stack(struct basic_block *bb, struct stack *reload)
+{
+   for (unsigned int i = 0; i < reload->nr_elements; i++)
+   bb_add_mimic_stack_expr(bb, reload->elements[i]);
+
+   if (!bb->entry_mimic_stack_set) {
+   for (unsigned int i = 0; i < reload->nr_elements; i++) {
+   struct expression *elem;
+
+   elem = reload->elements[reload->nr_elements - i - 1];
+   expr_get(elem);
+   stack_push(bb->mimic_stack, elem);
+   }
+
+   bb->entry_mimic_stack_size = stack_size(bb->mimic_stack);
+   bb->entry_mimic_stack_set  = true;
+   } else if (stack_size(reload) != bb->entry_mimic_stack_size)
+   return error("stack size differs on different paths"), -EINVAL;
+
+   return 0;
+}
+
 static int convert_bb_to_ir(struct basic_block *bb)
 {
struct stack *reload_stack;
@@ -227,20 +258,10 @@ static int convert_bb_to_ir(struct basic_block *bb)
if (!reload_stack)
return warn("out of memory"), -ENOMEM;
 
-   while (!stack_is_empty(reload_stack)) {
-   struct expression *expr = stack_pop(reload_stack);
-
-   for (i = 0; i < bb->nr_successors; i++) {
-   struct basic_block *s = bb->successors[i];
+   for (i = 0; i < bb->nr_successors; i++)
+   reload_mimic_stack

[PATCH 2/4] jit: split tableswitch/lookupswitch basic block at the end.

2009-09-03 Thread Tomek Grabiec
This way, the order of basic blocks matches the values of .start and .end
It also makes created basic blocks have empty bytecode range (start == end).

Signed-off-by: Tomek Grabiec 
---
 jit/basic-block.c |2 +-
 jit/switch-bc.c   |6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/jit/basic-block.c b/jit/basic-block.c
index 401377c..d478315 100644
--- a/jit/basic-block.c
+++ b/jit/basic-block.c
@@ -104,7 +104,7 @@ struct basic_block *bb_split(struct basic_block *orig_bb, 
unsigned long offset)
 {
struct basic_block *new_bb;
 
-   if (offset < orig_bb->start || offset >= orig_bb->end)
+   if (offset < orig_bb->start || offset > orig_bb->end)
return NULL;
 
new_bb = alloc_basic_block(orig_bb->b_parent, offset, orig_bb->end);
diff --git a/jit/switch-bc.c b/jit/switch-bc.c
index 32cb7fc..b1ece1a 100644
--- a/jit/switch-bc.c
+++ b/jit/switch-bc.c
@@ -93,8 +93,8 @@ int convert_tableswitch(struct parse_context *ctx)
 
master_bb = ctx->bb;
 
-   b1 = bb_split(master_bb, ctx->offset);
-   b2 = bb_split(b1, ctx->offset);
+   b1 = bb_split(master_bb, master_bb->end);
+   b2 = bb_split(b1, master_bb->end);
 
assert(b1 && b2);
 
@@ -179,7 +179,7 @@ int convert_lookupswitch(struct parse_context *ctx)
 
master_bb = ctx->bb;
 
-   b1 = bb_split(master_bb, ctx->offset);
+   b1 = bb_split(master_bb, master_bb->end);
 
assert(b1);
 
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/4] jit: make clear_mimic_stack() work on stack instead of basic block

2009-09-03 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/jit/basic-block.h |2 +-
 jit/basic-block.c |6 +++---
 jit/exception-bc.c|2 +-
 jit/invoke-bc.c   |4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/jit/basic-block.h b/include/jit/basic-block.h
index d5452a4..b3e3cc1 100644
--- a/include/jit/basic-block.h
+++ b/include/jit/basic-block.h
@@ -88,7 +88,7 @@ unsigned char *bb_native_ptr(struct basic_block *bb);
 void resolution_block_init(struct resolution_block *block);
 bool branch_needs_resolution_block(struct basic_block *from, int idx);
 int bb_lookup_successor_index(struct basic_block *from, struct basic_block 
*to);
-void clear_mimic_stack(struct basic_block *bb);
+void clear_mimic_stack(struct stack *);
 
 #define for_each_basic_block(bb, bb_list) list_for_each_entry(bb, bb_list, 
bb_list_node)
 #define for_each_basic_block_reverse(bb, bb_list) 
list_for_each_entry_reverse(bb, bb_list, bb_list_node)
diff --git a/jit/basic-block.c b/jit/basic-block.c
index e19ee7e..401377c 100644
--- a/jit/basic-block.c
+++ b/jit/basic-block.c
@@ -235,12 +235,12 @@ bool branch_needs_resolution_block(struct basic_block 
*from, int idx)
return !list_is_empty(&from->resolution_blocks[idx].insns);
 }
 
-void clear_mimic_stack(struct basic_block *bb)
+void clear_mimic_stack(struct stack *stack)
 {
struct expression *expr;
 
-   while (!stack_is_empty(bb->mimic_stack)) {
-   expr = stack_pop(bb->mimic_stack);
+   while (!stack_is_empty(stack)) {
+   expr = stack_pop(stack);
expr_put(expr);
}
 }
diff --git a/jit/exception-bc.c b/jit/exception-bc.c
index 94de121..c76772f 100644
--- a/jit/exception-bc.c
+++ b/jit/exception-bc.c
@@ -61,7 +61,7 @@ int convert_athrow(struct parse_context *ctx)
 * reference is not transferred to exception handlers in
 * BC2IR layer.
 */
-   clear_mimic_stack(ctx->bb);
+   clear_mimic_stack(ctx->bb->mimic_stack);
 
convert_statement(ctx, stmt);
 
diff --git a/jit/invoke-bc.c b/jit/invoke-bc.c
index cac01a9..b0a993a 100644
--- a/jit/invoke-bc.c
+++ b/jit/invoke-bc.c
@@ -35,7 +35,7 @@ int convert_xreturn(struct parse_context *ctx)
expr = stack_pop(ctx->bb->mimic_stack);
return_stmt->return_value = &expr->node;
convert_statement(ctx, return_stmt);
-   clear_mimic_stack(ctx->bb);
+   clear_mimic_stack(ctx->bb->mimic_stack);
return 0;
 }
 
@@ -48,7 +48,7 @@ int convert_return(struct parse_context *ctx)
return_stmt->return_value = NULL;
 
convert_statement(ctx, return_stmt);
-   clear_mimic_stack(ctx->bb);
+   clear_mimic_stack(ctx->bb->mimic_stack);
return 0;
 }
 
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


Re: [PATCH 1/6] vm: introduce -Xnobuf

2009-09-02 Thread Tomek Grabiec
2009/9/1 Pekka Enberg :
> Hi Tomek,
>
> On Tue, 2009-09-01 at 10:06 +0200, Tomek Grabiec wrote:
>> It disables buffering of strings passed to trace_printf().
>>
>> Signed-off-by: Tomek Grabiec 
>
> Does something like this work for your scenario?
>
>                        Pekka
>
> >From 9d6a82a9290f5f75a5018506b4febe31ad8df3e0 Mon Sep 17 00:00:00 2001
> From: Pekka Enberg 
> Date: Tue, 1 Sep 2009 21:21:15 +0300
> Subject: [PATCH] vm: Flush and disable buffering on SIGABRT
>
> Signed-off-by: Pekka Enberg 
> ---
>  include/vm/trace.h |    1 +
>  vm/signal.c        |   23 +--
>  vm/trace.c         |   36 ++--
>  3 files changed, 48 insertions(+), 12 deletions(-)
>
> diff --git a/include/vm/trace.h b/include/vm/trace.h
> index 79959ea..e95bbb9 100644
> --- a/include/vm/trace.h
> +++ b/include/vm/trace.h
> @@ -3,5 +3,6 @@
>
>  int trace_printf(const char *fmt, ...);
>  void trace_flush(void);
> +void trace_emergency_flush(void);
>
>  #endif /* _VM_TRACE_H */
> diff --git a/vm/signal.c b/vm/signal.c
> index 094fec6..50e0576 100644
> --- a/vm/signal.c
> +++ b/vm/signal.c
> @@ -35,6 +35,7 @@
>  #include "vm/preload.h"
>  #include "vm/signal.h"
>  #include "vm/stack-trace.h"
> +#include "vm/trace.h"
>
>  #include "arch/signal.h"
>
> @@ -80,7 +81,7 @@ static unsigned long gc_safepoint_bh(unsigned long addr)
>        return addr;
>  }
>
> -static void sigfpe_handler(int sig, siginfo_t *si, void *ctx)
> +static void handle_sigfpe(int sig, siginfo_t *si, void *ctx)
>  {
>        if (signal_from_native(ctx))
>                goto exit;
> @@ -96,7 +97,7 @@ static void sigfpe_handler(int sig, siginfo_t *si, void 
> *ctx)
>        print_backtrace_and_die(sig, si, ctx);
>  }
>
> -static void sigsegv_handler(int sig, siginfo_t *si, void *ctx)
> +static void handle_sigsegv(int sig, siginfo_t *si, void *ctx)
>  {
>        if (signal_from_native(ctx))
>                goto exit;
> @@ -160,11 +161,18 @@ static void sigsegv_handler(int sig, siginfo_t *si, 
> void *ctx)
>        print_backtrace_and_die(sig, si, ctx);
>  }
>
> -static void signal_handler(int sig, siginfo_t *si, void *ctx)
> +static void handle_signal(int sig, siginfo_t *si, void *ctx)
>  {
>        print_backtrace_and_die(sig, si, ctx);
>  }
>
> +static void handle_abort(int sig, siginfo_t *si, void *ctx)
> +{
> +       trace_emergency_flush();
> +
> +       print_backtrace_and_die(sig, si, ctx);
> +}
> +
>  void setup_signal_handlers(void)
>  {
>        struct sigaction sa;
> @@ -172,12 +180,15 @@ void setup_signal_handlers(void)
>        sigemptyset(&sa.sa_mask);
>        sa.sa_flags     = SA_RESTART | SA_SIGINFO;
>
> -       sa.sa_sigaction = sigsegv_handler;
> +       sa.sa_sigaction = handle_sigsegv;
>        sigaction(SIGSEGV, &sa, NULL);
>
> -       sa.sa_sigaction = sigfpe_handler;
> +       sa.sa_sigaction = handle_sigfpe;
>        sigaction(SIGFPE, &sa, NULL);
>
> -       sa.sa_sigaction = signal_handler;
> +       sa.sa_sigaction = handle_signal;
>        sigaction(SIGUSR1, &sa, NULL);
> +
> +       sa.sa_sigaction = handle_abort;
> +       sigaction(SIGABRT, &sa, NULL);
>  }
> diff --git a/vm/trace.c b/vm/trace.c
> index 0192de6..0ebafbf 100644
> --- a/vm/trace.c
> +++ b/vm/trace.c
> @@ -38,6 +38,8 @@ static pthread_mutex_t trace_mutex = 
> PTHREAD_MUTEX_INITIALIZER;
>
>  static __thread struct string *trace_buffer;
>
> +static bool disable_tracing;
> +
>  static void setup_trace_buffer(void)
>  {
>        if (trace_buffer)
> @@ -53,6 +55,13 @@ int trace_printf(const char *fmt, ...)
>        va_list args;
>        int err;
>
> +       if (disable_tracing) {
> +               va_start(args, fmt);
> +               vfprintf(stderr, fmt, args);
> +               va_end(args);
> +               return 0;
> +       }
> +
>        setup_trace_buffer();
>
>        va_start(args, fmt);
> @@ -62,14 +71,15 @@ int trace_printf(const char *fmt, ...)
>        return err;
>  }
>
> -void trace_flush(void)
> +static void do_trace_flush(void)
>  {
>        struct vm_thread *self;
>        char *thread_name;
>        char *line;
>        char *next;
>
> -       setup_trace_buffer();
> +       if (!trace_buffer)
> +               return;
>
>        self = vm_thread_self();
>        if (self)
> @@ -77,8 +87,6 @@ void trace_flush(void)
>        else
>                thread_name = strdup("unknown");
>
> -       pthread_mutex_lock(&trace_mutex);
> -
>    

[PATCH 6/6] x86: fix writes below (%esp) in insn-selector.brg

2009-09-01 Thread Tomek Grabiec
It is incorrect to write or read from memory below %esp. This patch
fixes valgrind complaints about access of uninitialized data.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |  152 +++-
 1 files changed, 80 insertions(+), 72 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 2e7367f..87c270e 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -54,10 +54,11 @@
 static double xmm_double_constant_0 = 0x1;
 static double xmm_double_constant_1 = 0x08000;
 
-static void select_insn(struct basic_block *bb, struct tree_node *tree,
-   struct insn *instruction);
-static void select_exception_test(struct basic_block *bb,
- struct tree_node *tree);
+struct _MBState;
+
+static void select_insn(struct basic_block *bb, struct tree_node *tree, struct 
insn *instruction);
+static void select_exception_test(struct basic_block *bb, struct tree_node 
*tree);
+void finvoke_return_value(struct _MBState *state, struct basic_block *s, 
struct tree_node *tree, enum vm_type ret_vm_type);
 
 static unsigned char size_to_scale(int size)
 {
@@ -93,8 +94,6 @@ static void method_args_cleanup(struct basic_block *bb, 
struct tree_node *tree,
select_insn(bb, tree, imm_reg_insn(INSN_ADD_IMM_REG, args_size, 
stack_ptr));
 }
 
-struct _MBState;
-
 static void __binop_reg_local(struct _MBState *, struct basic_block *, struct 
tree_node *, enum insn_type, struct var_info *, long);
 static void binop_reg_local_high(struct _MBState *, struct basic_block *, 
struct tree_node *, enum insn_type);
 static void binop_reg_local_low(struct _MBState *, struct basic_block *, 
struct tree_node *, enum insn_type);
@@ -141,6 +140,7 @@ freg:   EXPR_FVALUE 0
 {
struct expression *expr;
struct var_info *result, *esp;
+   struct stack_slot *scratch;
 
expr = to_expr(tree);
 
@@ -149,15 +149,23 @@ freg: EXPR_FVALUE 0
state->reg1 = result;
 
if (expr->vm_type == J_FLOAT) {
-   select_insn(s, tree, imm_membase_insn(INSN_MOV_IMM_MEMBASE, 
float_to_uint32(expr->fvalue), esp, -4));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, 
esp, -4, result));
+   scratch = get_scratch_slot_32(s->b_parent);
+   select_insn(s, tree, imm_memlocal_insn(INSN_MOV_IMM_MEMLOCAL, 
float_to_uint32(expr->fvalue), scratch));
+   select_insn(s, tree, memlocal_reg_insn(INSN_MOV_MEMLOCAL_XMM, 
scratch, result));
} else {
uint32_t high_byte, low_byte;
+   unsigned long offset;
+   struct var_info *ebp;
+
+   ebp = get_fixed_var(s->b_parent, MACH_REG_EBP);
+
+   scratch = get_scratch_slot_64(s->b_parent);
+   offset  = slot_offset_64(scratch);
 
double_to_uint64(expr->fvalue, &low_byte, &high_byte);
-   select_insn(s, tree, imm_membase_insn(INSN_MOV_IMM_MEMBASE, 
high_byte, esp, -4));
-   select_insn(s, tree, imm_membase_insn(INSN_MOV_IMM_MEMBASE, 
low_byte, esp, -8));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, 
esp, -8, result));
+   select_insn(s, tree, imm_membase_insn(INSN_MOV_IMM_MEMBASE, 
high_byte, ebp, offset + 4));
+   select_insn(s, tree, imm_membase_insn(INSN_MOV_IMM_MEMBASE, 
low_byte, ebp, offset));
+   select_insn(s, tree, 
memlocal_reg_insn(INSN_MOV_64_MEMLOCAL_XMM, scratch, result));
}
 }
 
@@ -483,6 +491,7 @@ reg:OP_REM(reg, reg) 1
 freg:  OP_DREM(freg, freg) 1
 {
struct var_info *esp, *eax;
+   struct stack_slot *scratch;
 
state->reg1 = get_var(s->b_parent, J_DOUBLE);
 
@@ -496,13 +505,15 @@ freg: OP_DREM(freg, freg) 1
select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmod));
method_args_cleanup(s, tree, 4);
 
-   select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, esp, -8));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, esp, -8, 
state->reg1));
+   scratch = get_scratch_slot_64(s->b_parent);
+   select_insn(s, tree, memlocal_insn(INSN_FSTP_64_MEMLOCAL, scratch));
+   select_insn(s, tree, memlocal_reg_insn(INSN_MOV_64_MEMLOCAL_XMM, 
scratch, state->reg1));
 }
 
 freg:  OP_FREM(freg, freg) 1
 {
struct var_info *esp, *eax;
+   struct stack_slot *scratch;
 
state->reg1 = get_var(s->b_parent, J_FLOAT);
 
@@ -516,8 +527,9 @@ freg:   OP_FREM(freg, freg) 1
select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmodf));
method_args_cleanup(s, tree, 2);
 
-   select_insn(s, tree, membase_insn(INSN_FSTP_MEMBASE, esp, -4));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, esp, -4, 
state->reg1));
+   scratch = get_scratch_slot_32(

[PATCH 3/6] x86: cleanup emit_mov_reg_memlocal()

2009-09-01 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/emit-code.c |   16 +---
 1 files changed, 1 insertions(+), 15 deletions(-)

diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c
index a99296c..e1eb5ba 100644
--- a/arch/x86/emit-code.c
+++ b/arch/x86/emit-code.c
@@ -861,21 +861,7 @@ emit_push_membase(struct buffer *buf, struct operand *src)
 static void emit_mov_reg_memlocal(struct buffer *buf, struct operand *src,
  struct operand *dest)
 {
-   unsigned long disp;
-   int mod;
-
-   disp = slot_offset(dest->slot);
-
-   if (is_imm_8(disp))
-   mod = 0x01;
-   else
-   mod = 0x02;
-
-   emit(buf, 0x89);
-   emit(buf, encode_modrm(mod, encode_reg(&src->reg),
-  __encode_reg(MACH_REG_EBP)));
-
-   emit_imm(buf, disp);
+   __emit_mov_reg_membase(buf, mach_reg(&src->reg), MACH_REG_EBP, 
slot_offset(dest->slot));
 }
 
 static void emit_mov_xmm_memlocal(struct buffer *buf, struct operand *src,
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 4/6] x86: implement INSN_(FSTP|FLD)_{64}_MEMLOCAL

2009-09-01 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/emit-code.c|   24 
 arch/x86/include/arch/instruction.h |4 
 arch/x86/lir-printer.c  |   28 
 arch/x86/use-def.c  |   12 
 4 files changed, 64 insertions(+), 4 deletions(-)

diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c
index e1eb5ba..e84e725 100644
--- a/arch/x86/emit-code.c
+++ b/arch/x86/emit-code.c
@@ -1154,6 +1154,16 @@ static void emit_fld_64_membase(struct buffer *buf, 
struct operand *src)
__emit_membase(buf, 0xdd, mach_reg(&src->base_reg), src->disp, 0);
 }
 
+static void emit_fld_memlocal(struct buffer *buf, struct operand *src)
+{
+   __emit_membase(buf, 0xd9, MACH_REG_EBP, slot_offset(src->slot), 0);
+}
+
+static void emit_fld_64_memlocal(struct buffer *buf, struct operand *src)
+{
+   __emit_membase(buf, 0xdd, MACH_REG_EBP, slot_offset_64(src->slot), 0);
+}
+
 static void emit_fild_64_membase(struct buffer *buf, struct operand *src)
 {
__emit_membase(buf, 0xdf, mach_reg(&src->base_reg), src->disp, 5);
@@ -1179,11 +1189,21 @@ static void emit_fstp_membase(struct buffer *buf, 
struct operand *dest)
__emit_membase(buf, 0xd9, mach_reg(&dest->base_reg), dest->disp, 3);
 }
 
+static void emit_fstp_memlocal(struct buffer *buf, struct operand *dest)
+{
+   __emit_membase(buf, 0xd9, MACH_REG_EBP, slot_offset(dest->slot), 3);
+}
+
 static void emit_fstp_64_membase(struct buffer *buf, struct operand *dest)
 {
__emit_membase(buf, 0xdd, mach_reg(&dest->base_reg), dest->disp, 3);
 }
 
+static void emit_fstp_64_memlocal(struct buffer *buf, struct operand *dest)
+{
+   __emit_membase(buf, 0xdd, MACH_REG_EBP, slot_offset_64(dest->slot), 3);
+}
+
 static void emit_add_membase_reg(struct buffer *buf,
 struct operand *src, struct operand *dest)
 {
@@ -1680,13 +1700,17 @@ struct emitter emitters[] = {
DECL_EMITTER(INSN_FDIV_REG_REG, emit_fdiv_reg_reg, TWO_OPERANDS),
DECL_EMITTER(INSN_FDIV_64_REG_REG, emit_fdiv_64_reg_reg, TWO_OPERANDS),
DECL_EMITTER(INSN_FLD_MEMBASE, emit_fld_membase, TWO_OPERANDS),
+   DECL_EMITTER(INSN_FLD_MEMLOCAL, emit_fld_memlocal, TWO_OPERANDS),
DECL_EMITTER(INSN_FLD_64_MEMBASE, emit_fld_64_membase, TWO_OPERANDS),
+   DECL_EMITTER(INSN_FLD_64_MEMLOCAL, emit_fld_64_memlocal, TWO_OPERANDS),
DECL_EMITTER(INSN_FLDCW_MEMBASE, emit_fldcw_membase, SINGLE_OPERAND),
DECL_EMITTER(INSN_FILD_64_MEMBASE, emit_fild_64_membase, TWO_OPERANDS),
DECL_EMITTER(INSN_FISTP_64_MEMBASE, emit_fistp_64_membase, 
SINGLE_OPERAND),
DECL_EMITTER(INSN_FNSTCW_MEMBASE, emit_fnstcw_membase, SINGLE_OPERAND),
DECL_EMITTER(INSN_FSTP_MEMBASE, emit_fstp_membase, TWO_OPERANDS),
+   DECL_EMITTER(INSN_FSTP_MEMLOCAL, emit_fstp_memlocal, TWO_OPERANDS),
DECL_EMITTER(INSN_FSTP_64_MEMBASE, emit_fstp_64_membase, TWO_OPERANDS),
+   DECL_EMITTER(INSN_FSTP_64_MEMLOCAL, emit_fstp_64_memlocal, 
TWO_OPERANDS),
DECL_EMITTER(INSN_CONV_GPR_TO_FPU, emit_conv_gpr_to_fpu, TWO_OPERANDS),
DECL_EMITTER(INSN_CONV_GPR_TO_FPU64, emit_conv_gpr_to_fpu64, 
TWO_OPERANDS),
DECL_EMITTER(INSN_CONV_FPU_TO_GPR, emit_conv_fpu_to_gpr, TWO_OPERANDS),
diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index 0186745..50c6678 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -87,13 +87,17 @@ enum insn_type {
INSN_FSUB_REG_REG,
INSN_FSUB_64_REG_REG,
INSN_FLD_MEMBASE,
+   INSN_FLD_MEMLOCAL,
INSN_FLD_64_MEMBASE,
+   INSN_FLD_64_MEMLOCAL,
INSN_FLDCW_MEMBASE,
INSN_FILD_64_MEMBASE,
INSN_FISTP_64_MEMBASE,
INSN_FNSTCW_MEMBASE,
INSN_FSTP_MEMBASE,
+   INSN_FSTP_MEMLOCAL,
INSN_FSTP_64_MEMBASE,
+   INSN_FSTP_64_MEMLOCAL,
INSN_CONV_FPU_TO_GPR,
INSN_CONV_FPU64_TO_GPR,
INSN_CONV_GPR_TO_FPU,
diff --git a/arch/x86/lir-printer.c b/arch/x86/lir-printer.c
index 2980771..ff2a11c 100644
--- a/arch/x86/lir-printer.c
+++ b/arch/x86/lir-printer.c
@@ -335,12 +335,24 @@ static int print_fld_membase(struct string *str, struct 
insn *insn)
return print_membase(str, &insn->operand);
 }
 
+static int print_fld_memlocal(struct string *str, struct insn *insn)
+{
+   print_func_name(str);
+   return print_memlocal(str, &insn->operand);
+}
+
 static int print_fld_64_membase(struct string *str, struct insn *insn)
 {
print_func_name(str);
return print_membase(str, &insn->operand);
 }
 
+static int print_fld_64_memlocal(struct string *str, struct insn *insn)
+{
+   print_func_name(str);
+   return print_memlocal(str, &insn->operand);
+}
+
 static int print_fild_64_membase(struct string *str, struct insn *insn)
 {
pr

[PATCH 5/6] jit: introduce lazy-allocated scratch stack slots

2009-09-01 Thread Tomek Grabiec
They are needed to hold temporary results when a register cannot be used.

Signed-off-by: Tomek Grabiec 
---
 include/jit/compilation-unit.h |   20 +++-
 jit/compilation-unit.c |   16 
 2 files changed, 35 insertions(+), 1 deletions(-)

diff --git a/include/jit/compilation-unit.h b/include/jit/compilation-unit.h
index 4114bce..be76e5f 100644
--- a/include/jit/compilation-unit.h
+++ b/include/jit/compilation-unit.h
@@ -93,11 +93,18 @@ struct compilation_unit {
struct radix_tree *safepoint_map;
 
/*
-* Contains native pointers of exception handlers.  Indices to
+* Contains native pointers of exception handlers. Indices to
 * this table are the same as for exception table in code
 * attribute.
 */
void **exception_handlers;
+
+   /*
+* These point to stack slots for storing temporary results
+* within one monoburg rule where we can not use a register.
+*/
+   struct stack_slot *scratch_slot_32;
+   struct stack_slot *scratch_slot_64;
 };
 
 struct compilation_unit *compilation_unit_alloc(struct vm_method *);
@@ -109,6 +116,17 @@ struct var_info *get_fixed_var(struct compilation_unit *, 
enum machine_reg);
 struct basic_block *find_bb(struct compilation_unit *, unsigned long);
 unsigned long nr_bblocks(struct compilation_unit *);
 void compute_insn_positions(struct compilation_unit *);
+struct stack_slot *get_scratch_slot(struct compilation_unit *, int);
+
+static inline struct stack_slot *get_scratch_slot_32(struct compilation_unit 
*cu)
+{
+   return get_scratch_slot(cu, 1);
+}
+
+static inline struct stack_slot *get_scratch_slot_64(struct compilation_unit 
*cu)
+{
+   return get_scratch_slot(cu, 2);
+}
 
 #define for_each_variable(var, var_list) for (var = var_list; var != NULL; var 
= var->next)
 
diff --git a/jit/compilation-unit.c b/jit/compilation-unit.c
index 0dd4415..918420c 100644
--- a/jit/compilation-unit.c
+++ b/jit/compilation-unit.c
@@ -267,3 +267,19 @@ void compute_insn_positions(struct compilation_unit *cu)
 
cu->last_insn = pos;
 }
+
+struct stack_slot *get_scratch_slot(struct compilation_unit *cu, int slot_size)
+{
+   switch (slot_size) {
+   case 1:
+   if (!cu->scratch_slot_32)
+   cu->scratch_slot_32 = 
get_spill_slot_32(cu->stack_frame);
+   return cu->scratch_slot_32;
+   case 2:
+   if (!cu->scratch_slot_64)
+   cu->scratch_slot_64 = 
get_spill_slot_64(cu->stack_frame);
+   return cu->scratch_slot_64;
+   default:
+   error("invalid slot size");
+   }
+}
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/6] vm: introduce -Xnobuf

2009-09-01 Thread Tomek Grabiec
It disables buffering of strings passed to trace_printf().

Signed-off-by: Tomek Grabiec 
---
 include/vm/trace.h |2 ++
 vm/jato.c  |7 +++
 vm/trace.c |9 -
 3 files changed, 17 insertions(+), 1 deletions(-)

diff --git a/include/vm/trace.h b/include/vm/trace.h
index 79959ea..18bdfca 100644
--- a/include/vm/trace.h
+++ b/include/vm/trace.h
@@ -1,6 +1,8 @@
 #ifndef _VM_TRACE_H
 #define _VM_TRACE_H
 
+extern bool nobuf_enabled;
+
 int trace_printf(const char *fmt, ...);
 void trace_flush(void);
 
diff --git a/vm/jato.c b/vm/jato.c
index 924ee24..83eb82d 100644
--- a/vm/jato.c
+++ b/vm/jato.c
@@ -70,6 +70,7 @@
 #include "vm/string.h"
 #include "vm/system.h"
 #include "vm/thread.h"
+#include "vm/trace.h"
 #include "vm/unsafe.h"
 #include "vm/class.h"
 #include "vm/call.h"
@@ -943,6 +944,11 @@ static void handle_jar(const char *arg)
classloader_add_to_classpath(arg);
 }
 
+static void handle_nobuf(void)
+{
+   nobuf_enabled = true;
+}
+
 static void handle_perf(void)
 {
perf_enabled = true;
@@ -1097,6 +1103,7 @@ const struct option options[] = {
DEFINE_OPTION_ARG("cp", handle_classpath),
DEFINE_OPTION_ARG("jar",handle_jar),
 
+   DEFINE_OPTION("Xnobuf", handle_nobuf),
DEFINE_OPTION("Xperf",  handle_perf),
 
DEFINE_OPTION_ARG("Xtrace:method",  handle_trace_method),
diff --git a/vm/trace.c b/vm/trace.c
index 2172b7e..d7f1f6a 100644
--- a/vm/trace.c
+++ b/vm/trace.c
@@ -36,6 +36,8 @@
 #include "vm/thread.h"
 #include "vm/trace.h"
 
+bool nobuf_enabled;
+
 static pthread_mutex_t trace_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static __thread struct string *trace_buffer = NULL;
@@ -58,7 +60,12 @@ int trace_printf(const char *fmt, ...)
ensure_trace_buffer();
 
va_start(args, fmt);
-   err = str_vappend(trace_buffer, fmt, args);
+
+   if (nobuf_enabled)
+   err = vprintf(fmt, args);
+   else
+   err = str_vappend(trace_buffer, fmt, args);
+
va_end(args);
return err;
 }
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/6] x86: implement INSN_MOV_IMM_MEMLOCAL

2009-09-01 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/emit-code.c|9 -
 arch/x86/include/arch/instruction.h |2 ++
 arch/x86/instruction.c  |   12 
 arch/x86/lir-printer.c  |   14 ++
 arch/x86/use-def.c  |1 +
 5 files changed, 37 insertions(+), 1 deletions(-)

diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c
index 5e9a21c..a99296c 100644
--- a/arch/x86/emit-code.c
+++ b/arch/x86/emit-code.c
@@ -569,7 +569,7 @@ __emit_membase(struct buffer *buf, unsigned char opc,
else
rm = __encode_reg(base_reg);
 
-   if (disp == 0)
+   if (disp == 0 && base_reg != MACH_REG_EBP)
mod = 0x00;
else if (is_imm_8(disp))
mod = 0x01;
@@ -832,6 +832,12 @@ static void emit_mov_imm_membase(struct buffer *buf, 
struct operand *src,
   dest->disp);
 }
 
+static void emit_mov_imm_memlocal(struct buffer *buf, struct operand *src,
+ struct operand *dest)
+{
+   __emit_mov_imm_membase(buf, src->imm, MACH_REG_EBP, 
slot_offset(dest->slot));
+}
+
 static void __emit_mov_reg_membase(struct buffer *buf, enum machine_reg src,
   enum machine_reg base, unsigned long disp)
 {
@@ -1708,6 +1714,7 @@ struct emitter emitters[] = {
DECL_EMITTER(INSN_MOV_XMM_MEMBASE, emit_mov_xmm_membase, TWO_OPERANDS),
DECL_EMITTER(INSN_MOV_64_XMM_MEMBASE, emit_mov_64_xmm_membase, 
TWO_OPERANDS),
DECL_EMITTER(INSN_MOV_IMM_MEMBASE, emit_mov_imm_membase, TWO_OPERANDS),
+   DECL_EMITTER(INSN_MOV_IMM_MEMLOCAL, emit_mov_imm_memlocal, 
TWO_OPERANDS),
DECL_EMITTER(INSN_MOV_IMM_REG, emit_mov_imm_reg, TWO_OPERANDS),
DECL_EMITTER(INSN_MOV_IMM_THREAD_LOCAL_MEMBASE, 
emit_mov_imm_thread_local_membase, TWO_OPERANDS),
DECL_EMITTER(INSN_MOV_IP_THREAD_LOCAL_MEMBASE, 
emit_mov_ip_thread_local_membase, SINGLE_OPERAND),
diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index 063e857..0186745 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -110,6 +110,7 @@ enum insn_type {
INSN_JMP_BRANCH,
INSN_JNE_BRANCH,
INSN_MOV_IMM_MEMBASE,
+   INSN_MOV_IMM_MEMLOCAL,
INSN_MOV_IMM_REG,
INSN_MOV_IMM_THREAD_LOCAL_MEMBASE,
INSN_MOV_IP_REG,
@@ -229,6 +230,7 @@ struct insn *memdisp_reg_insn(enum insn_type, unsigned 
long, struct var_info *);
 struct insn *reg_memdisp_insn(enum insn_type, struct var_info *, unsigned 
long);
 struct insn *imm_memdisp_insn(enum insn_type, long, long);
 struct insn *imm_membase_insn(enum insn_type, unsigned long, struct var_info 
*, long);
+struct insn *imm_memlocal_insn(enum insn_type, unsigned long, struct 
stack_slot *);
 struct insn *imm_insn(enum insn_type, unsigned long);
 struct insn *rel_insn(enum insn_type, unsigned long);
 struct insn *branch_insn(enum insn_type, struct basic_block *);
diff --git a/arch/x86/instruction.c b/arch/x86/instruction.c
index acf994b..710af52 100644
--- a/arch/x86/instruction.c
+++ b/arch/x86/instruction.c
@@ -385,6 +385,18 @@ struct insn *membase_insn(enum insn_type insn_type, struct 
var_info *src_base_re
return insn;
 }
 
+struct insn *imm_memlocal_insn(enum insn_type insn_type,
+  unsigned long imm,
+  struct stack_slot *dst_slot)
+{
+   struct insn *insn = alloc_insn(insn_type);
+   if (insn) {
+   init_imm_operand(insn, 0, imm);
+   init_memlocal_operand(insn, 1, dst_slot);
+   }
+   return insn;
+}
+
 int insert_copy_slot_32_insns(struct stack_slot *from, struct stack_slot *to,
  struct list_head *add_before, unsigned long 
bc_offset)
 {
diff --git a/arch/x86/lir-printer.c b/arch/x86/lir-printer.c
index d688f78..2980771 100644
--- a/arch/x86/lir-printer.c
+++ b/arch/x86/lir-printer.c
@@ -109,6 +109,13 @@ static int print_imm_membase(struct string *str, struct 
insn *insn)
return print_membase(str, &insn->dest);
 }
 
+static int print_imm_memlocal(struct string *str, struct insn *insn)
+{
+   print_imm(str, &insn->src);
+   str_append(str, ", ");
+   return print_memlocal(str, &insn->dest);
+}
+
 static int print_imm_memdisp(struct string *str, struct insn *insn)
 {
print_imm(str, &insn->operands[0]);
@@ -552,6 +559,12 @@ static int print_mov_imm_membase(struct string *str, 
struct insn *insn)
return print_imm_membase(str, insn);
 }
 
+static int print_mov_imm_memlocal(struct string *str, struct insn *insn)
+{
+   print_func_name(str);
+   return print_imm_memlocal(str, insn);
+}
+
 static int print_mov_imm_reg(struct string *str, struct insn *insn)
 {
print_func_name(str);
@@ -989,6 +1002,7 @@ static print_insn_fn insn_printers[] = {
[INSN_JMP_M

[PATCH] x86: fix writes below (%esp) which can be fixed at no cost

2009-08-31 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |   30 --
 1 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 2e7367f..85f2fa6 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -494,10 +494,11 @@ freg: OP_DREM(freg, freg) 1
select_insn(s, tree, reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, 
state->right->reg1, esp, 8));
 
select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmod));
-   method_args_cleanup(s, tree, 4);
 
-   select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, esp, -8));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, esp, -8, 
state->reg1));
+   select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, esp, 0));
+   select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, esp, 0, 
state->reg1));
+
+   method_args_cleanup(s, tree, 4);
 }
 
 freg:  OP_FREM(freg, freg) 1
@@ -514,10 +515,11 @@ freg: OP_FREM(freg, freg) 1
select_insn(s, tree, reg_membase_insn(INSN_MOV_XMM_MEMBASE, 
state->right->reg1, esp, 4));
 
select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmodf));
-   method_args_cleanup(s, tree, 2);
 
-   select_insn(s, tree, membase_insn(INSN_FSTP_MEMBASE, esp, -4));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, esp, -4, 
state->reg1));
+   select_insn(s, tree, membase_insn(INSN_FSTP_MEMBASE, esp, 0));
+   select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, esp, 0, 
state->reg1));
+
+   method_args_cleanup(s, tree, 2);
 }
 
 reg:   OP_REM_64(reg, reg) 1
@@ -1823,16 +1825,16 @@ arg:EXPR_ARG(freg)
 
size = get_vmtype_size(arg_expr->vm_type);
 
+   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
+
if (arg_expr->vm_type == J_FLOAT) {
select_insn(s, tree,
-   reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, -size));
+   reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, 0));
} else {
select_insn(s, tree,
-   reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, -size));
+   reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, 0));
}
 
-   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
-
state->reg1 = NULL;
 }
 %else
@@ -1877,12 +1879,12 @@ arg:EXPR_ARG(freg)
} else {
int size = get_vmtype_size(arg_expr->vm_type);
 
+   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
+
if (arg_expr->vm_type == J_FLOAT)
-   select_insn(s, tree, 
reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, -size));
+   select_insn(s, tree, 
reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, 0));
else
-   select_insn(s, tree, 
reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, -size));
-
-   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
+   select_insn(s, tree, 
reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, 0));
}
 
state->reg1 = NULL;
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] [APPEND-TO-LAST-SERIES] jit: cleanup handling of expired ranges of intervals

2009-08-31 Thread Tomek Grabiec
Having ->current_range in struct interval is very error-prone.
Instead of that, we maintain a list of expired ranges, which are moved
from range_list and are no longer considered as interval's
ranges. After linear scan, expired ranges are restored.

This fixes a crash in trace_var_liveness().

Signed-off-by: Tomek Grabiec 
---
 include/jit/vars.h |   18 --
 jit/interval.c |   41 +++--
 jit/linear-scan.c  |   15 +++
 3 files changed, 42 insertions(+), 32 deletions(-)

diff --git a/include/jit/vars.h b/include/jit/vars.h
index 2471b17..ae48752 100644
--- a/include/jit/vars.h
+++ b/include/jit/vars.h
@@ -65,16 +65,13 @@ struct live_interval {
struct list_head range_list;
 
/*
-* Points to a range from range_list which should be
-* considered as interval's starting range in operations:
-* intervals_intersect(), interval_intersection_start(),
-* interval_range_at(). It's used to speedup register
-* allocation. Intervals can have a lot of live ranges. Linear
-* scan algorithm goes through intervals in ascending order by
-* interval start. We can take advantage of this and don't
-* browse ranges past current position in some operations.
+* Contains ranges which were moved from range_list to speedup
+* some interval oprations. Intervals can have a lot of live
+* ranges. Linear scan algorithm goes through intervals in
+* ascending order by interval start. We can take advantage of
+* this and don't check ranges before current position.
 */
-   struct live_range *current_range;
+   struct list_head expired_range_list;
 
/* Linked list of child intervals.  */
struct live_interval *next_child, *prev_child;
@@ -172,7 +169,8 @@ unsigned long interval_intersection_start(struct 
live_interval *, struct live_in
 bool interval_covers(struct live_interval *, unsigned long);
 int interval_add_range(struct live_interval *, unsigned long, unsigned long);
 struct live_range *interval_range_at(struct live_interval *, unsigned long);
-void interval_update_current_range(struct live_interval *, unsigned long);
+void interval_expire_ranges_before(struct live_interval *, unsigned long);
+void interval_restore_expired_ranges(struct live_interval *);
 
 static inline unsigned long first_use_pos(struct live_interval *it)
 {
diff --git a/jit/interval.c b/jit/interval.c
index 8eb7d32..c84de36 100644
--- a/jit/interval.c
+++ b/jit/interval.c
@@ -106,6 +106,7 @@ struct live_interval *alloc_interval(struct var_info *var)
INIT_LIST_HEAD(&interval->interval_node);
INIT_LIST_HEAD(&interval->use_positions);
INIT_LIST_HEAD(&interval->range_list);
+   INIT_LIST_HEAD(&interval->expired_range_list);
}
return interval;
 }
@@ -143,8 +144,6 @@ struct live_interval *split_interval_at(struct 
live_interval *interval,
return NULL;
}
 
-   new->current_range = interval_first_range(new);
-
new->fixed_reg = interval->fixed_reg;
if (new->fixed_reg)
new->reg = interval->reg;
@@ -210,25 +209,33 @@ struct live_interval *vreg_start_interval(struct 
compilation_unit *cu, unsigned
return var->interval;
 }
 
-/**
- * Advances @it->current_range to the last range which covers @pos or
- * is before @pos.
- */
-void interval_update_current_range(struct live_interval *it, unsigned long pos)
+void interval_expire_ranges_before(struct live_interval *it, unsigned long pos)
 {
+   struct live_range *range;
+
if (pos < interval_start(it) || pos >= interval_end(it))
return;
 
-   assert (pos >= it->current_range->start);
+   range = interval_first_range(it);
 
-   while (!in_range(it->current_range, pos)) {
+   while (!in_range(range, pos)) {
struct live_range *next;
 
-   next = next_range(&it->range_list, it->current_range);
+   next = next_range(&it->range_list, range);
if (pos < next->start)
break;
 
-   it->current_range = next;
+   list_move(&range->range_list_node, &it->expired_range_list);
+   range = next;
+   }
+}
+
+void interval_restore_expired_ranges(struct live_interval *it)
+{
+   struct live_range *this, *next;
+
+   list_for_each_entry_safe(this, next, &it->expired_range_list, 
range_list_node) {
+   list_move(&this->range_list_node, &it->range_list);
}
 }
 
@@ -239,9 +246,7 @@ struct live_range *interval_range_at(struct live_interval 
*it, unsigned long pos
if (pos < interval_start(it) || pos >= interval_end(it))
return

[PATCH] x86: Introduce workarounds for valgrind to work with jato.

2009-08-31 Thread Tomek Grabiec
Jato can be compiled with workarounds which make valgrind
work with jato.

To do so, define VALGRIND variable for make:
make jato VALGRIND=y

Currently workarounds eliminate class initialization
from signal handler by unconditionally selecting
calls to vm_class_ensure_init().

Signed-off-by: Tomek Grabiec 
---
 Makefile   |5 +
 arch/x86/insn-selector.brg |   44 
 2 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/Makefile b/Makefile
index 55ef9ba..f9c8d9b 100644
--- a/Makefile
+++ b/Makefile
@@ -177,6 +177,11 @@ INSTALL:= install
 
 DEFAULT_CFLAGS += $(ARCH_CFLAGS) -g -rdynamic -std=gnu99 -D_GNU_SOURCE 
-fstack-protector-all -D_FORTIFY_SOURCE=2
 
+ifdef VALGRIND
+DEFAULT_CFLAGS += -DCONFIG_VALGRIND
+MB_DEFINES += -DCONFIG_VALGRIND
+endif
+
 # XXX: Temporary hack -Vegard
 DEFAULT_CFLAGS += -DNOT_IMPLEMENTED='fprintf(stderr, "%s:%d: warning: %s not 
implemented\n", __FILE__, __LINE__, __func__)'
 
diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 8522667..9c8dd70 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -1051,6 +1051,14 @@ reg: EXPR_CLASS_FIELD 1
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned 
long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_REG,
+   (unsigned long) vmc->static_values + 
vmf->offset, out);
+%else
if (vmc_state >= VM_CLASS_INITIALIZING) {
/* Class is already initialized; no need for fix-up. We also
 * don't want the fixup if we're already inside the
@@ -1064,6 +1072,7 @@ reg:  EXPR_CLASS_FIELD 1
/* XXX: Check return value */
add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
}
+%endif /* CONFIG_VALGRIND */
 
select_insn(s, tree, mov_insn);
 
@@ -1097,6 +1106,18 @@ freg:EXPR_FLOAT_CLASS_FIELD 1
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned 
long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned 
long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   if (expr->vm_type == J_FLOAT)
+   mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_XMM,
+   (unsigned long) vmc->static_values + 
vmf->offset, out);
+   else
+   mov_insn = memdisp_reg_insn(INSN_MOV_64_MEMDISP_XMM,
+   (unsigned long) vmc->static_values + 
vmf->offset, out);
+%else
if (vmc_state >= VM_CLASS_INITIALIZING) {
/* Class is already initialized; no need for fix-up. We also
 * don't want the fixup if we're already inside the
@@ -1118,6 +1139,7 @@ freg: EXPR_FLOAT_CLASS_FIELD 1
/* XXX: Check return value */
add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
}
+%endif /* CONFIG_VALGRIND */
 
select_insn(s, tree, mov_insn);
 }
@@ -1995,6 +2017,14 @@ stmt:STMT_STORE(EXPR_CLASS_FIELD, reg)
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned 
long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   mov_insn = reg_memdisp_insn(INSN_MOV_REG_MEMDISP,
+   src, (unsigned long) vmc->static_values + 
vmf->offset);
+%else
if (vmc_state >= VM_CLASS_INITIALIZING) {
/* Class is already initialized; no need for fix-up. We also
 * don't want the fixup if we're already inside the
@@ -2008,6 +2038,7 @@ stmt: STMT_STORE(EXPR_CLASS_FIELD, reg)
/* XXX: Check return value */
add_putstatic_fixup_site(mov_insn, vmf, s->b_parent);
}
+%endif /* CONFIG_VALGRIND */
 
select_insn(s, tree, mov_insn);
 
@@ -2044,6 +2075,18 @@ stmt:STMT_STORE(EXPR_FLOAT_CLASS_FIELD, freg)
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned 
long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   if (store_dest->vm_type == J_FLOAT)
+   mov_insn = reg_memdisp_insn(I

[PATCH 13/14] jit: optimize register use and block position calculation in regalloc

2009-08-30 Thread Tomek Grabiec
With multiple ranges per interval it is expensive to calculate
interval intersection. We do not have to check intersection between
current and intervals which have incompatible register type. That's
because this information will not be used after all. pick_register()
will not consider those registers. For example, we do not have to
check at which position XMM0 register is available if we are
allocating a general purpose register.

Signed-off-by: Tomek Grabiec 
---
 jit/linear-scan.c |9 +
 1 files changed, 9 insertions(+), 0 deletions(-)

diff --git a/jit/linear-scan.c b/jit/linear-scan.c
index 187eb49..c824104 100644
--- a/jit/linear-scan.c
+++ b/jit/linear-scan.c
@@ -210,6 +210,9 @@ static void allocate_blocked_reg(struct live_interval 
*current,
if (it->fixed_reg)
continue;
 
+   if (!reg_supports_type(it->reg, current->var_info->vm_type))
+   continue;
+
if (intervals_intersect(it, current)) {
pos = next_use_pos(it, interval_start(current));
set_use_pos(use_pos, it->reg, pos);
@@ -227,6 +230,9 @@ static void allocate_blocked_reg(struct live_interval 
*current,
if (!it->fixed_reg)
continue;
 
+   if (!reg_supports_type(it->reg, current->var_info->vm_type))
+   continue;
+
if (intervals_intersect(it, current)) {
unsigned long pos;
 
@@ -277,6 +283,9 @@ static void try_to_allocate_free_reg(struct live_interval 
*current,
}
 
list_for_each_entry(it, inactive, interval_node) {
+   if (!reg_supports_type(it->reg, current->var_info->vm_type))
+   continue;
+
if (intervals_intersect(it, current)) {
unsigned long pos;
 
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 12/14] jit: do not put to inactive list fixed reg intervals for ESP and EBP

2009-08-30 Thread Tomek Grabiec
Those registers are not considered for allocation and their numbers are
> NR_REGISTERS. Letting those fixed intervals into the register allocator
can cause memory corruption because use position arrays are of size
NR_REGISTERS.

Signed-off-by: Tomek Grabiec 
---
 jit/linear-scan.c |9 ++---
 1 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/jit/linear-scan.c b/jit/linear-scan.c
index 018daaa..187eb49 100644
--- a/jit/linear-scan.c
+++ b/jit/linear-scan.c
@@ -75,6 +75,7 @@ static void set_use_pos(unsigned long *use_pos, enum 
machine_reg reg,
/*
 * This function does the same as set_free_pos so we call this directly
 */
+   assert(reg < NR_REGISTERS);
set_free_pos(use_pos, reg, pos);
 }
 
@@ -84,6 +85,7 @@ static void set_block_pos(unsigned long *block_pos, unsigned 
long *use_pos,
/*
 * This function does the same as set_free_pos so we call this directly
 */
+   assert(reg < NR_REGISTERS);
set_free_pos(block_pos, reg, pos);
set_free_pos(use_pos, reg, pos);
 }
@@ -345,9 +347,10 @@ int allocate_registers(struct compilation_unit *cu)
 
var->interval->current_range = 
interval_first_range(var->interval);
 
-   if (var->interval->fixed_reg)
-   list_add(&var->interval->interval_node, &inactive);
-   else
+   if (var->interval->fixed_reg) {
+   if (var->interval->reg < NR_REGISTERS)
+   list_add(&var->interval->interval_node, 
&inactive);
+   } else
pqueue_insert(unhandled, var->interval);
}
 
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 10/14] jit: print variable types in regalloc trace

2009-08-30 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 jit/trace-jit.c |1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/jit/trace-jit.c b/jit/trace-jit.c
index 58ecaa4..915571d 100644
--- a/jit/trace-jit.c
+++ b/jit/trace-jit.c
@@ -360,6 +360,7 @@ void trace_regalloc(struct compilation_unit *cu)
 interval_end(interval));
 
trace_printf("\t%s", reg_name(interval->reg));
+   trace_printf("\t%-11s", get_vm_type_name(var->vm_type));
trace_printf("\t%s", interval->fixed_reg ? "fixed\t" : 
"non-fixed");
if (interval->need_spill) {
unsigned long ndx = -1;
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 11/14] jit: fix spilling of 64-bit registers.

2009-08-30 Thread Tomek Grabiec
This cleans handling of 64-bit stack slots up and fixes the buggy spilling code.
We always allocated 32-bit spill slot regardless of register type which caused
memory corruption.

Signed-off-by: Tomek Grabiec 
---
 arch/mmix/include/arch/instruction.h |   15 +-
 arch/mmix/instruction.c  |   12 +
 arch/x86/emit-code.c |4 +-
 arch/x86/include/arch/instruction.h  |   19 +--
 arch/x86/include/arch/stack-frame.h  |1 +
 arch/x86/insn-selector.brg   |   57 +--
 arch/x86/instruction.c   |   85 ++
 arch/x86/stack-frame.c   |   15 --
 include/jit/stack-slot.h |4 ++
 include/vm/types.h   |7 +++
 jit/spill-reload.c   |   42 +++--
 jit/stack-slot.c |   10 
 12 files changed, 168 insertions(+), 103 deletions(-)

diff --git a/arch/mmix/include/arch/instruction.h 
b/arch/mmix/include/arch/instruction.h
index 2fe1686..6846eb3 100644
--- a/arch/mmix/include/arch/instruction.h
+++ b/arch/mmix/include/arch/instruction.h
@@ -78,6 +78,9 @@ struct insn *ld_insn(enum insn_type, struct stack_slot *, 
struct var_info *);
  * instructions.
  */
 
+int insert_copy_slot_32_insns(struct stack_slot *, struct stack_slot *, struct 
list_head *, unsigned long);
+int insert_copy_slot_64_insns(struct stack_slot *, struct stack_slot *, struct 
list_head *, unsigned long);
+
 static inline struct insn *
 spill_insn(struct var_info *var, struct stack_slot *slot)
 {
@@ -91,18 +94,6 @@ reload_insn(struct stack_slot *slot, struct var_info *var)
 }
 
 static inline struct insn *
-push_slot_insn(struct stack_slot *slot)
-{
-   return NULL;
-}
-
-static inline struct insn *
-pop_slot_insn(struct stack_slot *slot)
-{
-   return NULL;
-}
-
-static inline struct insn *
 exception_spill_insn(struct stack_slot *slot)
 {
return NULL;
diff --git a/arch/mmix/instruction.c b/arch/mmix/instruction.c
index f79a2ca..baa4262 100644
--- a/arch/mmix/instruction.c
+++ b/arch/mmix/instruction.c
@@ -122,3 +122,15 @@ struct insn *ld_insn(enum insn_type insn_type, struct 
stack_slot *slot, struct v
}
return insn;
 }
+
+int insert_copy_slot_32_insns(struct stack_slot *from, struct stack_slot *to,
+ struct list_head *add_before, unsigned long 
bc_offset)
+{
+   return 0;
+}
+
+int insert_copy_slot_64_insns(struct stack_slot *from, struct stack_slot *to,
+ struct list_head *add_before, unsigned long 
bc_offset)
+{
+   return 0;
+}
diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c
index a0b85e0..2253202 100644
--- a/arch/x86/emit-code.c
+++ b/arch/x86/emit-code.c
@@ -734,7 +734,7 @@ emit_mov_64_memlocal_xmm(struct buffer *buf, struct operand 
*src, struct operand
unsigned long disp;
 
dest_reg = mach_reg(&dest->reg);
-   disp = slot_offset(src->slot);
+   disp = slot_offset_64(src->slot);
 
emit(buf, 0xf2);
emit(buf, 0x0f);
@@ -905,7 +905,7 @@ static void emit_mov_64_xmm_memlocal(struct buffer *buf, 
struct operand *src,
unsigned long disp;
int mod;
 
-   disp = slot_offset(dest->slot);
+   disp = slot_offset_64(dest->slot);
 
if (is_imm_8(disp))
mod = 0x01;
diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index c33bafa..063e857 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -240,6 +240,9 @@ struct insn *membase_insn(enum insn_type, struct var_info 
*, long);
  * instructions.
  */
 
+int insert_copy_slot_32_insns(struct stack_slot *, struct stack_slot *, struct 
list_head *, unsigned long);
+int insert_copy_slot_64_insns(struct stack_slot *, struct stack_slot *, struct 
list_head *, unsigned long);
+
 static inline struct insn *
 spill_insn(struct var_info *var, struct stack_slot *slot)
 {
@@ -282,22 +285,6 @@ reload_insn(struct stack_slot *slot, struct var_info *var)
return memlocal_reg_insn(insn_type, slot, var);
 }
 
-static inline struct insn *
-push_slot_insn(struct stack_slot *from)
-{
-   assert(from != NULL);
-
-   return memlocal_insn(INSN_PUSH_MEMLOCAL, from);
-}
-
-static inline struct insn *
-pop_slot_insn(struct stack_slot *to)
-{
-   assert(to != NULL);
-
-   return memlocal_insn(INSN_POP_MEMLOCAL, to);
-}
-
 static inline struct insn *jump_insn(struct basic_block *bb)
 {
return branch_insn(INSN_JMP_BRANCH, bb);
diff --git a/arch/x86/include/arch/stack-frame.h 
b/arch/x86/include/arch/stack-frame.h
index b0b42a2..bf69b27 100644
--- a/arch/x86/include/arch/stack-frame.h
+++ b/arch/x86/include/arch/stack-frame.h
@@ -43,6 +43,7 @@ struct jit_stack_frame {
 
 unsigned long frame_local_offset(struct vm_method *, struct expression *);
 unsigned long slot_offset(struct stack_slot *slot);
+unsigned long slot_offset_64(stru

[PATCH 14/14] x86: remove unconditional saving and restoring of XMM registers

2009-08-30 Thread Tomek Grabiec
We no longer need to do this because this bug has been solved:
http://jato.lighthouseapp.com/projects/29055/tickets/5-sse-registers-are-saved-and-registered-unconditionally

Signed-off-by: Tomek Grabiec 
---
 arch/x86/emit-code.c|   83 ---
 arch/x86/include/arch/stack-frame.h |1 -
 2 files changed, 0 insertions(+), 84 deletions(-)

diff --git a/arch/x86/emit-code.c b/arch/x86/emit-code.c
index 2253202..92044fd 100644
--- a/arch/x86/emit-code.c
+++ b/arch/x86/emit-code.c
@@ -64,13 +64,6 @@ static void emit_indirect_jump_reg(struct buffer *buf, enum 
machine_reg reg);
 static void emit_exception_test(struct buffer *buf, enum machine_reg reg);
 static void emit_restore_regs(struct buffer *buf);
 
-static void __emit_mov_xmm_membase(struct buffer *buf, enum machine_reg src,
-  enum machine_reg base, unsigned long offs);
-static void __emit_mov_membase_xmm(struct buffer *buf, enum machine_reg base, 
unsigned long offs, enum machine_reg dst);
-static void __emit_mov_64_xmm_membase(struct buffer *buf, enum machine_reg src,
-  enum machine_reg base, unsigned long offs);
-static void __emit_mov_64_membase_xmm(struct buffer *buf, enum machine_reg 
base, unsigned long offs, enum machine_reg dst);
-
 /
  * Common code emitters *
  /
@@ -1009,27 +1002,6 @@ void emit_prolog(struct buffer *buf, unsigned long 
nr_locals)
__emit_push_reg(buf, MACH_REG_ESI);
__emit_push_reg(buf, MACH_REG_EBX);
 
-   __emit_sub_imm_reg(buf, 8 * 8, MACH_REG_ESP);
-   if (cpu_has(X86_FEATURE_SSE2)) {
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM0, MACH_REG_ESP, 0);
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM1, MACH_REG_ESP, 8);
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM2, MACH_REG_ESP, 16);
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM3, MACH_REG_ESP, 24);
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM4, MACH_REG_ESP, 32);
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM5, MACH_REG_ESP, 40);
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM6, MACH_REG_ESP, 48);
-   __emit_mov_64_xmm_membase(buf, MACH_REG_XMM7, MACH_REG_ESP, 56);
-   } else {
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM0, MACH_REG_ESP, 0);
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM1, MACH_REG_ESP, 8);
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM2, MACH_REG_ESP, 16);
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM3, MACH_REG_ESP, 24);
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM4, MACH_REG_ESP, 32);
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM5, MACH_REG_ESP, 40);
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM6, MACH_REG_ESP, 48);
-   __emit_mov_xmm_membase(buf, MACH_REG_XMM7, MACH_REG_ESP, 56);
-   }
-
__emit_push_reg(buf, MACH_REG_EBP);
__emit_mov_reg_reg(buf, MACH_REG_ESP, MACH_REG_EBP);
 
@@ -1076,27 +1048,6 @@ static void emit_push_imm(struct buffer *buf, struct 
operand *operand)
 
 static void emit_restore_regs(struct buffer *buf)
 {
-   if (cpu_has(X86_FEATURE_SSE2)) {
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 0, MACH_REG_XMM0);
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 8, MACH_REG_XMM1);
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 16, MACH_REG_XMM2);
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 24, MACH_REG_XMM3);
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 32, MACH_REG_XMM4);
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 40, MACH_REG_XMM5);
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 48, MACH_REG_XMM6);
-   __emit_mov_64_membase_xmm(buf, MACH_REG_ESP, 56, MACH_REG_XMM7);
-   } else {
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 0, MACH_REG_XMM0);
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 8, MACH_REG_XMM1);
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 16, MACH_REG_XMM2);
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 24, MACH_REG_XMM3);
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 32, MACH_REG_XMM4);
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 40, MACH_REG_XMM5);
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 48, MACH_REG_XMM6);
-   __emit_mov_membase_xmm(buf, MACH_REG_ESP, 56, MACH_REG_XMM7);
-   }
-   __emit_add_imm_reg(buf, 8 * 8, MACH_REG_ESP);
-
__emit_pop_reg(buf, MACH_REG_EBX);
__emit_pop_reg(buf, MACH_REG_ESI);
__emit_pop_reg(buf, MACH_REG_EDI);
@@ -1631,40 +1582,6 @@ static void emit_mov_memindex_xmm(struct buffer *buf, 
struct operand *src,
emit(buf, encode_sib(src->shift, encode_reg(&src->index_reg), 
encode_reg(&src->base_reg)));
 }
 
-static void __emi

[PATCH 06/14] jit: implement precise live range calculation

2009-08-30 Thread Tomek Grabiec
For each variable its live range is calculated precisely as described
in Wimmer's master thesis "Linear Scan Register Allocation" in 5.6.3
"Build Intervals".

This patch reduces register allocator stress by generating shorter,
more precise live ranges and therefore reduces number of interval spills.

This patch also introduces distinction between even and odd use
positions. Even use positions represent input to instruction and odd
positions represent output. This allows for better register
utilization. Example:

mov r1, r2
add r2, r3

after allocation:

mov ebx, ebx  ; this can be optimized out in the future
add ebx, ebx

Signed-off-by: Tomek Grabiec 
---
 arch/x86/include/arch/instruction.h |5 --
 arch/x86/instruction.c  |5 ++
 arch/x86/use-def.c  |   28 +++
 include/jit/instruction.h   |6 +++
 include/jit/use-position.h  |6 +++
 include/jit/vars.h  |   15 ++
 jit/interval.c  |   38 +--
 jit/linear-scan.c   |   21 +++--
 jit/liveness.c  |   44 +++---
 jit/spill-reload.c  |   85 --
 test/jit/liveness-test.c|   30 ++--
 test/jit/spill-reload-test.c|   14 +++---
 12 files changed, 209 insertions(+), 88 deletions(-)

diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index 5e04d92..be321de 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -214,11 +214,6 @@ struct insn {
 
 void insn_sanity_check(void);
 
-static inline unsigned long lir_position(struct use_position *reg)
-{
-   return reg->insn->lir_pos;
-}
-
 struct insn *insn(enum insn_type);
 struct insn *memlocal_reg_insn(enum insn_type, struct stack_slot *, struct 
var_info *);
 struct insn *membase_reg_insn(enum insn_type, struct var_info *, long, struct 
var_info *);
diff --git a/arch/x86/instruction.c b/arch/x86/instruction.c
index 8213e8b..0b1e145 100644
--- a/arch/x86/instruction.c
+++ b/arch/x86/instruction.c
@@ -107,6 +107,7 @@ static void init_membase_operand(struct insn *insn, 
unsigned long idx,
operand->disp = disp;
 
init_register(&operand->base_reg, insn, base_reg->interval);
+   operand->base_reg.kind = USE_KIND_INPUT;
 }
 
 static void init_memdisp_operand(struct insn *insn, unsigned long idx,
@@ -131,6 +132,9 @@ static void init_memindex_operand(struct insn *insn, 
unsigned long idx,
 
init_register(&operand->base_reg, insn, base_reg->interval);
init_register(&operand->index_reg, insn, index_reg->interval);
+
+   operand->base_reg.kind  = USE_KIND_INPUT;
+   operand->index_reg.kind = USE_KIND_INPUT;
 }
 
 static void init_memlocal_operand(struct insn *insn, unsigned long idx,
@@ -152,6 +156,7 @@ static void init_reg_operand(struct insn *insn, unsigned 
long idx,
operand->type = OPERAND_REG;
 
init_register(&operand->reg, insn, reg->interval);
+   operand->reg.kind = insn_operand_use_kind(insn, idx);
 }
 
 static void init_rel_operand(struct insn *insn, unsigned long idx,
diff --git a/arch/x86/use-def.c b/arch/x86/use-def.c
index 59e1f2a..0730a07 100644
--- a/arch/x86/use-def.c
+++ b/arch/x86/use-def.c
@@ -248,3 +248,31 @@ int insn_uses(struct insn *insn, struct var_info **uses)
 
return nr;
 }
+
+int insn_operand_use_kind(struct insn *insn, int idx)
+{
+   struct insn_info *info;
+   int use_mask;
+   int def_mask;
+   int kind_mask;
+
+   info = get_info(insn);
+
+   if (idx == 0) {
+   use_mask = USE_SRC;
+   def_mask = DEF_SRC;
+   } else {
+   assert(idx == 1);
+   use_mask = USE_DST;
+   def_mask = DEF_DST;
+   }
+
+   kind_mask = 0;
+   if (info->flags & use_mask)
+   kind_mask |= USE_KIND_INPUT;
+
+   if (info->flags & def_mask)
+   kind_mask |= USE_KIND_OUTPUT;
+
+   return kind_mask;
+}
diff --git a/include/jit/instruction.h b/include/jit/instruction.h
index cc303fe..d360c82 100644
--- a/include/jit/instruction.h
+++ b/include/jit/instruction.h
@@ -9,11 +9,17 @@ static inline struct insn *next_insn(struct insn *insn)
return list_entry(insn->insn_list_node.next, struct insn, 
insn_list_node);
 }
 
+static inline struct insn *prev_insn(struct insn *insn)
+{
+   return list_entry(insn->insn_list_node.prev, struct insn, 
insn_list_node);
+}
+
 struct insn *alloc_insn(enum insn_type);
 void free_insn(struct insn *);
 
 int insn_defs(struct compilation_unit *, struct insn *, struct var_info **);
 int insn_uses(struct insn *, struct var_info **);
+int insn_operand_use_kind(struct insn *, int);
 
 #define for_each_insn(insn, insn_list) list_for_each_entry(insn, insn_list, 
insn_list_nod

[PATCH 09/14] jit: ensure that spill variable has the same vm_type as original variable.

2009-08-30 Thread Tomek Grabiec
This is a bug fix. The bug caused floating point variables to be
spilled as if they were general purpose registers, which led to
corruption of general purpose registers.

Signed-off-by: Tomek Grabiec 
---
 jit/compilation-unit.c |4 ++--
 jit/interval.c |1 +
 jit/spill-reload.c |1 +
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/jit/compilation-unit.c b/jit/compilation-unit.c
index 44b5c46..0dd4415 100644
--- a/jit/compilation-unit.c
+++ b/jit/compilation-unit.c
@@ -55,10 +55,10 @@ do_get_var(struct compilation_unit *cu, enum vm_type 
vm_type)
 
ret->vreg = cu->nr_vregs++;
ret->next = cu->var_infos;
-   ret->interval = alloc_interval(ret);
-
ret->vm_type = vm_type;
 
+   ret->interval = alloc_interval(ret);
+
cu->var_infos = ret;
   out:
return ret;
diff --git a/jit/interval.c b/jit/interval.c
index 23703a1..8eb7d32 100644
--- a/jit/interval.c
+++ b/jit/interval.c
@@ -102,6 +102,7 @@ struct live_interval *alloc_interval(struct var_info *var)
interval->reg = MACH_REG_UNASSIGNED;
interval->fixed_reg = false;
interval->spill_reload_reg.interval = interval;
+   interval->spill_reload_reg.vm_type = var->vm_type;
INIT_LIST_HEAD(&interval->interval_node);
INIT_LIST_HEAD(&interval->use_positions);
INIT_LIST_HEAD(&interval->range_list);
diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index 622966b..70a5aa9 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -141,6 +141,7 @@ spill_interval(struct live_interval *interval,
if (!slot)
return NULL;
 
+   assert(interval->spill_reload_reg.vm_type == 
interval->var_info->vm_type);
spill = spill_insn(&interval->spill_reload_reg, slot);
if (!spill)
return NULL;
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 08/14] jit: force spill of intervals containing caller saved registers before calls.

2009-08-30 Thread Tomek Grabiec
This fixes the following bug:
http://jato.lighthouseapp.com/projects/29055/tickets/7-ebcdx-are-unavailable-for-allocation-after-some-call-instructions

Signed-off-by: Tomek Grabiec 
---
 arch/mmix/include/arch/instruction.h |5 +
 arch/mmix/include/arch/registers.h   |3 +++
 arch/mmix/register.c |2 ++
 arch/x86/include/arch/instruction.h  |   11 +++
 arch/x86/include/arch/registers_32.h |3 +++
 arch/x86/registers_32.c  |   16 
 arch/x86/use-def.c   |   22 ++
 jit/liveness.c   |   15 ---
 8 files changed, 54 insertions(+), 23 deletions(-)

diff --git a/arch/mmix/include/arch/instruction.h 
b/arch/mmix/include/arch/instruction.h
index 27dc801..2fe1686 100644
--- a/arch/mmix/include/arch/instruction.h
+++ b/arch/mmix/include/arch/instruction.h
@@ -118,4 +118,9 @@ static inline const char *reg_name(enum machine_reg reg)
return "";
 }
 
+static inline bool insn_is_call(struct insn *insn)
+{
+   return false;
+}
+
 #endif /* __ARCH_INSTRUCTION_H */
diff --git a/arch/mmix/include/arch/registers.h 
b/arch/mmix/include/arch/registers.h
index 8faa73f..05d71bd 100644
--- a/arch/mmix/include/arch/registers.h
+++ b/arch/mmix/include/arch/registers.h
@@ -20,6 +20,9 @@ enum machine_reg {
MACH_REG_UNASSIGNED = INT_MAX,
 };
 
+#define NR_CALLER_SAVE_REGS 0
+extern enum machine_reg caller_save_regs[NR_CALLER_SAVE_REGS];
+
 static inline bool reg_supports_type(enum machine_reg reg, enum vm_type type)
 {
return true;
diff --git a/arch/mmix/register.c b/arch/mmix/register.c
index 1cde43d..8aa587b 100644
--- a/arch/mmix/register.c
+++ b/arch/mmix/register.c
@@ -1,2 +1,4 @@
 #include "arch/registers.h"
 #include "jit/vars.h"
+
+enum machine_reg caller_save_regs[NR_CALLER_SAVE_REGS] = {};
diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index be321de..c33bafa 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -323,4 +323,15 @@ static inline bool insn_is_branch(struct insn *insn)
}
 }
 
+static inline bool insn_is_call(struct insn *insn)
+{
+   switch (insn->type) {
+   case INSN_CALL_REG:
+   case INSN_CALL_REL:
+   return true;
+   default:
+   return false;
+   }
+}
+
 #endif
diff --git a/arch/x86/include/arch/registers_32.h 
b/arch/x86/include/arch/registers_32.h
index ac6e308..30fa29d 100644
--- a/arch/x86/include/arch/registers_32.h
+++ b/arch/x86/include/arch/registers_32.h
@@ -48,6 +48,9 @@ enum machine_reg {
 
 #define GPR_VM_TYPEJ_INT
 
+#define NR_CALLER_SAVE_REGS 11
+extern enum machine_reg caller_save_regs[NR_CALLER_SAVE_REGS];
+
 const char *reg_name(enum machine_reg reg);
 
 bool reg_supports_type(enum machine_reg reg, enum vm_type type);
diff --git a/arch/x86/registers_32.c b/arch/x86/registers_32.c
index 5d88f1c..ce3c476 100644
--- a/arch/x86/registers_32.c
+++ b/arch/x86/registers_32.c
@@ -26,10 +26,26 @@
 
 #include "arch/registers.h"
 #include "jit/vars.h"
+#include "vm/system.h"
 
 #include 
 #include 
 
+enum machine_reg caller_save_regs[NR_CALLER_SAVE_REGS] = {
+   MACH_REG_EAX,
+   MACH_REG_ECX,
+   MACH_REG_EDX,
+
+   MACH_REG_XMM0,
+   MACH_REG_XMM1,
+   MACH_REG_XMM2,
+   MACH_REG_XMM3,
+   MACH_REG_XMM4,
+   MACH_REG_XMM5,
+   MACH_REG_XMM6,
+   MACH_REG_XMM7
+};
+
 static const char *register_names[] = {
[MACH_REG_EAX] = "EAX",
[MACH_REG_ECX] = "ECX",
diff --git a/arch/x86/use-def.c b/arch/x86/use-def.c
index 0730a07..1653195 100644
--- a/arch/x86/use-def.c
+++ b/arch/x86/use-def.c
@@ -22,7 +22,6 @@ enum {
USE_NONE= 512,
USE_SRC = 1024,
USE_FP  = 2048, /* frame pointer */
-   DEF_CALLER_SAVED= 4096,
 
 #ifdef CONFIG_X86_32
DEF_EAX = DEF_xAX,
@@ -50,8 +49,8 @@ static struct insn_info insn_infos[] = {
DECLARE_INFO(INSN_ADD_REG_REG, USE_SRC | USE_DST | DEF_DST),
DECLARE_INFO(INSN_AND_MEMBASE_REG, USE_SRC | USE_DST | DEF_DST),
DECLARE_INFO(INSN_AND_REG_REG, USE_SRC | USE_DST | DEF_DST),
-   DECLARE_INFO(INSN_CALL_REG, USE_SRC | DEF_CALLER_SAVED),
-   DECLARE_INFO(INSN_CALL_REL, USE_NONE | DEF_CALLER_SAVED),
+   DECLARE_INFO(INSN_CALL_REG, USE_SRC | DEF_NONE),
+   DECLARE_INFO(INSN_CALL_REL, USE_NONE | DEF_NONE),
DECLARE_INFO(INSN_CLTD_REG_REG, USE_SRC | DEF_SRC | DEF_DST),
DECLARE_INFO(INSN_CMP_IMM_REG, USE_DST),
DECLARE_INFO(INSN_CMP_MEMBASE_REG, USE_SRC | USE_DST),
@@ -188,25 +187,8 @@ static struct mach_reg_def checkregs[] = {
{ MACH_REG_xAX, DEF_xAX },
{ MACH_REG_xCX, DEF_xCX },
{ MACH_REG_xDX, DEF_xDX },
-
-#ifdef CONFIG_X86_32
-   { MACH_REG_EAX, DEF_CALLER_SAVED },
-

[PATCH 04/14] jit: introduce multiple live ranges per interval.

2009-08-30 Thread Tomek Grabiec
This is needed for precise modeling of live ranges.

Signed-off-by: Tomek Grabiec 
---
 include/jit/vars.h  |   83 +---
 jit/interval.c  |  228 +-
 jit/linear-scan.c   |   51 ++
 jit/liveness.c  |   24 +++--
 jit/spill-reload.c  |   12 +-
 jit/trace-jit.c |   26 +++--
 test/jit/linear-scan-test.c |   18 +---
 test/jit/live-range-test.c  |   50 ++
 test/jit/liveness-test.c|8 +-
 9 files changed, 421 insertions(+), 79 deletions(-)

diff --git a/include/jit/vars.h b/include/jit/vars.h
index 6afb16b..f00c5f9 100644
--- a/include/jit/vars.h
+++ b/include/jit/vars.h
@@ -10,18 +10,9 @@
 
 struct live_range {
unsigned long start, end;   /* end is exclusive */
+   struct list_head range_list_node;
 };
 
-static inline unsigned long range_last_insn_pos(struct live_range *range)
-{
-   return (range->end - 1) & ~1;
-}
-
-static inline unsigned long range_first_insn_pos(struct live_range *range)
-{
-   return range->start & ~1;
-}
-
 static inline bool in_range(struct live_range *range, unsigned long offset)
 {
return (offset >= range->start) && (offset < range->end);
@@ -69,8 +60,21 @@ struct live_interval {
/* Parent variable of this interval.  */
struct var_info *var_info;
 
-   /* Live range of this interval.  */
-   struct live_range range;
+   /* Live ranges of this interval. List of not overlaping and
+  not adjacent ranges sorted in ascending order. */
+   struct list_head range_list;
+
+   /*
+* Points to a range from range_list which should be
+* considered as interval's starting range in operations:
+* intervals_intersect(), interval_intersection_start(),
+* interval_range_at(). It's used to speedup register
+* allocation. Intervals can have a lot of live ranges. Linear
+* scan algorithm goes through intervals in ascending order by
+* interval start. We can take advantage of this and don't
+* browse ranges past current position in some operations.
+*/
+   struct live_range *current_range;
 
/* Linked list of child intervals.  */
struct live_interval *next_child, *prev_child;
@@ -118,11 +122,66 @@ mark_need_reload(struct live_interval *it, struct 
live_interval *parent)
it->spill_parent = parent;
 }
 
+static inline struct live_range *node_to_range(struct list_head *node)
+{
+   return list_entry(node, struct live_range, range_list_node);
+}
+
+static inline struct live_range *
+next_range(struct list_head *list, struct live_range *range)
+{
+   if (range->range_list_node.next == list)
+   return NULL;
+
+   return list_entry(range->range_list_node.next, struct live_range,
+ range_list_node);
+}
+
+static inline unsigned long interval_start(struct live_interval *it)
+{
+   assert(!list_is_empty(&it->range_list));
+   return node_to_range(it->range_list.next)->start;
+}
+
+static inline unsigned long interval_end(struct live_interval *it)
+{
+   assert(!list_is_empty(&it->range_list));
+   return node_to_range(it->range_list.prev)->end;
+}
+
+static inline unsigned long interval_last_insn_pos(struct live_interval *it)
+{
+   return (interval_end(it) - 1) & ~1ul;
+}
+
+static inline unsigned long interval_first_insn_pos(struct live_interval *it)
+{
+   return interval_start(it) & ~1ul;
+}
+
+static inline bool interval_is_empty(struct live_interval *it)
+{
+   return list_is_empty(&it->range_list);
+}
+
+static inline struct live_range *interval_first_range(struct live_interval *it)
+{
+   assert(!interval_is_empty(it));
+   return list_first_entry(&it->range_list, struct live_range,
+   range_list_node);
+}
+
 struct live_interval *alloc_interval(struct var_info *);
 void free_interval(struct live_interval *);
 struct live_interval *split_interval_at(struct live_interval *, unsigned long 
pos);
 unsigned long next_use_pos(struct live_interval *, unsigned long);
 struct live_interval *vreg_start_interval(struct compilation_unit *, unsigned 
long);
 struct live_interval *interval_child_at(struct live_interval *, unsigned long);
+bool intervals_intersect(struct live_interval *, struct live_interval *);
+unsigned long interval_intersection_start(struct live_interval *, struct 
live_interval *);
+bool interval_covers(struct live_interval *, unsigned long);
+int interval_add_range(struct live_interval *, unsigned long, unsigned long);
+struct live_range *interval_range_at(struct live_interval *, unsigned long);
+void interval_update_current_range(struct live_interval *, unsigned long);
 
 #endif /* __JIT_VARS_H */
diff --git a/jit/interval.c b/jit/interval.c
index 9ad9d97..9e22c0c 100644
--- 

[PATCH 05/14] jit: move arch independent stuff from arch/instruction.h to jit/instruction.h

2009-08-30 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/mmix/include/arch/instruction.h |8 
 arch/x86/include/arch/instruction.h  |8 
 arch/x86/instruction.c   |3 +--
 arch/x86/use-def.c   |2 +-
 include/jit/instruction.h|   10 ++
 jit/basic-block.c|3 +--
 jit/bc-offset-mapping.c  |3 +--
 jit/compilation-unit.c   |2 +-
 jit/emit.c   |5 ++---
 jit/liveness.c   |2 +-
 jit/trace-jit.c  |1 +
 test/jit/compilation-unit-test.c |2 +-
 12 files changed, 20 insertions(+), 29 deletions(-)

diff --git a/arch/mmix/include/arch/instruction.h 
b/arch/mmix/include/arch/instruction.h
index d12bf83..27dc801 100644
--- a/arch/mmix/include/arch/instruction.h
+++ b/arch/mmix/include/arch/instruction.h
@@ -113,17 +113,9 @@ static inline bool insn_is_branch(struct insn *insn)
return insn->type == INSN_JMP;
 }
 
-struct insn *alloc_insn(enum insn_type);
-void free_insn(struct insn *);
-
-int insn_defs(struct compilation_unit *, struct insn *, struct var_info **);
-int insn_uses(struct insn *, struct var_info **);
-
 static inline const char *reg_name(enum machine_reg reg)
 {
return "";
 }
 
-#define for_each_insn(insn, insn_list) list_for_each_entry(insn, insn_list, 
insn_list_node)
-
 #endif /* __ARCH_INSTRUCTION_H */
diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index 962dc0a..5e04d92 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -328,12 +328,4 @@ static inline bool insn_is_branch(struct insn *insn)
}
 }
 
-struct insn *alloc_insn(enum insn_type);
-void free_insn(struct insn *);
-
-int insn_defs(struct compilation_unit *, struct insn *, struct var_info **);
-int insn_uses(struct insn *, struct var_info **);
-
-#define for_each_insn(insn, insn_list) list_for_each_entry(insn, insn_list, 
insn_list_node)
-
 #endif
diff --git a/arch/x86/instruction.c b/arch/x86/instruction.c
index c8e1044..8213e8b 100644
--- a/arch/x86/instruction.c
+++ b/arch/x86/instruction.c
@@ -25,8 +25,7 @@
  */
 
 #include "jit/bc-offset-mapping.h"
-
-#include "arch/instruction.h"
+#include "jit/instruction.h"
 
 #include 
 #include 
diff --git a/arch/x86/use-def.c b/arch/x86/use-def.c
index 9f76d13..59e1f2a 100644
--- a/arch/x86/use-def.c
+++ b/arch/x86/use-def.c
@@ -6,7 +6,7 @@
  */
 
 #include "jit/compilation-unit.h"
-#include "arch/instruction.h"
+#include "jit/instruction.h"
 #include "jit/vars.h"
 
 enum {
diff --git a/include/jit/instruction.h b/include/jit/instruction.h
index 376e278..cc303fe 100644
--- a/include/jit/instruction.h
+++ b/include/jit/instruction.h
@@ -9,4 +9,14 @@ static inline struct insn *next_insn(struct insn *insn)
return list_entry(insn->insn_list_node.next, struct insn, 
insn_list_node);
 }
 
+struct insn *alloc_insn(enum insn_type);
+void free_insn(struct insn *);
+
+int insn_defs(struct compilation_unit *, struct insn *, struct var_info **);
+int insn_uses(struct insn *, struct var_info **);
+
+#define for_each_insn(insn, insn_list) list_for_each_entry(insn, insn_list, 
insn_list_node)
+
+#define for_each_insn_reverse(insn, insn_list) 
list_for_each_entry_reverse(insn, insn_list, insn_list_node)
+
 #endif /* JATO_JIT_INSTRUCTION_H */
diff --git a/jit/basic-block.c b/jit/basic-block.c
index bcb2866..e19ee7e 100644
--- a/jit/basic-block.c
+++ b/jit/basic-block.c
@@ -9,10 +9,9 @@
 
 #include "jit/compilation-unit.h"
 #include "jit/basic-block.h"
+#include "jit/instruction.h"
 #include "jit/statement.h"
 
-#include "arch/instruction.h"
-
 #include "vm/die.h"
 
 #include 
diff --git a/jit/bc-offset-mapping.c b/jit/bc-offset-mapping.c
index ac92221..7db42f9 100644
--- a/jit/bc-offset-mapping.c
+++ b/jit/bc-offset-mapping.c
@@ -31,8 +31,7 @@
 #include "jit/bc-offset-mapping.h"
 #include "jit/statement.h"
 #include "jit/expression.h"
-
-#include "arch/instruction.h"
+#include "jit/instruction.h"
 
 #include "lib/buffer.h"
 
diff --git a/jit/compilation-unit.c b/jit/compilation-unit.c
index 956d8b8..44b5c46 100644
--- a/jit/compilation-unit.c
+++ b/jit/compilation-unit.c
@@ -23,11 +23,11 @@
  *
  * Please refer to the file LICENSE for details.
  */
-#include "arch/instruction.h"
 #include "arch/registers.h"
 
 #include "jit/basic-block.h"
 #include "jit/compilation-unit.h"
+#include "jit/instruction.h"
 #include "jit/stack-slot.h"
 #include "jit/statement.h"
 #include "jit/vars.h"
diff --git a/jit/emit.c b/jit/emit.c
index a65f35f..ee5c4d9 100644
--- a/jit/emit.c
+++ b/jit/emit.c
@@ -16,14 +16,13 @@
 
 #include "jit/compi

[PATCH 07/14] x86: ensure fixed-reg variables are not returned as rule results

2009-08-30 Thread Tomek Grabiec
Fixed-reg variables should never be used outside a rule. When they are
returned as rule results, they can reach another rule as input
register. If that rule uses a fixed register for the same machine
register, the conflict occurs and is not detected. This causes the
result to be incorrect.

Let's consider a rule:
reg:OP_DIV(reg, reg)
{
...
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, state->left->reg1, 
eax));
select_insn(s, tree, reg_reg_insn(INSN_CLTD_REG_REG, eax, edx));
select_insn(s, tree, reg_reg_insn(INSN_DIV_REG_REG, state->right->reg1, 
eax));
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, result));
}

It uses fixed variables for EAX and EDX. If another rule puts a fixed
variable for EAX as OP_DIV's right input, then the result will be
incorrect because the content of EAX is overwritten.

This example also shows, that spilling of fixed intervals will not
solve this problem. Instruction INSN_DIV_REG_REG has two inputs:
state->right->reg1 and eax. Suppose that state->right->reg1 is a fixed
variable for eax too. So we would have two fixed intervals with the
same use position. This conflict cannot be solved by fixed interval
spilling. It requires reloading one of the intervals into another machine
register. This can only be done for regular registers.

The conclusion is: we should use fixed-reg variables only to prepare
and save registers around some special instructions. Fixed-reg
variables should not be used in place of regular virtual registers.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |  134 ++--
 1 files changed, 79 insertions(+), 55 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 8522667..c4f2ccf 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -367,14 +367,16 @@ freg: OP_FSUB(freg, freg) 1
 
 reg:   OP_MUL(reg, EXPR_LOCAL) 1
 {
-   struct var_info *eax;
+   struct var_info *eax, *result;
 
+   result = get_var(s->b_parent, J_INT);
eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
 
-   state->reg1 = eax;
+   state->reg1 = result;
 
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, state->left->reg1, 
eax));
__binop_reg_local(state, s, tree, INSN_MUL_MEMBASE_EAX, eax, 0);
+   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, result));
 }
 
 reg:   OP_MUL(reg, reg) 1
@@ -406,7 +408,7 @@ reg:OP_MUL_64(reg, reg) 1
eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
edx = get_fixed_var(s->b_parent, MACH_REG_xDX);
 
-   state->reg1 = eax;
+   state->reg1 = get_var(s->b_parent, J_INT);
state->reg2 = get_var(s->b_parent, J_INT);
 
tmp1 = get_var(s->b_parent, J_INT);
@@ -418,28 +420,39 @@ reg:  OP_MUL_64(reg, reg) 1
 
select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, state->right->reg1, 
eax));
select_insn(s, tree, reg_reg_insn(INSN_MUL_REG_EAX, state->left->reg1, 
eax));
+   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, state->reg1));
 
select_insn(s, tree, reg_reg_insn(INSN_ADD_REG_REG, edx, state->reg2));
 }
 
 reg:   OP_DIV(reg, EXPR_LOCAL) 1
 {
+   struct var_info *eax;
+
div_reg_local(state, s, tree);
+
+   eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
+   state->reg1 = get_var(s->b_parent, J_INT);
+
+   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, state->reg1));
 }
 
 reg:   OP_DIV(reg, reg) 1
 {
+   struct var_info *eax;
struct var_info *edx;
struct var_info *result;
 
edx = get_fixed_var(s->b_parent, MACH_REG_xDX);
-   result = get_fixed_var(s->b_parent, MACH_REG_xAX);
+   eax = get_fixed_var(s->b_parent, MACH_REG_xAX);
 
+   result = get_var(s->b_parent, J_INT);
state->reg1 = result;
 
-   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, state->left->reg1, 
result));
-   select_insn(s, tree, reg_reg_insn(INSN_CLTD_REG_REG, result, edx));
-   select_insn(s, tree, reg_reg_insn(INSN_DIV_REG_REG, state->right->reg1, 
result));
+   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, state->left->reg1, 
eax));
+   select_insn(s, tree, reg_reg_insn(INSN_CLTD_REG_REG, eax, edx));
+   select_insn(s, tree, reg_reg_insn(INSN_DIV_REG_REG, state->right->reg1, 
eax));
+   select_insn(s, tree, reg_reg_insn(INSN_MOV_REG_REG, eax, result));
 }
 
 freg:  OP_DDIV(freg, freg) 1
@@ -464,29 +477,32 @@ reg:  OP_DIV_64(reg, reg) 1
 
 reg:   OP_REM(reg, EXPR_LOCAL) 1
 {
-   struct var_info *result, *remainder;
+   struct var_info *edx;
 
div_reg_local(state, s, tree);
 
-   result = get_fixed_var(s->b_parent, MACH_REG_xAX);
-   remainder = get_fixed_var(s->b_parent, MACH_REG_xDX);
+   edx = get_fixed_var(s->b_pare

[PATCH 03/14] jit: cleanup interval spilling

2009-08-30 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 jit/linear-scan.c |   86 +++--
 1 files changed, 37 insertions(+), 49 deletions(-)

diff --git a/jit/linear-scan.c b/jit/linear-scan.c
index 5538bc7..8baa914 100644
--- a/jit/linear-scan.c
+++ b/jit/linear-scan.c
@@ -110,35 +110,40 @@ static enum machine_reg pick_register(unsigned long 
*free_until_pos, enum vm_typ
return ret;
 }
 
+static void spill_interval(struct live_interval *it, unsigned long pos,
+  struct pqueue *unhandled)
+{
+   struct live_interval *new;
+
+   new = split_interval_at(it, pos);
+   if (has_use_positions(new)) {
+   unsigned long next_pos = next_use_pos(new, 0);
+
+   /* Trim interval if it does not start with a use position. */
+   if (next_pos > new->range.start)
+   new = split_interval_at(new, next_pos);
+
+   it->need_spill = true;
+   mark_need_reload(new, it);
+   pqueue_insert(unhandled, new);
+   }
+}
+
 static void __spill_interval_intersecting(struct live_interval *current,
  enum machine_reg reg,
  struct live_interval *it,
  struct pqueue *unhandled)
 {
-   struct live_interval *new;
-   unsigned long next_pos;
-
if (it->reg != reg)
return;
 
if (!ranges_intersect(&it->range, ¤t->range))
return;
 
-   new = split_interval_at(it, current->range.start);
-   it->need_spill = true;
-
-   next_pos = next_use_pos(new, new->range.start);
-
-   if (next_pos == LONG_MAX)
-   return;
-
-   new = split_interval_at(new, next_pos);
-
-   if (!has_use_positions(new))
+   if (current->range.start == it->range.start)
return;
 
-   mark_need_reload(new, it);
-   pqueue_insert(unhandled, new);
+   spill_interval(it, current->range.start, unhandled);
 }
 
 static void spill_all_intervals_intersecting(struct live_interval *current,
@@ -165,7 +170,7 @@ static void allocate_blocked_reg(struct live_interval 
*current,
 struct pqueue *unhandled)
 {
unsigned long use_pos[NR_REGISTERS], block_pos[NR_REGISTERS];
-   struct live_interval *it, *new;
+   struct live_interval *it;
int i;
enum machine_reg reg;
 
@@ -224,26 +229,16 @@ static void allocate_blocked_reg(struct live_interval 
*current,
 * so it is best to spill current itself
 */
pos = next_use_pos(current, current->range.start);
-   new = split_interval_at(current, pos);
-
-   if (has_use_positions(new)) {
-   mark_need_reload(new, current);
-   pqueue_insert(unhandled, new);
-   }
-
-   current->need_spill = 1;
-   } else if (block_pos[reg] >= current->range.end) {
-   /* Spilling made a register free for the whole current */
-   current->reg = reg;
-   spill_all_intervals_intersecting(current, reg, active,
-inactive, unhandled);
+   spill_interval(current, pos, unhandled);
} else {
-   new = split_interval_at(current, block_pos[reg]);
+   /*
+* Register is available for whole or some part of interval
+*/
+   current->reg = reg;
 
-   if (has_use_positions(new))
-   pqueue_insert(unhandled, new);
+   if (block_pos[reg] < current->range.end)
+   spill_interval(current, block_pos[reg], unhandled);
 
-   current->reg = reg;
spill_all_intervals_intersecting(current, reg, active,
 inactive, unhandled);
}
@@ -255,7 +250,7 @@ static void try_to_allocate_free_reg(struct live_interval 
*current,
 struct pqueue *unhandled)
 {
unsigned long free_until_pos[NR_REGISTERS];
-   struct live_interval *it, *new;
+   struct live_interval *it;
enum machine_reg reg;
int i;
 
@@ -292,16 +287,8 @@ static void try_to_allocate_free_reg(struct live_interval 
*current,
/*
 * Register available for the first part of the interval.
 */
-   new = split_interval_at(current, free_until_pos[reg]);
-
-   if (has_use_positions(new)) {
-   new = split_interval_at(new, next_use_pos(new, 0));
-   mark_need_reload(new, current);
-   pqueue_insert(unhandled, new);
-   }
-
+   spill_interval(current, 

[PATCH 01/14] jit: add missing trace_flush() to trace_return_value()

2009-08-30 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 jit/trace-jit.c |4 +++-
 1 files changed, 3 insertions(+), 1 deletions(-)

diff --git a/jit/trace-jit.c b/jit/trace-jit.c
index 0246c3b..1bde5e1 100644
--- a/jit/trace-jit.c
+++ b/jit/trace-jit.c
@@ -811,8 +811,10 @@ void trace_return_value(struct vm_method *vmm, unsigned 
long long value)
 
trace_printf("trace return: %s.%s%s\n", vmm->class->name, vmm->name,
 vmm->type);
-   if (type == J_VOID)
+   if (type == J_VOID) {
+   trace_flush();
return;
+   }
 
trace_printf("%12s: ", get_vm_type_name(type));
print_arg(type,(unsigned long *)  &value, &dummy);
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 02/14] jit: assign two LIR positions for each instruction.

2009-08-30 Thread Tomek Grabiec
We will need this to optimize register allocation. Every LIR
instruction has two positions assigned - consecutive even and
odd. Even interval use positions correspond to instruction input and
odd positions correspond to instruction output. The distinction between
those allows allocating the same physical register to adjacent
intervals, where the first ends at an instruction's input and the second
starts at its output. There are some more advantages of this
described in "Linear Scan Register Allocation for the Java HotSpot
Client Compiler", C. Wimmer.

This is a preliminary patch. All use positions are still even for now.

Signed-off-by: Tomek Grabiec 
---
 include/jit/compilation-unit.h   |2 +
 include/jit/vars.h   |   10 +
 jit/compilation-unit.c   |4 ++-
 jit/liveness.c   |4 +-
 jit/spill-reload.c   |   16 +++--
 jit/trace-jit.c  |   68 +++--
 test/jit/compilation-unit-test.c |2 +-
 test/jit/liveness-test.c |   22 ++--
 8 files changed, 77 insertions(+), 51 deletions(-)

diff --git a/include/jit/compilation-unit.h b/include/jit/compilation-unit.h
index f6fb0e9..4114bce 100644
--- a/include/jit/compilation-unit.h
+++ b/include/jit/compilation-unit.h
@@ -85,6 +85,8 @@ struct compilation_unit {
 */
struct radix_tree *lir_insn_map;
 
+   unsigned long last_insn;
+
/*
 * This maps machine-code offset (of gc safepoint) to gc map
 */
diff --git a/include/jit/vars.h b/include/jit/vars.h
index 177c283..6afb16b 100644
--- a/include/jit/vars.h
+++ b/include/jit/vars.h
@@ -12,6 +12,16 @@ struct live_range {
unsigned long start, end;   /* end is exclusive */
 };
 
+static inline unsigned long range_last_insn_pos(struct live_range *range)
+{
+   return (range->end - 1) & ~1;
+}
+
+static inline unsigned long range_first_insn_pos(struct live_range *range)
+{
+   return range->start & ~1;
+}
+
 static inline bool in_range(struct live_range *range, unsigned long offset)
 {
return (offset >= range->start) && (offset < range->end);
diff --git a/jit/compilation-unit.c b/jit/compilation-unit.c
index cf349b8..956d8b8 100644
--- a/jit/compilation-unit.c
+++ b/jit/compilation-unit.c
@@ -259,9 +259,11 @@ void compute_insn_positions(struct compilation_unit *cu)
 
radix_tree_insert(cu->lir_insn_map, pos, insn);
 
-   ++pos;
+   pos += 2;
}
 
bb->end_insn = pos;
}
+
+   cu->last_insn = pos;
 }
diff --git a/jit/liveness.c b/jit/liveness.c
index cc82933..3e0f586 100644
--- a/jit/liveness.c
+++ b/jit/liveness.c
@@ -142,8 +142,8 @@ static void __analyze_use_def(struct basic_block *bb, 
struct insn *insn)
 */
if (!test_bit(bb->def_set->bits, var->vreg))
set_bit(bb->use_set->bits, var->vreg);
-   }   
-   
+   }
+
nr_defs = insn_defs(bb->b_parent, insn, defs);
for (i = 0; i < nr_defs; i++) {
struct var_info *var = defs[i];
diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index af50046..5964682 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -50,7 +50,7 @@ static struct insn *first_insn(struct compilation_unit *cu, 
struct live_interval
 {
struct insn *ret;
 
-   ret = radix_tree_lookup(cu->lir_insn_map, interval->range.start);
+   ret = radix_tree_lookup(cu->lir_insn_map, 
range_first_insn_pos(&interval->range));
assert(ret != NULL);
 
return ret;
@@ -60,7 +60,7 @@ static struct insn *last_insn(struct compilation_unit *cu, 
struct live_interval
 {
struct insn *ret;
 
-   ret = radix_tree_lookup(cu->lir_insn_map, interval->range.end - 1);
+   ret = radix_tree_lookup(cu->lir_insn_map, 
range_last_insn_pos(&interval->range));
assert(ret != NULL);
 
return ret;
@@ -86,7 +86,7 @@ static struct list_head *bb_last_spill_node(struct 
basic_block *bb)
if (bb->end_insn == bb->start_insn)
return &bb->insn_list;
 
-   last = radix_tree_lookup(bb->b_parent->lir_insn_map, bb->end_insn - 1);
+   last = radix_tree_lookup(bb->b_parent->lir_insn_map, bb->end_insn - 2);
assert(last);
 
if (insn_is_branch(last))
@@ -190,6 +190,16 @@ static int __insert_spill_reload_insn(struct live_interval 
*interval, struct com
goto out;
 
if (interval->need_reload) {
+   /*
+* Intervals which start with a DEF position (odd
+* numbers) should not be reloaded. One reason for
+* this is that they do not have to because register
+* content is overriden. Another reason is that we
+* can't inse

[PATCH 3/3] x86: fix wrong argument cleanup count for EXPR_ANEWARRAY

2009-08-29 Thread Tomek Grabiec
Selected code was adding 4 bytes too much to ESP, which led to
a memory corruption.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index d7f506b..8522667 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -1368,7 +1368,7 @@ reg:EXPR_ANEWARRAY(reg)
 select_insn(s, tree, rel_insn(INSN_CALL_REL,
(unsigned long) vm_object_alloc_array));
 
-method_args_cleanup(s, tree, 3);
+method_args_cleanup(s, tree, 2);
select_exception_test(s, tree);
 }
 %else
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 2/3] jit: remove redundant compute_boundaries()

2009-08-29 Thread Tomek Grabiec
The values of .start_insn and .end_insn can be computed in 
compute_insn_positions().

Signed-off-by: Tomek Grabiec 
---
 jit/compilation-unit.c |4 
 jit/liveness.c |   17 -
 2 files changed, 4 insertions(+), 17 deletions(-)

diff --git a/jit/compilation-unit.c b/jit/compilation-unit.c
index d9dc523..cf349b8 100644
--- a/jit/compilation-unit.c
+++ b/jit/compilation-unit.c
@@ -252,6 +252,8 @@ void compute_insn_positions(struct compilation_unit *cu)
for_each_basic_block(bb, &cu->bb_list) {
struct insn *insn;
 
+   bb->start_insn = pos;
+
for_each_insn(insn, &bb->insn_list) {
insn->lir_pos = pos;
 
@@ -259,5 +261,7 @@ void compute_insn_positions(struct compilation_unit *cu)
 
++pos;
}
+
+   bb->end_insn = pos;
}
 }
diff --git a/jit/liveness.c b/jit/liveness.c
index 619a988..cc82933 100644
--- a/jit/liveness.c
+++ b/jit/liveness.c
@@ -200,27 +200,10 @@ static int init_sets(struct compilation_unit *cu)
return err;
 }
 
-static void compute_boundaries(struct compilation_unit *cu)
-{
-   unsigned long insn_idx = 0;
-   struct basic_block *bb;
-   struct insn *insn;
-
-   for_each_basic_block(bb, &cu->bb_list) {
-   bb->start_insn = insn_idx;
-   for_each_insn(insn, &bb->insn_list) {
-   insn_idx++;
-   }
-   bb->end_insn = insn_idx;
-   }
-}
-
 int analyze_liveness(struct compilation_unit *cu)
 {
int err = 0;
 
-   compute_boundaries(cu);
-
err = init_sets(cu);
if (err)
goto out;
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/3] jit: remove unused basic block sorting code

2009-08-29 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/jit/compilation-unit.h |1 -
 jit/compilation-unit.c |   15 ---
 2 files changed, 0 insertions(+), 16 deletions(-)

diff --git a/include/jit/compilation-unit.h b/include/jit/compilation-unit.h
index 66aef32..f6fb0e9 100644
--- a/include/jit/compilation-unit.h
+++ b/include/jit/compilation-unit.h
@@ -107,7 +107,6 @@ struct var_info *get_fixed_var(struct compilation_unit *, 
enum machine_reg);
 struct basic_block *find_bb(struct compilation_unit *, unsigned long);
 unsigned long nr_bblocks(struct compilation_unit *);
 void compute_insn_positions(struct compilation_unit *);
-int sort_basic_blocks(struct compilation_unit *);
 
 #define for_each_variable(var, var_list) for (var = var_list; var != NULL; var 
= var->next)
 
diff --git a/jit/compilation-unit.c b/jit/compilation-unit.c
index 9093d82..d9dc523 100644
--- a/jit/compilation-unit.c
+++ b/jit/compilation-unit.c
@@ -261,18 +261,3 @@ void compute_insn_positions(struct compilation_unit *cu)
}
}
 }
-
-static int bb_list_compare(const struct list_head **e1, const struct list_head 
**e2)
-{
-   struct basic_block *bb1, *bb2;
-
-   bb1 = list_entry(*e1, struct basic_block, bb_list_node);
-   bb2 = list_entry(*e2, struct basic_block, bb_list_node);
-
-   return bb1->start - bb2->start;
-}
-
-int sort_basic_blocks(struct compilation_unit *cu)
-{
-   return list_sort(&cu->bb_list, bb_list_compare);
-}
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH] jit: remove trace_flush() from the middle of compile()

2009-08-23 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 jit/compiler.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/jit/compiler.c b/jit/compiler.c
index 645e9b2..1fa321a 100644
--- a/jit/compiler.c
+++ b/jit/compiler.c
@@ -66,7 +66,7 @@ int compile(struct compilation_unit *cu)
err = analyze_control_flow(cu);
if (err)
goto out;
-   trace_flush();
+
err = convert_to_ir(cu);
if (err)
goto out;
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[RFC][PATCH 3/3] jit: fix data flow resolution code in presence of empty basic blocks.

2009-08-19 Thread Tomek Grabiec
Data flow resolution code assumed that basic blocks always contain
some struct insn, before which spill/reload instructions can be
put. When data flow resolution was run on an empty basic block, it worked
on an invalid struct insn pointer and entered an infinite loop.

This patch also fixes another bug. Data flow resolution didn't work
correctly when basic block in which spill occurs was not ended by a
branch. That's because in this case spill was inserted after the last
instruction, which is correct, but the corresponding spill slot moves
(push+pop) were inserted before last instruction - before the slot was
assigned a correct value.

Empty basic blocks can be a result of the following bytecode:

  iconst_0
  iconst_1
  ifeq lab
  pop  // This and the next instruction generate no
  iconst_1 // LIR instructions.
lab:
  

Cc: Arthur HUILLET 
Signed-off-by: Tomek Grabiec 
---
 include/jit/basic-block.h |1 -
 jit/basic-block.c |   19 
 jit/spill-reload.c|   70 ++---
 3 files changed, 53 insertions(+), 37 deletions(-)

diff --git a/include/jit/basic-block.h b/include/jit/basic-block.h
index e50f6c6..d5452a4 100644
--- a/include/jit/basic-block.h
+++ b/include/jit/basic-block.h
@@ -81,7 +81,6 @@ struct basic_block *bb_split(struct basic_block *, unsigned 
long);
 void bb_add_stmt(struct basic_block *, struct statement *);
 void bb_add_insn(struct basic_block *, struct insn *);
 struct insn *bb_first_insn(struct basic_block *);
-struct insn *bb_last_insn(struct basic_block *);
 int bb_add_successor(struct basic_block *, struct basic_block *);
 int bb_add_mimic_stack_expr(struct basic_block *, struct expression *);
 struct statement *bb_remove_last_stmt(struct basic_block *bb);
diff --git a/jit/basic-block.c b/jit/basic-block.c
index 6de5c3f..bcb2866 100644
--- a/jit/basic-block.c
+++ b/jit/basic-block.c
@@ -174,25 +174,6 @@ struct insn *bb_first_insn(struct basic_block *bb)
return list_entry(bb->insn_list.next, struct insn, insn_list_node);
 }
 
-struct insn *bb_last_insn(struct basic_block *bb)
-{
-   struct insn *this = list_entry(bb->insn_list.prev, struct insn, 
insn_list_node);
-
-   /*
-* We want to return the last "real" instruction of the basic block. 
Taking the
-* last of the insn_list will not work in case a live interval has been 
spilled
-* right after the final jump of the basic block.
-* This is a side effect of the linear scan algorithm.
-*
-* As a result, we browse instructions starting from the last, in order 
to find the one
-* that has a LIR position matching the position for the end of the 
block.
-*/
-   while (this->lir_pos != bb->end_insn - 1) {
-   this = list_entry(this->insn_list_node.prev, struct insn, 
insn_list_node);
-   }
-   return this;
-}
-
 static int __bb_add_neighbor(void *new, void **array, unsigned long *nb)
 {
unsigned long new_size;
diff --git a/jit/spill-reload.c b/jit/spill-reload.c
index 5ced80e..af50046 100644
--- a/jit/spill-reload.c
+++ b/jit/spill-reload.c
@@ -25,6 +25,7 @@
  */
 
 #include "jit/compilation-unit.h"
+#include "jit/instruction.h"
 #include "jit/stack-slot.h"
 #include "jit/compiler.h"
 
@@ -65,10 +66,40 @@ static struct insn *last_insn(struct compilation_unit *cu, 
struct live_interval
return ret;
 }
 
+/**
+ * Returns the node before which spill instructions should be inserted
+ * when they are supposed to be executed just before control leaves
+ * given basic block. When basic block is ended with a branch
+ * instruction it returns node of that branch; otherwise it returns
+ * the next node.
+ */
+static struct list_head *bb_last_spill_node(struct basic_block *bb)
+{
+   struct insn *last;
+
+   /*
+* basic block's instruction list might not be empty and
+* contain only spill/reload instructions. In this situation
+* we also consider basic block as empty (no lir positions) so
+* we don't rely on ->insn_list here.
+*/
+   if (bb->end_insn == bb->start_insn)
+   return &bb->insn_list;
+
+   last = radix_tree_lookup(bb->b_parent->lir_insn_map, bb->end_insn - 1);
+   assert(last);
+
+   if (insn_is_branch(last))
+   return &last->insn_list_node;
+
+   return last->insn_list_node.next;
+}
+
 static struct stack_slot *
 spill_interval(struct live_interval *interval,
   struct compilation_unit *cu,
-  struct insn *last)
+  struct list_head *spill_before,
+  unsigned long bc_offset)
 {
struct stack_slot *slot;
struct insn *spill;
@@ -81,21 +112,23 @@ spill_interval(struct live_interval *interval,
if (!spill)
return NULL;
 
-   spill->bytecode_offset = last->by

[PATCH 2/3] x86: make insn_is_branch() return true for INSN_RET

2009-08-19 Thread Tomek Grabiec
INSN_RET should be considered a branch because it jumps to a different
basic block. It is used in LIR for athrow.

Signed-off-by: Tomek Grabiec 
---
 arch/x86/include/arch/instruction.h |1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/arch/instruction.h 
b/arch/x86/include/arch/instruction.h
index b9d71b9..962dc0a 100644
--- a/arch/x86/include/arch/instruction.h
+++ b/arch/x86/include/arch/instruction.h
@@ -321,6 +321,7 @@ static inline bool insn_is_branch(struct insn *insn)
case INSN_JMP_MEMBASE:
case INSN_JMP_MEMINDEX:
case INSN_JNE_BRANCH:
+   case INSN_RET:
return true;
default:
return false;
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


[PATCH 1/3] jit: introduce include/jit/instruction.h for arch-independent stuff

2009-08-19 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 include/jit/instruction.h |   12 
 1 files changed, 12 insertions(+), 0 deletions(-)
 create mode 100644 include/jit/instruction.h

diff --git a/include/jit/instruction.h b/include/jit/instruction.h
new file mode 100644
index 000..376e278
--- /dev/null
+++ b/include/jit/instruction.h
@@ -0,0 +1,12 @@
+#ifndef JATO_JIT_INSTRUCTION_H
+#define JATO_JIT_INSTRUCTION_H
+
+#include "arch/instruction.h"
+#include "lib/list.h"
+
+static inline struct insn *next_insn(struct insn *insn)
+{
+   return list_entry(insn->insn_list_node.next, struct insn, 
insn_list_node);
+}
+
+#endif /* JATO_JIT_INSTRUCTION_H */
-- 
1.6.3.3


--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


Re: [PATCH 3/3] spill-reload: Use radix_tree_lookup() in insert_mov_insns()

2009-08-19 Thread Tomek Grabiec
2009/8/19 Pekka Enberg :
> Hi Tomek,
>
> On Tue, 2009-08-18 at 21:21 +0200, Tomek Grabiec wrote:
>> Like I said on IRC, this will not work for empty basic blocks, because
>> spill_at_insn will belong to the preceding
>> basic block. This causes that instructions will be added to different
>> (preceding) basic block and might not be executed on some
>> execution paths.
>
> Can we eliminate empty basic blocks after instruction selection
> completely?
>

I think it is doable. We would have to browse the bb list, and for
empty basic blocks update branch targets (separately for
tableswitch&lookupswitch tables) and successors/predecessors.

In my opinion, it adds some overhead which can be avoided if
algorithms can deal with empty basic blocks.


-- 
Tomek Grabiec

--
Let Crystal Reports handle the reporting - Free Crystal Reports 2008 30-Day 
trial. Simplify your report design, integration and deployment - and focus on 
what you do best, core application coding. Discover what's new with 
Crystal Reports now.  http://p.sf.net/sfu/bobj-july
___
Jatovm-devel mailing list
Jatovm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/jatovm-devel


  1   2   3   4   5   6   7   8   9   >