From: Tomek Grabiec <tgrab...@gmail.com>

When running under valgrind, the workaround eliminates class initialization
from the signal handler by unconditionally selecting calls to
vm_class_ensure_init() instead of relying on the static guard page fix-up.
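To illustrate the idea (not part of the patch): under valgrind the selected
code for a getstatic amounts to an explicit vm_class_ensure_init() call
followed by a plain load, while the normal path keeps the lazy guard-page
scheme. The standalone C sketch below models only that control flow; the
struct layout, the getstatic() helper and main() are made up for the example,
whereas running_on_valgrind, vm_class_ensure_init() and the guard-page/fixup
mechanism are the names used by the patch.

/* sketch.c -- illustrative only; stubs stand in for the real VM machinery. */
#include <stdbool.h>
#include <stdio.h>

struct vm_class {
        bool initialized;
        long static_values[16];
};

static bool running_on_valgrind;        /* set from RUNNING_ON_VALGRIND at startup */

static void vm_class_ensure_init(struct vm_class *vmc)
{
        if (!vmc->initialized) {
                /* would run the class initializer (<clinit>) here */
                vmc->initialized = true;
        }
}

/* Roughly what the JIT-compiled getstatic ends up doing. */
static long getstatic(struct vm_class *vmc, unsigned int idx)
{
        if (running_on_valgrind) {
                /* valgrind path: explicit init call, then a plain load */
                vm_class_ensure_init(vmc);
                return vmc->static_values[idx];
        }

        if (vmc->initialized)
                return vmc->static_values[idx];

        /*
         * Normal path: the emitted load points at the protected
         * static_guard_page; the SIGSEGV handler initializes the class and
         * patches the instruction (add_getstatic_fixup_site() in the patch).
         * Modeled here as a direct call to keep the sketch runnable.
         */
        vm_class_ensure_init(vmc);
        return vmc->static_values[idx];
}

int main(void)
{
        struct vm_class cls = { .static_values = { [3] = 42 } };

        running_on_valgrind = true;     /* pretend RUNNING_ON_VALGRIND fired */
        printf("%ld\n", getstatic(&cls, 3));
        return 0;
}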
[ penb...@cs.helsinki.fi: use RUNNING_ON_VALGRIND for auto-detection ]

Signed-off-by: Tomek Grabiec <tgrab...@gmail.com>
Signed-off-by: Pekka Enberg <penb...@cs.helsinki.fi>
---
 arch/x86/insn-selector.brg |  132 +++++++++++++++++++++++++++++---------
 include/jit/compiler.h     |    2 +
 test/arch-x86/Makefile     |    1 -
 vm/jato.c                  |   27 ++++++---
 4 files changed, 109 insertions(+), 53 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 3f7070b..e587c96 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -1029,18 +1029,27 @@ reg: EXPR_CLASS_FIELD 1
         vmc_state = vmc->state;
         vm_monitor_unlock(&vmc->monitor);
 
-        if (vmc_state >= VM_CLASS_INITIALIZING) {
-                /* Class is already initialized; no need for fix-up. We also
-                 * don't want the fixup if we're already inside the
-                 * initializer. */
+        if (running_on_valgrind) {
+                select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+                select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+                method_args_cleanup(s, tree, 1);
+
                 mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_REG,
-                        (unsigned long) vmc->static_values + vmf->offset, out);
+                                (unsigned long) vmc->static_values + vmf->offset, out);
         } else {
-                mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_REG,
-                        (unsigned long) static_guard_page, out);
+                if (vmc_state >= VM_CLASS_INITIALIZING) {
+                        /* Class is already initialized; no need for fix-up. We also
+                         * don't want the fixup if we're already inside the
+                         * initializer. */
+                        mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_REG,
+                                (unsigned long) vmc->static_values + vmf->offset, out);
+                } else {
+                        mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_REG,
+                                (unsigned long) static_guard_page, out);
 
-                /* XXX: Check return value */
-                add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                        /* XXX: Check return value */
+                        add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                }
         }
 
         select_insn(s, tree, mov_insn);
@@ -1075,10 +1084,11 @@ freg: EXPR_FLOAT_CLASS_FIELD 1
         vmc_state = vmc->state;
         vm_monitor_unlock(&vmc->monitor);
 
-        if (vmc_state >= VM_CLASS_INITIALIZING) {
-                /* Class is already initialized; no need for fix-up. We also
-                 * don't want the fixup if we're already inside the
-                 * initializer. */
+        if (running_on_valgrind) {
+                select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+                select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+                method_args_cleanup(s, tree, 1);
+
                 if (expr->vm_type == J_FLOAT)
                         mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_XMM,
                                 (unsigned long) vmc->static_values + vmf->offset, out);
@@ -1086,15 +1096,27 @@ freg: EXPR_FLOAT_CLASS_FIELD 1
                         mov_insn = memdisp_reg_insn(INSN_MOV_64_MEMDISP_XMM,
                                 (unsigned long) vmc->static_values + vmf->offset, out);
         } else {
-                if (expr->vm_type == J_FLOAT)
-                        mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_XMM,
-                                (unsigned long) static_guard_page, out);
-                else
-                        mov_insn = memdisp_reg_insn(INSN_MOV_64_MEMDISP_XMM,
-                                (unsigned long) static_guard_page, out);
-
-                /* XXX: Check return value */
-                add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                if (vmc_state >= VM_CLASS_INITIALIZING) {
+                        /* Class is already initialized; no need for fix-up. We also
+                         * don't want the fixup if we're already inside the
+                         * initializer. */
+                        if (expr->vm_type == J_FLOAT)
+                                mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_XMM,
+                                        (unsigned long) vmc->static_values + vmf->offset, out);
+                        else
+                                mov_insn = memdisp_reg_insn(INSN_MOV_64_MEMDISP_XMM,
+                                        (unsigned long) vmc->static_values + vmf->offset, out);
+                } else {
+                        if (expr->vm_type == J_FLOAT)
+                                mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_XMM,
+                                        (unsigned long) static_guard_page, out);
+                        else
+                                mov_insn = memdisp_reg_insn(INSN_MOV_64_MEMDISP_XMM,
+                                        (unsigned long) static_guard_page, out);
+
+                        /* XXX: Check return value */
+                        add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                }
         }
 
         select_insn(s, tree, mov_insn);
@@ -1978,18 +2000,27 @@ stmt: STMT_STORE(EXPR_CLASS_FIELD, reg)
         vmc_state = vmc->state;
         vm_monitor_unlock(&vmc->monitor);
 
-        if (vmc_state >= VM_CLASS_INITIALIZING) {
-                /* Class is already initialized; no need for fix-up. We also
-                 * don't want the fixup if we're already inside the
-                 * initializer. */
+        if (running_on_valgrind) {
+                select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+                select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+                method_args_cleanup(s, tree, 1);
+
                 mov_insn = reg_memdisp_insn(INSN_MOV_REG_MEMDISP,
-                        src, (unsigned long) vmc->static_values + vmf->offset);
+                                src, (unsigned long) vmc->static_values + vmf->offset);
         } else {
-                mov_insn = reg_memdisp_insn(INSN_MOV_REG_MEMDISP,
-                        src, (unsigned long) static_guard_page);
+                if (vmc_state >= VM_CLASS_INITIALIZING) {
+                        /* Class is already initialized; no need for fix-up. We also
+                         * don't want the fixup if we're already inside the
+                         * initializer. */
+                        mov_insn = reg_memdisp_insn(INSN_MOV_REG_MEMDISP,
+                                src, (unsigned long) vmc->static_values + vmf->offset);
+                } else {
+                        mov_insn = reg_memdisp_insn(INSN_MOV_REG_MEMDISP,
+                                src, (unsigned long) static_guard_page);
 
-                /* XXX: Check return value */
-                add_putstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                        /* XXX: Check return value */
+                        add_putstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                }
         }
 
         select_insn(s, tree, mov_insn);
@@ -2027,10 +2058,11 @@ stmt: STMT_STORE(EXPR_FLOAT_CLASS_FIELD, freg)
         vmc_state = vmc->state;
         vm_monitor_unlock(&vmc->monitor);
 
-        if (vmc_state >= VM_CLASS_INITIALIZING) {
-                /* Class is already initialized; no need for fix-up. We also
-                 * don't want the fixup if we're already inside the
-                 * initializer. */
+        if (running_on_valgrind) {
+                select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+                select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+                method_args_cleanup(s, tree, 1);
+
                 if (store_dest->vm_type == J_FLOAT)
                         mov_insn = reg_memdisp_insn(INSN_MOV_XMM_MEMDISP,
                                 src, (unsigned long) vmc->static_values + vmf->offset);
@@ -2038,15 +2070,27 @@ stmt: STMT_STORE(EXPR_FLOAT_CLASS_FIELD, freg)
                         mov_insn = reg_memdisp_insn(INSN_MOV_64_XMM_MEMDISP,
                                 src, (unsigned long) vmc->static_values + vmf->offset);
         } else {
-                if (store_dest->vm_type == J_FLOAT)
-                        mov_insn = reg_memdisp_insn(INSN_MOV_XMM_MEMDISP,
-                                src, (unsigned long) static_guard_page);
-                else
-                        mov_insn = reg_memdisp_insn(INSN_MOV_64_XMM_MEMDISP,
-                                src, (unsigned long) static_guard_page);
-
-                /* XXX: Check return value */
-                add_putstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                if (vmc_state >= VM_CLASS_INITIALIZING) {
+                        /* Class is already initialized; no need for fix-up. We also
+                         * don't want the fixup if we're already inside the
+                         * initializer. */
+                        if (store_dest->vm_type == J_FLOAT)
+                                mov_insn = reg_memdisp_insn(INSN_MOV_XMM_MEMDISP,
+                                        src, (unsigned long) vmc->static_values + vmf->offset);
+                        else
+                                mov_insn = reg_memdisp_insn(INSN_MOV_64_XMM_MEMDISP,
+                                        src, (unsigned long) vmc->static_values + vmf->offset);
+                } else {
+                        if (store_dest->vm_type == J_FLOAT)
+                                mov_insn = reg_memdisp_insn(INSN_MOV_XMM_MEMDISP,
+                                        src, (unsigned long) static_guard_page);
+                        else
+                                mov_insn = reg_memdisp_insn(INSN_MOV_64_XMM_MEMDISP,
+                                        src, (unsigned long) static_guard_page);
+
+                        /* XXX: Check return value */
+                        add_putstatic_fixup_site(mov_insn, vmf, s->b_parent);
+                }
         }
 
         select_insn(s, tree, mov_insn);
diff --git a/include/jit/compiler.h b/include/jit/compiler.h
index 014fb43..6e1196a 100644
--- a/include/jit/compiler.h
+++ b/include/jit/compiler.h
@@ -110,6 +110,8 @@ extern bool opt_trace_exceptions;
 extern bool opt_trace_bytecode;
 extern bool opt_trace_compile;
 
+extern bool running_on_valgrind;
+
 bool method_matches_regex(struct vm_method *vmm);
 
 static inline bool cu_matches_regex(struct compilation_unit *cu)
diff --git a/test/arch-x86/Makefile b/test/arch-x86/Makefile
index 83a2988..e5cbb63 100644
--- a/test/arch-x86/Makefile
+++ b/test/arch-x86/Makefile
@@ -15,7 +15,6 @@ TOPLEVEL_OBJS := \
         arch/x86/emit-code.o \
         arch/x86/exception.o \
         arch/x86/init.o \
-        arch/x86/insn-selector.o \
         arch/x86/instruction.o \
         arch/x86/registers$(ARCH_POSTFIX).o \
         arch/x86/stack-frame.o \
diff --git a/vm/jato.c b/vm/jato.c
index 924ee24..b50ef52 100644
--- a/vm/jato.c
+++ b/vm/jato.c
@@ -25,19 +25,20 @@
  * Please refer to the file LICENSE for details.
  */
 
-#include <ctype.h>
-#include <errno.h>
-#include <regex.h>
+#include <valgrind/valgrind.h>
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
 #include <signal.h>
 #include <stdarg.h>
-#include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/utsname.h>
-#include <time.h>
 #include <unistd.h>
+#include <ctype.h>
+#include <errno.h>
+#include <regex.h>
+#include <stdio.h>
+#include <time.h>
 
 #include "cafebabe/access.h"
 #include "cafebabe/attribute_info.h"
@@ -87,6 +88,11 @@ static char *exe_name;
 static unsigned int nr_java_args;
 static char **java_args;
 
+/*
+ * Enable JIT workarounds for valgrind.
+ */
+bool running_on_valgrind;
+
 static void __attribute__((noreturn)) vm_exit(int status)
 {
         clear_exception();
@@ -1286,6 +1292,11 @@ main(int argc, char *argv[])
         setvbuf(stderr, NULL, _IONBF, 0);
 #endif
 
+        if (RUNNING_ON_VALGRIND) {
+                printf("JIT: Enabling workarounds for valgrind.\n");
+                running_on_valgrind = true;
+        }
+
         arch_init();
         init_literals_hash_map();
         init_system_properties();
-- 
1.5.6.3