Re: [PATCH] x86: fix writes below (%esp) which can be fixed at no cost

2009-08-31 Thread Pekka Enberg
Hi Tomek,

On Mon, 2009-08-31 at 20:30 +0200, Tomek Grabiec wrote:
> Signed-off-by: Tomek Grabiec 
> ---
>  arch/x86/insn-selector.brg |   30 ++++++++++++++++--------------
>  1 files changed, 16 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
> index 2e7367f..85f2fa6 100644
> --- a/arch/x86/insn-selector.brg
> +++ b/arch/x86/insn-selector.brg
> @@ -494,10 +494,11 @@ freg:   OP_DREM(freg, freg) 1
>   select_insn(s, tree, reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, state->right->reg1, esp, 8));
>  
>   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmod));
> - method_args_cleanup(s, tree, 4);
>  
> - select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, esp, -8));
> - select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, esp, -8, state->reg1));
> + select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, esp, 0));
> + select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, esp, 0, state->reg1));
> +
> + method_args_cleanup(s, tree, 4);
>  }
>  
>  freg:OP_FREM(freg, freg) 1
> @@ -514,10 +515,11 @@ freg:   OP_FREM(freg, freg) 1
>   select_insn(s, tree, reg_membase_insn(INSN_MOV_XMM_MEMBASE, state->right->reg1, esp, 4));
>  
>   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmodf));
> - method_args_cleanup(s, tree, 2);
>  
> - select_insn(s, tree, membase_insn(INSN_FSTP_MEMBASE, esp, -4));
> - select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, esp, -4, state->reg1));
> + select_insn(s, tree, membase_insn(INSN_FSTP_MEMBASE, esp, 0));
> + select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, esp, 0, state->reg1));
> +
> + method_args_cleanup(s, tree, 2);
>  }
>  
>  reg: OP_REM_64(reg, reg) 1

As mentioned on IRC: the first two hunks make the generated asm less
readable, so I think we should use get_scratch_slot() here.
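
Something along these lines, perhaps (a rough sketch only: slot_offset()
and the cu/stack-frame plumbing are assumptions about the surrounding
code, not the actual API):

	/* Allocate a dedicated scratch slot in the stack frame instead
	 * of reusing the area at 0(%esp): */
	struct stack_slot *slot = get_scratch_slot(cu->stack_frame);

	select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmod));
	method_args_cleanup(s, tree, 4);

	/* An %ebp-relative slot is unaffected by the argument cleanup,
	 * so the ordering no longer matters and the generated asm stays
	 * readable: */
	select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, ebp, slot_offset(slot)));
	select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, ebp, slot_offset(slot), state->reg1));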




Re: [PATCH] x86: Introduce workarounds for valgrind to work with jato.

2009-08-31 Thread Pekka Enberg
Hi Tomek,

On Mon, 2009-08-31 at 16:14 +0200, Tomek Grabiec wrote:
> Jato can be compiled with workarounds that make valgrind
> work with jato.
> 
> To do so, define the VALGRIND variable for make:
> make jato VALGRIND=y
> 
> Currently, the workarounds eliminate class initialization
> from the signal handler by unconditionally selecting
> calls to vm_class_ensure_init().
> 
> Signed-off-by: Tomek Grabiec 

Can we turn this into an -Xvalgrind command line option instead? We're
still in the early stages of development and are likely to use it a lot.
An extra compile-time option will make debugging harder than necessary, I
think.
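
Roughly what I have in mind (a sketch only; opt_valgrind and the
option-parsing side are assumed names, not existing code):

	/* vm/jato.c, or wherever the command line is parsed: */
	bool opt_valgrind;	/* set to true when -Xvalgrind is given */

	/* arch/x86/insn-selector.brg: test the flag at run time instead
	 * of compiling the workaround in behind %ifdef CONFIG_VALGRIND: */
	if (opt_valgrind) {
		select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
		select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
		method_args_cleanup(s, tree, 1);
	}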

Pekka




[PATCH] x86: fix writes below (%esp) which can be fixed at no cost

2009-08-31 Thread Tomek Grabiec

Signed-off-by: Tomek Grabiec 
---
 arch/x86/insn-selector.brg |   30 ++++++++++++++++--------------
 1 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 2e7367f..85f2fa6 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -494,10 +494,11 @@ freg: OP_DREM(freg, freg) 1
select_insn(s, tree, reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, state->right->reg1, esp, 8));
 
select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmod));
-   method_args_cleanup(s, tree, 4);
 
-   select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, esp, -8));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, esp, -8, state->reg1));
+   select_insn(s, tree, membase_insn(INSN_FSTP_64_MEMBASE, esp, 0));
+   select_insn(s, tree, membase_reg_insn(INSN_MOV_64_MEMBASE_XMM, esp, 0, state->reg1));
+
+   method_args_cleanup(s, tree, 4);
 }
 
 freg:  OP_FREM(freg, freg) 1
@@ -514,10 +515,11 @@ freg: OP_FREM(freg, freg) 1
select_insn(s, tree, reg_membase_insn(INSN_MOV_XMM_MEMBASE, state->right->reg1, esp, 4));
 
select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)fmodf));
-   method_args_cleanup(s, tree, 2);
 
-   select_insn(s, tree, membase_insn(INSN_FSTP_MEMBASE, esp, -4));
-   select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, esp, -4, state->reg1));
+   select_insn(s, tree, membase_insn(INSN_FSTP_MEMBASE, esp, 0));
+   select_insn(s, tree, membase_reg_insn(INSN_MOV_MEMBASE_XMM, esp, 0, state->reg1));
+
+   method_args_cleanup(s, tree, 2);
 }
 
 reg:   OP_REM_64(reg, reg) 1
@@ -1823,16 +1825,16 @@ arg:EXPR_ARG(freg)
 
size = get_vmtype_size(arg_expr->vm_type);
 
+   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
+
if (arg_expr->vm_type == J_FLOAT) {
select_insn(s, tree,
-   reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, -size));
+   reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, 0));
} else {
select_insn(s, tree,
-   reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, -size));
+   reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, 0));
}
 
-   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
-
state->reg1 = NULL;
 }
 %else
@@ -1877,12 +1879,12 @@ arg:EXPR_ARG(freg)
} else {
int size = get_vmtype_size(arg_expr->vm_type);
 
+   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
+
if (arg_expr->vm_type == J_FLOAT)
-   select_insn(s, tree, reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, -size));
+   select_insn(s, tree, reg_membase_insn(INSN_MOV_XMM_MEMBASE, src, esp, 0));
else
-   select_insn(s, tree, reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, -size));
-
-   select_insn(s, tree, imm_reg_insn(INSN_SUB_IMM_REG, size, esp));
+   select_insn(s, tree, reg_membase_insn(INSN_MOV_64_XMM_MEMBASE, src, esp, 0));
}
 
state->reg1 = NULL;
-- 
1.6.3.3




[penberg/jato] 081ea2: Add myself to AUTHORS

2009-08-31 Thread noreply
Branch: refs/heads/master
Home:   http://github.com/penberg/jato

Commit: 081ea2355efe7996f0b816a5c557138503c323ba

http://github.com/penberg/jato/commit/081ea2355efe7996f0b816a5c557138503c323ba
Author: Arthur HUILLET 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M AUTHORS

Log Message:
---
Add myself to AUTHORS

Signed-off-by: Arthur HUILLET 
Signed-off-by: Pekka Enberg 





[PATCH] Add myself to AUTHORS

2009-08-31 Thread Arthur HUILLET
Signed-off-by: Arthur HUILLET 
---
 AUTHORS |    5 +++++
 1 files changed, 5 insertions(+), 0 deletions(-)

diff --git a/AUTHORS b/AUTHORS
index 8fe5c9f..cb33d28 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -19,3 +19,8 @@ L: Oslo, Norway
 N: Saeed Siam 
 D: Array handling, VMT
 L: Dhaka, Bangladesh
+
+N: Arthur Huillet 
+W: http://www.agoctrl.org/
+D: x86, register allocation
+L: France
-- 
1.6.4




[penberg/jato] 9ffc88: jit: add missing trace_flush() to trace_return_val...

2009-08-31 Thread noreply
Branch: refs/heads/master
Home:   http://github.com/penberg/jato

Commit: 9ffc88a87992000b530a19cf1fa6392e91ac778f

http://github.com/penberg/jato/commit/9ffc88a87992000b530a19cf1fa6392e91ac778f
Author: Tomek Grabiec 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M jit/trace-jit.c

Log Message:
---
jit: add missing trace_flush() to trace_return_value()

Signed-off-by: Tomek Grabiec 
Signed-off-by: Pekka Enberg 


Commit: 518ff910bbf9a64441a6ed575c1bf717e70d8e70

http://github.com/penberg/jato/commit/518ff910bbf9a64441a6ed575c1bf717e70d8e70
Author: Tomek Grabiec 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M include/jit/compilation-unit.h
  M include/jit/vars.h
  M jit/compilation-unit.c
  M jit/liveness.c
  M jit/spill-reload.c
  M jit/trace-jit.c
  M test/jit/compilation-unit-test.c
  M test/jit/liveness-test.c

Log Message:
---
jit: assign two LIR positions for each instruction.

We will need this to optimize register allocation. Every LIR instruction is
assigned two consecutive positions: an even one and an odd one. Even use
positions correspond to instruction input and odd positions to instruction
output. The distinction between the two allows the same physical register to
be allocated to adjacent intervals where the first interval ends at an
instruction's input and the second starts at its output. Further advantages
of this scheme are described in "Linear Scan Register Allocation for the
Java HotSpot Client Compiler" by C. Wimmer.

This is a preliminary patch; all use positions are still even.
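
(For illustration, the numbering works out as in the sketch below; the
helper names are made up for this note and are not part of the patch:)

	/* Instruction number i occupies LIR positions 2*i and 2*i + 1;
	 * the even position models the instruction's inputs, the odd
	 * position its output. */
	static inline unsigned long lir_input_pos(unsigned long insn_index)
	{
		return insn_index * 2;		/* even */
	}

	static inline unsigned long lir_output_pos(unsigned long insn_index)
	{
		return insn_index * 2 + 1;	/* odd */
	}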

Signed-off-by: Tomek Grabiec 
Signed-off-by: Pekka Enberg 


Commit: 4373533bcfc158846e6768871043a4353e73d082

http://github.com/penberg/jato/commit/4373533bcfc158846e6768871043a4353e73d082
Author: Tomek Grabiec 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M jit/linear-scan.c

Log Message:
---
jit: cleanup interval spilling

Signed-off-by: Tomek Grabiec 
Signed-off-by: Pekka Enberg 


Commit: 0422c37cf57eb800b0a49024ddc9fa20a4a1c1a6

http://github.com/penberg/jato/commit/0422c37cf57eb800b0a49024ddc9fa20a4a1c1a6
Author: Tomek Grabiec 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M include/jit/vars.h
  M jit/interval.c
  M jit/linear-scan.c
  M jit/liveness.c
  M jit/spill-reload.c
  M jit/trace-jit.c
  M test/jit/linear-scan-test.c
  M test/jit/live-range-test.c
  M test/jit/liveness-test.c

Log Message:
---
jit: introduce multiple live ranges per interval.

This is needed for precise modeling of live ranges.

Signed-off-by: Tomek Grabiec 
Signed-off-by: Pekka Enberg 


Commit: 52bec1a5387212ffacd489140e10fdd022ef8c60

http://github.com/penberg/jato/commit/52bec1a5387212ffacd489140e10fdd022ef8c60
Author: Tomek Grabiec 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M arch/mmix/include/arch/instruction.h
  M arch/x86/include/arch/instruction.h
  M arch/x86/instruction.c
  M arch/x86/use-def.c
  M include/jit/instruction.h
  M jit/basic-block.c
  M jit/bc-offset-mapping.c
  M jit/compilation-unit.c
  M jit/emit.c
  M jit/liveness.c
  M jit/trace-jit.c
  M test/jit/compilation-unit-test.c

Log Message:
---
jit: move arch independent stuff from arch/instruction.h to jit/instruction.h

Signed-off-by: Tomek Grabiec 
Signed-off-by: Pekka Enberg 


Commit: c0fdecf6c5a5eda9753c63dd039993bd82d4be09

http://github.com/penberg/jato/commit/c0fdecf6c5a5eda9753c63dd039993bd82d4be09
Author: Tomek Grabiec 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M arch/x86/include/arch/instruction.h
  M arch/x86/instruction.c
  M arch/x86/use-def.c
  M include/jit/instruction.h
  M include/jit/use-position.h
  M include/jit/vars.h
  M jit/interval.c
  M jit/linear-scan.c
  M jit/liveness.c
  M jit/spill-reload.c
  M test/jit/liveness-test.c
  M test/jit/spill-reload-test.c

Log Message:
---
jit: implement precise live range calculation

The live range of each variable is calculated precisely, as described in
section 5.6.3 "Build Intervals" of C. Wimmer's master's thesis "Linear
Scan Register Allocation".

This patch reduces register allocator stress by generating shorter, more
precise live ranges, and therefore reduces the number of interval spills.

It also introduces the distinction between even and odd use positions:
even use positions represent instruction input, and odd positions
represent instruction output. This allows for better register
utilization. Example:

mov r1, r2
add r2, r3

after allocation:

mov ebx, ebx  ; this can be optimized out in the future
add ebx, ebx

Signed-off-by: Tomek Grabiec 
Signed-off-by: Pekka Enberg 


Commit: 3534a32819fea9312b7803da13ffdb5ee39b9a34

http://github.com/penberg/jato/commit/3534a32819fea9312b7803da13ffdb5ee39b9a34
Author: Tomek Grabiec 
Date:   2009-08-31 (Mon, 31 Aug 2009)

Changed paths:
  M arch/x86/insn-selector.brg

Log Message:
---
x86: ensure fixed-reg variables are not returned as rule results

Fixed-reg variables should never be used outside a rule. When they are
returned as 

[PATCH] [APPEND-TO-LAST-SERIES] jit: cleanup handling of expired ranges of intervals

2009-08-31 Thread Tomek Grabiec
Having ->current_range in struct live_interval is very error prone.
Instead, we maintain a list of expired ranges, which are moved off
range_list and are no longer considered part of the interval's
ranges. After linear scan, the expired ranges are restored.

This fixes a crash in trace_var_liveness().
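
For illustration, the intended calling pattern is roughly the following
(a sketch: only the two interval_* functions come from this patch; the
loop shape and the names around them are assumed):

	/* Linear scan visits intervals in ascending start order, so
	 * ranges that end before the current position can be expired: */
	interval_expire_ranges_before(it, position);

	/* ... allocation now only scans the remaining ranges ... */

	/* After linear scan, move the expired ranges back so that later
	 * passes such as trace_var_liveness() see the whole interval: */
	interval_restore_expired_ranges(it);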

Signed-off-by: Tomek Grabiec 
---
 include/jit/vars.h |   18 --
 jit/interval.c |   41 +++--
 jit/linear-scan.c  |   15 +++
 3 files changed, 42 insertions(+), 32 deletions(-)

diff --git a/include/jit/vars.h b/include/jit/vars.h
index 2471b17..ae48752 100644
--- a/include/jit/vars.h
+++ b/include/jit/vars.h
@@ -65,16 +65,13 @@ struct live_interval {
struct list_head range_list;
 
/*
-* Points to a range from range_list which should be
-* considered as interval's starting range in operations:
-* intervals_intersect(), interval_intersection_start(),
-* interval_range_at(). It's used to speedup register
-* allocation. Intervals can have a lot of live ranges. Linear
-* scan algorithm goes through intervals in ascending order by
-* interval start. We can take advantage of this and don't
-* browse ranges past current position in some operations.
+* Contains ranges which were moved from range_list to speed up
+* some interval operations. Intervals can have a lot of live
+* ranges. The linear scan algorithm goes through intervals in
+* ascending order by interval start; we can take advantage of
+* this and avoid checking ranges before the current position.
 */
-   struct live_range *current_range;
+   struct list_head expired_range_list;
 
/* Linked list of child intervals.  */
struct live_interval *next_child, *prev_child;
unsigned long interval_intersection_start(struct live_interval *, struct live_interval *);
 bool interval_covers(struct live_interval *, unsigned long);
 int interval_add_range(struct live_interval *, unsigned long, unsigned long);
 struct live_range *interval_range_at(struct live_interval *, unsigned long);
-void interval_update_current_range(struct live_interval *, unsigned long);
+void interval_expire_ranges_before(struct live_interval *, unsigned long);
+void interval_restore_expired_ranges(struct live_interval *);
 
 static inline unsigned long first_use_pos(struct live_interval *it)
 {
diff --git a/jit/interval.c b/jit/interval.c
index 8eb7d32..c84de36 100644
--- a/jit/interval.c
+++ b/jit/interval.c
@@ -106,6 +106,7 @@ struct live_interval *alloc_interval(struct var_info *var)
INIT_LIST_HEAD(&interval->interval_node);
INIT_LIST_HEAD(&interval->use_positions);
INIT_LIST_HEAD(&interval->range_list);
+   INIT_LIST_HEAD(&interval->expired_range_list);
}
return interval;
 }
@@ -143,8 +144,6 @@ struct live_interval *split_interval_at(struct live_interval *interval,
return NULL;
}
 
-   new->current_range = interval_first_range(new);
-
new->fixed_reg = interval->fixed_reg;
if (new->fixed_reg)
new->reg = interval->reg;
@@ -210,25 +209,33 @@ struct live_interval *vreg_start_interval(struct compilation_unit *cu, unsigned
return var->interval;
 }
 
-/**
- * Advances @it->current_range to the last range which covers @pos or
- * is before @pos.
- */
-void interval_update_current_range(struct live_interval *it, unsigned long pos)
+void interval_expire_ranges_before(struct live_interval *it, unsigned long pos)
 {
+   struct live_range *range;
+
if (pos < interval_start(it) || pos >= interval_end(it))
return;
 
-   assert (pos >= it->current_range->start);
+   range = interval_first_range(it);
 
-   while (!in_range(it->current_range, pos)) {
+   while (!in_range(range, pos)) {
struct live_range *next;
 
-   next = next_range(&it->range_list, it->current_range);
+   next = next_range(&it->range_list, range);
if (pos < next->start)
break;
 
-   it->current_range = next;
+   list_move(&range->range_list_node, &it->expired_range_list);
+   range = next;
+   }
+}
+
+void interval_restore_expired_ranges(struct live_interval *it)
+{
+   struct live_range *this, *next;
+
+   list_for_each_entry_safe(this, next, &it->expired_range_list, range_list_node) {
+   list_move(&this->range_list_node, &it->range_list);
}
 }
 
@@ -239,9 +246,7 @@ struct live_range *interval_range_at(struct live_interval *it, unsigned long pos
if (pos < interval_start(it) || pos >= interval_end(it))
return NULL;
 
-   range = it->current_range;
-   if (pos < range->start)
-   range = interval_first_range(it);
+   range = interval_first_range(it);
 
  

[PATCH] x86: Introduce workarounds for valgrind to work with jato.

2009-08-31 Thread Tomek Grabiec
Jato can be compiled with workarounds that make valgrind
work with jato.

To do so, define the VALGRIND variable for make:
make jato VALGRIND=y

Currently, the workarounds eliminate class initialization
from the signal handler by unconditionally selecting
calls to vm_class_ensure_init().

Signed-off-by: Tomek Grabiec 
---
 Makefile                   |    5 +++++
 arch/x86/insn-selector.brg |   44 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/Makefile b/Makefile
index 55ef9ba..f9c8d9b 100644
--- a/Makefile
+++ b/Makefile
@@ -177,6 +177,11 @@ INSTALL:= install
 
DEFAULT_CFLAGS += $(ARCH_CFLAGS) -g -rdynamic -std=gnu99 -D_GNU_SOURCE -fstack-protector-all -D_FORTIFY_SOURCE=2
 
+ifdef VALGRIND
+DEFAULT_CFLAGS += -DCONFIG_VALGRIND
+MB_DEFINES += -DCONFIG_VALGRIND
+endif
+
 # XXX: Temporary hack -Vegard
DEFAULT_CFLAGS += -DNOT_IMPLEMENTED='fprintf(stderr, "%s:%d: warning: %s not implemented\n", __FILE__, __LINE__, __func__)'
 
diff --git a/arch/x86/insn-selector.brg b/arch/x86/insn-selector.brg
index 8522667..9c8dd70 100644
--- a/arch/x86/insn-selector.brg
+++ b/arch/x86/insn-selector.brg
@@ -1051,6 +1051,14 @@ reg: EXPR_CLASS_FIELD 1
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_REG,
+   (unsigned long) vmc->static_values + vmf->offset, out);
+%else
if (vmc_state >= VM_CLASS_INITIALIZING) {
/* Class is already initialized; no need for fix-up. We also
 * don't want the fixup if we're already inside the
@@ -1064,6 +1072,7 @@ reg:  EXPR_CLASS_FIELD 1
/* XXX: Check return value */
add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
}
+%endif /* CONFIG_VALGRIND */
 
select_insn(s, tree, mov_insn);
 
@@ -1097,6 +1106,18 @@ freg:EXPR_FLOAT_CLASS_FIELD 1
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   if (expr->vm_type == J_FLOAT)
+   mov_insn = memdisp_reg_insn(INSN_MOV_MEMDISP_XMM,
+   (unsigned long) vmc->static_values + vmf->offset, out);
+   else
+   mov_insn = memdisp_reg_insn(INSN_MOV_64_MEMDISP_XMM,
+   (unsigned long) vmc->static_values + vmf->offset, out);
+%else
if (vmc_state >= VM_CLASS_INITIALIZING) {
/* Class is already initialized; no need for fix-up. We also
 * don't want the fixup if we're already inside the
@@ -1118,6 +1139,7 @@ freg: EXPR_FLOAT_CLASS_FIELD 1
/* XXX: Check return value */
add_getstatic_fixup_site(mov_insn, vmf, s->b_parent);
}
+%endif /* CONFIG_VALGRIND */
 
select_insn(s, tree, mov_insn);
 }
@@ -1995,6 +2017,14 @@ stmt:STMT_STORE(EXPR_CLASS_FIELD, reg)
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   mov_insn = reg_memdisp_insn(INSN_MOV_REG_MEMDISP,
+   src, (unsigned long) vmc->static_values + vmf->offset);
+%else
if (vmc_state >= VM_CLASS_INITIALIZING) {
/* Class is already initialized; no need for fix-up. We also
 * don't want the fixup if we're already inside the
@@ -2008,6 +2038,7 @@ stmt: STMT_STORE(EXPR_CLASS_FIELD, reg)
/* XXX: Check return value */
add_putstatic_fixup_site(mov_insn, vmf, s->b_parent);
}
+%endif /* CONFIG_VALGRIND */
 
select_insn(s, tree, mov_insn);
 
@@ -2044,6 +2075,18 @@ stmt:STMT_STORE(EXPR_FLOAT_CLASS_FIELD, freg)
vmc_state = vmc->state;
vm_monitor_unlock(&vmc->monitor);
 
+%ifdef CONFIG_VALGRIND
+   select_insn(s, tree, imm_insn(INSN_PUSH_IMM, (unsigned long)vmc));
+   select_insn(s, tree, rel_insn(INSN_CALL_REL, (unsigned long)vm_class_ensure_init));
+   method_args_cleanup(s, tree, 1);
+
+   if (store_dest->vm_type == J_FLOAT)
+   mov_insn = reg_memdisp_insn(INSN_MOV_XMM_MEMDISP,
+   src, (unsigned long) vmc->static_values + vmf->offset);
+   else
+   mov_insn = reg