} else {
cc = 0;
ret |= addr & ~TARGET_PAGE_MASK;
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 0cef6efbef4..a6079ab7b4f 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -2932,7 +2932,7 @@ static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
- gen_helper_lra(o->out, cpu_env, o->in2);
+ gen_helper_lra(o->out, cpu_env, o->out, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
Can't we use something like in1_r1 + wout_r1_32 instead? *maybe* cleaner :)
The problem is that we want all 64 bits for the non-error case.
Ah, I missed that detail, thanks.
--
Cheers,
David / dhildenb
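
For readers following along, here is a minimal sketch of what the helper side could look like once it receives the old r1 value as an extra operand: on a translation error only the low 32 bits of r1 are replaced, while on success the full 64-bit real address is written back (which is why in1_r1 + wout_r1_32 alone would not be enough). The mmu_translate() signature, the exception encoding and the condition-code handling below are simplified assumptions, not the exact QEMU code.

/* Hypothetical sketch only -- the mmu_translate() interface and the
 * exception encoding are simplified assumptions, not the real helper. */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t r1, uint64_t addr)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags, exc, cc;

    exc = mmu_translate(env, addr, MMU_S390_LRA, asc, &ret, &flags);
    if (exc) {
        cc = 3;
        /* Error case: keep the high 32 bits of the old r1 value and
         * replace only the low 32 bits with the exception information. */
        ret = (r1 & 0xFFFFFFFF00000000ULL) | exc | 0x80000000;
    } else {
        cc = 0;
        /* Non-error case: return the full 64-bit real address. */
        ret |= addr & ~TARGET_PAGE_MASK;
    }

    env->cc_op = cc;
    return ret;
}

The helper declaration would also need the extra i64 operand, e.g. a DEF_HELPER_3(lra, i64, env, i64, i64)-style entry in helper.h (exact macro and placement are assumptions here).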