From: Richard Henderson <r...@twiddle.net>

Carry generation was broken in b5a73f8d8a57e940f9bbeb399a9e47897522ee9a; the
carry itself was fixed in 79482e5ab38a05ca8869040b0d8b8f451f16ff62, but we
still need to produce the full 64-bit addition.

Simplify the conditions at the top of these functions that decide when we
need a new temporary.  Only plain addition is important enough to warrant
avoiding the temporary, and the extra tcg move op that would come with it.

Signed-off-by: Richard Henderson <r...@twiddle.net>
Reviewed-by: Aurelien Jarno <aurel...@aurel32.net>
Tested-by: Aurelien Jarno <aurel...@aurel32.net>
Signed-off-by: Alexander Graf <ag...@suse.de>
---
 target-ppc/translate.c |   35 ++++++++++++++++++++++-------------
 1 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 294ab58..362ca3a 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -768,22 +768,25 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
 {
     TCGv t0 = ret;
 
-    if (((compute_ca && add_ca) || compute_ov)
-        && (TCGV_EQUAL(ret, arg1) || TCGV_EQUAL(ret, arg2)))  {
+    if (compute_ca || compute_ov) {
         t0 = tcg_temp_new();
     }
 
     if (compute_ca) {
         if (NARROW_MODE(ctx)) {
+            /* Caution: a non-obvious corner case of the spec is that we
+               must produce the *entire* 64-bit addition, but produce the
+               carry into bit 32.  */
             TCGv t1 = tcg_temp_new();
-            tcg_gen_ext32u_tl(t1, arg2);
-            tcg_gen_ext32u_tl(t0, arg1);
-            tcg_gen_add_tl(t0, t0, t1);
-            tcg_temp_free(t1);
+            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
+            tcg_gen_add_tl(t0, arg1, arg2);
             if (add_ca) {
                 tcg_gen_add_tl(t0, t0, cpu_ca);
             }
-            tcg_gen_shri_tl(cpu_ca, t0, 32);
+            tcg_gen_xor_tl(cpu_ca, t0, t1);        /* bits changed w/ carry */
+            tcg_temp_free(t1);
+            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);   /* extract bit 32 */
+            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
         } else {
             TCGv zero = tcg_const_tl(0);
             if (add_ca) {
@@ -1122,24 +1125,30 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
 {
     TCGv t0 = ret;
 
-    if (compute_ov && (TCGV_EQUAL(ret, arg1) || TCGV_EQUAL(ret, arg2)))  {
+    if (compute_ca || compute_ov) {
         t0 = tcg_temp_new();
     }
 
     if (compute_ca) {
         /* dest = ~arg1 + arg2 [+ ca].  */
         if (NARROW_MODE(ctx)) {
+            /* Caution: a non-obvious corner case of the spec is that we
+               must produce the *entire* 64-bit addition, but produce the
+               carry into bit 32.  */
             TCGv inv1 = tcg_temp_new();
+            TCGv t1 = tcg_temp_new();
             tcg_gen_not_tl(inv1, arg1);
-            tcg_gen_ext32u_tl(t0, arg2);
-            tcg_gen_ext32u_tl(inv1, inv1);
             if (add_ca) {
-                tcg_gen_add_tl(t0, t0, cpu_ca);
+                tcg_gen_add_tl(t0, arg2, cpu_ca);
             } else {
-                tcg_gen_addi_tl(t0, t0, 1);
+                tcg_gen_addi_tl(t0, arg2, 1);
             }
+            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
             tcg_gen_add_tl(t0, t0, inv1);
-            tcg_gen_shri_tl(cpu_ca, t0, 32);
+            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
+            tcg_temp_free(t1);
+            tcg_gen_shri_tl(cpu_ca, cpu_ca, 32);    /* extract bit 32 */
+            tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
         } else if (add_ca) {
             TCGv zero, inv1 = tcg_temp_new();
             tcg_gen_not_tl(inv1, arg1);
-- 
1.6.0.2
