On 11/08/2013 05:30 AM, Richard Sandiford wrote:
Some comments from looking through the diff with the merge point,
ignoring wide-int.h and wide-int.cc.  A few more to follow in the
form of patches.

--------------------

dwarf2out.c has:

+    case CONST_WIDE_INT:
+      if (mode == VOIDmode)
+       mode = GET_MODE (rtl);
+
+      if (mode != VOIDmode && (dwarf_version >= 4 || !dwarf_strict))
+       {
+         gcc_assert (mode == GET_MODE (rtl) || VOIDmode == GET_MODE (rtl));
+
+         /* Note that a CONST_DOUBLE rtx could represent either an integer
+            or a floating-point constant.  A CONST_DOUBLE is used whenever
+            the constant requires more than one word in order to be
+            adequately represented.  We output CONST_DOUBLEs as blocks.  */
+         loc_result = new_loc_descr (DW_OP_implicit_value,
+                                     GET_MODE_SIZE (mode), 0);
+         loc_result->dw_loc_oprnd2.val_class = dw_val_class_wide_int;
+         loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc_cleared_wide_int ();
+         *loc_result->dw_loc_oprnd2.v.val_wide = std::make_pair (rtl, mode);

The comment looks like a cut-&-paste.  The "mode == GET_MODE (rtl)"
bit should never be true.
I removed the assertion completely; it really is unnecessary.
--------------------

 From fold-const.c:

@@ -13686,14 +13548,17 @@ fold_binary_loc (location_t loc,
                  break;
                }
- else if (TREE_INT_CST_HIGH (arg1) == signed_max_hi
-                    && TREE_INT_CST_LOW (arg1) == signed_max_lo
+           else if (wi::eq_p (arg1, signed_max)
                     && TYPE_UNSIGNED (arg1_type)
+                    /* KENNY QUESTIONS THE CHECKING OF THE BITSIZE
+                       HERE.  HE FEELS THAT THE PRECISION SHOULD BE
+                       CHECKED */
+
                     /* We will flip the signedness of the comparison operator
                        associated with the mode of arg1, so the sign bit is
                        specified by this mode.  Check that arg1 is the signed
                        max associated with this sign bit.  */
-                    && width == GET_MODE_BITSIZE (TYPE_MODE (arg1_type))
+                    && prec == GET_MODE_BITSIZE (TYPE_MODE (arg1_type))
                     /* signed_type does not work on pointer types.  */
                     && INTEGRAL_TYPE_P (arg1_type))
              {
I have asked this question on the public newsgroup. This patch is consistent with the patch that I posted for trunk. I will modify this when I get feedback on the other one.

Looks like it should be resolved one way or the other before the merge.

--------------------

 From gcse.c:

--- wide-int-base/gcc/gcc/gcse.c        2013-11-05 13:09:32.148376180 +0000
+++ wide-int/gcc/gcc/gcse.c     2013-11-05 13:07:28.431495118 +0000
@@ -1997,6 +1997,13 @@ prune_insertions_deletions (int n_elems)
        bitmap_clear_bit (pre_delete_map[i], j);
      }
+ if (dump_file)
+    {
+      dump_bitmap_vector (dump_file, "pre_insert_map", "", pre_insert_map, 
n_edges);
+      dump_bitmap_vector (dump_file, "pre_delete_map", "", pre_delete_map,
+                          last_basic_block);
+    }
+
    sbitmap_free (prune_exprs);
    free (insertions);
    free (deletions);

This doesn't look related.
removed.

--------------------

 From lcm.c:

diff -udpr '--exclude=.svn' '--exclude=.pc' '--exclude=patches' 
wide-int-base/gcc/gcc/lcm.c wide-int/gcc/gcc/lcm.c
--- wide-int-base/gcc/gcc/lcm.c 2013-08-22 09:00:23.068716382 +0100
+++ wide-int/gcc/gcc/lcm.c      2013-10-26 13:19:16.287277520 +0100
@@ -64,6 +64,7 @@ along with GCC; see the file COPYING3.
  #include "sbitmap.h"
  #include "dumpfile.h"
+#define LCM_DEBUG_INFO 1
  /* Edge based LCM routines.  */
  static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap 
*);
  static void compute_earliest (struct edge_list *, int, sbitmap *, sbitmap *,
@@ -106,6 +107,7 @@ compute_antinout_edge (sbitmap *antloc,
    /* We want a maximal solution, so make an optimistic initialization of
       ANTIN.  */
    bitmap_vector_ones (antin, last_basic_block);
+  bitmap_vector_clear (antout, last_basic_block);
/* Put every block on the worklist; this is necessary because of the
       optimistic initialization of ANTIN above.  */
@@ -432,6 +434,7 @@ pre_edge_lcm (int n_exprs, sbitmap *tran
/* Allocate an extra element for the exit block in the laterin vector. */
    laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
+  bitmap_vector_clear (laterin, last_basic_block);
    compute_laterin (edge_list, earliest, antloc, later, laterin);
#ifdef LCM_DEBUG_INFO

Same here.
I removed this also. It was done for the sanity of debugging, but I do not think it is important enough for the trunk. I assume that the code was originally written this way because it is faster to do this than to explicitly clear the structures.


--------------------

 From real.c:

@@ -2144,43 +2148,131 @@ real_from_string3 (REAL_VALUE_TYPE *r, c
      real_convert (r, mode, r);
  }
-/* Initialize R from the integer pair HIGH+LOW. */
+/* Initialize R from a HOST_WIDE_INT.  */
void
  real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
-                  unsigned HOST_WIDE_INT low, HOST_WIDE_INT high,
-                  int unsigned_p)
+                  HOST_WIDE_INT val,
+                  signop sgn)
  {
-  if (low == 0 && high == 0)
+  if (val == 0)
      get_zero (r, 0);
    else
      {
        memset (r, 0, sizeof (*r));
        r->cl = rvc_normal;
-      r->sign = high < 0 && !unsigned_p;
-      SET_REAL_EXP (r, HOST_BITS_PER_DOUBLE_INT);
+      r->sign = val < 0 && sgn == SIGNED;
+      SET_REAL_EXP (r, HOST_BITS_PER_WIDE_INT);
+ /* TODO: This fails for -MAXHOSTWIDEINT, wide_int version would
+        have worked.  */
        if (r->sign)
+       val = -val;

Given the TODO, is it really worth having this HOST_WIDE_INT version?
Wouldn't it be better to have just the wide_int version?
I got rid of this version.
--------------------

 From real.c:

+/* Initialize R from the integer pair HIGH+LOW.  */
+
+void
+real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
+                  const wide_int_ref &val_in, signop sgn)
+{
+  if (val_in == 0)
+    get_zero (r, 0);
+  else
+    {
+      unsigned int len = val_in.get_precision ();
+      int i, j, e=0;
+      int maxbitlen = MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT;

Why + HOST_BITS_PER_WIDE_INT?  AIUI you want + X so that you can negate
the largest negative number, but 1 bit should be enough for that.
wide_int works faster with precisions that are multiples of HOST_BITS_PER_WIDE_INT.
In case the idea was to get a rounded bitlen, there's no guarantee that
MAX_BITSIZE_MODE_ANY_INT itself is rounded.

+         HOST_WIDE_INT cnt_l_z;
+         cnt_l_z = wi::clz (val);
+
+         if (maxbitlen - cnt_l_z > realmax)
+           {
+             e = maxbitlen - cnt_l_z - realmax;
+
+             /* This value is too large, we must shift it right to
+                preserve all the bits we can, and then bump the
+                exponent up by that amount.  */
+             val = wi::lrshift (val, e);

Don't we want a sticky bit for the shifted-out bits, in order to detect
the 0.5 ULP case properly?
This is something for a floating-point expert. What we do here is upwards compatible with what was there before.

--------------------

 From recog.c:

+int
+const_scalar_int_operand (rtx op, enum machine_mode mode)
+{
+  if (!CONST_SCALAR_INT_P (op))
+    return 0;
+
+  if (CONST_INT_P (op))
+    return const_int_operand (op, mode);
+
+  if (mode != VOIDmode)
+    {
+      int prec = GET_MODE_PRECISION (mode);
+      int bitsize = GET_MODE_BITSIZE (mode);
+
+      if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
+       return 0;
+
+      if (prec == bitsize)
+       return 1;
+      else
+       {
+         /* Multiword partial int.  */
+         HOST_WIDE_INT x
+           = CONST_WIDE_INT_ELT (op, CONST_WIDE_INT_NUNITS (op) - 1);
+         return (sext_hwi (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) == x);
+       }
+    }
+  return 1;
+}

const_int_operand rejects cases where gen_int_mode (INTVAL (op), mode)
would give something different from op.  We need to do the equivalent
for CONST_WIDE_INT too.  (The current CONST_DOUBLE code doesn't bother
because at the moment the only >HWI mode we support is a 2*HWI one.)

It'd be nice to handle CONST_INT and CONST_WIDE_INT using the same code,
without going to const_int_operand.

+/* Returns 1 if OP is an operand that is a CONST_WIDE_INT of mode
+   MODE.  This most likely is not as useful as
+   const_scalar_int_operand since it does not accept CONST_INTs, but
+   is here for consistancy.  */
+int
+const_wide_int_operand (rtx op, enum machine_mode mode)
+{
+  if (!CONST_WIDE_INT_P (op))
+    return 0;
+
+  return const_scalar_int_operand (op, mode);
+}

Hmm, but have you found any use cases?  Like you say in the comment,
it just seems wrong to require a CONST_WIDE_INT over a CONST_INT,
since that's a host-dependent choice.  I think we should drop this
from the initial merge.
It's gone.
--------------------

 From rtl.c:

+/* Write the wide constant X to OUTFILE.  */
+
+void
+cwi_output_hex (FILE *outfile, const_rtx x)
+{
+  int i = CWI_GET_NUM_ELEM (x);
+  gcc_assert (i > 0);
+  if (CWI_ELT (x, i-1) == 0)
+    fprintf (outfile, "0x");
+  fprintf (outfile, HOST_WIDE_INT_PRINT_HEX, CWI_ELT (x, --i));
+  while (--i >= 0)
+    fprintf (outfile, HOST_WIDE_INT_PRINT_PADDED_HEX, CWI_ELT (x, i));
+}

Why only print "0x" if the top element is 0?  Probably deserves a comment.

--------------------

 From tree-cfg.c:

@@ -2698,24 +2700,25 @@ verify_expr (tree *tp, int *walk_subtree
if (TREE_CODE (t) == BIT_FIELD_REF)
        {
-         if (!host_integerp (TREE_OPERAND (t, 1), 1)
-             || !host_integerp (TREE_OPERAND (t, 2), 1))
+         if (!tree_fits_uhwi_p (TREE_OPERAND (t, 1))
+             || !tree_fits_uhwi_p (TREE_OPERAND (t, 2)))
            {
              error ("invalid position or size operand to BIT_FIELD_REF");
              return t;
            }
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && (TYPE_PRECISION (TREE_TYPE (t))
-                 != TREE_INT_CST_LOW (TREE_OPERAND (t, 1))))
+                 != tree_to_uhwi (TREE_OPERAND (t, 1))))
            {
              error ("integral result type precision does not match "
                     "field size of BIT_FIELD_REF");
              return t;
            }
          else if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
+                  && !AGGREGATE_TYPE_P (TREE_TYPE (t))
                   && TYPE_MODE (TREE_TYPE (t)) != BLKmode
                   && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (t)))
-                      != TREE_INT_CST_LOW (TREE_OPERAND (t, 1))))
+                      != tree_to_uhwi (TREE_OPERAND (t, 1))))
            {
              error ("mode precision of non-integral result does not "
                     "match field size of BIT_FIELD_REF");

Why the extra !AGGREGATE_TYPE_P test?

--------------------

 From tree-vrp.c:

@@ -1893,6 +1884,10 @@ vrp_int_const_binop (enum tree_code code
      /* If the singed operation wraps then int_const_binop has done
         everything we want.  */
      ;
+  /* Signed division of -1/0 overflows and by the time it gets here
+     returns NULL_TREE.  */
+  else if (!res)
+    return NULL_TREE;
    else if ((TREE_OVERFLOW (res)
            && !TREE_OVERFLOW (val1)
            && !TREE_OVERFLOW (val2))

Why is this case different from trunk?  Or is it a bug-fix independent
of wide-int?
This will be handled in a separate set of patches. I am working on it.

Thanks,
Richard

Reply via email to