Fixed with the enclosed patch.

On 08/23/2013 11:02 AM, Richard Sandiford wrote:
> /* Return true if THIS is negative based on the interpretation of SGN.
>    For UNSIGNED, this is always false.  This is correct even if
>    precision is 0.  */
> inline bool
> wide_int::neg_p (signop sgn) const
>
> It seems odd that you have to pass SIGNED here.  I assume you were doing
> it so that the caller is forced to confirm signedness in the cases where
> a tree type is involved, but:
>
> * neg_p kind of implies signedness anyway
> * you don't require this for minus_one_p, so the interface isn't consistent
> * at the rtl level signedness isn't a property of the "type" (mode),
>   so it seems strange to add an extra hoop there
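
The change is just to give SGN a SIGNED default, so callers spell out the
sign only when they really mean UNSIGNED.  In sketch form (the sign_mask
call in the body is my shorthand for the real top-bit test, not
necessarily the exact code):

  /* In the class body (see the wide-int.h hunk below):  */
  bool neg_p (signop sgn = SIGNED) const;

  /* Roughly what the definition does:  */
  inline bool
  wide_int_ro::neg_p (signop sgn) const
  {
    /* An UNSIGNED value is never negative, whatever its bits are,
       and this holds even for precision 0.  */
    if (sgn == UNSIGNED)
      return false;
    /* Otherwise the value is negative iff its sign bit is set.  */
    return sign_mask () != 0;
  }

So e.g. bit_offset.neg_p (SIGNED) becomes bit_offset.neg_p (), while
neg_p (UNSIGNED) stays available for the callers that take the
signedness from a type.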



Index: gcc/ada/gcc-interface/decl.c
===================================================================
--- gcc/ada/gcc-interface/decl.c	(revision 201967)
+++ gcc/ada/gcc-interface/decl.c	(working copy)
@@ -7479,7 +7479,7 @@ annotate_value (tree gnu_size)
 	  tree op1 = TREE_OPERAND (gnu_size, 1);
 	  wide_int signed_op1
 	    = wide_int::from_tree (op1).sforce_to_size (TYPE_PRECISION (sizetype));
-	  if (signed_op1.neg_p (SIGNED))
+	  if (signed_op1.neg_p ())
 	    {
 	      op1 = wide_int_to_tree (sizetype, -signed_op1);
 	      pre_op1 = annotate_value (build1 (NEGATE_EXPR, sizetype, op1));
Index: gcc/c-family/c-ada-spec.c
===================================================================
--- gcc/c-family/c-ada-spec.c	(revision 201967)
+++ gcc/c-family/c-ada-spec.c	(working copy)
@@ -2197,7 +2197,7 @@ dump_generic_ada_node (pretty_printer *b
 	{
 	  wide_int val = node;
 	  int i;
-	  if (val.neg_p (SIGNED))
+	  if (val.neg_p ())
 	    {
 	      pp_minus (buffer);
 	      val = -val;
Index: gcc/config/sparc/sparc.c
===================================================================
--- gcc/config/sparc/sparc.c	(revision 201967)
+++ gcc/config/sparc/sparc.c	(working copy)
@@ -10624,7 +10624,7 @@ sparc_fold_builtin (tree fndecl, int n_a
 	      overall_overflow |= overall_overflow;
 	      tmp = e0.add (tmp, SIGNED, &overflow);
 	      overall_overflow |= overall_overflow;
-	      if (tmp.neg_p (SIGNED))
+	      if (tmp.neg_p ())
 		{
 		  tmp = tmp.neg (&overflow);
 		  overall_overflow |= overall_overflow;
Index: gcc/expr.c
===================================================================
--- gcc/expr.c	(revision 201967)
+++ gcc/expr.c	(working copy)
@@ -6718,7 +6718,7 @@ get_inner_reference (tree exp, HOST_WIDE
   if (offset)
     {
       /* Avoid returning a negative bitpos as this may wreak havoc later.  */
-      if (bit_offset.neg_p (SIGNED))
+      if (bit_offset.neg_p ())
         {
 	  addr_wide_int mask
 	    = addr_wide_int::mask (BITS_PER_UNIT == 8
Index: gcc/fold-const.c
===================================================================
--- gcc/fold-const.c	(revision 201967)
+++ gcc/fold-const.c	(working copy)
@@ -183,13 +183,13 @@ div_if_zero_remainder (const_tree arg1,
 	 precision by 1 bit, iff the top bit is set.  */
       if (sgn == UNSIGNED)
 	{
-	  if (warg1.neg_p (SIGNED))
+	  if (warg1.neg_p ())
 	    warg1 = warg1.force_to_size (warg1.get_precision () + 1, sgn);
 	  sgn = SIGNED;
 	}
       else
 	{
-	  if (warg2.neg_p (SIGNED))
+	  if (warg2.neg_p ())
 	    warg2 = warg2.force_to_size (warg2.get_precision () + 1, sgn2);
 	}
     }
@@ -979,7 +979,7 @@ int_const_binop_1 (enum tree_code code,
 
     case RSHIFT_EXPR:
     case LSHIFT_EXPR:
-      if (arg2.neg_p (SIGNED))
+      if (arg2.neg_p ())
 	{
 	  arg2 = -arg2;
 	  if (code == RSHIFT_EXPR)
@@ -999,7 +999,7 @@ int_const_binop_1 (enum tree_code code,
       
     case RROTATE_EXPR:
     case LROTATE_EXPR:
-      if (arg2.neg_p (SIGNED))
+      if (arg2.neg_p ())
 	{
 	  arg2 = -arg2;
 	  if (code == RROTATE_EXPR)
@@ -7180,7 +7180,7 @@ fold_plusminus_mult_expr (location_t loc
       /* As we canonicalize A - 2 to A + -2 get rid of that sign for
 	 the purpose of this canonicalization.  */
       if (TYPE_SIGN (TREE_TYPE (arg1)) == SIGNED
-	  && wide_int (arg1).neg_p (SIGNED)
+	  && wide_int (arg1).neg_p ()
 	  && negate_expr_p (arg1)
 	  && code == PLUS_EXPR)
 	{
@@ -12323,7 +12323,7 @@ fold_binary_loc (location_t loc,
 	  && TYPE_SIGN (type) == SIGNED
 	  && TREE_CODE (arg1) == INTEGER_CST
 	  && !TREE_OVERFLOW (arg1)
-	  && wide_int (arg1).neg_p (SIGNED)
+	  && wide_int (arg1).neg_p ()
 	  && !TYPE_OVERFLOW_TRAPS (type)
 	  /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
 	  && !sign_bit_p (arg1, arg1))
Index: gcc/gimple-ssa-strength-reduction.c
===================================================================
--- gcc/gimple-ssa-strength-reduction.c	(revision 201967)
+++ gcc/gimple-ssa-strength-reduction.c	(working copy)
@@ -1824,7 +1824,7 @@ cand_abs_increment (slsr_cand_t c)
 {
   max_wide_int increment = cand_increment (c);
 
-  if (!address_arithmetic_p && increment.neg_p (SIGNED))
+  if (!address_arithmetic_p && increment.neg_p ())
     increment = -increment;
 
   return increment;
@@ -1872,7 +1872,7 @@ replace_mult_candidate (slsr_cand_t c, t
 	 types, introduce a cast.  */
       if (!useless_type_conversion_p (target_type, TREE_TYPE (basis_name)))
 	basis_name = introduce_cast_before_cand (c, target_type, basis_name);
-      if (bump.neg_p (SIGNED)) 
+      if (bump.neg_p ()) 
 	{
 	  code = MINUS_EXPR;
 	  bump = -bump;
@@ -2005,7 +2005,7 @@ create_add_on_incoming_edge (slsr_cand_t
       tree bump_tree;
       enum tree_code code = PLUS_EXPR;
       max_wide_int bump = increment * c->stride;
-      if (bump.neg_p (SIGNED))
+      if (bump.neg_p ())
 	{
 	  code = MINUS_EXPR;
 	  bump = -bump;
@@ -2018,7 +2018,7 @@ create_add_on_incoming_edge (slsr_cand_t
   else
     {
       int i;
-      bool negate_incr = (!address_arithmetic_p && increment.neg_p (SIGNED));
+      bool negate_incr = (!address_arithmetic_p && increment.neg_p ());
       i = incr_vec_index (negate_incr ? -increment : increment);
       gcc_assert (i >= 0);
 
@@ -2312,7 +2312,7 @@ record_increment (slsr_cand_t c, const m
 
   /* Treat increments that differ only in sign as identical so as to
      share initializers, unless we are generating pointer arithmetic.  */
-  if (!address_arithmetic_p && increment.neg_p (SIGNED))
+  if (!address_arithmetic_p && increment.neg_p ())
     increment = -increment;
 
   for (i = 0; i < incr_vec_len; i++)
@@ -3044,7 +3044,7 @@ all_phi_incrs_profitable (slsr_cand_t c,
 	      slsr_cand_t arg_cand = base_cand_from_table (arg);
 	      max_wide_int increment = arg_cand->index - basis->index;
 
-	      if (!address_arithmetic_p && increment.neg_p (SIGNED))
+	      if (!address_arithmetic_p && increment.neg_p ())
 		increment = -increment;
 
 	      j = incr_vec_index (increment);
Index: gcc/predict.c
===================================================================
--- gcc/predict.c	(revision 201967)
+++ gcc/predict.c	(working copy)
@@ -1260,7 +1260,7 @@ predict_iv_comparison (struct loop *loop
       loop_count = tem.div_trunc (compare_step, SIGNED, &overflow);
       overall_overflow |= overflow;
 
-      if ((!compare_step.neg_p (SIGNED))
+      if ((!compare_step.neg_p ())
           ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
 	{
 	  /* (loop_bound - compare_bound) / compare_step */
@@ -1281,9 +1281,9 @@ predict_iv_comparison (struct loop *loop
 	++compare_count;
       if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
 	++loop_count;
-      if (compare_count.neg_p (SIGNED))
+      if (compare_count.neg_p ())
         compare_count = 0;
-      if (loop_count.neg_p (SIGNED))
+      if (loop_count.neg_p ())
         loop_count = 0;
       if (loop_count.zero_p ())
 	probability = 0;
Index: gcc/simplify-rtx.c
===================================================================
--- gcc/simplify-rtx.c	(revision 201967)
+++ gcc/simplify-rtx.c	(working copy)
@@ -3787,35 +3787,35 @@ simplify_const_binary_operation (enum rt
 	  break;
 
 	case LSHIFTRT:
-	  if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED))
+	  if (wide_int (std::make_pair (op1, mode)).neg_p ())
 	    return NULL_RTX;
 
 	  result = wop0.rshiftu (pop1, bitsize, TRUNC);
 	  break;
 	  
 	case ASHIFTRT:
-	  if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED))
+	  if (wide_int (std::make_pair (op1, mode)).neg_p ())
 	    return NULL_RTX;
 
 	  result = wop0.rshifts (pop1, bitsize, TRUNC);
 	  break;
 	  
 	case ASHIFT:
-	  if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED))
+	  if (wide_int (std::make_pair (op1, mode)).neg_p ())
 	    return NULL_RTX;
 
 	  result = wop0.lshift (pop1, bitsize, TRUNC);
 	  break;
 	  
 	case ROTATE:
-	  if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED))
+	  if (wide_int (std::make_pair (op1, mode)).neg_p ())
 	    return NULL_RTX;
 
 	  result = wop0.lrotate (pop1);
 	  break;
 	  
 	case ROTATERT:
-	  if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED))
+	  if (wide_int (std::make_pair (op1, mode)).neg_p ())
 	    return NULL_RTX;
 
 	  result = wop0.rrotate (pop1);
Index: gcc/tree-affine.c
===================================================================
--- gcc/tree-affine.c	(revision 201967)
+++ gcc/tree-affine.c	(working copy)
@@ -407,7 +407,7 @@ add_elt_to_tree (tree expr, tree type, t
 			 fold_build2 (MULT_EXPR, type1, elt,
 				      wide_int_to_tree (type1, scale)));
 
-  if (scale.neg_p (SIGNED))
+  if (scale.neg_p ())
     {
       code = MINUS_EXPR;
       scale = -scale;
@@ -450,7 +450,7 @@ aff_combination_to_tree (aff_tree *comb)
 
   /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
      unsigned.  */
-  if (comb->offset.neg_p (SIGNED))
+  if (comb->offset.neg_p ())
     {
       off = -comb->offset;
       sgn = -1;
@@ -901,12 +901,12 @@ aff_comb_cannot_overlap_p (aff_tree *dif
     return false;
 
   d = diff->offset;
-  if (d.neg_p (SIGNED))
+  if (d.neg_p ())
     {
       /* The second object is before the first one, we succeed if the last
 	 element of the second object is before the start of the first one.  */
       bound = d + size2 - 1;
-      return bound.neg_p (SIGNED);
+      return bound.neg_p ();
     }
   else
     {
Index: gcc/tree-object-size.c
===================================================================
--- gcc/tree-object-size.c	(revision 201967)
+++ gcc/tree-object-size.c	(working copy)
@@ -192,7 +192,7 @@ addr_object_size (struct object_size_inf
       if (sz != unknown[object_size_type])
 	{
 	  addr_wide_int dsz = addr_wide_int (sz) - mem_ref_offset (pt_var);
-	  if (dsz.neg_p (SIGNED))
+	  if (dsz.neg_p ())
 	    sz = 0;
 	  else if (dsz.fits_uhwi_p ())
 	    sz = dsz.to_uhwi ();
Index: gcc/tree-ssa-alias.c
===================================================================
--- gcc/tree-ssa-alias.c	(revision 201967)
+++ gcc/tree-ssa-alias.c	(working copy)
@@ -883,7 +883,7 @@ indirect_ref_may_alias_decl_p (tree ref1
      so that the resulting offset adjustment is positive.  */
   moff = mem_ref_offset (base1);
   moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
-  if (moff.neg_p (SIGNED))
+  if (moff.neg_p ())
     offset2p += (-moff).to_short_addr ();
   else
     offset1p += moff.to_short_addr ();
@@ -959,7 +959,7 @@ indirect_ref_may_alias_decl_p (tree ref1
     {
       addr_wide_int moff = mem_ref_offset (dbase2);
       moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
-      if (moff.neg_p (SIGNED))
+      if (moff.neg_p ())
 	doffset1 -= (-moff).to_short_addr ();
       else
 	doffset2 -= moff.to_short_addr ();
@@ -1053,13 +1053,13 @@ indirect_refs_may_alias_p (tree ref1 ATT
 	 so that the resulting offset adjustment is positive.  */
       moff = mem_ref_offset (base1);
       moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
-      if (moff.neg_p (SIGNED))
+      if (moff.neg_p ())
 	offset2 += (-moff).to_short_addr ();
       else
 	offset1 += moff.to_shwi ();
       moff = mem_ref_offset (base2);
       moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
-      if (moff.neg_p (SIGNED))
+      if (moff.neg_p ())
 	offset1 += (-moff).to_short_addr ();
       else
 	offset2 += moff.to_short_addr ();
Index: gcc/tree-ssa-ccp.c
===================================================================
--- gcc/tree-ssa-ccp.c	(revision 201967)
+++ gcc/tree-ssa-ccp.c	(working copy)
@@ -1173,7 +1173,7 @@ bit_value_binop_1 (enum tree_code code,
 	    }
 	  else 
 	    {
-	      if (shift.neg_p (SIGNED))
+	      if (shift.neg_p ())
 		{
 		  shift = -shift;
 		  if (code == RROTATE_EXPR)
@@ -1210,7 +1210,7 @@ bit_value_binop_1 (enum tree_code code,
 	    }
 	  else 
 	    {
-	      if (shift.neg_p (SIGNED))
+	      if (shift.neg_p ())
 		{
 		  shift = -shift;
 		  if (code == RSHIFT_EXPR)
@@ -1327,7 +1327,7 @@ bit_value_binop_1 (enum tree_code code,
 	    o2mask = r2mask;
 	  }
 	/* If the most significant bits are not known we know nothing.  */
-	if (o1mask.neg_p (SIGNED) || o2mask.neg_p (SIGNED))
+	if (o1mask.neg_p () || o2mask.neg_p ())
 	  break;
 
 	/* For comparisons the signedness is in the comparison operands.  */
Index: gcc/tree-ssa-loop-niter.c
===================================================================
--- gcc/tree-ssa-loop-niter.c	(revision 201967)
+++ gcc/tree-ssa-loop-niter.c	(working copy)
@@ -2432,11 +2432,11 @@ derive_constant_upper_bound_ops (tree ty
 
       bnd = derive_constant_upper_bound (op0);
 
-      if (cst.neg_p (SIGNED))
+      if (cst.neg_p ())
 	{
 	  cst = -cst;
 	  /* Avoid CST == 0x80000...  */
-	  if (cst.neg_p (SIGNED))
+	  if (cst.neg_p ())
 	    return max;;
 
 	  /* OP0 + CST.  We need to check that
Index: gcc/tree-vrp.c
===================================================================
--- gcc/tree-vrp.c	(revision 201967)
+++ gcc/tree-vrp.c	(working copy)
@@ -5110,8 +5110,8 @@ register_edge_assert_for_2 (tree name, e
 	  cst2v = wide_int (cst2).zforce_to_size (nprec);
 	  if (TYPE_SIGN (TREE_TYPE (val)) == SIGNED)
 	    {
-	      valn = valv.sext (nprec).neg_p (SIGNED);
-	      cst2n = cst2v.sext (nprec).neg_p (SIGNED);
+	      valn = valv.sext (nprec).neg_p ();
+	      cst2n = cst2v.sext (nprec).neg_p ();
 	    }
 	  /* If CST2 doesn't have most significant bit set,
 	     but VAL is negative, we have comparison like
@@ -5154,7 +5154,7 @@ register_edge_assert_for_2 (tree name, e
 		  sgnbit = wide_int::zero (nprec);
 		  goto lt_expr;
 		}
-	      if (!cst2n && cst2v.sext (nprec).neg_p (SIGNED))
+	      if (!cst2n && cst2v.sext (nprec).neg_p ())
 		sgnbit = wide_int::set_bit_in_zero (nprec - 1, nprec);
 	      if (!sgnbit.zero_p ())
 		{
Index: gcc/tree.c
===================================================================
--- gcc/tree.c	(revision 201967)
+++ gcc/tree.c	(working copy)
@@ -1257,7 +1257,7 @@ wide_int_to_tree (tree type, const wide_
 
 	  if (cst.minus_one_p ())
 	    ix = 0;
-	  else if (!cst.neg_p (SIGNED))
+	  else if (!cst.neg_p ())
 	    {
 	      if (prec < HOST_BITS_PER_WIDE_INT)
 		{
@@ -1410,7 +1410,7 @@ cache_integer_cst (tree t)
 
 	  if (integer_minus_onep (t))
 	    ix = 0;
-	  else if (!wide_int (t).neg_p (SIGNED))
+	  else if (!wide_int (t).neg_p ())
 	    {
 	      if (prec < HOST_BITS_PER_WIDE_INT)
 		{
@@ -6842,7 +6842,7 @@ tree_int_cst_sgn (const_tree t)
     return 0;
   else if (TYPE_UNSIGNED (TREE_TYPE (t)))
     return 1;
-  else if (w.neg_p (SIGNED))
+  else if (w.neg_p ())
     return -1;
   else
     return 1;
@@ -8557,8 +8557,8 @@ retry:
       wd = type_low_bound;
       if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound)))
 	{
-	  int c_neg = (!unsc && wc.neg_p (SIGNED));
-	  int t_neg = (unsc && wd.neg_p (SIGNED));
+	  int c_neg = (!unsc && wc.neg_p ());
+	  int t_neg = (unsc && wd.neg_p ());
 
 	  if (c_neg && !t_neg)
 	    return false;
@@ -8578,8 +8578,8 @@ retry:
       wd = type_high_bound;
       if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound)))
 	{
-	  int c_neg = (!unsc && wc.neg_p (SIGNED));
-	  int t_neg = (unsc && wd.neg_p (SIGNED));
+	  int c_neg = (!unsc && wc.neg_p ());
+	  int t_neg = (unsc && wd.neg_p ());
 
 	  if (t_neg && !c_neg)
 	    return false;
@@ -8600,7 +8600,7 @@ retry:
   /* Perform some generic filtering which may allow making a decision
      even if the bounds are not constant.  First, negative integers
      never fit in unsigned types, */
-  if (TYPE_UNSIGNED (type) && !unsc && wc.neg_p (SIGNED))
+  if (TYPE_UNSIGNED (type) && !unsc && wc.neg_p ())
     return false;
 
   /* Second, narrower types always fit in wider ones.  */
@@ -8608,7 +8608,7 @@ retry:
     return true;
 
   /* Third, unsigned integers with top bit set never fit signed types.  */
-  if (! TYPE_UNSIGNED (type) && unsc && wc.neg_p (SIGNED))
+  if (! TYPE_UNSIGNED (type) && unsc && wc.neg_p ())
     return false;
 
   /* If we haven't been able to decide at this point, there nothing more we
Index: gcc/wide-int-print.cc
===================================================================
--- gcc/wide-int-print.cc	(revision 201967)
+++ gcc/wide-int-print.cc	(working copy)
@@ -61,7 +61,7 @@ print_decs (const wide_int &wi, char *bu
   if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
       || (wi.get_len () == 1))
     {
-      if (wi.neg_p (SIGNED))
+      if (wi.neg_p ())
       	sprintf (buf, "-" HOST_WIDE_INT_PRINT_UNSIGNED, -wi.to_shwi ());
       else
 	sprintf (buf, HOST_WIDE_INT_PRINT_DEC, wi.to_shwi ());
@@ -88,7 +88,7 @@ void
 print_decu (const wide_int &wi, char *buf)
 {
   if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
-      || (wi.get_len () == 1 && !wi.neg_p (SIGNED)))
+      || (wi.get_len () == 1 && !wi.neg_p ()))
     sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, wi.to_uhwi ());
   else
     print_hex (wi, buf);
@@ -114,7 +114,7 @@ print_hex (const wide_int &wi, char *buf
     buf += sprintf (buf, "0x0");
   else
     {
-      if (wi.neg_p (SIGNED))
+      if (wi.neg_p ())
 	{
 	  int j;
 	  /* If the number is negative, we may need to pad value with
Index: gcc/wide-int.cc
===================================================================
--- gcc/wide-int.cc	(revision 201967)
+++ gcc/wide-int.cc	(working copy)
@@ -1662,7 +1662,7 @@ wide_int_ro::clz () const
       else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count))
 	count = precision;
     }
-  else if (neg_p (SIGNED))
+  else if (neg_p ())
     count = 0;
   else
     {
@@ -1712,7 +1712,7 @@ wide_int_ro::clrsb () const
 {
   gcc_checking_assert (precision);
 
-  if (neg_p (SIGNED))
+  if (neg_p ())
     return operator ~ ().clz () - 1;
 
   return clz () - 1;
Index: gcc/wide-int.h
===================================================================
--- gcc/wide-int.h	(revision 201967)
+++ gcc/wide-int.h	(working copy)
@@ -309,11 +309,6 @@ class GTY(()) wide_int_ro
   /* Internal representation.  */
 
 protected:
-  /* VAL is set to a size that is capable of computing a full
-     multiplication on the largest mode that is represented on the
-     target.  Currently there is a part of tree-vrp that requires 2x +
-     2 bits of precision where x is the precision of the variables
-     being optimized.  */
   HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
   unsigned short len;
   unsigned int precision;
@@ -372,7 +367,7 @@ public:
   bool minus_one_p () const;
   bool zero_p () const;
   bool one_p () const;
-  bool neg_p (signop) const;
+  bool neg_p (signop sgn = SIGNED) const;
   bool multiple_of_p (const wide_int_ro &, signop, wide_int_ro *) const;
 
   /* Comparisons, note that only equality is an operator.  The other
@@ -1117,11 +1112,6 @@ wide_int_ro::zero_p () const
 
   if (precision && precision < HOST_BITS_PER_WIDE_INT)
     x = sext_hwi (val[0], precision);
-  else if (len == 0)
-    {
-      gcc_assert (precision == 0);
-      return true;
-    }
   else
     x = val[0];
 
@@ -2495,13 +2485,13 @@ wide_int_ro::div_round (const T &c, sign
       if (sgn == SIGNED)
 	{
 	  wide_int_ro p_remainder
-	    = remainder.neg_p (SIGNED) ? -remainder : remainder;
-	  wide_int_ro p_divisor = divisor.neg_p (SIGNED) ? -divisor : divisor;
+	    = remainder.neg_p () ? -remainder : remainder;
+	  wide_int_ro p_divisor = divisor.neg_p () ? -divisor : divisor;
 	  p_divisor = p_divisor.rshiftu_large (1);
 
 	  if (p_divisor.gts_p (p_remainder))
 	    {
-	      if (quotient.neg_p (SIGNED))
+	      if (quotient.neg_p ())
 		return quotient - 1;
 	      else
 		return quotient + 1;
@@ -2726,14 +2716,14 @@ wide_int_ro::mod_round (const T &c, sign
       wide_int_ro divisor = wide_int_ro::from_array (s, cl, precision);
       if (sgn == SIGNED)
 	{
-	  wide_int_ro p_remainder = (remainder.neg_p (SIGNED)
+	  wide_int_ro p_remainder = (remainder.neg_p ()
 				     ? -remainder : remainder);
-	  wide_int_ro p_divisor = divisor.neg_p (SIGNED) ? -divisor : divisor;
+	  wide_int_ro p_divisor = divisor.neg_p () ? -divisor : divisor;
 	  p_divisor = p_divisor.rshiftu_large (1);
 
 	  if (p_divisor.gts_p (p_remainder))
 	    {
-	      if (quotient.neg_p (SIGNED))
+	      if (quotient.neg_p ())
 		return remainder + divisor;
 	      else
 		return remainder - divisor;
@@ -3542,7 +3532,7 @@ template <int bitsize>
 inline fixed_wide_int <bitsize>
 fixed_wide_int <bitsize>::from_wide_int (const wide_int &w)
 {
-  if (w.neg_p (SIGNED))
+  if (w.neg_p ())
     return w.sforce_to_size (bitsize);
   return w.zforce_to_size (bitsize);
 }
@@ -3583,7 +3573,7 @@ inline fixed_wide_int <bitsize>::fixed_w
   : wide_int_ro (op0)
 {
   precision = bitsize;
-  if (neg_p (SIGNED))
+  if (neg_p ())
     static_cast <wide_int_ro &> (*this) = zext (HOST_BITS_PER_WIDE_INT);
 }
 
@@ -3593,7 +3583,7 @@ inline fixed_wide_int <bitsize>::fixed_w
 {
   precision = bitsize;
   if (sizeof (int) == sizeof (HOST_WIDE_INT)
-      && neg_p (SIGNED))
+      && neg_p ())
     *this = zext (HOST_BITS_PER_WIDE_INT);
 }
 
@@ -3651,7 +3641,7 @@ fixed_wide_int <bitsize>::operator = (co
   precision = bitsize;
 
   /* This is logically top_bit_set_p.  */
-  if (TYPE_SIGN (type) == UNSIGNED && neg_p (SIGNED))
+  if (TYPE_SIGN (type) == UNSIGNED && neg_p ())
     static_cast <wide_int_ro &> (*this) = zext (TYPE_PRECISION (type));
 
   return *this;
@@ -3685,7 +3675,7 @@ fixed_wide_int <bitsize>::operator = (un
   precision = bitsize;
 
   /* This is logically top_bit_set_p.  */
-  if (neg_p (SIGNED))
+  if (neg_p ())
     static_cast <wide_int_ro &> (*this) = zext (HOST_BITS_PER_WIDE_INT);
 
   return *this;
@@ -3699,7 +3689,7 @@ fixed_wide_int <bitsize>::operator = (un
   precision = bitsize;
 
   if (sizeof (int) == sizeof (HOST_WIDE_INT)
-      && neg_p (SIGNED))
+      && neg_p ())
     *this = zext (HOST_BITS_PER_WIDE_INT);
 
   return *this;
