The reason for this patch is the changes showcased in tree-vrp.c.
Basically I'd like to discourage rolling our own overflow and underflow
calculation when doing wide int arithmetic. We should have a
centralized place for this, that is-- in the wide int code itself ;-).
The only cases I care about are plus/minus, which I have implemented,
but we also get division for free, since AFAICT, division can only
overflow positively:
-MIN / -1 => +OVERFLOW
Multiplication, OTOH, can underflow, but I've not implemented it because
we have no uses for it. I have added a note in the code explaining this.
Originally I tried to only change plus/minus, but that made code that
dealt with plus/minus in addition to div or mult a lot uglier. You'd
have to special case "int overflow_for_add_stuff" and "bool
overflow_for_everything_else". Changing everything to int, makes things
consistent.
Note: I have left poly-int as is, with its concept of yes/no for
overflow. I can adapt this as well if desired.
Tested on x86-64 Linux.
OK for trunk?
gcc/
* tree-vrp.c (vrp_int_const_binop): Change overflow type to int.
(combine_bound): Use wide-int overflow calculation instead of
rolling our own.
* calls.c (maybe_warn_alloc_args_overflow): Change overflow type to
int.
* fold-const.c (int_const_binop_2): Same.
(extract_muldiv_1): Same.
(fold_div_compare): Same.
(fold_abs_const): Same.
* match.pd: Same.
* poly-int.h (add): Same.
(sub): Same.
(neg): Same.
(mul): Same.
* predict.c (predict_iv_comparison): Same.
* profile-count.c (slow_safe_scale_64bit): Same.
* simplify-rtx.c (simplify_const_binary_operation): Same.
* tree-chrec.c (tree_fold_binomial): Same.
* tree-data-ref.c (split_constant_offset_1): Same.
* tree-if-conv.c (idx_within_array_bound): Same.
* tree-scalar-evolution.c (iv_can_overflow_p): Same.
* tree-ssa-phiopt.c (minmax_replacement): Same.
* tree-vect-loop.c (is_nonwrapping_integer_induction): Same.
* tree-vect-stmts.c (vect_truncate_gather_scatter_offset): Same.
* vr-values.c (vr_values::adjust_range_with_scev): Same.
* wide-int.cc (wi::add_large): Same.
(wi::mul_internal): Same.
(wi::sub_large): Same.
(wi::divmod_internal): Same.
* wide-int.h: Change overflow type to int for neg, add, mul, smul,
umul, div_trunc, div_floor, div_ceil, div_round, mod_trunc,
mod_ceil, mod_round, add_large, sub_large, mul_internal,
divmod_internal.
gcc/cp/
* decl.c (build_enumerator): Change overflow type to int.
* init.c (build_new_1): Same.
diff --git a/gcc/calls.c b/gcc/calls.c
index 1970f1c51dd..14c34cca883 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -1517,7 +1517,7 @@ maybe_warn_alloc_args_overflow (tree fn, tree exp, tree args[2], int idx[2])
wide_int x = wi::to_wide (argrange[0][0], szprec);
wide_int y = wi::to_wide (argrange[1][0], szprec);
- bool vflow;
+ int vflow;
wide_int prod = wi::umul (x, y, &vflow);
if (vflow)
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 0ea3c4a3490..dccca1502b3 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -14628,7 +14628,6 @@ build_enumerator (tree name, tree value, tree enumtype, tree attributes,
if (TYPE_VALUES (enumtype))
{
tree prev_value;
- bool overflowed;
/* C++03 7.2/4: If no initializer is specified for the first
enumerator, the type is an unspecified integral
@@ -14642,6 +14641,7 @@ build_enumerator (tree name, tree value, tree enumtype, tree attributes,
value = error_mark_node;
else
{
+ int overflowed;
tree type = TREE_TYPE (prev_value);
signop sgn = TYPE_SIGN (type);
widest_int wi = wi::add (wi::to_widest (prev_value), 1, sgn,
@@ -14668,7 +14668,7 @@ incremented enumerator value is too large for %<unsigned long%>") : G_("\
incremented enumerator value is too large for %<long%>"));
}
if (type == NULL_TREE)
- overflowed = true;
+ overflowed = 1;
else
value = wide_int_to_tree (type, wi);
}
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index 76ce0b829dd..85df1a2efb9 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -2943,7 +2943,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
tree inner_nelts_cst = maybe_constant_value (inner_nelts);
if (TREE_CODE (inner_nelts_cst) == INTEGER_CST)
{
- bool overflow;
+ int overflow;
offset_int result = wi::mul (wi::to_offset (inner_nelts_cst),
inner_nelts_count, SIGNED, &overflow);
if (overflow)
@@ -3072,7 +3072,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
maximum object size and is safe even if we choose not to use
a cookie after all. */
max_size -= wi::to_offset (cookie_size);
- bool overflow;
+ int overflow;
inner_size = wi::mul (wi::to_offset (size), inner_nelts_count, SIGNED,
&overflow);
if (overflow || wi::gtu_p (inner_size, max_size))
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 8476c223e4f..5cfd5edd77d 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -976,7 +976,7 @@ int_const_binop_2 (enum tree_code code, const_tree parg1, const_tree parg2,
tree t;
tree type = TREE_TYPE (parg1);
signop sign = TYPE_SIGN (type);
- bool overflow = false;
+ int overflow = 0;
wi::tree_to_wide_ref arg1 = wi::to_wide (parg1);
wide_int arg2 = wi::to_wide (parg2, TYPE_PRECISION (type));
@@ -6486,7 +6486,7 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
if (tcode == code)
{
bool overflow_p = false;
- bool overflow_mul_p;
+ int overflow_mul_p;
signop sign = TYPE_SIGN (ctype);
unsigned prec = TYPE_PRECISION (ctype);
wide_int mul = wi::mul (wi::to_wide (op1, prec),
@@ -6705,7 +6705,7 @@ fold_div_compare (enum tree_code code, tree c1, tree c2, tree *lo,
{
tree prod, tmp, type = TREE_TYPE (c1);
signop sign = TYPE_SIGN (type);
- bool overflow;
+ int overflow;
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, c1, c2); */
@@ -13872,7 +13872,7 @@ fold_abs_const (tree arg0, tree type)
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
wide_int val = wi::to_wide (arg0);
- bool overflow = false;
+ int overflow = 0;
if (!wi::neg_p (val, TYPE_SIGN (TREE_TYPE (arg0))))
;
diff --git a/gcc/match.pd b/gcc/match.pd
index c1e0963da9a..b7b305b6a39 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -307,11 +307,11 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(div (div @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
- bool overflow_p;
+ int overflow;
wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
- TYPE_SIGN (type), &overflow_p);
+ TYPE_SIGN (type), &overflow);
}
- (if (!overflow_p)
+ (if (!overflow)
(div @0 { wide_int_to_tree (type, mul); })
(if (TYPE_UNSIGNED (type)
|| mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
@@ -322,13 +322,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
- bool overflow_p;
+ int overflow;
wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
- TYPE_SIGN (type), &overflow_p);
+ TYPE_SIGN (type), &overflow);
}
/* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
otherwise undefined overflow implies that @0 must be zero. */
- (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
+ (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
(mult @0 { wide_int_to_tree (type, mul); }))))
/* Optimize A / A to 1.0 if we don't care about
@@ -2807,7 +2807,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& (cmp == LT_EXPR || cmp == GE_EXPR)))
(with
{
- bool overflow = false;
+ int overflow = 0;
enum tree_code code, cmp_code = cmp;
wide_int real_c1;
wide_int c1 = wi::to_wide (@1);
@@ -3367,7 +3367,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (TREE_CODE (@1) == INTEGER_CST)
(with
{
- bool ovf;
+ int ovf;
wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
@@ -3380,7 +3380,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
(with
{
- bool ovf;
+ int ovf;
wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
diff --git a/gcc/poly-int.h b/gcc/poly-int.h
index b3b61e25e64..5a3f8be7c39 100644
--- a/gcc/poly-int.h
+++ b/gcc/poly-int.h
@@ -921,13 +921,15 @@ add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, wi::add (a.coeffs[0], b.coeffs[0], sgn, overflow));
+ int overflow_type;
+ POLY_SET_COEFF (C, r, 0, wi::add (a.coeffs[0], b.coeffs[0], sgn,
+ &overflow_type));
+ *overflow = overflow_type != 0;
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
POLY_SET_COEFF (C, r, i, wi::add (a.coeffs[i], b.coeffs[i], sgn,
- &suboverflow));
- *overflow |= suboverflow;
+ &overflow_type));
+ *overflow |= overflow_type != 0;
}
return r;
}
@@ -1020,13 +1022,16 @@ sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, wi::sub (a.coeffs[0], b.coeffs[0], sgn, overflow));
+ int overflow_type;
+ POLY_SET_COEFF (C, r, 0, wi::sub (a.coeffs[0], b.coeffs[0], sgn,
+ &overflow_type));
+ if (overflow)
+ *overflow = overflow_type != 0;
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
POLY_SET_COEFF (C, r, i, wi::sub (a.coeffs[i], b.coeffs[i], sgn,
- &suboverflow));
- *overflow |= suboverflow;
+ &overflow_type));
+ *overflow |= overflow_type != 0;
}
return r;
}
@@ -1064,12 +1069,13 @@ neg (const poly_int_pod<N, Ca> &a, bool *overflow)
{
typedef WI_UNARY_RESULT (Ca) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, wi::neg (a.coeffs[0], overflow));
+ int overflow_type;
+ POLY_SET_COEFF (C, r, 0, wi::neg (a.coeffs[0], &overflow_type));
+ *overflow = overflow_type;
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
- POLY_SET_COEFF (C, r, i, wi::neg (a.coeffs[i], &suboverflow));
- *overflow |= suboverflow;
+ POLY_SET_COEFF (C, r, i, wi::neg (a.coeffs[i], &overflow_type));
+ *overflow |= overflow_type;
}
return r;
}
@@ -1140,12 +1146,13 @@ mul (const poly_int_pod<N, Ca> &a, const Cb &b,
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
- POLY_SET_COEFF (C, r, 0, wi::mul (a.coeffs[0], b, sgn, overflow));
+ int overflow_type;
+ POLY_SET_COEFF (C, r, 0, wi::mul (a.coeffs[0], b, sgn, &overflow_type));
+ *overflow = overflow_type;
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
- POLY_SET_COEFF (C, r, i, wi::mul (a.coeffs[i], b, sgn, &suboverflow));
- *overflow |= suboverflow;
+ POLY_SET_COEFF (C, r, i, wi::mul (a.coeffs[i], b, sgn, &overflow_type));
+ *overflow |= overflow_type;
}
return r;
}
diff --git a/gcc/predict.c b/gcc/predict.c
index 019ff9e44cf..6f05703af75 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -1628,8 +1628,8 @@ predict_iv_comparison (struct loop *loop, basic_block bb,
&& tree_fits_shwi_p (compare_var)
&& tree_fits_shwi_p (compare_base))
{
- int probability;
- bool overflow, overall_overflow = false;
+ int probability, overflow;
+ bool overall_overflow = false;
widest_int compare_count, tem;
/* (loop_bound - base) / compare_step */
diff --git a/gcc/profile-count.c b/gcc/profile-count.c
index 3d411cfbfb3..e35cf8f0679 100644
--- a/gcc/profile-count.c
+++ b/gcc/profile-count.c
@@ -210,7 +210,7 @@ bool
slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
{
FIXED_WIDE_INT (128) tmp = a;
- bool overflow;
+ int overflow;
tmp = wi::udiv_floor (wi::umul (tmp, b, &overflow) + (c / 2), c);
gcc_checking_assert (!overflow);
if (wi::fits_uhwi_p (tmp))
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 90d148c0074..e9ef4591bf0 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -4226,7 +4226,7 @@ simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
&& CONST_SCALAR_INT_P (op1))
{
wide_int result;
- bool overflow;
+ int overflow;
rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index 04d33ef625f..42da9a410a7 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -524,7 +524,7 @@ chrec_fold_multiply (tree type,
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
- bool overflow;
+ int overflow;
unsigned int i;
/* Handle the most frequent cases. */
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index b163eaf841d..6fa19f56ca8 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -734,12 +734,12 @@ split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1,
is known to be [A + TMP_OFF, B + TMP_OFF], with all
operations done in ITYPE. The addition must overflow
at both ends of the range or at neither. */
- bool overflow[2];
+ int overflow[2];
unsigned int prec = TYPE_PRECISION (itype);
wide_int woff = wi::to_wide (tmp_off, prec);
wide_int op0_min = wi::add (var_min, woff, sgn, &overflow[0]);
wi::add (var_max, woff, sgn, &overflow[1]);
- if (overflow[0] != overflow[1])
+ if ((bool)overflow[0] != (bool)overflow[1])
return false;
/* Calculate (ssizetype) OP0 - (ssizetype) TMP_VAR. */
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index 71dac4fb48a..f9beb0b6580 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -743,7 +743,7 @@ hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
static bool
idx_within_array_bound (tree ref, tree *idx, void *dta)
{
- bool overflow;
+ int overflow;
widest_int niter, valid_niter, delta, wi_step;
tree ev, init, step;
tree low, high;
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 4b0ec02b4de..7a002b15792 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -3185,7 +3185,7 @@ iv_can_overflow_p (struct loop *loop, tree type, tree base, tree step)
&& wi::le_p (base_max, type_max, sgn));
/* Account the possible increment in the last ieration. */
- bool overflow = false;
+ int overflow = 0;
nit = wi::add (nit, 1, SIGNED, &overflow);
if (overflow)
return true;
@@ -3202,7 +3202,7 @@ iv_can_overflow_p (struct loop *loop, tree type, tree base, tree step)
the type. */
if (sgn == UNSIGNED || !wi::neg_p (step_max))
{
- bool overflow = false;
+ int overflow = 0;
if (wi::gtu_p (wi::mul (step_max, nit2, UNSIGNED, &overflow),
type_max - base_max)
|| overflow)
@@ -3211,7 +3211,7 @@ iv_can_overflow_p (struct loop *loop, tree type, tree base, tree step)
/* If step can be negative, check that nit*(-step) <= base_min-type_min. */
if (sgn == SIGNED && wi::neg_p (step_min))
{
- bool overflow = false, overflow2 = false;
+ int overflow = 0, overflow2 = 0;
if (wi::gtu_p (wi::mul (wi::neg (step_min, &overflow2),
nit2, UNSIGNED, &overflow),
base_min - type_min)
@@ -3315,7 +3315,7 @@ simple_iv_with_niters (struct loop *wrto_loop, struct loop *use_loop,
enum tree_code code;
tree type, ev, base, e;
wide_int extreme;
- bool folded_casts, overflow;
+ bool folded_casts;
iv->base = NULL_TREE;
iv->step = NULL_TREE;
@@ -3424,7 +3424,7 @@ simple_iv_with_niters (struct loop *wrto_loop, struct loop *use_loop,
code = GT_EXPR;
extreme = wi::max_value (type);
}
- overflow = false;
+ int overflow = 0;
extreme = wi::sub (extreme, wi::to_wide (iv->step),
TYPE_SIGN (type), &overflow);
if (overflow)
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 8e94f6a999a..88174033920 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -1224,7 +1224,7 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
{
if (cmp == LT_EXPR)
{
- bool overflow;
+ int overflow;
wide_int alt = wi::sub (wi::to_wide (larger), 1,
TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
@@ -1233,7 +1233,7 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
}
else
{
- bool overflow;
+ int overflow;
wide_int alt = wi::add (wi::to_wide (larger), 1,
TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
@@ -1252,7 +1252,7 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
{
if (cmp == GT_EXPR)
{
- bool overflow;
+ int overflow;
wide_int alt = wi::add (wi::to_wide (smaller), 1,
TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
@@ -1261,7 +1261,7 @@ minmax_replacement (basic_block cond_bb, basic_block middle_bb,
}
else
{
- bool overflow;
+ int overflow;
wide_int alt = wi::sub (wi::to_wide (smaller), 1,
TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 67e8efe2fa9..f93037674d9 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -6051,7 +6051,7 @@ is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
widest_int ni, max_loop_value, lhs_max;
- bool overflow = false;
+ int overflow = 0;
/* Make sure the loop is integer based. */
if (TREE_CODE (base) != INTEGER_CST
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index ea303bd7023..99e29117ceb 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -2029,7 +2029,7 @@ vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
/* Try scales of 1 and the element size. */
int scales[] = { 1, vect_get_scalar_dr_size (dr) };
- bool overflow_p = false;
+ int overflow = 0;
for (int i = 0; i < 2; ++i)
{
int scale = scales[i];
@@ -2039,13 +2039,13 @@ vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
/* See whether we can calculate (COUNT - 1) * STEP / SCALE
in OFFSET_BITS bits. */
- widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
- if (overflow_p)
+ widest_int range = wi::mul (count, factor, SIGNED, &overflow);
+ if (overflow)
continue;
signop sign = range >= 0 ? UNSIGNED : SIGNED;
if (wi::min_precision (range, sign) > element_bits)
{
- overflow_p = true;
+ overflow = 1;
continue;
}
@@ -2071,7 +2071,7 @@ vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
return true;
}
- if (overflow_p && dump_enabled_p ())
+ if (overflow && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"truncating gather/scatter offset to %d bits"
" might change its value.\n", element_bits);
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 65865a7f5b6..062330bfbd6 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -968,7 +968,7 @@ value_range_constant_singleton (value_range *vr)
static bool
vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
{
- bool overflow = false;
+ int overflow = 0;
signop sign = TYPE_SIGN (TREE_TYPE (val1));
switch (code)
@@ -1337,41 +1337,16 @@ combine_bound (enum tree_code code, wide_int &wi, int &ovf,
if (op0 && op1)
{
if (minus_p)
- {
- wi = wi::to_wide (op0) - wi::to_wide (op1);
-
- /* Check for overflow. */
- if (wi::cmp (0, wi::to_wide (op1), sgn)
- != wi::cmp (wi, wi::to_wide (op0), sgn))
- ovf = wi::cmp (wi::to_wide (op0),
- wi::to_wide (op1), sgn);
- }
+ wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
else
- {
- wi = wi::to_wide (op0) + wi::to_wide (op1);
-
- /* Check for overflow. */
- if (wi::cmp (wi::to_wide (op1), 0, sgn)
- != wi::cmp (wi, wi::to_wide (op0), sgn))
- ovf = wi::cmp (wi::to_wide (op0), wi, sgn);
- }
+ wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
}
else if (op0)
wi = wi::to_wide (op0);
else if (op1)
{
if (minus_p)
- {
- wi = -wi::to_wide (op1);
-
- /* Check for overflow. */
- if (sgn == SIGNED
- && wi::neg_p (wi::to_wide (op1))
- && wi::neg_p (wi))
- ovf = 1;
- else if (sgn == UNSIGNED && wi::to_wide (op1) != 0)
- ovf = -1;
- }
+ wi = wi::neg (wi::to_wide (op1));
else
wi = wi::to_wide (op1);
}
diff --git a/gcc/vr-values.c b/gcc/vr-values.c
index 74f813e7334..6eeb8ac7272 100644
--- a/gcc/vr-values.c
+++ b/gcc/vr-values.c
@@ -1810,7 +1810,7 @@ vr_values::adjust_range_with_scev (value_range *vr, struct loop *loop,
{
value_range maxvr = VR_INITIALIZER;
signop sgn = TYPE_SIGN (TREE_TYPE (step));
- bool overflow;
+ int overflow;
widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
&overflow);
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index 81731465137..3aabc43a755 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -1128,7 +1128,7 @@ unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int op0len, const HOST_WIDE_INT *op1,
unsigned int op1len, unsigned int prec,
- signop sgn, bool *overflow)
+ signop sgn, int *overflow)
{
unsigned HOST_WIDE_INT o0 = 0;
unsigned HOST_WIDE_INT o1 = 0;
@@ -1166,7 +1166,11 @@ wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
if (sgn == SIGNED)
{
unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
- *overflow = (HOST_WIDE_INT) (x << shift) < 0;
+ if ((HOST_WIDE_INT) (x << shift) < 0)
+ *overflow = (o0 > (unsigned HOST_WIDE_INT) val[len - 1]
+ ? -1 : o0 < (unsigned HOST_WIDE_INT) val[len - 1]);
+ else
+ *overflow = 0;
}
else
{
@@ -1264,12 +1268,15 @@ wi_pack (HOST_WIDE_INT *result,
made to see if it overflows. Unfortunately there is no better way
to check for overflow than to do this. If OVERFLOW is nonnull,
record in *OVERFLOW whether the result overflowed. SGN controls
- the signedness and is used to check overflow or if HIGH is set. */
+ the signedness and is used to check overflow or if HIGH is set.
+
+ NOTE: Unlike addition and subtraction, the type of overflow is not
+ implemented, as we currently have no uses for it. */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
unsigned int op1len, const HOST_WIDE_INT *op2val,
unsigned int op2len, unsigned int prec, signop sgn,
- bool *overflow, bool high)
+ int *overflow, bool high)
{
unsigned HOST_WIDE_INT o0, o1, k, t;
unsigned int i;
@@ -1294,7 +1301,7 @@ wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
just make sure that we never attempt to set it. */
bool needs_overflow = (overflow != 0);
if (needs_overflow)
- *overflow = false;
+ *overflow = 0;
wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
@@ -1394,12 +1401,12 @@ wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
if (sgn == SIGNED)
{
if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
- *overflow = true;
+ *overflow = 1;
}
else
{
if ((r >> prec) != 0)
- *overflow = true;
+ *overflow = 1;
}
}
val[0] = high ? r >> prec : r;
@@ -1474,7 +1481,7 @@ wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
if (((HOST_WIDE_INT)(r[i] & mask)) != top)
- *overflow = true;
+ *overflow = 1;
}
int r_offset = high ? half_blocks_needed : 0;
@@ -1518,7 +1525,7 @@ unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int op0len, const HOST_WIDE_INT *op1,
unsigned int op1len, unsigned int prec,
- signop sgn, bool *overflow)
+ signop sgn, int *overflow)
{
unsigned HOST_WIDE_INT o0 = 0;
unsigned HOST_WIDE_INT o1 = 0;
@@ -1552,7 +1559,8 @@ wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
val[len] = mask0 - mask1 - borrow;
len++;
if (overflow)
- *overflow = (sgn == UNSIGNED && borrow);
+ /* Unsigned subtract can only trigger -OVF. */
+ *overflow = -(sgn == UNSIGNED && borrow);
}
else if (overflow)
{
@@ -1560,7 +1568,10 @@ wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
if (sgn == SIGNED)
{
unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
- *overflow = (HOST_WIDE_INT) (x << shift) < 0;
+ if ((HOST_WIDE_INT) (x << shift) < 0)
+ *overflow = o0 > o1 ? -1 : o0 < o1;
+ else
+ *overflow = 0;
}
else
{
@@ -1568,9 +1579,9 @@ wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
x <<= shift;
o0 <<= shift;
if (old_borrow)
- *overflow = (x >= o0);
+ *overflow = -(x >= o0);
else
- *overflow = (x > o0);
+ *overflow = -(x > o0);
}
}
@@ -1706,7 +1717,7 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
unsigned int dividend_len, unsigned int dividend_prec,
const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
unsigned int divisor_prec, signop sgn,
- bool *oflow)
+ int *oflow)
{
unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
@@ -1751,7 +1762,7 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
remainder[0] = 0;
}
if (oflow != 0)
- *oflow = true;
+ *oflow = 1;
if (quotient)
for (unsigned int i = 0; i < dividend_len; ++i)
quotient[i] = dividend_val[i];
@@ -1759,7 +1770,7 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
}
if (oflow)
- *oflow = false;
+ *oflow = 0;
/* Do it on the host if you can. */
if (sgn == SIGNED
@@ -2421,30 +2432,30 @@ test_overflow ()
{
int prec = precs[i];
int offset = offsets[j];
- bool overflow;
+ int overflow;
wide_int sum, diff;
sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
UNSIGNED, &overflow);
ASSERT_EQ (sum, -offset);
- ASSERT_EQ (overflow, offset == 0);
+ ASSERT_EQ (overflow != 0, offset == 0);
sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
UNSIGNED, &overflow);
ASSERT_EQ (sum, -offset);
- ASSERT_EQ (overflow, offset == 0);
+ ASSERT_EQ (overflow != 0, offset == 0);
diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
wi::max_value (prec, UNSIGNED),
UNSIGNED, &overflow);
ASSERT_EQ (diff, -offset);
- ASSERT_EQ (overflow, offset != 0);
+ ASSERT_EQ (overflow != 0, offset != 0);
diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
wi::max_value (prec, UNSIGNED) - 1,
UNSIGNED, &overflow);
ASSERT_EQ (diff, 1 - offset);
- ASSERT_EQ (overflow, offset > 1);
+ ASSERT_EQ (overflow != 0, offset > 1);
}
}
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index e93b36ef07a..3aef77f41eb 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -522,7 +522,7 @@ namespace wi
UNARY_FUNCTION bit_not (const T &);
UNARY_FUNCTION neg (const T &);
- UNARY_FUNCTION neg (const T &, bool *);
+ UNARY_FUNCTION neg (const T &, int *);
UNARY_FUNCTION abs (const T &);
UNARY_FUNCTION ext (const T &, unsigned int, signop);
UNARY_FUNCTION sext (const T &, unsigned int);
@@ -542,33 +542,33 @@ namespace wi
BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
BINARY_FUNCTION add (const T1 &, const T2 &);
- BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION add (const T1 &, const T2 &, signop, int *);
BINARY_FUNCTION sub (const T1 &, const T2 &);
- BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION sub (const T1 &, const T2 &, signop, int *);
BINARY_FUNCTION mul (const T1 &, const T2 &);
- BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *);
- BINARY_FUNCTION smul (const T1 &, const T2 &, bool *);
- BINARY_FUNCTION umul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION mul (const T1 &, const T2 &, signop, int *);
+ BINARY_FUNCTION smul (const T1 &, const T2 &, int *);
+ BINARY_FUNCTION umul (const T1 &, const T2 &, int *);
BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
- BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, int * = 0);
BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
- BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, int * = 0);
BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
- BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, int * = 0);
BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
- BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, int * = 0);
BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
WI_BINARY_RESULT (T1, T2) *);
BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
- BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, int * = 0);
BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
- BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, int * = 0);
BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
- BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0);
- BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, int * = 0);
+ BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, int * = 0);
template <typename T1, typename T2>
bool multiple_of_p (const T1 &, const T2 &, signop);
@@ -1700,20 +1700,20 @@ namespace wi
const HOST_WIDE_INT *, unsigned int, unsigned int);
unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
const HOST_WIDE_INT *, unsigned int, unsigned int,
- signop, bool *);
+ signop, int *);
unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
const HOST_WIDE_INT *, unsigned int, unsigned int,
- signop, bool *);
+ signop, int *);
unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
unsigned int, const HOST_WIDE_INT *,
- unsigned int, unsigned int, signop, bool *,
+ unsigned int, unsigned int, signop, int *,
bool);
unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
HOST_WIDE_INT *, const HOST_WIDE_INT *,
unsigned int, unsigned int,
const HOST_WIDE_INT *,
unsigned int, unsigned int,
- signop, bool *);
+ signop, int *);
}
/* Return the number of bits that integer X can hold. */
@@ -2105,7 +2105,7 @@ wi::neg (const T &x)
/* Return -x. Indicate in *OVERFLOW if X is the minimum signed value. */
template <typename T>
inline WI_UNARY_RESULT (T)
-wi::neg (const T &x, bool *overflow)
+wi::neg (const T &x, int *overflow)
{
*overflow = only_sign_bit_p (x);
return sub (0, x);
@@ -2404,10 +2404,11 @@ wi::add (const T1 &x, const T2 &y)
}
/* Return X + Y. Treat X and Y as having the signednes given by SGN
- and indicate in *OVERFLOW whether the operation overflowed. */
+ and indicate in *OVERFLOW the type of overflow that occurred
+ (-1 for underflow, +1 for overflow, and 0 for no overflow). */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::add (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
@@ -2419,9 +2420,15 @@ wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
unsigned HOST_WIDE_INT yl = yi.ulow ();
unsigned HOST_WIDE_INT resultl = xl + yl;
if (sgn == SIGNED)
- *overflow = (((resultl ^ xl) & (resultl ^ yl))
- >> (precision - 1)) & 1;
+ {
+ if ((((resultl ^ xl) & (resultl ^ yl))
+ >> (precision - 1)) & 1)
+ *overflow = xl > resultl ? -1 : xl < resultl;
+ else
+ *overflow = 0;
+ }
else
+ /* Unsigned add can only trigger +OVF. */
*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
< (xl << (HOST_BITS_PER_WIDE_INT - precision)));
val[0] = resultl;
@@ -2477,10 +2484,11 @@ wi::sub (const T1 &x, const T2 &y)
}
/* Return X - Y. Treat X and Y as having the signednes given by SGN
- and indicate in *OVERFLOW whether the operation overflowed. */
+ and indicate in *OVERFLOW the type of overflow that occurred
+ (-1 for underflow, +1 for overflow, and 0 for no overflow). */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::sub (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
@@ -2492,10 +2500,16 @@ wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
unsigned HOST_WIDE_INT yl = yi.ulow ();
unsigned HOST_WIDE_INT resultl = xl - yl;
if (sgn == SIGNED)
- *overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
+ {
+ if ((((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1)
+ *overflow = xl > yl ? -1 : xl < yl;
+ else
+ *overflow = 0;
+ }
else
- *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
- > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ /* Unsigned subtract can only trigger -OVF. */
+ *overflow = -((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
val[0] = resultl;
result.set_len (1);
}
@@ -2527,10 +2541,13 @@ wi::mul (const T1 &x, const T2 &y)
}
/* Return X * Y. Treat X and Y as having the signednes given by SGN
- and indicate in *OVERFLOW whether the operation overflowed. */
+ and indicate in *OVERFLOW whether the operation overflowed.
+
+ NOTE: Unlike addition and subtraction, the type of overflow is not
+ implemented, as we currently have no uses for it. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mul (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
@@ -2546,7 +2563,7 @@ wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
*OVERFLOW whether the operation overflowed. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::smul (const T1 &x, const T2 &y, bool *overflow)
+wi::smul (const T1 &x, const T2 &y, int *overflow)
{
return mul (x, y, SIGNED, overflow);
}
@@ -2555,7 +2572,7 @@ wi::smul (const T1 &x, const T2 &y, bool *overflow)
*OVERFLOW whether the operation overflowed. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::umul (const T1 &x, const T2 &y, bool *overflow)
+wi::umul (const T1 &x, const T2 &y, int *overflow)
{
return mul (x, y, UNSIGNED, overflow);
}
@@ -2581,7 +2598,7 @@ wi::mul_high (const T1 &x, const T2 &y, signop sgn)
overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_trunc (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
@@ -2616,7 +2633,7 @@ wi::udiv_trunc (const T1 &x, const T2 &y)
overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_floor (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
@@ -2658,7 +2675,7 @@ wi::udiv_floor (const T1 &x, const T2 &y)
overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_ceil (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
@@ -2691,7 +2708,7 @@ wi::udiv_ceil (const T1 &x, const T2 &y)
in *OVERFLOW if the result overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_round (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
@@ -2779,7 +2796,7 @@ wi::gcd (const T1 &a, const T2 &b, signop sgn)
in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (remainder);
@@ -2818,7 +2835,7 @@ wi::umod_trunc (const T1 &x, const T2 &y)
in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_floor (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
@@ -2854,7 +2871,7 @@ wi::umod_floor (const T1 &x, const T2 &y)
in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
@@ -2880,7 +2897,7 @@ wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
given by SGN. Indicate in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_round (const T1 &x, const T2 &y, signop sgn, int *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);