Kenneth Zadeck <zad...@naturalbridge.com> writes: > + int blocks_needed = (precision + HOST_BITS_PER_WIDE_INT - 1) / > HOST_BITS_PER_WIDE_INT;
Watch out for the long lines. There are several others in the patch too. You'll need to clean up the existing ones before the merge :-) > + case CONST_WIDE_INT: > + len = CONST_WIDE_INT_NUNITS (x.first); > + if (small_prec && blocks_needed == len - 1) > + { > + int i; > + for (i = 0; i < len - 1; i++) > + scratch[i] = CONST_WIDE_INT_ELT (x.first, i); > + scratch[len - 1] = sext_hwi (CONST_WIDE_INT_ELT (x.first, i), > small_prec); > + } > + else > + scratch = &CONST_WIDE_INT_ELT (x.first, 0); > + break; > + > +#if TARGET_SUPPORTS_WIDE_INT == 0 > + case CONST_DOUBLE: > + len = 2; > + if (small_prec) > + { > + scratch[0] = CONST_DOUBLE_LOW (x.first); > + scratch[1] = sext_hwi (CONST_DOUBLE_HIGH (x.first), small_prec); > + } > + else > + scratch = &CONST_DOUBLE_LOW (x.first); > + break; > +#endif I'd prefer we kept these as-is if possible, and only canonise CONST_INTs. That ought to be the only problem case, because of GEN_INT not taking a mode. Code that creates CONST_DOUBLE or CONST_WIDE_INT should already be going through immed_wide_int_const and so always needs to specify a mode. Plus we don't support non-power-of-2 precisions yet anyway. > @@ -1400,9 +1418,9 @@ wi::fits_uhwi_p (const wide_int_ref &x) > if (x.precision <= HOST_BITS_PER_WIDE_INT) > return true; > if (x.len == 1) > - return x.sign_mask () == 0; > + return x.get_val ()[0] >= 0; The rest of the patch seems to use slow () for this. > if (x.precision < 2 * HOST_BITS_PER_WIDE_INT) > - return zext_hwi (x.uhigh (), x.precision % HOST_BITS_PER_WIDE_INT) == 0; > + return x.uhigh () == 0; > return x.len == 2 && x.uhigh () == 0; We don't need the x.precision < 2 * HOST_BITS_PER_WIDE_INT case any more. x.len == 2 && x.uhigh () == 0 handles anything > HOST_BITS_PER_WIDE_INT. 
> @@ -1415,9 +1433,7 @@ wi::neg_p (const wide_int_ref &x, signop > return false; > if (x.precision == 0) > return false; > - if (x.len * HOST_BITS_PER_WIDE_INT > x.precision) > - return (x.uhigh () >> (x.precision % HOST_BITS_PER_WIDE_INT - 1)) & 1; > - return x.sign_mask () < 0; > + return x.shigh () < 0; > } We don't need the x.precision == 0 test now, since the new x.shigh () < 0 form no longer reads x.precision (the removed code divided by it via the % HOST_BITS_PER_WIDE_INT - 1 shift). Looks good to me otherwise. Thanks, Richard