Hi Kenny,

As discussed, this patch effectively goes back to your original idea of
having excess upper bits in a HWI being undefined on read (at least as
the default assumption).  wide_int itself still ensures that the excess
bits are stored as signs though.

This patch is already quite big, so I've left ::is_sign_extended itself
to a follow-on patch.  I've also not done anything wrt the scratch array.
More to follow soon, if this patch is OK.

The easiest way of maintaining the excess bits as signs seemed to be to
have wide_int_storage::set_len do the sign extension.  This means that
we do it in one place and also means that the *_large routines don't
waste time doing it for addr_wide_int and max_wide_int, which don't have
excess bits.

The current code took advantage of the fact that operations like AND
and OR on two sign-extended inputs also produce a sign-extended result.
We no longer know without ::is_sign_extended whether the inputs are
sign-extended though, so there are very few places that can guarantee
that the final extension is redundant.  We can look at optimising extension
away later in cases where operations like AND and OR are applied to
::is_sign_extended inputs, but that'd need the follow-on patch above.
Or we could decide that it isn't worth the hassle and just leave excess
upper bits as undefined on write too, which really is going back to your
original model. :-)

I've used to_shwi and to_uhwi instead of slow and ulow in cases where
we need the upper bits to be signs or zeros respectively.

There were several places that still assumed undefined upper bits,
so the patch isn't as big as it could have been.

I got rid of a couple of signed right shifts while there (and a test
function at the end of wide-int.cc that I'd accidentally committed
at one point -- oops).

OK to install?

Thanks,
Richard


Index: gcc/tree.h
===================================================================
--- gcc/tree.h  2013-10-19 09:54:45.504387762 +0100
+++ gcc/tree.h  2013-10-19 09:54:59.671508531 +0100
@@ -5175,7 +5175,7 @@ wi::int_traits <const_tree>::get_precisi
 
 /* Convert the tree_cst X into a wide_int of PRECISION.  */
 inline wi::storage_ref
-wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *scratch,
+wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *,
                                        unsigned int precision, const_tree x)
 {
   unsigned int len = TREE_INT_CST_NUNITS (x);
@@ -5186,32 +5186,14 @@ wi::int_traits <const_tree>::decompose (
 
   gcc_assert (precision >= xprecision);
 
-  /* Got to be careful of precision 0 values.  */
-  if (precision)
-    len = MIN (len, max_len);
-  if (TYPE_SIGN (TREE_TYPE (x)) == UNSIGNED)
-    {
-      unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
-      if (small_prec)
-       {
-         /* We have to futz with this because the canonization for
-            short unsigned numbers in wide-int is different from the
-            canonized short unsigned numbers in the tree-cst.  */
-         if (len == max_len) 
-           {
-             for (unsigned int i = 0; i < len - 1; i++)
-               scratch[i] = val[i];
-             scratch[len - 1] = sext_hwi (val[len - 1], precision);
-             return wi::storage_ref (scratch, len, precision);
-           }
-       } 
-      /* We have to futz here because a large unsigned int with
-        precision 128 may look (0x0 0xFFFFFFFFFFFFFFFF 0xF...) as a
-        tree-cst and as (0xF...) as a wide-int.  */
-      else if (precision == xprecision && len == max_len)
-        while (len > 1 && val[len - 1] == (HOST_WIDE_INT)-1)
-          len--;
-    }
+  /* If an unsigned constant occupies a whole number of HWIs and has the
+     upper bit set, its representation includes an extra zero HWI,
+     so that the representation can be used for wider precisions.
+     Trim the length if we're accessing the tree in its own precision.  */
+  if (__builtin_expect (len > max_len, 0))
+    do
+      len--;
+    while (len > 1 && val[len - 1] == -1);
 
   /* Signed and the rest of the unsigned cases are easy.  */
   return wi::storage_ref (val, len, precision);
Index: gcc/wide-int.cc
===================================================================
--- gcc/wide-int.cc     2013-10-15 18:33:44.789339673 +0100
+++ gcc/wide-int.cc     2013-10-19 13:08:12.180902396 +0100
@@ -46,7 +46,7 @@ static const HOST_WIDE_INT zeros[WIDE_IN
 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
 #define BLOCKS_NEEDED(PREC) \
   (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
-#define SIGN_MASK(X) (((HOST_WIDE_INT)X) >> (HOST_BITS_PER_WIDE_INT - 1))
+#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
 
 /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
    based on the top existing bit of VAL. */
@@ -65,7 +65,6 @@ safe_uhwi (const HOST_WIDE_INT *val, uns
 static unsigned int
 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
 {
-  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
   HOST_WIDE_INT top;
   int i;
@@ -73,15 +72,12 @@ canonize (HOST_WIDE_INT *val, unsigned i
   if (len > blocks_needed)
     len = blocks_needed;
 
-  /* Clean up the top bits for any mode that is not a multiple of a
-     HWI and is not compressed.  */
-  if (len == blocks_needed && small_prec)
-    val[len - 1] = sext_hwi (val[len - 1], small_prec);
-
   if (len == 1)
     return len;
 
   top = val[len - 1];
+  if (len * HOST_BITS_PER_WIDE_INT > precision)
+    top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
   if (top != 0 && top != (HOST_WIDE_INT)-1)
     return len;
 
@@ -215,9 +211,7 @@ wi::from_mpz (const_tree type, mpz_t x,
 {
   size_t count, numb;
   int prec = TYPE_PRECISION (type);
-  int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
   wide_int res = wide_int::create (prec);
-  unsigned int i;
 
   if (!wrap)
     {
@@ -242,22 +236,14 @@ wi::from_mpz (const_tree type, mpz_t x,
      http://gmplib.org/manual/Integer-Import-and-Export.html  */
   numb = 8*sizeof(HOST_WIDE_INT);
   count = (mpz_sizeinbase (x, 2) + numb-1) / numb;
-  if (count < 1)
-    count = 1;
-
-  /* Need to initialize the number because it writes nothing for
-     zero.  */
   HOST_WIDE_INT *val = res.write_val ();
-  for (i = 0; i < count; i++)
-    val[i] = 0;
-
-  res.set_len (count);
-
   mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
-
-  /* Canonize for small_prec.  */
-  if (small_prec && count == (size_t)BLOCKS_NEEDED (prec))
-    val[count-1] = sext_hwi (val[count-1], small_prec); 
+  if (count < 1)
+    {
+      val[0] = 0;
+      count = 1;
+    }
+  res.set_len (count);
 
   if (mpz_sgn (x) < 0)
     res = -res;
@@ -324,11 +310,11 @@ wi::force_to_size (HOST_WIDE_INT *val, c
 
   if (precision > xprecision)
     {
+      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
+
       /* Expanding.  */
       if (sgn == UNSIGNED)
        {
-         unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
-
          if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
            val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
          else if (val[len - 1] < 0)
@@ -341,6 +327,11 @@ wi::force_to_size (HOST_WIDE_INT *val, c
                val[len++] = 0;
            }
        }
+      else
+       {
+         if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
+           val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
+       }
     }
   len = canonize (val, len, precision);
 
@@ -356,28 +347,34 @@ selt (const HOST_WIDE_INT *a, unsigned i
      unsigned int small_prec,
      unsigned int index, signop sgn)
 {
-  if (index >= len)
-    {
-      if (index < blocks_needed || sgn == SIGNED)
-       /* Signed or within the precision.  */
-       return SIGN_MASK (a[len - 1]);
-      else
-       /* Unsigned extension beyond the precision. */
-       return 0;
-    }
+  HOST_WIDE_INT val;
+  if (index < len)
+    val = a[index];
+  else if (index < blocks_needed || sgn == SIGNED)
+    /* Signed or within the precision.  */
+    val = SIGN_MASK (a[len - 1]);
+  else
+    /* Unsigned extension beyond the precision. */
+    val = 0;
 
-  if (sgn == UNSIGNED && small_prec && index == blocks_needed - 1)
-    return zext_hwi (a[index], small_prec);
+  if (small_prec && index == blocks_needed - 1)
+    return (sgn == SIGNED
+           ? sext_hwi (val, small_prec)
+           : zext_hwi (val, small_prec));
   else
-    return a[index];
+    return val;
 }
 
 /* Find the highest bit represented in a wide int.  This will in
    general have the same value as the sign bit.  */
 static inline HOST_WIDE_INT
-top_bit_of (const HOST_WIDE_INT *a, unsigned int len)
+top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
 {
-  return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1;
+  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
+  unsigned HOST_WIDE_INT val = a[len - 1];
+  if (excess > 0)
+    val <<= excess;
+  return val >> (HOST_BITS_PER_WIDE_INT - 1);
 }
 
 /*
@@ -759,8 +756,6 @@ unsigned int
 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
                  bool negate, unsigned int prec)
 {
-  int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
-
   gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);
 
   if (start + width > prec)
@@ -787,8 +782,6 @@ wi::shifted_mask (HOST_WIDE_INT *val, un
          /* case 000111000 */
          block = (((unsigned HOST_WIDE_INT) 1) << shift) - block - 1;
          val[i++] = negate ? ~block : block;
-         if (i == BLOCKS_NEEDED (prec) && small_prec)
-           val[i - 1] = sext_hwi (val[i - 1], small_prec);
          return i;
        }
       else
@@ -810,9 +803,6 @@ wi::shifted_mask (HOST_WIDE_INT *val, un
   else if (end < prec)
     val[i++] = negate ? -1 : 0;
 
-  if (i == BLOCKS_NEEDED (prec) && small_prec)
-    val[i - 1] = sext_hwi (val[i - 1], small_prec);
-
   return i;
 }
 
@@ -833,7 +823,7 @@ wi::and_large (HOST_WIDE_INT *val, const
   unsigned int len = MAX (op0len, op1len);
   if (l0 > l1)
     {
-      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
+      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
       if (op1mask  == 0)
        {
          l0 = l1;
@@ -851,7 +841,7 @@ wi::and_large (HOST_WIDE_INT *val, const
     }
   else if (l1 > l0)
     {
-      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
+      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
       if (op0mask == 0)
        len = l0 + 1;
       else
@@ -891,7 +881,7 @@ wi::and_not_large (HOST_WIDE_INT *val, c
   unsigned int len = MAX (op0len, op1len);
   if (l0 > l1)
     {
-      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
+      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
       if (op1mask != 0)
        {
          l0 = l1;
@@ -909,7 +899,7 @@ wi::and_not_large (HOST_WIDE_INT *val, c
     }
   else if (l1 > l0)
     {
-      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
+      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
       if (op0mask == 0)
        len = l0 + 1;
       else
@@ -949,7 +939,7 @@ wi::or_large (HOST_WIDE_INT *val, const
   unsigned int len = MAX (op0len, op1len);
   if (l0 > l1)
     {
-      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
+      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
       if (op1mask != 0)
        {
          l0 = l1;
@@ -967,7 +957,7 @@ wi::or_large (HOST_WIDE_INT *val, const
     }
   else if (l1 > l0)
     {
-      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
+      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
       if (op0mask != 0)
        len = l0 + 1;
       else
@@ -1007,7 +997,7 @@ wi::or_not_large (HOST_WIDE_INT *val, co
   unsigned int len = MAX (op0len, op1len);
   if (l0 > l1)
     {
-      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
+      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
       if (op1mask == 0)
        {
          l0 = l1;
@@ -1025,7 +1015,7 @@ wi::or_not_large (HOST_WIDE_INT *val, co
     }
   else if (l1 > l0)
     {
-      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
+      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
       if (op0mask != 0)
        len = l0 + 1;
       else
@@ -1064,7 +1054,7 @@ wi::xor_large (HOST_WIDE_INT *val, const
   unsigned int len = MAX (op0len, op1len);
   if (l0 > l1)
     {
-      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
+      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
       while (l0 > l1)
        {
          val[l0] = op0[l0] ^ op1mask;
@@ -1074,7 +1064,7 @@ wi::xor_large (HOST_WIDE_INT *val, const
 
   if (l1 > l0)
     {
-      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
+      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
       while (l1 > l0)
        {
          val[l1] = op0mask ^ op1[l1];
@@ -1110,17 +1100,17 @@ wi::add_large (HOST_WIDE_INT *val, const
   unsigned HOST_WIDE_INT carry = 0;
   unsigned HOST_WIDE_INT old_carry = 0;
   unsigned HOST_WIDE_INT mask0, mask1;
-  unsigned int i, small_prec;
+  unsigned int i;
 
   unsigned int len = MAX (op0len, op1len);
-  mask0 = -top_bit_of (op0, op0len);
-  mask1 = -top_bit_of (op1, op1len);
+  mask0 = -top_bit_of (op0, op0len, prec);
+  mask1 = -top_bit_of (op1, op1len, prec);
   /* Add all of the explicitly defined elements.  */
 
   for (i = 0; i < len; i++)
     {
-      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
-      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
+      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
+      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
       x = o0 + o1 + carry;
       val[i] = x;
       old_carry = carry;
@@ -1136,33 +1126,24 @@ wi::add_large (HOST_WIDE_INT *val, const
     }
   else if (overflow)
     {
+      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
       if (sgn == SIGNED)
        {
-         unsigned int p = (len == BLOCKS_NEEDED (prec)
-                           ? HOST_BITS_PER_WIDE_INT
-                           : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1;
-         HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
-         x = (x >> p) & 1;
-         *overflow = (x != 0);
+         unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
+         *overflow = HOST_WIDE_INT (x << shift) < 0;
        }
       else
        {
+         /* Put the MSB of X and O0 in the top of the HWI.  */
+         x <<= shift;
+         o0 <<= shift;
          if (old_carry)
-           *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] <= o0);
+           *overflow = (x <= o0);
          else
-           *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] < o0);
+           *overflow = (x < o0);
        }
     }
 
-  /* Canonize the top of the top block.  */
-  small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
-  if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
-    {
-      /* Modes with weird precisions.  */
-      i = len - 1;
-      val[i] = sext_hwi (val[i], small_prec);
-    }
-
   return canonize (val, len, prec);
 }
 
@@ -1224,7 +1205,7 @@ wi_unpack (unsigned HOST_HALF_WIDE_INT *
 
   if (sgn == SIGNED)
     {
-      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len);
+      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
       mask &= HALF_INT_MASK;
     }
   else
@@ -1323,7 +1304,7 @@ wi::mul_internal (HOST_WIDE_INT *val, co
   if ((high || full || needs_overflow)
       && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
     {
-      HOST_WIDE_INT r;
+      unsigned HOST_WIDE_INT r;
 
       if (sgn == SIGNED)
        {
@@ -1339,29 +1320,18 @@ wi::mul_internal (HOST_WIDE_INT *val, co
       r = o0 * o1;
       if (needs_overflow)
        {
-         HOST_WIDE_INT upper;
-         HOST_WIDE_INT sm
-           = (r << (HOST_BITS_PER_WIDE_INT - prec))
-           >> (HOST_BITS_PER_WIDE_INT - 1);
-         mask = ((HOST_WIDE_INT)1 << prec) - 1;
-         sm &= mask;
-         upper = (r >> prec) & mask;
-
          if (sgn == SIGNED)
            {
-             if (sm != upper)
+             if (HOST_WIDE_INT (r) != sext_hwi (r, prec))
                *overflow = true;
            }
          else
-           if (upper != 0)
-             *overflow = true;
+           {
+             if ((r >> prec) != 0)
+               *overflow = true;
+           }
        }
-      if (full)
-       val[0] = sext_hwi (r, prec * 2);
-      else if (high)
-       val[0] = r >> prec;
-      else
-       val[0] = sext_hwi (r, prec);
+      val[0] = high ? r >> prec : r;
       return 1;
     }
 
@@ -1511,11 +1481,11 @@ wi::sub_large (HOST_WIDE_INT *val, const
   unsigned HOST_WIDE_INT old_borrow = 0;
 
   unsigned HOST_WIDE_INT mask0, mask1;
-  unsigned int i, small_prec;
+  unsigned int i;
 
   unsigned int len = MAX (op0len, op1len);
-  mask0 = -top_bit_of (op0, op0len);
-  mask1 = -top_bit_of (op1, op1len);
+  mask0 = -top_bit_of (op0, op0len, prec);
+  mask1 = -top_bit_of (op1, op1len, prec);
 
   /* Subtract all of the explicitly defined elements.  */
   for (i = 0; i < len; i++)
@@ -1537,32 +1507,24 @@ wi::sub_large (HOST_WIDE_INT *val, const
     }
   else if (overflow)
     {
+      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
       if (sgn == SIGNED)
        {
-         unsigned int p = (len == BLOCKS_NEEDED (prec)
-                           ? HOST_BITS_PER_WIDE_INT
-                           : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1;
-         HOST_WIDE_INT x = (((o0 ^ o1) & (val[len - 1] ^ o0)) >> p) & 1;
-         *overflow = (x != 0);
+         unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
+         *overflow = HOST_WIDE_INT (x << shift) < 0;
        }
       else
        {
+         /* Put the MSB of X and O0 in the top of the HWI.  */
+         x <<= shift;
+         o0 <<= shift;
          if (old_borrow)
-           *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] >= o0);
+           *overflow = (x >= o0);
          else
-           *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] > o0);
+           *overflow = (x > o0);
        }
     }
 
-  /* Canonize the top of the top block.  */
-  small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
-  if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
-    {
-      /* Modes with weird precisions.  */
-      i = len - 1;
-      val[i] = sext_hwi (val[i], small_prec);
-    }
-
   return canonize (val, len, prec);
 }
 
@@ -1716,34 +1678,31 @@ wi::divmod_internal (HOST_WIDE_INT *quot
     overflow = true;
 
   /* The smallest signed number / -1 causes overflow.  */
-  if (sgn == SIGNED)
-    {
-      HOST_WIDE_INT small_prec = dividend_prec & (HOST_BITS_PER_WIDE_INT - 1);
-      if (dividend_len == BLOCKS_NEEDED (dividend_prec)
-         && divisor_len == 1
-         && divisor[0] == HOST_WIDE_INT(-1))
-
-       if ((small_prec
-            && ((HOST_WIDE_INT)zext_hwi (dividend[dividend_len - 1],
-                                         small_prec)
-                == (HOST_WIDE_INT(1) << (small_prec - 1))))
-           || dividend[dividend_len - 1]
-           == HOST_WIDE_INT(1) << (HOST_BITS_PER_WIDE_INT - 1))
-         {
-           /* The smallest neg number is 100...00.  The high word was
-              checked above, now check the rest of the words are
-              zero.  */
-           unsigned int i;
-           bool all_zero = true;
-           for (i = 0; i + 1 < dividend_len; i++)
-             if (dividend[i] != 0)
-               {
-                 all_zero = false;
-                 break;
-               }
-           if (all_zero)
-             overflow = true;
-         }
+  if (sgn == SIGNED
+      && dividend_len == BLOCKS_NEEDED (dividend_prec)
+      && divisor_len == 1)
+    {
+      HOST_WIDE_INT divisor_low = divisor[0];
+      if (divisor_prec < HOST_BITS_PER_WIDE_INT)
+       divisor_low = sext_hwi (divisor_low, divisor_prec);
+      unsigned HOST_WIDE_INT dividend_high = dividend[dividend_len - 1];
+      dividend_high <<= -dividend_prec % HOST_BITS_PER_WIDE_INT;
+      if (divisor_low == -1
+         && HOST_WIDE_INT (dividend_high) == HOST_WIDE_INT_MIN)
+       {
+         /* The smallest neg number is 100...00.  The high word was
+            checked above, now check the rest of the words are zero.  */
+         unsigned int i;
+         bool all_zero = true;
+         for (i = 0; i + 1 < dividend_len; i++)
+           if (dividend[i] != 0)
+             {
+               all_zero = false;
+               break;
+             }
+         if (all_zero)
+           overflow = true;
+       }
     }
 
   /* If overflow is set, just get out.  There will only be grief by
@@ -1775,10 +1734,10 @@ wi::divmod_internal (HOST_WIDE_INT *quot
          HOST_WIDE_INT o1 = sext_hwi (divisor[0], divisor_prec);
 
          if (quotient)
-           quotient[0] = sext_hwi (o0 / o1, dividend_prec);
+           quotient[0] = o0 / o1;
          if (remainder)
            {
-             remainder[0] = sext_hwi (o0 % o1, dividend_prec);
+             remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
        }
@@ -1788,10 +1747,10 @@ wi::divmod_internal (HOST_WIDE_INT *quot
          unsigned HOST_WIDE_INT o1 = zext_hwi (divisor[0], divisor_prec);
 
          if (quotient)
-           quotient[0] = sext_hwi (o0 / o1, dividend_prec);
+           quotient[0] = o0 / o1;
          if (remainder)
            {
-             remainder[0] = sext_hwi (o0 % o1, dividend_prec);
+             remainder[0] = o0 % o1;
              *remainder_len = 1;
            }
        }
@@ -1803,14 +1762,14 @@ wi::divmod_internal (HOST_WIDE_INT *quot
      did.  */
   if (sgn == SIGNED)
     {
-      if (top_bit_of (dividend, dividend_len))
+      if (top_bit_of (dividend, dividend_len, dividend_prec))
        {
          dividend_len = wi::sub_large (u0, zeros, 1, dividend, dividend_len,
                                        dividend_prec, UNSIGNED, 0);
          dividend = u0;
          dividend_neg = true;
        }
-      if (top_bit_of (divisor, divisor_len))
+      if (top_bit_of (divisor, divisor_len, divisor_prec))
        {
          divisor_len = wi::sub_large (u1, zeros, 1, divisor, divisor_len,
                                       divisor_prec, UNSIGNED, 0);
@@ -1824,12 +1783,12 @@ wi::divmod_internal (HOST_WIDE_INT *quot
   wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT*)divisor,
             divisor_len, divisor_blocks_needed, divisor_prec, sgn);
 
-  if (top_bit_of (dividend, dividend_len) && sgn == SIGNED)
+  if (top_bit_of (dividend, dividend_len, dividend_prec) && sgn == SIGNED)
     m = dividend_blocks_needed;
   else
     m = 2 * dividend_len;
 
-  if (top_bit_of (divisor, divisor_len) && sgn == SIGNED)
+  if (top_bit_of (divisor, divisor_len, divisor_prec) && sgn == SIGNED)
     n = divisor_blocks_needed;
   else
     n = 2 * divisor_len;
@@ -2197,10 +2156,3 @@ wide_int_ro::dump (char* buf) const
   return buf;
 }
 #endif
-
-HOST_WIDE_INT foo (tree x)
-{
-  addr_wide_int y = x;
-  addr_wide_int z = y;
-  return z.to_shwi ();
-}
Index: gcc/wide-int.h
===================================================================
--- gcc/wide-int.h      2013-10-19 09:54:45.557388214 +0100
+++ gcc/wide-int.h      2013-10-19 10:28:52.386344132 +0100
@@ -185,17 +185,16 @@ #define WIDE_INT_H
 
      assuming t is a int_cst.
 
-   Note, the bits past the precision up to the nearest HOST_WDE_INT
-   boundary are defined to be copies of the top bit of the value,
-   however the bits above those defined bits not defined and the
-   algorithms used here are careful not to depend on their value.  In
-   particular, values that come in from rtx constants may have random
-   bits.  When the precision is 0, all the bits in the LEN elements of
-   VEC are significant with no undefined bits.  Precisionless
-   constants are limited to being one or two HOST_WIDE_INTs.  When two
-   are used the upper value is 0, and the high order bit of the first
-   value is set.  (Note that this may need to be generalized if it is
-   ever necessary to support 32bit HWIs again).
+   Any bits in a wide_int above the precision are sign-extended from the
+   most significant bit.  For example, a 4-bit value 0x8 is represented as
+   VAL = { 0xf...fff8 }.  However, as an optimization, we allow other integer
+   constants to be represented with undefined bits above the precision.
+   This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN,
+   so that the INTEGER_CST representation can be used both in TYPE_PRECISION
+   and in wider precisions.
+
+   Precision 0 is allowed for the special case of zero-width bitfields.
+   They always have a VAL of { 0 } and a LEN of 1.
 
    Many binary operations require that the precisions of the two
    operands be the same.  However, the API tries to keep this relaxed
@@ -208,7 +207,7 @@ #define WIDE_INT_H
      This is allowed because it is always known whether to sign or zero
      extend these values.
 
-   * The comparisons do not require that the operands be the same
+   * Order comparisons do not require that the operands be the same
      length.  This allows wide ints to be used in hash tables where
      all of the values may not be the same precision.  */
 
@@ -679,7 +678,13 @@ generic_wide_int <storage>::to_short_add
 inline HOST_WIDE_INT
 generic_wide_int <storage>::sign_mask () const
 {
-  return this->get_val ()[this->get_len () - 1] < 0 ? -1 : 0;
+  unsigned int len = this->get_len ();
+  unsigned int precision = this->get_precision ();
+  unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
+  int excess = len * HOST_BITS_PER_WIDE_INT - precision;
+  if (excess > 0)
+    high <<= excess;
+  return HOST_WIDE_INT (high) < 0 ? -1 : 0;
 }
 
 /* Return the signed value of the least-significant explicitly-encoded
@@ -855,9 +860,10 @@ inline wide_int_storage::wide_int_storag
   STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
   wide_int_ref xi (x);
   precision = xi.precision;
-  len = xi.len;
-  for (unsigned int i = 0; i < len; ++i)
+  unsigned int l = xi.len;
+  for (unsigned int i = 0; i < l; ++i)
     val[i] = xi.val[i];
+  set_len (l);
 }
 
 inline unsigned int
@@ -888,6 +894,9 @@ wide_int_storage::write_val ()
 wide_int_storage::set_len (unsigned int l)
 {
   len = l;
+  if (len * HOST_BITS_PER_WIDE_INT > precision)
+    val[len - 1] = sext_hwi (val[len - 1],
+                            precision % HOST_BITS_PER_WIDE_INT);
 }
 
 /* Treat X as having signedness SGN and convert it to a PRECISION-bit
@@ -1032,6 +1041,8 @@ fixed_wide_int_storage <N>::write_val ()
 fixed_wide_int_storage <N>::set_len (unsigned int l)
 {
   len = l;
+  /* There are no excess bits in val[len - 1].  */
+  STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
 }
 
 /* Treat X as having signedness SGN and convert it to an N-bit number.  */
@@ -1302,11 +1313,7 @@ decompose (HOST_WIDE_INT *scratch, unsig
 {
   scratch[0] = x.val;
   if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
-    {
-      if (precision < HOST_BITS_PER_WIDE_INT)
-       scratch[0] = sext_hwi (scratch[0], precision);
-      return wi::storage_ref (scratch, 1, precision);
-    }
+    return wi::storage_ref (scratch, 1, precision);
   scratch[1] = 0;
   return wi::storage_ref (scratch, 2, precision);
 }
@@ -1419,7 +1426,7 @@ wi::neg_p (const wide_int_ref &x, signop
 {
   if (sgn == UNSIGNED)
     return false;
-  return x.shigh () < 0;
+  return x.sign_mask () < 0;
 }
 
 /* Return -1 if the top bit of X is set and 0 if the top bit is
@@ -1443,10 +1450,7 @@ wi::eq_p (const T1 &x, const T2 &y)
   if (precision <= HOST_BITS_PER_WIDE_INT)
     {
       unsigned HOST_WIDE_INT diff = xi.ulow () ^ yi.ulow ();
-      bool result = (diff << (HOST_BITS_PER_WIDE_INT - precision)) == 0;
-      if (result)
-       gcc_assert (xi.ulow () == yi.ulow ());
-      return result;
+      return (diff << (-precision % HOST_BITS_PER_WIDE_INT)) == 0;
     }
   return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
 }
@@ -1472,12 +1476,12 @@ wi::lts_p (const wide_int_ref &x, const
     {
       // If x fits directly into a shwi, we can compare directly.
       if (wi::fits_shwi_p (x))
-       return x.slow () < y.slow ();
+       return x.to_shwi () < y.to_shwi ();
       // If x doesn't fit and is negative, then it must be more
       // negative than any value in y, and hence smaller than y.
       if (neg_p (x, SIGNED))
        return true;
-      // If x is positve, then it must be larger than any value in y,
+      // If x is positive, then it must be larger than any value in y,
       // and hence greater than y.
       return false;
     }
@@ -1492,8 +1496,8 @@ wi::ltu_p (const wide_int_ref &x, const
   if (x.precision <= HOST_BITS_PER_WIDE_INT
       && y.precision <= HOST_BITS_PER_WIDE_INT)
     {
-      unsigned HOST_WIDE_INT xl = zext_hwi (x.ulow (), x.precision);
-      unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision);
+      unsigned HOST_WIDE_INT xl = x.to_uhwi ();
+      unsigned HOST_WIDE_INT yl = y.to_uhwi ();
       return xl < yl;
     }
   else
@@ -1591,8 +1595,8 @@ wi::cmps (const wide_int_ref &x, const w
   if (x.precision <= HOST_BITS_PER_WIDE_INT
       && y.precision <= HOST_BITS_PER_WIDE_INT)
     {
-      HOST_WIDE_INT xl = x.slow ();
-      HOST_WIDE_INT yl = y.slow ();
+      HOST_WIDE_INT xl = x.to_shwi ();
+      HOST_WIDE_INT yl = y.to_shwi ();
       if (xl < yl)
        return -1;
       else if (xl > yl)
@@ -1612,8 +1616,8 @@ wi::cmpu (const wide_int_ref &x, const w
   if (x.precision <= HOST_BITS_PER_WIDE_INT
       && y.precision <= HOST_BITS_PER_WIDE_INT)
     {
-      unsigned HOST_WIDE_INT xl = zext_hwi (x.ulow (), x.precision);
-      unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision);
+      unsigned HOST_WIDE_INT xl = x.to_uhwi ();
+      unsigned HOST_WIDE_INT yl = y.to_uhwi ();
       if (xl < yl)
        return -1;
       else if (xl == yl)
@@ -1675,7 +1679,7 @@ wi::abs (const T &x)
     return neg (x);
 
   WI_UNARY_RESULT_VAR (result, val, T, x);
-  wide_int_ref xi (x, get_precision(result));
+  wide_int_ref xi (x, get_precision (result));
   for (unsigned int i = 0; i < xi.len; ++i)
     val[i] = xi.val[i];
   result.set_len (xi.len);
@@ -1951,7 +1955,7 @@ wi::add (const T1 &x, const T2 &y)
   wide_int_ref yi (y, precision);
   if (precision <= HOST_BITS_PER_WIDE_INT)
     {
-      val[0] = sext_hwi (xi.ulow () + yi.ulow (), precision);
+      val[0] = xi.ulow () + yi.ulow ();
       result.set_len (1);
     }
   else
@@ -1984,7 +1988,7 @@ wi::add (const T1 &x, const T2 &y, signo
       else
        *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
                     < (xl << (HOST_BITS_PER_WIDE_INT - precision)));
-      val[0] = sext_hwi (resultl, precision);
+      val[0] = resultl;
       result.set_len (1);
     }
   else
@@ -2005,7 +2009,7 @@ wi::sub (const T1 &x, const T2 &y)
   wide_int_ref yi (y, precision);
   if (precision <= HOST_BITS_PER_WIDE_INT)
     {
-      val[0] = sext_hwi (xi.ulow () - yi.ulow (), precision);
+      val[0] = xi.ulow () - yi.ulow ();
       result.set_len (1);
     }
   else
@@ -2037,7 +2041,7 @@ wi::sub (const T1 &x, const T2 &y, signo
       else
        *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
                     > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
-      val[0] = sext_hwi (resultl, precision);
+      val[0] = resultl;
       result.set_len (1);
     }
   else
@@ -2058,7 +2062,7 @@ wi::mul (const T1 &x, const T2 &y)
   wide_int_ref yi (y, precision);
   if (precision <= HOST_BITS_PER_WIDE_INT)
     {
-      val[0] = sext_hwi (xi.ulow () * yi.ulow (), precision);
+      val[0] = xi.ulow () * yi.ulow ();
       result.set_len (1);
     }
   else
@@ -2462,11 +2466,7 @@ wi::trunc_shift (const wide_int_ref &x,
       if (geu_p (x, precision))
        return precision;
     }
-  /* Flush out undefined bits.  */
-  unsigned int shift = x.ulow ();
-  if (x.precision < HOST_BITS_PER_WIDE_INT)
-    shift = zext_hwi (shift, x.precision);
-  return shift & (bitsize - 1);
+  return x.to_uhwi () & (bitsize - 1);
 }
 
 /* Return X << Y.  If BITSIZE is nonzero, only use the low BITSIZE
@@ -2487,7 +2487,7 @@ wi::lshift (const T &x, const wide_int_r
     }
   else if (precision <= HOST_BITS_PER_WIDE_INT)
     {
-      val[0] = sext_hwi (xi.ulow () << shift, precision);
+      val[0] = xi.ulow () << shift;
       result.set_len (1);
     }
   else
@@ -2515,8 +2515,7 @@ wi::lrshift (const T &x, const wide_int_
     }
   else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
     {
-      val[0] = sext_hwi (zext_hwi (xi.ulow (), xi.precision) >> shift, 
-                        xi.precision);
+      val[0] = xi.to_uhwi () >> shift;
       result.set_len (1);
     }
   else

Reply via email to