The purpose of this set of changes is to remove assumptions in GCC
about type sizes.  Before this patch, GCC assumed that all types had
power-of-two sizes and used naive math accordingly.

Old:
        POINTER_SIZE / BITS_PER_UNIT
        TYPE_SIZE
        GET_MODE_BITSIZE

New:
        POINTER_SIZE_UNITS  (ceil, not floor)
        TYPE_PRECISION
        GET_MODE_PRECISION
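
As a concrete illustration (a standalone sketch, not part of the patch,
assuming a hypothetical 20-bit pointer with 8-bit storage units): the old
floor division under-reports the bytes needed, while the new ceiling
division rounds up to whole bytes.

        /* Sketch only: floor vs. ceiling byte counts for an assumed
           20-bit pointer.  Compiles and runs standalone; not GCC code.  */
        #include <stdio.h>

        #define BITS_PER_UNIT 8
        #define POINTER_SIZE 20   /* assumed partial-int pointer width */

        int
        main (void)
        {
          int floor_units = POINTER_SIZE / BITS_PER_UNIT;                       /* 2 */
          int ceil_units = (POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT;  /* 3 */
          printf ("floor: %d bytes, ceil: %d bytes\n", floor_units, ceil_units);
          return 0;
        }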

gcc/
        * cppbuiltin.c (define_builtin_macros_for_type_sizes): Round
        pointer size up to a power of two.
        * defaults.h (DWARF2_ADDR_SIZE): Round up.
        (POINTER_SIZE_UNITS): New, rounded up value.
        * dwarf2asm.c (size_of_encoded_value): Use it.
        (dw2_output_indirect_constant_1): Likewise.
        * expmed.c (init_expmed_one_conv): Use the now-known precision
        of partial int modes instead of assuming one bit less than the
        storage size.
        (strict_volatile_bitfield_p): Fix off-by-one check against
        bitregion_end.
        * loop-iv.c (iv_number_of_iterations): Use precision, not size.
        * optabs.c (expand_float): Use precision, not size.
        (expand_fix): Likewise.
        * simplify-rtx.c (simplify_unary_operation_1): Likewise.
        (simplify_binary_operation_1): Likewise.
        (simplify_const_binary_operation): Handle partial int modes also.
        * tree-dfa.c (get_ref_base_and_extent): Use precision, not size.
        * varasm.c (assemble_addr_to_section): Round up pointer sizes.
        (default_assemble_integer): Likewise.
        (dump_tm_clone_pairs): Likewise.
        * dwarf2out.c (mem_loc_descriptor): Allow partial-int modes also.
        * var-tracking.c (adjust_mems): Allow partial-int modes also.
        (prepare_call_arguments): Likewise.
        * stor-layout.c (finalize_type_size): Preserve precision.
        (layout_type): Use precision, not size.


Index: gcc/cppbuiltin.c
===================================================================
--- gcc/cppbuiltin.c    (revision 213886)
+++ gcc/cppbuiltin.c    (working copy)
@@ -172,13 +172,13 @@ define_builtin_macros_for_type_sizes (cp
                          ? "__ORDER_BIG_ENDIAN__"
                          : "__ORDER_LITTLE_ENDIAN__"));
 
   /* ptr_type_node can't be used here since ptr_mode is only set when
      toplev calls backend_init which is not done with -E switch.  */
   cpp_define_formatted (pfile, "__SIZEOF_POINTER__=%d",
-                       POINTER_SIZE / BITS_PER_UNIT);
+                       1 << ceil_log2 ((POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT));
 }
 
 
 /* Define macros builtins common to all language performing CPP
    preprocessing.  */
 void
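
(Aside, not part of the patch: __SIZEOF_POINTER__ has to match sizeof (void *),
so after rounding the pointer width up to whole bytes the result is further
rounded up to a power of two.  A minimal stand-in for the ceil_log2 helper
used above shows the arithmetic for an assumed 20-bit pointer.)

        /* Sketch only: 20 bits -> (20 + 7) / 8 == 3 bytes ->
           1 << sketch_ceil_log2 (3) == 4, the assumed sizeof (void *).  */
        static int
        sketch_ceil_log2 (unsigned int x)
        {
          int log = 0;
          while ((1u << log) < x)
            log++;
          return log;
        }
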
Index: gcc/defaults.h
===================================================================
--- gcc/defaults.h      (revision 213886)
+++ gcc/defaults.h      (working copy)
@@ -448,13 +448,13 @@ see the files COPYING3 and COPYING.RUNTI
 /* The size of addresses as they appear in the Dwarf 2 data.
    Some architectures use word addresses to refer to code locations,
    but Dwarf 2 info always uses byte addresses.  On such machines,
    Dwarf 2 addresses need to be larger than the architecture's
    pointers.  */
 #ifndef DWARF2_ADDR_SIZE
-#define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT)
+#define DWARF2_ADDR_SIZE ((POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT)
 #endif
 
 /* The size in bytes of a DWARF field indicating an offset or length
    relative to a debug info section, specified to be 4 bytes in the
    DWARF-2 specification.  The SGI/MIPS ABI defines it to be the same
    as PTR_SIZE.  */
@@ -748,12 +748,16 @@ see the files COPYING3 and COPYING.RUNTI
 #endif
 
 /* Width in bits of a pointer.  Mind the value of the macro `Pmode'.  */
 #ifndef POINTER_SIZE
 #define POINTER_SIZE BITS_PER_WORD
 #endif
+#ifndef POINTER_SIZE_UNITS
+#define POINTER_SIZE_UNITS ((POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT)
+#endif
+
 
 #ifndef PIC_OFFSET_TABLE_REGNUM
 #define PIC_OFFSET_TABLE_REGNUM INVALID_REGNUM
 #endif
 
 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
Index: gcc/dwarf2asm.c
===================================================================
--- gcc/dwarf2asm.c     (revision 213886)
+++ gcc/dwarf2asm.c     (working copy)
@@ -387,13 +387,13 @@ size_of_encoded_value (int encoding)
   if (encoding == DW_EH_PE_omit)
     return 0;
 
   switch (encoding & 0x07)
     {
     case DW_EH_PE_absptr:
-      return POINTER_SIZE / BITS_PER_UNIT;
+      return POINTER_SIZE_UNITS;
     case DW_EH_PE_udata2:
       return 2;
     case DW_EH_PE_udata4:
       return 4;
     case DW_EH_PE_udata8:
       return 8;
@@ -917,13 +917,13 @@ dw2_output_indirect_constant_1 (splay_tr
       if (USE_LINKONCE_INDIRECT)
        DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
     }
 
   sym_ref = gen_rtx_SYMBOL_REF (Pmode, sym);
   assemble_variable (decl, 1, 1, 1);
-  assemble_integer (sym_ref, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+  assemble_integer (sym_ref, POINTER_SIZE_UNITS, POINTER_SIZE, 1);
 
   return 0;
 }
 
 /* Emit the constants queued through dw2_force_const_mem.  */
 
Index: gcc/expmed.c
===================================================================
--- gcc/expmed.c        (revision 213886)
+++ gcc/expmed.c        (working copy)
@@ -115,19 +115,25 @@ static void
 init_expmed_one_conv (struct init_expmed_rtl *all, enum machine_mode to_mode,
                      enum machine_mode from_mode, bool speed)
 {
   int to_size, from_size;
   rtx which;
 
-  /* We're given no information about the true size of a partial integer,
-     only the size of the "full" integer it requires for storage.  For
-     comparison purposes here, reduce the bit size by one in that case.  */
-  to_size = (GET_MODE_BITSIZE (to_mode)
-            - (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT));
-  from_size = (GET_MODE_BITSIZE (from_mode)
-              - (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT));
+  to_size = GET_MODE_PRECISION (to_mode);
+  from_size = GET_MODE_PRECISION (from_mode);
+
+  /* Most partial integers have a precision less than that of the
+     "full" integer they require for storage.  In case one doesn't,
+     reduce the bit size by one for the comparison done here, as the
+     old code did for every partial-int mode.  */
+  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
+      && exact_log2 (to_size) != -1)
+    to_size --;
+  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
+      && exact_log2 (from_size) != -1)
+    from_size --;
   
   /* Assume cost of zero-extend and sign-extend is the same.  */
   which = (to_size < from_size ? all->trunc : all->zext);
 
   PUT_MODE (all->reg, from_mode);
   set_convert_cost (to_mode, from_mode, speed, set_src_cost (which, speed));
@@ -446,13 +452,13 @@ strict_volatile_bitfield_p (rtx op0, uns
          && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
     return false;
 
   /* Check for cases where the C++ memory model applies.  */
   if (bitregion_end != 0
       && (bitnum - bitnum % modesize < bitregion_start
-         || bitnum - bitnum % modesize + modesize > bitregion_end))
+         || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
     return false;
 
   return true;
 }
 
 /* Return true if OP is a memory and if a bitfield of size BITSIZE at
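
(Aside, not part of the patch: the conversion-cost setup above now compares
mode precisions directly.  The power-of-two test only triggers for a
partial-int mode whose precision happens to equal its full storage width,
preserving the old behaviour of ranking it below the true integer mode.
A self-contained sketch of that adjustment, with assumed precisions:)

        /* Sketch only: effective size used for the cost comparison.
           A 20-bit partial int keeps its precision; an assumed partial
           int whose precision is already a power of two (say 32) is
           nudged down by one, mirroring exact_log2 (size) != -1.  */
        static int
        sketch_effective_size (int precision, int is_partial_int)
        {
          int pow2 = precision > 0 && (precision & (precision - 1)) == 0;
          if (is_partial_int && pow2)
            precision--;     /* 32 -> 31 */
          return precision;  /* 20 -> 20 */
        }
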
Index: gcc/loop-iv.c
===================================================================
--- gcc/loop-iv.c       (revision 213886)
+++ gcc/loop-iv.c       (working copy)
@@ -2410,13 +2410,13 @@ iv_number_of_iterations (struct loop *lo
 
   if (!canonicalize_iv_subregs (&iv0, &iv1, cond, desc))
     goto fail;
 
   comp_mode = iv0.extend_mode;
   mode = iv0.mode;
-  size = GET_MODE_BITSIZE (mode);
+  size = GET_MODE_PRECISION (mode);
   get_mode_bounds (mode, (cond == LE || cond == LT), comp_mode, &mmin, &mmax);
   mode_mmin = lowpart_subreg (mode, mmin, comp_mode);
   mode_mmax = lowpart_subreg (mode, mmax, comp_mode);
 
   if (!CONST_INT_P (iv0.step) || !CONST_INT_P (iv1.step))
     goto fail;
Index: gcc/optabs.c
===================================================================
--- gcc/optabs.c        (revision 213886)
+++ gcc/optabs.c        (working copy)
@@ -5181,13 +5181,13 @@ expand_float (rtx to, rtx from, int unsi
     {
       rtx libfunc;
       rtx insns;
       rtx value;
       convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
 
-      if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
+      if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
        from = convert_to_mode (SImode, from, unsignedp);
 
       libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
       gcc_assert (libfunc);
 
       start_sequence ();
@@ -5357,13 +5357,13 @@ expand_fix (rtx to, rtx from, int unsign
        }
 
   /* We can't do it with an insn, so use a library call.  But first ensure
      that the mode of TO is at least as wide as SImode, since those are the
      only library calls we know about.  */
 
-  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
+  if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
     {
       target = gen_reg_rtx (SImode);
 
       expand_fix (target, from, unsignedp);
     }
   else
Index: gcc/simplify-rtx.c
===================================================================
--- gcc/simplify-rtx.c  (revision 213886)
+++ gcc/simplify-rtx.c  (working copy)
@@ -1361,14 +1361,14 @@ simplify_unary_operation_1 (enum rtx_cod
        }
 
       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
-         gcc_assert (GET_MODE_BITSIZE (mode)
-                     > GET_MODE_BITSIZE (GET_MODE (op)));
+         gcc_assert (GET_MODE_PRECISION (mode)
+                     > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }
 
       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
@@ -1473,21 +1473,21 @@ simplify_unary_operation_1 (enum rtx_cod
       if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
 
       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
-        GET_MODE_BITSIZE (N) - I bits.  */
+        GET_MODE_PRECISION (N) - I bits.  */
       if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
-         && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
+         && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
-           = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+           = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
@@ -3076,16 +3076,16 @@ simplify_binary_operation_1 (enum rtx_co
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
       if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
-                      GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
-                      GET_MODE_BITSIZE (mode) - 1))
+                      GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
+                      GET_MODE_PRECISION (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
-                                   mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
+                                   mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
                                                        - INTVAL (trueop1)));
 #endif
       /* FALLTHRU */
     case ASHIFTRT:
       if (trueop1 == CONST0_RTX (mode))
        return op0;
@@ -3096,13 +3096,13 @@ simplify_binary_operation_1 (enum rtx_co
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
     canonicalize_shift:
       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
-         val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
+         val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
       break;
 
     case ASHIFT:
@@ -3776,13 +3776,14 @@ simplify_const_binary_operation (enum rt
 
          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
     }
 
   /* We can fold some multi-word operations.  */
-  if (GET_MODE_CLASS (mode) == MODE_INT
+  if ((GET_MODE_CLASS (mode) == MODE_INT
+       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
       && CONST_SCALAR_INT_P (op0)
       && CONST_SCALAR_INT_P (op1))
     {
       wide_int result;
       bool overflow;
       rtx_mode_t pop0 = std::make_pair (op0, mode);
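
(Aside, not part of the patch: with the mode's precision in place of its
storage bitsize, the rotate canonicalization above works for partial-int
modes too.  For an assumed 20-bit mode, a left rotate by 15 falls in
[20/2 + 1, 20 - 1] and is rewritten as a right rotate by 20 - 15 = 5; the
old code measured the range against the storage bitsize rather than the
value precision.  Sketch:)

        /* Sketch only: pick the shorter rotate direction for a mode of
           the given precision, mirroring the IN_RANGE check above.  */
        static int
        sketch_canonical_rotate (int precision, int left_count)
        {
          if (left_count >= precision / 2 + 1 && left_count <= precision - 1)
            return precision - left_count;  /* rotate right by this instead */
          return left_count;                /* keep the left rotation */
        }
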
Index: gcc/tree-dfa.c
===================================================================
--- gcc/tree-dfa.c      (revision 213886)
+++ gcc/tree-dfa.c      (working copy)
@@ -404,13 +404,13 @@ get_ref_base_and_extent (tree exp, HOST_
   else if (!VOID_TYPE_P (TREE_TYPE (exp)))
     {
       enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
       if (mode == BLKmode)
        size_tree = TYPE_SIZE (TREE_TYPE (exp));
       else
-       bitsize = int (GET_MODE_BITSIZE (mode));
+       bitsize = int (GET_MODE_PRECISION (mode));
     }
   if (size_tree != NULL_TREE
       && TREE_CODE (size_tree) == INTEGER_CST)
     bitsize = wi::to_offset (size_tree);
 
   /* Initially, maxsize is the same as the accessed element size.
Index: gcc/varasm.c
===================================================================
--- gcc/varasm.c        (revision 213886)
+++ gcc/varasm.c        (working copy)
@@ -1481,13 +1481,13 @@ assemble_asm (tree string)
 /* Write the address of the entity given by SYMBOL to SEC.  */
 void
 assemble_addr_to_section (rtx symbol, section *sec)
 {
   switch_to_section (sec);
   assemble_align (POINTER_SIZE);
-  assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+  assemble_integer (symbol, POINTER_SIZE_UNITS, POINTER_SIZE, 1);
 }
 
 /* Return the numbered .ctors.N (if CONSTRUCTOR_P) or .dtors.N (if
    not) section for PRIORITY.  */
 section *
 get_cdtor_priority_section (int priority, bool constructor_p)
@@ -2637,13 +2637,13 @@ default_assemble_integer (rtx x ATTRIBUT
                          unsigned int size ATTRIBUTE_UNUSED,
                          int aligned_p ATTRIBUTE_UNUSED)
 {
   const char *op = integer_asm_op (size, aligned_p);
   /* Avoid GAS bugs for large values.  Specifically negative values whose
      absolute value fits in a bfd_vma, but not in a bfd_signed_vma.  */
-  if (size > UNITS_PER_WORD && size > POINTER_SIZE / BITS_PER_UNIT)
+  if (size > UNITS_PER_WORD && size > POINTER_SIZE_UNITS)
     return false;
   return op && (assemble_integer_with_op (op, x), true);
 }
 
 /* Assemble the integer constant X into an object of SIZE bytes.  ALIGN is
    the alignment of the integer in bits.  Return 1 if we were able to output
@@ -5768,15 +5768,15 @@ dump_tm_clone_pairs (vec<tm_alias_pair> 
          switch_to_section (targetm.asm_out.tm_clone_table_section ());
          assemble_align (POINTER_SIZE);
          switched = true;
        }
 
       assemble_integer (XEXP (DECL_RTL (src), 0),
-                       POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+                       POINTER_SIZE_UNITS, POINTER_SIZE, 1);
       assemble_integer (XEXP (DECL_RTL (dst), 0),
-                       POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+                       POINTER_SIZE_UNITS, POINTER_SIZE, 1);
     }
 }
 
 /* Provide a default for the tm_clone_table section.  */
 
 section *
Index: gcc/dwarf2out.c
===================================================================
--- gcc/dwarf2out.c     (revision 213886)
+++ gcc/dwarf2out.c     (working copy)
@@ -12536,13 +12536,14 @@ mem_loc_descriptor (rtx rtl, enum machin
     case LABEL_REF:
       /* Some ports can transform a symbol ref into a label ref, because
         the symbol ref is too far away and has to be dumped into a constant
         pool.  */
     case CONST:
     case SYMBOL_REF:
-      if (GET_MODE_CLASS (mode) != MODE_INT
+      if ((GET_MODE_CLASS (mode) != MODE_INT
+          && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
          || (GET_MODE_SIZE (mode) > DWARF2_ADDR_SIZE
 #ifdef POINTERS_EXTEND_UNSIGNED
              && (mode != Pmode || mem_mode == VOIDmode)
 #endif
              ))
        break;

Index: gcc/var-tracking.c
===================================================================
--- gcc/var-tracking.c  (revision 213886)
+++ gcc/var-tracking.c  (working copy)
@@ -1002,13 +1002,13 @@ use_narrower_mode (rtx x, enum machine_m
       return lowpart_subreg (mode, x, wmode);
     case PLUS:
     case MINUS:
     case MULT:
       op0 = use_narrower_mode (XEXP (x, 0), mode, wmode);
       op1 = use_narrower_mode (XEXP (x, 1), mode, wmode);
       return simplify_gen_binary (GET_CODE (x), mode, op0, op1);
     case ASHIFT:
       op0 = use_narrower_mode (XEXP (x, 0), mode, wmode);
       return simplify_gen_binary (ASHIFT, mode, op0, XEXP (x, 1));
     default:
       gcc_unreachable ();
     }
@@ -1138,16 +1138,18 @@ adjust_mems (rtx loc, const_rtx old_rtx,
       if (MAY_HAVE_DEBUG_INSNS
          && GET_CODE (tem) == SUBREG
          && (GET_CODE (SUBREG_REG (tem)) == PLUS
              || GET_CODE (SUBREG_REG (tem)) == MINUS
              || GET_CODE (SUBREG_REG (tem)) == MULT
              || GET_CODE (SUBREG_REG (tem)) == ASHIFT)
-         && GET_MODE_CLASS (GET_MODE (tem)) == MODE_INT
-         && GET_MODE_CLASS (GET_MODE (SUBREG_REG (tem))) == MODE_INT
-         && GET_MODE_SIZE (GET_MODE (tem))
-            < GET_MODE_SIZE (GET_MODE (SUBREG_REG (tem)))
+         && (GET_MODE_CLASS (GET_MODE (tem)) == MODE_INT
+             || GET_MODE_CLASS (GET_MODE (tem)) == MODE_PARTIAL_INT)
+         && (GET_MODE_CLASS (GET_MODE (SUBREG_REG (tem))) == MODE_INT
+             || GET_MODE_CLASS (GET_MODE (SUBREG_REG (tem))) == MODE_PARTIAL_INT)
+         && GET_MODE_PRECISION (GET_MODE (tem))
+            < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (tem)))
          && subreg_lowpart_p (tem)
          && !for_each_rtx (&SUBREG_REG (tem), use_narrower_mode_test, tem))
        return use_narrower_mode (SUBREG_REG (tem), GET_MODE (tem),
                                  GET_MODE (SUBREG_REG (tem)));
       return tem;
     case ASM_OPERANDS:
@@ -6234,22 +6236,25 @@ prepare_call_arguments (basic_block bb, 
       {
        rtx item = NULL_RTX;
        x = XEXP (XEXP (link, 0), 0);
        if (GET_MODE (link) == VOIDmode
            || GET_MODE (link) == BLKmode
            || (GET_MODE (link) != GET_MODE (x)
-               && (GET_MODE_CLASS (GET_MODE (link)) != MODE_INT
-                   || GET_MODE_CLASS (GET_MODE (x)) != MODE_INT)))
+               && ((GET_MODE_CLASS (GET_MODE (link)) != MODE_INT
+                    && GET_MODE_CLASS (GET_MODE (link)) != MODE_PARTIAL_INT)
+                   || (GET_MODE_CLASS (GET_MODE (x)) != MODE_INT
+                       && GET_MODE_CLASS (GET_MODE (x)) != MODE_PARTIAL_INT))))
          /* Can't do anything for these, if the original type mode
             isn't known or can't be converted.  */;
        else if (REG_P (x))
          {
            cselib_val *val = cselib_lookup (x, GET_MODE (x), 0, VOIDmode);
            if (val && cselib_preserved_value_p (val))
              item = val->val_rtx;
-           else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+           else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+                    || GET_MODE_CLASS (GET_MODE (x)) == MODE_PARTIAL_INT)
              {
                enum machine_mode mode = GET_MODE (x);
 
                while ((mode = GET_MODE_WIDER_MODE (mode)) != VOIDmode
                       && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD)
                  {
@@ -6282,13 +6287,14 @@ prepare_call_arguments (basic_block bb, 
                                               &amd);
                gcc_assert (amd.side_effects == NULL_RTX);
              }
            val = cselib_lookup (mem, GET_MODE (mem), 0, VOIDmode);
            if (val && cselib_preserved_value_p (val))
              item = val->val_rtx;
-           else if (GET_MODE_CLASS (GET_MODE (mem)) != MODE_INT)
+           else if (GET_MODE_CLASS (GET_MODE (mem)) != MODE_INT
+                    && GET_MODE_CLASS (GET_MODE (mem)) != MODE_PARTIAL_INT)
              {
                /* For non-integer stack argument see also if they weren't
                   initialized by integers.  */
                enum machine_mode imode = int_mode_for_mode (GET_MODE (mem));
                if (imode != GET_MODE (mem) && imode != BLKmode)
                  {
@@ -6325,13 +6331,14 @@ prepare_call_arguments (basic_block bb, 
                                              argtype, true);
            if (TREE_CODE (argtype) == REFERENCE_TYPE
                && INTEGRAL_TYPE_P (TREE_TYPE (argtype))
                && reg
                && REG_P (reg)
                && GET_MODE (reg) == mode
-               && GET_MODE_CLASS (mode) == MODE_INT
+               && (GET_MODE_CLASS (mode) == MODE_INT
+                   || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
                && REG_P (x)
                && REGNO (x) == REGNO (reg)
                && GET_MODE (x) == mode
                && item)
              {
                enum machine_mode indmode
Index: gcc/stor-layout.c
===================================================================
--- gcc/stor-layout.c   (revision 213886)
+++ gcc/stor-layout.c   (working copy)
@@ -1782,23 +1800,25 @@ finalize_type_size (tree type)
     {
       tree variant;
       /* Record layout info of this variant.  */
       tree size = TYPE_SIZE (type);
       tree size_unit = TYPE_SIZE_UNIT (type);
       unsigned int align = TYPE_ALIGN (type);
+      unsigned int precision = TYPE_PRECISION (type);
       unsigned int user_align = TYPE_USER_ALIGN (type);
       enum machine_mode mode = TYPE_MODE (type);
 
       /* Copy it into all variants.  */
       for (variant = TYPE_MAIN_VARIANT (type);
           variant != 0;
           variant = TYPE_NEXT_VARIANT (variant))
        {
          TYPE_SIZE (variant) = size;
          TYPE_SIZE_UNIT (variant) = size_unit;
          TYPE_ALIGN (variant) = align;
+         TYPE_PRECISION (variant) = precision;
          TYPE_USER_ALIGN (variant) = user_align;
          SET_TYPE_MODE (variant, mode);
        }
     }
 }
 
@@ -2129,12 +2149,13 @@ layout_type (tree type)
     case BOOLEAN_TYPE:
     case INTEGER_TYPE:
     case ENUMERAL_TYPE:
       SET_TYPE_MODE (type,
                     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
+      /* Don't set TYPE_PRECISION here, as it may be set by a bitfield.  */
       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
       break;
 
     case REAL_TYPE:
       SET_TYPE_MODE (type,
                     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
@@ -2199,15 +2220,15 @@ layout_type (tree type)
       TYPE_USER_ALIGN (type) = 0;
       SET_TYPE_MODE (type, VOIDmode);
       break;
 
     case OFFSET_TYPE:
       TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
-      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
-      /* A pointer might be MODE_PARTIAL_INT,
-        but ptrdiff_t must be integral.  */
+      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
+      /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
+        integral, which may be an __intN.  */
       SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
       TYPE_PRECISION (type) = POINTER_SIZE;
       break;
 
     case FUNCTION_TYPE:
     case METHOD_TYPE:
@@ -2229,13 +2250,13 @@ layout_type (tree type)
            mode = targetm.addr_space.address_mode (as);
          }
 
        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        TYPE_UNSIGNED (type) = 1;
-       TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
+       TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
       }
       break;
 
     case ARRAY_TYPE:
       {
        tree index = TYPE_DOMAIN (type);

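(Closing aside, not part of the patch: the finalize_type_size hunk above now
copies TYPE_PRECISION into every variant alongside size, alignment and mode.
A minimal sketch of that idea, with hypothetical names, assuming an __intN
type such as a 20-bit integer whose qualified variants must keep reporting
20 value bits:)

        /* Sketch only, simplified from the finalize_type_size hunk:
           precision must be propagated to all variants, e.g. so that a
           'const' variant of a 20-bit type still reports 20 value bits.  */
        struct sketch_type
        {
          int precision;
          struct sketch_type *next_variant;
        };

        static void
        sketch_copy_precision (struct sketch_type *main_variant)
        {
          for (struct sketch_type *v = main_variant; v; v = v->next_variant)
            v->precision = main_variant->precision;
        }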