ok, so now you do this only for the first field in a bitfield group.  But you
do it for _all_ bitfield groups in a struct, not only for the interesting one.

May I suggest splitting the loop into two: first search for the first
field of the bitfield group that contains fld, and then compute the
bitwidth in a separate loop?

Excellent idea. Done! Now there are at most two calls to get_inner_reference, and in many cases, only one.
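
To make "the interesting one" concrete, consider a hypothetical
struct (mine, not from the testsuite) with two bitfield groups:

  struct S
  {
    int a : 3;     /* group 1 starts here */
    int b : 5;
    char gap;      /* non-bitfield: ends group 1 */
    int c : 7;     /* group 2 starts here */
    int d : 9;     /* the field being stored to */
  };

For a store to s.d, the first loop now walks TYPE_FIELDS only until
it reaches d, resetting the group start at gap, so it finds c as
BITREGION_START; the second loop then only measures group 2.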

Backing up to one of my earlier questions: what is *offset supposed
to be relative to?  The docs say something like "relative to
INNERDECL", but the code doesn't contain a reference to INNERDECL
anymore.

Sorry, I see your confusion. The comments at the top were completely out of date. I have simplified and rewritten them accordingly. I am attaching get_bit_range() with these and other changes you suggested. See if it makes sense now.
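
As a concrete illustration of the new contract (my example, with
target-dependent layout assumed):

  struct S
  {
    char c;        /* byte 0 */
    int a : 7;     /* group starts at byte 1 on common targets */
    int b : 9;
  } s;

For a store to s.b, *OFFSET is the byte distance from the start of
the containing object -- TREE_OPERAND (exp, 0) -- to the start of the
a/b bit region, i.e. 1 here, not anything relative to an inner decl.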

Now we come to that padding thing.  What are the C++ memory model
semantics for re-used tail padding?  Consider

Andrew addressed this elsewhere.
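
(For readers who don't have that subthread handy, a minimal sketch of
the problem, assuming the Itanium C++ ABI's reuse of tail padding for
non-POD bases -- my example, not the one elided above:

  struct A
  {
    A ();          /* non-POD, so its tail padding may be reused */
    int i : 24;    /* bits 0..23; byte 3 is tail padding */
  };

  struct B : A
  {
    char c;        /* may be allocated into A's byte 3 */
  };

A 4-byte read-modify-write of i in a B object would clobber c, so the
bit region for i must not be widened into the reused tail padding.)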

There is too much get_inner_reference and tree folding stuff in this
patch (which makes it expensive given that the algorithm is still
inherently quadratic).  You can rely on the bitfield group advancing
by integer-cst bits (but the start offset may be non-constant, as
may the size of the underlying record).

Now there are only two tree folding calls (apart from get_inner_reference), and the common case has very simple arithmetic tuples. I see no clear way of removing the last call to get_inner_reference(), as the padding after the field can only be calculated by calling get_inner_reference() on the subsequent field.
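
To illustrate why the subsequent field is needed (a hypothetical
layout, assuming a typical target):

  struct S
  {
    int a : 3;     /* group occupies bits 0..2 */
    char c;        /* allocated at byte 1 */
  };

The region for a may be widened over the padding bits 3..7, but must
stop at the start of c, and when offsets are non-constant only
get_inner_reference() on c can tell us where that boundary is.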

Now, seeing all this, and considering that this is purely C++
frontend semantics: why can't the C++ frontend itself constrain
accesses according to the required semantics?  It could simply create
BIT_FIELD_REF <MEM_REF <&containing_record,
byte-offset-to-start-of-group>, bit-size, bit-offset> for all
bitfield references (with a proper type for the MEM_REF, specifying
the size of the group).  That would also avoid issues during tree
optimization and would at least allow optimizing the bitfield
accesses according to the desired C++ semantics.

Andrew addressed this as well. Could you respond to his email if you think it is unsatisfactory?
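
For the archive, here is what I understand the suggested lowering to
look like, written in (assumed) GIMPLE-dump style -- illustrative
only, not output from an implementation.  Given

  struct S { char c; int a : 7; int b : 9; } s;
  s.b = x;

the front end would emit the store as roughly

  BIT_FIELD_REF <MEM_REF <(T *) &s, 1>, 9, 7> = x;

where T is a 16-bit type covering the a/b group, 1 is the byte offset
of the group within s, 9 is b's width in bits, and 7 is b's bit
offset within the group.  The group extent is then explicit in the IL
for the optimizers to honor.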

/* In the C++ memory model, consecutive bit fields of nonzero width
   in a structure are considered one memory location.

   Given a COMPONENT_REF, this function calculates the byte offset
   from the containing object to the start of the contiguous bit
   region containing the field in question.  This byte offset is
   returned in *OFFSET.

   The maximum number of bits that can be addressed while storing into
   the COMPONENT_REF is returned in *MAXBITS.  This number is the
   number of bits in the contiguous bit region, including any trailing
   padding, capped at MAX_FIXED_MODE_SIZE.  */

static void
get_bit_range (tree exp, tree *offset, HOST_WIDE_INT *maxbits)
{
  tree field, record_type, fld;
  bool prev_field_is_bitfield;
  tree start_offset;
  tree start_bitpos_direct_parent = NULL_TREE;
  HOST_WIDE_INT start_bitpos;
  HOST_WIDE_INT cumulative_bitsize = 0;
  /* First field of the bitfield group containing the bitfield we are
     referencing.  */
  tree bitregion_start;

  HOST_WIDE_INT tbitsize;
  enum machine_mode tmode;
  int tunsignedp, tvolatilep;
  bool found;

  gcc_assert (TREE_CODE (exp) == COMPONENT_REF);

  /* Bit field we're storing into.  */
  field = TREE_OPERAND (exp, 1);
  record_type = DECL_FIELD_CONTEXT (field);

  /* Find the bitfield group containing the field in question, and set
     BITREGION_START to the start of the group.  */
  prev_field_is_bitfield = false;
  bitregion_start = NULL_TREE;
  for (fld = TYPE_FIELDS (record_type); fld; fld = DECL_CHAIN (fld))
    {
      if (TREE_CODE (fld) != FIELD_DECL)
        continue;
      /* If we have a bit-field with a bitsize > 0... */
      if (DECL_BIT_FIELD_TYPE (fld)
          && tree_low_cst (DECL_SIZE (fld), 1) > 0)
        {
          if (!prev_field_is_bitfield)
            {
              bitregion_start = fld;
              prev_field_is_bitfield = true;
            }
        }
      else
        prev_field_is_bitfield = false;
      if (fld == field)
        break;
    }
  gcc_assert (bitregion_start);
  gcc_assert (fld);

  /* Save the starting position of the bitregion.  */
  get_inner_reference (build3 (COMPONENT_REF,
                               TREE_TYPE (exp),
                               TREE_OPERAND (exp, 0),
                               bitregion_start, NULL_TREE),
                       &tbitsize, &start_bitpos, &start_offset,
                       &tmode, &tunsignedp, &tvolatilep, true);
  if (!start_offset)
    start_offset = size_zero_node;
  /* Calculate byte offset to the beginning of the bit region.  */
  /* OFFSET = START_OFFSET + (START_BITPOS / BITS_PER_UNIT) */
  gcc_assert (start_bitpos % BITS_PER_UNIT == 0);
  *offset = fold_build2 (PLUS_EXPR, TREE_TYPE (start_offset),
                         start_offset,
                         build_int_cst (TREE_TYPE (start_offset),
                                        start_bitpos / BITS_PER_UNIT));
  /* Save the bit offset of BITREGION_START within its directly
     containing structure, for the end-of-structure case below.  */
  start_bitpos_direct_parent = DECL_FIELD_BIT_OFFSET (bitregion_start);

  /* Count the bitsize of the bitregion containing the field in question.  */
  found = false;
  cumulative_bitsize = 0;
  for (fld = bitregion_start; fld; fld = DECL_CHAIN (fld))
    {
      if (TREE_CODE (fld) != FIELD_DECL)
        continue;
      if (fld == field)
        found = true;

      if (DECL_BIT_FIELD_TYPE (fld)
          && tree_low_cst (DECL_SIZE (fld), 1) > 0)
        {
          cumulative_bitsize += tree_low_cst (DECL_SIZE (fld), 1);

          /* Short-circuit out if we already have the maximum number
             of bits allowed; *OFFSET was computed above.  */
          if (cumulative_bitsize >= MAX_FIXED_MODE_SIZE)
            {
              *maxbits = MAX_FIXED_MODE_SIZE;
              return;
            }
        }
      else if (found)
        break;
    }

  /* If we found the end of the bit field sequence, include the
     padding up to the next field...  */
  if (fld)
    {
      tree end_offset, maxbits_tree;
      HOST_WIDE_INT end_bitpos;

      /* Calculate bitpos and offset of the next field.  */
      get_inner_reference (build3 (COMPONENT_REF,
                                   TREE_TYPE (exp),
                                   TREE_OPERAND (exp, 0),
                                   fld, NULL_TREE),
                           &tbitsize, &end_bitpos, &end_offset,
                           &tmode, &tunsignedp, &tvolatilep, true);
      gcc_assert (end_bitpos % BITS_PER_UNIT == 0);

      if (end_offset)
        {
          tree type = TREE_TYPE (end_offset), end;

          /* Calculate byte offset to the end of the bit region.  */
          end = fold_build2 (PLUS_EXPR, type,
                             end_offset,
                             build_int_cst (type,
                                            end_bitpos / BITS_PER_UNIT));
          maxbits_tree = fold_build2 (MINUS_EXPR, type, end, *offset);
        }
      else
        maxbits_tree = build_int_cst (integer_type_node,
                                      end_bitpos - start_bitpos);

      /* ?? Can we get a non-constant offset here ?? */
      gcc_assert (host_integerp (maxbits_tree, 1));
      *maxbits = TREE_INT_CST_LOW (maxbits_tree);
    }
  /* ...otherwise, this is the last element in the structure.  */
  else
    {
      /* Include the padding at the end of the structure.  */
      *maxbits = TREE_INT_CST_LOW (TYPE_SIZE (record_type))
        - TREE_INT_CST_LOW (start_bitpos_direct_parent);
      if (*maxbits > MAX_FIXED_MODE_SIZE)
        *maxbits = MAX_FIXED_MODE_SIZE;
    }
}
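
For reference, a caller would use the results along these lines (a
sketch of the intended contract, not the actual expand_assignment()
change from the patch):

  tree offset;
  HOST_WIDE_INT maxbits;

  if (TREE_CODE (to) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
    {
      get_bit_range (to, &offset, &maxbits);
      /* Any store emitted for this bit field must stay within
         MAXBITS bits starting OFFSET bytes into the object.  */
    }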
