> Maybe you need to refactor __glibcxx_digits so there is a version taking 
> the bitsize as an argument rather than using sizeof(T) * __CHAR_BIT__, but 
> that should be the only change needed to handle such types with the 
> existing macros.  The bitsize macros should be the only ones needing 
> predefining to pass information to libstdc++.

Like this?

#define __glibcxx_signed_b(T,B) ((T)(-1) < 0)

#define __glibcxx_min_b(T,B)                                    \
  (__glibcxx_signed_b (T,B) ? -__glibcxx_max_b (T,B) - 1 : (T)0)

#define __glibcxx_max_b(T,B)                                            \
  (__glibcxx_signed_b (T,B) ?                                           \
   (((((T)1 << (__glibcxx_digits_b (T,B) - 1)) - 1) << 1) + 1) : ~(T)0)

#define __glibcxx_digits_b(T,B)                         \
  (B - __glibcxx_signed_b (T,B))

// The fraction 643/2136 approximates log10(2) to 7 significant digits.
#define __glibcxx_digits10_b(T,B)               \
  (__glibcxx_digits_b (T,B) * 643L / 2136)
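// For example, a 32-bit signed type has 31 value bits, and
// 31 * 643L / 2136 == 9, matching numeric_limits<int>::digits10.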

#define __glibcxx_signed(T) \
  __glibcxx_signed_b (T, sizeof(T) * __CHAR_BIT__)
#define __glibcxx_min(T) \
  __glibcxx_min_b (T, sizeof(T) * __CHAR_BIT__)
#define __glibcxx_max(T) \
  __glibcxx_max_b (T, sizeof(T) * __CHAR_BIT__)
#define __glibcxx_digits(T) \
  __glibcxx_digits_b (T, sizeof(T) * __CHAR_BIT__)
#define __glibcxx_digits10(T) \
  __glibcxx_digits10_b (T, sizeof(T) * __CHAR_BIT__)
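
As a quick sanity check (my own, not part of the proposal), the
one-argument wrappers should still agree with <climits> for the
standard types; with a C++11 compiler something like this ought
to compile:

#include <climits>

static_assert (__glibcxx_signed (int), "int is signed");
static_assert (__glibcxx_digits (int) == sizeof(int) * CHAR_BIT - 1,
               "all bits but the sign bit are value bits");
static_assert (__glibcxx_max (int) == INT_MAX, "matches <climits>");
static_assert (__glibcxx_min (int) == INT_MIN, "matches <climits>");

A target with, say, a 20-bit signed type would instead pass its
predefined bitsize straight to the _b macros, so
__glibcxx_digits_b (T, 20) would give 19.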
