Hi Junio,

On Fri, 4 Oct 2019, Junio C Hamano wrote:

> "Johannes Schindelin via GitGitGadget" <gitgitgad...@gmail.com>
> writes:
>
> > While at it, we take care of reporting overflows (which are unlikely,
> > but hey, defensive programming is good!).
> >
> > We _also_ take pains to cast the unsigned value to signed: otherwise,
> > the signed operand (i.e. the `-1`) would be cast to unsigned before
> > doing the arithmetic.
>
> These three look good and too similar to each other, which makes me
> wonder if we want to allow them simply write
>
>       return insert_pos_as_negative_offset(nr);
>
> with something like
>
>       static int insert_pos_as_negative_offset(uintmax_t nr)
>       {
>               if (INT_MAX < nr)
>                       die("overflow: -1 - %"PRIuMAX, nr);
>               return -1 - (int)nr;
>       }
>
> to avoid repetition.

I tried not to do that because there are two different data types in
play: `unsigned int` and `size_t`. But I guess by making this an
`inline` function, compilers can optimize for the common case and avoid
casting _twice_.

Will be fixed in v2,
Dscho

>
> > Helped-by: Denton Liu <liu.den...@gmail.com>
> > Signed-off-by: Johannes Schindelin <johannes.schinde...@gmx.de>
> > ---
> >  read-cache.c  |  9 ++++++---
> >  sha1-lookup.c | 12 +++++++++---
> >  2 files changed, 15 insertions(+), 6 deletions(-)
> >
> > diff --git a/read-cache.c b/read-cache.c
> > index c701f7f8b8..97745c2a31 100644
> > --- a/read-cache.c
> > +++ b/read-cache.c
> > @@ -1275,8 +1275,11 @@ static int add_index_entry_with_check(struct index_state *istate, struct cache_e
> >      * we can avoid searching for it.
> >      */
> >     if (istate->cache_nr > 0 &&
> > -           strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
> > -           pos = -istate->cache_nr - 1;
> > +           strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0) {
> > +           if (istate->cache_nr > INT_MAX)
> > +                   die("overflow: -1 - %u", istate->cache_nr);
> > +           pos = -1 - (int)istate->cache_nr;
> > +   }
> >     else
> >             pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
> >
> > @@ -1894,7 +1897,7 @@ static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
> >     /*
> >      * Account for potential alignment differences.
> >      */
> > -   per_entry += align_padding_size(sizeof(struct cache_entry), 
> > -sizeof(struct ondisk_cache_entry));
> > +   per_entry += align_padding_size(per_entry, 0);
> >     return ondisk_size + entries * per_entry;
> >  }
> >
> > diff --git a/sha1-lookup.c b/sha1-lookup.c
> > index 796ab68da8..bb786b5420 100644
> > --- a/sha1-lookup.c
> > +++ b/sha1-lookup.c
> > @@ -69,8 +69,12 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
> >                     miv = take2(sha1 + ofs);
> >                     if (miv < lov)
> >                             return -1;
> > -                   if (hiv < miv)
> > -                           return -1 - nr;
> > +                   if (hiv < miv) {
> > +                           if (nr > INT_MAX)
> > +                                   die("overflow: -1 - %"PRIuMAX,
> > +                                       (uintmax_t)nr);
> > +                           return -1 - (int)nr;
> > +                   }
> >                     if (lov != hiv) {
> >                             /*
> >                              * At this point miv could be equal
> > @@ -97,7 +101,9 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
> >                     lo = mi + 1;
> >             mi = lo + (hi - lo) / 2;
> >     } while (lo < hi);
> > -   return -lo-1;
> > +   if (lo > INT_MAX)
> > +           die("overflow: -1 - %"PRIuMAX, (uintmax_t)lo);
> > +   return -1 - (int)lo;
> >  }
> >
> >  int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
>

Reply via email to