Philip Martin <philip.mar...@wandisco.com> writes:

> Prompted by the warnings I think there are some issues to fix.  For
> APR_HASH_KEY_STRING keys there is no protection against abnormally long
> keys.  combine_long_key() will allocate strlen() memory even if it is
> many GB.  The item will not get cached if key+data length is more than
> 4GB but the memory for the key, which could be more than 4GB, will be
> permanently allocated in the cache.  There is also a problem with
> overflow in membuffer_cache_set_internal() when calculating key+data
> length, although in practice a key large enough to trigger this will
> probably fail memory allocation first.
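For illustration only, a minimal standalone sketch of that wraparound --
not Subversion code; uint32_t stands in for a 32-bit apr_size_t and the
64MB MAX_ITEM_SIZE cap is made up:

/* Not Subversion code: with a many-GB key the sum "item_size + key_len"
 * wraps in 32 bits and looks like a tiny, cacheable size. */
#include <stdint.h>
#include <stdio.h>

#define MAX_ITEM_SIZE ((uint32_t)1 << 26)   /* hypothetical 64MB cap */

int main(void)
{
  uint32_t key_len = UINT32_MAX - 100;      /* abnormally long key */
  uint32_t item_size = 200;
  uint32_t size = item_size + key_len;      /* wraps around to 99 */

  printf("size = %u, passes naive size check: %d\n",
         (unsigned)size, size <= MAX_ITEM_SIZE);
  return 0;
}

That is the membuffer_cache_set_internal() problem described above,
reduced to its arithmetic.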
If we enforce a runtime limit on key size, both fixed and variable,
making it a positive number less than MAX_ITEM_SIZE, then even on
32-bit builds we can detect overflow with something like this:

  apr_size_t item_size, key_len;
  svn_boolean_t too_big = item_size > MAX_ITEM_SIZE - key_len;

(A standalone sketch of this check appears below, after the patch.)
So, a patch something like this:

Index: subversion/libsvn_subr/cache-membuffer.c
===================================================================
--- subversion/libsvn_subr/cache-membuffer.c   (revision 1679506)
+++ subversion/libsvn_subr/cache-membuffer.c   (working copy)
@@ -197,7 +197,7 @@ typedef struct entry_key_t
 
   /* Length of the full key. This value is aligned to ITEM_ALIGNMENT to
    * make sure the subsequent item content is properly aligned. */
-  apr_uint32_t key_len;
+  apr_size_t key_len;
 } entry_key_t;
 
 /* A full key, i.e. the combination of the cache's key prefix with some
@@ -2013,14 +2013,19 @@ membuffer_cache_set_internal(svn_membuffer_t *cach
                              apr_pool_t *scratch_pool)
 {
   cache_level_t *level;
-  apr_size_t size = item_size + to_find->entry_key.key_len;
+  apr_size_t size;
+  svn_boolean_t too_big
+    = item_size > MAX_ITEM_SIZE - to_find->entry_key.key_len;
 
   /* first, look for a previous entry for the given key */
   entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
 
+  if (!too_big)
+    size = item_size + to_find->entry_key.key_len;
+
   /* if there is an old version of that entry and the new data fits into
    * the old spot, just re-use that space. */
-  if (entry && ALIGN_VALUE(entry->size) >= size && buffer)
+  if (entry && !too_big && ALIGN_VALUE(entry->size) >= size && buffer)
     {
       /* Careful! We need to cast SIZE to the full width of CACHE->DATA_USED
        * lest we run into trouble with 32 bit underflow *not* treated as a
@@ -2052,8 +2057,9 @@ membuffer_cache_set_internal(svn_membuffer_t *cach
 
       /* if necessary, enlarge the insertion window.
        */
-      level = buffer ? select_level(cache, size, priority) : NULL;
-      if (level)
+      if (!too_big)
+        level = buffer ? select_level(cache, size, priority) : NULL;
+      if (!too_big && level)
         {
           /* Remove old data for this key, if that exists.
            * Get an unused entry for the key and and initialize it with
@@ -2477,10 +2483,12 @@ membuffer_cache_set_partial_internal(svn_membuffer
        */
      if (item_data != orig_data)
        {
+         svn_boolean_t too_big = item_size > MAX_ITEM_SIZE - key_len;
+
          /* Remove the old entry and try to make space for the new one.
           */
          drop_entry(cache, entry);
-         if (   (cache->max_entry_size >= item_size + key_len)
+         if (!too_big
              && ensure_data_insertable_l1(cache, item_size + key_len))
            {
              /* Write the new entry.
@@ -2598,12 +2606,18 @@ typedef struct svn_membuffer_cache_t
   svn_mutex__t *mutex;
 } svn_membuffer_cache_t;
 
+/* Largest fixed-size key and longest APR_HASH_KEY_STRING key allowed,
+ * must be less than MAX_ITEM_SIZE. An arbitrary number much higher
+ * than the expected key sizes of a couple of hundred bytes at most.
+ */
+#define MAX_KEY_SIZE 50000
+
 /* Basically calculate a hash value for KEY of length KEY_LEN, combine it
  * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
  * This could replace combine_key() entirely but we actually use it only
  * when the quick path failed.
  */
-static void
+static svn_error_t *
 combine_long_key(svn_membuffer_cache_t *cache,
                  const void *key,
                  apr_ssize_t key_len)
@@ -2615,7 +2629,12 @@ combine_long_key(svn_membuffer_cache_t *cache,
 
   /* handle variable-length keys */
   if (key_len == APR_HASH_KEY_STRING)
-    key_len = strlen((const char *) key);
+    {
+      key_len = strlen((const char *) key);
+      if (key_len > MAX_KEY_SIZE)
+        return svn_error_create(SVN_ERR_UNSUPPORTED_FEATURE, NULL,
+                                _("Key too big for membuffer-based cache"));
+    }
 
   /* Combine keys. */
   aligned_key_len = ALIGN_VALUE(key_len);
@@ -2636,12 +2655,14 @@ combine_long_key(svn_membuffer_cache_t *cache,
     ^= cache->prefix.entry_key.fingerprint[0];
   cache->combined_key.entry_key.fingerprint[1]
     ^= cache->prefix.entry_key.fingerprint[1];
+
+  return SVN_NO_ERROR;
 }
 
 /* Basically calculate a hash value for KEY of length KEY_LEN, combine it
  * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
  */
-static void
+static svn_error_t *
 combine_key(svn_membuffer_cache_t *cache,
             const void *key,
             apr_ssize_t key_len)
@@ -2678,8 +2699,13 @@ combine_key(svn_membuffer_cache_t *cache,
   else
     {
       /* longer or variably sized keys */
-      combine_long_key(cache, key, key_len);
+      SVN_ERR(combine_long_key(cache, key, key_len));
     }
+
+  cache->combined_key.entry_key.fingerprint[0] = 0x01010183;
+  cache->combined_key.entry_key.fingerprint[1] = 0x0302030F;
+
+  return SVN_NO_ERROR;
 }
 
 /* Implement svn_cache__vtable_t.get (not thread-safe
@@ -2707,7 +2733,7 @@ svn_membuffer_cache_get(void **value_p,
 
   /* construct the full, i.e. globally unique, key by adding
    * this cache instances' prefix */
-  combine_key(cache, key, cache->key_len);
+  SVN_ERR(combine_key(cache, key, cache->key_len));
 
   /* Look the item up. */
   SVN_ERR(membuffer_cache_get(cache->membuffer,
@@ -2744,7 +2770,7 @@ svn_membuffer_cache_has_key(svn_boolean_t *found,
 
   /* construct the full, i.e. globally unique, key by adding
    * this cache instances' prefix */
-  combine_key(cache, key, cache->key_len);
+  SVN_ERR(combine_key(cache, key, cache->key_len));
 
   /* Look the item up. */
   SVN_ERR(membuffer_cache_has_key(cache->membuffer,
@@ -2774,7 +2800,7 @@ svn_membuffer_cache_set(void *cache_void,
 
   /* construct the full, i.e. globally unique, key by adding
    * this cache instances' prefix */
-  combine_key(cache, key, cache->key_len);
+  SVN_ERR(combine_key(cache, key, cache->key_len));
 
   /* (probably) add the item to the cache. But there is no real guarantee
    * that the item will actually be cached afterwards.
@@ -2824,7 +2850,7 @@ svn_membuffer_cache_get_partial(void **value_p,
       return SVN_NO_ERROR;
     }
 
-  combine_key(cache, key, cache->key_len);
+  SVN_ERR(combine_key(cache, key, cache->key_len));
   SVN_ERR(membuffer_cache_get_partial(cache->membuffer,
                                       &cache->combined_key,
                                       value_p,
@@ -2852,7 +2878,7 @@ svn_membuffer_cache_set_partial(void *cache_void,
 
   if (key != NULL)
     {
-      combine_key(cache, key, cache->key_len);
+      SVN_ERR(combine_key(cache, key, cache->key_len));
       SVN_ERR(membuffer_cache_set_partial(cache->membuffer,
                                           &cache->combined_key,
                                           func,
@@ -3126,6 +3152,10 @@ svn_cache__create_membuffer_cache(svn_cache__t **c
 
   svn_cache__t *wrapper = apr_pcalloc(result_pool, sizeof(*wrapper));
   svn_membuffer_cache_t *cache = apr_pcalloc(result_pool, sizeof(*cache));
 
+  if (klen != APR_HASH_KEY_STRING && klen > MAX_KEY_SIZE)
+    return svn_error_create(SVN_ERR_UNSUPPORTED_FEATURE, NULL,
+                            _("Key too big for membuffer-based cache"));
+
   /* initialize our internal cache header */
   cache->membuffer = membuffer;

-- 
Philip Martin | Subversion Committer
WANdisco // *Non-Stop Data*
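As promised above, a standalone sketch of the overflow-safe check --
again not Subversion code; uint32_t stands in for a 32-bit apr_size_t
and the MAX_ITEM_SIZE/MAX_KEY_SIZE values are made up rather than the
real cache-membuffer.c constants:

/* Not Subversion code: shows why "item_size > MAX_ITEM_SIZE - key_len"
 * is safe once key_len is capped below MAX_ITEM_SIZE -- the subtraction
 * cannot underflow and no sum that could wrap is ever computed. */
#include <stdint.h>
#include <stdio.h>

#define MAX_ITEM_SIZE ((uint32_t)1 << 26)   /* hypothetical cap */
#define MAX_KEY_SIZE  50000                 /* keeps key_len < MAX_ITEM_SIZE */

static int too_big(uint32_t item_size, uint32_t key_len)
{
  /* key_len <= MAX_KEY_SIZE < MAX_ITEM_SIZE, so this never underflows. */
  return item_size > MAX_ITEM_SIZE - key_len;
}

int main(void)
{
  /* The naive "item_size + key_len > MAX_ITEM_SIZE" wraps to 99 here and
   * would wrongly accept the item; the rewritten check rejects it. */
  printf("huge item:  too_big = %d\n", too_big(UINT32_MAX - 100, 200));
  printf("small item: too_big = %d\n", too_big(1000, 200));
  return 0;
}

The same shape of check is what the patch introduces as the local
too_big flag in membuffer_cache_set_internal() and
membuffer_cache_set_partial_internal().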