On Tue, Nov 05, 2013 at 08:55:21AM -0700, Jeff Law wrote: > On 11/05/13 03:35, Florian Weimer wrote: > >On 11/04/2013 05:43 PM, Joseph S. Myers wrote: > >>On Mon, 4 Nov 2013, Jeff Law wrote: > >> > >>>You might also be referring to Greg McGary's work on bounded > >>>pointers, I don't > >>>think that ever got integrated or if it did, it got pulled long ago. > >> > >>It was integrated in 2000, removed in 2002/2003 (I removed the relics > >>from > >>glibc earlier this year). By using fat pointers, it required the entire > >>program including all libraries it used to be built with bounded pointers > >>enabled (and associated changes to all assembly sources to handle them). > > > >Yes, I was referring to Greg's work. I'm wondering if the trade-offs > >have changed since then, considering that it's again en vogue to > >bootstrap new architectures. It's difficult to tell without more > >details about that past effort, though. > Not really, IMHO. If anything as software complexity continues to > increase (specifically pulling in more and more libraries from > various sources), the problem of mixing instrumented and > uninstrumented code has actually gotten worse. > Also it is doing useless work, allocator already knows bounds so all you need to do is ask.
A query can be made O(1) with a bit of work; I attached a proof-of-concept allocator that checks memcpy, where queries are unoptimized O(n). As it is, this will not get bounds on static and stack allocations. For static allocations you could extract bounds by reading debug information, as gdb does. For stack allocations it takes more work; it could be done by a compiler/compiler plugin/custom frontend inserting bound_add (from, to) and bound_remove (from, to) calls for each stack variable that escapes.
/* Proof-of-concept interposing allocator: every allocation carries a left
   sentinel header and a right sentinel word so overflows and double frees
   are detected; live allocations form a doubly linked list used for leak
   reporting at exit and for the O(n) bounds query that checks memcpy.  */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>
#include <pthread.h>
#include <execinfo.h>
#include <dlfcn.h>

#define log(...) fprintf (stderr, __VA_ARGS__)

/* Number of backtrace frames recorded per allocation.  */
#ifndef TRACE
# define TRACE 4
#endif

/* Header placed immediately before each user area.  SENTINEL while the
   allocation is live, DOUBLE_FREE once freed.  */
typedef struct left_sentinel
{
  void *origin[TRACE];          /* Allocation-site backtrace.  */
  uint64_t res2;
  uint64_t iteration;
  struct left_sentinel *previous, *next;
  uint64_t size;                /* Requested size in bytes.  */
  uint64_t sentinel;
} left_sentinel;

#define ALIGN_UP(x, no) \
  ((((uintptr_t) (x)) + (no) - 1) - (((uintptr_t) (x)) + (no) - 1) % (no))

#define SENTINEL 5877815476798078077UL
#define BIG_SENTINEL 1204925723299285449UL
#define DOUBLE_FREE 10943064412161398437UL

static pthread_mutex_t mutex;
static int inited;
size_t page_size;

static void
init (void)
{
  page_size = sysconf (_SC_PAGE_SIZE);
  pthread_mutex_init (&mutex, NULL);
  inited = 1;
}

/* BUGFIX: full prototype instead of the empty-parens declaration that
   did not match the definition.  */
static void verify_sentinel (left_sentinel *cur);

void *
malloc (size_t len)
{
  void *ptr = NULL;
  /* BUGFIX: propagate allocation failure instead of returning an
     uninitialized pointer.  */
  if (posix_memalign (&ptr, 16, len))
    return NULL;
  return ptr;
}

void *
calloc (size_t nmemb, size_t size)
{
  if (!nmemb || !size)
    return NULL;
  if (SIZE_MAX / nmemb < size)
    {
      /* BUGFIX: %zu is the conversion for size_t; %i was undefined
         behavior.  */
      log ("allocation overflow in calloc(%zu, %zu)\n", nmemb, size);
      abort ();
    }
  void *ptr = malloc (nmemb * size);
  if (ptr != NULL)
    memset (ptr, 0, nmemb * size);
  return ptr;
}

void *
valloc (size_t size)
{
  /* BUGFIX: page_size was read before init could have run, and the
     posix_memalign result was ignored.  */
  if (!inited)
    init ();
  void *ret = NULL;
  if (posix_memalign (&ret, page_size, size))
    return NULL;
  return ret;
}

void *
pvalloc (size_t size)
{
  if (!inited)
    init ();
  /* BUGFIX: pvalloc is specified to round the size up to a multiple of
     the page size.  */
  size = ALIGN_UP (size, page_size);
  void *ret = NULL;
  if (posix_memalign (&ret, page_size, size))
    return NULL;
  return ret;
}

void *
memalign (size_t alignment, size_t size)
{
  void *ret = NULL;
  if (posix_memalign (&ret, alignment, size))
    return NULL;
  return ret;
}

void *
realloc (void *ptr, size_t size)
{
  if (!ptr)
    return malloc (size);
  left_sentinel *cur
    = (left_sentinel *) (((char *) ptr) - sizeof (left_sentinel));
  verify_sentinel (cur);
  size_t old_size = cur->size;
  /* Shrinking requests reuse the existing block.  */
  if (old_size > size)
    return ptr;
  char *new = malloc (size);
  if (!new)
    return NULL;
  memcpy (new, ptr, old_size);
  free (ptr);
  return new;
}

/* Map SIZE bytes (rounded to whole pages) with an unmapped guard page on
   each side so stray accesses beyond the block fault immediately.  */
static char *
wrap_mmap (size_t size)
{
  char *ret = mmap (NULL, ALIGN_UP (size, page_size) + 2 * page_size,
                    PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  /* BUGFIX: mmap signals failure with MAP_FAILED, not NULL, and the
     original `abort;' lacked parentheses so it never called abort().  */
  if (ret == MAP_FAILED)
    abort ();
  munmap (ret, page_size);
  munmap (ret + page_size + ALIGN_UP (size, page_size), page_size);
  return ret + page_size;
}

/* Power-of-two free lists: bucket[p] chains free blocks of size 1 << p,
   linked through the first word of each block.  Callers hold MUTEX.  */
static void **bucket[64];

static void
return_bucket (char *ptr, size_t size)
{
  int pow;
  for (pow = 0; 1UL << pow < size; pow++)
    ;
  char *old = (char *) bucket[pow];
  bucket[pow] = (void **) ptr;
  *bucket[pow] = old;
}

static void
add_bucket (size_t size)
{
  size_t i;
  int pow;
  for (pow = 0; 1UL << pow < size; pow++)
    ;
  if (1UL << pow >= page_size)
    {
      /* Page-sized and larger blocks get their own guarded mapping for
         tighter checks.  */
      char *ptr = wrap_mmap (1UL << pow);
      return_bucket (ptr, size);
    }
  else
    {
      /* Carve one page into equal power-of-two blocks.  */
      char *ptr = wrap_mmap (page_size);
      for (i = 0; i < page_size; i += 1UL << pow)
        return_bucket (ptr + i, size);
    }
}

static void *
get_bucket (size_t size)
{
  int pow;
  for (pow = 0; 1UL << pow < size; pow++)
    ;
  if (!bucket[pow])
    add_bucket (size);
  void **ret = bucket[pow];
  bucket[pow] = *ret;
  return (void *) ret;
}

/* Head of the doubly linked list of live allocations (newest first).  */
static left_sentinel *previous = NULL;

int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  int i;
  if (!inited)
    init ();
  void *origin[TRACE + 2];
  for (i = 0; i < TRACE; i++)
    origin[i + 2] = NULL;
  /* backtrace may allocate on first use; the flag breaks the recursion
     back into this allocator.  */
  static int in_backtrace = 0;
  if (!in_backtrace)
    {
      in_backtrace = 1;
      backtrace (origin, TRACE + 2);
      in_backtrace = 0;
    }
  if (pthread_mutex_lock (&mutex))
    abort ();
  /* Separate for tighter checks.  */
  if (size < alignment)
    size = alignment;
  if (alignment > 16)
    {
      /* TODO: excessive alignment can cause leaks.  */
      *memptr = get_bucket (size + alignment + sizeof (left_sentinel) + 8);
      *memptr = ((char *) ALIGN_UP ((((char *) *memptr)
                                     + sizeof (left_sentinel)), alignment))
                - sizeof (left_sentinel);
    }
  else
    *memptr = get_bucket (size + sizeof (left_sentinel) + 8);
  left_sentinel *cur = *memptr;
  /* BUGFIX: avoid arithmetic on void * (GNU extension).  */
  *memptr = ((char *) *memptr) + sizeof (left_sentinel);
  for (i = 0; i < TRACE; i++)
    cur->origin[i] = origin[i + 2];
  cur->previous = previous;
  if (previous)
    previous->next = cur;
  cur->next = NULL;
  previous = cur;
  cur->size = size;
  cur->sentinel = SENTINEL;
  /* Right sentinel immediately past the user area.  */
  ((uint64_t *) (((char *) *memptr) + size))[0] = SENTINEL;
  if (pthread_mutex_unlock (&mutex))
    abort ();
  return 0;
}

/* Abort with a diagnostic if either sentinel of CUR has been damaged.  */
static void
verify_sentinel (left_sentinel *cur)
{
  size_t old_size = cur->size;
  if (cur->sentinel != SENTINEL)
    {
      if (cur->sentinel == DOUBLE_FREE)
        log ("free: double free\n");
      else
        log ("free: corrupt left sentinel\n");
      abort ();
    }
  if (((uint64_t *) (((char *) cur) + sizeof (left_sentinel)
                     + old_size))[0] != SENTINEL)
    {
      /* BUGFIX: typo "rigth" -> "right".  */
      log ("free: corrupt right sentinel\n");
      abort ();
    }
}

void
free (void *ptr)
{
  if (!ptr)
    return;
  if (pthread_mutex_lock (&mutex))
    abort ();
  left_sentinel *cur
    = (left_sentinel *) (((char *) ptr) - sizeof (left_sentinel));
  verify_sentinel (cur);
  cur->sentinel = DOUBLE_FREE;
  /* Unlink from the live list.  */
  if (cur->previous)
    cur->previous->next = cur->next;
  if (cur->next)
    cur->next->previous = cur->previous;
  if (previous == cur)
    previous = cur->previous;
  size_t old_size = cur->size;
  return_bucket ((char *) cur, old_size + sizeof (left_sentinel) + 8);
  if (pthread_mutex_unlock (&mutex))
    abort ();
}

struct leak
{
  void *origin[TRACE];
  size_t size;
};

/* At exit, verify every live allocation and report them as leaks,
   collapsing adjacent records that share the same innermost frame.  */
static void __attribute__ ((destructor))
report_leaks (void)
{
  size_t i, j, origins = 0;
  left_sentinel *s = previous;
  while (s)
    {
      verify_sentinel (s);
      origins++;
      s = s->previous;
    }
  /* TODO sort.  One zeroed extra element terminates the duplicate
     comparison below.  */
  struct leak *leaks
    = (struct leak *) wrap_mmap ((origins + 1) * sizeof (struct leak));
  s = previous;
  for (i = 0; i < origins; i++)
    {
      /* BUGFIX: the original read the list head `previous' on every
         iteration and never advanced, so all records duplicated the
         newest allocation.  Walk S instead.  */
      for (j = 0; j < TRACE; j++)
        leaks[i].origin[j] = s->origin[j];
      leaks[i].size = s->size;
      s = s->previous;
    }
  for (i = 0; i < origins; i++)
    {
      Dl_info info;
      if (leaks[i].origin[0] != leaks[i + 1].origin[0])
        {
          /* BUGFIX: %zu for size_t.  */
          log ("Leak of size %zu\n", leaks[i].size);
          for (j = 0; j < TRACE; j++)
            {
              /* BUGFIX: dladdr may fail leaving INFO uninitialized, and
                 dli_sname may be NULL (UB when passed to %s).  */
              if (!dladdr (leaks[i].origin[j], &info))
                {
                  log ("... %p\n", leaks[i].origin[j]);
                  continue;
                }
              log ("... %s:0x%lx\t(%s)\n", info.dli_fname,
                   info.dli_saddr
                   ? (unsigned long) ((char *) info.dli_saddr
                                      - (char *) info.dli_fbase)
                   : 0UL,
                   info.dli_sname ? info.dli_sname : "?");
            }
        }
    }
}

/* Return how many bytes may be written starting at P, or SIZE_MAX when P
   does not fall inside any live allocation.  Unoptimized O(n) scan; a
   real implementation would index allocations for O(1) lookup.  */
static size_t
get_bound (void *p)
{
  left_sentinel *s = previous;
  size_t limit = SIZE_MAX;
  if (pthread_mutex_lock (&mutex))
    abort ();
  while (s)
    {
      char *to = ((char *) s) + sizeof (left_sentinel) + s->size;
      if (((void *) s) <= p && (char *) p <= to)
        limit = to - (char *) p;
      s = s->previous;
    }
  if (pthread_mutex_unlock (&mutex))
    abort ();
  return limit;
}

/* Bounds-checked memcpy: abort before writing past the end of a heap
   allocation containing DEST.  */
void *
memcpy (void *dest, const void *src, size_t n)
{
  size_t limit = get_bound (dest);
  if (n > limit)
    abort ();
  mempcpy (dest, src, n);
  return dest;
}