From 384c0131ccda2656dec23a0416ad3f14101151a7 Mon Sep 17 00:00:00 2001
From: Rich Felker <dalias@aerifal.cx>
Date: Wed, 3 Jun 2020 19:22:12 -0400
Subject: move oldmalloc to its own directory under src/malloc

this sets the stage for replacement, and makes it practical to keep
oldmalloc around as a build option for a while if that ends up being
useful.

only the files which are actually part of the implementation are
moved. memalign and posix_memalign are entirely generic. in theory
calloc could be pulled out too, but it's useful to have it tied to the
implementation so as to optimize out unnecessary memset when
implementation details make it possible to know the memory is already
clear.
---
 src/malloc/aligned_alloc.c                |  52 ---
 src/malloc/malloc.c                       | 550 ------------------------------
 src/malloc/malloc_impl.h                  |  39 ---
 src/malloc/malloc_usable_size.c           |   9 -
 src/malloc/oldmalloc/aligned_alloc.c      |  52 +++
 src/malloc/oldmalloc/malloc.c             | 550 ++++++++++++++++++++++++++++++
 src/malloc/oldmalloc/malloc_impl.h        |  39 +++
 src/malloc/oldmalloc/malloc_usable_size.c |   9 +
 8 files changed, 650 insertions(+), 650 deletions(-)
 delete mode 100644 src/malloc/aligned_alloc.c
 delete mode 100644 src/malloc/malloc.c
 delete mode 100644 src/malloc/malloc_impl.h
 delete mode 100644 src/malloc/malloc_usable_size.c
 create mode 100644 src/malloc/oldmalloc/aligned_alloc.c
 create mode 100644 src/malloc/oldmalloc/malloc.c
 create mode 100644 src/malloc/oldmalloc/malloc_impl.h
 create mode 100644 src/malloc/oldmalloc/malloc_usable_size.c

diff --git a/src/malloc/aligned_alloc.c b/src/malloc/aligned_alloc.c
deleted file mode 100644
index e06c76ed..00000000
--- a/src/malloc/aligned_alloc.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#include <stdlib.h>
-#include <stdint.h>
-#include <errno.h>
-#include "malloc_impl.h"
-
-void *aligned_alloc(size_t align, size_t len)
-{
-	unsigned char *mem, *new;
-
-	if ((align & -align) != align) {
-		errno = EINVAL;
-		return 0;
-	}
-
-	if (len > SIZE_MAX - align || __malloc_replaced) {
-		errno = ENOMEM;
-		return 0;
-	}
-
-	if (align <= SIZE_ALIGN)
-		return malloc(len);
-
-	if (!(mem = malloc(len + align-1)))
-		return 0;
-
-	new = (void *)((uintptr_t)mem + align-1 & -align);
-	if (new == mem) return mem;
-
-	struct chunk *c = MEM_TO_CHUNK(mem);
-	struct chunk *n = MEM_TO_CHUNK(new);
-
-	if (IS_MMAPPED(c)) {
-		/* Apply difference between aligned and original
-		 * address to the "extra" field of mmapped chunk. */
-		n->psize = c->psize + (new-mem);
-		n->csize = c->csize - (new-mem);
-		return new;
-	}
-
-	struct chunk *t = NEXT_CHUNK(c);
-
-	/* Split the allocated chunk into two chunks. The aligned part
-	 * that will be used has the size in its footer reduced by the
-	 * difference between the aligned and original addresses, and
-	 * the resulting size copied to its header. A new header and
-	 * footer are written for the split-off part to be freed. */
-	n->psize = c->csize = C_INUSE | (new-mem);
-	n->csize = t->psize -= new-mem;
-
-	__bin_chunk(c);
-	return new;
-}
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
deleted file mode 100644
index df3ea1be..00000000
--- a/src/malloc/malloc.c
+++ /dev/null
@@ -1,550 +0,0 @@
-#define _GNU_SOURCE
-#include <stdlib.h>
-#include <string.h>
-#include <limits.h>
-#include <stdint.h>
-#include <errno.h>
-#include <sys/mman.h>
-#include "libc.h"
-#include "atomic.h"
-#include "pthread_impl.h"
-#include "malloc_impl.h"
-
-#if defined(__GNUC__) && defined(__PIC__)
-#define inline inline __attribute__((always_inline))
-#endif
-
-static struct {
-	volatile uint64_t binmap;
-	struct bin bins[64];
-	volatile int split_merge_lock[2];
-} mal;
-
-int __malloc_replaced;
-
-/* Synchronization tools */
-
-static inline void lock(volatile int *lk)
-{
-	int need_locks = libc.need_locks;
-	if (need_locks) {
-		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
-		if (need_locks < 0) libc.need_locks = 0;
-	}
-}
-
-static inline void unlock(volatile int *lk)
-{
-	if (lk[0]) {
-		a_store(lk, 0);
-		if (lk[1]) __wake(lk, 1, 1);
-	}
-}
-
-static inline void lock_bin(int i)
-{
-	lock(mal.bins[i].lock);
-	if (!mal.bins[i].head)
-		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
-}
-
-static inline void unlock_bin(int i)
-{
-	unlock(mal.bins[i].lock);
-}
-
-static int first_set(uint64_t x)
-{
-#if 1
-	return a_ctz_64(x);
-#else
-	static const char debruijn64[64] = {
-		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
-		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
-		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
-		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
-	};
-	static const char debruijn32[32] = {
-		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
-		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
-	};
-	if (sizeof(long) < 8) {
-		uint32_t y = x;
-		if (!y) {
-			y = x>>32;
-			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
-		}
-		return debruijn32[(y&-y)*0x076be629 >> 27];
-	}
-	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
-#endif
-}
-
-static const unsigned char bin_tab[60] = {
-	            32,33,34,35,36,36,37,37,38,38,39,39,
-	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
-	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
-	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
-};
-
-static int bin_index(size_t x)
-{
-	x = x / SIZE_ALIGN - 1;
-	if (x <= 32) return x;
-	if (x < 512) return bin_tab[x/8-4];
-	if (x > 0x1c00) return 63;
-	return bin_tab[x/128-4] + 16;
-}
-
-static int bin_index_up(size_t x)
-{
-	x = x / SIZE_ALIGN - 1;
-	if (x <= 32) return x;
-	x--;
-	if (x < 512) return bin_tab[x/8-4] + 1;
-	return bin_tab[x/128-4] + 17;
-}
-
-#if 0
-void __dump_heap(int x)
-{
-	struct chunk *c;
-	int i;
-	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
-		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
-			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
-			c->csize & 15,
-			NEXT_CHUNK(c)->psize & 15);
-	for (i=0; i<64; i++) {
-		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
-			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
-			if (!(mal.binmap & 1ULL<<i))
-				fprintf(stderr, "missing from binmap!\n");
-		} else if (mal.binmap & 1ULL<<i)
-			fprintf(stderr, "binmap wrongly contains %d!\n", i);
-	}
-}
-#endif
-
-/* This function returns true if the interval [old,new]
- * intersects the 'len'-sized interval below &libc.auxv
- * (interpreted as the main-thread stack) or below &b
- * (the current stack). It is used to defend against
- * buggy brk implementations that can cross the stack. */
-
-static int traverses_stack_p(uintptr_t old, uintptr_t new)
-{
-	const uintptr_t len = 8<<20;
-	uintptr_t a, b;
-
-	b = (uintptr_t)libc.auxv;
-	a = b > len ? b-len : 0;
-	if (new>a && old<b) return 1;
-
-	b = (uintptr_t)&b;
-	a = b > len ? b-len : 0;
-	if (new>a && old<b) return 1;
-
-	return 0;
-}
-
-/* Expand the heap in-place if brk can be used, or otherwise via mmap,
- * using an exponential lower bound on growth by mmap to make
- * fragmentation asymptotically irrelevant. The size argument is both
- * an input and an output, since the caller needs to know the size
- * allocated, which will be larger than requested due to page alignment
- * and mmap minimum size rules. The caller is responsible for locking
- * to prevent concurrent calls. */
-
-static void *__expand_heap(size_t *pn)
-{
-	static uintptr_t brk;
-	static unsigned mmap_step;
-	size_t n = *pn;
-
-	if (n > SIZE_MAX/2 - PAGE_SIZE) {
-		errno = ENOMEM;
-		return 0;
-	}
-	n += -n & PAGE_SIZE-1;
-
-	if (!brk) {
-		brk = __syscall(SYS_brk, 0);
-		brk += -brk & PAGE_SIZE-1;
-	}
-
-	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
-	    && __syscall(SYS_brk, brk+n)==brk+n) {
-		*pn = n;
-		brk += n;
-		return (void *)(brk-n);
-	}
-
-	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
-	if (n < min) n = min;
-	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
-		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-	if (area == MAP_FAILED) return 0;
-	*pn = n;
-	mmap_step++;
-	return area;
-}
-
-static struct chunk *expand_heap(size_t n)
-{
-	static void *end;
-	void *p;
-	struct chunk *w;
-
-	/* The argument n already accounts for the caller's chunk
-	 * overhead needs, but if the heap can't be extended in-place,
-	 * we need room for an extra zero-sized sentinel chunk. */
-	n += SIZE_ALIGN;
-
-	p = __expand_heap(&n);
-	if (!p) return 0;
-
-	/* If not just expanding existing space, we need to make a
-	 * new sentinel chunk below the allocated space. */
-	if (p != end) {
-		/* Valid/safe because of the prologue increment. */
-		n -= SIZE_ALIGN;
-		p = (char *)p + SIZE_ALIGN;
-		w = MEM_TO_CHUNK(p);
-		w->psize = 0 | C_INUSE;
-	}
-
-	/* Record new heap end and fill in footer. */
-	end = (char *)p + n;
-	w = MEM_TO_CHUNK(end);
-	w->psize = n | C_INUSE;
-	w->csize = 0 | C_INUSE;
-
-	/* Fill in header, which may be new or may be replacing a
-	 * zero-size sentinel header at the old end-of-heap. */
-	w = MEM_TO_CHUNK(p);
-	w->csize = n | C_INUSE;
-
-	return w;
-}
-
-static int adjust_size(size_t *n)
-{
-	/* Result of pointer difference must fit in ptrdiff_t. */
-	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
-		if (*n) {
-			errno = ENOMEM;
-			return -1;
-		} else {
-			*n = SIZE_ALIGN;
-			return 0;
-		}
-	}
-	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
-	return 0;
-}
-
-static void unbin(struct chunk *c, int i)
-{
-	if (c->prev == c->next)
-		a_and_64(&mal.binmap, ~(1ULL<<i));
-	c->prev->next = c->next;
-	c->next->prev = c->prev;
-	c->csize |= C_INUSE;
-	NEXT_CHUNK(c)->psize |= C_INUSE;
-}
-
-static void bin_chunk(struct chunk *self, int i)
-{
-	self->next = BIN_TO_CHUNK(i);
-	self->prev = mal.bins[i].tail;
-	self->next->prev = self;
-	self->prev->next = self;
-	if (self->prev == BIN_TO_CHUNK(i))
-		a_or_64(&mal.binmap, 1ULL<<i);
-}
-
-static void trim(struct chunk *self, size_t n)
-{
-	size_t n1 = CHUNK_SIZE(self);
-	struct chunk *next, *split;
-
-	if (n >= n1 - DONTCARE) return;
-
-	next = NEXT_CHUNK(self);
-	split = (void *)((char *)self + n);
-
-	split->psize = n | C_INUSE;
-	split->csize = n1-n;
-	next->psize = n1-n;
-	self->csize = n | C_INUSE;
-
-	int i = bin_index(n1-n);
-	lock_bin(i);
-
-	bin_chunk(split, i);
-
-	unlock_bin(i);
-}
-
-void *malloc(size_t n)
-{
-	struct chunk *c;
-	int i, j;
-	uint64_t mask;
-
-	if (adjust_size(&n) < 0) return 0;
-
-	if (n > MMAP_THRESHOLD) {
-		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
-		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
-			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-		if (base == (void *)-1) return 0;
-		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
-		c->csize = len - (SIZE_ALIGN - OVERHEAD);
-		c->psize = SIZE_ALIGN - OVERHEAD;
-		return CHUNK_TO_MEM(c);
-	}
-
-	i = bin_index_up(n);
-	if (i<63 && (mal.binmap & (1ULL<<i))) {
-		lock_bin(i);
-		c = mal.bins[i].head;
-		if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c)-n <= DONTCARE) {
-			unbin(c, i);
-			unlock_bin(i);
-			return CHUNK_TO_MEM(c);
-		}
-		unlock_bin(i);
-	}
-	lock(mal.split_merge_lock);
-	for (mask = mal.binmap & -(1ULL<<i); mask; mask -= (mask&-mask)) {
-		j = first_set(mask);
-		lock_bin(j);
-		c = mal.bins[j].head;
-		if (c != BIN_TO_CHUNK(j)) {
-			unbin(c, j);
-			unlock_bin(j);
-			break;
-		}
-		unlock_bin(j);
-	}
-	if (!mask) {
-		c = expand_heap(n);
-		if (!c) {
-			unlock(mal.split_merge_lock);
-			return 0;
-		}
-	}
-	trim(c, n);
-	unlock(mal.split_merge_lock);
-	return CHUNK_TO_MEM(c);
-}
-
-static size_t mal0_clear(char *p, size_t pagesz, size_t n)
-{
-#ifdef __GNUC__
-	typedef uint64_t __attribute__((__may_alias__)) T;
-#else
-	typedef unsigned char T;
-#endif
-	char *pp = p + n;
-	size_t i = (uintptr_t)pp & (pagesz - 1);
-	for (;;) {
-		pp = memset(pp - i, 0, i);
-		if (pp - p < pagesz) return pp - p;
-		for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
-			if (((T *)pp)[-1] | ((T *)pp)[-2])
-				break;
-	}
-}
-
-void *calloc(size_t m, size_t n)
-{
-	if (n && m > (size_t)-1/n) {
-		errno = ENOMEM;
-		return 0;
-	}
-	n *= m;
-	void *p = malloc(n);
-	if (!p) return p;
-	if (!__malloc_replaced) {
-		if (IS_MMAPPED(MEM_TO_CHUNK(p)))
-			return p;
-		if (n >= PAGE_SIZE)
-			n = mal0_clear(p, PAGE_SIZE, n);
-	}
-	return memset(p, 0, n);
-}
-
-void *realloc(void *p, size_t n)
-{
-	struct chunk *self, *next;
-	size_t n0, n1;
-	void *new;
-
-	if (!p) return malloc(n);
-
-	if (adjust_size(&n) < 0) return 0;
-
-	self = MEM_TO_CHUNK(p);
-	n1 = n0 = CHUNK_SIZE(self);
-
-	if (n<=n0 && n0-n<=DONTCARE) return p;
-
-	if (IS_MMAPPED(self)) {
-		size_t extra = self->psize;
-		char *base = (char *)self - extra;
-		size_t oldlen = n0 + extra;
-		size_t newlen = n + extra;
-		/* Crash on realloc of freed chunk */
-		if (extra & 1) a_crash();
-		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
-			n0 = n;
-			goto copy_free_ret;
-		}
-		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
-		if (oldlen == newlen) return p;
-		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
-		if (base == (void *)-1)
-			goto copy_realloc;
-		self = (void *)(base + extra);
-		self->csize = newlen - extra;
-		return CHUNK_TO_MEM(self);
-	}
-
-	next = NEXT_CHUNK(self);
-
-	/* Crash on corrupted footer (likely from buffer overflow) */
-	if (next->psize != self->csize) a_crash();
-
-	lock(mal.split_merge_lock);
-
-	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
-	if (n0+nsize >= n) {
-		int i = bin_index(nsize);
-		lock_bin(i);
-		if (!(next->csize & C_INUSE)) {
-			unbin(next, i);
-			unlock_bin(i);
-			next = NEXT_CHUNK(next);
-			self->csize = next->psize = n0+nsize | C_INUSE;
-			trim(self, n);
-			unlock(mal.split_merge_lock);
-			return CHUNK_TO_MEM(self);
-		}
-		unlock_bin(i);
-	}
-	unlock(mal.split_merge_lock);
-
-copy_realloc:
-	/* As a last resort, allocate a new chunk and copy to it. */
-	new = malloc(n-OVERHEAD);
-	if (!new) return 0;
-copy_free_ret:
-	memcpy(new, p, n0-OVERHEAD);
-	free(CHUNK_TO_MEM(self));
-	return new;
-}
-
-void __bin_chunk(struct chunk *self)
-{
-	struct chunk *next = NEXT_CHUNK(self);
-
-	/* Crash on corrupted footer (likely from buffer overflow) */
-	if (next->psize != self->csize) a_crash();
-
-	lock(mal.split_merge_lock);
-
-	size_t osize = CHUNK_SIZE(self), size = osize;
-
-	/* Since we hold split_merge_lock, only transition from free to
-	 * in-use can race; in-use to free is impossible */
-	size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
-	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
-
-	if (psize) {
-		int i = bin_index(psize);
-		lock_bin(i);
-		if (!(self->psize & C_INUSE)) {
-			struct chunk *prev = PREV_CHUNK(self);
-			unbin(prev, i);
-			self = prev;
-			size += psize;
-		}
-		unlock_bin(i);
-	}
-	if (nsize) {
-		int i = bin_index(nsize);
-		lock_bin(i);
-		if (!(next->csize & C_INUSE)) {
-			unbin(next, i);
-			next = NEXT_CHUNK(next);
-			size += nsize;
-		}
-		unlock_bin(i);
-	}
-
-	int i = bin_index(size);
-	lock_bin(i);
-
-	self->csize = size;
-	next->psize = size;
-	bin_chunk(self, i);
-	unlock(mal.split_merge_lock);
-
-	/* Replace middle of large chunks with fresh zero pages */
-	if (size > RECLAIM && (size^(size-osize)) > size-osize) {
-		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
-		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
-#if 1
-		__madvise((void *)a, b-a, MADV_DONTNEED);
-#else
-		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
-			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
-#endif
-	}
-
-	unlock_bin(i);
-}
-
-static void unmap_chunk(struct chunk *self)
-{
-	size_t extra = self->psize;
-	char *base = (char *)self - extra;
-	size_t len = CHUNK_SIZE(self) + extra;
-	/* Crash on double free */
-	if (extra & 1) a_crash();
-	__munmap(base, len);
-}
-
-void free(void *p)
-{
-	if (!p) return;
-
-	struct chunk *self = MEM_TO_CHUNK(p);
-
-	if (IS_MMAPPED(self))
-		unmap_chunk(self);
-	else
-		__bin_chunk(self);
-}
-
-void __malloc_donate(char *start, char *end)
-{
-	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
-	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;
-
-	/* Getting past this condition ensures that the padding for alignment
-	 * and header overhead will not overflow and will leave a nonzero
-	 * multiple of SIZE_ALIGN bytes between start and end. */
-	if (end - start <= OVERHEAD + align_start_up + align_end_down)
-		return;
-	start += align_start_up + OVERHEAD;
-	end -= align_end_down;
-
-	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
-	c->psize = n->csize = C_INUSE;
-	c->csize = n->psize = C_INUSE | (end-start);
-	__bin_chunk(c);
-}
diff --git a/src/malloc/malloc_impl.h b/src/malloc/malloc_impl.h
deleted file mode 100644
index e1cf4774..00000000
--- a/src/malloc/malloc_impl.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef MALLOC_IMPL_H
-#define MALLOC_IMPL_H
-
-#include <sys/mman.h>
-#include "dynlink.h"
-
-struct chunk {
-	size_t psize, csize;
-	struct chunk *next, *prev;
-};
-
-struct bin {
-	volatile int lock[2];
-	struct chunk *head;
-	struct chunk *tail;
-};
-
-#define SIZE_ALIGN (4*sizeof(size_t))
-#define SIZE_MASK (-SIZE_ALIGN)
-#define OVERHEAD (2*sizeof(size_t))
-#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
-#define DONTCARE 16
-#define RECLAIM 163840
-
-#define CHUNK_SIZE(c) ((c)->csize & -2)
-#define CHUNK_PSIZE(c) ((c)->psize & -2)
-#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
-#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
-#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
-#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
-#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))
-
-#define C_INUSE ((size_t)1)
-
-#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
-
-hidden void __bin_chunk(struct chunk *);
-
-#endif
diff --git a/src/malloc/malloc_usable_size.c b/src/malloc/malloc_usable_size.c
deleted file mode 100644
index 672b518a..00000000
--- a/src/malloc/malloc_usable_size.c
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <malloc.h>
-#include "malloc_impl.h"
-
-hidden void *(*const __realloc_dep)(void *, size_t) = realloc;
-
-size_t malloc_usable_size(void *p)
-{
-	return p ? CHUNK_SIZE(MEM_TO_CHUNK(p)) - OVERHEAD : 0;
-}
diff --git a/src/malloc/oldmalloc/aligned_alloc.c b/src/malloc/oldmalloc/aligned_alloc.c
new file mode 100644
index 00000000..e06c76ed
--- /dev/null
+++ b/src/malloc/oldmalloc/aligned_alloc.c
@@ -0,0 +1,52 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+#include "malloc_impl.h"
+
+void *aligned_alloc(size_t align, size_t len)
+{
+	unsigned char *mem, *new;
+
+	if ((align & -align) != align) {
+		errno = EINVAL;
+		return 0;
+	}
+
+	if (len > SIZE_MAX - align || __malloc_replaced) {
+		errno = ENOMEM;
+		return 0;
+	}
+
+	if (align <= SIZE_ALIGN)
+		return malloc(len);
+
+	if (!(mem = malloc(len + align-1)))
+		return 0;
+
+	new = (void *)((uintptr_t)mem + align-1 & -align);
+	if (new == mem) return mem;
+
+	struct chunk *c = MEM_TO_CHUNK(mem);
+	struct chunk *n = MEM_TO_CHUNK(new);
+
+	if (IS_MMAPPED(c)) {
+		/* Apply difference between aligned and original
+		 * address to the "extra" field of mmapped chunk. */
+		n->psize = c->psize + (new-mem);
+		n->csize = c->csize - (new-mem);
+		return new;
+	}
+
+	struct chunk *t = NEXT_CHUNK(c);
+
+	/* Split the allocated chunk into two chunks. The aligned part
+	 * that will be used has the size in its footer reduced by the
+	 * difference between the aligned and original addresses, and
+	 * the resulting size copied to its header. A new header and
+	 * footer are written for the split-off part to be freed. */
+	n->psize = c->csize = C_INUSE | (new-mem);
+	n->csize = t->psize -= new-mem;
+
+	__bin_chunk(c);
+	return new;
+}
diff --git a/src/malloc/oldmalloc/malloc.c b/src/malloc/oldmalloc/malloc.c
new file mode 100644
index 00000000..df3ea1be
--- /dev/null
+++ b/src/malloc/oldmalloc/malloc.c
@@ -0,0 +1,550 @@
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include "libc.h"
+#include "atomic.h"
+#include "pthread_impl.h"
+#include "malloc_impl.h"
+
+#if defined(__GNUC__) && defined(__PIC__)
+#define inline inline __attribute__((always_inline))
+#endif
+
+static struct {
+	volatile uint64_t binmap;
+	struct bin bins[64];
+	volatile int split_merge_lock[2];
+} mal;
+
+int __malloc_replaced;
+
+/* Synchronization tools */
+
+static inline void lock(volatile int *lk)
+{
+	int need_locks = libc.need_locks;
+	if (need_locks) {
+		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
+		if (need_locks < 0) libc.need_locks = 0;
+	}
+}
+
+static inline void unlock(volatile int *lk)
+{
+	if (lk[0]) {
+		a_store(lk, 0);
+		if (lk[1]) __wake(lk, 1, 1);
+	}
+}
+
+static inline void lock_bin(int i)
+{
+	lock(mal.bins[i].lock);
+	if (!mal.bins[i].head)
+		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
+}
+
+static inline void unlock_bin(int i)
+{
+	unlock(mal.bins[i].lock);
+}
+
+static int first_set(uint64_t x)
+{
+#if 1
+	return a_ctz_64(x);
+#else
+	static const char debruijn64[64] = {
+		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
+		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
+		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
+		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
+	};
+	static const char debruijn32[32] = {
+		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
+		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
+	};
+	if (sizeof(long) < 8) {
+		uint32_t y = x;
+		if (!y) {
+			y = x>>32;
+			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
+		}
+		return debruijn32[(y&-y)*0x076be629 >> 27];
+	}
+	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
+#endif
+}
+
+static const unsigned char bin_tab[60] = {
+	            32,33,34,35,36,36,37,37,38,38,39,39,
+	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
+	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
+	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
+};
+
+static int bin_index(size_t x)
+{
+	x = x / SIZE_ALIGN - 1;
+	if (x <= 32) return x;
+	if (x < 512) return bin_tab[x/8-4];
+	if (x > 0x1c00) return 63;
+	return bin_tab[x/128-4] + 16;
+}
+
+static int bin_index_up(size_t x)
+{
+	x = x / SIZE_ALIGN - 1;
+	if (x <= 32) return x;
+	x--;
+	if (x < 512) return bin_tab[x/8-4] + 1;
+	return bin_tab[x/128-4] + 17;
+}
+
+#if 0
+void __dump_heap(int x)
+{
+	struct chunk *c;
+	int i;
+	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
+		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
+			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
+			c->csize & 15,
+			NEXT_CHUNK(c)->psize & 15);
+	for (i=0; i<64; i++) {
+		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
+			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
+			if (!(mal.binmap & 1ULL<<i))
+				fprintf(stderr, "missing from binmap!\n");
+		} else if (mal.binmap & 1ULL<<i)
+			fprintf(stderr, "binmap wrongly contains %d!\n", i);
+	}
+}
+#endif
+
+/* This function returns true if the interval [old,new]
+ * intersects the 'len'-sized interval below &libc.auxv
+ * (interpreted as the main-thread stack) or below &b
+ * (the current stack). It is used to defend against
+ * buggy brk implementations that can cross the stack. */
+
+static int traverses_stack_p(uintptr_t old, uintptr_t new)
+{
+	const uintptr_t len = 8<<20;
+	uintptr_t a, b;
+
+	b = (uintptr_t)libc.auxv;
+	a = b > len ? b-len : 0;
+	if (new>a && old<b) return 1;
+
+	b = (uintptr_t)&b;
+	a = b > len ? b-len : 0;
+	if (new>a && old<b) return 1;
+
+	return 0;
+}
+
+/* Expand the heap in-place if brk can be used, or otherwise via mmap,
+ * using an exponential lower bound on growth by mmap to make
+ * fragmentation asymptotically irrelevant. The size argument is both
+ * an input and an output, since the caller needs to know the size
+ * allocated, which will be larger than requested due to page alignment
+ * and mmap minimum size rules. The caller is responsible for locking
+ * to prevent concurrent calls. */
+
+static void *__expand_heap(size_t *pn)
+{
+	static uintptr_t brk;
+	static unsigned mmap_step;
+	size_t n = *pn;
+
+	if (n > SIZE_MAX/2 - PAGE_SIZE) {
+		errno = ENOMEM;
+		return 0;
+	}
+	n += -n & PAGE_SIZE-1;
+
+	if (!brk) {
+		brk = __syscall(SYS_brk, 0);
+		brk += -brk & PAGE_SIZE-1;
+	}
+
+	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
+	    && __syscall(SYS_brk, brk+n)==brk+n) {
+		*pn = n;
+		brk += n;
+		return (void *)(brk-n);
+	}
+
+	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
+	if (n < min) n = min;
+	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
+		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (area == MAP_FAILED) return 0;
+	*pn = n;
+	mmap_step++;
+	return area;
+}
+
+static struct chunk *expand_heap(size_t n)
+{
+	static void *end;
+	void *p;
+	struct chunk *w;
+
+	/* The argument n already accounts for the caller's chunk
+	 * overhead needs, but if the heap can't be extended in-place,
+	 * we need room for an extra zero-sized sentinel chunk. */
+	n += SIZE_ALIGN;
+
+	p = __expand_heap(&n);
+	if (!p) return 0;
+
+	/* If not just expanding existing space, we need to make a
+	 * new sentinel chunk below the allocated space. */
+	if (p != end) {
+		/* Valid/safe because of the prologue increment. */
+		n -= SIZE_ALIGN;
+		p = (char *)p + SIZE_ALIGN;
+		w = MEM_TO_CHUNK(p);
+		w->psize = 0 | C_INUSE;
+	}
+
+	/* Record new heap end and fill in footer. */
+	end = (char *)p + n;
+	w = MEM_TO_CHUNK(end);
+	w->psize = n | C_INUSE;
+	w->csize = 0 | C_INUSE;
+
+	/* Fill in header, which may be new or may be replacing a
+	 * zero-size sentinel header at the old end-of-heap. */
+	w = MEM_TO_CHUNK(p);
+	w->csize = n | C_INUSE;
+
+	return w;
+}
+
+static int adjust_size(size_t *n)
+{
+	/* Result of pointer difference must fit in ptrdiff_t. */
+	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
+		if (*n) {
+			errno = ENOMEM;
+			return -1;
+		} else {
+			*n = SIZE_ALIGN;
+			return 0;
+		}
+	}
+	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
+	return 0;
+}
+
+static void unbin(struct chunk *c, int i)
+{
+	if (c->prev == c->next)
+		a_and_64(&mal.binmap, ~(1ULL<<i));
+	c->prev->next = c->next;
+	c->next->prev = c->prev;
+	c->csize |= C_INUSE;
+	NEXT_CHUNK(c)->psize |= C_INUSE;
+}
+
+static void bin_chunk(struct chunk *self, int i)
+{
+	self->next = BIN_TO_CHUNK(i);
+	self->prev = mal.bins[i].tail;
+	self->next->prev = self;
+	self->prev->next = self;
+	if (self->prev == BIN_TO_CHUNK(i))
+		a_or_64(&mal.binmap, 1ULL<<i);
+}
+
+static void trim(struct chunk *self, size_t n)
+{
+	size_t n1 = CHUNK_SIZE(self);
+	struct chunk *next, *split;
+
+	if (n >= n1 - DONTCARE) return;
+
+	next = NEXT_CHUNK(self);
+	split = (void *)((char *)self + n);
+
+	split->psize = n | C_INUSE;
+	split->csize = n1-n;
+	next->psize = n1-n;
+	self->csize = n | C_INUSE;
+
+	int i = bin_index(n1-n);
+	lock_bin(i);
+
+	bin_chunk(split, i);
+
+	unlock_bin(i);
+}
+
+void *malloc(size_t n)
+{
+	struct chunk *c;
+	int i, j;
+	uint64_t mask;
+
+	if (adjust_size(&n) < 0) return 0;
+
+	if (n > MMAP_THRESHOLD) {
+		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
+		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
+			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+		if (base == (void *)-1) return 0;
+		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
+		c->csize = len - (SIZE_ALIGN - OVERHEAD);
+		c->psize = SIZE_ALIGN - OVERHEAD;
+		return CHUNK_TO_MEM(c);
+	}
+
+	i = bin_index_up(n);
+	if (i<63 && (mal.binmap & (1ULL<<i))) {
+		lock_bin(i);
+		c = mal.bins[i].head;
+		if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c)-n <= DONTCARE) {
+			unbin(c, i);
+			unlock_bin(i);
+			return CHUNK_TO_MEM(c);
+		}
+		unlock_bin(i);
+	}
+	lock(mal.split_merge_lock);
+	for (mask = mal.binmap & -(1ULL<<i); mask; mask -= (mask&-mask)) {
+		j = first_set(mask);
+		lock_bin(j);
+		c = mal.bins[j].head;
+		if (c != BIN_TO_CHUNK(j)) {
+			unbin(c, j);
+			unlock_bin(j);
+			break;
+		}
+		unlock_bin(j);
+	}
+	if (!mask) {
+		c = expand_heap(n);
+		if (!c) {
+			unlock(mal.split_merge_lock);
+			return 0;
+		}
+	}
+	trim(c, n);
+	unlock(mal.split_merge_lock);
+	return CHUNK_TO_MEM(c);
+}
+
+static size_t mal0_clear(char *p, size_t pagesz, size_t n)
+{
+#ifdef __GNUC__
+	typedef uint64_t __attribute__((__may_alias__)) T;
+#else
+	typedef unsigned char T;
+#endif
+	char *pp = p + n;
+	size_t i = (uintptr_t)pp & (pagesz - 1);
+	for (;;) {
+		pp = memset(pp - i, 0, i);
+		if (pp - p < pagesz) return pp - p;
+		for (i = pagesz; i; i -= 2*sizeof(T), pp -= 2*sizeof(T))
+			if (((T *)pp)[-1] | ((T *)pp)[-2])
+				break;
+	}
+}
+
+void *calloc(size_t m, size_t n)
+{
+	if (n && m > (size_t)-1/n) {
+		errno = ENOMEM;
+		return 0;
+	}
+	n *= m;
+	void *p = malloc(n);
+	if (!p) return p;
+	if (!__malloc_replaced) {
+		if (IS_MMAPPED(MEM_TO_CHUNK(p)))
+			return p;
+		if (n >= PAGE_SIZE)
+			n = mal0_clear(p, PAGE_SIZE, n);
+	}
+	return memset(p, 0, n);
+}
+
+void *realloc(void *p, size_t n)
+{
+	struct chunk *self, *next;
+	size_t n0, n1;
+	void *new;
+
+	if (!p) return malloc(n);
+
+	if (adjust_size(&n) < 0) return 0;
+
+	self = MEM_TO_CHUNK(p);
+	n1 = n0 = CHUNK_SIZE(self);
+
+	if (n<=n0 && n0-n<=DONTCARE) return p;
+
+	if (IS_MMAPPED(self)) {
+		size_t extra = self->psize;
+		char *base = (char *)self - extra;
+		size_t oldlen = n0 + extra;
+		size_t newlen = n + extra;
+		/* Crash on realloc of freed chunk */
+		if (extra & 1) a_crash();
+		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
+			n0 = n;
+			goto copy_free_ret;
+		}
+		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
+		if (oldlen == newlen) return p;
+		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
+		if (base == (void *)-1)
+			goto copy_realloc;
+		self = (void *)(base + extra);
+		self->csize = newlen - extra;
+		return CHUNK_TO_MEM(self);
+	}
+
+	next = NEXT_CHUNK(self);
+
+	/* Crash on corrupted footer (likely from buffer overflow) */
+	if (next->psize != self->csize) a_crash();
+
+	lock(mal.split_merge_lock);
+
+	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
+	if (n0+nsize >= n) {
+		int i = bin_index(nsize);
+		lock_bin(i);
+		if (!(next->csize & C_INUSE)) {
+			unbin(next, i);
+			unlock_bin(i);
+			next = NEXT_CHUNK(next);
+			self->csize = next->psize = n0+nsize | C_INUSE;
+			trim(self, n);
+			unlock(mal.split_merge_lock);
+			return CHUNK_TO_MEM(self);
+		}
+		unlock_bin(i);
+	}
+	unlock(mal.split_merge_lock);
+
+copy_realloc:
+	/* As a last resort, allocate a new chunk and copy to it. */
+	new = malloc(n-OVERHEAD);
+	if (!new) return 0;
+copy_free_ret:
+	memcpy(new, p, n0-OVERHEAD);
+	free(CHUNK_TO_MEM(self));
+	return new;
+}
+
+void __bin_chunk(struct chunk *self)
+{
+	struct chunk *next = NEXT_CHUNK(self);
+
+	/* Crash on corrupted footer (likely from buffer overflow) */
+	if (next->psize != self->csize) a_crash();
+
+	lock(mal.split_merge_lock);
+
+	size_t osize = CHUNK_SIZE(self), size = osize;
+
+	/* Since we hold split_merge_lock, only transition from free to
+	 * in-use can race; in-use to free is impossible */
+	size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
+	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
+
+	if (psize) {
+		int i = bin_index(psize);
+		lock_bin(i);
+		if (!(self->psize & C_INUSE)) {
+			struct chunk *prev = PREV_CHUNK(self);
+			unbin(prev, i);
+			self = prev;
+			size += psize;
+		}
+		unlock_bin(i);
+	}
+	if (nsize) {
+		int i = bin_index(nsize);
+		lock_bin(i);
+		if (!(next->csize & C_INUSE)) {
+			unbin(next, i);
+			next = NEXT_CHUNK(next);
+			size += nsize;
+		}
+		unlock_bin(i);
+	}
+
+	int i = bin_index(size);
+	lock_bin(i);
+
+	self->csize = size;
+	next->psize = size;
+	bin_chunk(self, i);
+	unlock(mal.split_merge_lock);
+
+	/* Replace middle of large chunks with fresh zero pages */
+	if (size > RECLAIM && (size^(size-osize)) > size-osize) {
+		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
+		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
+#if 1
+		__madvise((void *)a, b-a, MADV_DONTNEED);
+#else
+		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
+			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
+#endif
+	}
+
+	unlock_bin(i);
+}
+
+static void unmap_chunk(struct chunk *self)
+{
+	size_t extra = self->psize;
+	char *base = (char *)self - extra;
+	size_t len = CHUNK_SIZE(self) + extra;
+	/* Crash on double free */
+	if (extra & 1) a_crash();
+	__munmap(base, len);
+}
+
+void free(void *p)
+{
+	if (!p) return;
+
+	struct chunk *self = MEM_TO_CHUNK(p);
+
+	if (IS_MMAPPED(self))
+		unmap_chunk(self);
+	else
+		__bin_chunk(self);
+}
+
+void __malloc_donate(char *start, char *end)
+{
+	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
+	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;
+
+	/* Getting past this condition ensures that the padding for alignment
+	 * and header overhead will not overflow and will leave a nonzero
+	 * multiple of SIZE_ALIGN bytes between start and end. */
+	if (end - start <= OVERHEAD + align_start_up + align_end_down)
+		return;
+	start += align_start_up + OVERHEAD;
+	end -= align_end_down;
+
+	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
+	c->psize = n->csize = C_INUSE;
+	c->csize = n->psize = C_INUSE | (end-start);
+	__bin_chunk(c);
+}
diff --git a/src/malloc/oldmalloc/malloc_impl.h b/src/malloc/oldmalloc/malloc_impl.h
new file mode 100644
index 00000000..e1cf4774
--- /dev/null
+++ b/src/malloc/oldmalloc/malloc_impl.h
@@ -0,0 +1,39 @@
+#ifndef MALLOC_IMPL_H
+#define MALLOC_IMPL_H
+
+#include <sys/mman.h>
+#include "dynlink.h"
+
+struct chunk {
+	size_t psize, csize;
+	struct chunk *next, *prev;
+};
+
+struct bin {
+	volatile int lock[2];
+	struct chunk *head;
+	struct chunk *tail;
+};
+
+#define SIZE_ALIGN (4*sizeof(size_t))
+#define SIZE_MASK (-SIZE_ALIGN)
+#define OVERHEAD (2*sizeof(size_t))
+#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
+#define DONTCARE 16
+#define RECLAIM 163840
+
+#define CHUNK_SIZE(c) ((c)->csize & -2)
+#define CHUNK_PSIZE(c) ((c)->psize & -2)
+#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
+#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
+#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
+#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
+#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))
+
+#define C_INUSE ((size_t)1)
+
+#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
+
+hidden void __bin_chunk(struct chunk *);
+
+#endif
diff --git a/src/malloc/oldmalloc/malloc_usable_size.c b/src/malloc/oldmalloc/malloc_usable_size.c
new file mode 100644
index 00000000..672b518a
--- /dev/null
+++ b/src/malloc/oldmalloc/malloc_usable_size.c
@@ -0,0 +1,9 @@
+#include <malloc.h>
+#include "malloc_impl.h"
+
+hidden void *(*const __realloc_dep)(void *, size_t) = realloc;
+
+size_t malloc_usable_size(void *p)
+{
+	return p ? CHUNK_SIZE(MEM_TO_CHUNK(p)) - OVERHEAD : 0;
+}
-- 
cgit v1.2.1
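
To illustrate the calloc rationale from the commit message: because calloc stays with the implementation, it can consult allocator internals (IS_MMAPPED() and mal0_clear() above) and skip the memset when the memory is already known to be zero. Below is a minimal standalone sketch of that pattern; raw_alloc() and alloc_is_fresh_zero() are hypothetical stand-ins for implementation internals, not musl API:

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

/* Stand-in: obtain possibly-dirty memory from the allocator. */
static void *raw_alloc(size_t n) { return malloc(n); }

/* Stand-in: report whether the allocator knows the memory is already
 * zero (e.g. freshly mmapped pages). Conservatively says no here. */
static int alloc_is_fresh_zero(void *p) { (void)p; return 0; }

static void *sketch_calloc(size_t m, size_t n)
{
	/* Overflow check, same shape as the calloc in the patch. */
	if (n && m > (size_t)-1/n) {
		errno = ENOMEM;
		return 0;
	}
	n *= m;
	void *p = raw_alloc(n);
	if (!p) return 0;
	/* Because calloc lives with the implementation, it can ask the
	 * allocator whether the memory is known-zero and elide the
	 * memset; a calloc layered on an opaque malloc cannot. */
	if (alloc_is_fresh_zero(p)) return p;
	return memset(p, 0, n);
}

int main(void)
{
	unsigned *a = sketch_calloc(4, sizeof *a);
	if (a) printf("%u %u %u %u\n", a[0], a[1], a[2], a[3]);
	free(a);
	return 0;
}

A generic calloc layered over an opaque malloc has no portable way to make that known-zero determination, which is why the commit leaves calloc with the implementation while moving only the implementation files.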