From 3721464568857e3b840e8f851d29ce540fb0884f Mon Sep 17 00:00:00 2001 From: Kia <kia@special-circumstanc.es> Date: Fri, 30 Jul 2021 21:04:07 -0600 Subject: [PATCH] Revert "start work on jemalloc interface" This reverts commit a38b35cdb839dd6c1e4190b42d0c6b830aeed759. --- SConstruct | 2 +- src/allocator.c | 242 +++++++++++++++++++++++++++++++---------- src/allocator.h | 16 +-- src/system_allocator.c | 62 ++++++++++- 4 files changed, 245 insertions(+), 77 deletions(-) diff --git a/SConstruct b/SConstruct index 71e6c3a8..3a3e7639 100644 --- a/SConstruct +++ b/SConstruct @@ -137,7 +137,7 @@ elif env['PLATFORM'] == 'win32': # no extra lib needed pass else: - env.MergeFlags('-lrt -ljemalloc') + env.MergeFlags('-lrt') if GetOption('coverage'): env.Append(CCFLAGS=['--coverage'], diff --git a/src/allocator.c b/src/allocator.c index 34ac830c..2ff5caca 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -1,6 +1,5 @@ -/* jemalloc allocator interface for Hammer. +/* Arena allocator for Hammer. * Copyright (C) 2012 Meredith L. Patterson, Dan "TQ" Hirsch - * Copyright (C) 2020 Kia * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -20,12 +19,42 @@ #include <stdint.h> #include <sys/types.h> #include <setjmp.h> -#include <jemalloc/jemalloc.h> #include "hammer.h" #include "internal.h" +struct arena_link { + // TODO: + // For efficiency, we should probably allocate the arena links in + // their own slice, and link to a block directly. That can be + // implemented later, though, with no change in interface. + struct arena_link *next; + size_t free; + size_t used; + uint8_t rest[]; +}; + +struct HArena_ { + struct arena_link *head; + struct HAllocator_ *mm__; + /* does mm__ zero blocks for us? 
*/ + bool malloc_zeros; + size_t block_size; + size_t used; + size_t wasted; +#ifdef DETAILED_ARENA_STATS + size_t mm_malloc_count, mm_malloc_bytes; + size_t memset_count, memset_bytes; + size_t arena_malloc_count, arena_malloc_bytes; + size_t arena_su_malloc_count, arena_su_malloc_bytes; + size_t arena_si_malloc_count, arena_si_malloc_bytes; + size_t arena_lu_malloc_count, arena_lu_malloc_bytes; + size_t arena_li_malloc_count, arena_li_malloc_bytes; +#endif + + jmp_buf *except; +}; static void * h_arena_malloc_raw(HArena *arena, size_t size, bool need_zero); @@ -37,22 +66,33 @@ void* h_alloc(HAllocator* mm__, size_t size) { } HArena *h_new_arena(HAllocator* mm__, size_t block_size) { - unsigned int arena_idx; - + if (block_size == 0) + block_size = 4096; struct HArena_ *ret = h_new(struct HArena_, 1); + struct arena_link *link = (struct arena_link*)h_alloc(mm__, sizeof(struct arena_link) + block_size); assert(ret != NULL); + assert(link != NULL); + link->free = block_size; + link->used = 0; + link->next = NULL; + ret->head = link; + ret->block_size = block_size; + ret->used = 0; ret->mm__ = mm__; - - int rv; - size_t sz = sizeof(arena_idx); - rv = mallctl("arenas.create", &arena_idx, &sz, 0, 0); -// printf("ar create RV is %d\n", rv); - assert(rv == 0); - printf("XXXXXXXXXXXX creating new arena, arena idx is %d\n", arena_idx); - - ret->arena_idx = arena_idx; - - ret->is_arena_valid = 1; +#ifdef DETAILED_ARENA_STATS + ret->mm_malloc_count = 2; + ret->mm_malloc_bytes = sizeof(*ret) + sizeof(struct arena_link) + block_size; + ret->memset_count = 0; + ret->memset_bytes = 0; + ret->arena_malloc_count = ret->arena_malloc_bytes = 0; + ret->arena_su_malloc_count = ret->arena_su_malloc_bytes = 0; + ret->arena_si_malloc_count = ret->arena_si_malloc_bytes = 0; + ret->arena_lu_malloc_count = ret->arena_lu_malloc_bytes = 0; + ret->arena_li_malloc_count = ret->arena_li_malloc_bytes = 0; +#endif + /* XXX provide a mechanism to indicate mm__ returns zeroed blocks */ + ret->malloc_zeros = false; + ret->wasted = sizeof(struct arena_link) + sizeof(struct HArena_) + block_size; ret->except = NULL; return ret; } @@ -62,12 +102,9 @@ void h_arena_set_except(HArena *arena, jmp_buf *except) arena->except = except; } -/* static void *alloc_block(HArena *arena, size_t size) { - assert(arena->is_arena_valid == 1); - - void *block = mallocx(size, MALLOCX_TCACHE(arena->tcache_idx) | MALLOCX_ARENA(arena->arena_idx)); + void *block = arena->mm__->alloc(arena->mm__, size); if (!block) { if (arena->except) longjmp(*arena->except, 1); @@ -75,7 +112,7 @@ static void *alloc_block(HArena *arena, size_t size) } return block; } -*/ + void * h_arena_malloc_noinit(HArena *arena, size_t size) { return h_arena_malloc_raw(arena, size, false); } @@ -86,58 +123,147 @@ void * h_arena_malloc(HArena *arena, size_t size) { static void * h_arena_malloc_raw(HArena *arena, size_t size, bool need_zero) { - void *block; + struct arena_link *link = NULL; + void *ret = NULL; - assert(arena->is_arena_valid == 1); + if (size <= arena->head->free) { + /* fast path.. 
*/ + ret = arena->head->rest + arena->head->used; + arena->used += size; + arena->wasted -= size; + arena->head->used += size; + arena->head->free -= size; - if (need_zero) { - block = mallocx(size, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena->arena_idx) | MALLOCX_ZERO); - } - else { - block = mallocx(size, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena->arena_idx)); - } +#ifdef DETAILED_ARENA_STATS + ++(arena->arena_malloc_count); + arena->arena_malloc_bytes += size; + if (need_zero) { + ++(arena->arena_si_malloc_count); + arena->arena_si_malloc_bytes += size; + } else { + ++(arena->arena_su_malloc_count); + arena->arena_su_malloc_bytes += size; + } +#endif + } else if (size > arena->block_size) { + /* + * We need a new, dedicated block for it, because it won't fit in a + * standard sized one. + * + * NOTE: + * + * We used to do a silly casting dance to treat blocks like this + * as special cases and make the used/free fields part of the allocated + * block, but the old code was not really proper portable C and depended + * on a bunch of implementation-specific behavior. We could have done it + * better with a union in struct arena_link, but the memory savings is + * only 0.39% for a 64-bit machine, a 4096-byte block size and all + * large allocations *only just one byte* over the block size, so I + * question the utility of it. We do still slip the large block in + * one position behind the list head so it doesn't cut off a partially + * filled list head. + * + * -- andrea + */ + link = alloc_block(arena, size + sizeof(struct arena_link)); + assert(link != NULL); + arena->used += size; + arena->wasted += sizeof(struct arena_link); + link->used = size; + link->free = 0; + link->next = arena->head->next; + arena->head->next = link; + ret = link->rest; - if (!block) { - if (arena->except) - longjmp(*arena->except, 1); - h_platform_errx(1, "memory allocation failed (%uB requested)\n", (unsigned int)size); +#ifdef DETAILED_ARENA_STATS + ++(arena->arena_malloc_count); + arena->arena_malloc_bytes += size; + if (need_zero) { + ++(arena->arena_li_malloc_count); + arena->arena_li_malloc_bytes += size; + } else { + ++(arena->arena_lu_malloc_count); + arena->arena_lu_malloc_bytes += size; + } +#endif + } else { + /* we just need to allocate an ordinary new block. */ + link = alloc_block(arena, sizeof(struct arena_link) + arena->block_size); + assert(link != NULL); +#ifdef DETAILED_ARENA_STATS + ++(arena->mm_malloc_count); + arena->mm_malloc_bytes += sizeof(struct arena_link) + arena->block_size; +#endif + link->free = arena->block_size - size; + link->used = size; + link->next = arena->head; + arena->head = link; + arena->used += size; + arena->wasted += sizeof(struct arena_link) + arena->block_size - size; + ret = link->rest; + +#ifdef DETAILED_ARENA_STATS + ++(arena->arena_malloc_count); + arena->arena_malloc_bytes += size; + if (need_zero) { + ++(arena->arena_si_malloc_count); + arena->arena_si_malloc_bytes += size; + } else { + ++(arena->arena_su_malloc_count); + arena->arena_su_malloc_bytes += size; + } +#endif } - assert(block != NULL); -// printf("XXXX ARENA MALLOC for %p, %ld\n",block, size); + /* + * Zeroize if necessary + */ + if (need_zero && !(arena->malloc_zeros)) { + memset(ret, 0, size); +#ifdef DETAILED_ARENA_STATS + ++(arena->memset_count); + arena->memset_bytes += size; +#endif + } - return block; + return ret; } void h_arena_free(HArena *arena, void* ptr) { -// printf("XXXX ARENA actual FREE for %p\n",ptr); - dallocx(ptr, MALLOCX_TCACHE_NONE); + // To be used later... 
} void h_delete_arena(HArena *arena) { HAllocator *mm__ = arena->mm__; - unsigned int arena_idx; - int rv; - size_t mib[3]; - size_t miblen; - - arena_idx = arena->arena_idx; - - // now we destroy the arena. - miblen = 3; - mallctlnametomib("arena.0.destroy", mib, &miblen); - mib[1] = arena_idx; - //printf("mib[1] is %zu\n", mib[1]); - - rv = mallctlbymib(mib, miblen, NULL, NULL, NULL, 0); - - //printf("arena destroy RV is %d\n", rv); - assert(rv == 0); - + struct arena_link *link = arena->head; + while (link) { + struct arena_link *next = link->next; + // Even in the case of a special block, without the full arena + // header, this is correct, because the next pointer is the first + // in the structure. + h_free(link); + link = next; + } h_free(arena); - printf("XXXXXXXXXXXXXXXXXXXX arena destroyed succesfully, arena idx %d\n", arena_idx); } void h_allocator_stats(HArena *arena, HArenaStats *stats) { - printf("THIS IS REALLY BAD XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"); + stats->used = arena->used; + stats->wasted = arena->wasted; +#ifdef DETAILED_ARENA_STATS + stats->mm_malloc_count = arena->mm_malloc_count; + stats->mm_malloc_bytes = arena->mm_malloc_bytes; + stats->memset_count = arena->memset_count; + stats->memset_bytes = arena->memset_bytes; + stats->arena_malloc_count = arena->arena_malloc_count; + stats->arena_malloc_bytes = arena->arena_malloc_bytes; + stats->arena_su_malloc_count = arena->arena_su_malloc_count; + stats->arena_su_malloc_bytes = arena->arena_su_malloc_bytes; + stats->arena_si_malloc_count = arena->arena_si_malloc_count; + stats->arena_si_malloc_bytes = arena->arena_si_malloc_bytes; + stats->arena_lu_malloc_count = arena->arena_lu_malloc_count; + stats->arena_lu_malloc_bytes = arena->arena_lu_malloc_bytes; + stats->arena_li_malloc_count = arena->arena_li_malloc_count; + stats->arena_li_malloc_bytes = arena->arena_li_malloc_bytes; +#endif } diff --git a/src/allocator.h b/src/allocator.h index ced2fa1c..06d1e6f5 100644 --- a/src/allocator.h +++ b/src/allocator.h @@ -1,6 +1,5 @@ -/* jemalloc allocator interface for Hammer. +/* Arena allocator for Hammer. * Copyright (C) 2012 Meredith L. Patterson, Dan "TQ" Hirsch - * Copyright (C) 2020 Kia * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -16,12 +15,10 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ -#ifndef HAMMER_JE_ALLOCATOR__H__ -#define HAMMER_JE_ALLOCATOR__H__ +#ifndef HAMMER_ALLOCATOR__H__ +#define HAMMER_ALLOCATOR__H__ #include <sys/types.h> #include <setjmp.h> -#include <jemalloc/jemalloc.h> - #ifdef __cplusplus extern "C" { @@ -61,13 +58,6 @@ void* h_arena_malloc(HArena *arena, size_t count) ATTR_MALLOC(2); void h_arena_free(HArena *arena, void* ptr); // For future expansion, with alternate memory managers. 
void h_delete_arena(HArena *arena); void h_arena_set_except(HArena *arena, jmp_buf *except); -struct HArena_ { - struct HAllocator_ *mm__; - - int is_arena_valid; - unsigned int arena_idx; - jmp_buf *except; -}; typedef struct { size_t used; diff --git a/src/system_allocator.c b/src/system_allocator.c index 57a55e29..f6e9cdcb 100644 --- a/src/system_allocator.c +++ b/src/system_allocator.c @@ -2,14 +2,57 @@ #include <stdlib.h> #include "internal.h" +// NOTE(uucidl): undefine to automatically fill up newly allocated block +// with this byte: +// #define DEBUG__MEMFILL 0xFF +#if defined(DEBUG__MEMFILL) +/** + * Blocks allocated by the system_allocator start with this header. + * I.e. the user part of the allocation directly follows. + */ +typedef struct HDebugBlockHeader_ +{ + size_t size; /** size of the user allocation */ +} HDebugBlockHeader; + +#define BLOCK_HEADER_SIZE (sizeof(HDebugBlockHeader)) +#else +#define BLOCK_HEADER_SIZE (0) +#endif + +/** + * Compute the total size needed for a given allocation size. + */ +static inline size_t block_size(size_t alloc_size) { + return BLOCK_HEADER_SIZE + alloc_size; +} + +/** + * Obtain the block containing the user pointer `uptr` + */ +static inline void* block_for_user_ptr(void *uptr) { + return ((char*)uptr) - BLOCK_HEADER_SIZE; +} + +/** + * Obtain the user area of the allocation from a given block + */ +static inline void* user_ptr(void *block) { + return ((char*)block) + BLOCK_HEADER_SIZE; +} static void* system_alloc(HAllocator *allocator, size_t size) { - void *block = malloc(size); + void *block = malloc(block_size(size)); if (!block) { return NULL; } - return block; + void *uptr = user_ptr(block); +#ifdef DEBUG__MEMFILL + memset(uptr, DEBUG__MEMFILL, size); + ((HDebugBlockHeader*)block)->size = size; +#endif + return uptr; } static void* system_realloc(HAllocator *allocator, void* uptr, size_t size) { @@ -18,16 +61,25 @@ static void* system_realloc(HAllocator *allocator, void* uptr, size_t size) { } // XXX this is incorrect if size == 0 and BLOCK_HEADER_SIZE != 0; it fails // to behave like free(3) - void* block = realloc(uptr, size); + void* block = realloc(block_for_user_ptr(uptr), block_size(size)); if (!block) { return NULL; } - return block; + uptr = user_ptr(block); + +#ifdef DEBUG__MEMFILL + // XXX this is the wrong block; this is reading uninitialized memory + size_t old_size = ((HDebugBlockHeader*)block)->size; + if (size > old_size) + memset((char*)uptr+old_size, DEBUG__MEMFILL, size - old_size); + ((HDebugBlockHeader*)block)->size = size; +#endif + return uptr; } static void system_free(HAllocator *allocator, void* uptr) { if (uptr) { - free(uptr); + free(block_for_user_ptr(uptr)); } } -- GitLab
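
A brief usage sketch (not part of the patch) may help reviewers see the restored arena interface from the caller's side. It assumes only the declarations visible in the src/allocator.h and src/allocator.c hunks above (h_new_arena, h_arena_malloc, h_arena_free, h_delete_arena, and the HAllocator/HArena types); the names arena_strdup, demo, and mm below are illustrative and do not exist in the tree.

#include <string.h>
#include "allocator.h"   /* assumed to provide HArena, HAllocator, h_* declarations */

/* Copy a string into arena-owned storage. */
static char *arena_strdup(HArena *arena, const char *s) {
    size_t n = strlen(s) + 1;
    char *copy = h_arena_malloc(arena, n);  /* returns a zero-initialised block */
    memcpy(copy, s, n);
    return copy;
}

void demo(HAllocator *mm) {
    /* Passing 0 selects the default 4096-byte block size in h_new_arena. */
    HArena *arena = h_new_arena(mm, 0);
    char *msg = arena_strdup(arena, "hello, arena");
    (void)msg;
    /* h_arena_free is currently a no-op; all blocks are released here. */
    h_delete_arena(arena);
}

With jemalloc out of the picture again, per-pointer frees stay deferred: h_delete_arena walks the arena_link chain and hands each block back to the backing HAllocator in one pass.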