This is the mail archive of the libc-alpha@sources.redhat.com mailing list for the glibc project.
Re: malloc() and spinlocks
On Monday 25 November 2002 17:39, David Boreham wrote:
[snip]
>
> Hmm....I spend most of my life grumbling that code is optimized for the
> rare case that the application is single threaded and the machine only has
> one CPU :)
>
> Please do not break the SMP performance of malloc (at least don't break it
> more than it is already).
>
> Thanks.
I don't want to break anything. I just want something like this - see the
thread-m.h.patch attachment. I don't know if the code should be duplicated,
or if the spinlock code is unnecessary in the non-_LIBC part. The
malloc.c.patch is for __inline__ - I don't know if you even want to apply
that, and I doubt it can go in in this form (having simply __inline__
there); I simply added a couple of __inline__ where I saw fit :). As I
said, the spinlock one is the really important one.
--
Lubos Lunak
KDE developer
---------------------------------------------------------------------
SuSE CR, s.r.o. e-mail: l.lunak@suse.cz, l.lunak@kde.org
Drahobejlova 27 tel: +420 2 9654 2373
190 00 Praha 9 fax: +420 2 9654 2374
Czech Republic http://www.suse.cz/
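
[For experimenting with the idea outside glibc, here is a minimal,
self-contained sketch of the xchg-based spinlock the thread-m.h patch
below introduces. The locking logic mirrors the patch; the spin_t name
and the test harness (worker threads, counter, thread count) are
illustrative only and not part of the patch. Build: gcc -O2 -pthread spin.c]

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <time.h>

typedef struct {
  volatile unsigned int lock;
} spin_t;

static int spin_lock(spin_t *m) {
  int cnt = 0, r;
  struct timespec tm;

  for (;;) {
    /* Atomically swap 1 into the lock word; the old value lands in r,
       and 0 means the lock was free and is now ours. */
    __asm__ __volatile__ ("xchgl %0, %1"
                          : "=r"(r), "=m"(m->lock)
                          : "0"(1), "m"(m->lock)
                          : "memory");
    if (!r)
      return 0;
    if (cnt < 50) {          /* contended: yield the CPU first ... */
      sched_yield();
      cnt++;
    } else {                 /* ... then back off and sleep briefly */
      tm.tv_sec = 0;
      tm.tv_nsec = 2000001;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}

static int spin_unlock(spin_t *m) {
  m->lock = 0;  /* a plain store releases the lock on x86; the empty asm
                   below only keeps the compiler from reordering */
  __asm__ __volatile__ ("" : : : "memory");
  return 0;
}

static spin_t lk = { 0 };
static long counter = 0;

static void *worker(void *arg) {
  int i;
  (void)arg;
  for (i = 0; i < 100000; i++) {
    spin_lock(&lk);
    counter++;
    spin_unlock(&lk);
  }
  return NULL;
}

int main(void) {
  pthread_t t[4];
  int i;
  for (i = 0; i < 4; i++) pthread_create(&t[i], NULL, worker, NULL);
  for (i = 0; i < 4; i++) pthread_join(t[i], NULL);
  printf("counter = %ld (expect 400000)\n", counter);
  return 0;
}
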
--- thread-m.h.sav Mon Nov 25 14:55:58 2002
+++ thread-m.h Mon Nov 25 15:17:29 2002
@@ -38,6 +38,64 @@
typedef pthread_t thread_id;
/* mutex */
+#if (defined __i386__ || defined __x86_64__) && defined __GNUC__ && \
+ !defined USE_NO_SPINLOCKS
+
+#include <time.h>
+#include <sched.h> /* for sched_yield() */
+
+/* Use fast inline spinlocks. */
+typedef struct {
+ volatile unsigned int lock;
+ int pad0_;
+} mutex_t;
+
+#define MUTEX_INITIALIZER { 0 }
+#define mutex_init(m) ((m)->lock = 0)
+static inline int mutex_lock(mutex_t *m) {
+ int cnt = 0, r;
+ struct timespec tm;
+
+ for(;;) {
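+ /* Atomically swap 1 into the lock word; old value 0 means we now own it. */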
+ __asm__ __volatile__
+ ("xchgl %0, %1"
+ : "=r"(r), "=m"(m->lock)
+ : "0"(1), "m"(m->lock)
+ : "memory");
+ if(!r)
+ return 0;
+ if(cnt < 50) {
+ sched_yield();
+ cnt++;
+ } else {
+ tm.tv_sec = 0;
+ tm.tv_nsec = 2000001;
+ nanosleep(&tm, NULL);
+ cnt = 0;
+ }
+ }
+}
+static inline int mutex_trylock(mutex_t *m) {
+ int r;
+
+ __asm__ __volatile__
+ ("xchgl %0, %1"
+ : "=r"(r), "=m"(m->lock)
+ : "0"(1), "m"(m->lock)
+ : "memory");
+ return r;
+}
+static inline int mutex_unlock(mutex_t *m) {
+ m->lock = 0;
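+ /* Empty asm: a compiler barrier so the store is not reordered past here. */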
+ __asm __volatile ("" : "=m" (m->lock) : "0" (m->lock));
+ return 0;
+}
+
+#else
+
+/* Normal pthread mutex. */
typedef pthread_mutex_t mutex_t;
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
@@ -59,6 +117,8 @@ typedef pthread_mutex_t mutex_t;
(__pthread_mutex_unlock != NULL \
? __pthread_mutex_unlock (m) : (*(int*)(m) = 0))
+#endif
+
#define thread_atfork(prepare, parent, child) \
(__pthread_atfork != NULL ? __pthread_atfork(prepare, parent, child) : 0)
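
[One detail that ties the two attachments together: mutex_trylock above
returns the xchgl result directly, so it is 0 exactly when the lock was
free and is now held by the caller - the same zero-on-success convention
as pthread_mutex_trylock(), which the THREAD_STATS code in malloc.c
depends on. A tiny standalone check; the spin_t name and harness are
illustrative, not part of the patch:]

#include <assert.h>

typedef struct { volatile unsigned int lock; } spin_t;

/* Returns the previous value of the lock word: 0 means the lock was
   free and is now ours; nonzero means it was already held. */
static int spin_trylock(spin_t *m) {
  int r;
  __asm__ __volatile__ ("xchgl %0, %1"
                        : "=r"(r), "=m"(m->lock)
                        : "0"(1), "m"(m->lock)
                        : "memory");
  return r;
}

int main(void) {
  spin_t m = { 0 };
  assert(spin_trylock(&m) == 0);  /* free lock: acquired, old value 0 */
  assert(spin_trylock(&m) != 0);  /* held lock: busy, old value 1 */
  m.lock = 0;                     /* release */
  assert(spin_trylock(&m) == 0);  /* can acquire again */
  return 0;
}
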
--- malloc.c.sav Mon Nov 25 15:14:55 2002
+++ malloc.c Mon Nov 25 16:43:08 2002
@@ -2337,8 +2337,10 @@ void weak_variable (*__after_morecore_ho
*/
#if __STD_C
+__inline__
static void do_check_chunk(mstate av, mchunkptr p)
#else
+__inline__
static void do_check_chunk(av, p) mstate av; mchunkptr p;
#endif
{
@@ -2386,8 +2388,10 @@ static void do_check_chunk(av, p) mstate
*/
#if __STD_C
+__inline__
static void do_check_free_chunk(mstate av, mchunkptr p)
#else
+__inline__
static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
#endif
{
@@ -2424,8 +2428,10 @@ static void do_check_free_chunk(av, p) m
*/
#if __STD_C
+__inline__
static void do_check_inuse_chunk(mstate av, mchunkptr p)
#else
+__inline__
static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
#endif
{
@@ -2465,8 +2471,10 @@ static void do_check_inuse_chunk(av, p)
*/
#if __STD_C
+__inline__
static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
#else
+__inline__
static void do_check_remalloced_chunk(av, p, s)
mstate av; mchunkptr p; INTERNAL_SIZE_T s;
#endif
@@ -2498,8 +2506,10 @@ mstate av; mchunkptr p; INTERNAL_SIZE_T
*/
#if __STD_C
+__inline__
static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
#else
+__inline__
static void do_check_malloced_chunk(av, p, s)
mstate av; mchunkptr p; INTERNAL_SIZE_T s;
#endif
@@ -2532,6 +2542,7 @@ mstate av; mchunkptr p; INTERNAL_SIZE_T
display chunk addresses, sizes, bins, and other instrumentation.
*/
+__inline__
static void do_check_malloc_state(mstate av)
{
int i;
@@ -2668,8 +2679,10 @@ static void do_check_malloc_state(mstate
*/
#if __STD_C
+__inline__
static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
#else
+__inline__
static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
#endif
{
@@ -3121,8 +3134,10 @@ static Void_t* sYSMALLOc(nb, av) INTERNA
*/
#if __STD_C
+__inline__
static int sYSTRIm(size_t pad, mstate av)
#else
+__inline__
static int sYSTRIm(pad, av) size_t pad; mstate av;
#endif
{
@@ -3182,6 +3197,7 @@ static int sYSTRIm(pad, av) size_t pad;
#ifdef HAVE_MMAP
+__inline__
static void
internal_function
#if __STD_C
@@ -3211,6 +3227,7 @@ munmap_chunk(p) mchunkptr p;
#if HAVE_MREMAP
+__inline__
static mchunkptr
internal_function
#if __STD_C
@@ -3262,458 +3279,11 @@ mremap_chunk(p, new_size) mchunkptr p; s
#endif /* HAVE_MMAP */
-/*------------------------ Public wrappers. --------------------------------*/
-
-Void_t*
-public_mALLOc(size_t bytes)
-{
- mstate ar_ptr;
- Void_t *victim;
-
- __malloc_ptr_t (*hook) __MALLOC_P ((size_t, __const __malloc_ptr_t)) =
- __malloc_hook;
- if (hook != NULL)
- return (*hook)(bytes, RETURN_ADDRESS (0));
-
- arena_get(ar_ptr, bytes);
- if(!ar_ptr)
- return 0;
- victim = _int_malloc(ar_ptr, bytes);
- if(!victim) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_unlock(&ar_ptr->mutex);
- (void)mutex_lock(&main_arena.mutex);
- victim = _int_malloc(&main_arena, bytes);
- (void)mutex_unlock(&main_arena.mutex);
- } else {
-#if USE_ARENAS
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
- (void)mutex_unlock(&main_arena.mutex);
- if(ar_ptr) {
- victim = _int_malloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
-#endif
- }
- } else
- (void)mutex_unlock(&ar_ptr->mutex);
- assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
- ar_ptr == arena_for_chunk(mem2chunk(victim)));
- return victim;
-}
-
-void
-public_fREe(Void_t* mem)
-{
- mstate ar_ptr;
- mchunkptr p; /* chunk corresponding to mem */
-
- void (*hook) __MALLOC_P ((__malloc_ptr_t, __const __malloc_ptr_t)) =
- __free_hook;
- if (hook != NULL) {
- (*hook)(mem, RETURN_ADDRESS (0));
- return;
- }
-
- if (mem == 0) /* free(0) has no effect */
- return;
-
- p = mem2chunk(mem);
-
-#if HAVE_MMAP
- if (chunk_is_mmapped(p)) /* release mmapped memory. */
- {
- munmap_chunk(p);
- return;
- }
-#endif
-
- ar_ptr = arena_for_chunk(p);
-#if THREAD_STATS
- if(!mutex_trylock(&ar_ptr->mutex))
- ++(ar_ptr->stat_lock_direct);
- else {
- (void)mutex_lock(&ar_ptr->mutex);
- ++(ar_ptr->stat_lock_wait);
- }
-#else
- (void)mutex_lock(&ar_ptr->mutex);
-#endif
- _int_free(ar_ptr, mem);
- (void)mutex_unlock(&ar_ptr->mutex);
-}
-
-Void_t*
-public_rEALLOc(Void_t* oldmem, size_t bytes)
-{
- mstate ar_ptr;
- INTERNAL_SIZE_T nb; /* padded request size */
-
- mchunkptr oldp; /* chunk corresponding to oldmem */
- INTERNAL_SIZE_T oldsize; /* its size */
-
- Void_t* newp; /* chunk to return */
-
- __malloc_ptr_t (*hook) __MALLOC_P ((__malloc_ptr_t, size_t,
- __const __malloc_ptr_t)) =
- __realloc_hook;
- if (hook != NULL)
- return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
-
-#if REALLOC_ZERO_BYTES_FREES
- if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
-#endif
-
- /* realloc of null is supposed to be same as malloc */
- if (oldmem == 0) return public_mALLOc(bytes);
-
- oldp = mem2chunk(oldmem);
- oldsize = chunksize(oldp);
-
- checked_request2size(bytes, nb);
-
-#if HAVE_MMAP
- if (chunk_is_mmapped(oldp))
- {
- Void_t* newmem;
-
-#if HAVE_MREMAP
- newp = mremap_chunk(oldp, nb);
- if(newp) return chunk2mem(newp);
-#endif
- /* Note the extra SIZE_SZ overhead. */
- if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
- /* Must alloc, copy, free. */
- newmem = public_mALLOc(bytes);
- if (newmem == 0) return 0; /* propagate failure */
- MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
- munmap_chunk(oldp);
- return newmem;
- }
-#endif
-
- ar_ptr = arena_for_chunk(oldp);
-#if THREAD_STATS
- if(!mutex_trylock(&ar_ptr->mutex))
- ++(ar_ptr->stat_lock_direct);
- else {
- (void)mutex_lock(&ar_ptr->mutex);
- ++(ar_ptr->stat_lock_wait);
- }
-#else
- (void)mutex_lock(&ar_ptr->mutex);
-#endif
-
-#ifndef NO_THREADS
- /* As in malloc(), remember this arena for the next allocation. */
- tsd_setspecific(arena_key, (Void_t *)ar_ptr);
-#endif
-
- newp = _int_realloc(ar_ptr, oldmem, bytes);
-
- (void)mutex_unlock(&ar_ptr->mutex);
- assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
- ar_ptr == arena_for_chunk(mem2chunk(newp)));
- return newp;
-}
-
-Void_t*
-public_mEMALIGn(size_t alignment, size_t bytes)
-{
- mstate ar_ptr;
- Void_t *p;
-
- __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
- __const __malloc_ptr_t)) =
- __memalign_hook;
- if (hook != NULL)
- return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
-
- /* If need less alignment than we give anyway, just relay to malloc */
- if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
-
- /* Otherwise, ensure that it is at least a minimum chunk size */
- if (alignment < MINSIZE) alignment = MINSIZE;
-
- arena_get(ar_ptr, bytes + alignment + MINSIZE);
- if(!ar_ptr)
- return 0;
- p = _int_memalign(ar_ptr, alignment, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- if(!p) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(ar_ptr != &main_arena) {
- (void)mutex_lock(&main_arena.mutex);
- p = _int_memalign(&main_arena, alignment, bytes);
- (void)mutex_unlock(&main_arena.mutex);
- } else {
-#if USE_ARENAS
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
- if(ar_ptr) {
- p = _int_memalign(ar_ptr, alignment, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- }
-#endif
- }
- }
- assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
- ar_ptr == arena_for_chunk(mem2chunk(p)));
- return p;
-}
-
-Void_t*
-public_vALLOc(size_t bytes)
-{
- mstate ar_ptr;
- Void_t *p;
-
- if(__malloc_initialized < 0)
- ptmalloc_init ();
- arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
- if(!ar_ptr)
- return 0;
- p = _int_valloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- return p;
-}
-
-Void_t*
-public_pVALLOc(size_t bytes)
-{
- mstate ar_ptr;
- Void_t *p;
-
- if(__malloc_initialized < 0)
- ptmalloc_init ();
- arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
- p = _int_pvalloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
- return p;
-}
-
-Void_t*
-public_cALLOc(size_t n, size_t elem_size)
-{
- mstate av;
- mchunkptr oldtop, p;
- INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
- Void_t* mem;
- unsigned long clearsize;
- unsigned long nclears;
- INTERNAL_SIZE_T* d;
- __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
- __malloc_hook;
-
- /* size_t is unsigned so the behavior on overflow is defined. */
- bytes = n * elem_size;
-#define HALF_INTERNAL_SIZE_T \
- (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
- if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
- if (elem_size != 0 && bytes / elem_size != n) {
- MALLOC_FAILURE_ACTION;
- return 0;
- }
- }
-
- if (hook != NULL) {
- sz = bytes;
- mem = (*hook)(sz, RETURN_ADDRESS (0));
- if(mem == 0)
- return 0;
-#ifdef HAVE_MEMCPY
- return memset(mem, 0, sz);
-#else
- while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
- return mem;
-#endif
- }
-
- sz = bytes;
-
- arena_get(av, sz);
- if(!av)
- return 0;
-
- /* Check if we hand out the top chunk, in which case there may be no
- need to clear. */
-#if MORECORE_CLEARS
- oldtop = top(av);
- oldtopsize = chunksize(top(av));
-#if MORECORE_CLEARS < 2
- /* Only newly allocated memory is guaranteed to be cleared. */
- if (av == &main_arena &&
- oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
- oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
-#endif
-#endif
- mem = _int_malloc(av, sz);
-
- /* Only clearing follows, so we can unlock early. */
- (void)mutex_unlock(&av->mutex);
-
- assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
- av == arena_for_chunk(mem2chunk(mem)));
-
- if (mem == 0) {
- /* Maybe the failure is due to running out of mmapped areas. */
- if(av != &main_arena) {
- (void)mutex_lock(&main_arena.mutex);
- mem = _int_malloc(&main_arena, sz);
- (void)mutex_unlock(&main_arena.mutex);
- } else {
-#if USE_ARENAS
- /* ... or sbrk() has failed and there is still a chance to mmap() */
- (void)mutex_lock(&main_arena.mutex);
- av = arena_get2(av->next ? av : 0, sz);
- (void)mutex_unlock(&main_arena.mutex);
- if(av) {
- mem = _int_malloc(av, sz);
- (void)mutex_unlock(&av->mutex);
- }
-#endif
- }
- if (mem == 0) return 0;
- }
- p = mem2chunk(mem);
-
- /* Two optional cases in which clearing not necessary */
-#if HAVE_MMAP
- if (chunk_is_mmapped(p))
- return mem;
-#endif
-
- csz = chunksize(p);
-
-#if MORECORE_CLEARS
- if (p == oldtop && csz > oldtopsize) {
- /* clear only the bytes from non-freshly-sbrked memory */
- csz = oldtopsize;
- }
-#endif
-
- /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
- contents have an odd number of INTERNAL_SIZE_T-sized words;
- minimally 3. */
- d = (INTERNAL_SIZE_T*)mem;
- clearsize = csz - SIZE_SZ;
- nclears = clearsize / sizeof(INTERNAL_SIZE_T);
- assert(nclears >= 3);
-
- if (nclears > 9)
- MALLOC_ZERO(d, clearsize);
-
- else {
- *(d+0) = 0;
- *(d+1) = 0;
- *(d+2) = 0;
- if (nclears > 4) {
- *(d+3) = 0;
- *(d+4) = 0;
- if (nclears > 6) {
- *(d+5) = 0;
- *(d+6) = 0;
- if (nclears > 8) {
- *(d+7) = 0;
- *(d+8) = 0;
- }
- }
- }
- }
-
- return mem;
-}
-
-Void_t**
-public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
-{
- mstate ar_ptr;
- Void_t** m;
-
- arena_get(ar_ptr, n*elem_size);
- if(!ar_ptr)
- return 0;
-
- m = _int_icalloc(ar_ptr, n, elem_size, chunks);
- (void)mutex_unlock(&ar_ptr->mutex);
- return m;
-}
-
-Void_t**
-public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
-{
- mstate ar_ptr;
- Void_t** m;
-
- arena_get(ar_ptr, 0);
- if(!ar_ptr)
- return 0;
-
- m = _int_icomalloc(ar_ptr, n, sizes, chunks);
- (void)mutex_unlock(&ar_ptr->mutex);
- return m;
-}
-
-#ifndef _LIBC
-
-void
-public_cFREe(Void_t* m)
-{
- public_fREe(m);
-}
-
-#endif /* _LIBC */
-
-int
-public_mTRIm(size_t s)
-{
- int result;
-
- (void)mutex_lock(&main_arena.mutex);
- result = mTRIm(s);
- (void)mutex_unlock(&main_arena.mutex);
- return result;
-}
-
-size_t
-public_mUSABLe(Void_t* m)
-{
- size_t result;
-
- result = mUSABLe(m);
- return result;
-}
-
-void
-public_mSTATs()
-{
- mSTATs();
-}
-
-struct mallinfo public_mALLINFo()
-{
- struct mallinfo m;
-
- (void)mutex_lock(&main_arena.mutex);
- m = mALLINFo(&main_arena);
- (void)mutex_unlock(&main_arena.mutex);
- return m;
-}
-
-int
-public_mALLOPt(int p, int v)
-{
- int result;
- result = mALLOPt(p, v);
- return result;
-}
-
/*
------------------------------ malloc ------------------------------
*/
+__inline__
Void_t*
_int_malloc(mstate av, size_t bytes)
{
@@ -4098,6 +3668,7 @@ _int_malloc(mstate av, size_t bytes)
------------------------------ free ------------------------------
*/
+__inline__
void
_int_free(mstate av, Void_t* mem)
{
@@ -4270,6 +3841,7 @@ _int_free(mstate av, Void_t* mem)
initialization code.
*/
+__inline__
#if __STD_C
static void malloc_consolidate(mstate av)
#else
@@ -4372,6 +3944,7 @@ static void malloc_consolidate(av) mstat
------------------------------ realloc ------------------------------
*/
+__inline__
Void_t*
_int_realloc(mstate av, Void_t* oldmem, size_t bytes)
{
@@ -4597,6 +4170,7 @@ _int_realloc(mstate av, Void_t* oldmem,
------------------------------ memalign ------------------------------
*/
+__inline__
Void_t*
_int_memalign(mstate av, size_t alignment, size_t bytes)
{
@@ -4702,6 +4276,7 @@ _int_memalign(mstate av, size_t alignmen
------------------------------ calloc ------------------------------
*/
+__inline__
#if __STD_C
Void_t* cALLOc(size_t n_elements, size_t elem_size)
#else
@@ -4763,6 +4338,7 @@ Void_t* cALLOc(n_elements, elem_size) si
------------------------- independent_calloc -------------------------
*/
+__inline__
Void_t**
#if __STD_C
_int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
@@ -4803,6 +4379,7 @@ mstate av; size_t n_elements; size_t siz
*/
+__inline__
static Void_t**
#if __STD_C
iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
@@ -4928,6 +4505,7 @@ mstate av; size_t n_elements; size_t* si
------------------------------ valloc ------------------------------
*/
+__inline__
Void_t*
#if __STD_C
_int_valloc(mstate av, size_t bytes)
@@ -4965,6 +4543,7 @@ _int_pvalloc(av, bytes) mstate av, size_
------------------------------ malloc_trim ------------------------------
*/
+__inline__
#if __STD_C
int mTRIm(size_t pad)
#else
@@ -5187,6 +4766,455 @@ int mALLOPt(param_number, value) int par
}
+/*------------------------ Public wrappers. --------------------------------*/
+
+Void_t*
+public_mALLOc(size_t bytes)
+{
+ mstate ar_ptr;
+ Void_t *victim;
+
+ __malloc_ptr_t (*hook) __MALLOC_P ((size_t, __const __malloc_ptr_t)) =
+ __malloc_hook;
+ if (hook != NULL)
+ return (*hook)(bytes, RETURN_ADDRESS (0));
+
+ arena_get(ar_ptr, bytes);
+ if(!ar_ptr)
+ return 0;
+ victim = _int_malloc(ar_ptr, bytes);
+ if(!victim) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ if(ar_ptr != &main_arena) {
+ (void)mutex_unlock(&ar_ptr->mutex);
+ (void)mutex_lock(&main_arena.mutex);
+ victim = _int_malloc(&main_arena, bytes);
+ (void)mutex_unlock(&main_arena.mutex);
+ } else {
+#if USE_ARENAS
+ /* ... or sbrk() has failed and there is still a chance to mmap() */
+ ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
+ (void)mutex_unlock(&main_arena.mutex);
+ if(ar_ptr) {
+ victim = _int_malloc(ar_ptr, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ }
+#endif
+ }
+ } else
+ (void)mutex_unlock(&ar_ptr->mutex);
+ assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
+ ar_ptr == arena_for_chunk(mem2chunk(victim)));
+ return victim;
+}
+
+void
+public_fREe(Void_t* mem)
+{
+ mstate ar_ptr;
+ mchunkptr p; /* chunk corresponding to mem */
+
+ void (*hook) __MALLOC_P ((__malloc_ptr_t, __const __malloc_ptr_t)) =
+ __free_hook;
+ if (hook != NULL) {
+ (*hook)(mem, RETURN_ADDRESS (0));
+ return;
+ }
+
+ if (mem == 0) /* free(0) has no effect */
+ return;
+
+ p = mem2chunk(mem);
+
+#if HAVE_MMAP
+ if (chunk_is_mmapped(p)) /* release mmapped memory. */
+ {
+ munmap_chunk(p);
+ return;
+ }
+#endif
+
+ ar_ptr = arena_for_chunk(p);
+#if THREAD_STATS
+ if(!mutex_trylock(&ar_ptr->mutex))
+ ++(ar_ptr->stat_lock_direct);
+ else {
+ (void)mutex_lock(&ar_ptr->mutex);
+ ++(ar_ptr->stat_lock_wait);
+ }
+#else
+ (void)mutex_lock(&ar_ptr->mutex);
+#endif
+ _int_free(ar_ptr, mem);
+ (void)mutex_unlock(&ar_ptr->mutex);
+}
+
+Void_t*
+public_rEALLOc(Void_t* oldmem, size_t bytes)
+{
+ mstate ar_ptr;
+ INTERNAL_SIZE_T nb; /* padded request size */
+
+ mchunkptr oldp; /* chunk corresponding to oldmem */
+ INTERNAL_SIZE_T oldsize; /* its size */
+
+ Void_t* newp; /* chunk to return */
+
+ __malloc_ptr_t (*hook) __MALLOC_P ((__malloc_ptr_t, size_t,
+ __const __malloc_ptr_t)) =
+ __realloc_hook;
+ if (hook != NULL)
+ return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
+
+#if REALLOC_ZERO_BYTES_FREES
+ if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
+#endif
+
+ /* realloc of null is supposed to be same as malloc */
+ if (oldmem == 0) return public_mALLOc(bytes);
+
+ oldp = mem2chunk(oldmem);
+ oldsize = chunksize(oldp);
+
+ checked_request2size(bytes, nb);
+
+#if HAVE_MMAP
+ if (chunk_is_mmapped(oldp))
+ {
+ Void_t* newmem;
+
+#if HAVE_MREMAP
+ newp = mremap_chunk(oldp, nb);
+ if(newp) return chunk2mem(newp);
+#endif
+ /* Note the extra SIZE_SZ overhead. */
+ if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
+ /* Must alloc, copy, free. */
+ newmem = public_mALLOc(bytes);
+ if (newmem == 0) return 0; /* propagate failure */
+ MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
+ munmap_chunk(oldp);
+ return newmem;
+ }
+#endif
+
+ ar_ptr = arena_for_chunk(oldp);
+#if THREAD_STATS
+ if(!mutex_trylock(&ar_ptr->mutex))
+ ++(ar_ptr->stat_lock_direct);
+ else {
+ (void)mutex_lock(&ar_ptr->mutex);
+ ++(ar_ptr->stat_lock_wait);
+ }
+#else
+ (void)mutex_lock(&ar_ptr->mutex);
+#endif
+
+#ifndef NO_THREADS
+ /* As in malloc(), remember this arena for the next allocation. */
+ tsd_setspecific(arena_key, (Void_t *)ar_ptr);
+#endif
+
+ newp = _int_realloc(ar_ptr, oldmem, bytes);
+
+ (void)mutex_unlock(&ar_ptr->mutex);
+ assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
+ ar_ptr == arena_for_chunk(mem2chunk(newp)));
+ return newp;
+}
+
+Void_t*
+public_mEMALIGn(size_t alignment, size_t bytes)
+{
+ mstate ar_ptr;
+ Void_t *p;
+
+ __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
+ __const __malloc_ptr_t)) =
+ __memalign_hook;
+ if (hook != NULL)
+ return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
+
+ /* If need less alignment than we give anyway, just relay to malloc */
+ if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
+
+ /* Otherwise, ensure that it is at least a minimum chunk size */
+ if (alignment < MINSIZE) alignment = MINSIZE;
+
+ arena_get(ar_ptr, bytes + alignment + MINSIZE);
+ if(!ar_ptr)
+ return 0;
+ p = _int_memalign(ar_ptr, alignment, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ if(!p) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ if(ar_ptr != &main_arena) {
+ (void)mutex_lock(&main_arena.mutex);
+ p = _int_memalign(&main_arena, alignment, bytes);
+ (void)mutex_unlock(&main_arena.mutex);
+ } else {
+#if USE_ARENAS
+ /* ... or sbrk() has failed and there is still a chance to mmap() */
+ ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
+ if(ar_ptr) {
+ p = _int_memalign(ar_ptr, alignment, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ }
+#endif
+ }
+ }
+ assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
+ ar_ptr == arena_for_chunk(mem2chunk(p)));
+ return p;
+}
+
+Void_t*
+public_vALLOc(size_t bytes)
+{
+ mstate ar_ptr;
+ Void_t *p;
+
+ if(__malloc_initialized < 0)
+ ptmalloc_init ();
+ arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
+ if(!ar_ptr)
+ return 0;
+ p = _int_valloc(ar_ptr, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ return p;
+}
+
+Void_t*
+public_pVALLOc(size_t bytes)
+{
+ mstate ar_ptr;
+ Void_t *p;
+
+ if(__malloc_initialized < 0)
+ ptmalloc_init ();
+ arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
+ p = _int_pvalloc(ar_ptr, bytes);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ return p;
+}
+
+Void_t*
+public_cALLOc(size_t n, size_t elem_size)
+{
+ mstate av;
+ mchunkptr oldtop, p;
+ INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
+ Void_t* mem;
+ unsigned long clearsize;
+ unsigned long nclears;
+ INTERNAL_SIZE_T* d;
+ __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
+ __malloc_hook;
+
+ /* size_t is unsigned so the behavior on overflow is defined. */
+ bytes = n * elem_size;
+#define HALF_INTERNAL_SIZE_T \
+ (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
+ if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
+ if (elem_size != 0 && bytes / elem_size != n) {
+ MALLOC_FAILURE_ACTION;
+ return 0;
+ }
+ }
+
+ if (hook != NULL) {
+ sz = bytes;
+ mem = (*hook)(sz, RETURN_ADDRESS (0));
+ if(mem == 0)
+ return 0;
+#ifdef HAVE_MEMCPY
+ return memset(mem, 0, sz);
+#else
+ while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
+ return mem;
+#endif
+ }
+
+ sz = bytes;
+
+ arena_get(av, sz);
+ if(!av)
+ return 0;
+
+ /* Check if we hand out the top chunk, in which case there may be no
+ need to clear. */
+#if MORECORE_CLEARS
+ oldtop = top(av);
+ oldtopsize = chunksize(top(av));
+#if MORECORE_CLEARS < 2
+ /* Only newly allocated memory is guaranteed to be cleared. */
+ if (av == &main_arena &&
+ oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
+ oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
+#endif
+#endif
+ mem = _int_malloc(av, sz);
+
+ /* Only clearing follows, so we can unlock early. */
+ (void)mutex_unlock(&av->mutex);
+
+ assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
+ av == arena_for_chunk(mem2chunk(mem)));
+
+ if (mem == 0) {
+ /* Maybe the failure is due to running out of mmapped areas. */
+ if(av != &main_arena) {
+ (void)mutex_lock(&main_arena.mutex);
+ mem = _int_malloc(&main_arena, sz);
+ (void)mutex_unlock(&main_arena.mutex);
+ } else {
+#if USE_ARENAS
+ /* ... or sbrk() has failed and there is still a chance to mmap() */
+ (void)mutex_lock(&main_arena.mutex);
+ av = arena_get2(av->next ? av : 0, sz);
+ (void)mutex_unlock(&main_arena.mutex);
+ if(av) {
+ mem = _int_malloc(av, sz);
+ (void)mutex_unlock(&av->mutex);
+ }
+#endif
+ }
+ if (mem == 0) return 0;
+ }
+ p = mem2chunk(mem);
+
+ /* Two optional cases in which clearing not necessary */
+#if HAVE_MMAP
+ if (chunk_is_mmapped(p))
+ return mem;
+#endif
+
+ csz = chunksize(p);
+
+#if MORECORE_CLEARS
+ if (p == oldtop && csz > oldtopsize) {
+ /* clear only the bytes from non-freshly-sbrked memory */
+ csz = oldtopsize;
+ }
+#endif
+
+ /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
+ contents have an odd number of INTERNAL_SIZE_T-sized words;
+ minimally 3. */
+ d = (INTERNAL_SIZE_T*)mem;
+ clearsize = csz - SIZE_SZ;
+ nclears = clearsize / sizeof(INTERNAL_SIZE_T);
+ assert(nclears >= 3);
+
+ if (nclears > 9)
+ MALLOC_ZERO(d, clearsize);
+
+ else {
+ *(d+0) = 0;
+ *(d+1) = 0;
+ *(d+2) = 0;
+ if (nclears > 4) {
+ *(d+3) = 0;
+ *(d+4) = 0;
+ if (nclears > 6) {
+ *(d+5) = 0;
+ *(d+6) = 0;
+ if (nclears > 8) {
+ *(d+7) = 0;
+ *(d+8) = 0;
+ }
+ }
+ }
+ }
+
+ return mem;
+}
+
+Void_t**
+public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
+{
+ mstate ar_ptr;
+ Void_t** m;
+
+ arena_get(ar_ptr, n*elem_size);
+ if(!ar_ptr)
+ return 0;
+
+ m = _int_icalloc(ar_ptr, n, elem_size, chunks);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ return m;
+}
+
+Void_t**
+public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
+{
+ mstate ar_ptr;
+ Void_t** m;
+
+ arena_get(ar_ptr, 0);
+ if(!ar_ptr)
+ return 0;
+
+ m = _int_icomalloc(ar_ptr, n, sizes, chunks);
+ (void)mutex_unlock(&ar_ptr->mutex);
+ return m;
+}
+
+#ifndef _LIBC
+
+void
+public_cFREe(Void_t* m)
+{
+ public_fREe(m);
+}
+
+#endif /* _LIBC */
+
+int
+public_mTRIm(size_t s)
+{
+ int result;
+
+ (void)mutex_lock(&main_arena.mutex);
+ result = mTRIm(s);
+ (void)mutex_unlock(&main_arena.mutex);
+ return result;
+}
+
+size_t
+public_mUSABLe(Void_t* m)
+{
+ size_t result;
+
+ result = mUSABLe(m);
+ return result;
+}
+
+void
+public_mSTATs()
+{
+ mSTATs();
+}
+
+struct mallinfo public_mALLINFo()
+{
+ struct mallinfo m;
+
+ (void)mutex_lock(&main_arena.mutex);
+ m = mALLINFo(&main_arena);
+ (void)mutex_unlock(&main_arena.mutex);
+ return m;
+}
+
+int
+public_mALLOPt(int p, int v)
+{
+ int result;
+ result = mALLOPt(p, v);
+ return result;
+}
+
+
/*
-------------------- Alternative MORECORE functions --------------------
*/
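
[A note on what the malloc.c patch does besides adding __inline__: it
moves the public wrappers from before the _int_* functions to after
them. That ordering plausibly matters because GCC of this era would
typically inline a call only when the callee's definition had already
been seen in the translation unit, so the wrappers must follow the
internals to benefit from the inline hints. A minimal illustration with
hypothetical names; compile with gcc -O2 -S and compare the two calls:]

static int helper(int x);  /* declared here, defined further down */

int call_before_definition(int x) {
  /* helper's body has not been seen yet; older GCCs emit a real
     call here even at -O2 */
  return helper(x);
}

static int helper(int x) { return x * x; }

int call_after_definition(int x) {
  /* the definition has been seen, so this call can be inlined */
  return helper(x);
}

int main(void) {
  return call_before_definition(3) - call_after_definition(3);  /* 0 */
}
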