Lines matching "non-tunable"
32 #define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
33 ~(malloc_getpagesize-1))
34 #define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))
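The two AlignPage macros round an address up to the next page (or 64 KiB) boundary with the usual add-then-mask trick, which only works because the boundary is a power of two. A minimal standalone sketch of the same computation, using a hypothetical fixed 4 KiB page size in place of malloc_getpagesize:

#include <stdio.h>

#define PAGE_SIZE 0x1000UL  /* stand-in for malloc_getpagesize; must be a power of two */

/* Round 'add' up to the next PAGE_SIZE boundary: add (PAGE_SIZE - 1),
 * then clear the low-order bits so the result is a multiple of PAGE_SIZE. */
#define ALIGN_PAGE(add) (((add) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1))

int main(void)
{
	printf("%#lx -> %#lx\n", 0x12345UL, ALIGN_PAGE(0x12345UL)); /* prints 0x13000 */
	printf("%#lx -> %#lx\n", 0x13000UL, ALIGN_PAGE(0x13000UL)); /* already aligned */
	return 0;
}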
63 this->base = bas; in makeGmListElement()
64 this->next = head; in makeGmListElement()
73 assert ( (head == NULL) || (head->base == (void*)gAddressBase)); in gcleanup()
74 if (gAddressBase && (gNextAddress - gAddressBase)) in gcleanup()
77 gNextAddress - gAddressBase, in gcleanup()
83 GmListElement* next = head->next; in gcleanup()
84 rval = VirtualFree (head->base, 0, MEM_RELEASE); in gcleanup()
153 return (void*)-1; in wsbrk()
169 return (void*)-1; in wsbrk()
175 (size + gNextAddress - in wsbrk()
179 return (void*)-1; in wsbrk()
191 VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal, in wsbrk()
198 VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase, in wsbrk()
201 return (void*)-1; in wsbrk()
223 struct malloc_chunk* fd; /* double links -- used only if free. */
247-258 [diagram: layout of an allocated chunk; chunk->, mem-> and nextchunk-> mark the header, user-data and next-chunk boundaries]
267 thus double-word aligned.
269 Free chunks are stored in circular doubly-linked lists, and look like this:
271-285 [diagram: layout of a free chunk, with fd/bk links in place of user data; chunk->, mem-> and nextchunk-> boundaries as above]
287 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
288 chunk size (which is always a multiple of two words), is an in-use
293 preventing access to non-existent (or non-owned) memory.)
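Because chunk sizes are always a multiple of two words, the low-order bits of the stored size are free to carry flags: PREV_INUSE sits in bit 0 and (per the macros further down) IS_MMAPPED in bit 1, with SIZE_BITS masking them off when the real size is needed. A minimal sketch of that encoding, using illustrative names rather than the file's own macros:

#include <assert.h>
#include <stddef.h>

#define PREV_INUSE  0x1UL  /* previous chunk is in use */
#define IS_MMAPPED  0x2UL  /* chunk was obtained with mmap() */
#define SIZE_BITS   (PREV_INUSE | IS_MMAPPED)

int main(void)
{
	/* A 48-byte chunk whose predecessor is in use: one word carries both. */
	size_t size_field = 48 | PREV_INUSE;

	assert((size_field & ~SIZE_BITS) == 48);   /* real chunk size */
	assert(size_field & PREV_INUSE);           /* flag still readable */
	assert(!(size_field & IS_MMAPPED));
	return 0;
}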
308 2. Chunks allocated via mmap, which have the second-lowest-order
326 the same-sized chunks, but facilitates best-fit allocation for
336 * `top': The top-most available chunk (i.e., the one bordering the
343 most recently split (non-top) chunk. This bin is checked
344 before other non-fitting chunks, so as to provide better
357 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
363 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
399 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
404 ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
421 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
425 #define prev_inuse(p) ((p)->size & PREV_INUSE)
429 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
434 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
437 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
442 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
445 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
448 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
459 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
463 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
467 #define set_head(p, s) ((p)->size = (s))
471 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
481 heads of (initially empty) doubly-linked lists of chunks, laid out
517 #define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
519 #define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
571 *p = (mbinptr)((ulong)*p + gd->reloc_off); in malloc_bin_reloc()
591 memset((void *)new, 0, -increment); in sbrk()
607 debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start, in mem_malloc_init()
615 /* field-extraction macros */
617 #define first(b) ((b)->fd)
618 #define last(b) ((b)->bk)
647 #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
652 To help compensate for the large number of bins, a one-level index
653 structure is used for bin-by-bin searching. `binblocks' is a
654 one-word bitvector recording whether groups of BINBLOCKWIDTH bins
655 have any (possibly) non-empty bins, so they can be skipped over
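A rough standalone sketch of that index: one bit per group of BINBLOCKWIDTH bins, set when any bin in the group may be non-empty, so the scan can skip a whole group at a time (names and sizes here are illustrative, not the file's exact macros):

#include <stdio.h>

#define NAV            128          /* number of bins */
#define BINBLOCKWIDTH  4            /* bins per binblock */

static unsigned long binblocks;     /* one bit per group of BINBLOCKWIDTH bins */

#define idx2binblock(ix)   (1UL << ((ix) / BINBLOCKWIDTH))
#define mark_binblock(ix)  (binblocks |= idx2binblock(ix))

int main(void)
{
	mark_binblock(37);              /* pretend bin 37 just became non-empty */

	/* Look for the first possibly non-empty bin >= 10, skipping whole
	 * groups whose binblock bit is clear. */
	for (int idx = 10; idx < NAV; ) {
		if (!(binblocks & idx2binblock(idx))) {
			/* jump to the start of the next group */
			idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
			continue;
		}
		printf("first candidate group starts near bin %d\n", idx);
		break;
	}
	return 0;
}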
666 /* bin<->block macros */
678 /* variables holding tunable values */
686 static char* sbrk_base = (char*)(-1);
734 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
755 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
769 assert(next->prev_size == sz);
775 assert(p->fd->bk == p);
776 assert(p->bk->fd == p);
820 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
821 long room = sz - s;
855 Macro-based internal utilities
877 FD = BK->fd; \
878 P->bk = BK; \
879 P->fd = FD; \
880 FD->bk = BK->fd = P; \
886 FD = BK->fd; \
890 while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
891 BK = FD->bk; \
893 P->bk = BK; \
894 P->fd = FD; \
895 FD->bk = BK->fd = P; \
904 BK = P->bk; \
905 FD = P->fd; \
906 FD->bk = BK; \
907 BK->fd = FD; \
914 last_remainder->fd = last_remainder->bk = P; \
915 P->fd = P->bk = last_remainder; \
921 (last_remainder->fd = last_remainder->bk = last_remainder)
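The fragments above come from the list-maintenance macros: chunks sit on circular doubly-linked lists through their fd/bk fields, so linking and unlinking are a handful of pointer assignments. A self-contained sketch of the same unlink step on a simplified node type (illustrative names, not the file's macros):

#include <assert.h>

struct node {
	struct node *fd;	/* forward link */
	struct node *bk;	/* back link */
};

/* Remove p from whatever circular doubly-linked list it is on. */
static void unlink_node(struct node *p)
{
	struct node *bk = p->bk;
	struct node *fd = p->fd;

	fd->bk = bk;
	bk->fd = fd;
}

int main(void)
{
	struct node bin, a;

	/* One-element list headed by 'bin'. */
	bin.fd = bin.bk = &a;
	a.fd = a.bk = &bin;

	unlink_node(&a);
	assert(bin.fd == &bin && bin.bk == &bin);	/* list is empty again */
	return 0;
}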
937 size_t page_mask = malloc_getpagesize - 1;
941 static int fd = -1;
953 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
963 if(p == (mchunkptr)-1) return 0;
968 /* We demand that eight bytes into a page must be 8-byte aligned. */
975 p->prev_size = 0;
998 assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
1000 n_mmaps--;
1001 mmapped_mem -= (size + p->prev_size);
1003 ret = munmap((char *)p - p->prev_size, size + p->prev_size);
1005 /* munmap returns non-zero on failure */
1017 size_t page_mask = malloc_getpagesize - 1;
1018 INTERNAL_SIZE_T offset = p->prev_size;
1025 assert(((size + offset) & (malloc_getpagesize-1)) == 0);
1030 cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
1032 if (cp == (char *)-1) return 0;
1038 assert((p->prev_size == offset));
1039 set_head(p, (new_size - offset)|IS_MMAPPED);
1041 mmapped_mem -= size + offset;
1058 Extend the top-most chunk by obtaining memory from system.
1087 if (sbrk_base != (char*)(-1))
1088 sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
1106 if (sbrk_base == (char*)(-1)) /* First time through. Record base */
1109 sbrked_mem += brk - (char*)old_end;
1115 correction = (MALLOC_ALIGNMENT) - front_misalign;
1123 correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
1124 ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
1133 top_size = new_brk - brk + correction;
1150 old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
1152 chunk_at_offset(old_top, old_top_size )->size =
1154 chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
1168 assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
1182 obtain 8-byte alignment and/or to obtain a size of at least
1195 whenever possible. This limited use of a first-fit style
1202 any remainder. This search is strictly by best-fit; i.e.,
1208 the best-fit search rule. In effect, `top' is treated as
1221 Memory is gathered from the system (in system page-sized
1231 chunk borders either a previously allocated and still in-use chunk,
1258 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
1306 for (victim = last(bin); victim != bin; victim = victim->bk)
1309 remainder_size = victim_size - nb;
1313 --idx; /* adjust to rescan below after checking last remainder */
1330 /* Try to use the last split-off remainder */
1332 if ( (victim = last_remainder->fd) != last_remainder)
1335 remainder_size = victim_size - nb;
1337 if (remainder_size >= (long)MINSIZE) /* re-split */
1363 If there are any possibly nonempty big-enough blocks,
1375 idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
1395 for (victim = last(bin); victim != bin; victim = victim->bk)
1398 remainder_size = victim_size - nb;
1424 } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
1430 if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
1435 --startidx;
1458 if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1470 if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1526 /* free() is a no-op - all the memory will be freed on relocation */
1527 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
1535 hd = p->size;
1557 prevsz = p->prev_size;
1558 p = chunk_at_offset(p, -((long) prevsz));
1576 prevsz = p->prev_size;
1577 p = chunk_at_offset(p, -((long) prevsz));
1580 if (p->fd == last_remainder) /* keep as last_remainder */
1590 if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
1620 chunk can be extended, it is, else a malloc-copy-free sequence is
1630 size argument of zero (re)allocates a minimum-sized chunk.
1636 The old unix realloc convention of allowing the last-free'd chunk
1686 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
1688 panic("pre-reloc realloc() is not supported");
1706 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
1711 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
1736 set_head(top, (newsize - nb) | PREV_INUSE);
1776 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1778 set_head(top, (newsize - nb) | PREV_INUSE);
1792 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1804 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1827 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1835 if (newsize - nb >= MINSIZE) /* split off remainder */
1838 remainder_size = newsize - nb;
1868 8-byte alignment is guaranteed by normal malloc calls, so don't
1895 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
1915 * The attempt to over-allocate (with a size large enough to guarantee the
1929 /* Aligned -> return it */
1938 extra = alignment - (((unsigned long)(m)) % alignment);
1946 extra2 = alignment - (((unsigned long)(m)) % alignment);
1973 next aligned spot -- we've allocated enough total room so that
1977 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
1978 if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
1981 leadsize = brk - (char*)(p);
1982 newsize = chunksize(p) - leadsize;
1987 newp->prev_size = p->prev_size + leadsize;
2006 remainder_size = chunksize(p) - nb;
2052 return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
2089 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
2109 /* clear only the bytes from non-freshly-sbrked memory */
2115 MALLOC_ZERO(mem, csz - SIZE_SZ);
2145 memory to potentially reduce the system-level memory requirements
2154 structures will be left (one page or less). Non-zero arguments
2156 future expected allocations without having to re-obtain memory
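These lines describe malloc_trim(pad), which hands the unused top of the heap back to the system while keeping at most pad bytes (plus minimal bookkeeping) as slack for future allocations. A hosted usage sketch, assuming the glibc-style prototype in <malloc.h> (the interface may not be exposed in every build of this allocator):

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = malloc(4 * 1024 * 1024);
	/* ... use the buffer ... */
	free(buf);

	/* Return trimmed heap pages to the OS, keeping 128 KiB of slack. */
	if (malloc_trim(128 * 1024))
		puts("heap trimmed");
	return 0;
}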
2169 long top_size; /* Amount of top-most memory */
2171 char* current_brk; /* address returned by pre-check sbrk call */
2177 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
2191 new_brk = (char*)(MORECORE (-extra));
2197 top_size = current_brk - (char*)top;
2200 sbrked_mem = current_brk - sbrk_base;
2210 set_head(top, (top_size - extra) | PREV_INUSE);
2211 sbrked_mem -= extra;
2248 return chunksize(p) - SIZE_SZ;
2250 return chunksize(p) - 2*SIZE_SZ;
2275 for (p = last(b); p != b; p = p->bk) in malloc_update_mallinfo()
2290 current_mallinfo.uordblks = sbrked_mem - avail; in malloc_update_mallinfo()
2351 mallopt is the general SVID/XPG interface to tunable parameters.
2352 The format is to provide a (parameter-number, parameter-value) pair.
2357 See descriptions of tunable parameters above.
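Typical usage, assuming the conventional dlmalloc/SVID parameter names from <malloc.h> such as M_TRIM_THRESHOLD and M_TOP_PAD; mallopt() returns nonzero when the parameter is recognized and accepted:

#include <malloc.h>
#include <stdio.h>

int main(void)
{
	/* Raise the trim threshold to 256 KiB and keep 64 KiB of extra
	 * headroom on every request for more memory from the system. */
	if (!mallopt(M_TRIM_THRESHOLD, 256 * 1024))
		fprintf(stderr, "M_TRIM_THRESHOLD not supported\n");
	if (!mallopt(M_TOP_PAD, 64 * 1024))
		fprintf(stderr, "M_TOP_PAD not supported\n");
	return 0;
}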
2390 assert(gd->malloc_base); /* Set up by crt0.S */ in initf_malloc()
2391 gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN); in initf_malloc()
2392 gd->malloc_ptr = 0; in initf_malloc()
2409 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
2413 usage of 'assert' in non-WIN32 code
2419 * Fixed ordering problem with boundary-stamping
2433 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2436 * Use ordered bins instead of best-fit threshold
2437 * Eliminate block-local decls to simplify tracing and debugging.
2439 * Fix error occurring when initial sbrk_base not word-aligned.
2446 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2452 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
2459 * Use best fit for very large chunks to prevent some worst-cases.
2468 (wmglo@Dent.MED.Uni-Muenchen.DE).
2474 * malloc: swap order of clean-bin strategy;
2487 * Add stuff to allow compilation on non-ANSI compilers
2495 * tested on sparc, hp-700, dec-mips, rs6000
2500 * Based loosely on libg++-1.2X malloc. (It retains some of the overall