/* ---------- To make a malloc.h, start cutting here ------------ */

/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  to dl@cs.oswego.edu

* VERSION 2.6.6  Sun Mar  5 19:10:03 2000  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
	   ftp://g.oswego.edu/pub/misc/malloc.c
	 Check before installing!

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation
  below; a short usage sketch follows this list.)

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p; has no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc.  Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
  memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.
  valloc(size_t n);
     Equivalent to memalign(pagesize, n), where pagesize is the page
     size of the system (or as near to this as can be figured out from
     all the includes/defines below.)
  pvalloc(size_t n);
     Equivalent to valloc(minimum-page-that-holds(n)), that is,
     round up n to the nearest pagesize.
  calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
  cfree(Void_t* p);
     Equivalent to free(p).
  malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system. Return 1 if successful, else 0.
  malloc_usable_size(Void_t* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p. This may or may not report more bytes than were requested,
     due to alignment and minimum size constraints.
  malloc_stats();
     Prints brief summary statistics on stderr.
  mallinfo()
     Returns (by copy) a struct containing various summary statistics.
  mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below. Returns
     1 if successful in changing the parameter, else 0.
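
* Usage sketch

  An illustrative example (not part of this allocator) exercising a few
  of the routines above; error handling is kept minimal:

    void example(void)
    {
      void* p = malloc(100);
      void* q;
      if (p == 0) return;       (no space available)
      q = realloc(p, 200);      (may move; p is stale if q is non-null)
      if (q != 0) p = q;
      free(p);
    }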

* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but has been reported
       to work reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
			  8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
       ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.


  Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
			  8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit when the request
       size is treated as a long will return null.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       two exceptions:
	 1. Because requests for zero bytes allocate non-zero space,
	    the worst case wastage for a request of zero bytes is 24 bytes.
	 2. For requests >= mmap_threshold that are serviced via
	    mmap(), the worst case wastage is 8 bytes plus the remainder
	    from a system page (the minimal mmap unit); typically 4096 bytes.
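
       For example (a sketch assuming 4-byte size_t and the request
       rounding used here): malloc(13) needs 13 + 4 overhead = 17
       bytes, which rounds up to the next 8-byte multiple, 24.  The
       caller can use 20 of those 24 bytes, so 7 more bytes were
       allocated than were requested.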

* Limitations

    Here are some features that are NOT currently supported:

    * No user-definable hooks for callbacks and the like.
    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  Among other
    consequences, it uses a lot of macros.  Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.


  __STD_C                  (default: derived from C compiler defines)
     Nonzero if using ANSI-standard C compiler, a C++ compiler, or
     a C compiler sufficiently close to ANSI to get away with it.
  DEBUG                    (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  REALLOC_ZERO_BYTES_FREES (default: NOT defined)
     Define this if you think that realloc(p, 0) should be equivalent
     to free(p). Otherwise, since malloc returns a unique pointer for
     malloc(0), so does realloc(p, 0).
  HAVE_MEMCPY               (default: defined)
     Define if you are not otherwise using ANSI STD C, but still
     have memcpy and memset in your C library and want to use them.
     Otherwise, simple internal versions are supplied.
  USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
     Define as 1 if you want the C library versions of memset and
     memcpy called in realloc and calloc (otherwise macro versions are used).
     At least on some platforms, the simple macro versions usually
     outperform libc versions.
  HAVE_MMAP                 (default: defined as 1)
     Define to non-zero to optionally make malloc() use mmap() to
     allocate very large blocks.
  HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
     Define to non-zero to optionally make realloc() use mremap() to
     reallocate very large blocks.
  malloc_getpagesize        (default: derived from system #includes)
     Either a constant or routine call returning the system page size.
  HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
     Optionally define if you are on a system with a /usr/include/malloc.h
     that declares struct mallinfo. It is not at all necessary to
     define this even if you do, but will ensure consistency.
  INTERNAL_SIZE_T           (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.
  INTERNAL_LINUX_C_LIB      (default: NOT defined)
     Defined only when compiled as part of Linux libc.
     Also note that there is some odd internal name-mangling via defines
     (for example, internally, `malloc' is named `mALLOc') needed
     when compiling in this case. These look funny but don't otherwise
     affect anything.
  WIN32                     (default: undefined)
     Define this on MS win (95, nt) platforms to compile in sbrk emulation.
  LACKS_UNISTD_H            (default: undefined if not WIN32)
     Define this if your system does not have a <unistd.h>.
  LACKS_SYS_PARAM_H         (default: undefined if not WIN32)
     Define this if your system does not have a <sys/param.h>.
  MORECORE                  (default: sbrk)
     The name of the routine to call to obtain more memory from the system.
  MORECORE_FAILURE          (default: -1)
     The value returned upon failure of MORECORE.
  MORECORE_CLEARS           (default: 1)
     true (1) if the routine mapped to MORECORE zeroes out memory (which
     holds for sbrk).
  DEFAULT_TRIM_THRESHOLD
  DEFAULT_TOP_PAD
  DEFAULT_MMAP_THRESHOLD
  DEFAULT_MMAP_MAX
     Default values of tunable parameters (described in detail below)
     controlling interaction with host system routines (sbrk, mmap, etc).
     These values may also be changed dynamically via mallopt(). The
     preset defaults are those that give best performance for typical
     programs/systems.
  USE_DL_PREFIX             (default: undefined)
     Prefix all public routines with the string 'dl'.  Useful to
     quickly avoid procedure declaration conflicts and linker symbol
     conflicts with existing memory allocation routines.


*/




/* Preliminaries */

#ifndef __STD_C
#ifdef __STDC__
#define __STD_C     1
#else
#if __cplusplus
#define __STD_C     1
#else
#define __STD_C     0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#ifndef Void_t
#if (__STD_C || defined(WIN32))
#define Void_t      void
#else
#define Void_t      char
#endif
#endif /*Void_t*/

#if __STD_C
#include <stddef.h>   /* for size_t */
#else
#include <sys/types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>    /* needed for malloc_stats */


/*
  Compile-time options
*/


/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with -DDEBUG, a number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory.  The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling malloc_stats or mallinfo with DEBUG set will
    attempt to check every non-mmapped allocated and free chunk in the
    course of computing the summaries. (By nature, mmapped regions
    cannot be checked very much automatically.)

    Setting DEBUG may also be helpful if you are trying to modify
    this code. The assertions in the check routines spell out in more
    detail the assumptions and invariants underlying the algorithms.

*/

#if DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif
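
/*
  For example (illustrative; adjust to your build system), a debug
  build of a standalone copy of this file might be compiled as:

      cc -DDEBUG -O2 -c dlmalloc.c

  When DEBUG is not set, the assert() calls above compile away to
  nothing.
*/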


/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  Some people think it should. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/


/*   #define REALLOC_ZERO_BYTES_FREES */
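
/*
  To illustrate the difference (a sketch, not part of this allocator):

      void* p = malloc(16);
      void* q = realloc(p, 0);

  With REALLOC_ZERO_BYTES_FREES defined, q is null and p has been
  freed.  Without it (the default here), q points to a live
  minimum-sized chunk that must eventually be passed to free().
*/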


/*
  WIN32 causes an emulation of sbrk to be compiled in;
  mmap-based options are not currently supported in WIN32.
*/

/* #define WIN32 */
#ifdef WIN32
#define MORECORE wsbrk
#define HAVE_MMAP 0

#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H

/*
  Include 'windows.h' to get the necessary declarations for the
  Microsoft Visual C++ data structures and routines used in the 'sbrk'
  emulation.

  Define WIN32_LEAN_AND_MEAN so that only the essential Microsoft
  Visual C++ header files are included.
*/
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif



/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc. Otherwise simple
  macro versions are defined here.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are often enough faster than libc versions on many
  systems that it is better to use them.

*/

#define HAVE_MEMCPY

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
#else
#ifdef WIN32
/* On Win32 platforms, 'memset()' and 'memcpy()' are already declared in */
/* 'windows.h' */
#else
Void_t* memset();
Void_t* memcpy();
#endif
#endif
#endif

#if USE_MEMCPY

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
				     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
				     *mz++ = 0;                               \
	if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
				     *mz++ = 0; }}}                           \
				     *mz++ = 0;                               \
				     *mz++ = 0;                               \
				     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
				     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
				     *mcdst++ = *mcsrc++;                     \
	if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
				     *mcdst++ = *mcsrc++; }}}                 \
				     *mcdst++ = *mcsrc++;                     \
				     *mcdst++ = *mcsrc++;                     \
				     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */
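
/*
   (A Duff's device interleaves a switch with a loop: the switch jumps
   into the middle of the unrolled loop body to handle the leftover
   words, and each later pass runs the whole unrolled body.  A
   four-way sketch of the idea, zeroing n words through mzp, assuming
   n > 0:

       iters = (n + 3) / 4;
       switch (n % 4) {
	 case 0: do { *mzp++ = 0;
	 case 3:      *mzp++ = 0;
	 case 2:      *mzp++ = 0;
	 case 1:      *mzp++ = 0;
		 } while (--iters > 0);
       }

   The macros below use an eight-way version of the same pattern.)
*/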

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif


/*
  Define HAVE_MMAP to optionally make malloc() use mmap() to
  allocate very large blocks.  These will be returned to the
  operating system immediately after a free().
*/

#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#ifdef INTERNAL_LINUX_C_LIB
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif

#if HAVE_MMAP

#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#endif /* HAVE_MMAP */

/*
  Access to system page size. To the extent possible, this malloc
  manages memory from the system in page-size units.

  The following mechanics for getpagesize were adapted from
  bsd/gnu getpagesize.h
*/

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32
#        define malloc_getpagesize (4096) /* TBD: Use 'GetSystemInfo' instead */
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else
#                define malloc_getpagesize (4096) /* just guess */
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif


/*

  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing the same kind of
  information you can get from malloc_stats. It should work on
  any SVID/XPG compliant system that has a /usr/include/malloc.h
  defining struct mallinfo. (If you'd like to install such a thing
  yourself, cut out the preliminary declarations as described above
  and below and save them in a malloc.h file. But there's no
  compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc. Some of these fields are instead filled by
  mallinfo() with other numbers that might possibly be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.

*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#if HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else

/* SVID2/XPG mallinfo structure */

struct mallinfo {
  int arena;    /* total space allocated from system */
  int ordblks;  /* number of non-inuse chunks */
  int smblks;   /* unused -- always zero */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* total space in mmapped regions */
  int usmblks;  /* unused -- always zero */
  int fsmblks;  /* unused -- always zero */
  int uordblks; /* total allocated space */
  int fordblks; /* total non-inuse space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};

/* SVID2/XPG mallopt options */

#define M_MXFAST  1    /* UNUSED in this malloc */
#define M_NLBLKS  2    /* UNUSED in this malloc */
#define M_GRAIN   3    /* UNUSED in this malloc */
#define M_KEEP    4    /* UNUSED in this malloc */

#endif
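
/*
  For example (an illustrative sketch, not part of this allocator),
  the struct returned by mallinfo() is read like this:

      struct mallinfo mi = mallinfo();
      printf("arena %d, in use %d, free %d, trimmable %d\n",
	     mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
*/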

/* mallopt options that actually do something */

#define M_TRIM_THRESHOLD    -1
#define M_TOP_PAD           -2
#define M_MMAP_THRESHOLD    -3
#define M_MMAP_MAX          -4


#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
    M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
      to keep before releasing via malloc_trim in free().

      Automatic trimming is mainly useful in long-lived programs.
      Because trimming via sbrk can be slow on some systems, and can
      sometimes be wasteful (in cases where programs immediately
      afterward allocate more large chunks) the value should be high
      enough so that your overall system performance would improve by
      releasing.

      The trim threshold and the mmap control parameters (see below)
      can be traded off with one another. Trimming and mmapping are
      two different ways of releasing unused memory back to the
      system. Between these two, it is often possible to keep
      system-level demands of a long-lived program down to a bare
      minimum. For example, in one test suite of sessions measuring
      the XF86 X server on Linux, using a trim threshold of 128K and a
      mmap threshold of 192K led to near-minimal long term resource
      consumption.

      If you are using this malloc in a long-lived program, it should
      pay to experiment with these values.  As a rough guide, you
      might set it to a value close to the average size of a process
      (program) running on your system.  Releasing this much memory
      would allow such a process to run in memory.  Generally, it's
      worth it to tune for trimming rather than memory mapping when a
      program undergoes phases where several large chunks are
      allocated and released in ways that can reuse each other's
      storage, perhaps mixed with phases where there are no such
      chunks at all.  And in well-behaved long-lived programs,
      controlling release of large blocks via trimming versus mapping
      is usually faster.

      However, in most programs, these parameters serve mainly as
      protection against the system-level effects of carrying around
      massive amounts of unneeded memory. Since frequent calls to
      sbrk, mmap, and munmap otherwise degrade performance, the default
      parameters are set to relatively high values that serve only as
      safeguards.

      The default trim value is high enough to cause trimming only in
      fairly extreme (by current memory consumption standards) cases.
      It must be greater than page size to have any useful effect.  To
      disable trimming completely, you can set to (unsigned long)(-1);


*/
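
/*
  For example (an illustrative sketch; the value is arbitrary), a
  long-lived program could make free() return memory to the system
  more eagerly by lowering the threshold:

      mallopt(M_TRIM_THRESHOLD, 64 * 1024);
*/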


#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
    M_TOP_PAD is the amount of extra `padding' space to allocate or
      retain whenever sbrk is called. It is used in two ways internally:

      * When sbrk is called to extend the top of the arena to satisfy
	a new malloc request, this much padding is added to the sbrk
	request.

      * When malloc_trim is called automatically from free(),
	it is used as the `pad' argument.

      In both cases, the actual amount of padding is rounded
      so that the end of the arena is always a system page boundary.

      The main reason for using padding is to avoid calling sbrk so
      often. Having even a small pad greatly reduces the likelihood
      that nearly every malloc request during program start-up (or
      after trimming) will invoke sbrk, which needlessly wastes
      time.

      Automatic rounding-up to page-size units is normally sufficient
      to avoid measurable overhead, so the default is 0.  However, in
      systems where sbrk is relatively slow, it can pay to increase
      this value, at the expense of carrying around more memory than
      the program needs.

*/


#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif

/*

    M_MMAP_THRESHOLD is the request size threshold for using mmap()
      to service a request. Requests of at least this size that cannot
      be allocated using already-existing space will be serviced via mmap.
      (If enough normal freed space already exists it is used instead.)

      Using mmap segregates relatively large chunks of memory so that
      they can be individually obtained and released from the host
      system. A request serviced through mmap is never reused by any
      other request (at least not directly; the system may just so
      happen to remap successive requests to the same locations).

      Segregating space in this way has the benefit that mmapped space
      can ALWAYS be individually released back to the system, which
      helps keep the system level memory demands of a long-lived
      program low. Mapped memory can never become `locked' between
      other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.

      However, it has the disadvantages that:

	 1. The space cannot be reclaimed, consolidated, and then
	    used to service later requests, as happens with normal chunks.
	 2. It can lead to more wastage because of mmap page alignment
	    requirements.
	 3. It causes malloc performance to be more dependent on host
	    system memory management support routines which may vary in
	    implementation quality and may impose arbitrary
	    limitations. Generally, servicing a request via normal
	    malloc steps is faster than going through a system's mmap.

      Altogether, these considerations should lead you to use mmap
      only for relatively large requests.


*/


#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (64)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif

/*
    M_MMAP_MAX is the maximum number of requests to simultaneously
      service using mmap. This parameter exists because:

	 1. Some systems have a limited number of internal tables for
	    use by mmap.
	 2. In most systems, overreliance on mmap can degrade overall
	    performance.
	 3. If a program allocates many large regions, it is probably
	    better off using normal sbrk-based allocation routines that
	    can reclaim and reallocate normal heap memory. Using a
	    small value allows transition into this mode after the
	    first few allocations.

      Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
      the default value is 0, and attempts to set it to non-zero values
      in mallopt will fail.
*/
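
/*
  For example (an illustrative sketch; the values are arbitrary), a
  program could raise the mmap cutoff to 256K, cap concurrent mmapped
  regions at 16, or disable mmap use entirely:

      mallopt(M_MMAP_THRESHOLD, 256 * 1024);
      mallopt(M_MMAP_MAX, 16);
      mallopt(M_MMAP_MAX, 0);
*/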


/*
    USE_DL_PREFIX will prefix all public routines with the string 'dl'.
      Useful to quickly avoid procedure declaration conflicts and linker
      symbol conflicts with existing memory allocation routines.

*/

/* #define USE_DL_PREFIX */
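
/*
  With USE_DL_PREFIX defined, this allocator can coexist with the
  system allocator; callers then use the prefixed names (a sketch):

      void* p = dlmalloc(32);      this allocator
      void* q = malloc(32);        the system allocator
      dlfree(p);
      free(q);
*/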


/*

  Special defines for linux libc

  Except when compiled using these special defines for Linux libc
  using weak aliases, this malloc is NOT designed to work in
  multithreaded applications.  No semaphores or other concurrency
  control are provided to ensure that multiple malloc or free calls
  don't run at the same time, which could be disastrous. A single
  semaphore could be used across malloc, realloc, and free (which is
  essentially the effect of the linux weak alias approach). It would
  be hard to obtain finer granularity.

*/


#ifdef INTERNAL_LINUX_C_LIB

#if __STD_C

Void_t * __default_morecore_init (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;

#else

Void_t * __default_morecore_init ();
Void_t *(*__morecore)() = __default_morecore_init;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0
#define MORECORE_CLEARS 1

#else /* INTERNAL_LINUX_C_LIB */

#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* INTERNAL_LINUX_C_LIB */
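
/*
  MORECORE can be retargeted for systems without a real sbrk, e.g. a
  bare-metal environment.  A minimal sketch (hypothetical; the names
  and sizes are illustrative only) that doles out a static arena and
  never shrinks it:

      static char  arena[64 * 1024];
      static char* brk_ptr = arena;

      void* my_sbrk(ptrdiff_t increment)
      {
	char* old = brk_ptr;
	if (increment < 0 ||
	    increment > (arena + sizeof(arena)) - brk_ptr)
	  return (void*)MORECORE_FAILURE;
	brk_ptr += increment;
	return old;
      }

  together with: #define MORECORE my_sbrk and #define MORECORE_CLEARS 0
  (a static arena is zeroed only at program load, so it is safest not
  to claim sbrk-like clearing behavior).
*/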

#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)

#define cALLOc		__libc_calloc
#define fREe		__libc_free
#define mALLOc		__libc_malloc
#define mEMALIGn	__libc_memalign
#define rEALLOc		__libc_realloc
#define vALLOc		__libc_valloc
#define pvALLOc		__libc_pvalloc
#define mALLINFo	__libc_mallinfo
#define mALLOPt		__libc_mallopt

#pragma weak calloc = __libc_calloc
#pragma weak free = __libc_free
#pragma weak cfree = __libc_free
#pragma weak malloc = __libc_malloc
#pragma weak memalign = __libc_memalign
#pragma weak realloc = __libc_realloc
#pragma weak valloc = __libc_valloc
#pragma weak pvalloc = __libc_pvalloc
#pragma weak mallinfo = __libc_mallinfo
#pragma weak mallopt = __libc_mallopt

#else

#ifdef USE_DL_PREFIX
#define cALLOc		dlcalloc
#define fREe		dlfree
#define mALLOc		dlmalloc
#define mEMALIGn	dlmemalign
#define rEALLOc		dlrealloc
#define vALLOc		dlvalloc
#define pvALLOc		dlpvalloc
#define mALLINFo	dlmallinfo
#define mALLOPt		dlmallopt
#else /* USE_DL_PREFIX */
#define cALLOc		calloc
#define fREe		free
#define mALLOc		malloc
#define mEMALIGn	memalign
#define rEALLOc		realloc
#define vALLOc		valloc
#define pvALLOc		pvalloc
#define mALLINFo	mallinfo
#define mALLOPt		mallopt
#endif /* USE_DL_PREFIX */

#endif

/* Public routines */

#if __STD_C

Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     malloc_trim(size_t);
size_t  malloc_usable_size(Void_t*);
void    malloc_stats(void);
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
#else
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     malloc_trim();
size_t  malloc_usable_size();
void    malloc_stats();
int     mALLOPt();
struct mallinfo mALLINFo();
#endif


#ifdef __cplusplus
};  /* end of extern "C" */
#endif

/* ---------- To make a malloc.h, end cutting here ------------ */


/*
  Emulation of sbrk for WIN32.
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
							gNextAddress - gAddressBase,
							MEM_DECOMMIT);
	assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}


104840c85557Swdenk{
104940c85557Swdenk	void* tmp;
105040c85557Swdenk	if (size > 0)
105140c85557Swdenk	{
105240c85557Swdenk		if (gAddressBase == 0)
105340c85557Swdenk		{
105440c85557Swdenk			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
105540c85557Swdenk			gNextAddress = gAddressBase =
105640c85557Swdenk				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
105740c85557Swdenk											MEM_RESERVE, PAGE_NOACCESS);
105840c85557Swdenk		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
105940c85557SwdenkgAllocatedSize))
106040c85557Swdenk		{
106140c85557Swdenk			long new_size = max (NEXT_SIZE, AlignPage (size));
106240c85557Swdenk			void* new_address = (void*)(gAddressBase+gAllocatedSize);
106340c85557Swdenk			do
106440c85557Swdenk			{
106540c85557Swdenk				new_address = findRegion (new_address, new_size);
106640c85557Swdenk
106740c85557Swdenk				if (new_address == 0)
106840c85557Swdenk					return (void*)-1;
106940c85557Swdenk
107040c85557Swdenk				gAddressBase = gNextAddress =
107140c85557Swdenk					(unsigned int)VirtualAlloc (new_address, new_size,
107240c85557Swdenk												MEM_RESERVE, PAGE_NOACCESS);
10738bde7f77Swdenk				/* repeat in case of race condition */
10748bde7f77Swdenk				/* The region that we found has been snagged */
10758bde7f77Swdenk				/* by another thread */
107640c85557Swdenk			}
107740c85557Swdenk			while (gAddressBase == 0);
107840c85557Swdenk
107940c85557Swdenk			assert (new_address == (void*)gAddressBase);
108040c85557Swdenk
108140c85557Swdenk			gAllocatedSize = new_size;
108240c85557Swdenk
108340c85557Swdenk			if (!makeGmListElement ((void*)gAddressBase))
108440c85557Swdenk				return (void*)-1;
108540c85557Swdenk		}
108640c85557Swdenk		if ((size + gNextAddress) > AlignPage (gNextAddress))
108740c85557Swdenk		{
108840c85557Swdenk			void* res;
108940c85557Swdenk			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
109040c85557Swdenk								(size + gNextAddress -
109140c85557Swdenk								 AlignPage (gNextAddress)),
109240c85557Swdenk								MEM_COMMIT, PAGE_READWRITE);
109340c85557Swdenk			if (res == 0)
109440c85557Swdenk				return (void*)-1;
109540c85557Swdenk		}
109640c85557Swdenk		tmp = (void*)gNextAddress;
109740c85557Swdenk		gNextAddress = (unsigned int)tmp + size;
109840c85557Swdenk		return tmp;
109940c85557Swdenk	}
110040c85557Swdenk	else if (size < 0)
110140c85557Swdenk	{
110240c85557Swdenk		unsigned int alignedGoal = AlignPage (gNextAddress + size);
110340c85557Swdenk		/* Trim by releasing the virtual memory */
110440c85557Swdenk		if (alignedGoal >= gAddressBase)
110540c85557Swdenk		{
110640c85557Swdenk			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
110740c85557Swdenk						 MEM_DECOMMIT);
110840c85557Swdenk			gNextAddress = gNextAddress + size;
110940c85557Swdenk			return (void*)gNextAddress;
111040c85557Swdenk		}
111140c85557Swdenk		else
111240c85557Swdenk		{
111340c85557Swdenk			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
111440c85557Swdenk						 MEM_DECOMMIT);
111540c85557Swdenk			gNextAddress = gAddressBase;
111640c85557Swdenk			return (void*)-1;
111740c85557Swdenk		}
111840c85557Swdenk	}
111940c85557Swdenk	else
112040c85557Swdenk	{
112140c85557Swdenk		return (void*)gNextAddress;
112240c85557Swdenk	}
112340c85557Swdenk}
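
/*
  Note: wsbrk mirrors Unix sbrk semantics on top of VirtualAlloc:
  wsbrk(0) returns the current break (gNextAddress), a positive size
  grows the committed region and returns the previous break, and a
  negative size decommits pages and lowers the break.  (void*)-1 is
  the sbrk-style failure value.
*/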
112440c85557Swdenk
112540c85557Swdenk#endif
112640c85557Swdenk
112740c85557Swdenk
112840c85557Swdenk
112940c85557Swdenk/*
113040c85557Swdenk  Type declarations
113140c85557Swdenk*/
113240c85557Swdenk
113340c85557Swdenk
113440c85557Swdenkstruct malloc_chunk
113540c85557Swdenk{
113640c85557Swdenk  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
113740c85557Swdenk  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
113840c85557Swdenk  struct malloc_chunk* fd;   /* double links -- used only if free. */
113940c85557Swdenk  struct malloc_chunk* bk;
114040c85557Swdenk};
114140c85557Swdenk
114240c85557Swdenktypedef struct malloc_chunk* mchunkptr;
114340c85557Swdenk
114440c85557Swdenk/*
114540c85557Swdenk
114640c85557Swdenk   malloc_chunk details:
114740c85557Swdenk
114840c85557Swdenk    (The following includes lightly edited explanations by Colin Plumb.)
114940c85557Swdenk
115040c85557Swdenk    Chunks of memory are maintained using a `boundary tag' method as
115140c85557Swdenk    described in e.g., Knuth or Standish.  (See the paper by Paul
115240c85557Swdenk    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
115340c85557Swdenk    survey of such techniques.)  Sizes of free chunks are stored both
115440c85557Swdenk    in the front of each chunk and at the end.  This makes
115540c85557Swdenk    consolidating fragmented chunks into bigger chunks very fast.  The
115640c85557Swdenk    size fields also hold bits representing whether chunks are free or
115740c85557Swdenk    in use.
115840c85557Swdenk
115940c85557Swdenk    An allocated chunk looks like this:
116040c85557Swdenk
116140c85557Swdenk
116240c85557Swdenk    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
116340c85557Swdenk	    |             Size of previous chunk, if allocated            | |
116440c85557Swdenk	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
116540c85557Swdenk	    |             Size of chunk, in bytes                         |P|
116640c85557Swdenk      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
116740c85557Swdenk	    |             User data starts here...                          .
116840c85557Swdenk	    .                                                               .
	    .             (malloc_usable_size() bytes)                      .
117040c85557Swdenk	    .                                                               |
117140c85557Swdenknextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
117240c85557Swdenk	    |             Size of chunk                                     |
117340c85557Swdenk	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
117440c85557Swdenk
117540c85557Swdenk
117640c85557Swdenk    Where "chunk" is the front of the chunk for the purpose of most of
117740c85557Swdenk    the malloc code, but "mem" is the pointer that is returned to the
117840c85557Swdenk    user.  "Nextchunk" is the beginning of the next contiguous chunk.
117940c85557Swdenk
    Chunks always begin on even word boundaries, so the mem portion
118140c85557Swdenk    (which is returned to the user) is also on an even word boundary, and
118240c85557Swdenk    thus double-word aligned.
118340c85557Swdenk
118440c85557Swdenk    Free chunks are stored in circular doubly-linked lists, and look like this:
118540c85557Swdenk
118640c85557Swdenk    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
118740c85557Swdenk	    |             Size of previous chunk                            |
118840c85557Swdenk	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
118940c85557Swdenk    `head:' |             Size of chunk, in bytes                         |P|
119040c85557Swdenk      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
119140c85557Swdenk	    |             Forward pointer to next chunk in list             |
119240c85557Swdenk	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
119340c85557Swdenk	    |             Back pointer to previous chunk in list            |
119440c85557Swdenk	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
119540c85557Swdenk	    |             Unused space (may be 0 bytes long)                .
119640c85557Swdenk	    .                                                               .
119740c85557Swdenk	    .                                                               |
119840c85557Swdenknextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
119940c85557Swdenk    `foot:' |             Size of chunk, in bytes                           |
120040c85557Swdenk	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
120140c85557Swdenk
120240c85557Swdenk    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
120340c85557Swdenk    chunk size (which is always a multiple of two words), is an in-use
120440c85557Swdenk    bit for the *previous* chunk.  If that bit is *clear*, then the
120540c85557Swdenk    word before the current chunk size contains the previous chunk
120640c85557Swdenk    size, and can be used to find the front of the previous chunk.
120740c85557Swdenk    (The very first chunk allocated always has this bit set,
120840c85557Swdenk    preventing access to non-existent (or non-owned) memory.)
120940c85557Swdenk
121040c85557Swdenk    Note that the `foot' of the current chunk is actually represented
121140c85557Swdenk    as the prev_size of the NEXT chunk. (This makes it easier to
121240c85557Swdenk    deal with alignments etc).
121340c85557Swdenk
121440c85557Swdenk    The two exceptions to all this are
121540c85557Swdenk
121640c85557Swdenk     1. The special chunk `top', which doesn't bother using the
121740c85557Swdenk	trailing size field since there is no
121840c85557Swdenk	next contiguous chunk that would have to index off it. (After
121940c85557Swdenk	initialization, `top' is forced to always exist.  If it would
122040c85557Swdenk	become less than MINSIZE bytes long, it is replenished via
122140c85557Swdenk	malloc_extend_top.)
122240c85557Swdenk
122340c85557Swdenk     2. Chunks allocated via mmap, which have the second-lowest-order
122440c85557Swdenk	bit (IS_MMAPPED) set in their size fields.  Because they are
122540c85557Swdenk	never merged or traversed from any other chunk, they have no
122640c85557Swdenk	foot size or inuse information.
122740c85557Swdenk
122840c85557Swdenk    Available chunks are kept in any of several places (all declared below):
122940c85557Swdenk
123040c85557Swdenk    * `av': An array of chunks serving as bin headers for consolidated
123140c85557Swdenk       chunks. Each bin is doubly linked.  The bins are approximately
123240c85557Swdenk       proportionally (log) spaced.  There are a lot of these bins
123340c85557Swdenk       (128). This may look excessive, but works very well in
123440c85557Swdenk       practice.  All procedures maintain the invariant that no
123540c85557Swdenk       consolidated chunk physically borders another one. Chunks in
123640c85557Swdenk       bins are kept in size order, with ties going to the
123740c85557Swdenk       approximately least recently used chunk.
123840c85557Swdenk
123940c85557Swdenk       The chunks in each bin are maintained in decreasing sorted order by
124040c85557Swdenk       size.  This is irrelevant for the small bins, which all contain
124140c85557Swdenk       the same-sized chunks, but facilitates best-fit allocation for
124240c85557Swdenk       larger chunks. (These lists are just sequential. Keeping them in
124340c85557Swdenk       order almost never requires enough traversal to warrant using
124440c85557Swdenk       fancier ordered data structures.)  Chunks of the same size are
124540c85557Swdenk       linked with the most recently freed at the front, and allocations
124640c85557Swdenk       are taken from the back.  This results in LRU or FIFO allocation
124740c85557Swdenk       order, which tends to give each chunk an equal opportunity to be
124840c85557Swdenk       consolidated with adjacent freed chunks, resulting in larger free
124940c85557Swdenk       chunks and less fragmentation.
125040c85557Swdenk
125140c85557Swdenk    * `top': The top-most available chunk (i.e., the one bordering the
125240c85557Swdenk       end of available memory) is treated specially. It is never
125340c85557Swdenk       included in any bin, is used only if no other chunk is
125440c85557Swdenk       available, and is released back to the system if it is very
125540c85557Swdenk       large (see M_TRIM_THRESHOLD).
125640c85557Swdenk
125740c85557Swdenk    * `last_remainder': A bin holding only the remainder of the
125840c85557Swdenk       most recently split (non-top) chunk. This bin is checked
125940c85557Swdenk       before other non-fitting chunks, so as to provide better
126040c85557Swdenk       locality for runs of sequentially allocated chunks.
126140c85557Swdenk
126240c85557Swdenk    *  Implicitly, through the host system's memory mapping tables.
126340c85557Swdenk       If supported, requests greater than a threshold are usually
126440c85557Swdenk       serviced via calls to mmap, and then later released via munmap.
126540c85557Swdenk
126640c85557Swdenk*/
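
/*
  Illustrative sketch (not compiled into the allocator): reading the
  boundary tags described above directly from the struct fields.  The
  equivalent macros (chunksize, prev_inuse, next_chunk, prev_chunk)
  are defined below.
*/
#if 0
static void example_boundary_tags(struct malloc_chunk* p)
{
  /* the low bit of `size' is the PREV_INUSE flag for the chunk before p */
  int prev_used = (int)(p->size & 1);

  /* chunk size with the two status bits masked off */
  INTERNAL_SIZE_T sz = p->size & ~(INTERNAL_SIZE_T)3;

  /* the next contiguous chunk starts sz bytes past p */
  struct malloc_chunk* nxt = (struct malloc_chunk*)((char*)p + sz);

  /* only when the previous chunk is free does p->prev_size hold its
     size, allowing a backwards step for consolidation */
  struct malloc_chunk* prv = prev_used ? 0
      : (struct malloc_chunk*)((char*)p - p->prev_size);

  (void)nxt;
  (void)prv;
}
#endif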
126740c85557Swdenk
126840c85557Swdenk
126940c85557Swdenk
127040c85557Swdenk
127140c85557Swdenk
127240c85557Swdenk/*  sizes, alignments */
127340c85557Swdenk
127440c85557Swdenk#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
127540c85557Swdenk#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
127640c85557Swdenk#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
127740c85557Swdenk#define MINSIZE                (sizeof(struct malloc_chunk))
127840c85557Swdenk
127940c85557Swdenk/* conversion from malloc headers to user pointers, and back */
128040c85557Swdenk
128140c85557Swdenk#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
128240c85557Swdenk#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
128340c85557Swdenk
128440c85557Swdenk/* pad request bytes into a usable size */
128540c85557Swdenk
128640c85557Swdenk#define request2size(req) \
128740c85557Swdenk (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
128840c85557Swdenk  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
128940c85557Swdenk   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
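
/*
  Worked example (assuming a 4-byte INTERNAL_SIZE_T, so SIZE_SZ == 4,
  MALLOC_ALIGNMENT == 8, MINSIZE == 16):
    request2size(1)  == 16   clamped up to MINSIZE
    request2size(13) == 24   (13 + 4 + 7) & ~7
    request2size(24) == 32   (24 + 4 + 7) & ~7
*/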
129040c85557Swdenk
129140c85557Swdenk/* Check if m has acceptable alignment */
129240c85557Swdenk
129340c85557Swdenk#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
129440c85557Swdenk
129540c85557Swdenk
129640c85557Swdenk
129740c85557Swdenk
129840c85557Swdenk/*
129940c85557Swdenk  Physical chunk operations
130040c85557Swdenk*/
130140c85557Swdenk
130240c85557Swdenk
130340c85557Swdenk/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
130440c85557Swdenk
130540c85557Swdenk#define PREV_INUSE 0x1
130640c85557Swdenk
130740c85557Swdenk/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
130840c85557Swdenk
130940c85557Swdenk#define IS_MMAPPED 0x2
131040c85557Swdenk
131140c85557Swdenk/* Bits to mask off when extracting size */
131240c85557Swdenk
131340c85557Swdenk#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
131440c85557Swdenk
131540c85557Swdenk
131640c85557Swdenk/* Ptr to next physical malloc_chunk. */
131740c85557Swdenk
131840c85557Swdenk#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
131940c85557Swdenk
132040c85557Swdenk/* Ptr to previous physical malloc_chunk */
132140c85557Swdenk
132240c85557Swdenk#define prev_chunk(p)\
132340c85557Swdenk   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
132440c85557Swdenk
132540c85557Swdenk
132640c85557Swdenk/* Treat space at ptr + offset as a chunk */
132740c85557Swdenk
132840c85557Swdenk#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
132940c85557Swdenk
133040c85557Swdenk
133140c85557Swdenk
133240c85557Swdenk
133340c85557Swdenk/*
133440c85557Swdenk  Dealing with use bits
133540c85557Swdenk*/
133640c85557Swdenk
133740c85557Swdenk/* extract p's inuse bit */
133840c85557Swdenk
133940c85557Swdenk#define inuse(p)\
134040c85557Swdenk((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
134140c85557Swdenk
134240c85557Swdenk/* extract inuse bit of previous chunk */
134340c85557Swdenk
134440c85557Swdenk#define prev_inuse(p)  ((p)->size & PREV_INUSE)
134540c85557Swdenk
134640c85557Swdenk/* check for mmap()'ed chunk */
134740c85557Swdenk
134840c85557Swdenk#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
134940c85557Swdenk
135040c85557Swdenk/* set/clear chunk as in use without otherwise disturbing */
135140c85557Swdenk
135240c85557Swdenk#define set_inuse(p)\
135340c85557Swdenk((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
135440c85557Swdenk
135540c85557Swdenk#define clear_inuse(p)\
135640c85557Swdenk((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
135740c85557Swdenk
135840c85557Swdenk/* check/set/clear inuse bits in known places */
135940c85557Swdenk
136040c85557Swdenk#define inuse_bit_at_offset(p, s)\
136140c85557Swdenk (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
136240c85557Swdenk
136340c85557Swdenk#define set_inuse_bit_at_offset(p, s)\
136440c85557Swdenk (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
136540c85557Swdenk
136640c85557Swdenk#define clear_inuse_bit_at_offset(p, s)\
136740c85557Swdenk (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
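
/*
  Sketch (not compiled): the in-use bit for a chunk p is kept in the
  header of the *next* physical chunk, which is why inuse(p) first
  steps forward by p's own size.  Written out long-hand:
*/
#if 0
static int example_inuse(mchunkptr p)
{
  mchunkptr nxt = (mchunkptr)((char*)p + (p->size & ~PREV_INUSE));
  return (int)(nxt->size & PREV_INUSE);  /* same result as inuse(p) */
}
#endif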
136840c85557Swdenk
136940c85557Swdenk
137040c85557Swdenk
137140c85557Swdenk
137240c85557Swdenk/*
137340c85557Swdenk  Dealing with size fields
137440c85557Swdenk*/
137540c85557Swdenk
137640c85557Swdenk/* Get size, ignoring use bits */
137740c85557Swdenk
137840c85557Swdenk#define chunksize(p)          ((p)->size & ~(SIZE_BITS))
137940c85557Swdenk
138040c85557Swdenk/* Set size at head, without disturbing its use bit */
138140c85557Swdenk
138240c85557Swdenk#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
138340c85557Swdenk
138440c85557Swdenk/* Set size/use ignoring previous bits in header */
138540c85557Swdenk
138640c85557Swdenk#define set_head(p, s)        ((p)->size = (s))
138740c85557Swdenk
138840c85557Swdenk/* Set size at footer (only when chunk is not in use) */
138940c85557Swdenk
139040c85557Swdenk#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
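
/*
  Sketch (not compiled): set_head/set_foot in action.  This is the
  core of the sequence mALLOc uses below when it splits a remainder
  off a chunk that is too big: the front part becomes the allocation,
  the back part stays free and therefore needs a matching foot.
*/
#if 0
static void example_split(mchunkptr victim, INTERNAL_SIZE_T nb)
{
  INTERNAL_SIZE_T remainder_size = chunksize(victim) - nb;
  mchunkptr remainder = chunk_at_offset(victim, nb);

  set_head(victim, nb | PREV_INUSE);               /* allocated front */
  set_head(remainder, remainder_size | PREV_INUSE);
  set_foot(remainder, remainder_size);             /* free back part  */
}
#endif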
139140c85557Swdenk
139240c85557Swdenk
139340c85557Swdenk
139440c85557Swdenk
139540c85557Swdenk
139640c85557Swdenk/*
139740c85557Swdenk   Bins
139840c85557Swdenk
139940c85557Swdenk    The bins, `av_' are an array of pairs of pointers serving as the
140040c85557Swdenk    heads of (initially empty) doubly-linked lists of chunks, laid out
140140c85557Swdenk    in a way so that each pair can be treated as if it were in a
140240c85557Swdenk    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
140340c85557Swdenk    and chunks are the same).
140440c85557Swdenk
140540c85557Swdenk    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
140640c85557Swdenk    8 bytes apart. Larger bins are approximately logarithmically
140740c85557Swdenk    spaced. (See the table below.) The `av_' array is never mentioned
140840c85557Swdenk    directly in the code, but instead via bin access macros.
140940c85557Swdenk
141040c85557Swdenk    Bin layout:
141140c85557Swdenk
141240c85557Swdenk    64 bins of size       8
141340c85557Swdenk    32 bins of size      64
141440c85557Swdenk    16 bins of size     512
141540c85557Swdenk     8 bins of size    4096
141640c85557Swdenk     4 bins of size   32768
141740c85557Swdenk     2 bins of size  262144
141840c85557Swdenk     1 bin  of size what's left
141940c85557Swdenk
142040c85557Swdenk    There is actually a little bit of slop in the numbers in bin_index
142140c85557Swdenk    for the sake of speed. This makes no difference elsewhere.
142240c85557Swdenk
142340c85557Swdenk    The special chunks `top' and `last_remainder' get their own bins,
142440c85557Swdenk    (this is implemented via yet more trickery with the av_ array),
142540c85557Swdenk    although `top' is never properly linked to its bin since it is
142640c85557Swdenk    always handled specially.
142740c85557Swdenk
142840c85557Swdenk*/
142940c85557Swdenk
143040c85557Swdenk#define NAV             128   /* number of bins */
143140c85557Swdenk
143240c85557Swdenktypedef struct malloc_chunk* mbinptr;
143340c85557Swdenk
143440c85557Swdenk/* access macros */
143540c85557Swdenk
143640c85557Swdenk#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
143740c85557Swdenk#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
143840c85557Swdenk#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
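
/*
  For example (assuming 4-byte pointers and a 4-byte INTERNAL_SIZE_T):
  bin_at(i) points 8 bytes before &av_[2*i + 2], so when that address
  is treated as a malloc_chunk, its fd field lands on av_[2*i + 2] and
  its bk field on av_[2*i + 3] -- the pair of list heads for bin i.
*/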
143940c85557Swdenk
144040c85557Swdenk/*
144140c85557Swdenk   The first 2 bins are never indexed. The corresponding av_ cells are instead
144240c85557Swdenk   used for bookkeeping. This is not to save space, but to simplify
144340c85557Swdenk   indexing, maintain locality, and avoid some initialization tests.
144440c85557Swdenk*/
144540c85557Swdenk
144640c85557Swdenk#define top            (bin_at(0)->fd)   /* The topmost chunk */
144740c85557Swdenk#define last_remainder (bin_at(1))       /* remainder from last split */
144840c85557Swdenk
144940c85557Swdenk
145040c85557Swdenk/*
145140c85557Swdenk   Because top initially points to its own bin with initial
145240c85557Swdenk   zero size, thus forcing extension on the first malloc request,
145340c85557Swdenk   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to check in malloc_extend_top.
145540c85557Swdenk*/
145640c85557Swdenk
145740c85557Swdenk#define initial_top    ((mchunkptr)(bin_at(0)))
145840c85557Swdenk
145940c85557Swdenk/* Helper macro to initialize bins */
146040c85557Swdenk
146140c85557Swdenk#define IAV(i)  bin_at(i), bin_at(i)
146240c85557Swdenk
146340c85557Swdenkstatic mbinptr av_[NAV * 2 + 2] = {
146440c85557Swdenk 0, 0,
146540c85557Swdenk IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
146640c85557Swdenk IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
146740c85557Swdenk IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
146840c85557Swdenk IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
146940c85557Swdenk IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
147040c85557Swdenk IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
147140c85557Swdenk IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
147240c85557Swdenk IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
147340c85557Swdenk IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
147440c85557Swdenk IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
147540c85557Swdenk IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
147640c85557Swdenk IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
147740c85557Swdenk IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
147840c85557Swdenk IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
147940c85557Swdenk IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
148040c85557Swdenk IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
148140c85557Swdenk};
148240c85557Swdenk
148340c85557Swdenk
148440c85557Swdenk
148540c85557Swdenk/* field-extraction macros */
148640c85557Swdenk
148740c85557Swdenk#define first(b) ((b)->fd)
148840c85557Swdenk#define last(b)  ((b)->bk)
148940c85557Swdenk
149040c85557Swdenk/*
149140c85557Swdenk  Indexing into bins
149240c85557Swdenk*/
149340c85557Swdenk
149440c85557Swdenk#define bin_index(sz)                                                          \
149540c85557Swdenk(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
149640c85557Swdenk ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
149740c85557Swdenk ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
149840c85557Swdenk ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
149940c85557Swdenk ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
150040c85557Swdenk ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
150140c85557Swdenk					  126)
150240c85557Swdenk/*
150340c85557Swdenk  bins for chunks < 512 are all spaced 8 bytes apart, and hold
150440c85557Swdenk  identically sized chunks. This is exploited in malloc.
150540c85557Swdenk*/
150640c85557Swdenk
150740c85557Swdenk#define MAX_SMALLBIN         63
150840c85557Swdenk#define MAX_SMALLBIN_SIZE   512
150940c85557Swdenk#define SMALLBIN_WIDTH        8
151040c85557Swdenk
151140c85557Swdenk#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)
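
/*
  Worked examples: smallbin_index(16) == 2 and smallbin_index(504) == 63,
  while bin_index(512) == 56 + (512 >> 6) == 64 and
  bin_index(4096) == 91 + (4096 >> 9) == 99.
*/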
151240c85557Swdenk
151340c85557Swdenk/*
151440c85557Swdenk   Requests are `small' if both the corresponding and the next bin are small
151540c85557Swdenk*/
151640c85557Swdenk
151740c85557Swdenk#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
151840c85557Swdenk
151940c85557Swdenk
152040c85557Swdenk
152140c85557Swdenk/*
152240c85557Swdenk    To help compensate for the large number of bins, a one-level index
152340c85557Swdenk    structure is used for bin-by-bin searching.  `binblocks' is a
152440c85557Swdenk    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
152540c85557Swdenk    have any (possibly) non-empty bins, so they can be skipped over
152640c85557Swdenk    all at once during during traversals. The bits are NOT always
    all at once during traversals. The bits are NOT always
152840c85557Swdenk    when all are noticed to be empty during traversal in malloc.
152940c85557Swdenk*/
153040c85557Swdenk
153140c85557Swdenk#define BINBLOCKWIDTH     4   /* bins per block */
153240c85557Swdenk
153340c85557Swdenk#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */
153440c85557Swdenk
153540c85557Swdenk/* bin<->block macros */
153640c85557Swdenk
153740c85557Swdenk#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
153840c85557Swdenk#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
153940c85557Swdenk#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
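
/*
  E.g. with BINBLOCKWIDTH == 4, bins 8..11 share block 2, so
  idx2binblock(10) == (1 << 2) == 0x4, and mark_binblock(10) sets
  that bit in binblocks.
*/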
154040c85557Swdenk
154140c85557Swdenk
154240c85557Swdenk
154340c85557Swdenk
154440c85557Swdenk
154540c85557Swdenk/*  Other static bookkeeping data */
154640c85557Swdenk
154740c85557Swdenk/* variables holding tunable values */
154840c85557Swdenk
154940c85557Swdenkstatic unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
155040c85557Swdenkstatic unsigned long top_pad          = DEFAULT_TOP_PAD;
155140c85557Swdenkstatic unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
155240c85557Swdenkstatic unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
155340c85557Swdenk
155440c85557Swdenk/* The first value returned from sbrk */
155540c85557Swdenkstatic char* sbrk_base = (char*)(-1);
155640c85557Swdenk
155740c85557Swdenk/* The maximum memory obtained from system via sbrk */
155840c85557Swdenkstatic unsigned long max_sbrked_mem = 0;
155940c85557Swdenk
156040c85557Swdenk/* The maximum via either sbrk or mmap */
156140c85557Swdenkstatic unsigned long max_total_mem = 0;
156240c85557Swdenk
156340c85557Swdenk/* internal working copy of mallinfo */
156440c85557Swdenkstatic struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
156540c85557Swdenk
156640c85557Swdenk/* The total memory obtained from system via sbrk */
156740c85557Swdenk#define sbrked_mem  (current_mallinfo.arena)
156840c85557Swdenk
156940c85557Swdenk/* Tracking mmaps */
157040c85557Swdenk
157140c85557Swdenkstatic unsigned int n_mmaps = 0;
157240c85557Swdenkstatic unsigned int max_n_mmaps = 0;
157340c85557Swdenkstatic unsigned long mmapped_mem = 0;
157440c85557Swdenkstatic unsigned long max_mmapped_mem = 0;
157540c85557Swdenk
157640c85557Swdenk
157740c85557Swdenk
157840c85557Swdenk/*
157940c85557Swdenk  Debugging support
158040c85557Swdenk*/
158140c85557Swdenk
158240c85557Swdenk#if DEBUG
158340c85557Swdenk
158440c85557Swdenk
158540c85557Swdenk/*
158640c85557Swdenk  These routines make a number of assertions about the states
158740c85557Swdenk  of data structures that should be true at all times. If any
158840c85557Swdenk  are not true, it's very likely that a user program has somehow
158940c85557Swdenk  trashed memory. (It's also possible that there is a coding error
159040c85557Swdenk  in malloc. In which case, please report it!)
159140c85557Swdenk*/
159240c85557Swdenk
159340c85557Swdenk#if __STD_C
159440c85557Swdenkstatic void do_check_chunk(mchunkptr p)
159540c85557Swdenk#else
159640c85557Swdenkstatic void do_check_chunk(p) mchunkptr p;
159740c85557Swdenk#endif
159840c85557Swdenk{
159940c85557Swdenk  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
160040c85557Swdenk
160140c85557Swdenk  /* No checkable chunk is mmapped */
160240c85557Swdenk  assert(!chunk_is_mmapped(p));
160340c85557Swdenk
160440c85557Swdenk  /* Check for legal address ... */
160540c85557Swdenk  assert((char*)p >= sbrk_base);
160640c85557Swdenk  if (p != top)
160740c85557Swdenk    assert((char*)p + sz <= (char*)top);
160840c85557Swdenk  else
160940c85557Swdenk    assert((char*)p + sz <= sbrk_base + sbrked_mem);
161040c85557Swdenk
161140c85557Swdenk}
161240c85557Swdenk
161340c85557Swdenk
161440c85557Swdenk#if __STD_C
161540c85557Swdenkstatic void do_check_free_chunk(mchunkptr p)
161640c85557Swdenk#else
161740c85557Swdenkstatic void do_check_free_chunk(p) mchunkptr p;
161840c85557Swdenk#endif
161940c85557Swdenk{
162040c85557Swdenk  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
162140c85557Swdenk  mchunkptr next = chunk_at_offset(p, sz);
162240c85557Swdenk
162340c85557Swdenk  do_check_chunk(p);
162440c85557Swdenk
162540c85557Swdenk  /* Check whether it claims to be free ... */
162640c85557Swdenk  assert(!inuse(p));
162740c85557Swdenk
162840c85557Swdenk  /* Unless a special marker, must have OK fields */
162940c85557Swdenk  if ((long)sz >= (long)MINSIZE)
163040c85557Swdenk  {
163140c85557Swdenk    assert((sz & MALLOC_ALIGN_MASK) == 0);
163240c85557Swdenk    assert(aligned_OK(chunk2mem(p)));
163340c85557Swdenk    /* ... matching footer field */
163440c85557Swdenk    assert(next->prev_size == sz);
163540c85557Swdenk    /* ... and is fully consolidated */
163640c85557Swdenk    assert(prev_inuse(p));
163740c85557Swdenk    assert (next == top || inuse(next));
163840c85557Swdenk
163940c85557Swdenk    /* ... and has minimally sane links */
164040c85557Swdenk    assert(p->fd->bk == p);
164140c85557Swdenk    assert(p->bk->fd == p);
164240c85557Swdenk  }
164340c85557Swdenk  else /* markers are always of size SIZE_SZ */
164440c85557Swdenk    assert(sz == SIZE_SZ);
164540c85557Swdenk}
164640c85557Swdenk
164740c85557Swdenk#if __STD_C
164840c85557Swdenkstatic void do_check_inuse_chunk(mchunkptr p)
164940c85557Swdenk#else
165040c85557Swdenkstatic void do_check_inuse_chunk(p) mchunkptr p;
165140c85557Swdenk#endif
165240c85557Swdenk{
165340c85557Swdenk  mchunkptr next = next_chunk(p);
165440c85557Swdenk  do_check_chunk(p);
165540c85557Swdenk
165640c85557Swdenk  /* Check whether it claims to be in use ... */
165740c85557Swdenk  assert(inuse(p));
165840c85557Swdenk
165940c85557Swdenk  /* ... and is surrounded by OK chunks.
166040c85557Swdenk    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth checking them.
166240c85557Swdenk  */
166340c85557Swdenk  if (!prev_inuse(p))
166440c85557Swdenk  {
166540c85557Swdenk    mchunkptr prv = prev_chunk(p);
166640c85557Swdenk    assert(next_chunk(prv) == p);
166740c85557Swdenk    do_check_free_chunk(prv);
166840c85557Swdenk  }
166940c85557Swdenk  if (next == top)
167040c85557Swdenk  {
167140c85557Swdenk    assert(prev_inuse(next));
167240c85557Swdenk    assert(chunksize(next) >= MINSIZE);
167340c85557Swdenk  }
167440c85557Swdenk  else if (!inuse(next))
167540c85557Swdenk    do_check_free_chunk(next);
167640c85557Swdenk
167740c85557Swdenk}
167840c85557Swdenk
167940c85557Swdenk#if __STD_C
168040c85557Swdenkstatic void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
168140c85557Swdenk#else
168240c85557Swdenkstatic void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
168340c85557Swdenk#endif
168440c85557Swdenk{
168540c85557Swdenk  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
168640c85557Swdenk  long room = sz - s;
168740c85557Swdenk
168840c85557Swdenk  do_check_inuse_chunk(p);
168940c85557Swdenk
169040c85557Swdenk  /* Legal size ... */
169140c85557Swdenk  assert((long)sz >= (long)MINSIZE);
169240c85557Swdenk  assert((sz & MALLOC_ALIGN_MASK) == 0);
169340c85557Swdenk  assert(room >= 0);
169440c85557Swdenk  assert(room < (long)MINSIZE);
169540c85557Swdenk
169640c85557Swdenk  /* ... and alignment */
169740c85557Swdenk  assert(aligned_OK(chunk2mem(p)));
169840c85557Swdenk
169940c85557Swdenk
170040c85557Swdenk  /* ... and was allocated at front of an available chunk */
170140c85557Swdenk  assert(prev_inuse(p));
170240c85557Swdenk
170340c85557Swdenk}
170440c85557Swdenk
170540c85557Swdenk
170640c85557Swdenk#define check_free_chunk(P)  do_check_free_chunk(P)
170740c85557Swdenk#define check_inuse_chunk(P) do_check_inuse_chunk(P)
170840c85557Swdenk#define check_chunk(P) do_check_chunk(P)
170940c85557Swdenk#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
171040c85557Swdenk#else
171140c85557Swdenk#define check_free_chunk(P)
171240c85557Swdenk#define check_inuse_chunk(P)
171340c85557Swdenk#define check_chunk(P)
171440c85557Swdenk#define check_malloced_chunk(P,N)
171540c85557Swdenk#endif
171640c85557Swdenk
171740c85557Swdenk
171840c85557Swdenk
171940c85557Swdenk/*
172040c85557Swdenk  Macro-based internal utilities
172140c85557Swdenk*/
172240c85557Swdenk
172340c85557Swdenk
172440c85557Swdenk/*
172540c85557Swdenk  Linking chunks in bin lists.
172640c85557Swdenk  Call these only with variables, not arbitrary expressions, as arguments.
172740c85557Swdenk*/
172840c85557Swdenk
172940c85557Swdenk/*
173040c85557Swdenk  Place chunk p of size s in its bin, in size order,
173140c85557Swdenk  putting it ahead of others of same size.
173240c85557Swdenk*/
173340c85557Swdenk
173440c85557Swdenk
173540c85557Swdenk#define frontlink(P, S, IDX, BK, FD)                                          \
173640c85557Swdenk{                                                                             \
173740c85557Swdenk  if (S < MAX_SMALLBIN_SIZE)                                                  \
173840c85557Swdenk  {                                                                           \
173940c85557Swdenk    IDX = smallbin_index(S);                                                  \
174040c85557Swdenk    mark_binblock(IDX);                                                       \
174140c85557Swdenk    BK = bin_at(IDX);                                                         \
174240c85557Swdenk    FD = BK->fd;                                                              \
174340c85557Swdenk    P->bk = BK;                                                               \
174440c85557Swdenk    P->fd = FD;                                                               \
174540c85557Swdenk    FD->bk = BK->fd = P;                                                      \
174640c85557Swdenk  }                                                                           \
174740c85557Swdenk  else                                                                        \
174840c85557Swdenk  {                                                                           \
174940c85557Swdenk    IDX = bin_index(S);                                                       \
175040c85557Swdenk    BK = bin_at(IDX);                                                         \
175140c85557Swdenk    FD = BK->fd;                                                              \
175240c85557Swdenk    if (FD == BK) mark_binblock(IDX);                                         \
175340c85557Swdenk    else                                                                      \
175440c85557Swdenk    {                                                                         \
175540c85557Swdenk      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
175640c85557Swdenk      BK = FD->bk;                                                            \
175740c85557Swdenk    }                                                                         \
175840c85557Swdenk    P->bk = BK;                                                               \
175940c85557Swdenk    P->fd = FD;                                                               \
176040c85557Swdenk    FD->bk = BK->fd = P;                                                      \
176140c85557Swdenk  }                                                                           \
176240c85557Swdenk}
176340c85557Swdenk
176440c85557Swdenk
176540c85557Swdenk/* take a chunk off a list */
176640c85557Swdenk
176740c85557Swdenk#define unlink(P, BK, FD)                                                     \
176840c85557Swdenk{                                                                             \
176940c85557Swdenk  BK = P->bk;                                                                 \
177040c85557Swdenk  FD = P->fd;                                                                 \
177140c85557Swdenk  FD->bk = BK;                                                                \
177240c85557Swdenk  BK->fd = FD;                                                                \
}
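
/*
  Sketch (not compiled): unlink and frontlink are inverses.  Removing
  a chunk from its bin and putting it straight back, as below, leaves
  the lists unchanged except that p moves ahead of other chunks of
  the same size.
*/
#if 0
static void example_relink(mchunkptr p)
{
  mchunkptr bck, fwd;
  int idx;
  INTERNAL_SIZE_T sz = chunksize(p);

  unlink(p, bck, fwd);              /* take p off its current list */
  frontlink(p, sz, idx, bck, fwd);  /* reinsert in size order */
}
#endif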
177440c85557Swdenk
177540c85557Swdenk/* Place p as the last remainder */
177640c85557Swdenk
177740c85557Swdenk#define link_last_remainder(P)                                                \
177840c85557Swdenk{                                                                             \
177940c85557Swdenk  last_remainder->fd = last_remainder->bk =  P;                               \
178040c85557Swdenk  P->fd = P->bk = last_remainder;                                             \
178140c85557Swdenk}
178240c85557Swdenk
178340c85557Swdenk/* Clear the last_remainder bin */
178440c85557Swdenk
178540c85557Swdenk#define clear_last_remainder \
178640c85557Swdenk  (last_remainder->fd = last_remainder->bk = last_remainder)
178740c85557Swdenk
178840c85557Swdenk
178940c85557Swdenk
179040c85557Swdenk
179140c85557Swdenk
179240c85557Swdenk/* Routines dealing with mmap(). */
179340c85557Swdenk
179440c85557Swdenk#if HAVE_MMAP
179540c85557Swdenk
179640c85557Swdenk#if __STD_C
179740c85557Swdenkstatic mchunkptr mmap_chunk(size_t size)
179840c85557Swdenk#else
179940c85557Swdenkstatic mchunkptr mmap_chunk(size) size_t size;
180040c85557Swdenk#endif
180140c85557Swdenk{
180240c85557Swdenk  size_t page_mask = malloc_getpagesize - 1;
180340c85557Swdenk  mchunkptr p;
180440c85557Swdenk
180540c85557Swdenk#ifndef MAP_ANONYMOUS
180640c85557Swdenk  static int fd = -1;
180740c85557Swdenk#endif
180840c85557Swdenk
180940c85557Swdenk  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
181040c85557Swdenk
181140c85557Swdenk  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
181240c85557Swdenk   * there is no following chunk whose prev_size field could be used.
181340c85557Swdenk   */
181440c85557Swdenk  size = (size + SIZE_SZ + page_mask) & ~page_mask;
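  /* E.g. with 4096-byte pages and SIZE_SZ == 4, a request for
   * 10000 bytes rounds up to (10000 + 4 + 4095) & ~4095 == 12288.
   */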
181540c85557Swdenk
181640c85557Swdenk#ifdef MAP_ANONYMOUS
181740c85557Swdenk  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
181840c85557Swdenk		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
181940c85557Swdenk#else /* !MAP_ANONYMOUS */
182040c85557Swdenk  if (fd < 0)
182140c85557Swdenk  {
182240c85557Swdenk    fd = open("/dev/zero", O_RDWR);
182340c85557Swdenk    if(fd < 0) return 0;
182440c85557Swdenk  }
182540c85557Swdenk  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
182640c85557Swdenk#endif
182740c85557Swdenk
182840c85557Swdenk  if(p == (mchunkptr)-1) return 0;
182940c85557Swdenk
183040c85557Swdenk  n_mmaps++;
183140c85557Swdenk  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
183240c85557Swdenk
183340c85557Swdenk  /* We demand that eight bytes into a page must be 8-byte aligned. */
183440c85557Swdenk  assert(aligned_OK(chunk2mem(p)));
183540c85557Swdenk
183640c85557Swdenk  /* The offset to the start of the mmapped region is stored
183740c85557Swdenk   * in the prev_size field of the chunk; normally it is zero,
183840c85557Swdenk   * but that can be changed in memalign().
183940c85557Swdenk   */
184040c85557Swdenk  p->prev_size = 0;
184140c85557Swdenk  set_head(p, size|IS_MMAPPED);
184240c85557Swdenk
184340c85557Swdenk  mmapped_mem += size;
184440c85557Swdenk  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
184540c85557Swdenk    max_mmapped_mem = mmapped_mem;
184640c85557Swdenk  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
184740c85557Swdenk    max_total_mem = mmapped_mem + sbrked_mem;
184840c85557Swdenk  return p;
184940c85557Swdenk}
185040c85557Swdenk
185140c85557Swdenk#if __STD_C
185240c85557Swdenkstatic void munmap_chunk(mchunkptr p)
185340c85557Swdenk#else
185440c85557Swdenkstatic void munmap_chunk(p) mchunkptr p;
185540c85557Swdenk#endif
185640c85557Swdenk{
185740c85557Swdenk  INTERNAL_SIZE_T size = chunksize(p);
185840c85557Swdenk  int ret;
185940c85557Swdenk
186040c85557Swdenk  assert (chunk_is_mmapped(p));
186140c85557Swdenk  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
186240c85557Swdenk  assert((n_mmaps > 0));
186340c85557Swdenk  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
186440c85557Swdenk
186540c85557Swdenk  n_mmaps--;
186640c85557Swdenk  mmapped_mem -= (size + p->prev_size);
186740c85557Swdenk
186840c85557Swdenk  ret = munmap((char *)p - p->prev_size, size + p->prev_size);
186940c85557Swdenk
187040c85557Swdenk  /* munmap returns non-zero on failure */
187140c85557Swdenk  assert(ret == 0);
187240c85557Swdenk}
187340c85557Swdenk
187440c85557Swdenk#if HAVE_MREMAP
187540c85557Swdenk
187640c85557Swdenk#if __STD_C
187740c85557Swdenkstatic mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
187840c85557Swdenk#else
187940c85557Swdenkstatic mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
188040c85557Swdenk#endif
188140c85557Swdenk{
188240c85557Swdenk  size_t page_mask = malloc_getpagesize - 1;
188340c85557Swdenk  INTERNAL_SIZE_T offset = p->prev_size;
188440c85557Swdenk  INTERNAL_SIZE_T size = chunksize(p);
188540c85557Swdenk  char *cp;
188640c85557Swdenk
188740c85557Swdenk  assert (chunk_is_mmapped(p));
188840c85557Swdenk  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
188940c85557Swdenk  assert((n_mmaps > 0));
189040c85557Swdenk  assert(((size + offset) & (malloc_getpagesize-1)) == 0);
189140c85557Swdenk
189240c85557Swdenk  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
189340c85557Swdenk  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
189440c85557Swdenk
189540c85557Swdenk  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
189640c85557Swdenk
189740c85557Swdenk  if (cp == (char *)-1) return 0;
189840c85557Swdenk
189940c85557Swdenk  p = (mchunkptr)(cp + offset);
190040c85557Swdenk
190140c85557Swdenk  assert(aligned_OK(chunk2mem(p)));
190240c85557Swdenk
190340c85557Swdenk  assert((p->prev_size == offset));
190440c85557Swdenk  set_head(p, (new_size - offset)|IS_MMAPPED);
190540c85557Swdenk
190640c85557Swdenk  mmapped_mem -= size + offset;
190740c85557Swdenk  mmapped_mem += new_size;
190840c85557Swdenk  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
190940c85557Swdenk    max_mmapped_mem = mmapped_mem;
191040c85557Swdenk  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
191140c85557Swdenk    max_total_mem = mmapped_mem + sbrked_mem;
191240c85557Swdenk  return p;
191340c85557Swdenk}
191440c85557Swdenk
191540c85557Swdenk#endif /* HAVE_MREMAP */
191640c85557Swdenk
191740c85557Swdenk#endif /* HAVE_MMAP */
191840c85557Swdenk
191940c85557Swdenk
192040c85557Swdenk
192140c85557Swdenk
192240c85557Swdenk/*
192340c85557Swdenk  Extend the top-most chunk by obtaining memory from system.
192440c85557Swdenk  Main interface to sbrk (but see also malloc_trim).
192540c85557Swdenk*/
192640c85557Swdenk
192740c85557Swdenk#if __STD_C
192840c85557Swdenkstatic void malloc_extend_top(INTERNAL_SIZE_T nb)
192940c85557Swdenk#else
193040c85557Swdenkstatic void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
193140c85557Swdenk#endif
193240c85557Swdenk{
193340c85557Swdenk  char*     brk;                  /* return value from sbrk */
193440c85557Swdenk  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
193540c85557Swdenk  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
193640c85557Swdenk  char*     new_brk;              /* return of 2nd sbrk call */
193740c85557Swdenk  INTERNAL_SIZE_T top_size;       /* new size of top chunk */
193840c85557Swdenk
193940c85557Swdenk  mchunkptr old_top     = top;  /* Record state of old top */
194040c85557Swdenk  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
194140c85557Swdenk  char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
194240c85557Swdenk
194340c85557Swdenk  /* Pad request with top_pad plus minimal overhead */
194440c85557Swdenk
194540c85557Swdenk  INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
194640c85557Swdenk  unsigned long pagesz    = malloc_getpagesize;
194740c85557Swdenk
194840c85557Swdenk  /* If not the first time through, round to preserve page boundary */
194940c85557Swdenk  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below in case of an intervening foreign sbrk call.) */
195140c85557Swdenk
195240c85557Swdenk  if (sbrk_base != (char*)(-1))
195340c85557Swdenk    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
195440c85557Swdenk
195540c85557Swdenk  brk = (char*)(MORECORE (sbrk_size));
195640c85557Swdenk
195740c85557Swdenk  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
195840c85557Swdenk  if (brk == (char*)(MORECORE_FAILURE) ||
195940c85557Swdenk      (brk < old_end && old_top != initial_top))
196040c85557Swdenk    return;
196140c85557Swdenk
196240c85557Swdenk  sbrked_mem += sbrk_size;
196340c85557Swdenk
196440c85557Swdenk  if (brk == old_end) /* can just add bytes to current top */
196540c85557Swdenk  {
196640c85557Swdenk    top_size = sbrk_size + old_top_size;
196740c85557Swdenk    set_head(top, top_size | PREV_INUSE);
196840c85557Swdenk  }
196940c85557Swdenk  else
197040c85557Swdenk  {
197140c85557Swdenk    if (sbrk_base == (char*)(-1))  /* First time through. Record base */
197240c85557Swdenk      sbrk_base = brk;
197340c85557Swdenk    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
197440c85557Swdenk      sbrked_mem += brk - (char*)old_end;
197540c85557Swdenk
197640c85557Swdenk    /* Guarantee alignment of first new chunk made from this space */
197740c85557Swdenk    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
197840c85557Swdenk    if (front_misalign > 0)
197940c85557Swdenk    {
198040c85557Swdenk      correction = (MALLOC_ALIGNMENT) - front_misalign;
198140c85557Swdenk      brk += correction;
198240c85557Swdenk    }
198340c85557Swdenk    else
198440c85557Swdenk      correction = 0;
198540c85557Swdenk
198640c85557Swdenk    /* Guarantee the next brk will be at a page boundary */
198740c85557Swdenk
198840c85557Swdenk    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
198940c85557Swdenk		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
199040c85557Swdenk
199140c85557Swdenk    /* Allocate correction */
199240c85557Swdenk    new_brk = (char*)(MORECORE (correction));
199340c85557Swdenk    if (new_brk == (char*)(MORECORE_FAILURE)) return;
199440c85557Swdenk
199540c85557Swdenk    sbrked_mem += correction;
199640c85557Swdenk
199740c85557Swdenk    top = (mchunkptr)brk;
199840c85557Swdenk    top_size = new_brk - brk + correction;
199940c85557Swdenk    set_head(top, top_size | PREV_INUSE);
200040c85557Swdenk
200140c85557Swdenk    if (old_top != initial_top)
200240c85557Swdenk    {
200340c85557Swdenk
200440c85557Swdenk      /* There must have been an intervening foreign sbrk call. */
200540c85557Swdenk      /* A double fencepost is necessary to prevent consolidation */
200640c85557Swdenk
200740c85557Swdenk      /* If not enough space to do this, then user did something very wrong */
200840c85557Swdenk      if (old_top_size < MINSIZE)
200940c85557Swdenk      {
201040c85557Swdenk	set_head(top, PREV_INUSE); /* will force null return from malloc */
201140c85557Swdenk	return;
201240c85557Swdenk      }
201340c85557Swdenk
201440c85557Swdenk      /* Also keep size a multiple of MALLOC_ALIGNMENT */
201540c85557Swdenk      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
201640c85557Swdenk      set_head_size(old_top, old_top_size);
201740c85557Swdenk      chunk_at_offset(old_top, old_top_size          )->size =
201840c85557Swdenk	SIZE_SZ|PREV_INUSE;
201940c85557Swdenk      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
202040c85557Swdenk	SIZE_SZ|PREV_INUSE;
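      /* The two SIZE_SZ-sized fenceposts now sit where the old top
	 ended; each is permanently marked in use, so nothing can be
	 consolidated across the gap the foreign sbrk left. */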
202140c85557Swdenk      /* If possible, release the rest. */
202240c85557Swdenk      if (old_top_size >= MINSIZE)
202340c85557Swdenk	fREe(chunk2mem(old_top));
202440c85557Swdenk    }
202540c85557Swdenk  }
202640c85557Swdenk
202740c85557Swdenk  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
202840c85557Swdenk    max_sbrked_mem = sbrked_mem;
202940c85557Swdenk  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
203040c85557Swdenk    max_total_mem = mmapped_mem + sbrked_mem;
203140c85557Swdenk
203240c85557Swdenk  /* We always land on a page boundary */
203340c85557Swdenk  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
203440c85557Swdenk}
203540c85557Swdenk
203640c85557Swdenk
203740c85557Swdenk
203840c85557Swdenk
203940c85557Swdenk/* Main public routines */
204040c85557Swdenk
204140c85557Swdenk
204240c85557Swdenk/*
  Malloc Algorithm:
204440c85557Swdenk
204540c85557Swdenk    The requested size is first converted into a usable form, `nb'.
204640c85557Swdenk    This currently means to add 4 bytes overhead plus possibly more to
204740c85557Swdenk    obtain 8-byte alignment and/or to obtain a size of at least
204840c85557Swdenk    MINSIZE (currently 16 bytes), the smallest allocatable size.
204940c85557Swdenk    (All fits are considered `exact' if they are within MINSIZE bytes.)
205040c85557Swdenk
    From there, the first of the following steps that succeeds is taken:
205240c85557Swdenk
205340c85557Swdenk      1. The bin corresponding to the request size is scanned, and if
205440c85557Swdenk	 a chunk of exactly the right size is found, it is taken.
205540c85557Swdenk
205640c85557Swdenk      2. The most recently remaindered chunk is used if it is big
205740c85557Swdenk	 enough.  This is a form of (roving) first fit, used only in
205840c85557Swdenk	 the absence of exact fits. Runs of consecutive requests use
205940c85557Swdenk	 the remainder of the chunk used for the previous such request
206040c85557Swdenk	 whenever possible. This limited use of a first-fit style
206140c85557Swdenk	 allocation strategy tends to give contiguous chunks
206240c85557Swdenk	 coextensive lifetimes, which improves locality and can reduce
206340c85557Swdenk	 fragmentation in the long run.
206440c85557Swdenk
206540c85557Swdenk      3. Other bins are scanned in increasing size order, using a
206640c85557Swdenk	 chunk big enough to fulfill the request, and splitting off
206740c85557Swdenk	 any remainder.  This search is strictly by best-fit; i.e.,
206840c85557Swdenk	 the smallest (with ties going to approximately the least
206940c85557Swdenk	 recently used) chunk that fits is selected.
207040c85557Swdenk
207140c85557Swdenk      4. If large enough, the chunk bordering the end of memory
207240c85557Swdenk	 (`top') is split off. (This use of `top' is in accord with
207340c85557Swdenk	 the best-fit search rule.  In effect, `top' is treated as
207440c85557Swdenk	 larger (and thus less well fitting) than any other available
207540c85557Swdenk	 chunk since it can be extended to be as large as necessary
	 (up to system limitations).)
207740c85557Swdenk
207840c85557Swdenk      5. If the request size meets the mmap threshold and the
207940c85557Swdenk	 system supports mmap, and there are few enough currently
208040c85557Swdenk	 allocated mmapped regions, and a call to mmap succeeds,
208140c85557Swdenk	 the request is allocated via direct memory mapping.
208240c85557Swdenk
208340c85557Swdenk      6. Otherwise, the top of memory is extended by
208440c85557Swdenk	 obtaining more space from the system (normally using sbrk,
208540c85557Swdenk	 but definable to anything else via the MORECORE macro).
208640c85557Swdenk	 Memory is gathered from the system (in system page-sized
208740c85557Swdenk	 units) in a way that allows chunks obtained across different
208840c85557Swdenk	 sbrk calls to be consolidated, but does not require
208940c85557Swdenk	 contiguous memory. Thus, it should be safe to intersperse
209040c85557Swdenk	 mallocs with other sbrk calls.
209140c85557Swdenk
209240c85557Swdenk
      All allocations are made from the `lowest' part of any found
209440c85557Swdenk      chunk. (The implementation invariant is that prev_inuse is
209540c85557Swdenk      always true of any allocated chunk; i.e., that each allocated
209640c85557Swdenk      chunk borders either a previously allocated and still in-use chunk,
209740c85557Swdenk      or the base of its memory arena.)
209840c85557Swdenk
209940c85557Swdenk*/
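
/*
  Sketch (not compiled): the practical effect of step 2 above is that
  a run of equal-sized requests tends to be carved sequentially out of
  one larger chunk via last_remainder, so consecutive allocations are
  usually adjacent in memory.
*/
#if 0
static void example_locality(void)
{
  char* a = (char*)mALLOc(100);
  char* b = (char*)mALLOc(100);  /* typically placed right after a's chunk */
  fREe(b);
  fREe(a);
}
#endif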
210040c85557Swdenk
210140c85557Swdenk#if __STD_C
210240c85557SwdenkVoid_t* mALLOc(size_t bytes)
210340c85557Swdenk#else
210440c85557SwdenkVoid_t* mALLOc(bytes) size_t bytes;
210540c85557Swdenk#endif
210640c85557Swdenk{
210740c85557Swdenk  mchunkptr victim;                  /* inspected/selected chunk */
210840c85557Swdenk  INTERNAL_SIZE_T victim_size;       /* its size */
210940c85557Swdenk  int       idx;                     /* index for bin traversal */
211040c85557Swdenk  mbinptr   bin;                     /* associated bin */
211140c85557Swdenk  mchunkptr remainder;               /* remainder from a split */
211240c85557Swdenk  long      remainder_size;          /* its size */
211340c85557Swdenk  int       remainder_index;         /* its bin index */
211440c85557Swdenk  unsigned long block;               /* block traverser bit */
211540c85557Swdenk  int       startidx;                /* first bin of a traversed block */
211640c85557Swdenk  mchunkptr fwd;                     /* misc temp for linking */
211740c85557Swdenk  mchunkptr bck;                     /* misc temp for linking */
211840c85557Swdenk  mbinptr q;                         /* misc temp */
211940c85557Swdenk
212040c85557Swdenk  INTERNAL_SIZE_T nb;
212140c85557Swdenk
212240c85557Swdenk  if ((long)bytes < 0) return 0;
212340c85557Swdenk
212440c85557Swdenk  nb = request2size(bytes);  /* padded request size; */
212540c85557Swdenk
212640c85557Swdenk  /* Check for exact match in a bin */
212740c85557Swdenk
212840c85557Swdenk  if (is_small_request(nb))  /* Faster version for small requests */
212940c85557Swdenk  {
213040c85557Swdenk    idx = smallbin_index(nb);
213140c85557Swdenk
213240c85557Swdenk    /* No traversal or size check necessary for small bins.  */
213340c85557Swdenk
213440c85557Swdenk    q = bin_at(idx);
213540c85557Swdenk    victim = last(q);
213640c85557Swdenk
213740c85557Swdenk    /* Also scan the next one, since it would have a remainder < MINSIZE */
213840c85557Swdenk    if (victim == q)
213940c85557Swdenk    {
214040c85557Swdenk      q = next_bin(q);
214140c85557Swdenk      victim = last(q);
214240c85557Swdenk    }
214340c85557Swdenk    if (victim != q)
214440c85557Swdenk    {
214540c85557Swdenk      victim_size = chunksize(victim);
214640c85557Swdenk      unlink(victim, bck, fwd);
214740c85557Swdenk      set_inuse_bit_at_offset(victim, victim_size);
214840c85557Swdenk      check_malloced_chunk(victim, nb);
214940c85557Swdenk      return chunk2mem(victim);
215040c85557Swdenk    }
215140c85557Swdenk
215240c85557Swdenk    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
215340c85557Swdenk
215440c85557Swdenk  }
215540c85557Swdenk  else
215640c85557Swdenk  {
215740c85557Swdenk    idx = bin_index(nb);
215840c85557Swdenk    bin = bin_at(idx);
215940c85557Swdenk
216040c85557Swdenk    for (victim = last(bin); victim != bin; victim = victim->bk)
216140c85557Swdenk    {
216240c85557Swdenk      victim_size = chunksize(victim);
216340c85557Swdenk      remainder_size = victim_size - nb;
216440c85557Swdenk
216540c85557Swdenk      if (remainder_size >= (long)MINSIZE) /* too big */
216640c85557Swdenk      {
216740c85557Swdenk	--idx; /* adjust to rescan below after checking last remainder */
216840c85557Swdenk	break;
216940c85557Swdenk      }
217040c85557Swdenk
217140c85557Swdenk      else if (remainder_size >= 0) /* exact fit */
217240c85557Swdenk      {
217340c85557Swdenk	unlink(victim, bck, fwd);
217440c85557Swdenk	set_inuse_bit_at_offset(victim, victim_size);
217540c85557Swdenk	check_malloced_chunk(victim, nb);
217640c85557Swdenk	return chunk2mem(victim);
217740c85557Swdenk      }
217840c85557Swdenk    }
217940c85557Swdenk
218040c85557Swdenk    ++idx;
218140c85557Swdenk
218240c85557Swdenk  }
218340c85557Swdenk
218440c85557Swdenk  /* Try to use the last split-off remainder */
218540c85557Swdenk
218640c85557Swdenk  if ( (victim = last_remainder->fd) != last_remainder)
218740c85557Swdenk  {
218840c85557Swdenk    victim_size = chunksize(victim);
218940c85557Swdenk    remainder_size = victim_size - nb;
219040c85557Swdenk
219140c85557Swdenk    if (remainder_size >= (long)MINSIZE) /* re-split */
219240c85557Swdenk    {
219340c85557Swdenk      remainder = chunk_at_offset(victim, nb);
219440c85557Swdenk      set_head(victim, nb | PREV_INUSE);
219540c85557Swdenk      link_last_remainder(remainder);
219640c85557Swdenk      set_head(remainder, remainder_size | PREV_INUSE);
219740c85557Swdenk      set_foot(remainder, remainder_size);
219840c85557Swdenk      check_malloced_chunk(victim, nb);
219940c85557Swdenk      return chunk2mem(victim);
220040c85557Swdenk    }
220140c85557Swdenk
220240c85557Swdenk    clear_last_remainder;
220340c85557Swdenk
220440c85557Swdenk    if (remainder_size >= 0)  /* exhaust */
220540c85557Swdenk    {
220640c85557Swdenk      set_inuse_bit_at_offset(victim, victim_size);
220740c85557Swdenk      check_malloced_chunk(victim, nb);
220840c85557Swdenk      return chunk2mem(victim);
220940c85557Swdenk    }
221040c85557Swdenk
221140c85557Swdenk    /* Else place in bin */
221240c85557Swdenk
221340c85557Swdenk    frontlink(victim, victim_size, remainder_index, bck, fwd);
221440c85557Swdenk  }
221540c85557Swdenk
221640c85557Swdenk  /*
221740c85557Swdenk     If there are any possibly nonempty big-enough blocks,
221840c85557Swdenk     search for best fitting chunk by scanning bins in blockwidth units.
221940c85557Swdenk  */
222040c85557Swdenk
222140c85557Swdenk  if ( (block = idx2binblock(idx)) <= binblocks)
222240c85557Swdenk  {
222340c85557Swdenk
222440c85557Swdenk    /* Get to the first marked block */
222540c85557Swdenk
222640c85557Swdenk    if ( (block & binblocks) == 0)
222740c85557Swdenk    {
222840c85557Swdenk      /* force to an even block boundary */
222940c85557Swdenk      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
223040c85557Swdenk      block <<= 1;
223140c85557Swdenk      while ((block & binblocks) == 0)
223240c85557Swdenk      {
223340c85557Swdenk	idx += BINBLOCKWIDTH;
223440c85557Swdenk	block <<= 1;
223540c85557Swdenk      }
223640c85557Swdenk    }
223740c85557Swdenk
223840c85557Swdenk    /* For each possibly nonempty block ... */
223940c85557Swdenk    for (;;)
224040c85557Swdenk    {
224140c85557Swdenk      startidx = idx;          /* (track incomplete blocks) */
224240c85557Swdenk      q = bin = bin_at(idx);
224340c85557Swdenk
224440c85557Swdenk      /* For each bin in this block ... */
224540c85557Swdenk      do
224640c85557Swdenk      {
224740c85557Swdenk	/* Find and use first big enough chunk ... */
224840c85557Swdenk
224940c85557Swdenk	for (victim = last(bin); victim != bin; victim = victim->bk)
225040c85557Swdenk	{
225140c85557Swdenk	  victim_size = chunksize(victim);
225240c85557Swdenk	  remainder_size = victim_size - nb;
225340c85557Swdenk
225440c85557Swdenk	  if (remainder_size >= (long)MINSIZE) /* split */
225540c85557Swdenk	  {
225640c85557Swdenk	    remainder = chunk_at_offset(victim, nb);
225740c85557Swdenk	    set_head(victim, nb | PREV_INUSE);
225840c85557Swdenk	    unlink(victim, bck, fwd);
225940c85557Swdenk	    link_last_remainder(remainder);
226040c85557Swdenk	    set_head(remainder, remainder_size | PREV_INUSE);
226140c85557Swdenk	    set_foot(remainder, remainder_size);
226240c85557Swdenk	    check_malloced_chunk(victim, nb);
226340c85557Swdenk	    return chunk2mem(victim);
226440c85557Swdenk	  }
226540c85557Swdenk
226640c85557Swdenk	  else if (remainder_size >= 0)  /* take */
226740c85557Swdenk	  {
226840c85557Swdenk	    set_inuse_bit_at_offset(victim, victim_size);
226940c85557Swdenk	    unlink(victim, bck, fwd);
227040c85557Swdenk	    check_malloced_chunk(victim, nb);
227140c85557Swdenk	    return chunk2mem(victim);
227240c85557Swdenk	  }
227340c85557Swdenk
227440c85557Swdenk	}
227540c85557Swdenk
227640c85557Swdenk       bin = next_bin(bin);
227740c85557Swdenk
227840c85557Swdenk      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
227940c85557Swdenk
228040c85557Swdenk      /* Clear out the block bit. */
228140c85557Swdenk
228240c85557Swdenk      do   /* Possibly backtrack to try to clear a partial block */
228340c85557Swdenk      {
228440c85557Swdenk	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
228540c85557Swdenk	{
228640c85557Swdenk	  binblocks &= ~block;
228740c85557Swdenk	  break;
228840c85557Swdenk	}
228940c85557Swdenk	--startidx;
229040c85557Swdenk       q = prev_bin(q);
229140c85557Swdenk      } while (first(q) == q);
229240c85557Swdenk
229340c85557Swdenk      /* Get to the next possibly nonempty block */
229440c85557Swdenk
229540c85557Swdenk      if ( (block <<= 1) <= binblocks && (block != 0) )
229640c85557Swdenk      {
229740c85557Swdenk	while ((block & binblocks) == 0)
229840c85557Swdenk	{
229940c85557Swdenk	  idx += BINBLOCKWIDTH;
230040c85557Swdenk	  block <<= 1;
230140c85557Swdenk	}
230240c85557Swdenk      }
230340c85557Swdenk      else
230440c85557Swdenk	break;
230540c85557Swdenk    }
230640c85557Swdenk  }
230740c85557Swdenk
230840c85557Swdenk
230940c85557Swdenk  /* Try to use top chunk */
231040c85557Swdenk
231140c85557Swdenk  /* Require that there be a remainder, ensuring top always exists  */
231240c85557Swdenk  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
231340c85557Swdenk  {
231440c85557Swdenk
231540c85557Swdenk#if HAVE_MMAP
231640c85557Swdenk    /* If big and would otherwise need to extend, try to use mmap instead */
231740c85557Swdenk    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
231840c85557Swdenk	(victim = mmap_chunk(nb)) != 0)
231940c85557Swdenk      return chunk2mem(victim);
232040c85557Swdenk#endif
232140c85557Swdenk
232240c85557Swdenk    /* Try to extend */
232340c85557Swdenk    malloc_extend_top(nb);
232440c85557Swdenk    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
232540c85557Swdenk      return 0; /* propagate failure */
232640c85557Swdenk  }
232740c85557Swdenk
232840c85557Swdenk  victim = top;
232940c85557Swdenk  set_head(victim, nb | PREV_INUSE);
233040c85557Swdenk  top = chunk_at_offset(victim, nb);
233140c85557Swdenk  set_head(top, remainder_size | PREV_INUSE);
233240c85557Swdenk  check_malloced_chunk(victim, nb);
233340c85557Swdenk  return chunk2mem(victim);
233440c85557Swdenk
233540c85557Swdenk}
233640c85557Swdenk
233740c85557Swdenk
233840c85557Swdenk
233940c85557Swdenk
234040c85557Swdenk/*
234140c85557Swdenk
234240c85557Swdenk  free() algorithm :
234340c85557Swdenk
234440c85557Swdenk    cases:
234540c85557Swdenk
234640c85557Swdenk       1. free(0) has no effect.
234740c85557Swdenk
234840c85557Swdenk       2. If the chunk was allocated via mmap, it is released via munmap().
234940c85557Swdenk
235040c85557Swdenk       3. If a returned chunk borders the current high end of memory,
235140c85557Swdenk	  it is consolidated into the top, and if the total unused
235240c85557Swdenk	  topmost memory exceeds the trim threshold, malloc_trim is
235340c85557Swdenk	  called.
235440c85557Swdenk
235540c85557Swdenk       4. Other chunks are consolidated as they arrive, and
235640c85557Swdenk	  placed in corresponding bins. (This includes the case of
235740c85557Swdenk	  consolidating with the current `last_remainder').
235840c85557Swdenk
235940c85557Swdenk*/
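
/*
  A minimal caller-level sketch of the cases above (illustrative
  only; the sizes are hypothetical and assume the usual malloc/free
  entry points):

      void free_example(void)
      {
	char* p = (char*) malloc(200000);  .. large; possibly mmapped
	free(p);      .. case 2: munmap'ed if mmapped, or case 3:
			 merged into top and perhaps trimmed
	p = (char*) malloc(64);
	free(p);      .. case 3 or 4: consolidated with any free
			 neighbors, then merged into top or binned
	free(0);      .. case 1: no effect
      }
*/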
236040c85557Swdenk
236140c85557Swdenk
236240c85557Swdenk#if __STD_C
236340c85557Swdenkvoid fREe(Void_t* mem)
236440c85557Swdenk#else
236540c85557Swdenkvoid fREe(mem) Void_t* mem;
236640c85557Swdenk#endif
236740c85557Swdenk{
236840c85557Swdenk  mchunkptr p;         /* chunk corresponding to mem */
236940c85557Swdenk  INTERNAL_SIZE_T hd;  /* its head field */
237040c85557Swdenk  INTERNAL_SIZE_T sz;  /* its size */
237140c85557Swdenk  int       idx;       /* its bin index */
237240c85557Swdenk  mchunkptr next;      /* next contiguous chunk */
237340c85557Swdenk  INTERNAL_SIZE_T nextsz; /* its size */
237440c85557Swdenk  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
237540c85557Swdenk  mchunkptr bck;       /* misc temp for linking */
237640c85557Swdenk  mchunkptr fwd;       /* misc temp for linking */
237740c85557Swdenk  int       islr;      /* track whether merging with last_remainder */
237840c85557Swdenk
237940c85557Swdenk  if (mem == 0)                              /* free(0) has no effect */
238040c85557Swdenk    return;
238140c85557Swdenk
238240c85557Swdenk  p = mem2chunk(mem);
238340c85557Swdenk  hd = p->size;
238440c85557Swdenk
238540c85557Swdenk#if HAVE_MMAP
238640c85557Swdenk  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
238740c85557Swdenk  {
238840c85557Swdenk    munmap_chunk(p);
238940c85557Swdenk    return;
239040c85557Swdenk  }
239140c85557Swdenk#endif
239240c85557Swdenk
239340c85557Swdenk  check_inuse_chunk(p);
239440c85557Swdenk
239540c85557Swdenk  sz = hd & ~PREV_INUSE;
239640c85557Swdenk  next = chunk_at_offset(p, sz);
239740c85557Swdenk  nextsz = chunksize(next);
239840c85557Swdenk
239940c85557Swdenk  if (next == top)                            /* merge with top */
240040c85557Swdenk  {
240140c85557Swdenk    sz += nextsz;
240240c85557Swdenk
240340c85557Swdenk    if (!(hd & PREV_INUSE))                    /* consolidate backward */
240440c85557Swdenk    {
240540c85557Swdenk      prevsz = p->prev_size;
240640c85557Swdenk      p = chunk_at_offset(p, -((long) prevsz));
240740c85557Swdenk      sz += prevsz;
240840c85557Swdenk      unlink(p, bck, fwd);
240940c85557Swdenk    }
241040c85557Swdenk
241140c85557Swdenk    set_head(p, sz | PREV_INUSE);
241240c85557Swdenk    top = p;
241340c85557Swdenk    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
241440c85557Swdenk      malloc_trim(top_pad);
241540c85557Swdenk    return;
241640c85557Swdenk  }
241740c85557Swdenk
241840c85557Swdenk  set_head(next, nextsz);                    /* clear inuse bit */
241940c85557Swdenk
242040c85557Swdenk  islr = 0;
242140c85557Swdenk
242240c85557Swdenk  if (!(hd & PREV_INUSE))                    /* consolidate backward */
242340c85557Swdenk  {
242440c85557Swdenk    prevsz = p->prev_size;
242540c85557Swdenk    p = chunk_at_offset(p, -((long) prevsz));
242640c85557Swdenk    sz += prevsz;
242740c85557Swdenk
242840c85557Swdenk    if (p->fd == last_remainder)             /* keep as last_remainder */
242940c85557Swdenk      islr = 1;
243040c85557Swdenk    else
243140c85557Swdenk      unlink(p, bck, fwd);
243240c85557Swdenk  }
243340c85557Swdenk
243440c85557Swdenk  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
243540c85557Swdenk  {
243640c85557Swdenk    sz += nextsz;
243740c85557Swdenk
243840c85557Swdenk    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
243940c85557Swdenk    {
244040c85557Swdenk      islr = 1;
244140c85557Swdenk      link_last_remainder(p);
244240c85557Swdenk    }
244340c85557Swdenk    else
244440c85557Swdenk      unlink(next, bck, fwd);
244540c85557Swdenk  }
244640c85557Swdenk
244740c85557Swdenk
244840c85557Swdenk  set_head(p, sz | PREV_INUSE);
244940c85557Swdenk  set_foot(p, sz);
245040c85557Swdenk  if (!islr)
245140c85557Swdenk    frontlink(p, sz, idx, bck, fwd);
245240c85557Swdenk}
245340c85557Swdenk
245440c85557Swdenk
245540c85557Swdenk
245640c85557Swdenk
245740c85557Swdenk
245840c85557Swdenk/*
245940c85557Swdenk
246040c85557Swdenk  Realloc algorithm:
246140c85557Swdenk
246240c85557Swdenk    Chunks that were obtained via mmap cannot be extended or shrunk
246340c85557Swdenk    unless HAVE_MREMAP is defined, in which case mremap is used.
246440c85557Swdenk    Otherwise, if their reallocation is for additional space, they are
246540c85557Swdenk    copied.  If for less, they are just left alone.
246640c85557Swdenk
246740c85557Swdenk    Otherwise, if the reallocation is for additional space, and the
246840c85557Swdenk    chunk can be extended, it is, else a malloc-copy-free sequence is
246940c85557Swdenk    taken.  There are several different ways that a chunk could be
247040c85557Swdenk    extended. All are tried:
247140c85557Swdenk
247240c85557Swdenk       * Extending forward into following adjacent free chunk.
247340c85557Swdenk       * Shifting backwards, joining preceding adjacent space
247440c85557Swdenk       * Both shifting backwards and extending forward.
247540c85557Swdenk       * Extending into newly sbrked space
247640c85557Swdenk
247740c85557Swdenk    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
247840c85557Swdenk    size argument of zero (re)allocates a minimum-sized chunk.
247940c85557Swdenk
248040c85557Swdenk    If the reallocation is for less space, and the new request is for
248140c85557Swdenk    a `small' (<512 bytes) size, then the newly unused space is lopped
248240c85557Swdenk    off and freed.
248340c85557Swdenk
248440c85557Swdenk    The old unix realloc convention of allowing the last-free'd chunk
248540c85557Swdenk    to be used as an argument to realloc is no longer supported.
248640c85557Swdenk    I don't know of any programs still relying on this feature,
248740c85557Swdenk    and allowing it would also allow too many other incorrect
248840c85557Swdenk    usages of realloc to be sensible.
248940c85557Swdenk
249040c85557Swdenk
249140c85557Swdenk*/
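
/*
  A caller-level sketch of the contract above (illustrative only;
  minimal error handling):

      void realloc_example(void)
      {
	char* p = (char*) malloc(64);
	char* q;
	if (p == 0) return;
	q = (char*) realloc(p, 4096);  .. grown in place (forward,
					  backward, or into new space)
					  when possible, else moved
	if (q == 0) { free(p); return; }  .. on failure the old chunk
					     is left untouched
	q = (char*) realloc(q, 32);    .. shrinking: the newly unused
					  tail is split off and freed
					  when large enough
	free(q);
      }
*/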
249240c85557Swdenk
249340c85557Swdenk
249440c85557Swdenk#if __STD_C
249540c85557SwdenkVoid_t* rEALLOc(Void_t* oldmem, size_t bytes)
249640c85557Swdenk#else
249740c85557SwdenkVoid_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
249840c85557Swdenk#endif
249940c85557Swdenk{
250040c85557Swdenk  INTERNAL_SIZE_T    nb;      /* padded request size */
250140c85557Swdenk
250240c85557Swdenk  mchunkptr oldp;             /* chunk corresponding to oldmem */
250340c85557Swdenk  INTERNAL_SIZE_T    oldsize; /* its size */
250440c85557Swdenk
250540c85557Swdenk  mchunkptr newp;             /* chunk to return */
250640c85557Swdenk  INTERNAL_SIZE_T    newsize; /* its size */
250740c85557Swdenk  Void_t*   newmem;           /* corresponding user mem */
250840c85557Swdenk
250940c85557Swdenk  mchunkptr next;             /* next contiguous chunk after oldp */
251040c85557Swdenk  INTERNAL_SIZE_T  nextsize;  /* its size */
251140c85557Swdenk
251240c85557Swdenk  mchunkptr prev;             /* previous contiguous chunk before oldp */
251340c85557Swdenk  INTERNAL_SIZE_T  prevsize;  /* its size */
251440c85557Swdenk
251540c85557Swdenk  mchunkptr remainder;        /* holds split off extra space from newp */
251640c85557Swdenk  INTERNAL_SIZE_T  remainder_size;   /* its size */
251740c85557Swdenk
251840c85557Swdenk  mchunkptr bck;              /* misc temp for linking */
251940c85557Swdenk  mchunkptr fwd;              /* misc temp for linking */
252040c85557Swdenk
252140c85557Swdenk#ifdef REALLOC_ZERO_BYTES_FREES
252240c85557Swdenk  if (bytes == 0) { fREe(oldmem); return 0; }
252340c85557Swdenk#endif
252440c85557Swdenk
252540c85557Swdenk  if ((long)bytes < 0) return 0;
252640c85557Swdenk
252740c85557Swdenk  /* realloc of null is supposed to be same as malloc */
252840c85557Swdenk  if (oldmem == 0) return mALLOc(bytes);
252940c85557Swdenk
253040c85557Swdenk  newp    = oldp    = mem2chunk(oldmem);
253140c85557Swdenk  newsize = oldsize = chunksize(oldp);
253240c85557Swdenk
253340c85557Swdenk
253440c85557Swdenk  nb = request2size(bytes);
253540c85557Swdenk
253640c85557Swdenk#if HAVE_MMAP
253740c85557Swdenk  if (chunk_is_mmapped(oldp))
253840c85557Swdenk  {
253940c85557Swdenk#if HAVE_MREMAP
254040c85557Swdenk    newp = mremap_chunk(oldp, nb);
254140c85557Swdenk    if(newp) return chunk2mem(newp);
254240c85557Swdenk#endif
254340c85557Swdenk    /* Note the extra SIZE_SZ overhead. */
254440c85557Swdenk    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
254540c85557Swdenk    /* Must alloc, copy, free. */
254640c85557Swdenk    newmem = mALLOc(bytes);
254740c85557Swdenk    if (newmem == 0) return 0; /* propagate failure */
254840c85557Swdenk    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
254940c85557Swdenk    munmap_chunk(oldp);
255040c85557Swdenk    return newmem;
255140c85557Swdenk  }
255240c85557Swdenk#endif
255340c85557Swdenk
255440c85557Swdenk  check_inuse_chunk(oldp);
255540c85557Swdenk
255640c85557Swdenk  if ((long)(oldsize) < (long)(nb))
255740c85557Swdenk  {
255840c85557Swdenk
255940c85557Swdenk    /* Try expanding forward */
256040c85557Swdenk
256140c85557Swdenk    next = chunk_at_offset(oldp, oldsize);
256240c85557Swdenk    if (next == top || !inuse(next))
256340c85557Swdenk    {
256440c85557Swdenk      nextsize = chunksize(next);
256540c85557Swdenk
256640c85557Swdenk      /* Forward into top only if a remainder */
256740c85557Swdenk      if (next == top)
256840c85557Swdenk      {
256940c85557Swdenk	if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
257040c85557Swdenk	{
257140c85557Swdenk	  newsize += nextsize;
257240c85557Swdenk	  top = chunk_at_offset(oldp, nb);
257340c85557Swdenk	  set_head(top, (newsize - nb) | PREV_INUSE);
257440c85557Swdenk	  set_head_size(oldp, nb);
257540c85557Swdenk	  return chunk2mem(oldp);
257640c85557Swdenk	}
257740c85557Swdenk      }
257840c85557Swdenk
257940c85557Swdenk      /* Forward into next chunk */
258040c85557Swdenk      else if (((long)(nextsize + newsize) >= (long)(nb)))
258140c85557Swdenk      {
258240c85557Swdenk	unlink(next, bck, fwd);
258340c85557Swdenk	newsize  += nextsize;
258440c85557Swdenk	goto split;
258540c85557Swdenk      }
258640c85557Swdenk    }
258740c85557Swdenk    else
258840c85557Swdenk    {
258940c85557Swdenk      next = 0;
259040c85557Swdenk      nextsize = 0;
259140c85557Swdenk    }
259240c85557Swdenk
259340c85557Swdenk    /* Try shifting backwards. */
259440c85557Swdenk
259540c85557Swdenk    if (!prev_inuse(oldp))
259640c85557Swdenk    {
259740c85557Swdenk      prev = prev_chunk(oldp);
259840c85557Swdenk      prevsize = chunksize(prev);
259940c85557Swdenk
260040c85557Swdenk      /* try forward + backward first to save a later consolidation */
260140c85557Swdenk
260240c85557Swdenk      if (next != 0)
260340c85557Swdenk      {
260440c85557Swdenk	/* into top */
260540c85557Swdenk	if (next == top)
260640c85557Swdenk	{
260740c85557Swdenk	  if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
260840c85557Swdenk	  {
260940c85557Swdenk	    unlink(prev, bck, fwd);
261040c85557Swdenk	    newp = prev;
261140c85557Swdenk	    newsize += prevsize + nextsize;
261240c85557Swdenk	    newmem = chunk2mem(newp);
261340c85557Swdenk	    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
261440c85557Swdenk	    top = chunk_at_offset(newp, nb);
261540c85557Swdenk	    set_head(top, (newsize - nb) | PREV_INUSE);
261640c85557Swdenk	    set_head_size(newp, nb);
261740c85557Swdenk	    return newmem;
261840c85557Swdenk	  }
261940c85557Swdenk	}
262040c85557Swdenk
262140c85557Swdenk	/* into next chunk */
262240c85557Swdenk	else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
262340c85557Swdenk	{
262440c85557Swdenk	  unlink(next, bck, fwd);
262540c85557Swdenk	  unlink(prev, bck, fwd);
262640c85557Swdenk	  newp = prev;
262740c85557Swdenk	  newsize += nextsize + prevsize;
262840c85557Swdenk	  newmem = chunk2mem(newp);
262940c85557Swdenk	  MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
263040c85557Swdenk	  goto split;
263140c85557Swdenk	}
263240c85557Swdenk      }
263340c85557Swdenk
263440c85557Swdenk      /* backward only */
263540c85557Swdenk      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
263640c85557Swdenk      {
263740c85557Swdenk	unlink(prev, bck, fwd);
263840c85557Swdenk	newp = prev;
263940c85557Swdenk	newsize += prevsize;
264040c85557Swdenk	newmem = chunk2mem(newp);
264140c85557Swdenk	MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
264240c85557Swdenk	goto split;
264340c85557Swdenk      }
264440c85557Swdenk    }
264540c85557Swdenk
264640c85557Swdenk    /* Must allocate */
264740c85557Swdenk
264840c85557Swdenk    newmem = mALLOc (bytes);
264940c85557Swdenk
265040c85557Swdenk    if (newmem == 0)  /* propagate failure */
265140c85557Swdenk      return 0;
265240c85557Swdenk
265340c85557Swdenk    /* Avoid copy if newp is next chunk after oldp. */
265440c85557Swdenk    /* (This can only happen when new chunk is sbrk'ed.) */
265540c85557Swdenk
265640c85557Swdenk    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
265740c85557Swdenk    {
265840c85557Swdenk      newsize += chunksize(newp);
265940c85557Swdenk      newp = oldp;
266040c85557Swdenk      goto split;
266140c85557Swdenk    }
266240c85557Swdenk
266340c85557Swdenk    /* Otherwise copy, free, and exit */
266440c85557Swdenk    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
266540c85557Swdenk    fREe(oldmem);
266640c85557Swdenk    return newmem;
266740c85557Swdenk  }
266840c85557Swdenk
266940c85557Swdenk
267040c85557Swdenk split:  /* split off extra room in old or expanded chunk */
267140c85557Swdenk
267240c85557Swdenk  if (newsize - nb >= MINSIZE) /* split off remainder */
267340c85557Swdenk  {
267440c85557Swdenk    remainder = chunk_at_offset(newp, nb);
267540c85557Swdenk    remainder_size = newsize - nb;
267640c85557Swdenk    set_head_size(newp, nb);
267740c85557Swdenk    set_head(remainder, remainder_size | PREV_INUSE);
267840c85557Swdenk    set_inuse_bit_at_offset(remainder, remainder_size);
267940c85557Swdenk    fREe(chunk2mem(remainder)); /* let free() deal with it */
268040c85557Swdenk  }
268140c85557Swdenk  else
268240c85557Swdenk  {
268340c85557Swdenk    set_head_size(newp, newsize);
268440c85557Swdenk    set_inuse_bit_at_offset(newp, newsize);
268540c85557Swdenk  }
268640c85557Swdenk
268740c85557Swdenk  check_inuse_chunk(newp);
268840c85557Swdenk  return chunk2mem(newp);
268940c85557Swdenk}
269040c85557Swdenk
269140c85557Swdenk
269240c85557Swdenk
269340c85557Swdenk
269440c85557Swdenk/*
269540c85557Swdenk
269640c85557Swdenk  memalign algorithm:
269740c85557Swdenk
269840c85557Swdenk    memalign requests more than enough space from malloc, finds a spot
269940c85557Swdenk    within that chunk that meets the alignment request, and then
270040c85557Swdenk    possibly frees the leading and trailing space.
270140c85557Swdenk
270240c85557Swdenk    The alignment argument must be a power of two. This property is not
270340c85557Swdenk    checked by memalign, so misuse may result in random runtime errors.
270440c85557Swdenk
270540c85557Swdenk    8-byte alignment is guaranteed by normal malloc calls, so don't
270640c85557Swdenk    bother calling memalign with an argument of 8 or less.
270740c85557Swdenk
270840c85557Swdenk    Overreliance on memalign is a sure way to fragment space.
270940c85557Swdenk
271040c85557Swdenk*/
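
/*
  Usage sketch (illustrative only); the alignment must be a power of
  two, which memalign does not verify:

      void* p = memalign(64, 1000);  .. at least 1000 usable bytes at
					an address divisible by 64
      void* q = memalign(4, 1000);   .. <= MALLOC_ALIGNMENT, so this
					just relays to malloc(1000)
      free(p); free(q);
*/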
271140c85557Swdenk
271240c85557Swdenk
271340c85557Swdenk#if __STD_C
271440c85557SwdenkVoid_t* mEMALIGn(size_t alignment, size_t bytes)
271540c85557Swdenk#else
271640c85557SwdenkVoid_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
271740c85557Swdenk#endif
271840c85557Swdenk{
271940c85557Swdenk  INTERNAL_SIZE_T    nb;      /* padded  request size */
272040c85557Swdenk  char*     m;                /* memory returned by malloc call */
272140c85557Swdenk  mchunkptr p;                /* corresponding chunk */
272240c85557Swdenk  char*     brk;              /* alignment point within p */
272340c85557Swdenk  mchunkptr newp;             /* chunk to return */
272440c85557Swdenk  INTERNAL_SIZE_T  newsize;   /* its size */
272540c85557Swdenk  INTERNAL_SIZE_T  leadsize;  /* leading space before alignment point */
272640c85557Swdenk  mchunkptr remainder;        /* spare room at end to split off */
272740c85557Swdenk  long      remainder_size;   /* its size */
272840c85557Swdenk
272940c85557Swdenk  if ((long)bytes < 0) return 0;
273040c85557Swdenk
273140c85557Swdenk  /* If need less alignment than we give anyway, just relay to malloc */
273240c85557Swdenk
273340c85557Swdenk  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
273440c85557Swdenk
273540c85557Swdenk  /* Otherwise, ensure that it is at least a minimum chunk size */
273640c85557Swdenk
273740c85557Swdenk  if (alignment <  MINSIZE) alignment = MINSIZE;
273840c85557Swdenk
273940c85557Swdenk  /* Call malloc with worst case padding to hit alignment. */
274040c85557Swdenk
274140c85557Swdenk  nb = request2size(bytes);
274240c85557Swdenk  m  = (char*)(mALLOc(nb + alignment + MINSIZE));
274340c85557Swdenk
274440c85557Swdenk  if (m == 0) return 0; /* propagate failure */
274540c85557Swdenk
274640c85557Swdenk  p = mem2chunk(m);
274740c85557Swdenk
274840c85557Swdenk  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
274940c85557Swdenk  {
275040c85557Swdenk#if HAVE_MMAP
275140c85557Swdenk    if(chunk_is_mmapped(p))
275240c85557Swdenk      return chunk2mem(p); /* nothing more to do */
275340c85557Swdenk#endif
275440c85557Swdenk  }
275540c85557Swdenk  else /* misaligned */
275640c85557Swdenk  {
275740c85557Swdenk    /*
275840c85557Swdenk      Find an aligned spot inside chunk.
275940c85557Swdenk      Since we need to give back leading space in a chunk of at
276040c85557Swdenk      least MINSIZE, if the first calculation places us at
276140c85557Swdenk      a spot with less than MINSIZE leader, we can move to the
276240c85557Swdenk      next aligned spot -- we've allocated enough total room so that
276340c85557Swdenk      this is always possible.
276440c85557Swdenk    */
276540c85557Swdenk
276640c85557Swdenk    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
276740c85557Swdenk    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
276840c85557Swdenk
276940c85557Swdenk    newp = (mchunkptr)brk;
277040c85557Swdenk    leadsize = brk - (char*)(p);
277140c85557Swdenk    newsize = chunksize(p) - leadsize;
277240c85557Swdenk
277340c85557Swdenk#if HAVE_MMAP
277440c85557Swdenk    if(chunk_is_mmapped(p))
277540c85557Swdenk    {
277640c85557Swdenk      newp->prev_size = p->prev_size + leadsize;
277740c85557Swdenk      set_head(newp, newsize|IS_MMAPPED);
277840c85557Swdenk      return chunk2mem(newp);
277940c85557Swdenk    }
278040c85557Swdenk#endif
278140c85557Swdenk
278240c85557Swdenk    /* give back leader, use the rest */
278340c85557Swdenk
278440c85557Swdenk    set_head(newp, newsize | PREV_INUSE);
278540c85557Swdenk    set_inuse_bit_at_offset(newp, newsize);
278640c85557Swdenk    set_head_size(p, leadsize);
278740c85557Swdenk    fREe(chunk2mem(p));
278840c85557Swdenk    p = newp;
278940c85557Swdenk
279040c85557Swdenk    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
279140c85557Swdenk  }
279240c85557Swdenk
279340c85557Swdenk  /* Also give back spare room at the end */
279440c85557Swdenk
279540c85557Swdenk  remainder_size = chunksize(p) - nb;
279640c85557Swdenk
279740c85557Swdenk  if (remainder_size >= (long)MINSIZE)
279840c85557Swdenk  {
279940c85557Swdenk    remainder = chunk_at_offset(p, nb);
280040c85557Swdenk    set_head(remainder, remainder_size | PREV_INUSE);
280140c85557Swdenk    set_head_size(p, nb);
280240c85557Swdenk    fREe(chunk2mem(remainder));
280340c85557Swdenk  }
280440c85557Swdenk
280540c85557Swdenk  check_inuse_chunk(p);
280640c85557Swdenk  return chunk2mem(p);
280740c85557Swdenk
280840c85557Swdenk}
280940c85557Swdenk
281040c85557Swdenk
281140c85557Swdenk
281240c85557Swdenk
281340c85557Swdenk/*
281440c85557Swdenk    valloc just invokes memalign with alignment argument equal
281540c85557Swdenk    to the page size of the system (or as near to this as can
281640c85557Swdenk    be figured out from all the includes/defines above.)
281740c85557Swdenk*/
281840c85557Swdenk
281940c85557Swdenk#if __STD_C
282040c85557SwdenkVoid_t* vALLOc(size_t bytes)
282140c85557Swdenk#else
282240c85557SwdenkVoid_t* vALLOc(bytes) size_t bytes;
282340c85557Swdenk#endif
282440c85557Swdenk{
282540c85557Swdenk  return mEMALIGn (malloc_getpagesize, bytes);
282640c85557Swdenk}
282740c85557Swdenk
282840c85557Swdenk/*
282940c85557Swdenk  pvalloc just invokes valloc for the nearest pagesize
283040c85557Swdenk  that will accommodate request
283140c85557Swdenk  that will accommodate the request
283240c85557Swdenk
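
/*
  Worked example of the rounding below, assuming a 4096-byte page:
  bytes = 5000 gives (5000 + 4095) & ~4095 = 8192, so pvalloc(5000)
  behaves like memalign(4096, 8192).
*/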
283340c85557Swdenk
283440c85557Swdenk#if __STD_C
283540c85557SwdenkVoid_t* pvALLOc(size_t bytes)
283640c85557Swdenk#else
283740c85557SwdenkVoid_t* pvALLOc(bytes) size_t bytes;
283840c85557Swdenk#endif
283940c85557Swdenk{
284040c85557Swdenk  size_t pagesize = malloc_getpagesize;
284140c85557Swdenk  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
284240c85557Swdenk}
284340c85557Swdenk
284440c85557Swdenk/*
284540c85557Swdenk
284640c85557Swdenk  calloc calls malloc, then zeroes out the allocated chunk.
284740c85557Swdenk
284840c85557Swdenk*/
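
/*
  Note that the size computation below multiplies n * elem_size with
  no overflow check, as was usual for allocators of this vintage;
  callers with untrusted counts should guard the product themselves,
  e.g.:

      if (elem_size != 0 && n > (size_t)-1 / elem_size)
	return 0;   .. product would overflow; refuse
*/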
284940c85557Swdenk
285040c85557Swdenk#if __STD_C
285140c85557SwdenkVoid_t* cALLOc(size_t n, size_t elem_size)
285240c85557Swdenk#else
285340c85557SwdenkVoid_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
285440c85557Swdenk#endif
285540c85557Swdenk{
285640c85557Swdenk  mchunkptr p;
285740c85557Swdenk  INTERNAL_SIZE_T csz;
285840c85557Swdenk
285940c85557Swdenk  INTERNAL_SIZE_T sz = n * elem_size;
286040c85557Swdenk
286140c85557Swdenk
286240c85557Swdenk  /* check if malloc_extend_top was called, in which case we don't need to clear */
286340c85557Swdenk#if MORECORE_CLEARS
286440c85557Swdenk  mchunkptr oldtop = top;
286540c85557Swdenk  INTERNAL_SIZE_T oldtopsize = chunksize(top);
286640c85557Swdenk#endif
286740c85557Swdenk  Void_t* mem = mALLOc (sz);
286840c85557Swdenk
286940c85557Swdenk  if ((long)n < 0) return 0;
287040c85557Swdenk
287140c85557Swdenk  if (mem == 0)
287240c85557Swdenk    return 0;
287340c85557Swdenk  else
287440c85557Swdenk  {
287540c85557Swdenk    p = mem2chunk(mem);
287640c85557Swdenk
287740c85557Swdenk    /* Two optional cases in which clearing not necessary */
287840c85557Swdenk
287940c85557Swdenk
288040c85557Swdenk#if HAVE_MMAP
288140c85557Swdenk    if (chunk_is_mmapped(p)) return mem;
288240c85557Swdenk#endif
288340c85557Swdenk
288440c85557Swdenk    csz = chunksize(p);
288540c85557Swdenk
288640c85557Swdenk#if MORECORE_CLEARS
288740c85557Swdenk    if (p == oldtop && csz > oldtopsize)
288840c85557Swdenk    {
288940c85557Swdenk      /* clear only the bytes from non-freshly-sbrked memory */
289040c85557Swdenk      csz = oldtopsize;
289140c85557Swdenk    }
289240c85557Swdenk#endif
289340c85557Swdenk
289440c85557Swdenk    MALLOC_ZERO(mem, csz - SIZE_SZ);
289540c85557Swdenk    return mem;
289640c85557Swdenk  }
289740c85557Swdenk}
289840c85557Swdenk
289940c85557Swdenk/*
290040c85557Swdenk
290140c85557Swdenk  cfree just calls free. It is needed/defined on some systems
290240c85557Swdenk  that pair it with calloc, presumably for odd historical reasons.
290340c85557Swdenk
290440c85557Swdenk*/
290540c85557Swdenk
290640c85557Swdenk#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
290740c85557Swdenk#if __STD_C
290840c85557Swdenkvoid cfree(Void_t *mem)
290940c85557Swdenk#else
291040c85557Swdenkvoid cfree(mem) Void_t *mem;
291140c85557Swdenk#endif
291240c85557Swdenk{
291340c85557Swdenk  fREe(mem);
291440c85557Swdenk}
291540c85557Swdenk#endif
291640c85557Swdenk
291740c85557Swdenk
291840c85557Swdenk
291940c85557Swdenk/*
292040c85557Swdenk
292140c85557Swdenk    Malloc_trim gives memory back to the system (via negative
292240c85557Swdenk    arguments to sbrk) if there is unused memory at the `high' end of
292340c85557Swdenk    the malloc pool. You can call this after freeing large blocks of
292440c85557Swdenk    memory to potentially reduce the system-level memory requirements
292540c85557Swdenk    of a program. However, it cannot guarantee to reduce memory. Under
292640c85557Swdenk    some allocation patterns, some large free blocks of memory will be
292740c85557Swdenk    locked between two used chunks, so they cannot be given back to
292840c85557Swdenk    the system.
292940c85557Swdenk
293040c85557Swdenk    The `pad' argument to malloc_trim represents the amount of free
293140c85557Swdenk    trailing space to leave untrimmed. If this argument is zero,
293240c85557Swdenk    only the minimum amount of memory to maintain internal data
293340c85557Swdenk    structures will be left (one page or less). Non-zero arguments
293440c85557Swdenk    can be supplied to maintain enough trailing space to service
293540c85557Swdenk    future expected allocations without having to re-obtain memory
293640c85557Swdenk    from the system.
293740c85557Swdenk
293840c85557Swdenk    Malloc_trim returns 1 if it actually released any memory, else 0.
293940c85557Swdenk
294040c85557Swdenk*/
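
/*
  Worked example of the `extra' computation below, assuming a
  4096-byte page, pad = 0, MINSIZE = 16 and chunksize(top) = 20000:

      extra = ((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096
	    = (5 - 1) * 4096
	    = 16384

  so 16384 bytes are returned to the system via a negative sbrk and
  top keeps 20000 - 16384 = 3616 bytes.
*/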
294140c85557Swdenk
294240c85557Swdenk#if __STD_C
294340c85557Swdenkint malloc_trim(size_t pad)
294440c85557Swdenk#else
294540c85557Swdenkint malloc_trim(pad) size_t pad;
294640c85557Swdenk#endif
294740c85557Swdenk{
294840c85557Swdenk  long  top_size;        /* Amount of top-most memory */
294940c85557Swdenk  long  extra;           /* Amount to release */
295040c85557Swdenk  char* current_brk;     /* address returned by pre-check sbrk call */
295140c85557Swdenk  char* new_brk;         /* address returned by negative sbrk call */
295240c85557Swdenk
295340c85557Swdenk  unsigned long pagesz = malloc_getpagesize;
295440c85557Swdenk
295540c85557Swdenk  top_size = chunksize(top);
295640c85557Swdenk  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
295740c85557Swdenk
295840c85557Swdenk  if (extra < (long)pagesz)  /* Not enough memory to release */
295940c85557Swdenk    return 0;
296040c85557Swdenk
296140c85557Swdenk  else
296240c85557Swdenk  {
296340c85557Swdenk    /* Test to make sure no one else called sbrk */
296440c85557Swdenk    current_brk = (char*)(MORECORE (0));
296540c85557Swdenk    if (current_brk != (char*)(top) + top_size)
296640c85557Swdenk      return 0;     /* Apparently we don't own memory; must fail */
296740c85557Swdenk
296840c85557Swdenk    else
296940c85557Swdenk    {
297040c85557Swdenk      new_brk = (char*)(MORECORE (-extra));
297140c85557Swdenk
297240c85557Swdenk      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
297340c85557Swdenk      {
297440c85557Swdenk	/* Try to figure out what we have */
297540c85557Swdenk	current_brk = (char*)(MORECORE (0));
297640c85557Swdenk	top_size = current_brk - (char*)top;
297740c85557Swdenk	if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
297840c85557Swdenk	{
297940c85557Swdenk	  sbrked_mem = current_brk - sbrk_base;
298040c85557Swdenk	  set_head(top, top_size | PREV_INUSE);
298140c85557Swdenk	}
298240c85557Swdenk	check_chunk(top);
298340c85557Swdenk	return 0;
298440c85557Swdenk      }
298540c85557Swdenk
298640c85557Swdenk      else
298740c85557Swdenk      {
298840c85557Swdenk	/* Success. Adjust top accordingly. */
298940c85557Swdenk	set_head(top, (top_size - extra) | PREV_INUSE);
299040c85557Swdenk	sbrked_mem -= extra;
299140c85557Swdenk	check_chunk(top);
299240c85557Swdenk	return 1;
299340c85557Swdenk      }
299440c85557Swdenk    }
299540c85557Swdenk  }
299640c85557Swdenk}
299740c85557Swdenk
299840c85557Swdenk
299940c85557Swdenk
300040c85557Swdenk/*
300140c85557Swdenk  malloc_usable_size:
300240c85557Swdenk
300340c85557Swdenk    This routine tells you how many bytes you can actually use in an
300440c85557Swdenk    allocated chunk, which may be more than you requested (although
300540c85557Swdenk    often not). You can use this many bytes without worrying about
300640c85557Swdenk    overwriting other allocated objects. Not a particularly great
300740c85557Swdenk    programming practice, but still sometimes useful.
300840c85557Swdenk
300940c85557Swdenk*/
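
/*
  Usage sketch (illustrative only):

      char* p = (char*) malloc(13);
      size_t n = malloc_usable_size(p);  .. n >= 13; every one of the
					    n bytes may be used safely
      memset(p, 0, n);
      free(p);
*/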
301040c85557Swdenk
301140c85557Swdenk#if __STD_C
301240c85557Swdenksize_t malloc_usable_size(Void_t* mem)
301340c85557Swdenk#else
301440c85557Swdenksize_t malloc_usable_size(mem) Void_t* mem;
301540c85557Swdenk#endif
301640c85557Swdenk{
301740c85557Swdenk  mchunkptr p;
301840c85557Swdenk  if (mem == 0)
301940c85557Swdenk    return 0;
302040c85557Swdenk  else
302140c85557Swdenk  {
302240c85557Swdenk    p = mem2chunk(mem);
302340c85557Swdenk    if(!chunk_is_mmapped(p))
302440c85557Swdenk    {
302540c85557Swdenk      if (!inuse(p)) return 0;
302640c85557Swdenk      check_inuse_chunk(p);
302740c85557Swdenk      return chunksize(p) - SIZE_SZ;
302840c85557Swdenk    }
302940c85557Swdenk    return chunksize(p) - 2*SIZE_SZ;
303040c85557Swdenk  }
303140c85557Swdenk}
303240c85557Swdenk
303340c85557Swdenk
303440c85557Swdenk
303540c85557Swdenk
303640c85557Swdenk/* Utility to update current_mallinfo for malloc_stats and mallinfo() */
303740c85557Swdenk
303840c85557Swdenkstatic void malloc_update_mallinfo()
303940c85557Swdenk{
304040c85557Swdenk  int i;
304140c85557Swdenk  mbinptr b;
304240c85557Swdenk  mchunkptr p;
304340c85557Swdenk#if DEBUG
304440c85557Swdenk  mchunkptr q;
304540c85557Swdenk#endif
304640c85557Swdenk
304740c85557Swdenk  INTERNAL_SIZE_T avail = chunksize(top);
304840c85557Swdenk  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
304940c85557Swdenk
305040c85557Swdenk  for (i = 1; i < NAV; ++i)
305140c85557Swdenk  {
305240c85557Swdenk    b = bin_at(i);
305340c85557Swdenk    for (p = last(b); p != b; p = p->bk)
305440c85557Swdenk    {
305540c85557Swdenk#if DEBUG
305640c85557Swdenk      check_free_chunk(p);
305740c85557Swdenk      for (q = next_chunk(p);
305840c85557Swdenk	   q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
305940c85557Swdenk	   q = next_chunk(q))
306040c85557Swdenk	check_inuse_chunk(q);
306140c85557Swdenk#endif
306240c85557Swdenk      avail += chunksize(p);
306340c85557Swdenk      navail++;
306440c85557Swdenk    }
306540c85557Swdenk  }
306640c85557Swdenk
306740c85557Swdenk  current_mallinfo.ordblks = navail;
306840c85557Swdenk  current_mallinfo.uordblks = sbrked_mem - avail;
306940c85557Swdenk  current_mallinfo.fordblks = avail;
307040c85557Swdenk  current_mallinfo.hblks = n_mmaps;
307140c85557Swdenk  current_mallinfo.hblkhd = mmapped_mem;
307240c85557Swdenk  current_mallinfo.keepcost = chunksize(top);
307340c85557Swdenk
307440c85557Swdenk}
307540c85557Swdenk
307640c85557Swdenk
307740c85557Swdenk
307840c85557Swdenk/*
307940c85557Swdenk
308040c85557Swdenk  malloc_stats:
308140c85557Swdenk
308240c85557Swdenk    Prints on stderr the amount of space obtained from the system (both
308340c85557Swdenk    via sbrk and mmap), the maximum amount (which may be more than
308440c85557Swdenk    current if malloc_trim and/or munmap got called), the maximum
308540c85557Swdenk    number of simultaneous mmap regions used, and the current number
308640c85557Swdenk    of bytes allocated via malloc (or realloc, etc) but not yet
308740c85557Swdenk    freed. (Note that this is the number of bytes allocated, not the
308840c85557Swdenk    number requested. It will be larger than the number requested
308940c85557Swdenk    because of alignment and bookkeeping overhead.)
309040c85557Swdenk
309140c85557Swdenk*/
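
/*
  The report has the shape below (numbers illustrative only):

      max system bytes =     266240
      system bytes     =     266240
      in use bytes     =     160144
      max mmap regions =          2    .. printed only #if HAVE_MMAP
*/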
309240c85557Swdenk
309340c85557Swdenkvoid malloc_stats()
309440c85557Swdenk{
309540c85557Swdenk  malloc_update_mallinfo();
309640c85557Swdenk  fprintf(stderr, "max system bytes = %10u\n",
309740c85557Swdenk	  (unsigned int)(max_total_mem));
309840c85557Swdenk  fprintf(stderr, "system bytes     = %10u\n",
309940c85557Swdenk	  (unsigned int)(sbrked_mem + mmapped_mem));
310040c85557Swdenk  fprintf(stderr, "in use bytes     = %10u\n",
310140c85557Swdenk	  (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
310240c85557Swdenk#if HAVE_MMAP
310340c85557Swdenk  fprintf(stderr, "max mmap regions = %10u\n",
310440c85557Swdenk	  (unsigned int)max_n_mmaps);
310540c85557Swdenk#endif
310640c85557Swdenk}
310740c85557Swdenk
310840c85557Swdenk/*
310940c85557Swdenk  mallinfo returns a copy of updated current mallinfo.
311040c85557Swdenk*/
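
/*
  Usage sketch reading the fields filled in by malloc_update_mallinfo
  above:

      struct mallinfo mi = mallinfo();
      .. mi.ordblks  - number of free chunks (counting top)
      .. mi.fordblks - total free bytes
      .. mi.uordblks - sbrked bytes in use
      .. mi.keepcost - bytes in top, releasable via malloc_trim
*/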
311140c85557Swdenk
311240c85557Swdenkstruct mallinfo mALLINFo()
311340c85557Swdenk{
311440c85557Swdenk  malloc_update_mallinfo();
311540c85557Swdenk  return current_mallinfo;
311640c85557Swdenk}
311740c85557Swdenk
311840c85557Swdenk
311940c85557Swdenk
312040c85557Swdenk
312140c85557Swdenk/*
312240c85557Swdenk  mallopt:
312340c85557Swdenk
312440c85557Swdenk    mallopt is the general SVID/XPG interface to tunable parameters.
312540c85557Swdenk    The format is to provide a (parameter-number, parameter-value) pair.
312640c85557Swdenk    mallopt then sets the corresponding parameter to the argument
312740c85557Swdenk    value if it can (i.e., so long as the value is meaningful),
312840c85557Swdenk    and returns 1 if successful else 0.
312940c85557Swdenk
313040c85557Swdenk    See descriptions of tunable parameters above.
313140c85557Swdenk
313240c85557Swdenk*/
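
/*
  Usage sketch of the (parameter-number, parameter-value) interface:

      mallopt(M_TRIM_THRESHOLD, 256*1024);   .. returns 1
      mallopt(M_TOP_PAD,        4*1024);     .. returns 1
      mallopt(M_MMAP_THRESHOLD, 1024*1024);  .. returns 1
      mallopt(12345, 0);                     .. unknown: returns 0
*/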
313340c85557Swdenk
313440c85557Swdenk#if __STD_C
313540c85557Swdenkint mALLOPt(int param_number, int value)
313640c85557Swdenk#else
313740c85557Swdenkint mALLOPt(param_number, value) int param_number; int value;
313840c85557Swdenk#endif
313940c85557Swdenk{
314040c85557Swdenk  switch(param_number)
314140c85557Swdenk  {
314240c85557Swdenk    case M_TRIM_THRESHOLD:
314340c85557Swdenk      trim_threshold = value; return 1;
314440c85557Swdenk    case M_TOP_PAD:
314540c85557Swdenk      top_pad = value; return 1;
314640c85557Swdenk    case M_MMAP_THRESHOLD:
314740c85557Swdenk      mmap_threshold = value; return 1;
314840c85557Swdenk    case M_MMAP_MAX:
314940c85557Swdenk#if HAVE_MMAP
315040c85557Swdenk      n_mmaps_max = value; return 1;
315140c85557Swdenk#else
315240c85557Swdenk      if (value != 0) return 0; else  n_mmaps_max = value; return 1;
315340c85557Swdenk#endif
315440c85557Swdenk
315540c85557Swdenk    default:
315640c85557Swdenk      return 0;
315740c85557Swdenk  }
315840c85557Swdenk}
315940c85557Swdenk
316040c85557Swdenk/*
316140c85557Swdenk
316240c85557SwdenkHistory:
316340c85557Swdenk
316440c85557Swdenk    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
316540c85557Swdenk      * return null for negative arguments
316640c85557Swdenk      * Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
316740c85557Swdenk	 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
316840c85557Swdenk	  (e.g. WIN32 platforms)
316940c85557Swdenk	 * Clean up header file inclusion for WIN32 platforms
317040c85557Swdenk	 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
317140c85557Swdenk	 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
317240c85557Swdenk	   memory allocation routines
317340c85557Swdenk	 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
317440c85557Swdenk	 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
317540c85557Swdenk	   usage of 'assert' in non-WIN32 code
317640c85557Swdenk	 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
317740c85557Swdenk	   avoid infinite loop
317840c85557Swdenk      * Always call 'fREe()' rather than 'free()'
317940c85557Swdenk
318040c85557Swdenk    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
318140c85557Swdenk      * Fixed ordering problem with boundary-stamping
318240c85557Swdenk
318340c85557Swdenk    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
318440c85557Swdenk      * Added pvalloc, as recommended by H.J. Liu
318540c85557Swdenk      * Added 64bit pointer support mainly from Wolfram Gloger
318640c85557Swdenk      * Added anonymously donated WIN32 sbrk emulation
318740c85557Swdenk      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
318840c85557Swdenk      * malloc_extend_top: fix mask error that caused wastage after
318940c85557Swdenk	foreign sbrks
319040c85557Swdenk      * Add linux mremap support code from HJ Liu
319140c85557Swdenk
319240c85557Swdenk    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
319340c85557Swdenk      * Integrated most documentation with the code.
319440c85557Swdenk      * Add support for mmap, with help from
319540c85557Swdenk	Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
319640c85557Swdenk      * Use last_remainder in more cases.
319740c85557Swdenk      * Pack bins using idea from  colin@nyx10.cs.du.edu
319840c85557Swdenk      * Use ordered bins instead of best-fit threshold
319940c85557Swdenk      * Eliminate block-local decls to simplify tracing and debugging.
320040c85557Swdenk      * Support another case of realloc via move into top
320140c85557Swdenk      * Fix error occurring when initial sbrk_base not word-aligned.
320240c85557Swdenk      * Rely on page size for units instead of SBRK_UNIT to
320340c85557Swdenk	avoid surprises about sbrk alignment conventions.
320440c85557Swdenk      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
320540c85557Swdenk	(raymond@es.ele.tue.nl) for the suggestion.
320640c85557Swdenk      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
320740c85557Swdenk      * More precautions for cases where other routines call sbrk,
320840c85557Swdenk	courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
320940c85557Swdenk      * Added macros etc., allowing use in linux libc from
321040c85557Swdenk	H.J. Lu (hjl@gnu.ai.mit.edu)
321140c85557Swdenk      * Inverted this history list
321240c85557Swdenk
321340c85557Swdenk    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
321440c85557Swdenk      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
321540c85557Swdenk      * Removed all preallocation code since under current scheme
321640c85557Swdenk	the work required to undo bad preallocations exceeds
321740c85557Swdenk	the work saved in good cases for most test programs.
321840c85557Swdenk      * No longer use return list or unconsolidated bins since
321940c85557Swdenk	no scheme using them consistently outperforms those that don't
322040c85557Swdenk	given above changes.
322140c85557Swdenk      * Use best fit for very large chunks to prevent some worst-cases.
322240c85557Swdenk      * Added some support for debugging
322340c85557Swdenk
322440c85557Swdenk    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
322540c85557Swdenk      * Removed footers when chunks are in use. Thanks to
322640c85557Swdenk	Paul Wilson (wilson@cs.texas.edu) for the suggestion.
322740c85557Swdenk
322840c85557Swdenk    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
322940c85557Swdenk      * Added malloc_trim, with help from Wolfram Gloger
323040c85557Swdenk	(wmglo@Dent.MED.Uni-Muenchen.DE).
323140c85557Swdenk
323240c85557Swdenk    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
323340c85557Swdenk
323440c85557Swdenk    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
323540c85557Swdenk      * realloc: try to expand in both directions
323640c85557Swdenk      * malloc: swap order of clean-bin strategy;
323740c85557Swdenk      * realloc: only conditionally expand backwards
323840c85557Swdenk      * Try not to scavenge used bins
323940c85557Swdenk      * Use bin counts as a guide to preallocation
324040c85557Swdenk      * Occasionally bin return list chunks in first scan
324140c85557Swdenk      * Add a few optimizations from colin@nyx10.cs.du.edu
324240c85557Swdenk
324340c85557Swdenk    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
324440c85557Swdenk      * faster bin computation & slightly different binning
324540c85557Swdenk      * merged all consolidations to one part of malloc proper
324640c85557Swdenk	 (eliminating old malloc_find_space & malloc_clean_bin)
324740c85557Swdenk      * Scan 2 returns chunks (not just 1)
324840c85557Swdenk      * Propagate failure in realloc if malloc returns 0
324940c85557Swdenk      * Add stuff to allow compilation on non-ANSI compilers
325040c85557Swdenk	  from kpv@research.att.com
325140c85557Swdenk
325240c85557Swdenk    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
325340c85557Swdenk      * removed potential for odd address access in prev_chunk
325440c85557Swdenk      * removed dependency on getpagesize.h
325540c85557Swdenk      * misc cosmetics and a bit more internal documentation
325640c85557Swdenk      * anticosmetics: mangled names in macros to evade debugger strangeness
325740c85557Swdenk      * tested on sparc, hp-700, dec-mips, rs6000
325840c85557Swdenk	  with gcc & native cc (hp, dec only) allowing
325940c85557Swdenk	  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
326040c85557Swdenk
326140c85557Swdenk    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
326240c85557Swdenk      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
326340c85557Swdenk	 structure of old version,  but most details differ.)
326440c85557Swdenk
326540c85557Swdenk*/
3266