1  /*
2    A version of malloc/free/realloc written by Doug Lea and released to the
3    public domain.  Send questions/comments/complaints/performance data
4    to dl@cs.oswego.edu
5  
6  * VERSION 2.6.6  Sun Mar  5 19:10:03 2000  Doug Lea  (dl at gee)
7  
8     Note: There may be an updated version of this malloc obtainable at
9  	   ftp://g.oswego.edu/pub/misc/malloc.c
10  	 Check before installing!
11  
12  * Why use this malloc?
13  
14    This is not the fastest, most space-conserving, most portable, or
15    most tunable malloc ever written. However it is among the fastest
16    while also being among the most space-conserving, portable and tunable.
17    Consistent balance across these factors results in a good general-purpose
18    allocator. For a high-level description, see
19       http://g.oswego.edu/dl/html/malloc.html
20  
21  * Synopsis of public routines
22  
23    (Much fuller descriptions are contained in the program documentation below.)
24  
25    malloc(size_t n);
26       Return a pointer to a newly allocated chunk of at least n bytes, or null
27       if no space is available.
28    free(Void_t* p);
29       Release the chunk of memory pointed to by p, or no effect if p is null.
30    realloc(Void_t* p, size_t n);
31       Return a pointer to a chunk of size n that contains the same data
32       as does chunk p up to the minimum of (n, p's size) bytes, or null
33       if no space is available. The returned pointer may or may not be
34       the same as p. If p is null, equivalent to malloc.  Unless the
35       #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
36       size argument of zero (re)allocates a minimum-sized chunk.
37    memalign(size_t alignment, size_t n);
38       Return a pointer to a newly allocated chunk of n bytes, aligned
39       in accord with the alignment argument, which must be a power of
40       two.
41    valloc(size_t n);
42       Equivalent to memalign(pagesize, n), where pagesize is the page
43       size of the system (or as near to this as can be figured out from
44       all the includes/defines below.)
45    pvalloc(size_t n);
46       Equivalent to valloc(minimum-page-that-holds(n)), that is,
47       round up n to nearest pagesize.
48    calloc(size_t unit, size_t quantity);
49       Returns a pointer to quantity * unit bytes, with all locations
50       set to zero.
51    cfree(Void_t* p);
52       Equivalent to free(p).
53    malloc_trim(size_t pad);
54       Release all but pad bytes of freed top-most memory back
55       to the system. Return 1 if successful, else 0.
56    malloc_usable_size(Void_t* p);
57       Report the number of usable allocated bytes associated with allocated
58       chunk p. This may or may not report more bytes than were requested,
59       due to alignment and minimum size constraints.
60    malloc_stats();
61       Prints brief summary statistics on stderr.
62    mallinfo()
63       Returns (by copy) a struct containing various summary statistics.
64    mallopt(int parameter_number, int parameter_value)
65       Changes one of the tunable parameters described below. Returns
66       1 if successful in changing the parameter, else 0.
67  
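  A minimal usage sketch of the routines above (illustrative only; the
  sizes are arbitrary and real code would check every allocation):

      void example(void)
      {
	  char *p = malloc(100);
	  char *a = memalign(4096, 256);

	  if (p && a) {
	      char *q = realloc(p, 200);
	      if (q)
		  p = q;
	  }
	  free(a);
	  free(p);
      }

  Note that realloc may move the chunk, that memalign's alignment argument
  must be a power of two, and that free(NULL) has no effect.
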
68  * Vital statistics:
69  
70    Alignment:                            8-byte
71         8 byte alignment is currently hardwired into the design.  This
72         seems to suffice for all current machines and C compilers.
73  
74    Assumed pointer representation:       4 or 8 bytes
75       Code for 8-byte pointers is untested by me but is reported to
76       work reliably by Wolfram Gloger, who contributed most of the
77         changes supporting this.
78  
79    Assumed size_t  representation:       4 or 8 bytes
80         Note that size_t is allowed to be 4 bytes even if pointers are 8.
81  
82    Minimum overhead per allocated chunk: 4 or 8 bytes
83         Each malloced chunk has a hidden overhead of 4 bytes holding size
84         and status information.
85  
86    Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
87  			  8-byte ptrs:  24/32 bytes (including 4/8 overhead)
88  
89         When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
90         ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
91         needed; 4 (8) for a trailing size field
92         and 8 (16) bytes for free list pointers. Thus, the minimum
93         allocatable size is 16/24/32 bytes.
94  
95         Even a request for zero bytes (i.e., malloc(0)) returns a
96         pointer to something of the minimum allocatable size.
97  
98    Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
99  			  8-byte size_t: 2^63 - 16 bytes
100  
101         It is assumed that (possibly signed) size_t bit values suffice to
102         represent chunk sizes. `Possibly signed' is due to the fact
103         that `size_t' may be defined on a system as either a signed or
104         an unsigned type. To be conservative, values that would appear
105         as negative numbers are avoided.
106         Requests for sizes with a negative sign bit when the request
107       size is treated as a long will return null.
108  
109    Maximum overhead wastage per allocated chunk: normally 15 bytes
110  
111       Alignment demands, plus the minimum allocatable size restriction
112         make the normal worst-case wastage 15 bytes (i.e., up to 15
113         more bytes will be allocated than were requested in malloc), with
114         two exceptions:
115  	 1. Because requests for zero bytes allocate non-zero space,
116  	    the worst case wastage for a request of zero bytes is 24 bytes.
117  	 2. For requests >= mmap_threshold that are serviced via
118  	    mmap(), the worst case wastage is 8 bytes plus the remainder
119  	    from a system page (the minimal mmap unit); typically 4096 bytes.
120  
121  * Limitations
122  
123      Here are some features that are NOT currently supported
124  
125      * No user-definable hooks for callbacks and the like.
126      * No automated mechanism for fully checking that all accesses
127        to malloced memory stay within their bounds.
128      * No support for compaction.
129  
130  * Synopsis of compile-time options:
131  
132      People have reported using previous versions of this malloc on all
133      versions of Unix, sometimes by tweaking some of the defines
134      below. It has been tested most extensively on Solaris and
135      Linux. It is also reported to work on WIN32 platforms.
136      People have also reported adapting this malloc for use in
137      stand-alone embedded systems.
138  
139      The implementation is in straight, hand-tuned ANSI C.  Among other
140      consequences, it uses a lot of macros.  Because of this, to be at
141      all usable, this code should be compiled using an optimizing compiler
142      (for example gcc -O2) that can simplify expressions and control
143      paths.
144  
145    __STD_C                  (default: derived from C compiler defines)
146       Nonzero if using ANSI-standard C compiler, a C++ compiler, or
147       a C compiler sufficiently close to ANSI to get away with it.
148    DEBUG                    (default: NOT defined)
149       Define to enable debugging. Adds fairly extensive assertion-based
150       checking to help track down memory errors, but noticeably slows down
151       execution.
152    REALLOC_ZERO_BYTES_FREES (default: NOT defined)
153       Define this if you think that realloc(p, 0) should be equivalent
154       to free(p). Otherwise, since malloc returns a unique pointer for
155       malloc(0), so does realloc(p, 0).
156    HAVE_MEMCPY               (default: defined)
157       Define if you are not otherwise using ANSI STD C, but still
158       have memcpy and memset in your C library and want to use them.
159       Otherwise, simple internal versions are supplied.
160    USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
161       Define as 1 if you want the C library versions of memset and
162       memcpy called in realloc and calloc (otherwise macro versions are used).
163       At least on some platforms, the simple macro versions usually
164       outperform libc versions.
165    HAVE_MMAP                 (default: defined as 1)
166       Define to non-zero to optionally make malloc() use mmap() to
167       allocate very large blocks.
168    HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
169       Define to non-zero to optionally make realloc() use mremap() to
170       reallocate very large blocks.
171    malloc_getpagesize        (default: derived from system #includes)
172       Either a constant or routine call returning the system page size.
173    HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
174       Optionally define if you are on a system with a /usr/include/malloc.h
175       that declares struct mallinfo. It is not at all necessary to
176       define this even if you do, but doing so will ensure consistency.
177    INTERNAL_SIZE_T           (default: size_t)
178       Define to a 32-bit type (probably `unsigned int') if you are on a
179       64-bit machine, yet do not want or need to allow malloc requests of
180       greater than 2^31 to be handled. This saves space, especially for
181       very small chunks.
182    INTERNAL_LINUX_C_LIB      (default: NOT defined)
183       Defined only when compiled as part of Linux libc.
184       Also note that there is some odd internal name-mangling via defines
185       (for example, internally, `malloc' is named `mALLOc') needed
186       when compiling in this case. These look funny but don't otherwise
187       affect anything.
188    WIN32                     (default: undefined)
189       Define this on MS win (95, nt) platforms to compile in sbrk emulation.
190    LACKS_UNISTD_H            (default: undefined if not WIN32)
191       Define this if your system does not have a <unistd.h>.
192    LACKS_SYS_PARAM_H         (default: undefined if not WIN32)
193       Define this if your system does not have a <sys/param.h>.
194    MORECORE                  (default: sbrk)
195       The name of the routine to call to obtain more memory from the system.
196    MORECORE_FAILURE          (default: -1)
197       The value returned upon failure of MORECORE.
198    MORECORE_CLEARS           (default: 1)
199       true (1) if the routine mapped to MORECORE zeroes out memory (which
200       holds for sbrk).
201    DEFAULT_TRIM_THRESHOLD
202    DEFAULT_TOP_PAD
203    DEFAULT_MMAP_THRESHOLD
204    DEFAULT_MMAP_MAX
205       Default values of tunable parameters (described in detail below)
206       controlling interaction with host system routines (sbrk, mmap, etc).
207       These values may also be changed dynamically via mallopt(). The
208       preset defaults are those that give best performance for typical
209       programs/systems.
210    USE_DL_PREFIX             (default: undefined)
211       Prefix all public routines with the string 'dl'.  Useful to
212       quickly avoid procedure declaration conflicts and linker symbol
213       conflicts with existing memory allocation routines.
214  
215  
216  */
217  
218  
219  #ifndef __MALLOC_H__
220  #define __MALLOC_H__
221  
222  /* Preliminaries */
223  
224  #ifndef __STD_C
225  #ifdef __STDC__
226  #define __STD_C     1
227  #else
228  #if __cplusplus
229  #define __STD_C     1
230  #else
231  #define __STD_C     0
232  #endif /*__cplusplus*/
233  #endif /*__STDC__*/
234  #endif /*__STD_C*/
235  
236  #ifndef Void_t
237  #if (__STD_C || defined(WIN32))
238  #define Void_t      void
239  #else
240  #define Void_t      char
241  #endif
242  #endif /*Void_t*/
243  
244  #if __STD_C
245  #include <linux/stddef.h>	/* for size_t */
246  #else
247  #include <sys/types.h>
248  #endif	/* __STD_C */
249  
250  #ifdef __cplusplus
251  extern "C" {
252  #endif
253  
254  #if 0	/* not for U-Boot */
255  #include <stdio.h>	/* needed for malloc_stats */
256  #endif
257  
258  
259  /*
260    Compile-time options
261  */
262  
263  
264  /*
265      Debugging:
266  
267      Because freed chunks may be overwritten with link fields, this
268      malloc will often die when freed memory is overwritten by user
269      programs.  This can be very effective (albeit in an annoying way)
270      in helping track down dangling pointers.
271  
272      If you compile with -DDEBUG, a number of assertion checks are
273      enabled that will catch more memory errors. You probably won't be
274      able to make much sense of the actual assertion errors, but they
275      should help you locate incorrectly overwritten memory.  The
276      checking is fairly extensive, and will slow down execution
277      noticeably. Calling malloc_stats or mallinfo with DEBUG set will
278      attempt to check every non-mmapped allocated and free chunk in the
279      course of computing the summaries. (By nature, mmapped regions
280      cannot be checked very much automatically.)
281  
282      Setting DEBUG may also be helpful if you are trying to modify
283      this code. The assertions in the check routines spell out in more
284      detail the assumptions and invariants underlying the algorithms.
285  
286  */
287  
288  /*
289    INTERNAL_SIZE_T is the word-size used for internal bookkeeping
290    of chunk sizes. On a 64-bit machine, you can reduce malloc
291    overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
292    at the expense of not being able to handle requests greater than
293    2^31. This limitation is hardly ever a concern; you are encouraged
294    to set this. However, the default version is the same as size_t.
295  */
296  
297  #ifndef INTERNAL_SIZE_T
298  #define INTERNAL_SIZE_T size_t
299  #endif
300  
301  /*
302    REALLOC_ZERO_BYTES_FREES should be set if a call to
303    realloc with zero bytes should be the same as a call to free.
304    Some people think it should. Otherwise, since this malloc
305    returns a unique pointer for malloc(0), so does realloc(p, 0).
306  */
307  
308  
309  /*   #define REALLOC_ZERO_BYTES_FREES */
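/*
  Behaviour sketch for the option above (illustrative only):

	void *p = malloc(40);
	void *q = realloc(p, 0);

  Without REALLOC_ZERO_BYTES_FREES, q points to a minimum-sized chunk that
  must eventually be passed to free(); with the option defined, the call is
  equivalent to free(p) and q is null.
*/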
310  
311  
312  /*
313    WIN32 causes an emulation of sbrk to be compiled in;
314    mmap-based options are not currently supported in WIN32.
315  */
316  
317  /* #define WIN32 */
318  #ifdef WIN32
319  #define MORECORE wsbrk
320  #define HAVE_MMAP 0
321  
322  #define LACKS_UNISTD_H
323  #define LACKS_SYS_PARAM_H
324  
325  /*
326    Include 'windows.h' to get the necessary declarations for the
327    Microsoft Visual C++ data structures and routines used in the 'sbrk'
328    emulation.
329  
330    Define WIN32_LEAN_AND_MEAN so that only the essential Microsoft
331    Visual C++ header files are included.
332  */
333  #define WIN32_LEAN_AND_MEAN
334  #include <windows.h>
335  #endif
336  
337  
338  /*
339    HAVE_MEMCPY should be defined if you are not otherwise using
340    ANSI STD C, but still have memcpy and memset in your C library
341    and want to use them in calloc and realloc. Otherwise simple
342    macro versions are defined here.
343  
344    USE_MEMCPY should be defined as 1 if you actually want to
345    have memset and memcpy called. People report that the macro
346    versions are often enough faster than libc versions on many
347    systems that it is better to use them.
348  
349  */
350  
351  #define HAVE_MEMCPY
352  
353  #ifndef USE_MEMCPY
354  #ifdef HAVE_MEMCPY
355  #define USE_MEMCPY 1
356  #else
357  #define USE_MEMCPY 0
358  #endif
359  #endif
360  
361  #if (__STD_C || defined(HAVE_MEMCPY))
362  
363  #if __STD_C
364  void* memset(void*, int, size_t);
365  void* memcpy(void*, const void*, size_t);
366  #else
367  #ifdef WIN32
368  /* On Win32 platforms, 'memset()' and 'memcpy()' are already declared in */
369  /* 'windows.h' */
370  #else
371  Void_t* memset();
372  Void_t* memcpy();
373  #endif
374  #endif
375  #endif
376  
377  #if USE_MEMCPY
378  
379  /* The following macros are only invoked with (2n+1)-multiples of
380     INTERNAL_SIZE_T units, with a positive integer n. This is exploited
381     for fast inline execution when n is small. */
382  
383  #define MALLOC_ZERO(charp, nbytes)                                            \
384  do {                                                                          \
385    INTERNAL_SIZE_T mzsz = (nbytes);                                            \
386    if(mzsz <= 9*sizeof(mzsz)) {                                                \
387      INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
388      if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
389  				     *mz++ = 0;                               \
390        if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
391  				     *mz++ = 0;                               \
392  	if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
393  				     *mz++ = 0; }}}                           \
394  				     *mz++ = 0;                               \
395  				     *mz++ = 0;                               \
396  				     *mz   = 0;                               \
397    } else memset((charp), 0, mzsz);                                            \
398  } while(0)
399  
400  #define MALLOC_COPY(dest,src,nbytes)                                          \
401  do {                                                                          \
402    INTERNAL_SIZE_T mcsz = (nbytes);                                            \
403    if(mcsz <= 9*sizeof(mcsz)) {                                                \
404      INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
405      INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
406      if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
407  				     *mcdst++ = *mcsrc++;                     \
408        if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
409  				     *mcdst++ = *mcsrc++;                     \
410  	if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
411  				     *mcdst++ = *mcsrc++; }}}                 \
412  				     *mcdst++ = *mcsrc++;                     \
413  				     *mcdst++ = *mcsrc++;                     \
414  				     *mcdst   = *mcsrc  ;                     \
415    } else memcpy(dest, src, mcsz);                                             \
416  } while(0)
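/*
  Expansion sketch for the two macros above, assuming a 4-byte
  INTERNAL_SIZE_T so that the inline path covers up to 9 words (36 bytes):

	MALLOC_COPY(d, s, 20);
	MALLOC_COPY(d, s, 400);

  The first call copies 5 words inline (the three unconditional word copies
  at the end plus the pair guarded by the "mcsz >= 5*sizeof(mcsz)" test);
  the second exceeds 9 words and falls back to memcpy(d, s, 400).
*/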
417  
418  #else /* !USE_MEMCPY */
419  
420  /* Use Duff's device for good zeroing/copying performance. */
421  
422  #define MALLOC_ZERO(charp, nbytes)                                            \
423  do {                                                                          \
424    INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
425    long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
426    if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
427    switch (mctmp) {                                                            \
428      case 0: for(;;) { *mzp++ = 0;                                             \
429      case 7:           *mzp++ = 0;                                             \
430      case 6:           *mzp++ = 0;                                             \
431      case 5:           *mzp++ = 0;                                             \
432      case 4:           *mzp++ = 0;                                             \
433      case 3:           *mzp++ = 0;                                             \
434      case 2:           *mzp++ = 0;                                             \
435      case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
436    }                                                                           \
437  } while(0)
438  
439  #define MALLOC_COPY(dest,src,nbytes)                                          \
440  do {                                                                          \
441    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
442    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
443    long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
444    if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
445    switch (mctmp) {                                                            \
446      case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
447      case 7:           *mcdst++ = *mcsrc++;                                    \
448      case 6:           *mcdst++ = *mcsrc++;                                    \
449      case 5:           *mcdst++ = *mcsrc++;                                    \
450      case 4:           *mcdst++ = *mcsrc++;                                    \
451      case 3:           *mcdst++ = *mcsrc++;                                    \
452      case 2:           *mcdst++ = *mcsrc++;                                    \
453      case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
454    }                                                                           \
455  } while(0)
456  
457  #endif
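/*
  The switch-into-loop construct above is Duff's device: the switch jumps
  into the middle of an 8-way unrolled loop so that the leftover words
  (length mod 8) are handled first, and the loop then performs the
  remaining full 8-word passes.  A stand-alone sketch of the same idea
  (illustrative only; the macros above use slightly different counting):

	static void zero_words(INTERNAL_SIZE_T *p, long nwords)
	{
		long passes = (nwords + 7) / 8;

		if (nwords <= 0)
			return;
		switch (nwords % 8) {
		case 0: do { *p++ = 0;
		case 7:      *p++ = 0;
		case 6:      *p++ = 0;
		case 5:      *p++ = 0;
		case 4:      *p++ = 0;
		case 3:      *p++ = 0;
		case 2:      *p++ = 0;
		case 1:      *p++ = 0;
			} while (--passes > 0);
		}
	}
*/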
458  
459  
460  /*
461    Define HAVE_MMAP to optionally make malloc() use mmap() to
462    allocate very large blocks.  These will be returned to the
463    operating system immediately after a free().
464  */
465  
466  /***
467  #ifndef HAVE_MMAP
468  #define HAVE_MMAP 1
469  #endif
470  ***/
471  #undef	HAVE_MMAP	/* Not available for U-Boot */
472  
473  /*
474    Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
475    large blocks.  This is currently only possible on Linux with
476    kernel versions newer than 1.3.77.
477  */
478  
479  /***
480  #ifndef HAVE_MREMAP
481  #ifdef INTERNAL_LINUX_C_LIB
482  #define HAVE_MREMAP 1
483  #else
484  #define HAVE_MREMAP 0
485  #endif
486  #endif
487  ***/
488  #undef	HAVE_MREMAP	/* Not available for U-Boot */
489  
490  #ifdef HAVE_MMAP
491  
492  #include <unistd.h>
493  #include <fcntl.h>
494  #include <sys/mman.h>
495  
496  #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
497  #define MAP_ANONYMOUS MAP_ANON
498  #endif
499  
500  #endif /* HAVE_MMAP */
501  
502  /*
503    Access to system page size. To the extent possible, this malloc
504    manages memory from the system in page-size units.
505  
506    The following mechanics for getpagesize were adapted from
507    bsd/gnu getpagesize.h
508  */
509  
510  #define	LACKS_UNISTD_H	/* Shortcut for U-Boot */
511  #define	malloc_getpagesize	4096
512  
513  #ifndef LACKS_UNISTD_H
514  #  include <unistd.h>
515  #endif
516  
517  #ifndef malloc_getpagesize
518  #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
519  #    ifndef _SC_PAGE_SIZE
520  #      define _SC_PAGE_SIZE _SC_PAGESIZE
521  #    endif
522  #  endif
523  #  ifdef _SC_PAGE_SIZE
524  #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
525  #  else
526  #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
527         extern size_t getpagesize();
528  #      define malloc_getpagesize getpagesize()
529  #    else
530  #      ifdef WIN32
531  #        define malloc_getpagesize (4096) /* TBD: Use 'GetSystemInfo' instead */
532  #      else
533  #        ifndef LACKS_SYS_PARAM_H
534  #          include <sys/param.h>
535  #        endif
536  #        ifdef EXEC_PAGESIZE
537  #          define malloc_getpagesize EXEC_PAGESIZE
538  #        else
539  #          ifdef NBPG
540  #            ifndef CLSIZE
541  #              define malloc_getpagesize NBPG
542  #            else
543  #              define malloc_getpagesize (NBPG * CLSIZE)
544  #            endif
545  #          else
546  #            ifdef NBPC
547  #              define malloc_getpagesize NBPC
548  #            else
549  #              ifdef PAGESIZE
550  #                define malloc_getpagesize PAGESIZE
551  #              else
552  #                define malloc_getpagesize (4096) /* just guess */
553  #              endif
554  #            endif
555  #          endif
556  #        endif
557  #      endif
558  #    endif
559  #  endif
560  #endif
561  
562  
563  /*
564  
565    This version of malloc supports the standard SVID/XPG mallinfo
566    routine that returns a struct containing the same kind of
567    information you can get from malloc_stats. It should work on
568    any SVID/XPG compliant system that has a /usr/include/malloc.h
569    defining struct mallinfo. (If you'd like to install such a thing
570    yourself, cut out the preliminary declarations as described above
571    and below and save them in a malloc.h file. But there's no
572    compelling reason to bother to do this.)
573  
574    The main declaration needed is the mallinfo struct that is returned
575    (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
576    bunch of fields, most of which are not even meaningful in this
577    version of malloc. Some of these fields are instead filled by
578    mallinfo() with other numbers that might possibly be of interest.
579  
580    HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
581    /usr/include/malloc.h file that includes a declaration of struct
582    mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
583    version is declared below.  These must be precisely the same for
584    mallinfo() to work.
585  
586  */
587  
588  /* #define HAVE_USR_INCLUDE_MALLOC_H */
589  
590  #ifdef HAVE_USR_INCLUDE_MALLOC_H
591  #include "/usr/include/malloc.h"
592  #else
593  
594  /* SVID2/XPG mallinfo structure */
595  
596  struct mallinfo {
597    int arena;    /* total space allocated from system */
598    int ordblks;  /* number of non-inuse chunks */
599    int smblks;   /* unused -- always zero */
600    int hblks;    /* number of mmapped regions */
601    int hblkhd;   /* total space in mmapped regions */
602    int usmblks;  /* unused -- always zero */
603    int fsmblks;  /* unused -- always zero */
604    int uordblks; /* total allocated space */
605    int fordblks; /* total non-inuse space */
606    int keepcost; /* top-most, releasable (via malloc_trim) space */
607  };
608  
609  /* SVID2/XPG mallopt options */
610  
611  #define M_MXFAST  1    /* UNUSED in this malloc */
612  #define M_NLBLKS  2    /* UNUSED in this malloc */
613  #define M_GRAIN   3    /* UNUSED in this malloc */
614  #define M_KEEP    4    /* UNUSED in this malloc */
615  
616  #endif
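/*
  A usage sketch for the struct above (illustrative only; the fields shown
  are among those that carry real data in this malloc):

	struct mallinfo mi = mallinfo();

	printf("arena %d, allocated %d, free %d, trimmable %d\n",
	       mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
*/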
617  
618  /* mallopt options that actually do something */
619  
620  #define M_TRIM_THRESHOLD    -1
621  #define M_TOP_PAD           -2
622  #define M_MMAP_THRESHOLD    -3
623  #define M_MMAP_MAX          -4
624  
625  
626  #ifndef DEFAULT_TRIM_THRESHOLD
627  #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
628  #endif
629  
630  /*
631      M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
632        to keep before releasing via malloc_trim in free().
633  
634        Automatic trimming is mainly useful in long-lived programs.
635        Because trimming via sbrk can be slow on some systems, and can
636        sometimes be wasteful (in cases where programs immediately
637        afterward allocate more large chunks) the value should be high
638        enough so that your overall system performance would improve by
639        releasing.
640  
641        The trim threshold and the mmap control parameters (see below)
642        can be traded off with one another. Trimming and mmapping are
643        two different ways of releasing unused memory back to the
644        system. Between these two, it is often possible to keep
645        system-level demands of a long-lived program down to a bare
646        minimum. For example, in one test suite of sessions measuring
647        the XF86 X server on Linux, using a trim threshold of 128K and a
648        mmap threshold of 192K led to near-minimal long term resource
649        consumption.
650  
651        If you are using this malloc in a long-lived program, it should
652        pay to experiment with these values.  As a rough guide, you
653        might set to a value close to the average size of a process
654        (program) running on your system.  Releasing this much memory
655        would allow such a process to run in memory.  Generally, it's
656        worth it to tune for trimming rather than memory mapping when a
657        program undergoes phases where several large chunks are
658        allocated and released in ways that can reuse each other's
659        storage, perhaps mixed with phases where there are no such
660        chunks at all.  And in well-behaved long-lived programs,
661        controlling release of large blocks via trimming versus mapping
662        is usually faster.
663  
664        However, in most programs, these parameters serve mainly as
665        protection against the system-level effects of carrying around
666        massive amounts of unneeded memory. Since frequent calls to
667        sbrk, mmap, and munmap otherwise degrade performance, the default
668        parameters are set to relatively high values that serve only as
669        safeguards.
670  
671        The default trim value is high enough to cause trimming only in
672        fairly extreme (by current memory consumption standards) cases.
673        It must be greater than page size to have any useful effect.  To
674      disable trimming completely, you can set to (unsigned long)(-1).
675  
676  
677  */
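/*
  Tuning sketch using the mallopt options above (the values are
  illustrative only; mallopt returns 1 if it changed the parameter, else 0):

	mallopt(M_TRIM_THRESHOLD, 64 * 1024);
	mallopt(M_TOP_PAD, 16 * 1024);

  As noted above, passing the maximum unsigned value as the trim threshold
  disables trimming altogether.
*/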
678  
679  
680  #ifndef DEFAULT_TOP_PAD
681  #define DEFAULT_TOP_PAD        (0)
682  #endif
683  
684  /*
685      M_TOP_PAD is the amount of extra `padding' space to allocate or
686        retain whenever sbrk is called. It is used in two ways internally:
687  
688        * When sbrk is called to extend the top of the arena to satisfy
689  	a new malloc request, this much padding is added to the sbrk
690  	request.
691  
692        * When malloc_trim is called automatically from free(),
693  	it is used as the `pad' argument.
694  
695        In both cases, the actual amount of padding is rounded
696        so that the end of the arena is always a system page boundary.
697  
698        The main reason for using padding is to avoid calling sbrk so
699        often. Having even a small pad greatly reduces the likelihood
700        that nearly every malloc request during program start-up (or
701        after trimming) will invoke sbrk, which needlessly wastes
702        time.
703  
704        Automatic rounding-up to page-size units is normally sufficient
705        to avoid measurable overhead, so the default is 0.  However, in
706        systems where sbrk is relatively slow, it can pay to increase
707        this value, at the expense of carrying around more memory than
708        the program needs.
709  
710  */
711  
712  
713  #ifndef DEFAULT_MMAP_THRESHOLD
714  #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
715  #endif
716  
717  /*
718  
719      M_MMAP_THRESHOLD is the request size threshold for using mmap()
720        to service a request. Requests of at least this size that cannot
721        be allocated using already-existing space will be serviced via mmap.
722        (If enough normal freed space already exists it is used instead.)
723  
724        Using mmap segregates relatively large chunks of memory so that
725        they can be individually obtained and released from the host
726        system. A request serviced through mmap is never reused by any
727        other request (at least not directly; the system may just so
728        happen to remap successive requests to the same locations).
729  
730        Segregating space in this way has the benefit that mmapped space
731        can ALWAYS be individually released back to the system, which
732        helps keep the system level memory demands of a long-lived
733        program low. Mapped memory can never become `locked' between
734        other chunks, as can happen with normally allocated chunks, which
735        means that even trimming via malloc_trim would not release them.
736  
737        However, it has the disadvantages that:
738  
739  	 1. The space cannot be reclaimed, consolidated, and then
740  	    used to service later requests, as happens with normal chunks.
741  	 2. It can lead to more wastage because of mmap page alignment
742  	    requirements
743  	 3. It causes malloc performance to be more dependent on host
744  	    system memory management support routines which may vary in
745  	    implementation quality and may impose arbitrary
746  	    limitations. Generally, servicing a request via normal
747  	    malloc steps is faster than going through a system's mmap.
748  
749        All together, these considerations should lead you to use mmap
750        only for relatively large requests.
751  
752  
753  */
754  
755  
756  #ifndef DEFAULT_MMAP_MAX
757  #ifdef HAVE_MMAP
758  #define DEFAULT_MMAP_MAX       (64)
759  #else
760  #define DEFAULT_MMAP_MAX       (0)
761  #endif
762  #endif
763  
764  /*
765      M_MMAP_MAX is the maximum number of requests to simultaneously
766        service using mmap. This parameter exists because:
767  
768  	 1. Some systems have a limited number of internal tables for
769  	    use by mmap.
770  	 2. In most systems, overreliance on mmap can degrade overall
771  	    performance.
772  	 3. If a program allocates many large regions, it is probably
773  	    better off using normal sbrk-based allocation routines that
774  	    can reclaim and reallocate normal heap memory. Using a
775  	    small value allows transition into this mode after the
776  	    first few allocations.
777  
778        Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
779        the default value is 0, and attempts to set it to non-zero values
780        in mallopt will fail.
781  */
782  
783  
784  /*
785      USE_DL_PREFIX will prefix all public routines with the string 'dl'.
786        Useful to quickly avoid procedure declaration conflicts and linker
787        symbol conflicts with existing memory allocation routines.
788  
789  */
790  
791  /* #define USE_DL_PREFIX */
792  
793  
794  /*
795  
796    Special defines for linux libc
797  
798    Except when compiled using these special defines for Linux libc
799    using weak aliases, this malloc is NOT designed to work in
800    multithreaded applications.  No semaphores or other concurrency
801    control are provided to ensure that multiple malloc or free calls
802      don't run at the same time, which could be disastrous. A single
803    semaphore could be used across malloc, realloc, and free (which is
804    essentially the effect of the linux weak alias approach). It would
805    be hard to obtain finer granularity.
806  
807  */
808  
809  
810  #ifdef INTERNAL_LINUX_C_LIB
811  
812  #if __STD_C
813  
814  Void_t * __default_morecore_init (ptrdiff_t);
815  Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;
816  
817  #else
818  
819  Void_t * __default_morecore_init ();
820  Void_t *(*__morecore)() = __default_morecore_init;
821  
822  #endif
823  
824  #define MORECORE (*__morecore)
825  #define MORECORE_FAILURE 0
826  #define MORECORE_CLEARS 1
827  
828  #else /* INTERNAL_LINUX_C_LIB */
829  
830  #if __STD_C
831  extern Void_t*     sbrk(ptrdiff_t);
832  #else
833  extern Void_t*     sbrk();
834  #endif
835  
836  #ifndef MORECORE
837  #define MORECORE sbrk
838  #endif
839  
840  #ifndef MORECORE_FAILURE
841  #define MORECORE_FAILURE -1
842  #endif
843  
844  #ifndef MORECORE_CLEARS
845  #define MORECORE_CLEARS 1
846  #endif
847  
848  #endif /* INTERNAL_LINUX_C_LIB */
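/*
  Sketch of the contract a MORECORE routine must satisfy (illustrative
  only, not the actual U-Boot sbrk(): on success it returns the old break
  and advances it by `increment'; on failure it returns MORECORE_FAILURE):

	void *sbrk(ptrdiff_t increment)
	{
		ulong old_brk = mem_malloc_brk;
		ulong new_brk = old_brk + increment;

		if (new_brk < mem_malloc_start || new_brk > mem_malloc_end)
			return (void *)MORECORE_FAILURE;
		mem_malloc_brk = new_brk;
		return (void *)old_brk;
	}

  mem_malloc_start, mem_malloc_end and mem_malloc_brk are declared near the
  end of this header.
*/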
849  
850  #if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)
851  
852  #define cALLOc		__libc_calloc
853  #define fREe		__libc_free
854  #define mALLOc		__libc_malloc
855  #define mEMALIGn	__libc_memalign
856  #define rEALLOc		__libc_realloc
857  #define vALLOc		__libc_valloc
858  #define pvALLOc		__libc_pvalloc
859  #define mALLINFo	__libc_mallinfo
860  #define mALLOPt		__libc_mallopt
861  
862  #pragma weak calloc = __libc_calloc
863  #pragma weak free = __libc_free
864  #pragma weak cfree = __libc_free
865  #pragma weak malloc = __libc_malloc
866  #pragma weak memalign = __libc_memalign
867  #pragma weak realloc = __libc_realloc
868  #pragma weak valloc = __libc_valloc
869  #pragma weak pvalloc = __libc_pvalloc
870  #pragma weak mallinfo = __libc_mallinfo
871  #pragma weak mallopt = __libc_mallopt
872  
873  #else
874  
875  #if CONFIG_IS_ENABLED(SYS_MALLOC_SIMPLE)
876  #define malloc malloc_simple
877  #define realloc realloc_simple
878  #define memalign memalign_simple
879  static inline void free(void *ptr) {}
880  void *calloc(size_t nmemb, size_t size);
881  void *memalign_simple(size_t alignment, size_t bytes);
882  void *realloc_simple(void *ptr, size_t size);
883  #else
884  
885  # ifdef USE_DL_PREFIX
886  # define cALLOc		dlcalloc
887  # define fREe		dlfree
888  # define mALLOc		dlmalloc
889  # define mEMALIGn	dlmemalign
890  # define rEALLOc		dlrealloc
891  # define vALLOc		dlvalloc
892  # define pvALLOc		dlpvalloc
893  # define mALLINFo	dlmallinfo
894  # define mALLOPt		dlmallopt
895  # else /* USE_DL_PREFIX */
896  # define cALLOc		calloc
897  # define fREe		free
898  # define mALLOc		malloc
899  # define mEMALIGn	memalign
900  # define rEALLOc		realloc
901  # define vALLOc		valloc
902  # define pvALLOc		pvalloc
903  # define mALLINFo	mallinfo
904  # define mALLOPt		mallopt
905  # endif /* USE_DL_PREFIX */
906  
907  #endif
908  
909  /* Set up pre-relocation malloc() ready for use */
910  int initf_malloc(void);
911  
912  /* Public routines */
913  
914  /* Simple versions which can be used when space is tight */
915  void *malloc_simple(size_t size);
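/*
  malloc_simple() is the trivial allocator used before relocation, paired
  with the SYS_MALLOC_SIMPLE mapping above under which free() is a no-op.
  A rough, illustrative sketch of such a bump allocator (not the actual
  U-Boot implementation; the pool size and names below are arbitrary):

	static char pool[0x2000];
	static size_t used;

	void *bump_alloc(size_t size)
	{
		void *p;

		if (size > sizeof(pool) - used)
			return NULL;
		p = pool + used;
		used += size;
		return p;
	}
*/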
916  
917  #pragma GCC visibility push(hidden)
918  # if __STD_C
919  
920  Void_t* mALLOc(size_t);
921  void    fREe(Void_t*);
922  Void_t* rEALLOc(Void_t*, size_t);
923  Void_t* mEMALIGn(size_t, size_t);
924  Void_t* vALLOc(size_t);
925  Void_t* pvALLOc(size_t);
926  Void_t* cALLOc(size_t, size_t);
927  void    cfree(Void_t*);
928  int     malloc_trim(size_t);
929  size_t  malloc_usable_size(Void_t*);
930  void    malloc_stats(void);
931  int     mALLOPt(int, int);
932  struct mallinfo mALLINFo(void);
933  # else
934  Void_t* mALLOc();
935  void    fREe();
936  Void_t* rEALLOc();
937  Void_t* mEMALIGn();
938  Void_t* vALLOc();
939  Void_t* pvALLOc();
940  Void_t* cALLOc();
941  void    cfree();
942  int     malloc_trim();
943  size_t  malloc_usable_size();
944  void    malloc_stats();
945  int     mALLOPt();
946  struct mallinfo mALLINFo();
947  # endif
948  #endif
949  #pragma GCC visibility pop
950  
951  /*
952   * Begin and End of memory area for malloc(), and current "brk"
953   */
954  extern ulong mem_malloc_start;
955  extern ulong mem_malloc_end;
956  extern ulong mem_malloc_brk;
957  
958  void mem_malloc_init(ulong start, ulong size);
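/*
  A minimal initialisation sketch (the base address and size are
  illustrative only; real boards derive them from their memory layout):

	#define EXAMPLE_MALLOC_BASE	0x84000000UL
	#define EXAMPLE_MALLOC_SIZE	(1UL << 20)

	mem_malloc_init(EXAMPLE_MALLOC_BASE, EXAMPLE_MALLOC_SIZE);

  After the call, mem_malloc_start and mem_malloc_end delimit the arena and
  mem_malloc_brk records how much of it the MORECORE/sbrk path has handed
  to the allocator so far.
*/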
959  
960  #ifdef __cplusplus
961  };  /* end of extern "C" */
962  #endif
963  
964  #endif /* __MALLOC_H__ */
965