1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
4  *
5  * (C) SGI 2006, Christoph Lameter
6  * 	Cleaned up and restructured to ease the addition of alternative
7  * 	implementations of SLAB allocators.
8  * (C) Linux Foundation 2008-2013
9  *      Unified interface for all slab allocators
10  */
11 
12 #ifndef _LINUX_SLAB_H
13 #define	_LINUX_SLAB_H
14 
15 #include <linux/cache.h>
16 #include <linux/gfp.h>
17 #include <linux/overflow.h>
18 #include <linux/types.h>
19 #include <linux/workqueue.h>
20 #include <linux/percpu-refcount.h>
21 #include <linux/cleanup.h>
22 #include <linux/hash.h>
23 
24 
25 /*
26  * Flags to pass to kmem_cache_create().
27  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
28  */
29 /* DEBUG: Perform (expensive) checks on alloc/free */
30 #define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
31 /* DEBUG: Red zone objs in a cache */
32 #define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
33 /* DEBUG: Poison objects */
34 #define SLAB_POISON		((slab_flags_t __force)0x00000800U)
35 /* Indicate a kmalloc slab */
36 #define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
37 /* Align objs on cache lines */
38 #define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
39 /* Use GFP_DMA memory */
40 #define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
41 /* Use GFP_DMA32 memory */
42 #define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
43 /* DEBUG: Store the last owner for bug hunting */
44 #define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
45 /* Panic if kmem_cache_create() fails */
46 #define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
47 /*
48  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
49  *
50  * This delays freeing the SLAB page by a grace period; it does _NOT_
51  * delay object freeing. This means that if you do kmem_cache_free()
52  * that memory location is free to be reused at any time. Thus it may
53  * be possible to see another object there in the same RCU grace period.
54  *
55  * This feature only ensures the memory location backing the object
56  * stays valid, the trick to using this is relying on an independent
57  * object validation pass. Something like:
58  *
59  * begin:
60  *  rcu_read_lock();
61  *  obj = lockless_lookup(key);
62  *  if (obj) {
63  *    if (!try_get_ref(obj)) { // might fail for free objects
64  *      rcu_read_unlock();
65  *      goto begin;
66  *    }
67  *    if (obj->key != key) { // not the object we expected
68  *      put_ref(obj);
69  *      rcu_read_unlock();
70  *      goto begin;
71  *    }
72  *  }
73  *  rcu_read_unlock();
74  *
75  * This is useful if we need to approach a kernel structure obliquely,
76  * from its address obtained without the usual locking. We can lock
77  * the structure to stabilize it and check it's still at the given address,
78  * but only if we can be sure that the memory has not meanwhile been reused
79  * for some other kind of object (which our subsystem's lock might corrupt).
80  *
81  * rcu_read_lock before reading the address, then rcu_read_unlock after
82  * taking the spinlock within the structure expected at that address.
83  *
84  * Note that it is not possible to acquire a lock within a structure
85  * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
86  * as described above.  The reason is that SLAB_TYPESAFE_BY_RCU pages
87  * are not zeroed before being given to the slab, which means that any
88  * locks must be initialized after each and every kmem_cache_alloc().
89  * Alternatively, make the ctor passed to kmem_cache_create() initialize
90  * the locks at page-allocation time, as is done in __i915_request_ctor(),
91  * sighand_ctor(), and anon_vma_ctor().  Such a ctor permits readers
92  * to safely acquire those ctor-initialized locks under rcu_read_lock()
93  * protection.
94  *
95  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
96  */
97 /* Defer freeing slabs to RCU */
98 #define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
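/*
 * A minimal SLAB_TYPESAFE_BY_RCU sketch (the "foo" names are hypothetical,
 * not from this file): the ctor runs once per backing page, so the lock and
 * refcount stay valid across object reuse, and the lockless lookup above can
 * take a reference under rcu_read_lock(). The allocation path publishes the
 * object by raising the refcount (e.g. refcount_set(&f->ref, 1)).
 *
 *   struct foo {
 *           spinlock_t lock;
 *           refcount_t ref;
 *           unsigned long key;
 *   };
 *
 *   static void foo_ctor(void *addr)
 *   {
 *           struct foo *f = addr;
 *
 *           spin_lock_init(&f->lock);
 *           refcount_set(&f->ref, 0);  // try_get_ref() == refcount_inc_not_zero()
 *   }
 *
 *   foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                 SLAB_TYPESAFE_BY_RCU, foo_ctor);
 */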
99 /* Spread some memory over cpuset */
100 #define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
101 /* Trace allocations and frees */
102 #define SLAB_TRACE		((slab_flags_t __force)0x00200000U)
103 
104 /* Flag to prevent checks on free */
105 #ifdef CONFIG_DEBUG_OBJECTS
106 # define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
107 #else
108 # define SLAB_DEBUG_OBJECTS	0
109 #endif
110 
111 /* Avoid kmemleak tracing */
112 #define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)
113 
114 /*
115  * Prevent merging with compatible kmem caches. This flag should be used
116  * cautiously. Valid use cases:
117  *
118  * - caches created for self-tests (e.g. kunit)
119  * - general caches created and used by a subsystem, only when a
120  *   (subsystem-specific) debug option is enabled
121  * - performance critical caches, should be very rare and consulted with slab
122  *   maintainers, and not used together with CONFIG_SLUB_TINY
123  */
124 #define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)
125 
126 /* Fault injection mark */
127 #ifdef CONFIG_FAILSLAB
128 # define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
129 #else
130 # define SLAB_FAILSLAB		0
131 #endif
132 /* Account to memcg */
133 #ifdef CONFIG_MEMCG_KMEM
134 # define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
135 #else
136 # define SLAB_ACCOUNT		0
137 #endif
138 
139 #ifdef CONFIG_KASAN_GENERIC
140 #define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
141 #else
142 #define SLAB_KASAN		0
143 #endif
144 
145 /*
146  * Ignore user specified debugging flags.
147  * Intended for caches created for self-tests so they have only flags
148  * specified in the code and other flags are ignored.
149  */
150 #define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)
151 
152 #ifdef CONFIG_KFENCE
153 #define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
154 #else
155 #define SLAB_SKIP_KFENCE	0
156 #endif
157 
158 /* The following flags affect the page allocator grouping pages by mobility */
159 /* Objects are reclaimable */
160 #ifndef CONFIG_SLUB_TINY
161 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
162 #else
163 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
164 #endif
165 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
166 
167 /*
168  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
169  *
170  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
171  *
172  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
173  * Both make kfree a no-op.
174  */
175 #define ZERO_SIZE_PTR ((void *)16)
176 
177 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
178 				(unsigned long)ZERO_SIZE_PTR)
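/*
 * For instance (sketch; "buf" is an illustrative local):
 *
 *   char *buf = kmalloc(0, GFP_KERNEL);       // returns ZERO_SIZE_PTR, not NULL
 *
 *   if (ZERO_OR_NULL_PTR(buf))                // true for NULL and ZERO_SIZE_PTR
 *           pr_debug("nothing to access\n");  // neither may be dereferenced
 *   kfree(buf);                               // a no-op for either value
 */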
179 
180 #include <linux/kasan.h>
181 
182 struct list_lru;
183 struct mem_cgroup;
184 /*
185  * struct kmem_cache related prototypes
186  */
187 bool slab_is_available(void);
188 
189 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
190 			unsigned int align, slab_flags_t flags,
191 			void (*ctor)(void *));
192 struct kmem_cache *kmem_cache_create_usercopy(const char *name,
193 			unsigned int size, unsigned int align,
194 			slab_flags_t flags,
195 			unsigned int useroffset, unsigned int usersize,
196 			void (*ctor)(void *));
197 void kmem_cache_destroy(struct kmem_cache *s);
198 int kmem_cache_shrink(struct kmem_cache *s);
199 
200 /*
201  * Please use this macro to create slab caches. Simply specify the
202  * name of the structure and maybe some flags that are listed above.
203  *
204  * The alignment of the struct determines object alignment. If you
205  * for example add ____cacheline_aligned_in_smp to the struct declaration
206  * then the objects will be properly aligned in SMP configurations.
207  */
208 #define KMEM_CACHE(__struct, __flags)					\
209 		kmem_cache_create(#__struct, sizeof(struct __struct),	\
210 			__alignof__(struct __struct), (__flags), NULL)
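/*
 * For example (sketch; "my_obj" is a hypothetical structure):
 *
 *   struct my_obj {
 *           int id;
 *           struct list_head node;
 *   } ____cacheline_aligned_in_smp;
 *
 *   static struct kmem_cache *my_obj_cachep;
 *
 *   my_obj_cachep = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *   // creates a cache named "my_obj", sized and aligned like the struct
 */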
211 
212 /*
213  * To whitelist a single field for copying to/from userspace, use this
214  * macro instead of KMEM_CACHE() above.
215  */
216 #define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
217 		kmem_cache_create_usercopy(#__struct,			\
218 			sizeof(struct __struct),			\
219 			__alignof__(struct __struct), (__flags),	\
220 			offsetof(struct __struct, __field),		\
221 			sizeof_field(struct __struct, __field), NULL)
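/*
 * For example (sketch; "my_ioctl_buf" and "payload" are hypothetical):
 *
 *   struct my_ioctl_buf {
 *           spinlock_t lock;        // never exposed to userspace
 *           char payload[64];       // the only usercopy-whitelisted region
 *   };
 *
 *   buf_cachep = KMEM_CACHE_USERCOPY(my_ioctl_buf, SLAB_ACCOUNT, payload);
 */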
222 
223 /*
224  * Common kmalloc functions provided by all allocators
225  */
226 void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
227 void kfree(const void *objp);
228 void kfree_sensitive(const void *objp);
229 size_t __ksize(const void *objp);
230 
231 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
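/*
 * The DEFINE_FREE() above enables scope-based cleanup via <linux/cleanup.h>.
 * A minimal sketch (hypothetical function and names):
 *
 *   int parse_blob(const u8 *src, size_t len)
 *   {
 *           u8 *tmp __free(kfree) = kmalloc(len, GFP_KERNEL);
 *
 *           if (!tmp)
 *                   return -ENOMEM;
 *           memcpy(tmp, src, len);
 *           ...                     // tmp is kfree()d on every return path
 *           return 0;
 *   }
 */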
232 
233 /**
234  * ksize - Report actual allocation size of associated object
235  *
236  * @objp: Pointer returned from a prior kmalloc()-family allocation.
237  *
238  * This should not be used for writing beyond the originally requested
239  * allocation size. Either use krealloc() or round up the allocation size
240  * with kmalloc_size_roundup() prior to allocation. If this is used to
241  * access beyond the originally requested allocation size, UBSAN_BOUNDS
242  * and/or FORTIFY_SOURCE may trip, since they only know about the
243  * originally allocated size via the __alloc_size attribute.
244  */
245 size_t ksize(const void *objp);
246 
247 #ifdef CONFIG_PRINTK
248 bool kmem_dump_obj(void *object);
249 #else
250 static inline bool kmem_dump_obj(void *object) { return false; }
251 #endif
252 
253 /*
254  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
255  * alignment larger than the alignment of a 64-bit integer.
256  * Setting ARCH_DMA_MINALIGN in arch headers allows that.
257  */
258 #ifdef ARCH_HAS_DMA_MINALIGN
259 #if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
260 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
261 #endif
262 #endif
263 
264 #ifndef ARCH_KMALLOC_MINALIGN
265 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
266 #elif ARCH_KMALLOC_MINALIGN > 8
267 #define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
268 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
269 #endif
270 
271 /*
272  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
273  * Intended for arches that get misalignment faults even for 64 bit integer
274  * aligned buffers.
275  */
276 #ifndef ARCH_SLAB_MINALIGN
277 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
278 #endif
279 
280 /*
281  * Arches can define this function if they want to decide the minimum slab
282  * alignment at runtime. The value returned by the function must be a power
283  * of two and >= ARCH_SLAB_MINALIGN.
284  */
285 #ifndef arch_slab_minalign
286 static inline unsigned int arch_slab_minalign(void)
287 {
288 	return ARCH_SLAB_MINALIGN;
289 }
290 #endif
291 
292 /*
293  * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
294  * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
295  * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
296  */
297 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
298 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
299 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
300 
301 /*
302  * Kmalloc array related definitions
303  */
304 
305 #ifdef CONFIG_SLAB
306 /*
307  * SLAB and SLUB directly allocate requests that fit into an order-1 page
308  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
309  */
310 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
311 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
312 #ifndef KMALLOC_SHIFT_LOW
313 #define KMALLOC_SHIFT_LOW	5
314 #endif
315 #endif
316 
317 #ifdef CONFIG_SLUB
318 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
319 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
320 #ifndef KMALLOC_SHIFT_LOW
321 #define KMALLOC_SHIFT_LOW	3
322 #endif
323 #endif
324 
325 /* Maximum allocatable size */
326 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
327 /* Maximum size for which we actually use a slab cache */
328 #define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
329 /* Maximum order allocatable via the slab allocator */
330 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
331 
332 /*
333  * Kmalloc subsystem.
334  */
335 #ifndef KMALLOC_MIN_SIZE
336 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
337 #endif
338 
339 /*
340  * This restriction comes from the byte sized index implementation.
341  * Page size is normally 2^12 bytes and, in this case, if we want to use
342  * a byte sized index which can represent 2^8 entries, the size of the object
343  * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
344  * If the minimum kmalloc size is less than 16, we use it as the minimum object
345  * size and give up on using the byte sized index.
346  */
347 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
348                                (KMALLOC_MIN_SIZE) : 16)
349 
350 #ifdef CONFIG_RANDOM_KMALLOC_CACHES
351 #define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
352 #else
353 #define RANDOM_KMALLOC_CACHES_NR	0
354 #endif
355 
356 /*
357  * Whenever changing this, take care of that kmalloc_type() and
358  * create_kmalloc_caches() still work as intended.
359  *
360  * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
361  * is for accounted but unreclaimable and non-dma objects. All the other
362  * kmem caches can have both accounted and unaccounted objects.
363  */
364 enum kmalloc_cache_type {
365 	KMALLOC_NORMAL = 0,
366 #ifndef CONFIG_ZONE_DMA
367 	KMALLOC_DMA = KMALLOC_NORMAL,
368 #endif
369 #ifndef CONFIG_MEMCG_KMEM
370 	KMALLOC_CGROUP = KMALLOC_NORMAL,
371 #endif
372 	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
373 	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
374 #ifdef CONFIG_SLUB_TINY
375 	KMALLOC_RECLAIM = KMALLOC_NORMAL,
376 #else
377 	KMALLOC_RECLAIM,
378 #endif
379 #ifdef CONFIG_ZONE_DMA
380 	KMALLOC_DMA,
381 #endif
382 #ifdef CONFIG_MEMCG_KMEM
383 	KMALLOC_CGROUP,
384 #endif
385 	NR_KMALLOC_TYPES
386 };
387 
388 extern struct kmem_cache *
389 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
390 
391 /*
392  * Define gfp bits that should not be set for KMALLOC_NORMAL.
393  */
394 #define KMALLOC_NOT_NORMAL_BITS					\
395 	(__GFP_RECLAIMABLE |					\
396 	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
397 	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
398 
399 extern unsigned long random_kmalloc_seed;
400 
401 static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
402 {
403 	/*
404 	 * The most common case is KMALLOC_NORMAL, so test for it
405 	 * with a single branch for all the relevant flags.
406 	 */
407 	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
408 #ifdef CONFIG_RANDOM_KMALLOC_CACHES
409 		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
410 		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
411 						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
412 #else
413 		return KMALLOC_NORMAL;
414 #endif
415 
416 	/*
417 	 * At least one of the flags has to be set. Their priorities in
418 	 * decreasing order are:
419 	 *  1) __GFP_DMA
420 	 *  2) __GFP_RECLAIMABLE
421 	 *  3) __GFP_ACCOUNT
422 	 */
423 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
424 		return KMALLOC_DMA;
425 	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
426 		return KMALLOC_RECLAIM;
427 	else
428 		return KMALLOC_CGROUP;
429 }
430 
431 /*
432  * Figure out which kmalloc slab an allocation of a certain size
433  * belongs to.
434  * 0 = zero alloc
435  * 1 =  65 .. 96 bytes
436  * 2 = 129 .. 192 bytes
437  * n = 2^(n-1)+1 .. 2^n
438  *
439  * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
440  * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
441  * Callers where !size_is_constant should only be test modules, where runtime
442  * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
443  */
444 static __always_inline unsigned int __kmalloc_index(size_t size,
445 						    bool size_is_constant)
446 {
447 	if (!size)
448 		return 0;
449 
450 	if (size <= KMALLOC_MIN_SIZE)
451 		return KMALLOC_SHIFT_LOW;
452 
453 	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
454 		return 1;
455 	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
456 		return 2;
457 	if (size <=          8) return 3;
458 	if (size <=         16) return 4;
459 	if (size <=         32) return 5;
460 	if (size <=         64) return 6;
461 	if (size <=        128) return 7;
462 	if (size <=        256) return 8;
463 	if (size <=        512) return 9;
464 	if (size <=       1024) return 10;
465 	if (size <=   2 * 1024) return 11;
466 	if (size <=   4 * 1024) return 12;
467 	if (size <=   8 * 1024) return 13;
468 	if (size <=  16 * 1024) return 14;
469 	if (size <=  32 * 1024) return 15;
470 	if (size <=  64 * 1024) return 16;
471 	if (size <= 128 * 1024) return 17;
472 	if (size <= 256 * 1024) return 18;
473 	if (size <= 512 * 1024) return 19;
474 	if (size <= 1024 * 1024) return 20;
475 	if (size <=  2 * 1024 * 1024) return 21;
476 
477 	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
478 		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
479 	else
480 		BUG();
481 
482 	/* Will never be reached. Needed because the compiler may complain */
483 	return -1;
484 }
485 static_assert(PAGE_SHIFT <= 20);
486 #define kmalloc_index(s) __kmalloc_index(s, true)
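/*
 * Worked examples, assuming KMALLOC_MIN_SIZE == 8 (so KMALLOC_SHIFT_LOW == 3):
 *
 *   kmalloc_index(8)   == 3      // kmalloc-8
 *   kmalloc_index(24)  == 5      // rounded up to kmalloc-32
 *   kmalloc_index(96)  == 1      // the special 96-byte cache
 *   kmalloc_index(100) == 7      // rounded up to kmalloc-128
 */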
487 
488 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
489 
490 /**
491  * kmem_cache_alloc - Allocate an object
492  * @cachep: The cache to allocate from.
493  * @flags: See kmalloc().
494  *
495  * Allocate an object from this cache.
496  * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
497  *
498  * Return: pointer to the new object or %NULL in case of error
499  */
500 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
501 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
502 			   gfp_t gfpflags) __assume_slab_alignment __malloc;
503 void kmem_cache_free(struct kmem_cache *s, void *objp);
504 
505 /*
506  * Bulk allocation and freeing operations. These are accelerated in an
507  * allocator specific way to avoid taking locks repeatedly or building
508  * metadata structures unnecessarily.
509  *
510  * Note that interrupts must be enabled when calling these functions.
511  */
512 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
513 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
514 
515 static __always_inline void kfree_bulk(size_t size, void **p)
516 {
517 	kmem_cache_free_bulk(NULL, size, p);
518 }
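/*
 * A bulk-allocation sketch ("my_cachep" and "objs" are hypothetical):
 *
 *   void *objs[16];
 *   int nr;
 *
 *   nr = kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *   if (!nr)
 *           return -ENOMEM;       // 0 means nothing was allocated
 *   ...
 *   kmem_cache_free_bulk(my_cachep, nr, objs);
 */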
519 
520 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
521 							 __alloc_size(1);
522 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
523 									 __malloc;
524 
525 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
526 		    __assume_kmalloc_alignment __alloc_size(3);
527 
528 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
529 			 int node, size_t size) __assume_kmalloc_alignment
530 						__alloc_size(4);
531 void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
532 					      __alloc_size(1);
533 
534 void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
535 							     __alloc_size(1);
536 
537 /**
538  * kmalloc - allocate kernel memory
539  * @size: how many bytes of memory are required.
540  * @flags: describe the allocation context
541  *
542  * kmalloc is the normal method of allocating memory
543  * for objects smaller than page size in the kernel.
544  *
545  * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
546  * bytes. When @size is a power of two, the alignment is also guaranteed
547  * to be at least @size.
548  *
549  * The @flags argument may be one of the GFP flags defined at
550  * include/linux/gfp_types.h and described at
551  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
552  *
553  * The recommended usage of the @flags is described at
554  * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
555  *
556  * Below is a brief outline of the most useful GFP flags
557  *
558  * %GFP_KERNEL
559  *	Allocate normal kernel ram. May sleep.
560  *
561  * %GFP_NOWAIT
562  *	Allocation will not sleep.
563  *
564  * %GFP_ATOMIC
565  *	Allocation will not sleep.  May use emergency pools.
566  *
567  * Also it is possible to set different flags by OR'ing
568  * in one or more of the following additional @flags:
569  *
570  * %__GFP_ZERO
571  *	Zero the allocated memory before returning. Also see kzalloc().
572  *
573  * %__GFP_HIGH
574  *	This allocation has high priority and may use emergency pools.
575  *
576  * %__GFP_NOFAIL
577  *	Indicate that this allocation is in no way allowed to fail
578  *	(think twice before using).
579  *
580  * %__GFP_NORETRY
581  *	If memory is not immediately available,
582  *	then give up at once.
583  *
584  * %__GFP_NOWARN
585  *	If allocation fails, don't issue any warnings.
586  *
587  * %__GFP_RETRY_MAYFAIL
588  *	Try really hard to satisfy the allocation, but eventually
589  *	allow it to fail.
590  */
591 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
592 {
593 	if (__builtin_constant_p(size) && size) {
594 		unsigned int index;
595 
596 		if (size > KMALLOC_MAX_CACHE_SIZE)
597 			return kmalloc_large(size, flags);
598 
599 		index = kmalloc_index(size);
600 		return kmalloc_trace(
601 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
602 				flags, size);
603 	}
604 	return __kmalloc(size, flags);
605 }
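/*
 * Typical use (sketch; "my_hdr" is a hypothetical structure):
 *
 *   struct my_hdr *hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
 *
 *   if (!hdr)
 *           return -ENOMEM;
 *   ...
 *   kfree(hdr);
 */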
606 
607 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
608 {
609 	if (__builtin_constant_p(size) && size) {
610 		unsigned int index;
611 
612 		if (size > KMALLOC_MAX_CACHE_SIZE)
613 			return kmalloc_large_node(size, flags, node);
614 
615 		index = kmalloc_index(size);
616 		return kmalloc_node_trace(
617 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
618 				flags, node, size);
619 	}
620 	return __kmalloc_node(size, flags, node);
621 }
622 
623 /**
624  * kmalloc_array - allocate memory for an array.
625  * @n: number of elements.
626  * @size: element size.
627  * @flags: the type of memory to allocate (see kmalloc).
628  */
629 static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
630 {
631 	size_t bytes;
632 
633 	if (unlikely(check_mul_overflow(n, size, &bytes)))
634 		return NULL;
635 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
636 		return kmalloc(bytes, flags);
637 	return __kmalloc(bytes, flags);
638 }
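/*
 * For instance (sketch; "nr" and "tbl" are illustrative):
 *
 *   struct my_ent *tbl = kmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *   if (!tbl)
 *           return -ENOMEM;       // also taken when nr * sizeof(*tbl) overflows
 */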
639 
640 /**
641  * krealloc_array - reallocate memory for an array.
642  * @p: pointer to the memory chunk to reallocate
643  * @new_n: new number of elements to alloc
644  * @new_size: new size of a single member of the array
645  * @flags: the type of memory to allocate (see kmalloc)
646  */
647 static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
648 								      size_t new_n,
649 								      size_t new_size,
650 								      gfp_t flags)
651 {
652 	size_t bytes;
653 
654 	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
655 		return NULL;
656 
657 	return krealloc(p, bytes, flags);
658 }
659 
660 /**
661  * kcalloc - allocate memory for an array. The memory is set to zero.
662  * @n: number of elements.
663  * @size: element size.
664  * @flags: the type of memory to allocate (see kmalloc).
665  */
666 static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
667 {
668 	return kmalloc_array(n, size, flags | __GFP_ZERO);
669 }
670 
671 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
672 				  unsigned long caller) __alloc_size(1);
673 #define kmalloc_node_track_caller(size, flags, node) \
674 	__kmalloc_node_track_caller(size, flags, node, \
675 				    _RET_IP_)
676 
677 /*
678  * kmalloc_track_caller is a special version of kmalloc that records the
679  * calling function of the routine calling it for slab leak tracking instead
680  * of just the calling function (confusing, eh?).
681  * It's useful when the call to kmalloc comes from a widely-used standard
682  * allocator where we care about the real place the memory allocation
683  * request comes from.
684  */
685 #define kmalloc_track_caller(size, flags) \
686 	__kmalloc_node_track_caller(size, flags, \
687 				    NUMA_NO_NODE, _RET_IP_)
688 
689 static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
690 							  int node)
691 {
692 	size_t bytes;
693 
694 	if (unlikely(check_mul_overflow(n, size, &bytes)))
695 		return NULL;
696 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
697 		return kmalloc_node(bytes, flags, node);
698 	return __kmalloc_node(bytes, flags, node);
699 }
700 
701 static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
702 {
703 	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
704 }
705 
706 /*
707  * Shortcuts
708  */
709 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
710 {
711 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
712 }
713 
714 /**
715  * kzalloc - allocate memory. The memory is set to zero.
716  * @size: how many bytes of memory are required.
717  * @flags: the type of memory to allocate (see kmalloc).
718  */
719 static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
720 {
721 	return kmalloc(size, flags | __GFP_ZERO);
722 }
723 
724 /**
725  * kzalloc_node - allocate zeroed memory from a particular memory node.
726  * @size: how many bytes of memory are required.
727  * @flags: the type of memory to allocate (see kmalloc).
728  * @node: memory node from which to allocate
729  */
730 static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
731 {
732 	return kmalloc_node(size, flags | __GFP_ZERO, node);
733 }
734 
735 extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
736 static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
737 {
738 	return kvmalloc_node(size, flags, NUMA_NO_NODE);
739 }
740 static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
741 {
742 	return kvmalloc_node(size, flags | __GFP_ZERO, node);
743 }
744 static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
745 {
746 	return kvmalloc(size, flags | __GFP_ZERO);
747 }
748 
749 static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
750 {
751 	size_t bytes;
752 
753 	if (unlikely(check_mul_overflow(n, size, &bytes)))
754 		return NULL;
755 
756 	return kvmalloc(bytes, flags);
757 }
758 
759 static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
760 {
761 	return kvmalloc_array(n, size, flags | __GFP_ZERO);
762 }
763 
764 extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
765 		      __realloc_size(3);
766 extern void kvfree(const void *addr);
767 extern void kvfree_sensitive(const void *addr, size_t len);
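/*
 * The kv*() helpers fall back to vmalloc() for large or fragmented requests.
 * A sketch ("nr_items" and "bitmap" are illustrative):
 *
 *   u64 *bitmap = kvcalloc(nr_items, sizeof(*bitmap), GFP_KERNEL);
 *
 *   if (!bitmap)
 *           return -ENOMEM;
 *   ...
 *   kvfree(bitmap);               // correct for kmalloc'ed or vmalloc'ed memory
 */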
768 
769 unsigned int kmem_cache_size(struct kmem_cache *s);
770 
771 /**
772  * kmalloc_size_roundup - Report allocation bucket size for the given size
773  *
774  * @size: Number of bytes to round up from.
775  *
776  * This returns the number of bytes that would be available in a kmalloc()
777  * allocation of @size bytes. For example, a 126 byte request would be
778  * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
779  * for the general-purpose kmalloc()-based allocations, and is not for the
780  * pre-sized kmem_cache_alloc()-based allocations.)
781  *
782  * Use this to kmalloc() the full bucket size ahead of time instead of using
783  * ksize() to query the size after an allocation.
784  */
785 size_t kmalloc_size_roundup(size_t size);
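/*
 * For example (sketch, assuming the default power-of-two buckets):
 *
 *   size_t want = 500;
 *   size_t got = kmalloc_size_roundup(want);   // 512
 *   void *buf = kmalloc(got, GFP_KERNEL);      // all 512 bytes are usable
 */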
786 
787 void __init kmem_cache_init_late(void);
788 
789 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
790 int slab_prepare_cpu(unsigned int cpu);
791 int slab_dead_cpu(unsigned int cpu);
792 #else
793 #define slab_prepare_cpu	NULL
794 #define slab_dead_cpu		NULL
795 #endif
796 
797 #endif	/* _LINUX_SLAB_H */
798