xref: /openbmc/linux/mm/slab.h (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * The freelist pointer and a counter that are cmpxchg'd together as one
 * unit; bumping the counter on every update avoids the typical ABA
 * problem of a cmpxchg on the bare pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;

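/*
 * Illustrative sketch (not part of the kernel source): how a lock-free
 * consumer can use freelist_aba_t. The pointer and counter are read and
 * cmpxchg'd as one unit via the "full" member, and the counter is bumped
 * on every update, so a concurrent free/realloc cycle that restores the
 * old pointer value still changes the pair and the cmpxchg fails. The
 * assumption that each free object stores its next-object link at offset
 * 0 is purely for this example; SLUB actually keeps it at s->offset.
 */
#ifdef system_has_freelist_aba
static inline void *freelist_pop_sketch(freelist_aba_t *head)
{
	freelist_aba_t old, new;

	old.full = head->full;
	do {
		if (!old.freelist)
			return NULL;
		new.freelist = *(void **)old.freelist;	/* next free object */
		new.counter = old.counter + 1;		/* defeats ABA */
	} while (!try_cmpxchg_freelist(&head->full, &old.full, new.full));

	return old.freelist;
}
#endif
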
/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							/*
							 * If slab debugging is enabled then the
							 * frozen bit can be reused to indicate
							 * that the slab was corrupted.
							 */
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

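/*
 * For reference, the first SLAB_MATCH() above expands (after
 * preprocessing) to a compile-time check of the form:
 *
 *	static_assert(offsetof(struct page, flags) ==
 *		      offsetof(struct slab, __page_flags));
 *
 * so any layout drift between struct page and the overlaid struct slab
 * becomes a build error instead of silent runtime corruption.
 */
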
/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or a single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

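/*
 * Illustrative round trip (not part of the kernel source): starting from
 * an object address, the converters above compose as below. "obj" is a
 * hypothetical pointer into a slab; the folio -> slab -> page hops are
 * all type reinterpretations of the same underlying memory.
 */
static inline struct page *obj_to_first_page_sketch(const void *obj)
{
	struct folio *folio = virt_to_folio(obj);	/* any addr -> folio */
	struct slab *slab = folio_slab(folio);		/* folio -> slab */

	return slab_page(slab);				/* slab -> first page */
}
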
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

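/*
 * Sketch of how an allocator propagates the flag when a new slab is
 * allocated (illustrative; assumes folio_is_pfmemalloc() from
 * <linux/mm.h> - the real allocation paths do the equivalent):
 */
static inline void slab_account_pfmemalloc_sketch(struct slab *slab)
{
	if (folio_is_pfmemalloc(slab_folio(slab)))
		slab_set_pfmemalloc(slab);
}
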
static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

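/*
 * Worked example: on a 4KiB-page system, an order-1 slab has
 * slab_size() == PAGE_SIZE << 1 == 8192 bytes, so a cache with 256-byte
 * objects fits 32 of them per slab (ignoring any debug padding).
 */
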
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

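/*
 * Sketch of how early code gates on bootstrap progress (mm/slab_common.c
 * implements slab_is_available() with exactly this kind of check):
 */
static inline bool slab_ready_sketch(void)
{
	return slab_state >= UP;	/* kmalloc() etc. may now be used */
}
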
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
			      slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT | SLAB_NO_MERGE)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug(), as that is also
 * what enables the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

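/*
 * Example caller (sketch): a predicate that only fires when the cache was
 * created with consistency checking enabled, mirroring how
 * cache_from_obj() further below uses this helper.
 */
static inline bool wants_consistency_checks_sketch(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS);
}
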
#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

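/*
 * For reference, the encoding that slab_objcgs() decodes: the objcg
 * vector pointer is stored in memcg_data with MEMCG_DATA_OBJCGS set in
 * its low bits (this is what memcg_alloc_slab_cgroups() stores), i.e.
 * conceptually:
 *
 *	slab->memcg_data = (unsigned long)vec | MEMCG_DATA_OBJCGS;
 */
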
int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is extra space used to store
	 * its obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

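/*
 * Worked example: with 64-byte objects and 8-byte pointers,
 * obj_full_size() charges 64 + 8 = 72 bytes per accounted object; the
 * extra 8 bytes pay for the obj_cgroup pointer slot in the objcgs vector.
 */
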
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there, or track
	 * user information, then we can only use the space before that
	 * information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	bool kasan_init = init;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For a kmalloc object, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If redzone
	 * checking is enabled for the extra space, don't zero it, as it will
	 * be redzoned soon. The redzone operation for this extra space could
	 * be seen as a replacement of current poisoning under certain debug
	 * options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * When slub_debug is enabled, avoid memory initialization integrated
	 * into KASAN and instead zero out the memory via the memset below with
	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
	 * cause false-positive reports. This does not lead to a performance
	 * penalty on production builds, as slub_debug is not intended to be
	 * enabled there.
	 */
	if (__slub_debug_enabled())
		kasan_init = false;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

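/*
 * Example walk (sketch): summing per-node partial-list lengths. The
 * nr_partial field exists only in the SLUB variant of kmem_cache_node
 * above, so this is SLUB-only; locking is omitted for brevity.
 */
static inline unsigned long count_partial_sketch(struct kmem_cache *s)
{
	struct kmem_cache_node *n;
	unsigned long nr = 0;
	int node;

	for_each_kmem_cache_node(s, node, n)
		nr += n->nr_partial;
	return nr;
}
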
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

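/*
 * Sketch of the allocation-path use of the helper above (illustrative;
 * the real hot paths do the equivalent with debug-aware sizes): zero the
 * object when the init_on_alloc policy, cache flags and gfp flags agree.
 */
static inline void maybe_zero_on_alloc_sketch(struct kmem_cache *c,
					      void *obj, gfp_t flags)
{
	if (obj && slab_want_init_on_alloc(flags, c))
		memset(obj, 0, c->object_size);
}
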
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */