/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
        struct {
                void *freelist;
                unsigned long counter;
        };
        freelist_full_t full;
} freelist_aba_t;
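
/*
 * Illustrative sketch only (the names below are made up, not allocator code):
 * pointer and counter are compared and swapped as one unit, so a freelist
 * head that was popped and re-pushed in the meantime (same pointer, bumped
 * counter) still makes the cmpxchg fail:
 *
 *	freelist_aba_t old, new;
 *
 *	old = *slot;
 *	do {
 *		new.freelist = next_object;
 *		new.counter = old.counter + 1;
 *	} while (!try_cmpxchg_freelist(&slot->full, &old.full, new.full));
 */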

/* Reuses the bits in struct page */
struct slab {
        unsigned long __page_flags;

#if defined(CONFIG_SLAB)

        struct kmem_cache *slab_cache;
        union {
                struct {
                        struct list_head slab_list;
                        void *freelist; /* array of free object indexes */
                        void *s_mem;    /* first object */
                };
                struct rcu_head rcu_head;
        };
        unsigned int active;

#elif defined(CONFIG_SLUB)

        struct kmem_cache *slab_cache;
        union {
                struct {
                        union {
                                struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
                                struct {
                                        struct slab *next;
                                        int slabs;      /* Nr of slabs left */
                                };
#endif
                        };
                        /* Double-word boundary */
                        union {
                                struct {
                                        void *freelist;         /* first free object */
                                        union {
                                                unsigned long counters;
                                                struct {
                                                        unsigned inuse:16;
                                                        unsigned objects:15;
                                                        unsigned frozen:1;
                                                };
                                        };
                                };
#ifdef system_has_freelist_aba
                                freelist_aba_t freelist_counter;
#endif
                        };
                };
                struct rcu_head rcu_head;
        };
        unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

        atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl) \
        static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or a single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
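
/*
 * Example (illustrative only): code still working in terms of struct page can
 * hop between the representations with these helpers instead of open-coding
 * casts; here "page" is assumed to already be the head (or only) page:
 *
 *	struct slab *slab = page_slab(page);
 *	struct folio *folio = slab_folio(slab);
 *	struct page *first = slab_page(slab);
 */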

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
        return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
        folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
        folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
        __folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
        return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
        return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
        return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
        struct folio *folio = virt_to_folio(addr);

        if (!folio_test_slab(folio))
                return NULL;

        return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
        return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
        return PAGE_SIZE << slab_order(slab);
}
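
/*
 * Worked example (illustrative only): with 4K pages, an order-2 slab covers
 * four pages, so slab_size() returns 16K and every object of that slab lies
 * in [slab_address(slab), slab_address(slab) + slab_size()).
 */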

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
                              int node, size_t orig_size,
                              unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
                              slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                              unsigned int size, slab_flags_t flags,
                              unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
        return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT | SLAB_NO_MERGE)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT | \
                          SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT | \
                              SLAB_KMALLOC | \
                              SLAB_NO_MERGE | \
                              SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
        return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
        return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
        if (IS_ENABLED(CONFIG_SLUB_DEBUG))
                VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (__slub_debug_enabled())
                return s->flags & flags;
        return false;
}
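
/*
 * Typical use (illustrative only, do_extra_checks() is a made-up helper):
 * debug-only paths stay compiled in but are skipped unless the matching
 * slub_debug flag was enabled for the cache:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		do_extra_checks(s, object);
 */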

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
        unsigned long memcg_data = READ_ONCE(slab->memcg_data);

        VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
                                                        slab_page(slab));
        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

        return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
                             gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
        kfree(slab_objcgs(slab));
        slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}
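
/*
 * Worked example (illustrative only): for a cache with an s->size of 64
 * bytes, obj_full_size() returns 64 + sizeof(struct obj_cgroup *), i.e. 72
 * on 64-bit, so the charge also pays for the obj_cgroup pointer kept for
 * the accounted object.
 */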

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct list_lru *lru,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        struct obj_cgroup *objcg;

        if (!memcg_kmem_online())
                return true;

        if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                return true;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return true;

        if (lru) {
                int ret;
                struct mem_cgroup *memcg;

                memcg = get_mem_cgroup_from_objcg(objcg);
                ret = memcg_list_lru_alloc(memcg, lru, flags);
                css_put(&memcg->css);

                if (ret)
                        goto out;
        }

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
                goto out;

        *objcgp = objcg;
        return true;
out:
        obj_cgroup_put(objcg);
        return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
        struct slab *slab;
        unsigned long off;
        size_t i;

        if (!memcg_kmem_online() || !objcg)
                return;

        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        slab = virt_to_slab(p[i]);

                        if (!slab_objcgs(slab) &&
                            memcg_alloc_slab_cgroups(slab, s, flags,
                                                     false)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }

                        off = obj_to_index(s, slab, p[i]);
                        obj_cgroup_get(objcg);
                        slab_objcgs(slab)[off] = objcg;
                        mod_objcg_state(objcg, slab_pgdat(slab),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                                        void **p, int objects)
{
        struct obj_cgroup **objcgs;
        int i;

        if (!memcg_kmem_online())
                return;

        objcgs = slab_objcgs(slab);
        if (!objcgs)
                return;

        for (i = 0; i < objects; i++) {
                struct obj_cgroup *objcg;
                unsigned int off;

                off = obj_to_index(s, slab, p[i]);
                objcg = objcgs[off];
                if (!objcg)
                        continue;

                objcgs[off] = NULL;
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
                                -obj_full_size(s));
                obj_cgroup_put(objcg);
        }
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
        return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
                                           struct kmem_cache *s, gfp_t gfp,
                                           bool new_slab)
{
        return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct list_lru *lru,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                                        void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct slab *slab;

        slab = virt_to_slab(obj);
        if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
                                        __func__))
                return NULL;
        return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
                                         struct kmem_cache *s, gfp_t gfp)
{
        if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
                memcg_alloc_slab_cgroups(slab, s, gfp, true);

        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
                                           struct kmem_cache *s)
{
        if (memcg_kmem_online())
                memcg_free_slab_cgroups(slab);

        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        if (WARN(cachep && cachep != s,
                 "%s: Wrong slab cache. %s but object is from %s\n",
                 __func__, s->name, cachep->name))
                print_tracking(cachep, x);
        return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct list_lru *lru,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init,
                                        unsigned int orig_size)
{
        unsigned int zero_size = s->object_size;
        bool kasan_init = init;
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * For a kmalloc object, the allocated memory size (object_size) is
         * likely larger than the requested size (orig_size). If redzone
         * check is enabled for the extra space, don't zero it, as it will
         * be redzoned soon. The redzone operation for this extra space
         * could be seen as a replacement of current poisoning under certain
         * debug options, and won't break other sanity checks.
         */
        if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
            (s->flags & SLAB_KMALLOC))
                zero_size = orig_size;
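
        /*
         * Worked example (illustrative only, assuming the redzone debug
         * option is on): kmalloc(13) is served from the 16-byte kmalloc
         * cache, so orig_size is 13 while object_size is 16; only the first
         * 13 bytes are zeroed below and the remaining 3 are left to the
         * redzoning code.
         */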

        /*
         * When slub_debug is enabled, avoid memory initialization integrated
         * into KASAN and instead zero out the memory via the memset below with
         * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
         * cause false-positive reports. This does not lead to a performance
         * penalty on production builds, as slub_debug is not intended to be
         * enabled there.
         */
        if (__slub_debug_enabled())
                kasan_init = false;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
                if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
                        memset(p[i], 0, zero_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
                kmsan_slab_alloc(s, p[i], flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
        raw_spinlock_t list_lock;
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        spinlock_t list_lock;
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))
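
/*
 * Example use (illustrative only, assumes CONFIG_SLUB since nr_partial only
 * exists there): given a struct kmem_cache *s, walk every node and sum up
 * its partial slabs:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */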

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                            gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                          unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
        void *kp_ptr;
        struct slab *kp_slab;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
                         const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */