/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

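/*
 * Per-cpu allocator state. The lockless fastpaths in mm/slub.c pair
 * ->freelist and ->tid in a single this_cpu_cmpxchg_double(), so a
 * concurrent free, a cpu migration or a cpu slab change between reading
 * the fields and publishing the update is caught by the tid mismatch and
 * the operation is retried.
 */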
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

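/*
 * Illustrative sketch (not part of this header): the per-cpu partial list
 * is a singly linked chain of frozen slab pages threaded through
 * page->next, which is why slub_set_percpu_partial() advances to
 * (p)->next. A reader such as the sysfs statistics code in mm/slub.c can
 * peek at the list with slub_percpu_partial_read_once(); the hypothetical
 * helper below walks it, assuming the full struct page definition from
 * <linux/mm_types.h> is visible.
 */
static inline int slub_example_count_percpu_partial(struct kmem_cache_cpu *c)
{
	struct page *page = slub_percpu_partial_read_once(c);
	int slabs = 0;

	/* The chain is modified locklessly by the owning cpu. */
	while (page) {
		slabs++;
		page = READ_ONCE(page->next);
	}
	return slabs;
}
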
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

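/*
 * Illustrative sketch (assumption: mirrors the OO_SHIFT/oo_order()/
 * oo_objects() helpers that live in mm/slub.c, not part of this header):
 * the page order is kept in the high bits of ->x and the object count in
 * the low bits, so both values can be read or replaced with one
 * word-sized access.
 */
#define SLUB_EXAMPLE_OO_SHIFT	16
#define SLUB_EXAMPLE_OO_MASK	((1 << SLUB_EXAMPLE_OO_SHIFT) - 1)

static inline unsigned int slub_example_oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> SLUB_EXAMPLE_OO_SHIFT;	/* high bits: page order */
}

static inline unsigned int slub_example_oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & SLUB_EXAMPLE_OO_MASK;	/* low bits: objects per slab */
}
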
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

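/*
 * Illustrative sketch (assumption: modeled on the stat() helper in
 * mm/slub.c, not part of this header, and assumes raw_cpu_inc() from
 * <linux/percpu-defs.h> is available): with CONFIG_SLUB_STATS enabled,
 * each event in enum stat_item is counted in the owning cpu's
 * kmem_cache_cpu::stat[] array and later summed for sysfs.
 */
#ifdef CONFIG_SLUB_STATS
static inline void slub_example_stat(struct kmem_cache *s, enum stat_item si)
{
	/* A racy increment is acceptable here; it is only a statistic. */
	raw_cpu_inc(s->cpu_slab->stat[si]);
}
#endif
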
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
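
/*
 * Illustrative sketch (assumption: based on the sizing heuristic in
 * mm/slub.c's set_cpu_partial(); the exact thresholds vary between kernel
 * versions and are not part of this header): caches with smaller objects
 * are allowed to keep more objects on their per-cpu partial lists before
 * draining them back to the node partial lists.
 */
static inline void slub_example_set_cpu_partial(struct kmem_cache *s)
{
	if (s->size >= PAGE_SIZE)
		slub_set_cpu_partial(s, 2);
	else if (s->size >= 1024)
		slub_set_cpu_partial(s, 6);
	else if (s->size >= 256)
		slub_set_cpu_partial(s, 13);
	else
		slub_set_cpu_partial(s, 30);
}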

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

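/*
 * Map an arbitrary address within a slab page back to the start of the
 * object containing it, clamping to the last object on the page and
 * skipping past the left red zone when red zoning is enabled. For
 * example (illustrative values only), with cache->size == 512 and no
 * left red zone, an address 700 bytes into the page resolves to the
 * object starting at offset 512.
 */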
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/*
 * Determine the object index from a given position. The division by the
 * object size is done with reciprocal_divide() using the precomputed
 * cache->reciprocal_size, and kasan_reset_tag() strips any KASAN pointer
 * tag so the arithmetic operates on the untagged address.
 */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	/* KFENCE objects live in the dedicated KFENCE pool, not on a slab page. */
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, page_address(page), obj);
}

static inline int objs_per_slab_page(const struct kmem_cache *cache,
				     const struct page *page)
{
	return page->objects;
}
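
/*
 * Illustrative sketch (hypothetical helper, loosely modeled on the
 * per-object memcg accounting in mm/slab.h; "metadata_vec" is an assumed
 * caller-provided array, not a kernel API): obj_to_index() yields a
 * stable slot in the range [0, objs_per_slab_page() - 1] that can be
 * used to index per-object metadata stored alongside a slab page.
 */
static inline void *slub_example_obj_metadata(const struct kmem_cache *cache,
					      const struct page *page,
					      void *obj, void **metadata_vec)
{
	return metadata_vec[obj_to_index(cache, page, obj)];
}
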
#endif /* _LINUX_SLUB_DEF_H */