/* xref: /openbmc/linux/include/linux/slub_def.h (revision 6801be4f) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};
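
/*
 * Illustrative sketch, not part of this header: when CONFIG_SLUB_STATS is
 * enabled, mm/slub.c bumps the per-cpu counters above with a helper roughly
 * like the following (a racy increment is acceptable for statistics):
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 * The counters are then exported per cache through sysfs when
 * SLAB_SUPPORTS_SYSFS is defined (see below).
 */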

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	union {
		struct {
			void **freelist;	/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_aba_t freelist_tid;
	};
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */
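
/*
 * Illustrative sketch, not part of this header: the lockless fastpath in
 * mm/slub.c updates freelist and tid as a pair so that a stale freelist
 * (the ABA problem) is caught as a tid mismatch.  On kernels where the
 * fastpath uses this_cpu_cmpxchg_double(), the allocation step looks
 * roughly like this (get_freepointer() and next_tid() are mm/slub.c
 * helpers; the exact primitive differs across kernel versions):
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;
 *
 * A failed exchange means another context raced with this one and the
 * fastpath retries.
 */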

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
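
/*
 * Illustrative sketch, not part of this header: mm/slub.c walks the per-cpu
 * partial list by reading the head and advancing it through each slab's
 * 'next' link, roughly:
 *
 *	while ((slab = slub_percpu_partial(c))) {
 *		slub_set_percpu_partial(c, slab);
 *		... allocate from or flush 'slab' ...
 *	}
 *
 * slub_set_percpu_partial() advances the head to slab->next.  With
 * CONFIG_SLUB_CPU_PARTIAL disabled the macros compile to NULL or nothing,
 * so such loops vanish entirely.
 */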

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
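
/*
 * Illustrative sketch, not part of this header: mm/slub.c packs the order
 * into the high bits and the object count into the low bits of 'x' (OO_SHIFT
 * is 16 there) and unpacks them with small helpers roughly like:
 *
 *	static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 *
 * Keeping both values in one word lets them be read consistently without
 * locking.
 */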

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);
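
/*
 * Map an arbitrary address within a slab to the start of the object that
 * contains it, clamping to the last object and adjusting the result for the
 * left redzone via fixup_red_left().
 */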
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}
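
/*
 * Illustrative equivalence, not part of this header: reciprocal_size is
 * precomputed from cache->size, so the reciprocal_divide() above yields the
 * same index as a plain division without a runtime div instruction:
 *
 *	index = (kasan_reset_tag(obj) - addr) / cache->size;
 */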

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
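
/*
 * Illustrative use, not part of this header: callers index per-object
 * metadata arrays with the result, e.g. the memcg accounting code in
 * mm/slab.h does roughly:
 *
 *	unsigned int off = obj_to_index(s, slab, p);
 *	slab_objcgs(slab)[off] = objcg;
 */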

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */