/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */
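
/*
 * A minimal sketch of the consolidation described above (illustrative
 * only; KMEM_CACHE_COMMON_FIELDS is a hypothetical macro, not a kernel
 * API). Pure C11 only permits untagged anonymous struct members, so the
 * common fields would have to be shared via a macro or an include:
 *
 *	#define KMEM_CACHE_COMMON_FIELDS	\
 *		unsigned int object_size;	\
 *		unsigned int size;		\
 *		unsigned int align;
 *		... remaining common fields ...
 *
 *	struct kmem_cache {			// e.g. in slub_def.h
 *		struct { KMEM_CACHE_COMMON_FIELDS };	// anonymous member
 *		// allocator-private fields follow
 *	};
 *
 * Common code could then keep accessing s->object_size etc. unchanged,
 * while each allocator keeps its private fields in the same structure.
 */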

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
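
/*
 * Example (sketch, not kernel code): any walk of the global cache list
 * must hold slab_mutex, since caches can be created and destroyed
 * concurrently:
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list)
 *		pr_info("cache %s, object size %u\n", s->name, s->object_size);
 *	mutex_unlock(&slab_mutex);
 */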

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
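
/*
 * Sketch of how these masks are meant to be used by common cache
 * creation code (simplified; see kmem_cache_create() for the real
 * sequence): caller-visible flags outside SLAB_FLAGS_PERMITTED are
 * rejected outright, and the rest are sanitized down to what the
 * current configuration supports:
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;		// refuse allocator-specific flags
 *
 *	flags &= CACHE_CREATE_MASK;	// mask out unsupported flags
 */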

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We append suffixes to the cache name in memcg because two caches in
 * the system cannot share the same name. But when we print them
 * locally, it is better to refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg
 * caches. That said, the caller must ensure the memcg's cache won't go
 * away by either taking a css reference to the owner cgroup, or holding
 * the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG_KMEM */
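
/*
 * Sketch (simplified, not the exact allocator code): the charge/uncharge
 * helpers above are meant to bracket the lifetime of a slab page. An
 * allocator charges the page to the memcg that owns the cache when the
 * slab is allocated, and uncharges it when the slab is freed:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_charge_slab(page, gfp, order, s)) {
 *		__free_pages(page, order);	// charge failed
 *		page = NULL;
 *	}
 *	...
 *	memcg_uncharge_slab(page, order, s);	// on slab teardown
 *
 * For root caches (or with kmem accounting disabled) both helpers are
 * no-ops, so the fast path stays unaffected.
 */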

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc. for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		p[i] = kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))

#endif
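
/*
 * Example (sketch): counting partial slabs across all nodes of a SLUB
 * cache with the iterator above:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 *
 * Note the iterator expands to a bare for/if pair, so a dangling-else
 * warning can appear if it is nested without braces.
 */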

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */