/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))
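
/*
 * For illustration only: a caller that already holds a folio would normally
 * test it before converting, the same pattern virt_to_slab() below uses:
 *
 *	if (folio_test_slab(folio))
 *		slab = folio_slab(folio);
 */
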
/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may change
 * in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations where
 * we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
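
/*
 * For illustration: slab_size() is simply PAGE_SIZE << slab_order(slab),
 * so with 4KiB pages an order-2 slab spans 16KiB of memory.
 */
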
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
			      slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			      unsigned int size, slab_flags_t flags,
			      unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT | SLAB_NO_MERGE)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif
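
/*
 * For illustration (cache name and struct are made-up examples): a caller
 * requesting one of the cache flags above would pass it to
 * kmem_cache_create(), e.g. an accounted cache that opts out of merging:
 *
 *	cache = kmem_cache_create("example_cache", sizeof(struct example),
 *				  0, SLAB_ACCOUNT | SLAB_NO_MERGE, NULL);
 */
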
/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
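
/*
 * For illustration, callers typically gate debug-only work on one of the
 * SLAB_DEBUG_FLAGS bits, e.g.:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		print_tracking(s, object);
 */
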
#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
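
/*
 * For illustration, the hooks above pair up around an allocation: the
 * pre-hook charges objects * obj_full_size(s) to the current obj_cgroup, the
 * post-hook records the obj_cgroup per object (uncharging on failure), and
 * the free hook reverses both. Since obj_full_size() includes the extra
 * obj_cgroup pointer, e.g. a 64-byte object is charged
 * 64 + sizeof(struct obj_cgroup *) bytes.
 */
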
#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
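
/*
 * For illustration: with SLUB and none of the debug, KASAN, RCU or
 * STORE_USER flags set, slab_ksize() falls through to s->size, so a
 * kmalloc-64 object reports 64 usable bytes even if the caller asked for
 * less.
 */
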
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For kmalloc object, the allocated memory size(object_size) is likely
	 * larger than the requested size(orig_size). If redzone check is
	 * enabled for the extra space, don't zero it, as it will be redzoned
	 * soon. The redzone operation for this extra space could be seen as a
	 * replacement of current poisoning under certain debug option, and
	 * won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
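
/*
 * For illustration, an allocation path is expected to pair these hooks
 * roughly as follows (a sketch, not a literal callsite):
 *
 *	s = slab_pre_alloc_hook(s, lru, &objcg, size, flags);
 *	if (!s)
 *		return NULL;
 *	... allocate the objects into p[] ...
 *	slab_post_alloc_hook(s, objcg, flags, size, p, init, orig_size);
 */
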
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))


#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */