/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl) \
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif
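
/*
 * Illustrative sketch, not part of this header: a lockless freelist update
 * under system_has_freelist_aba() compares and swaps the pointer and the
 * counter word as one unit, so a free/alloc pair that happens to restore
 * the same pointer still changes the counter and makes the exchange fail.
 * Roughly (field names as above; "next_object" and the new counters value
 * are hypothetical):
 *
 *	freelist_aba_t old, new;
 *
 *	old.freelist = slab->freelist;
 *	old.counter  = slab->counters;
 *	new.freelist = next_object;
 *	new.counter  = new_counters;
 *	if (!try_cmpxchg_freelist(&slab->freelist_counter.full,
 *				  &old.full, new.full))
 *		... retry, another CPU changed the freelist meanwhile ...
 *
 * The real update helpers live in the SLUB implementation; this only shows
 * why the pointer and counter share one cmpxchg-able word.
 */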

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio) (_Generic((folio),				\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio; those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s) (_Generic((s),					\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p) (_Generic((p),					\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
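
/*
 * Illustrative sketch, not part of this header: a hypothetical caller can
 * map an arbitrary kernel address back to its slab and query the backing
 * folio's properties through the wrappers above:
 *
 *	struct slab *slab = virt_to_slab(obj);
 *
 *	if (slab) {
 *		int nid = slab_nid(slab);	(NUMA node of the folio)
 *		size_t sz = slab_size(slab);	(PAGE_SIZE << slab_order(slab))
 *	}
 *
 * virt_to_slab() returns NULL when the address is not backed by a slab
 * folio, so the result must be checked.
 */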

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
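
/*
 * Illustrative sketch, not part of this header: kmalloc() resolves small
 * requests to one of the fixed-size caches described by kmalloc_info[];
 * kmalloc_slab() does that size-to-cache lookup, with the gfp flags
 * selecting the kmalloc type (normal, DMA, reclaimable, ...):
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 * Requests larger than the largest kmalloc cache bypass this table and go
 * to the page allocator instead.
 */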

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
			      slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			      unsigned int size, slab_flags_t flags,
			      unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
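
/*
 * Illustrative sketch, not part of this header: debug-only paths are
 * typically gated on kmem_cache_debug_flags() so that they reduce to a
 * static branch when slub_debug is disabled:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		... expensive consistency checking ...
 *
 * Only flags within SLAB_DEBUG_FLAGS may be passed; anything else trips
 * the VM_WARN_ON_ONCE() above.
 */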

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
			     gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags, false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
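
/*
 * Illustrative sketch, not part of this header: the hooks above maintain a
 * per-slab vector of obj_cgroup pointers, indexed per object, and charge
 * obj_full_size() bytes for each accounted object (the object itself plus
 * the obj_cgroup pointer slot). In outline:
 *
 *	off = obj_to_index(s, slab, p[i]);
 *	slab_objcgs(slab)[off] = objcg;
 *	mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
 *			obj_full_size(s));
 *
 * memcg_slab_free_hook() reverses this: the slot is cleared, the same
 * amount is uncharged and the objcg reference is dropped.
 */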

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					   struct kmem_cache *s, gfp_t gfp,
					   bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
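
/*
 * Illustrative sketch, not part of this header: cache_from_obj() is the
 * safety net on the free path. With freelist hardening or consistency
 * checks enabled, the allocators do roughly:
 *
 *	s = cache_from_obj(s, x);
 *	if (!s)
 *		return;
 *	... free x to s ...
 *
 * so a free directed at the wrong cache warns, prints the tracking info
 * and is redirected to the cache the object actually belongs to. Without
 * those options the caller-supplied cache is trusted as-is.
 */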

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there
	 * or track user information then we can only use the
	 * space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For kmalloc objects, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If redzone
	 * checking is enabled for the extra space, don't zero it, as it will
	 * be redzoned soon. The redzone operation for this extra space could
	 * be seen as a replacement of the current poisoning under certain
	 * debug options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
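
/*
 * Illustrative sketch, not part of this header: a hypothetical SLUB-side
 * walk summing the partial lists of every node:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 *
 * Because the iterator expands to a for loop whose body is guarded by an
 * if, an else placed directly after the body would bind to that hidden if;
 * keep the body a plain statement or block.
 */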

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif	/* MM_SLAB_H */