/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DEBUG_FREE)

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
		SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
152 */ 153 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 154 155 /* 156 * Set of flags that will prevent slab merging 157 */ 158 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 159 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 160 SLAB_FAILSLAB) 161 162 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 163 SLAB_CACHE_DMA | SLAB_NOTRACK) 164 165 #define OO_SHIFT 16 166 #define OO_MASK ((1 << OO_SHIFT) - 1) 167 #define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */ 168 169 /* Internal SLUB flags */ 170 #define __OBJECT_POISON 0x80000000UL /* Poison object */ 171 172 static int kmem_size = sizeof(struct kmem_cache); 173 174 #ifdef CONFIG_SMP 175 static struct notifier_block slab_notifier; 176 #endif 177 178 static enum { 179 DOWN, /* No slab functionality available */ 180 PARTIAL, /* Kmem_cache_node works */ 181 UP, /* Everything works but does not show up in sysfs */ 182 SYSFS /* Sysfs up */ 183 } slab_state = DOWN; 184 185 /* A list of all slab caches on the system */ 186 static DECLARE_RWSEM(slub_lock); 187 static LIST_HEAD(slab_caches); 188 189 /* 190 * Tracking user of a slab. 191 */ 192 struct track { 193 unsigned long addr; /* Called from address */ 194 int cpu; /* Was running on cpu */ 195 int pid; /* Pid context */ 196 unsigned long when; /* When did the operation occur */ 197 }; 198 199 enum track_item { TRACK_ALLOC, TRACK_FREE }; 200 201 #ifdef CONFIG_SYSFS 202 static int sysfs_slab_add(struct kmem_cache *); 203 static int sysfs_slab_alias(struct kmem_cache *, const char *); 204 static void sysfs_slab_remove(struct kmem_cache *); 205 206 #else 207 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 208 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 209 { return 0; } 210 static inline void sysfs_slab_remove(struct kmem_cache *s) 211 { 212 kfree(s->name); 213 kfree(s); 214 } 215 216 #endif 217 218 static inline void stat(struct kmem_cache *s, enum stat_item si) 219 { 220 #ifdef CONFIG_SLUB_STATS 221 __this_cpu_inc(s->cpu_slab->stat[si]); 222 #endif 223 } 224 225 /******************************************************************** 226 * Core slab cache functions 227 *******************************************************************/ 228 229 int slab_is_available(void) 230 { 231 return slab_state >= UP; 232 } 233 234 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 235 { 236 return s->node[node]; 237 } 238 239 /* Verify that a pointer has an address that is valid within a slab page */ 240 static inline int check_valid_pointer(struct kmem_cache *s, 241 struct page *page, const void *object) 242 { 243 void *base; 244 245 if (!object) 246 return 1; 247 248 base = page_address(page); 249 if (object < base || object >= base + page->objects * s->size || 250 (object - base) % s->size) { 251 return 0; 252 } 253 254 return 1; 255 } 256 257 static inline void *get_freepointer(struct kmem_cache *s, void *object) 258 { 259 return *(void **)(object + s->offset); 260 } 261 262 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 263 { 264 *(void **)(object + s->offset) = fp; 265 } 266 267 /* Loop over all objects in a slab */ 268 #define for_each_object(__p, __s, __addr, __objects) \ 269 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 270 __p += (__s)->size) 271 272 /* Scan freelist */ 273 #define for_each_free_object(__p, __s, __free) \ 274 for (__p = (__free); __p; __p = 
get_freepointer((__s), __p)) 275 276 /* Determine object index from a given position */ 277 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 278 { 279 return (p - addr) / s->size; 280 } 281 282 static inline struct kmem_cache_order_objects oo_make(int order, 283 unsigned long size) 284 { 285 struct kmem_cache_order_objects x = { 286 (order << OO_SHIFT) + (PAGE_SIZE << order) / size 287 }; 288 289 return x; 290 } 291 292 static inline int oo_order(struct kmem_cache_order_objects x) 293 { 294 return x.x >> OO_SHIFT; 295 } 296 297 static inline int oo_objects(struct kmem_cache_order_objects x) 298 { 299 return x.x & OO_MASK; 300 } 301 302 #ifdef CONFIG_SLUB_DEBUG 303 /* 304 * Debug settings: 305 */ 306 #ifdef CONFIG_SLUB_DEBUG_ON 307 static int slub_debug = DEBUG_DEFAULT_FLAGS; 308 #else 309 static int slub_debug; 310 #endif 311 312 static char *slub_debug_slabs; 313 static int disable_higher_order_debug; 314 315 /* 316 * Object debugging 317 */ 318 static void print_section(char *text, u8 *addr, unsigned int length) 319 { 320 int i, offset; 321 int newline = 1; 322 char ascii[17]; 323 324 ascii[16] = 0; 325 326 for (i = 0; i < length; i++) { 327 if (newline) { 328 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 329 newline = 0; 330 } 331 printk(KERN_CONT " %02x", addr[i]); 332 offset = i % 16; 333 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.'; 334 if (offset == 15) { 335 printk(KERN_CONT " %s\n", ascii); 336 newline = 1; 337 } 338 } 339 if (!newline) { 340 i %= 16; 341 while (i < 16) { 342 printk(KERN_CONT " "); 343 ascii[i] = ' '; 344 i++; 345 } 346 printk(KERN_CONT " %s\n", ascii); 347 } 348 } 349 350 static struct track *get_track(struct kmem_cache *s, void *object, 351 enum track_item alloc) 352 { 353 struct track *p; 354 355 if (s->offset) 356 p = object + s->offset + sizeof(void *); 357 else 358 p = object + s->inuse; 359 360 return p + alloc; 361 } 362 363 static void set_track(struct kmem_cache *s, void *object, 364 enum track_item alloc, unsigned long addr) 365 { 366 struct track *p = get_track(s, object, alloc); 367 368 if (addr) { 369 p->addr = addr; 370 p->cpu = smp_processor_id(); 371 p->pid = current->pid; 372 p->when = jiffies; 373 } else 374 memset(p, 0, sizeof(struct track)); 375 } 376 377 static void init_tracking(struct kmem_cache *s, void *object) 378 { 379 if (!(s->flags & SLAB_STORE_USER)) 380 return; 381 382 set_track(s, object, TRACK_FREE, 0UL); 383 set_track(s, object, TRACK_ALLOC, 0UL); 384 } 385 386 static void print_track(const char *s, struct track *t) 387 { 388 if (!t->addr) 389 return; 390 391 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 392 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); 393 } 394 395 static void print_tracking(struct kmem_cache *s, void *object) 396 { 397 if (!(s->flags & SLAB_STORE_USER)) 398 return; 399 400 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 401 print_track("Freed", get_track(s, object, TRACK_FREE)); 402 } 403 404 static void print_page_info(struct page *page) 405 { 406 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 407 page, page->objects, page->inuse, page->freelist, page->flags); 408 409 } 410 411 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 
412 { 413 va_list args; 414 char buf[100]; 415 416 va_start(args, fmt); 417 vsnprintf(buf, sizeof(buf), fmt, args); 418 va_end(args); 419 printk(KERN_ERR "========================================" 420 "=====================================\n"); 421 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 422 printk(KERN_ERR "----------------------------------------" 423 "-------------------------------------\n\n"); 424 } 425 426 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 427 { 428 va_list args; 429 char buf[100]; 430 431 va_start(args, fmt); 432 vsnprintf(buf, sizeof(buf), fmt, args); 433 va_end(args); 434 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 435 } 436 437 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 438 { 439 unsigned int off; /* Offset of last byte */ 440 u8 *addr = page_address(page); 441 442 print_tracking(s, p); 443 444 print_page_info(page); 445 446 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 447 p, p - addr, get_freepointer(s, p)); 448 449 if (p > addr + 16) 450 print_section("Bytes b4", p - 16, 16); 451 452 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE)); 453 454 if (s->flags & SLAB_RED_ZONE) 455 print_section("Redzone", p + s->objsize, 456 s->inuse - s->objsize); 457 458 if (s->offset) 459 off = s->offset + sizeof(void *); 460 else 461 off = s->inuse; 462 463 if (s->flags & SLAB_STORE_USER) 464 off += 2 * sizeof(struct track); 465 466 if (off != s->size) 467 /* Beginning of the filler is the free pointer */ 468 print_section("Padding", p + off, s->size - off); 469 470 dump_stack(); 471 } 472 473 static void object_err(struct kmem_cache *s, struct page *page, 474 u8 *object, char *reason) 475 { 476 slab_bug(s, "%s", reason); 477 print_trailer(s, page, object); 478 } 479 480 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 481 { 482 va_list args; 483 char buf[100]; 484 485 va_start(args, fmt); 486 vsnprintf(buf, sizeof(buf), fmt, args); 487 va_end(args); 488 slab_bug(s, "%s", buf); 489 print_page_info(page); 490 dump_stack(); 491 } 492 493 static void init_object(struct kmem_cache *s, void *object, u8 val) 494 { 495 u8 *p = object; 496 497 if (s->flags & __OBJECT_POISON) { 498 memset(p, POISON_FREE, s->objsize - 1); 499 p[s->objsize - 1] = POISON_END; 500 } 501 502 if (s->flags & SLAB_RED_ZONE) 503 memset(p + s->objsize, val, s->inuse - s->objsize); 504 } 505 506 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 507 { 508 while (bytes) { 509 if (*start != (u8)value) 510 return start; 511 start++; 512 bytes--; 513 } 514 return NULL; 515 } 516 517 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 518 void *from, void *to) 519 { 520 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 521 memset(from, data, to - from); 522 } 523 524 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 525 u8 *object, char *what, 526 u8 *start, unsigned int value, unsigned int bytes) 527 { 528 u8 *fault; 529 u8 *end; 530 531 fault = check_bytes(start, value, bytes); 532 if (!fault) 533 return 1; 534 535 end = start + bytes; 536 while (end > fault && end[-1] == value) 537 end--; 538 539 slab_bug(s, "%s overwritten", what); 540 printk(KERN_ERR "INFO: 0x%p-0x%p. 
First byte 0x%x instead of 0x%x\n", 541 fault, end - 1, fault[0], value); 542 print_trailer(s, page, object); 543 544 restore_bytes(s, what, value, fault, end); 545 return 0; 546 } 547 548 /* 549 * Object layout: 550 * 551 * object address 552 * Bytes of the object to be managed. 553 * If the freepointer may overlay the object then the free 554 * pointer is the first word of the object. 555 * 556 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 557 * 0xa5 (POISON_END) 558 * 559 * object + s->objsize 560 * Padding to reach word boundary. This is also used for Redzoning. 561 * Padding is extended by another word if Redzoning is enabled and 562 * objsize == inuse. 563 * 564 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 565 * 0xcc (RED_ACTIVE) for objects in use. 566 * 567 * object + s->inuse 568 * Meta data starts here. 569 * 570 * A. Free pointer (if we cannot overwrite object on free) 571 * B. Tracking data for SLAB_STORE_USER 572 * C. Padding to reach required alignment boundary or at mininum 573 * one word if debugging is on to be able to detect writes 574 * before the word boundary. 575 * 576 * Padding is done using 0x5a (POISON_INUSE) 577 * 578 * object + s->size 579 * Nothing is used beyond s->size. 580 * 581 * If slabcaches are merged then the objsize and inuse boundaries are mostly 582 * ignored. And therefore no slab options that rely on these boundaries 583 * may be used with merged slabcaches. 584 */ 585 586 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 587 { 588 unsigned long off = s->inuse; /* The end of info */ 589 590 if (s->offset) 591 /* Freepointer is placed after the object. */ 592 off += sizeof(void *); 593 594 if (s->flags & SLAB_STORE_USER) 595 /* We also have user information there */ 596 off += 2 * sizeof(struct track); 597 598 if (s->size == off) 599 return 1; 600 601 return check_bytes_and_report(s, page, p, "Object padding", 602 p + off, POISON_INUSE, s->size - off); 603 } 604 605 /* Check the pad bytes at the end of a slab page */ 606 static int slab_pad_check(struct kmem_cache *s, struct page *page) 607 { 608 u8 *start; 609 u8 *fault; 610 u8 *end; 611 int length; 612 int remainder; 613 614 if (!(s->flags & SLAB_POISON)) 615 return 1; 616 617 start = page_address(page); 618 length = (PAGE_SIZE << compound_order(page)); 619 end = start + length; 620 remainder = length % s->size; 621 if (!remainder) 622 return 1; 623 624 fault = check_bytes(end - remainder, POISON_INUSE, remainder); 625 if (!fault) 626 return 1; 627 while (end > fault && end[-1] == POISON_INUSE) 628 end--; 629 630 slab_err(s, page, "Padding overwritten. 
0x%p-0x%p", fault, end - 1);
	print_section("Padding", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->objsize;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->objsize))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->objsize);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->objsize - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->objsize - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp = page->freelist;
	void *object = NULL;
	unsigned long max_objects;

	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. 
Found %d but " 755 "should be %d", page->objects, max_objects); 756 page->objects = max_objects; 757 slab_fix(s, "Number of objects adjusted."); 758 } 759 if (page->inuse != page->objects - nr) { 760 slab_err(s, page, "Wrong object count. Counter is %d but " 761 "counted were %d", page->inuse, page->objects - nr); 762 page->inuse = page->objects - nr; 763 slab_fix(s, "Object count adjusted."); 764 } 765 return search == NULL; 766 } 767 768 static void trace(struct kmem_cache *s, struct page *page, void *object, 769 int alloc) 770 { 771 if (s->flags & SLAB_TRACE) { 772 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 773 s->name, 774 alloc ? "alloc" : "free", 775 object, page->inuse, 776 page->freelist); 777 778 if (!alloc) 779 print_section("Object", (void *)object, s->objsize); 780 781 dump_stack(); 782 } 783 } 784 785 /* 786 * Hooks for other subsystems that check memory allocations. In a typical 787 * production configuration these hooks all should produce no code at all. 788 */ 789 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 790 { 791 flags &= gfp_allowed_mask; 792 lockdep_trace_alloc(flags); 793 might_sleep_if(flags & __GFP_WAIT); 794 795 return should_failslab(s->objsize, flags, s->flags); 796 } 797 798 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) 799 { 800 flags &= gfp_allowed_mask; 801 kmemcheck_slab_alloc(s, flags, object, s->objsize); 802 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags); 803 } 804 805 static inline void slab_free_hook(struct kmem_cache *s, void *x) 806 { 807 kmemleak_free_recursive(x, s->flags); 808 } 809 810 static inline void slab_free_hook_irq(struct kmem_cache *s, void *object) 811 { 812 kmemcheck_slab_free(s, object, s->objsize); 813 debug_check_no_locks_freed(object, s->objsize); 814 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 815 debug_check_no_obj_freed(object, s->objsize); 816 } 817 818 /* 819 * Tracking of fully allocated slabs for debugging purposes. 820 */ 821 static void add_full(struct kmem_cache_node *n, struct page *page) 822 { 823 spin_lock(&n->list_lock); 824 list_add(&page->lru, &n->full); 825 spin_unlock(&n->list_lock); 826 } 827 828 static void remove_full(struct kmem_cache *s, struct page *page) 829 { 830 struct kmem_cache_node *n; 831 832 if (!(s->flags & SLAB_STORE_USER)) 833 return; 834 835 n = get_node(s, page_to_nid(page)); 836 837 spin_lock(&n->list_lock); 838 list_del(&page->lru); 839 spin_unlock(&n->list_lock); 840 } 841 842 /* Tracking of the number of slabs for debugging purposes */ 843 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 844 { 845 struct kmem_cache_node *n = get_node(s, node); 846 847 return atomic_long_read(&n->nr_slabs); 848 } 849 850 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 851 { 852 return atomic_long_read(&n->nr_slabs); 853 } 854 855 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 856 { 857 struct kmem_cache_node *n = get_node(s, node); 858 859 /* 860 * May be called early in order to allocate a slab for the 861 * kmem_cache_node structure. Solve the chicken-egg 862 * dilemma by deferring the increment of the count during 863 * bootstrap (see early_kmem_cache_node_alloc). 
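	 * (early_kmem_cache_node_alloc() increments the count itself once the
	 * kmem_cache_node structure for the node has been set up, so the
	 * deferred increment is not lost.)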
864 */ 865 if (n) { 866 atomic_long_inc(&n->nr_slabs); 867 atomic_long_add(objects, &n->total_objects); 868 } 869 } 870 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 871 { 872 struct kmem_cache_node *n = get_node(s, node); 873 874 atomic_long_dec(&n->nr_slabs); 875 atomic_long_sub(objects, &n->total_objects); 876 } 877 878 /* Object debug checks for alloc/free paths */ 879 static void setup_object_debug(struct kmem_cache *s, struct page *page, 880 void *object) 881 { 882 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 883 return; 884 885 init_object(s, object, SLUB_RED_INACTIVE); 886 init_tracking(s, object); 887 } 888 889 static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page, 890 void *object, unsigned long addr) 891 { 892 if (!check_slab(s, page)) 893 goto bad; 894 895 if (!on_freelist(s, page, object)) { 896 object_err(s, page, object, "Object already allocated"); 897 goto bad; 898 } 899 900 if (!check_valid_pointer(s, page, object)) { 901 object_err(s, page, object, "Freelist Pointer check fails"); 902 goto bad; 903 } 904 905 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 906 goto bad; 907 908 /* Success perform special debug activities for allocs */ 909 if (s->flags & SLAB_STORE_USER) 910 set_track(s, object, TRACK_ALLOC, addr); 911 trace(s, page, object, 1); 912 init_object(s, object, SLUB_RED_ACTIVE); 913 return 1; 914 915 bad: 916 if (PageSlab(page)) { 917 /* 918 * If this is a slab page then lets do the best we can 919 * to avoid issues in the future. Marking all objects 920 * as used avoids touching the remaining objects. 921 */ 922 slab_fix(s, "Marking all objects used"); 923 page->inuse = page->objects; 924 page->freelist = NULL; 925 } 926 return 0; 927 } 928 929 static noinline int free_debug_processing(struct kmem_cache *s, 930 struct page *page, void *object, unsigned long addr) 931 { 932 if (!check_slab(s, page)) 933 goto fail; 934 935 if (!check_valid_pointer(s, page, object)) { 936 slab_err(s, page, "Invalid object pointer 0x%p", object); 937 goto fail; 938 } 939 940 if (on_freelist(s, page, object)) { 941 object_err(s, page, object, "Object already free"); 942 goto fail; 943 } 944 945 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 946 return 0; 947 948 if (unlikely(s != page->slab)) { 949 if (!PageSlab(page)) { 950 slab_err(s, page, "Attempt to free object(0x%p) " 951 "outside of slab", object); 952 } else if (!page->slab) { 953 printk(KERN_ERR 954 "SLUB <none>: no slab for object 0x%p.\n", 955 object); 956 dump_stack(); 957 } else 958 object_err(s, page, object, 959 "page slab pointer corrupt."); 960 goto fail; 961 } 962 963 /* Special debug activities for freeing objects */ 964 if (!PageSlubFrozen(page) && !page->freelist) 965 remove_full(s, page); 966 if (s->flags & SLAB_STORE_USER) 967 set_track(s, object, TRACK_FREE, addr); 968 trace(s, page, object, 0); 969 init_object(s, object, SLUB_RED_INACTIVE); 970 return 1; 971 972 fail: 973 slab_fix(s, "Object at 0x%p not freed", object); 974 return 0; 975 } 976 977 static int __init setup_slub_debug(char *str) 978 { 979 slub_debug = DEBUG_DEFAULT_FLAGS; 980 if (*str++ != '=' || !*str) 981 /* 982 * No options specified. Switch on full debugging. 983 */ 984 goto out; 985 986 if (*str == ',') 987 /* 988 * No options but restriction on slabs. This means full 989 * debugging for slabs matching a pattern. 
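		 * For example, booting with "slub_debug=,dentry" applies the
		 * default debug flags only to caches whose names begin with
		 * "dentry" (illustrative cache name).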
990 */ 991 goto check_slabs; 992 993 if (tolower(*str) == 'o') { 994 /* 995 * Avoid enabling debugging on caches if its minimum order 996 * would increase as a result. 997 */ 998 disable_higher_order_debug = 1; 999 goto out; 1000 } 1001 1002 slub_debug = 0; 1003 if (*str == '-') 1004 /* 1005 * Switch off all debugging measures. 1006 */ 1007 goto out; 1008 1009 /* 1010 * Determine which debug features should be switched on 1011 */ 1012 for (; *str && *str != ','; str++) { 1013 switch (tolower(*str)) { 1014 case 'f': 1015 slub_debug |= SLAB_DEBUG_FREE; 1016 break; 1017 case 'z': 1018 slub_debug |= SLAB_RED_ZONE; 1019 break; 1020 case 'p': 1021 slub_debug |= SLAB_POISON; 1022 break; 1023 case 'u': 1024 slub_debug |= SLAB_STORE_USER; 1025 break; 1026 case 't': 1027 slub_debug |= SLAB_TRACE; 1028 break; 1029 case 'a': 1030 slub_debug |= SLAB_FAILSLAB; 1031 break; 1032 default: 1033 printk(KERN_ERR "slub_debug option '%c' " 1034 "unknown. skipped\n", *str); 1035 } 1036 } 1037 1038 check_slabs: 1039 if (*str == ',') 1040 slub_debug_slabs = str + 1; 1041 out: 1042 return 1; 1043 } 1044 1045 __setup("slub_debug", setup_slub_debug); 1046 1047 static unsigned long kmem_cache_flags(unsigned long objsize, 1048 unsigned long flags, const char *name, 1049 void (*ctor)(void *)) 1050 { 1051 /* 1052 * Enable debugging if selected on the kernel commandline. 1053 */ 1054 if (slub_debug && (!slub_debug_slabs || 1055 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))) 1056 flags |= slub_debug; 1057 1058 return flags; 1059 } 1060 #else 1061 static inline void setup_object_debug(struct kmem_cache *s, 1062 struct page *page, void *object) {} 1063 1064 static inline int alloc_debug_processing(struct kmem_cache *s, 1065 struct page *page, void *object, unsigned long addr) { return 0; } 1066 1067 static inline int free_debug_processing(struct kmem_cache *s, 1068 struct page *page, void *object, unsigned long addr) { return 0; } 1069 1070 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1071 { return 1; } 1072 static inline int check_object(struct kmem_cache *s, struct page *page, 1073 void *object, u8 val) { return 1; } 1074 static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1075 static inline unsigned long kmem_cache_flags(unsigned long objsize, 1076 unsigned long flags, const char *name, 1077 void (*ctor)(void *)) 1078 { 1079 return flags; 1080 } 1081 #define slub_debug 0 1082 1083 #define disable_higher_order_debug 0 1084 1085 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1086 { return 0; } 1087 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1088 { return 0; } 1089 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1090 int objects) {} 1091 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1092 int objects) {} 1093 1094 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1095 { return 0; } 1096 1097 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1098 void *object) {} 1099 1100 static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1101 1102 static inline void slab_free_hook_irq(struct kmem_cache *s, 1103 void *object) {} 1104 1105 #endif /* CONFIG_SLUB_DEBUG */ 1106 1107 /* 1108 * Slab allocation and freeing 1109 */ 1110 static inline struct page *alloc_slab_page(gfp_t flags, int node, 1111 struct kmem_cache_order_objects oo) 1112 { 1113 int order = oo_order(oo); 1114 1115 flags |= __GFP_NOTRACK; 1116 1117 if (node == 
NUMA_NO_NODE) 1118 return alloc_pages(flags, order); 1119 else 1120 return alloc_pages_exact_node(node, flags, order); 1121 } 1122 1123 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1124 { 1125 struct page *page; 1126 struct kmem_cache_order_objects oo = s->oo; 1127 gfp_t alloc_gfp; 1128 1129 flags |= s->allocflags; 1130 1131 /* 1132 * Let the initial higher-order allocation fail under memory pressure 1133 * so we fall-back to the minimum order allocation. 1134 */ 1135 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1136 1137 page = alloc_slab_page(alloc_gfp, node, oo); 1138 if (unlikely(!page)) { 1139 oo = s->min; 1140 /* 1141 * Allocation may have failed due to fragmentation. 1142 * Try a lower order alloc if possible 1143 */ 1144 page = alloc_slab_page(flags, node, oo); 1145 if (!page) 1146 return NULL; 1147 1148 stat(s, ORDER_FALLBACK); 1149 } 1150 1151 if (kmemcheck_enabled 1152 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { 1153 int pages = 1 << oo_order(oo); 1154 1155 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node); 1156 1157 /* 1158 * Objects from caches that have a constructor don't get 1159 * cleared when they're allocated, so we need to do it here. 1160 */ 1161 if (s->ctor) 1162 kmemcheck_mark_uninitialized_pages(page, pages); 1163 else 1164 kmemcheck_mark_unallocated_pages(page, pages); 1165 } 1166 1167 page->objects = oo_objects(oo); 1168 mod_zone_page_state(page_zone(page), 1169 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 1170 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1171 1 << oo_order(oo)); 1172 1173 return page; 1174 } 1175 1176 static void setup_object(struct kmem_cache *s, struct page *page, 1177 void *object) 1178 { 1179 setup_object_debug(s, page, object); 1180 if (unlikely(s->ctor)) 1181 s->ctor(object); 1182 } 1183 1184 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1185 { 1186 struct page *page; 1187 void *start; 1188 void *last; 1189 void *p; 1190 1191 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1192 1193 page = allocate_slab(s, 1194 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1195 if (!page) 1196 goto out; 1197 1198 inc_slabs_node(s, page_to_nid(page), page->objects); 1199 page->slab = s; 1200 page->flags |= 1 << PG_slab; 1201 1202 start = page_address(page); 1203 1204 if (unlikely(s->flags & SLAB_POISON)) 1205 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1206 1207 last = start; 1208 for_each_object(p, s, start, page->objects) { 1209 setup_object(s, page, last); 1210 set_freepointer(s, last, p); 1211 last = p; 1212 } 1213 setup_object(s, page, last); 1214 set_freepointer(s, last, NULL); 1215 1216 page->freelist = start; 1217 page->inuse = 0; 1218 out: 1219 return page; 1220 } 1221 1222 static void __free_slab(struct kmem_cache *s, struct page *page) 1223 { 1224 int order = compound_order(page); 1225 int pages = 1 << order; 1226 1227 if (kmem_cache_debug(s)) { 1228 void *p; 1229 1230 slab_pad_check(s, page); 1231 for_each_object(p, s, page_address(page), 1232 page->objects) 1233 check_object(s, page, p, SLUB_RED_INACTIVE); 1234 } 1235 1236 kmemcheck_free_shadow(page, compound_order(page)); 1237 1238 mod_zone_page_state(page_zone(page), 1239 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1240 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1241 -pages); 1242 1243 __ClearPageSlab(page); 1244 reset_page_mapcount(page); 1245 if (current->reclaim_state) 1246 current->reclaim_state->reclaimed_slab += pages; 1247 __free_pages(page, order); 1248 } 1249 1250 static void rcu_free_slab(struct rcu_head *h) 1251 { 1252 struct page *page; 1253 1254 page = container_of((struct list_head *)h, struct page, lru); 1255 __free_slab(page->slab, page); 1256 } 1257 1258 static void free_slab(struct kmem_cache *s, struct page *page) 1259 { 1260 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1261 /* 1262 * RCU free overloads the RCU head over the LRU 1263 */ 1264 struct rcu_head *head = (void *)&page->lru; 1265 1266 call_rcu(head, rcu_free_slab); 1267 } else 1268 __free_slab(s, page); 1269 } 1270 1271 static void discard_slab(struct kmem_cache *s, struct page *page) 1272 { 1273 dec_slabs_node(s, page_to_nid(page), page->objects); 1274 free_slab(s, page); 1275 } 1276 1277 /* 1278 * Per slab locking using the pagelock 1279 */ 1280 static __always_inline void slab_lock(struct page *page) 1281 { 1282 bit_spin_lock(PG_locked, &page->flags); 1283 } 1284 1285 static __always_inline void slab_unlock(struct page *page) 1286 { 1287 __bit_spin_unlock(PG_locked, &page->flags); 1288 } 1289 1290 static __always_inline int slab_trylock(struct page *page) 1291 { 1292 int rc = 1; 1293 1294 rc = bit_spin_trylock(PG_locked, &page->flags); 1295 return rc; 1296 } 1297 1298 /* 1299 * Management of partially allocated slabs 1300 */ 1301 static void add_partial(struct kmem_cache_node *n, 1302 struct page *page, int tail) 1303 { 1304 spin_lock(&n->list_lock); 1305 n->nr_partial++; 1306 if (tail) 1307 list_add_tail(&page->lru, &n->partial); 1308 else 1309 list_add(&page->lru, &n->partial); 1310 spin_unlock(&n->list_lock); 1311 } 1312 1313 static inline void __remove_partial(struct kmem_cache_node *n, 1314 struct page *page) 1315 { 1316 list_del(&page->lru); 1317 n->nr_partial--; 1318 } 1319 1320 static void remove_partial(struct kmem_cache *s, struct page *page) 1321 { 1322 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1323 1324 spin_lock(&n->list_lock); 1325 __remove_partial(n, page); 1326 spin_unlock(&n->list_lock); 1327 } 1328 1329 /* 1330 * Lock slab and remove from the partial list. 1331 * 1332 * Must hold list_lock. 1333 */ 1334 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, 1335 struct page *page) 1336 { 1337 if (slab_trylock(page)) { 1338 __remove_partial(n, page); 1339 __SetPageSlubFrozen(page); 1340 return 1; 1341 } 1342 return 0; 1343 } 1344 1345 /* 1346 * Try to allocate a partial slab from a specific node. 1347 */ 1348 static struct page *get_partial_node(struct kmem_cache_node *n) 1349 { 1350 struct page *page; 1351 1352 /* 1353 * Racy check. If we mistakenly see no partial slabs then we 1354 * just allocate an empty slab. If we mistakenly try to get a 1355 * partial slab and there is none available then get_partials() 1356 * will return NULL. 1357 */ 1358 if (!n || !n->nr_partial) 1359 return NULL; 1360 1361 spin_lock(&n->list_lock); 1362 list_for_each_entry(page, &n->partial, lru) 1363 if (lock_and_freeze_slab(n, page)) 1364 goto out; 1365 page = NULL; 1366 out: 1367 spin_unlock(&n->list_lock); 1368 return page; 1369 } 1370 1371 /* 1372 * Get a page from somewhere. Search in increasing NUMA distances. 
1373 */ 1374 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1375 { 1376 #ifdef CONFIG_NUMA 1377 struct zonelist *zonelist; 1378 struct zoneref *z; 1379 struct zone *zone; 1380 enum zone_type high_zoneidx = gfp_zone(flags); 1381 struct page *page; 1382 1383 /* 1384 * The defrag ratio allows a configuration of the tradeoffs between 1385 * inter node defragmentation and node local allocations. A lower 1386 * defrag_ratio increases the tendency to do local allocations 1387 * instead of attempting to obtain partial slabs from other nodes. 1388 * 1389 * If the defrag_ratio is set to 0 then kmalloc() always 1390 * returns node local objects. If the ratio is higher then kmalloc() 1391 * may return off node objects because partial slabs are obtained 1392 * from other nodes and filled up. 1393 * 1394 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1395 * defrag_ratio = 1000) then every (well almost) allocation will 1396 * first attempt to defrag slab caches on other nodes. This means 1397 * scanning over all nodes to look for partial slabs which may be 1398 * expensive if we do it every time we are trying to find a slab 1399 * with available objects. 1400 */ 1401 if (!s->remote_node_defrag_ratio || 1402 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1403 return NULL; 1404 1405 get_mems_allowed(); 1406 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1407 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1408 struct kmem_cache_node *n; 1409 1410 n = get_node(s, zone_to_nid(zone)); 1411 1412 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1413 n->nr_partial > s->min_partial) { 1414 page = get_partial_node(n); 1415 if (page) { 1416 put_mems_allowed(); 1417 return page; 1418 } 1419 } 1420 } 1421 put_mems_allowed(); 1422 #endif 1423 return NULL; 1424 } 1425 1426 /* 1427 * Get a partial page, lock it and return it. 1428 */ 1429 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1430 { 1431 struct page *page; 1432 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node; 1433 1434 page = get_partial_node(get_node(s, searchnode)); 1435 if (page || node != -1) 1436 return page; 1437 1438 return get_any_partial(s, flags); 1439 } 1440 1441 /* 1442 * Move a page back to the lists. 1443 * 1444 * Must be called with the slab lock held. 1445 * 1446 * On exit the slab lock will have been dropped. 1447 */ 1448 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1449 __releases(bitlock) 1450 { 1451 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1452 1453 __ClearPageSlubFrozen(page); 1454 if (page->inuse) { 1455 1456 if (page->freelist) { 1457 add_partial(n, page, tail); 1458 stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1459 } else { 1460 stat(s, DEACTIVATE_FULL); 1461 if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) 1462 add_full(n, page); 1463 } 1464 slab_unlock(page); 1465 } else { 1466 stat(s, DEACTIVATE_EMPTY); 1467 if (n->nr_partial < s->min_partial) { 1468 /* 1469 * Adding an empty slab to the partial slabs in order 1470 * to avoid page allocator overhead. This slab needs 1471 * to come after the other slabs with objects in 1472 * so that the others get filled first. That way the 1473 * size of the partial list stays small. 1474 * 1475 * kmem_cache_shrink can reclaim any empty slabs from 1476 * the partial list. 
1477 */ 1478 add_partial(n, page, 1); 1479 slab_unlock(page); 1480 } else { 1481 slab_unlock(page); 1482 stat(s, FREE_SLAB); 1483 discard_slab(s, page); 1484 } 1485 } 1486 } 1487 1488 /* 1489 * Remove the cpu slab 1490 */ 1491 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1492 __releases(bitlock) 1493 { 1494 struct page *page = c->page; 1495 int tail = 1; 1496 1497 if (page->freelist) 1498 stat(s, DEACTIVATE_REMOTE_FREES); 1499 /* 1500 * Merge cpu freelist into slab freelist. Typically we get here 1501 * because both freelists are empty. So this is unlikely 1502 * to occur. 1503 */ 1504 while (unlikely(c->freelist)) { 1505 void **object; 1506 1507 tail = 0; /* Hot objects. Put the slab first */ 1508 1509 /* Retrieve object from cpu_freelist */ 1510 object = c->freelist; 1511 c->freelist = get_freepointer(s, c->freelist); 1512 1513 /* And put onto the regular freelist */ 1514 set_freepointer(s, object, page->freelist); 1515 page->freelist = object; 1516 page->inuse--; 1517 } 1518 c->page = NULL; 1519 unfreeze_slab(s, page, tail); 1520 } 1521 1522 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1523 { 1524 stat(s, CPUSLAB_FLUSH); 1525 slab_lock(c->page); 1526 deactivate_slab(s, c); 1527 } 1528 1529 /* 1530 * Flush cpu slab. 1531 * 1532 * Called from IPI handler with interrupts disabled. 1533 */ 1534 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1535 { 1536 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 1537 1538 if (likely(c && c->page)) 1539 flush_slab(s, c); 1540 } 1541 1542 static void flush_cpu_slab(void *d) 1543 { 1544 struct kmem_cache *s = d; 1545 1546 __flush_cpu_slab(s, smp_processor_id()); 1547 } 1548 1549 static void flush_all(struct kmem_cache *s) 1550 { 1551 on_each_cpu(flush_cpu_slab, s, 1); 1552 } 1553 1554 /* 1555 * Check if the objects in a per cpu structure fit numa 1556 * locality expectations. 
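 * Returns 1 when the allocation may be satisfied from the cpu slab: either no
 * particular node was requested (NUMA_NO_NODE) or the cpu slab already
 * resides on the requested node.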
1557 */ 1558 static inline int node_match(struct kmem_cache_cpu *c, int node) 1559 { 1560 #ifdef CONFIG_NUMA 1561 if (node != NUMA_NO_NODE && c->node != node) 1562 return 0; 1563 #endif 1564 return 1; 1565 } 1566 1567 static int count_free(struct page *page) 1568 { 1569 return page->objects - page->inuse; 1570 } 1571 1572 static unsigned long count_partial(struct kmem_cache_node *n, 1573 int (*get_count)(struct page *)) 1574 { 1575 unsigned long flags; 1576 unsigned long x = 0; 1577 struct page *page; 1578 1579 spin_lock_irqsave(&n->list_lock, flags); 1580 list_for_each_entry(page, &n->partial, lru) 1581 x += get_count(page); 1582 spin_unlock_irqrestore(&n->list_lock, flags); 1583 return x; 1584 } 1585 1586 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 1587 { 1588 #ifdef CONFIG_SLUB_DEBUG 1589 return atomic_long_read(&n->total_objects); 1590 #else 1591 return 0; 1592 #endif 1593 } 1594 1595 static noinline void 1596 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 1597 { 1598 int node; 1599 1600 printk(KERN_WARNING 1601 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n", 1602 nid, gfpflags); 1603 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, " 1604 "default order: %d, min order: %d\n", s->name, s->objsize, 1605 s->size, oo_order(s->oo), oo_order(s->min)); 1606 1607 if (oo_order(s->min) > get_order(s->objsize)) 1608 printk(KERN_WARNING " %s debugging increased min order, use " 1609 "slub_debug=O to disable.\n", s->name); 1610 1611 for_each_online_node(node) { 1612 struct kmem_cache_node *n = get_node(s, node); 1613 unsigned long nr_slabs; 1614 unsigned long nr_objs; 1615 unsigned long nr_free; 1616 1617 if (!n) 1618 continue; 1619 1620 nr_free = count_partial(n, count_free); 1621 nr_slabs = node_nr_slabs(n); 1622 nr_objs = node_nr_objs(n); 1623 1624 printk(KERN_WARNING 1625 " node %d: slabs: %ld, objs: %ld, free: %ld\n", 1626 node, nr_slabs, nr_objs, nr_free); 1627 } 1628 } 1629 1630 /* 1631 * Slow path. The lockless freelist is empty or we need to perform 1632 * debugging duties. 1633 * 1634 * Interrupts are disabled. 1635 * 1636 * Processing is still very fast if new objects have been freed to the 1637 * regular freelist. In that case we simply take over the regular freelist 1638 * as the lockless freelist and zap the regular freelist. 1639 * 1640 * If that is not working then we fall back to the partial lists. We take the 1641 * first element of the freelist as the object to allocate now and move the 1642 * rest of the freelist to the lockless freelist. 1643 * 1644 * And if we were unable to get a new slab from the partial slab lists then 1645 * we need to allocate a new slab. This is the slowest path since it involves 1646 * a call to the page allocator and the setup of a new slab. 
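 *
 * In short: refill from the cpu slab's freelist, then try the partial lists,
 * and finally allocate a new slab from the page allocator.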
1647 */ 1648 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 1649 unsigned long addr, struct kmem_cache_cpu *c) 1650 { 1651 void **object; 1652 struct page *new; 1653 1654 /* We handle __GFP_ZERO in the caller */ 1655 gfpflags &= ~__GFP_ZERO; 1656 1657 if (!c->page) 1658 goto new_slab; 1659 1660 slab_lock(c->page); 1661 if (unlikely(!node_match(c, node))) 1662 goto another_slab; 1663 1664 stat(s, ALLOC_REFILL); 1665 1666 load_freelist: 1667 object = c->page->freelist; 1668 if (unlikely(!object)) 1669 goto another_slab; 1670 if (kmem_cache_debug(s)) 1671 goto debug; 1672 1673 c->freelist = get_freepointer(s, object); 1674 c->page->inuse = c->page->objects; 1675 c->page->freelist = NULL; 1676 c->node = page_to_nid(c->page); 1677 unlock_out: 1678 slab_unlock(c->page); 1679 stat(s, ALLOC_SLOWPATH); 1680 return object; 1681 1682 another_slab: 1683 deactivate_slab(s, c); 1684 1685 new_slab: 1686 new = get_partial(s, gfpflags, node); 1687 if (new) { 1688 c->page = new; 1689 stat(s, ALLOC_FROM_PARTIAL); 1690 goto load_freelist; 1691 } 1692 1693 gfpflags &= gfp_allowed_mask; 1694 if (gfpflags & __GFP_WAIT) 1695 local_irq_enable(); 1696 1697 new = new_slab(s, gfpflags, node); 1698 1699 if (gfpflags & __GFP_WAIT) 1700 local_irq_disable(); 1701 1702 if (new) { 1703 c = __this_cpu_ptr(s->cpu_slab); 1704 stat(s, ALLOC_SLAB); 1705 if (c->page) 1706 flush_slab(s, c); 1707 slab_lock(new); 1708 __SetPageSlubFrozen(new); 1709 c->page = new; 1710 goto load_freelist; 1711 } 1712 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) 1713 slab_out_of_memory(s, gfpflags, node); 1714 return NULL; 1715 debug: 1716 if (!alloc_debug_processing(s, c->page, object, addr)) 1717 goto another_slab; 1718 1719 c->page->inuse++; 1720 c->page->freelist = get_freepointer(s, object); 1721 c->node = NUMA_NO_NODE; 1722 goto unlock_out; 1723 } 1724 1725 /* 1726 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 1727 * have the fastpath folded into their functions. So no function call 1728 * overhead for requests that can be satisfied on the fastpath. 1729 * 1730 * The fastpath works by first checking if the lockless freelist can be used. 1731 * If not then __slab_alloc is called for slow processing. 1732 * 1733 * Otherwise we can simply pick the next object from the lockless free list. 
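 *
 * Illustrative caller usage (my_cache and struct foo are placeholders):
 *
 *	struct foo *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	if (obj)
 *		kmem_cache_free(my_cache, obj);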
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr)
{
	void **object;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	if (slab_pre_alloc_hook(s, gfpflags))
		return NULL;

	local_irq_save(flags);
	c = __this_cpu_ptr(s->cpu_slab);
	object = c->freelist;
	if (unlikely(!object || !node_match(c, node)))

		object = __slab_alloc(s, gfpflags, node, addr, c);

	else {
		c->freelist = get_freepointer(s, object);
		stat(s, ALLOC_FASTPATH);
	}
	local_irq_restore(flags);

	if (unlikely(gfpflags & __GFP_ZERO) && object)
		memset(object, 0, s->objsize);

	slab_post_alloc_hook(s, gfpflags, object);

	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_alloc_notrace);
#endif

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    s->objsize, s->size, gfpflags, node);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
				    gfp_t gfpflags,
				    int node)
{
	return slab_alloc(s, gfpflags, node, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
#endif
#endif

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
			void *x, unsigned long addr)
{
	void *prior;
	void **object = (void *)x;

	stat(s, FREE_SLOWPATH);
	slab_lock(page);

	if (kmem_cache_debug(s))
		goto debug;

checks_ok:
	prior = page->freelist;
	set_freepointer(s, object, prior);
	page->freelist = object;
	page->inuse--;

	if (unlikely(PageSlubFrozen(page))) {
		stat(s, FREE_FROZEN);
		goto out_unlock;
	}

	if (unlikely(!page->inuse))
		goto slab_empty;

	/*
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
	if (unlikely(!prior)) {
		add_partial(get_node(s, page_to_nid(page)), page, 1);
		stat(s, FREE_ADD_PARTIAL);
	}

out_unlock:
	slab_unlock(page);
	return;

slab_empty:
	if (prior) {
		/*
		 * Slab still on the partial list.
		 */
		remove_partial(s, page);
		stat(s, FREE_REMOVE_PARTIAL);
	}
	slab_unlock(page);
	stat(s, FREE_SLAB);
	discard_slab(s, page);
	return;

debug:
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	goto checks_ok;
}

/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
			struct page *page, void *x, unsigned long addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	slab_free_hook(s, x);

	local_irq_save(flags);
	c = __this_cpu_ptr(s->cpu_slab);

	slab_free_hook_irq(s, x);

	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
		set_freepointer(s, object, c->freelist);
		c->freelist = object;
		stat(s, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr);

	local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page *page;

	page = virt_to_head_page(x);

	slab_free(s, page, x, _RET_IP_);

	trace_kmem_cache_free(_RET_IP_, x);
}
EXPORT_SYMBOL(kmem_cache_free);

/* Figure out on which slab page the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;

	return page;
}

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor has always one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
		return get_order(size * MAX_OBJS_PER_PAGE) - 1;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;

	}

	return order;
}

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;
	int max_objects;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
	 *
	 * First we reduce the acceptable waste in a slab. Then
	 * we reduce the minimum objects required in a slab.
	 */
	min_objects = slub_min_objects;
	if (!min_objects)
		min_objects = 4 * (fls(nr_cpu_ids) + 1);
	max_objects = (PAGE_SIZE << slub_max_order)/size;
	min_objects = min(min_objects, max_objects);

	while (min_objects > 1) {
		fraction = 16;
		while (fraction >= 4) {
			order = slab_order(size, min_objects,
						slub_max_order, fraction);
			if (order <= slub_max_order)
				return order;
			fraction /= 2;
		}
		min_objects--;
	}

	/*
	 * We were unable to place multiple objects in a slab. Now
	 * let's see if we can place a single object there.
	 */
	order = slab_order(size, 1, slub_max_order, 1);
	if (order <= slub_max_order)
		return order;

	/*
	 * Doh this slab cannot be placed using slub_max_order.
	 */
	order = slab_order(size, 1, MAX_ORDER, 1);
	if (order < MAX_ORDER)
		return order;
	return -ENOSYS;
}

/*
 * Figure out what the alignment of the objects will be.
 */
static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
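	 * For example, with 64-byte cache lines a 20-byte object in a cache
	 * created with SLAB_HWCACHE_ALIGN ends up 32-byte aligned: the line
	 * size is halved until the object no longer fits into half of it.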
2073 * 2074 * The hardware cache alignment cannot override the specified 2075 * alignment though. If that is greater then use it. 2076 */ 2077 if (flags & SLAB_HWCACHE_ALIGN) { 2078 unsigned long ralign = cache_line_size(); 2079 while (size <= ralign / 2) 2080 ralign /= 2; 2081 align = max(align, ralign); 2082 } 2083 2084 if (align < ARCH_SLAB_MINALIGN) 2085 align = ARCH_SLAB_MINALIGN; 2086 2087 return ALIGN(align, sizeof(void *)); 2088 } 2089 2090 static void 2091 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 2092 { 2093 n->nr_partial = 0; 2094 spin_lock_init(&n->list_lock); 2095 INIT_LIST_HEAD(&n->partial); 2096 #ifdef CONFIG_SLUB_DEBUG 2097 atomic_long_set(&n->nr_slabs, 0); 2098 atomic_long_set(&n->total_objects, 0); 2099 INIT_LIST_HEAD(&n->full); 2100 #endif 2101 } 2102 2103 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 2104 { 2105 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2106 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2107 2108 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); 2109 2110 return s->cpu_slab != NULL; 2111 } 2112 2113 static struct kmem_cache *kmem_cache_node; 2114 2115 /* 2116 * No kmalloc_node yet so do it by hand. We know that this is the first 2117 * slab on the node for this slabcache. There are no concurrent accesses 2118 * possible. 2119 * 2120 * Note that this function only works on the kmalloc_node_cache 2121 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2122 * memory on a fresh node that has no slab structures yet. 2123 */ 2124 static void early_kmem_cache_node_alloc(int node) 2125 { 2126 struct page *page; 2127 struct kmem_cache_node *n; 2128 unsigned long flags; 2129 2130 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 2131 2132 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 2133 2134 BUG_ON(!page); 2135 if (page_to_nid(page) != node) { 2136 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2137 "node %d\n", node); 2138 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2139 "in order to be able to continue\n"); 2140 } 2141 2142 n = page->freelist; 2143 BUG_ON(!n); 2144 page->freelist = get_freepointer(kmem_cache_node, n); 2145 page->inuse++; 2146 kmem_cache_node->node[node] = n; 2147 #ifdef CONFIG_SLUB_DEBUG 2148 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 2149 init_tracking(kmem_cache_node, n); 2150 #endif 2151 init_kmem_cache_node(n, kmem_cache_node); 2152 inc_slabs_node(kmem_cache_node, node, page->objects); 2153 2154 /* 2155 * lockdep requires consistent irq usage for each lock 2156 * so even though there cannot be a race this early in 2157 * the boot sequence, we still disable irqs. 
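 * (The list_lock is otherwise only ever taken with interrupts off, so
 * taking it with interrupts enabled here would make lockdep flag an
 * inconsistent irq state for the lock.)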
2158 */ 2159 local_irq_save(flags); 2160 add_partial(n, page, 0); 2161 local_irq_restore(flags); 2162 } 2163 2164 static void free_kmem_cache_nodes(struct kmem_cache *s) 2165 { 2166 int node; 2167 2168 for_each_node_state(node, N_NORMAL_MEMORY) { 2169 struct kmem_cache_node *n = s->node[node]; 2170 2171 if (n) 2172 kmem_cache_free(kmem_cache_node, n); 2173 2174 s->node[node] = NULL; 2175 } 2176 } 2177 2178 static int init_kmem_cache_nodes(struct kmem_cache *s) 2179 { 2180 int node; 2181 2182 for_each_node_state(node, N_NORMAL_MEMORY) { 2183 struct kmem_cache_node *n; 2184 2185 if (slab_state == DOWN) { 2186 early_kmem_cache_node_alloc(node); 2187 continue; 2188 } 2189 n = kmem_cache_alloc_node(kmem_cache_node, 2190 GFP_KERNEL, node); 2191 2192 if (!n) { 2193 free_kmem_cache_nodes(s); 2194 return 0; 2195 } 2196 2197 s->node[node] = n; 2198 init_kmem_cache_node(n, s); 2199 } 2200 return 1; 2201 } 2202 2203 static void set_min_partial(struct kmem_cache *s, unsigned long min) 2204 { 2205 if (min < MIN_PARTIAL) 2206 min = MIN_PARTIAL; 2207 else if (min > MAX_PARTIAL) 2208 min = MAX_PARTIAL; 2209 s->min_partial = min; 2210 } 2211 2212 /* 2213 * calculate_sizes() determines the order and the distribution of data within 2214 * a slab object. 2215 */ 2216 static int calculate_sizes(struct kmem_cache *s, int forced_order) 2217 { 2218 unsigned long flags = s->flags; 2219 unsigned long size = s->objsize; 2220 unsigned long align = s->align; 2221 int order; 2222 2223 /* 2224 * Round up object size to the next word boundary. We can only 2225 * place the free pointer at word boundaries and this determines 2226 * the possible location of the free pointer. 2227 */ 2228 size = ALIGN(size, sizeof(void *)); 2229 2230 #ifdef CONFIG_SLUB_DEBUG 2231 /* 2232 * Determine if we can poison the object itself. If the user of 2233 * the slab may touch the object after free or before allocation 2234 * then we should never poison the object itself. 2235 */ 2236 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2237 !s->ctor) 2238 s->flags |= __OBJECT_POISON; 2239 else 2240 s->flags &= ~__OBJECT_POISON; 2241 2242 2243 /* 2244 * If we are Redzoning then check if there is some space between the 2245 * end of the object and the free pointer. If not then add an 2246 * additional word to have some bytes to store Redzone information. 2247 */ 2248 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2249 size += sizeof(void *); 2250 #endif 2251 2252 /* 2253 * With that we have determined the number of bytes in actual use 2254 * by the object. This is the potential offset to the free pointer. 2255 */ 2256 s->inuse = size; 2257 2258 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2259 s->ctor)) { 2260 /* 2261 * Relocate free pointer after the object if it is not 2262 * permitted to overwrite the first word of the object on 2263 * kmem_cache_free. 2264 * 2265 * This is the case if we do RCU, have a constructor or 2266 * destructor or are poisoning the objects. 2267 */ 2268 s->offset = size; 2269 size += sizeof(void *); 2270 } 2271 2272 #ifdef CONFIG_SLUB_DEBUG 2273 if (flags & SLAB_STORE_USER) 2274 /* 2275 * Need to store information about allocs and frees after 2276 * the object. 2277 */ 2278 size += 2 * sizeof(struct track); 2279 2280 if (flags & SLAB_RED_ZONE) 2281 /* 2282 * Add some empty padding so that we can catch 2283 * overwrites from earlier objects rather than let 2284 * tracking information or the free pointer be 2285 * corrupted if a user writes before the start 2286 * of the object. 
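 * (Illustrative: with this extra word in place, a stray p[-1] store on
 * object N hits the padding of object N-1, which the debug checks can
 * flag, instead of silently corrupting N-1's tracking data or free
 * pointer.)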
2287 */ 2288 size += sizeof(void *); 2289 #endif 2290 2291 /* 2292 * Determine the alignment based on various parameters that the 2293 * user specified and the dynamic determination of cache line size 2294 * on bootup. 2295 */ 2296 align = calculate_alignment(flags, align, s->objsize); 2297 s->align = align; 2298 2299 /* 2300 * SLUB stores one object immediately after another beginning from 2301 * offset 0. In order to align the objects we have to simply size 2302 * each object to conform to the alignment. 2303 */ 2304 size = ALIGN(size, align); 2305 s->size = size; 2306 if (forced_order >= 0) 2307 order = forced_order; 2308 else 2309 order = calculate_order(size); 2310 2311 if (order < 0) 2312 return 0; 2313 2314 s->allocflags = 0; 2315 if (order) 2316 s->allocflags |= __GFP_COMP; 2317 2318 if (s->flags & SLAB_CACHE_DMA) 2319 s->allocflags |= SLUB_DMA; 2320 2321 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2322 s->allocflags |= __GFP_RECLAIMABLE; 2323 2324 /* 2325 * Determine the number of objects per slab 2326 */ 2327 s->oo = oo_make(order, size); 2328 s->min = oo_make(get_order(size), size); 2329 if (oo_objects(s->oo) > oo_objects(s->max)) 2330 s->max = s->oo; 2331 2332 return !!oo_objects(s->oo); 2333 2334 } 2335 2336 static int kmem_cache_open(struct kmem_cache *s, 2337 const char *name, size_t size, 2338 size_t align, unsigned long flags, 2339 void (*ctor)(void *)) 2340 { 2341 memset(s, 0, kmem_size); 2342 s->name = name; 2343 s->ctor = ctor; 2344 s->objsize = size; 2345 s->align = align; 2346 s->flags = kmem_cache_flags(size, flags, name, ctor); 2347 2348 if (!calculate_sizes(s, -1)) 2349 goto error; 2350 if (disable_higher_order_debug) { 2351 /* 2352 * Disable debugging flags that store metadata if the min slab 2353 * order increased. 2354 */ 2355 if (get_order(s->size) > get_order(s->objsize)) { 2356 s->flags &= ~DEBUG_METADATA_FLAGS; 2357 s->offset = 0; 2358 if (!calculate_sizes(s, -1)) 2359 goto error; 2360 } 2361 } 2362 2363 /* 2364 * The larger the object size is, the more pages we want on the partial 2365 * list to avoid pounding the page allocator excessively. 2366 */ 2367 set_min_partial(s, ilog2(s->size)); 2368 s->refcount = 1; 2369 #ifdef CONFIG_NUMA 2370 s->remote_node_defrag_ratio = 1000; 2371 #endif 2372 if (!init_kmem_cache_nodes(s)) 2373 goto error; 2374 2375 if (alloc_kmem_cache_cpus(s)) 2376 return 1; 2377 2378 free_kmem_cache_nodes(s); 2379 error: 2380 if (flags & SLAB_PANIC) 2381 panic("Cannot create slab %s size=%lu realsize=%u " 2382 "order=%u offset=%u flags=%lx\n", 2383 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2384 s->offset, flags); 2385 return 0; 2386 } 2387 2388 /* 2389 * Check if a given pointer is valid 2390 */ 2391 int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2392 { 2393 struct page *page; 2394 2395 if (!kern_ptr_validate(object, s->size)) 2396 return 0; 2397 2398 page = get_object_page(object); 2399 2400 if (!page || s != page->slab) 2401 /* No slab or wrong slab */ 2402 return 0; 2403 2404 if (!check_valid_pointer(s, page, object)) 2405 return 0; 2406 2407 /* 2408 * We could also check if the object is on the slabs freelist. 2409 * But this would be too expensive and it seems that the main 2410 * purpose of kmem_ptr_valid() is to check if the object belongs 2411 * to a certain slab. 
2412 */ 2413 return 1; 2414 } 2415 EXPORT_SYMBOL(kmem_ptr_validate); 2416 2417 /* 2418 * Determine the size of a slab object 2419 */ 2420 unsigned int kmem_cache_size(struct kmem_cache *s) 2421 { 2422 return s->objsize; 2423 } 2424 EXPORT_SYMBOL(kmem_cache_size); 2425 2426 const char *kmem_cache_name(struct kmem_cache *s) 2427 { 2428 return s->name; 2429 } 2430 EXPORT_SYMBOL(kmem_cache_name); 2431 2432 static void list_slab_objects(struct kmem_cache *s, struct page *page, 2433 const char *text) 2434 { 2435 #ifdef CONFIG_SLUB_DEBUG 2436 void *addr = page_address(page); 2437 void *p; 2438 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * 2439 sizeof(long), GFP_ATOMIC); 2440 if (!map) 2441 return; 2442 slab_err(s, page, "%s", text); 2443 slab_lock(page); 2444 for_each_free_object(p, s, page->freelist) 2445 set_bit(slab_index(p, s, addr), map); 2446 2447 for_each_object(p, s, addr, page->objects) { 2448 2449 if (!test_bit(slab_index(p, s, addr), map)) { 2450 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2451 p, p - addr); 2452 print_tracking(s, p); 2453 } 2454 } 2455 slab_unlock(page); 2456 kfree(map); 2457 #endif 2458 } 2459 2460 /* 2461 * Attempt to free all partial slabs on a node. 2462 */ 2463 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2464 { 2465 unsigned long flags; 2466 struct page *page, *h; 2467 2468 spin_lock_irqsave(&n->list_lock, flags); 2469 list_for_each_entry_safe(page, h, &n->partial, lru) { 2470 if (!page->inuse) { 2471 __remove_partial(n, page); 2472 discard_slab(s, page); 2473 } else { 2474 list_slab_objects(s, page, 2475 "Objects remaining on kmem_cache_close()"); 2476 } 2477 } 2478 spin_unlock_irqrestore(&n->list_lock, flags); 2479 } 2480 2481 /* 2482 * Release all resources used by a slab cache. 
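 * Returns non-zero if the cache still contains objects and could not be
 * torn down completely.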
2483 */ 2484 static inline int kmem_cache_close(struct kmem_cache *s) 2485 { 2486 int node; 2487 2488 flush_all(s); 2489 free_percpu(s->cpu_slab); 2490 /* Attempt to free all objects */ 2491 for_each_node_state(node, N_NORMAL_MEMORY) { 2492 struct kmem_cache_node *n = get_node(s, node); 2493 2494 free_partial(s, n); 2495 if (n->nr_partial || slabs_node(s, node)) 2496 return 1; 2497 } 2498 free_kmem_cache_nodes(s); 2499 return 0; 2500 } 2501 2502 /* 2503 * Close a cache and release the kmem_cache structure 2504 * (must be used for caches created using kmem_cache_create) 2505 */ 2506 void kmem_cache_destroy(struct kmem_cache *s) 2507 { 2508 down_write(&slub_lock); 2509 s->refcount--; 2510 if (!s->refcount) { 2511 list_del(&s->list); 2512 if (kmem_cache_close(s)) { 2513 printk(KERN_ERR "SLUB %s: %s called for cache that " 2514 "still has objects.\n", s->name, __func__); 2515 dump_stack(); 2516 } 2517 if (s->flags & SLAB_DESTROY_BY_RCU) 2518 rcu_barrier(); 2519 sysfs_slab_remove(s); 2520 } 2521 up_write(&slub_lock); 2522 } 2523 EXPORT_SYMBOL(kmem_cache_destroy); 2524 2525 /******************************************************************** 2526 * Kmalloc subsystem 2527 *******************************************************************/ 2528 2529 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; 2530 EXPORT_SYMBOL(kmalloc_caches); 2531 2532 static struct kmem_cache *kmem_cache; 2533 2534 #ifdef CONFIG_ZONE_DMA 2535 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; 2536 #endif 2537 2538 static int __init setup_slub_min_order(char *str) 2539 { 2540 get_option(&str, &slub_min_order); 2541 2542 return 1; 2543 } 2544 2545 __setup("slub_min_order=", setup_slub_min_order); 2546 2547 static int __init setup_slub_max_order(char *str) 2548 { 2549 get_option(&str, &slub_max_order); 2550 slub_max_order = min(slub_max_order, MAX_ORDER - 1); 2551 2552 return 1; 2553 } 2554 2555 __setup("slub_max_order=", setup_slub_max_order); 2556 2557 static int __init setup_slub_min_objects(char *str) 2558 { 2559 get_option(&str, &slub_min_objects); 2560 2561 return 1; 2562 } 2563 2564 __setup("slub_min_objects=", setup_slub_min_objects); 2565 2566 static int __init setup_slub_nomerge(char *str) 2567 { 2568 slub_nomerge = 1; 2569 return 1; 2570 } 2571 2572 __setup("slub_nomerge", setup_slub_nomerge); 2573 2574 static struct kmem_cache *__init create_kmalloc_cache(const char *name, 2575 int size, unsigned int flags) 2576 { 2577 struct kmem_cache *s; 2578 2579 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 2580 2581 /* 2582 * This function is called with IRQs disabled during early-boot on 2583 * single CPU so there's no need to take slub_lock here. 2584 */ 2585 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, 2586 flags, NULL)) 2587 goto panic; 2588 2589 list_add(&s->list, &slab_caches); 2590 return s; 2591 2592 panic: 2593 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2594 return NULL; 2595 } 2596 2597 /* 2598 * Conversion table for small slabs sizes / 8 to the index in the 2599 * kmalloc array. This is necessary for slabs < 192 since we have non power 2600 * of two cache sizes there. The size of larger slabs can be determined using 2601 * fls. 
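 * For example (illustrative, with the default table below): kmalloc(100)
 * gives size_index_elem(100) == 12 and size_index[12] == 7, i.e. the
 * 128 byte kmalloc cache, while kmalloc(300) takes the fls() path
 * (fls(299) == 9) and is served from the 512 byte cache.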
2602 */ 2603 static s8 size_index[24] = { 2604 3, /* 8 */ 2605 4, /* 16 */ 2606 5, /* 24 */ 2607 5, /* 32 */ 2608 6, /* 40 */ 2609 6, /* 48 */ 2610 6, /* 56 */ 2611 6, /* 64 */ 2612 1, /* 72 */ 2613 1, /* 80 */ 2614 1, /* 88 */ 2615 1, /* 96 */ 2616 7, /* 104 */ 2617 7, /* 112 */ 2618 7, /* 120 */ 2619 7, /* 128 */ 2620 2, /* 136 */ 2621 2, /* 144 */ 2622 2, /* 152 */ 2623 2, /* 160 */ 2624 2, /* 168 */ 2625 2, /* 176 */ 2626 2, /* 184 */ 2627 2 /* 192 */ 2628 }; 2629 2630 static inline int size_index_elem(size_t bytes) 2631 { 2632 return (bytes - 1) / 8; 2633 } 2634 2635 static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2636 { 2637 int index; 2638 2639 if (size <= 192) { 2640 if (!size) 2641 return ZERO_SIZE_PTR; 2642 2643 index = size_index[size_index_elem(size)]; 2644 } else 2645 index = fls(size - 1); 2646 2647 #ifdef CONFIG_ZONE_DMA 2648 if (unlikely((flags & SLUB_DMA))) 2649 return kmalloc_dma_caches[index]; 2650 2651 #endif 2652 return kmalloc_caches[index]; 2653 } 2654 2655 void *__kmalloc(size_t size, gfp_t flags) 2656 { 2657 struct kmem_cache *s; 2658 void *ret; 2659 2660 if (unlikely(size > SLUB_MAX_SIZE)) 2661 return kmalloc_large(size, flags); 2662 2663 s = get_slab(size, flags); 2664 2665 if (unlikely(ZERO_OR_NULL_PTR(s))) 2666 return s; 2667 2668 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); 2669 2670 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 2671 2672 return ret; 2673 } 2674 EXPORT_SYMBOL(__kmalloc); 2675 2676 #ifdef CONFIG_NUMA 2677 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2678 { 2679 struct page *page; 2680 void *ptr = NULL; 2681 2682 flags |= __GFP_COMP | __GFP_NOTRACK; 2683 page = alloc_pages_node(node, flags, get_order(size)); 2684 if (page) 2685 ptr = page_address(page); 2686 2687 kmemleak_alloc(ptr, size, 1, flags); 2688 return ptr; 2689 } 2690 2691 void *__kmalloc_node(size_t size, gfp_t flags, int node) 2692 { 2693 struct kmem_cache *s; 2694 void *ret; 2695 2696 if (unlikely(size > SLUB_MAX_SIZE)) { 2697 ret = kmalloc_large_node(size, flags, node); 2698 2699 trace_kmalloc_node(_RET_IP_, ret, 2700 size, PAGE_SIZE << get_order(size), 2701 flags, node); 2702 2703 return ret; 2704 } 2705 2706 s = get_slab(size, flags); 2707 2708 if (unlikely(ZERO_OR_NULL_PTR(s))) 2709 return s; 2710 2711 ret = slab_alloc(s, flags, node, _RET_IP_); 2712 2713 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 2714 2715 return ret; 2716 } 2717 EXPORT_SYMBOL(__kmalloc_node); 2718 #endif 2719 2720 size_t ksize(const void *object) 2721 { 2722 struct page *page; 2723 struct kmem_cache *s; 2724 2725 if (unlikely(object == ZERO_SIZE_PTR)) 2726 return 0; 2727 2728 page = virt_to_head_page(object); 2729 2730 if (unlikely(!PageSlab(page))) { 2731 WARN_ON(!PageCompound(page)); 2732 return PAGE_SIZE << compound_order(page); 2733 } 2734 s = page->slab; 2735 2736 #ifdef CONFIG_SLUB_DEBUG 2737 /* 2738 * Debugging requires use of the padding between object 2739 * and whatever may come after it. 2740 */ 2741 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2742 return s->objsize; 2743 2744 #endif 2745 /* 2746 * If we have the need to store the freelist pointer 2747 * back there or track user information then we can 2748 * only use the space before that information. 
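 * (Illustrative: a SLAB_DESTROY_BY_RCU cache with 64 byte objects and no
 * other debug flags keeps its free pointer after the object, so ksize()
 * reports s->inuse == 64 rather than s->size, which also covers that
 * pointer word.)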
2749 */ 2750 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2751 return s->inuse; 2752 /* 2753 * Else we can use all the padding etc. for the allocation 2754 */ 2755 return s->size; 2756 } 2757 EXPORT_SYMBOL(ksize); 2758 2759 void kfree(const void *x) 2760 { 2761 struct page *page; 2762 void *object = (void *)x; 2763 2764 trace_kfree(_RET_IP_, x); 2765 2766 if (unlikely(ZERO_OR_NULL_PTR(x))) 2767 return; 2768 2769 page = virt_to_head_page(x); 2770 if (unlikely(!PageSlab(page))) { 2771 BUG_ON(!PageCompound(page)); 2772 kmemleak_free(x); 2773 put_page(page); 2774 return; 2775 } 2776 slab_free(page->slab, page, object, _RET_IP_); 2777 } 2778 EXPORT_SYMBOL(kfree); 2779 2780 /* 2781 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2782 * the remaining slabs by the number of items in use. The slabs with the 2783 * most items in use come first. New allocations will then fill those up 2784 * and thus they can be removed from the partial lists. 2785 * 2786 * The slabs with the least items are placed last. This results in them 2787 * being allocated from last, increasing the chance that the last objects 2788 * in them are freed as well. 2789 */ 2790 int kmem_cache_shrink(struct kmem_cache *s) 2791 { 2792 int node; 2793 int i; 2794 struct kmem_cache_node *n; 2795 struct page *page; 2796 struct page *t; 2797 int objects = oo_objects(s->max); 2798 struct list_head *slabs_by_inuse = 2799 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2800 unsigned long flags; 2801 2802 if (!slabs_by_inuse) 2803 return -ENOMEM; 2804 2805 flush_all(s); 2806 for_each_node_state(node, N_NORMAL_MEMORY) { 2807 n = get_node(s, node); 2808 2809 if (!n->nr_partial) 2810 continue; 2811 2812 for (i = 0; i < objects; i++) 2813 INIT_LIST_HEAD(slabs_by_inuse + i); 2814 2815 spin_lock_irqsave(&n->list_lock, flags); 2816 2817 /* 2818 * Build lists indexed by the items in use in each slab. 2819 * 2820 * Note that concurrent frees may occur while we hold the 2821 * list_lock. page->inuse here is the upper limit. 2822 */ 2823 list_for_each_entry_safe(page, t, &n->partial, lru) { 2824 if (!page->inuse && slab_trylock(page)) { 2825 /* 2826 * Must hold slab lock here because slab_free 2827 * may have freed the last object and be 2828 * waiting to release the slab. 2829 */ 2830 __remove_partial(n, page); 2831 slab_unlock(page); 2832 discard_slab(s, page); 2833 } else { 2834 list_move(&page->lru, 2835 slabs_by_inuse + page->inuse); 2836 } 2837 } 2838 2839 /* 2840 * Rebuild the partial list with the slabs filled up most 2841 * first and the least used slabs at the end. 2842 */ 2843 for (i = objects - 1; i >= 0; i--) 2844 list_splice(slabs_by_inuse + i, n->partial.prev); 2845 2846 spin_unlock_irqrestore(&n->list_lock, flags); 2847 } 2848 2849 kfree(slabs_by_inuse); 2850 return 0; 2851 } 2852 EXPORT_SYMBOL(kmem_cache_shrink); 2853 2854 #if defined(CONFIG_MEMORY_HOTPLUG) 2855 static int slab_mem_going_offline_callback(void *arg) 2856 { 2857 struct kmem_cache *s; 2858 2859 down_read(&slub_lock); 2860 list_for_each_entry(s, &slab_caches, list) 2861 kmem_cache_shrink(s); 2862 up_read(&slub_lock); 2863 2864 return 0; 2865 } 2866 2867 static void slab_mem_offline_callback(void *arg) 2868 { 2869 struct kmem_cache_node *n; 2870 struct kmem_cache *s; 2871 struct memory_notify *marg = arg; 2872 int offline_node; 2873 2874 offline_node = marg->status_change_nid; 2875 2876 /* 2877 * If the node still has available memory then we still need its 2878 * kmem_cache_node structure, so there is nothing to tear down.
2879 */ 2880 if (offline_node < 0) 2881 return; 2882 2883 down_read(&slub_lock); 2884 list_for_each_entry(s, &slab_caches, list) { 2885 n = get_node(s, offline_node); 2886 if (n) { 2887 /* 2888 * if n->nr_slabs > 0, slabs still exist on the node 2889 * that is going down. We were unable to free them, 2890 * and offline_pages() function shouldn't call this 2891 * callback. So, we must fail. 2892 */ 2893 BUG_ON(slabs_node(s, offline_node)); 2894 2895 s->node[offline_node] = NULL; 2896 kmem_cache_free(kmem_cache_node, n); 2897 } 2898 } 2899 up_read(&slub_lock); 2900 } 2901 2902 static int slab_mem_going_online_callback(void *arg) 2903 { 2904 struct kmem_cache_node *n; 2905 struct kmem_cache *s; 2906 struct memory_notify *marg = arg; 2907 int nid = marg->status_change_nid; 2908 int ret = 0; 2909 2910 /* 2911 * If the node's memory is already available, then kmem_cache_node is 2912 * already created. Nothing to do. 2913 */ 2914 if (nid < 0) 2915 return 0; 2916 2917 /* 2918 * We are bringing a node online. No memory is available yet. We must 2919 * allocate a kmem_cache_node structure in order to bring the node 2920 * online. 2921 */ 2922 down_read(&slub_lock); 2923 list_for_each_entry(s, &slab_caches, list) { 2924 /* 2925 * XXX: kmem_cache_alloc_node will fallback to other nodes 2926 * since memory is not yet available from the node that 2927 * is brought up. 2928 */ 2929 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 2930 if (!n) { 2931 ret = -ENOMEM; 2932 goto out; 2933 } 2934 init_kmem_cache_node(n, s); 2935 s->node[nid] = n; 2936 } 2937 out: 2938 up_read(&slub_lock); 2939 return ret; 2940 } 2941 2942 static int slab_memory_callback(struct notifier_block *self, 2943 unsigned long action, void *arg) 2944 { 2945 int ret = 0; 2946 2947 switch (action) { 2948 case MEM_GOING_ONLINE: 2949 ret = slab_mem_going_online_callback(arg); 2950 break; 2951 case MEM_GOING_OFFLINE: 2952 ret = slab_mem_going_offline_callback(arg); 2953 break; 2954 case MEM_OFFLINE: 2955 case MEM_CANCEL_ONLINE: 2956 slab_mem_offline_callback(arg); 2957 break; 2958 case MEM_ONLINE: 2959 case MEM_CANCEL_OFFLINE: 2960 break; 2961 } 2962 if (ret) 2963 ret = notifier_from_errno(ret); 2964 else 2965 ret = NOTIFY_OK; 2966 return ret; 2967 } 2968 2969 #endif /* CONFIG_MEMORY_HOTPLUG */ 2970 2971 /******************************************************************** 2972 * Basic setup of slabs 2973 *******************************************************************/ 2974 2975 /* 2976 * Used for early kmem_cache structures that were allocated using 2977 * the page allocator 2978 */ 2979 2980 static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) 2981 { 2982 int node; 2983 2984 list_add(&s->list, &slab_caches); 2985 s->refcount = -1; 2986 2987 for_each_node_state(node, N_NORMAL_MEMORY) { 2988 struct kmem_cache_node *n = get_node(s, node); 2989 struct page *p; 2990 2991 if (n) { 2992 list_for_each_entry(p, &n->partial, lru) 2993 p->slab = s; 2994 2995 #ifdef CONFIG_SLAB_DEBUG 2996 list_for_each_entry(p, &n->full, lru) 2997 p->slab = s; 2998 #endif 2999 } 3000 } 3001 } 3002 3003 void __init kmem_cache_init(void) 3004 { 3005 int i; 3006 int caches = 0; 3007 struct kmem_cache *temp_kmem_cache; 3008 int order; 3009 struct kmem_cache *temp_kmem_cache_node; 3010 unsigned long kmalloc_size; 3011 3012 kmem_size = offsetof(struct kmem_cache, node) + 3013 nr_node_ids * sizeof(struct kmem_cache_node *); 3014 3015 /* Allocate two kmem_caches from the page allocator */ 3016 kmalloc_size = ALIGN(kmem_size, cache_line_size()); 3017 order 
= get_order(2 * kmalloc_size); 3018 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); 3019 3020 /* 3021 * Must first have the slab cache available for the allocations of the 3022 * struct kmem_cache_node's. There is special bootstrap code in 3023 * kmem_cache_open for slab_state == DOWN. 3024 */ 3025 kmem_cache_node = (void *)kmem_cache + kmalloc_size; 3026 3027 kmem_cache_open(kmem_cache_node, "kmem_cache_node", 3028 sizeof(struct kmem_cache_node), 3029 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3030 3031 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 3032 3033 /* Able to allocate the per node structures */ 3034 slab_state = PARTIAL; 3035 3036 temp_kmem_cache = kmem_cache; 3037 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, 3038 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 3039 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3040 memcpy(kmem_cache, temp_kmem_cache, kmem_size); 3041 3042 /* 3043 * Allocate kmem_cache_node properly from the kmem_cache slab. 3044 * kmem_cache_node is separately allocated so no need to 3045 * update any list pointers. 3046 */ 3047 temp_kmem_cache_node = kmem_cache_node; 3048 3049 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); 3050 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size); 3051 3052 kmem_cache_bootstrap_fixup(kmem_cache_node); 3053 3054 caches++; 3055 kmem_cache_bootstrap_fixup(kmem_cache); 3056 caches++; 3057 /* Free temporary boot structure */ 3058 free_pages((unsigned long)temp_kmem_cache, order); 3059 3060 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 3061 3062 /* 3063 * Patch up the size_index table if we have strange large alignment 3064 * requirements for the kmalloc array. This is only the case for 3065 * MIPS it seems. The standard arches will not generate any code here. 3066 * 3067 * Largest permitted alignment is 256 bytes due to the way we 3068 * handle the index determination for the smaller caches. 3069 * 3070 * Make sure that nothing crazy happens if someone starts tinkering 3071 * around with ARCH_KMALLOC_MINALIGN 3072 */ 3073 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3074 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3075 3076 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { 3077 int elem = size_index_elem(i); 3078 if (elem >= ARRAY_SIZE(size_index)) 3079 break; 3080 size_index[elem] = KMALLOC_SHIFT_LOW; 3081 } 3082 3083 if (KMALLOC_MIN_SIZE == 64) { 3084 /* 3085 * The 96 byte size cache is not used if the alignment 3086 * is 64 byte. 3087 */ 3088 for (i = 64 + 8; i <= 96; i += 8) 3089 size_index[size_index_elem(i)] = 7; 3090 } else if (KMALLOC_MIN_SIZE == 128) { 3091 /* 3092 * The 192 byte sized cache is not used if the alignment 3093 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3094 * instead. 
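 * (Illustrative: with KMALLOC_MIN_SIZE == 128, a kmalloc(160) would map
 * to size_index entry 19 and thus to the unused kmalloc-192 slot; the
 * loop below rewrites the entries for 136..192 bytes to 8, i.e. the
 * 256 byte cache.)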
3095 */ 3096 for (i = 128 + 8; i <= 192; i += 8) 3097 size_index[size_index_elem(i)] = 8; 3098 } 3099 3100 /* Caches that are not of the two-to-the-power-of size */ 3101 if (KMALLOC_MIN_SIZE <= 32) { 3102 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); 3103 caches++; 3104 } 3105 3106 if (KMALLOC_MIN_SIZE <= 64) { 3107 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); 3108 caches++; 3109 } 3110 3111 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3112 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); 3113 caches++; 3114 } 3115 3116 slab_state = UP; 3117 3118 /* Provide the correct kmalloc names now that the caches are up */ 3119 if (KMALLOC_MIN_SIZE <= 32) { 3120 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT); 3121 BUG_ON(!kmalloc_caches[1]->name); 3122 } 3123 3124 if (KMALLOC_MIN_SIZE <= 64) { 3125 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT); 3126 BUG_ON(!kmalloc_caches[2]->name); 3127 } 3128 3129 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { 3130 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); 3131 3132 BUG_ON(!s); 3133 kmalloc_caches[i]->name = s; 3134 } 3135 3136 #ifdef CONFIG_SMP 3137 register_cpu_notifier(&slab_notifier); 3138 #endif 3139 3140 #ifdef CONFIG_ZONE_DMA 3141 for (i = 0; i < SLUB_PAGE_SHIFT; i++) { 3142 struct kmem_cache *s = kmalloc_caches[i]; 3143 3144 if (s && s->size) { 3145 char *name = kasprintf(GFP_NOWAIT, 3146 "dma-kmalloc-%d", s->objsize); 3147 3148 BUG_ON(!name); 3149 kmalloc_dma_caches[i] = create_kmalloc_cache(name, 3150 s->objsize, SLAB_CACHE_DMA); 3151 } 3152 } 3153 #endif 3154 printk(KERN_INFO 3155 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3156 " CPUs=%d, Nodes=%d\n", 3157 caches, cache_line_size(), 3158 slub_min_order, slub_max_order, slub_min_objects, 3159 nr_cpu_ids, nr_node_ids); 3160 } 3161 3162 void __init kmem_cache_init_late(void) 3163 { 3164 } 3165 3166 /* 3167 * Find a mergeable slab cache 3168 */ 3169 static int slab_unmergeable(struct kmem_cache *s) 3170 { 3171 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3172 return 1; 3173 3174 if (s->ctor) 3175 return 1; 3176 3177 /* 3178 * We may have set a slab to be unmergeable during bootstrap. 3179 */ 3180 if (s->refcount < 0) 3181 return 1; 3182 3183 return 0; 3184 } 3185 3186 static struct kmem_cache *find_mergeable(size_t size, 3187 size_t align, unsigned long flags, const char *name, 3188 void (*ctor)(void *)) 3189 { 3190 struct kmem_cache *s; 3191 3192 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3193 return NULL; 3194 3195 if (ctor) 3196 return NULL; 3197 3198 size = ALIGN(size, sizeof(void *)); 3199 align = calculate_alignment(flags, align, size); 3200 size = ALIGN(size, align); 3201 flags = kmem_cache_flags(size, flags, name, NULL); 3202 3203 list_for_each_entry(s, &slab_caches, list) { 3204 if (slab_unmergeable(s)) 3205 continue; 3206 3207 if (size > s->size) 3208 continue; 3209 3210 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3211 continue; 3212 /* 3213 * Check if alignment is compatible. 
3214 * Courtesy of Adrian Drzewiecki 3215 */ 3216 if ((s->size & ~(align - 1)) != s->size) 3217 continue; 3218 3219 if (s->size - size >= sizeof(void *)) 3220 continue; 3221 3222 return s; 3223 } 3224 return NULL; 3225 } 3226 3227 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3228 size_t align, unsigned long flags, void (*ctor)(void *)) 3229 { 3230 struct kmem_cache *s; 3231 char *n; 3232 3233 if (WARN_ON(!name)) 3234 return NULL; 3235 3236 down_write(&slub_lock); 3237 s = find_mergeable(size, align, flags, name, ctor); 3238 if (s) { 3239 s->refcount++; 3240 /* 3241 * Adjust the object sizes so that we clear 3242 * the complete object on kzalloc. 3243 */ 3244 s->objsize = max(s->objsize, (int)size); 3245 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3246 3247 if (sysfs_slab_alias(s, name)) { 3248 s->refcount--; 3249 goto err; 3250 } 3251 up_write(&slub_lock); 3252 return s; 3253 } 3254 3255 n = kstrdup(name, GFP_KERNEL); 3256 if (!n) 3257 goto err; 3258 3259 s = kmalloc(kmem_size, GFP_KERNEL); 3260 if (s) { 3261 if (kmem_cache_open(s, n, 3262 size, align, flags, ctor)) { 3263 list_add(&s->list, &slab_caches); 3264 if (sysfs_slab_add(s)) { 3265 list_del(&s->list); 3266 kfree(n); 3267 kfree(s); 3268 goto err; 3269 } 3270 up_write(&slub_lock); 3271 return s; 3272 } 3273 kfree(n); 3274 kfree(s); 3275 } 3276 err: 3277 up_write(&slub_lock); 3278 3279 if (flags & SLAB_PANIC) 3280 panic("Cannot create slabcache %s\n", name); 3281 else 3282 s = NULL; 3283 return s; 3284 } 3285 EXPORT_SYMBOL(kmem_cache_create); 3286 3287 #ifdef CONFIG_SMP 3288 /* 3289 * Use the cpu notifier to ensure that the cpu slabs are flushed when 3290 * necessary. 3291 */ 3292 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3293 unsigned long action, void *hcpu) 3294 { 3295 long cpu = (long)hcpu; 3296 struct kmem_cache *s; 3297 unsigned long flags; 3298 3299 switch (action) { 3300 case CPU_UP_CANCELED: 3301 case CPU_UP_CANCELED_FROZEN: 3302 case CPU_DEAD: 3303 case CPU_DEAD_FROZEN: 3304 down_read(&slub_lock); 3305 list_for_each_entry(s, &slab_caches, list) { 3306 local_irq_save(flags); 3307 __flush_cpu_slab(s, cpu); 3308 local_irq_restore(flags); 3309 } 3310 up_read(&slub_lock); 3311 break; 3312 default: 3313 break; 3314 } 3315 return NOTIFY_OK; 3316 } 3317 3318 static struct notifier_block __cpuinitdata slab_notifier = { 3319 .notifier_call = slab_cpuup_callback 3320 }; 3321 3322 #endif 3323 3324 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 3325 { 3326 struct kmem_cache *s; 3327 void *ret; 3328 3329 if (unlikely(size > SLUB_MAX_SIZE)) 3330 return kmalloc_large(size, gfpflags); 3331 3332 s = get_slab(size, gfpflags); 3333 3334 if (unlikely(ZERO_OR_NULL_PTR(s))) 3335 return s; 3336 3337 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); 3338 3339 /* Honor the call site pointer we received.
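 * The tracepoint below is handed that caller value instead of _RET_IP_,
 * so wrappers built on kmalloc_track_caller() get attributed to their
 * real call site.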
*/ 3340 trace_kmalloc(caller, ret, size, s->size, gfpflags); 3341 3342 return ret; 3343 } 3344 3345 #ifdef CONFIG_NUMA 3346 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3347 int node, unsigned long caller) 3348 { 3349 struct kmem_cache *s; 3350 void *ret; 3351 3352 if (unlikely(size > SLUB_MAX_SIZE)) { 3353 ret = kmalloc_large_node(size, gfpflags, node); 3354 3355 trace_kmalloc_node(caller, ret, 3356 size, PAGE_SIZE << get_order(size), 3357 gfpflags, node); 3358 3359 return ret; 3360 } 3361 3362 s = get_slab(size, gfpflags); 3363 3364 if (unlikely(ZERO_OR_NULL_PTR(s))) 3365 return s; 3366 3367 ret = slab_alloc(s, gfpflags, node, caller); 3368 3369 /* Honor the call site pointer we recieved. */ 3370 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 3371 3372 return ret; 3373 } 3374 #endif 3375 3376 #ifdef CONFIG_SYSFS 3377 static int count_inuse(struct page *page) 3378 { 3379 return page->inuse; 3380 } 3381 3382 static int count_total(struct page *page) 3383 { 3384 return page->objects; 3385 } 3386 #endif 3387 3388 #ifdef CONFIG_SLUB_DEBUG 3389 static int validate_slab(struct kmem_cache *s, struct page *page, 3390 unsigned long *map) 3391 { 3392 void *p; 3393 void *addr = page_address(page); 3394 3395 if (!check_slab(s, page) || 3396 !on_freelist(s, page, NULL)) 3397 return 0; 3398 3399 /* Now we know that a valid freelist exists */ 3400 bitmap_zero(map, page->objects); 3401 3402 for_each_free_object(p, s, page->freelist) { 3403 set_bit(slab_index(p, s, addr), map); 3404 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) 3405 return 0; 3406 } 3407 3408 for_each_object(p, s, addr, page->objects) 3409 if (!test_bit(slab_index(p, s, addr), map)) 3410 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) 3411 return 0; 3412 return 1; 3413 } 3414 3415 static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3416 unsigned long *map) 3417 { 3418 if (slab_trylock(page)) { 3419 validate_slab(s, page, map); 3420 slab_unlock(page); 3421 } else 3422 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 3423 s->name, page); 3424 } 3425 3426 static int validate_slab_node(struct kmem_cache *s, 3427 struct kmem_cache_node *n, unsigned long *map) 3428 { 3429 unsigned long count = 0; 3430 struct page *page; 3431 unsigned long flags; 3432 3433 spin_lock_irqsave(&n->list_lock, flags); 3434 3435 list_for_each_entry(page, &n->partial, lru) { 3436 validate_slab_slab(s, page, map); 3437 count++; 3438 } 3439 if (count != n->nr_partial) 3440 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3441 "counter=%ld\n", s->name, count, n->nr_partial); 3442 3443 if (!(s->flags & SLAB_STORE_USER)) 3444 goto out; 3445 3446 list_for_each_entry(page, &n->full, lru) { 3447 validate_slab_slab(s, page, map); 3448 count++; 3449 } 3450 if (count != atomic_long_read(&n->nr_slabs)) 3451 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3452 "counter=%ld\n", s->name, count, 3453 atomic_long_read(&n->nr_slabs)); 3454 3455 out: 3456 spin_unlock_irqrestore(&n->list_lock, flags); 3457 return count; 3458 } 3459 3460 static long validate_slab_cache(struct kmem_cache *s) 3461 { 3462 int node; 3463 unsigned long count = 0; 3464 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3465 sizeof(unsigned long), GFP_KERNEL); 3466 3467 if (!map) 3468 return -ENOMEM; 3469 3470 flush_all(s); 3471 for_each_node_state(node, N_NORMAL_MEMORY) { 3472 struct kmem_cache_node *n = get_node(s, node); 3473 3474 count += validate_slab_node(s, n, map); 3475 } 3476 kfree(map); 3477 return count; 3478 } 
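/*
 * Validation is normally requested from user space through the "validate"
 * sysfs attribute defined further below, e.g. (illustrative):
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * which ends up calling validate_slab_cache() for that cache.
 */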
3479 /* 3480 * Generate lists of code addresses where slabcache objects are allocated 3481 * and freed. 3482 */ 3483 3484 struct location { 3485 unsigned long count; 3486 unsigned long addr; 3487 long long sum_time; 3488 long min_time; 3489 long max_time; 3490 long min_pid; 3491 long max_pid; 3492 DECLARE_BITMAP(cpus, NR_CPUS); 3493 nodemask_t nodes; 3494 }; 3495 3496 struct loc_track { 3497 unsigned long max; 3498 unsigned long count; 3499 struct location *loc; 3500 }; 3501 3502 static void free_loc_track(struct loc_track *t) 3503 { 3504 if (t->max) 3505 free_pages((unsigned long)t->loc, 3506 get_order(sizeof(struct location) * t->max)); 3507 } 3508 3509 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3510 { 3511 struct location *l; 3512 int order; 3513 3514 order = get_order(sizeof(struct location) * max); 3515 3516 l = (void *)__get_free_pages(flags, order); 3517 if (!l) 3518 return 0; 3519 3520 if (t->count) { 3521 memcpy(l, t->loc, sizeof(struct location) * t->count); 3522 free_loc_track(t); 3523 } 3524 t->max = max; 3525 t->loc = l; 3526 return 1; 3527 } 3528 3529 static int add_location(struct loc_track *t, struct kmem_cache *s, 3530 const struct track *track) 3531 { 3532 long start, end, pos; 3533 struct location *l; 3534 unsigned long caddr; 3535 unsigned long age = jiffies - track->when; 3536 3537 start = -1; 3538 end = t->count; 3539 3540 for ( ; ; ) { 3541 pos = start + (end - start + 1) / 2; 3542 3543 /* 3544 * There is nothing at "end". If we end up there 3545 * we need to add something to before end. 3546 */ 3547 if (pos == end) 3548 break; 3549 3550 caddr = t->loc[pos].addr; 3551 if (track->addr == caddr) { 3552 3553 l = &t->loc[pos]; 3554 l->count++; 3555 if (track->when) { 3556 l->sum_time += age; 3557 if (age < l->min_time) 3558 l->min_time = age; 3559 if (age > l->max_time) 3560 l->max_time = age; 3561 3562 if (track->pid < l->min_pid) 3563 l->min_pid = track->pid; 3564 if (track->pid > l->max_pid) 3565 l->max_pid = track->pid; 3566 3567 cpumask_set_cpu(track->cpu, 3568 to_cpumask(l->cpus)); 3569 } 3570 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3571 return 1; 3572 } 3573 3574 if (track->addr < caddr) 3575 end = pos; 3576 else 3577 start = pos; 3578 } 3579 3580 /* 3581 * Not found. Insert new tracking element. 
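 * At this point pos is the slot where the new entry must be placed to
 * keep t->loc sorted by address; everything from pos onwards is moved
 * up by one below.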
3582 */ 3583 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3584 return 0; 3585 3586 l = t->loc + pos; 3587 if (pos < t->count) 3588 memmove(l + 1, l, 3589 (t->count - pos) * sizeof(struct location)); 3590 t->count++; 3591 l->count = 1; 3592 l->addr = track->addr; 3593 l->sum_time = age; 3594 l->min_time = age; 3595 l->max_time = age; 3596 l->min_pid = track->pid; 3597 l->max_pid = track->pid; 3598 cpumask_clear(to_cpumask(l->cpus)); 3599 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 3600 nodes_clear(l->nodes); 3601 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3602 return 1; 3603 } 3604 3605 static void process_slab(struct loc_track *t, struct kmem_cache *s, 3606 struct page *page, enum track_item alloc, 3607 unsigned long *map) 3608 { 3609 void *addr = page_address(page); 3610 void *p; 3611 3612 bitmap_zero(map, page->objects); 3613 for_each_free_object(p, s, page->freelist) 3614 set_bit(slab_index(p, s, addr), map); 3615 3616 for_each_object(p, s, addr, page->objects) 3617 if (!test_bit(slab_index(p, s, addr), map)) 3618 add_location(t, s, get_track(s, p, alloc)); 3619 } 3620 3621 static int list_locations(struct kmem_cache *s, char *buf, 3622 enum track_item alloc) 3623 { 3624 int len = 0; 3625 unsigned long i; 3626 struct loc_track t = { 0, 0, NULL }; 3627 int node; 3628 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3629 sizeof(unsigned long), GFP_KERNEL); 3630 3631 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3632 GFP_TEMPORARY)) { 3633 kfree(map); 3634 return sprintf(buf, "Out of memory\n"); 3635 } 3636 /* Push back cpu slabs */ 3637 flush_all(s); 3638 3639 for_each_node_state(node, N_NORMAL_MEMORY) { 3640 struct kmem_cache_node *n = get_node(s, node); 3641 unsigned long flags; 3642 struct page *page; 3643 3644 if (!atomic_long_read(&n->nr_slabs)) 3645 continue; 3646 3647 spin_lock_irqsave(&n->list_lock, flags); 3648 list_for_each_entry(page, &n->partial, lru) 3649 process_slab(&t, s, page, alloc, map); 3650 list_for_each_entry(page, &n->full, lru) 3651 process_slab(&t, s, page, alloc, map); 3652 spin_unlock_irqrestore(&n->list_lock, flags); 3653 } 3654 3655 for (i = 0; i < t.count; i++) { 3656 struct location *l = &t.loc[i]; 3657 3658 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) 3659 break; 3660 len += sprintf(buf + len, "%7ld ", l->count); 3661 3662 if (l->addr) 3663 len += sprint_symbol(buf + len, (unsigned long)l->addr); 3664 else 3665 len += sprintf(buf + len, "<not-available>"); 3666 3667 if (l->sum_time != l->min_time) { 3668 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3669 l->min_time, 3670 (long)div_u64(l->sum_time, l->count), 3671 l->max_time); 3672 } else 3673 len += sprintf(buf + len, " age=%ld", 3674 l->min_time); 3675 3676 if (l->min_pid != l->max_pid) 3677 len += sprintf(buf + len, " pid=%ld-%ld", 3678 l->min_pid, l->max_pid); 3679 else 3680 len += sprintf(buf + len, " pid=%ld", 3681 l->min_pid); 3682 3683 if (num_online_cpus() > 1 && 3684 !cpumask_empty(to_cpumask(l->cpus)) && 3685 len < PAGE_SIZE - 60) { 3686 len += sprintf(buf + len, " cpus="); 3687 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3688 to_cpumask(l->cpus)); 3689 } 3690 3691 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && 3692 len < PAGE_SIZE - 60) { 3693 len += sprintf(buf + len, " nodes="); 3694 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3695 l->nodes); 3696 } 3697 3698 len += sprintf(buf + len, "\n"); 3699 } 3700 3701 free_loc_track(&t); 3702 kfree(map); 3703 if (!t.count) 3704 len += 
sprintf(buf, "No data\n"); 3705 return len; 3706 } 3707 #endif 3708 3709 #ifdef SLUB_RESILIENCY_TEST 3710 static void resiliency_test(void) 3711 { 3712 u8 *p; 3713 3714 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10); 3715 3716 printk(KERN_ERR "SLUB resiliency testing\n"); 3717 printk(KERN_ERR "-----------------------\n"); 3718 printk(KERN_ERR "A. Corruption after allocation\n"); 3719 3720 p = kzalloc(16, GFP_KERNEL); 3721 p[16] = 0x12; 3722 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 3723 " 0x12->0x%p\n\n", p + 16); 3724 3725 validate_slab_cache(kmalloc_caches[4]); 3726 3727 /* Hmmm... The next two are dangerous */ 3728 p = kzalloc(32, GFP_KERNEL); 3729 p[32 + sizeof(void *)] = 0x34; 3730 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 3731 " 0x34 -> -0x%p\n", p); 3732 printk(KERN_ERR 3733 "If allocated object is overwritten then not detectable\n\n"); 3734 3735 validate_slab_cache(kmalloc_caches[5]); 3736 p = kzalloc(64, GFP_KERNEL); 3737 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3738 *p = 0x56; 3739 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3740 p); 3741 printk(KERN_ERR 3742 "If allocated object is overwritten then not detectable\n\n"); 3743 validate_slab_cache(kmalloc_caches[6]); 3744 3745 printk(KERN_ERR "\nB. Corruption after free\n"); 3746 p = kzalloc(128, GFP_KERNEL); 3747 kfree(p); 3748 *p = 0x78; 3749 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3750 validate_slab_cache(kmalloc_caches[7]); 3751 3752 p = kzalloc(256, GFP_KERNEL); 3753 kfree(p); 3754 p[50] = 0x9a; 3755 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3756 p); 3757 validate_slab_cache(kmalloc_caches[8]); 3758 3759 p = kzalloc(512, GFP_KERNEL); 3760 kfree(p); 3761 p[512] = 0xab; 3762 printk(KERN_ERR "\n3. 
kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3763 validate_slab_cache(kmalloc_caches[9]); 3764 } 3765 #else 3766 #ifdef CONFIG_SYSFS 3767 static void resiliency_test(void) {}; 3768 #endif 3769 #endif 3770 3771 #ifdef CONFIG_SYSFS 3772 enum slab_stat_type { 3773 SL_ALL, /* All slabs */ 3774 SL_PARTIAL, /* Only partially allocated slabs */ 3775 SL_CPU, /* Only slabs used for cpu caches */ 3776 SL_OBJECTS, /* Determine allocated objects not slabs */ 3777 SL_TOTAL /* Determine object capacity not slabs */ 3778 }; 3779 3780 #define SO_ALL (1 << SL_ALL) 3781 #define SO_PARTIAL (1 << SL_PARTIAL) 3782 #define SO_CPU (1 << SL_CPU) 3783 #define SO_OBJECTS (1 << SL_OBJECTS) 3784 #define SO_TOTAL (1 << SL_TOTAL) 3785 3786 static ssize_t show_slab_objects(struct kmem_cache *s, 3787 char *buf, unsigned long flags) 3788 { 3789 unsigned long total = 0; 3790 int node; 3791 int x; 3792 unsigned long *nodes; 3793 unsigned long *per_cpu; 3794 3795 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3796 if (!nodes) 3797 return -ENOMEM; 3798 per_cpu = nodes + nr_node_ids; 3799 3800 if (flags & SO_CPU) { 3801 int cpu; 3802 3803 for_each_possible_cpu(cpu) { 3804 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3805 3806 if (!c || c->node < 0) 3807 continue; 3808 3809 if (c->page) { 3810 if (flags & SO_TOTAL) 3811 x = c->page->objects; 3812 else if (flags & SO_OBJECTS) 3813 x = c->page->inuse; 3814 else 3815 x = 1; 3816 3817 total += x; 3818 nodes[c->node] += x; 3819 } 3820 per_cpu[c->node]++; 3821 } 3822 } 3823 3824 down_read(&slub_lock); 3825 #ifdef CONFIG_SLUB_DEBUG 3826 if (flags & SO_ALL) { 3827 for_each_node_state(node, N_NORMAL_MEMORY) { 3828 struct kmem_cache_node *n = get_node(s, node); 3829 3830 if (flags & SO_TOTAL) 3831 x = atomic_long_read(&n->total_objects); 3832 else if (flags & SO_OBJECTS) 3833 x = atomic_long_read(&n->total_objects) - 3834 count_partial(n, count_free); 3835 3836 else 3837 x = atomic_long_read(&n->nr_slabs); 3838 total += x; 3839 nodes[node] += x; 3840 } 3841 3842 } else 3843 #endif 3844 if (flags & SO_PARTIAL) { 3845 for_each_node_state(node, N_NORMAL_MEMORY) { 3846 struct kmem_cache_node *n = get_node(s, node); 3847 3848 if (flags & SO_TOTAL) 3849 x = count_partial(n, count_total); 3850 else if (flags & SO_OBJECTS) 3851 x = count_partial(n, count_inuse); 3852 else 3853 x = n->nr_partial; 3854 total += x; 3855 nodes[node] += x; 3856 } 3857 } 3858 x = sprintf(buf, "%lu", total); 3859 #ifdef CONFIG_NUMA 3860 for_each_node_state(node, N_NORMAL_MEMORY) 3861 if (nodes[node]) 3862 x += sprintf(buf + x, " N%d=%lu", 3863 node, nodes[node]); 3864 #endif 3865 up_read(&slub_lock); 3866 kfree(nodes); 3867 return x + sprintf(buf + x, "\n"); 3868 } 3869 3870 #ifdef CONFIG_SLUB_DEBUG 3871 static int any_slab_objects(struct kmem_cache *s) 3872 { 3873 int node; 3874 3875 for_each_online_node(node) { 3876 struct kmem_cache_node *n = get_node(s, node); 3877 3878 if (!n) 3879 continue; 3880 3881 if (atomic_long_read(&n->total_objects)) 3882 return 1; 3883 } 3884 return 0; 3885 } 3886 #endif 3887 3888 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3889 #define to_slab(n) container_of(n, struct kmem_cache, kobj); 3890 3891 struct slab_attribute { 3892 struct attribute attr; 3893 ssize_t (*show)(struct kmem_cache *s, char *buf); 3894 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3895 }; 3896 3897 #define SLAB_ATTR_RO(_name) \ 3898 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3899 3900 #define SLAB_ATTR(_name) \ 
3901 static struct slab_attribute _name##_attr = \ 3902 __ATTR(_name, 0644, _name##_show, _name##_store) 3903 3904 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3905 { 3906 return sprintf(buf, "%d\n", s->size); 3907 } 3908 SLAB_ATTR_RO(slab_size); 3909 3910 static ssize_t align_show(struct kmem_cache *s, char *buf) 3911 { 3912 return sprintf(buf, "%d\n", s->align); 3913 } 3914 SLAB_ATTR_RO(align); 3915 3916 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3917 { 3918 return sprintf(buf, "%d\n", s->objsize); 3919 } 3920 SLAB_ATTR_RO(object_size); 3921 3922 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3923 { 3924 return sprintf(buf, "%d\n", oo_objects(s->oo)); 3925 } 3926 SLAB_ATTR_RO(objs_per_slab); 3927 3928 static ssize_t order_store(struct kmem_cache *s, 3929 const char *buf, size_t length) 3930 { 3931 unsigned long order; 3932 int err; 3933 3934 err = strict_strtoul(buf, 10, &order); 3935 if (err) 3936 return err; 3937 3938 if (order > slub_max_order || order < slub_min_order) 3939 return -EINVAL; 3940 3941 calculate_sizes(s, order); 3942 return length; 3943 } 3944 3945 static ssize_t order_show(struct kmem_cache *s, char *buf) 3946 { 3947 return sprintf(buf, "%d\n", oo_order(s->oo)); 3948 } 3949 SLAB_ATTR(order); 3950 3951 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 3952 { 3953 return sprintf(buf, "%lu\n", s->min_partial); 3954 } 3955 3956 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 3957 size_t length) 3958 { 3959 unsigned long min; 3960 int err; 3961 3962 err = strict_strtoul(buf, 10, &min); 3963 if (err) 3964 return err; 3965 3966 set_min_partial(s, min); 3967 return length; 3968 } 3969 SLAB_ATTR(min_partial); 3970 3971 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3972 { 3973 if (s->ctor) { 3974 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3975 3976 return n + sprintf(buf + n, "\n"); 3977 } 3978 return 0; 3979 } 3980 SLAB_ATTR_RO(ctor); 3981 3982 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3983 { 3984 return sprintf(buf, "%d\n", s->refcount - 1); 3985 } 3986 SLAB_ATTR_RO(aliases); 3987 3988 static ssize_t partial_show(struct kmem_cache *s, char *buf) 3989 { 3990 return show_slab_objects(s, buf, SO_PARTIAL); 3991 } 3992 SLAB_ATTR_RO(partial); 3993 3994 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3995 { 3996 return show_slab_objects(s, buf, SO_CPU); 3997 } 3998 SLAB_ATTR_RO(cpu_slabs); 3999 4000 static ssize_t objects_show(struct kmem_cache *s, char *buf) 4001 { 4002 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 4003 } 4004 SLAB_ATTR_RO(objects); 4005 4006 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 4007 { 4008 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 4009 } 4010 SLAB_ATTR_RO(objects_partial); 4011 4012 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 4013 { 4014 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 4015 } 4016 4017 static ssize_t reclaim_account_store(struct kmem_cache *s, 4018 const char *buf, size_t length) 4019 { 4020 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 4021 if (buf[0] == '1') 4022 s->flags |= SLAB_RECLAIM_ACCOUNT; 4023 return length; 4024 } 4025 SLAB_ATTR(reclaim_account); 4026 4027 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 4028 { 4029 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 4030 } 4031 SLAB_ATTR_RO(hwcache_align); 4032 4033 #ifdef CONFIG_ZONE_DMA 4034 static ssize_t 
cache_dma_show(struct kmem_cache *s, char *buf) 4035 { 4036 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 4037 } 4038 SLAB_ATTR_RO(cache_dma); 4039 #endif 4040 4041 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 4042 { 4043 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 4044 } 4045 SLAB_ATTR_RO(destroy_by_rcu); 4046 4047 #ifdef CONFIG_SLUB_DEBUG 4048 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 4049 { 4050 return show_slab_objects(s, buf, SO_ALL); 4051 } 4052 SLAB_ATTR_RO(slabs); 4053 4054 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 4055 { 4056 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 4057 } 4058 SLAB_ATTR_RO(total_objects); 4059 4060 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 4061 { 4062 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 4063 } 4064 4065 static ssize_t sanity_checks_store(struct kmem_cache *s, 4066 const char *buf, size_t length) 4067 { 4068 s->flags &= ~SLAB_DEBUG_FREE; 4069 if (buf[0] == '1') 4070 s->flags |= SLAB_DEBUG_FREE; 4071 return length; 4072 } 4073 SLAB_ATTR(sanity_checks); 4074 4075 static ssize_t trace_show(struct kmem_cache *s, char *buf) 4076 { 4077 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 4078 } 4079 4080 static ssize_t trace_store(struct kmem_cache *s, const char *buf, 4081 size_t length) 4082 { 4083 s->flags &= ~SLAB_TRACE; 4084 if (buf[0] == '1') 4085 s->flags |= SLAB_TRACE; 4086 return length; 4087 } 4088 SLAB_ATTR(trace); 4089 4090 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 4091 { 4092 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 4093 } 4094 4095 static ssize_t red_zone_store(struct kmem_cache *s, 4096 const char *buf, size_t length) 4097 { 4098 if (any_slab_objects(s)) 4099 return -EBUSY; 4100 4101 s->flags &= ~SLAB_RED_ZONE; 4102 if (buf[0] == '1') 4103 s->flags |= SLAB_RED_ZONE; 4104 calculate_sizes(s, -1); 4105 return length; 4106 } 4107 SLAB_ATTR(red_zone); 4108 4109 static ssize_t poison_show(struct kmem_cache *s, char *buf) 4110 { 4111 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 4112 } 4113 4114 static ssize_t poison_store(struct kmem_cache *s, 4115 const char *buf, size_t length) 4116 { 4117 if (any_slab_objects(s)) 4118 return -EBUSY; 4119 4120 s->flags &= ~SLAB_POISON; 4121 if (buf[0] == '1') 4122 s->flags |= SLAB_POISON; 4123 calculate_sizes(s, -1); 4124 return length; 4125 } 4126 SLAB_ATTR(poison); 4127 4128 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 4129 { 4130 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 4131 } 4132 4133 static ssize_t store_user_store(struct kmem_cache *s, 4134 const char *buf, size_t length) 4135 { 4136 if (any_slab_objects(s)) 4137 return -EBUSY; 4138 4139 s->flags &= ~SLAB_STORE_USER; 4140 if (buf[0] == '1') 4141 s->flags |= SLAB_STORE_USER; 4142 calculate_sizes(s, -1); 4143 return length; 4144 } 4145 SLAB_ATTR(store_user); 4146 4147 static ssize_t validate_show(struct kmem_cache *s, char *buf) 4148 { 4149 return 0; 4150 } 4151 4152 static ssize_t validate_store(struct kmem_cache *s, 4153 const char *buf, size_t length) 4154 { 4155 int ret = -EINVAL; 4156 4157 if (buf[0] == '1') { 4158 ret = validate_slab_cache(s); 4159 if (ret >= 0) 4160 ret = length; 4161 } 4162 return ret; 4163 } 4164 SLAB_ATTR(validate); 4165 4166 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4167 { 4168 if (!(s->flags & SLAB_STORE_USER)) 4169 return -ENOSYS; 4170 return list_locations(s, buf, 
TRACK_ALLOC); 4171 } 4172 SLAB_ATTR_RO(alloc_calls); 4173 4174 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4175 { 4176 if (!(s->flags & SLAB_STORE_USER)) 4177 return -ENOSYS; 4178 return list_locations(s, buf, TRACK_FREE); 4179 } 4180 SLAB_ATTR_RO(free_calls); 4181 #endif /* CONFIG_SLUB_DEBUG */ 4182 4183 #ifdef CONFIG_FAILSLAB 4184 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 4185 { 4186 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 4187 } 4188 4189 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 4190 size_t length) 4191 { 4192 s->flags &= ~SLAB_FAILSLAB; 4193 if (buf[0] == '1') 4194 s->flags |= SLAB_FAILSLAB; 4195 return length; 4196 } 4197 SLAB_ATTR(failslab); 4198 #endif 4199 4200 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4201 { 4202 return 0; 4203 } 4204 4205 static ssize_t shrink_store(struct kmem_cache *s, 4206 const char *buf, size_t length) 4207 { 4208 if (buf[0] == '1') { 4209 int rc = kmem_cache_shrink(s); 4210 4211 if (rc) 4212 return rc; 4213 } else 4214 return -EINVAL; 4215 return length; 4216 } 4217 SLAB_ATTR(shrink); 4218 4219 #ifdef CONFIG_NUMA 4220 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4221 { 4222 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4223 } 4224 4225 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4226 const char *buf, size_t length) 4227 { 4228 unsigned long ratio; 4229 int err; 4230 4231 err = strict_strtoul(buf, 10, &ratio); 4232 if (err) 4233 return err; 4234 4235 if (ratio <= 100) 4236 s->remote_node_defrag_ratio = ratio * 10; 4237 4238 return length; 4239 } 4240 SLAB_ATTR(remote_node_defrag_ratio); 4241 #endif 4242 4243 #ifdef CONFIG_SLUB_STATS 4244 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4245 { 4246 unsigned long sum = 0; 4247 int cpu; 4248 int len; 4249 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4250 4251 if (!data) 4252 return -ENOMEM; 4253 4254 for_each_online_cpu(cpu) { 4255 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 4256 4257 data[cpu] = x; 4258 sum += x; 4259 } 4260 4261 len = sprintf(buf, "%lu", sum); 4262 4263 #ifdef CONFIG_SMP 4264 for_each_online_cpu(cpu) { 4265 if (data[cpu] && len < PAGE_SIZE - 20) 4266 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4267 } 4268 #endif 4269 kfree(data); 4270 return len + sprintf(buf + len, "\n"); 4271 } 4272 4273 static void clear_stat(struct kmem_cache *s, enum stat_item si) 4274 { 4275 int cpu; 4276 4277 for_each_online_cpu(cpu) 4278 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 4279 } 4280 4281 #define STAT_ATTR(si, text) \ 4282 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4283 { \ 4284 return show_stat(s, buf, si); \ 4285 } \ 4286 static ssize_t text##_store(struct kmem_cache *s, \ 4287 const char *buf, size_t length) \ 4288 { \ 4289 if (buf[0] != '0') \ 4290 return -EINVAL; \ 4291 clear_stat(s, si); \ 4292 return length; \ 4293 } \ 4294 SLAB_ATTR(text); \ 4295 4296 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4297 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4298 STAT_ATTR(FREE_FASTPATH, free_fastpath); 4299 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4300 STAT_ATTR(FREE_FROZEN, free_frozen); 4301 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4302 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4303 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4304 STAT_ATTR(ALLOC_SLAB, alloc_slab); 4305 STAT_ATTR(ALLOC_REFILL, alloc_refill); 4306 STAT_ATTR(FREE_SLAB, 
4296 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4297 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4298 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4299 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4300 STAT_ATTR(FREE_FROZEN, free_frozen);
4301 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4302 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4303 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4304 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4305 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4306 STAT_ATTR(FREE_SLAB, free_slab);
4307 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4308 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4309 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4310 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4311 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4312 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4313 STAT_ATTR(ORDER_FALLBACK, order_fallback);
4314 #endif
4315 
4316 static struct attribute *slab_attrs[] = {
4317 	&slab_size_attr.attr,
4318 	&object_size_attr.attr,
4319 	&objs_per_slab_attr.attr,
4320 	&order_attr.attr,
4321 	&min_partial_attr.attr,
4322 	&objects_attr.attr,
4323 	&objects_partial_attr.attr,
4324 	&partial_attr.attr,
4325 	&cpu_slabs_attr.attr,
4326 	&ctor_attr.attr,
4327 	&aliases_attr.attr,
4328 	&align_attr.attr,
4329 	&hwcache_align_attr.attr,
4330 	&reclaim_account_attr.attr,
4331 	&destroy_by_rcu_attr.attr,
4332 	&shrink_attr.attr,
4333 #ifdef CONFIG_SLUB_DEBUG
4334 	&total_objects_attr.attr,
4335 	&slabs_attr.attr,
4336 	&sanity_checks_attr.attr,
4337 	&trace_attr.attr,
4338 	&red_zone_attr.attr,
4339 	&poison_attr.attr,
4340 	&store_user_attr.attr,
4341 	&validate_attr.attr,
4342 	&alloc_calls_attr.attr,
4343 	&free_calls_attr.attr,
4344 #endif
4345 #ifdef CONFIG_ZONE_DMA
4346 	&cache_dma_attr.attr,
4347 #endif
4348 #ifdef CONFIG_NUMA
4349 	&remote_node_defrag_ratio_attr.attr,
4350 #endif
4351 #ifdef CONFIG_SLUB_STATS
4352 	&alloc_fastpath_attr.attr,
4353 	&alloc_slowpath_attr.attr,
4354 	&free_fastpath_attr.attr,
4355 	&free_slowpath_attr.attr,
4356 	&free_frozen_attr.attr,
4357 	&free_add_partial_attr.attr,
4358 	&free_remove_partial_attr.attr,
4359 	&alloc_from_partial_attr.attr,
4360 	&alloc_slab_attr.attr,
4361 	&alloc_refill_attr.attr,
4362 	&free_slab_attr.attr,
4363 	&cpuslab_flush_attr.attr,
4364 	&deactivate_full_attr.attr,
4365 	&deactivate_empty_attr.attr,
4366 	&deactivate_to_head_attr.attr,
4367 	&deactivate_to_tail_attr.attr,
4368 	&deactivate_remote_frees_attr.attr,
4369 	&order_fallback_attr.attr,
4370 #endif
4371 #ifdef CONFIG_FAILSLAB
4372 	&failslab_attr.attr,
4373 #endif
4374 
4375 	NULL
4376 };
4377 
4378 static struct attribute_group slab_attr_group = {
4379 	.attrs = slab_attrs,
4380 };
4381 
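/*
 * slab_attr_group is attached to every cache's kobject in sysfs_slab_add()
 * below, so each attribute above becomes one file per cache.  Purely as an
 * illustration (the exact set depends on the config options above), a
 * kmalloc cache might export
 *
 *	/sys/kernel/slab/kmalloc-192/object_size
 *	/sys/kernel/slab/kmalloc-192/objs_per_slab
 *	/sys/kernel/slab/kmalloc-192/shrink
 *	...
 */
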
4382 static ssize_t slab_attr_show(struct kobject *kobj,
4383 				struct attribute *attr,
4384 				char *buf)
4385 {
4386 	struct slab_attribute *attribute;
4387 	struct kmem_cache *s;
4388 	int err;
4389 
4390 	attribute = to_slab_attr(attr);
4391 	s = to_slab(kobj);
4392 
4393 	if (!attribute->show)
4394 		return -EIO;
4395 
4396 	err = attribute->show(s, buf);
4397 
4398 	return err;
4399 }
4400 
4401 static ssize_t slab_attr_store(struct kobject *kobj,
4402 				struct attribute *attr,
4403 				const char *buf, size_t len)
4404 {
4405 	struct slab_attribute *attribute;
4406 	struct kmem_cache *s;
4407 	int err;
4408 
4409 	attribute = to_slab_attr(attr);
4410 	s = to_slab(kobj);
4411 
4412 	if (!attribute->store)
4413 		return -EIO;
4414 
4415 	err = attribute->store(s, buf, len);
4416 
4417 	return err;
4418 }
4419 
4420 static void kmem_cache_release(struct kobject *kobj)
4421 {
4422 	struct kmem_cache *s = to_slab(kobj);
4423 
4424 	kfree(s->name);
4425 	kfree(s);
4426 }
4427 
4428 static const struct sysfs_ops slab_sysfs_ops = {
4429 	.show = slab_attr_show,
4430 	.store = slab_attr_store,
4431 };
4432 
4433 static struct kobj_type slab_ktype = {
4434 	.sysfs_ops = &slab_sysfs_ops,
4435 	.release = kmem_cache_release
4436 };
4437 
4438 static int uevent_filter(struct kset *kset, struct kobject *kobj)
4439 {
4440 	struct kobj_type *ktype = get_ktype(kobj);
4441 
4442 	if (ktype == &slab_ktype)
4443 		return 1;
4444 	return 0;
4445 }
4446 
4447 static const struct kset_uevent_ops slab_uevent_ops = {
4448 	.filter = uevent_filter,
4449 };
4450 
4451 static struct kset *slab_kset;
4452 
4453 #define ID_STR_LENGTH 64
4454 
4455 /* Create a unique string id for a slab cache:
4456  *
4457  * Format	:[flags-]size
4458  */
4459 static char *create_unique_id(struct kmem_cache *s)
4460 {
4461 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4462 	char *p = name;
4463 
4464 	BUG_ON(!name);
4465 
4466 	*p++ = ':';
4467 	/*
4468 	 * First flags affecting slabcache operations. We will only
4469 	 * get here for aliasable slabs so we do not need to support
4470 	 * too many flags. The flags here must cover all flags that
4471 	 * are matched during merging to guarantee that the id is
4472 	 * unique.
4473 	 */
4474 	if (s->flags & SLAB_CACHE_DMA)
4475 		*p++ = 'd';
4476 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4477 		*p++ = 'a';
4478 	if (s->flags & SLAB_DEBUG_FREE)
4479 		*p++ = 'F';
4480 	if (!(s->flags & SLAB_NOTRACK))
4481 		*p++ = 't';
4482 	if (p != name + 1)
4483 		*p++ = '-';
4484 	p += sprintf(p, "%07d", s->size);
4485 	BUG_ON(p > name + ID_STR_LENGTH - 1);
4486 	return name;
4487 }
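
/*
 * Example (illustrative): a mergeable cache of size 192 with none of the
 * flags above set except object tracking (SLAB_NOTRACK clear) gets the id
 * ":t-0000192"; with SLAB_CACHE_DMA also set it would be ":dt-0000192".
 * The leading ':' marks these generated ids as distinct from real cache
 * names.
 */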
4488 
4489 static int sysfs_slab_add(struct kmem_cache *s)
4490 {
4491 	int err;
4492 	const char *name;
4493 	int unmergeable;
4494 
4495 	if (slab_state < SYSFS)
4496 		/* Defer until later */
4497 		return 0;
4498 
4499 	unmergeable = slab_unmergeable(s);
4500 	if (unmergeable) {
4501 		/*
4502 		 * Slabcache can never be merged so we can use the name proper.
4503 		 * This is typically the case for debug situations. In that
4504 		 * case we can catch duplicate names easily.
4505 		 */
4506 		sysfs_remove_link(&slab_kset->kobj, s->name);
4507 		name = s->name;
4508 	} else {
4509 		/*
4510 		 * Create a unique name for the slab as a target
4511 		 * for the symlinks.
4512 		 */
4513 		name = create_unique_id(s);
4514 	}
4515 
4516 	s->kobj.kset = slab_kset;
4517 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4518 	if (err) {
4519 		kobject_put(&s->kobj);
4520 		return err;
4521 	}
4522 
4523 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4524 	if (err) {
4525 		kobject_del(&s->kobj);
4526 		kobject_put(&s->kobj);
4527 		return err;
4528 	}
4529 	kobject_uevent(&s->kobj, KOBJ_ADD);
4530 	if (!unmergeable) {
4531 		/* Setup first alias */
4532 		sysfs_slab_alias(s, s->name);
4533 		kfree(name);
4534 	}
4535 	return 0;
4536 }
4537 
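/*
 * Sketch of the resulting layout for a mergeable cache (names are
 * illustrative): the kobject directory is created under the generated
 * unique id and every cache name merged into it is added as a symlink by
 * sysfs_slab_alias(), e.g.
 *
 *	/sys/kernel/slab/:t-0000192/		kobject with attribute files
 *	/sys/kernel/slab/kmalloc-192 -> :t-0000192
 *
 * Unmergeable caches (typically those with debug options) keep their
 * proper name as the directory name.
 */
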
4538 static void sysfs_slab_remove(struct kmem_cache *s)
4539 {
4540 	if (slab_state < SYSFS)
4541 		/*
4542 		 * Sysfs has not been setup yet so no need to remove the
4543 		 * cache from sysfs.
4544 		 */
4545 		return;
4546 
4547 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4548 	kobject_del(&s->kobj);
4549 	kobject_put(&s->kobj);
4550 }
4551 
4552 /*
4553  * Need to buffer aliases during bootup until sysfs becomes
4554  * available lest we lose that information.
4555  */
4556 struct saved_alias {
4557 	struct kmem_cache *s;
4558 	const char *name;
4559 	struct saved_alias *next;
4560 };
4561 
4562 static struct saved_alias *alias_list;
4563 
4564 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4565 {
4566 	struct saved_alias *al;
4567 
4568 	if (slab_state == SYSFS) {
4569 		/*
4570 		 * If we have a leftover link then remove it.
4571 		 */
4572 		sysfs_remove_link(&slab_kset->kobj, name);
4573 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4574 	}
4575 
4576 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4577 	if (!al)
4578 		return -ENOMEM;
4579 
4580 	al->s = s;
4581 	al->name = name;
4582 	al->next = alias_list;
4583 	alias_list = al;
4584 	return 0;
4585 }
4586 
4587 static int __init slab_sysfs_init(void)
4588 {
4589 	struct kmem_cache *s;
4590 	int err;
4591 
4592 	down_write(&slub_lock);
4593 
4594 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4595 	if (!slab_kset) {
4596 		up_write(&slub_lock);
4597 		printk(KERN_ERR "Cannot register slab subsystem.\n");
4598 		return -ENOSYS;
4599 	}
4600 
4601 	slab_state = SYSFS;
4602 
4603 	list_for_each_entry(s, &slab_caches, list) {
4604 		err = sysfs_slab_add(s);
4605 		if (err)
4606 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4607 				" to sysfs\n", s->name);
4608 	}
4609 
4610 	while (alias_list) {
4611 		struct saved_alias *al = alias_list;
4612 
4613 		alias_list = alias_list->next;
4614 		err = sysfs_slab_alias(al->s, al->name);
4615 		if (err)
4616 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4617 				" %s to sysfs\n", al->name);
4618 		kfree(al);
4619 	}
4620 
4621 	up_write(&slub_lock);
4622 	resiliency_test();
4623 	return 0;
4624 }
4625 
4626 __initcall(slab_sysfs_init);
4627 #endif /* CONFIG_SYSFS */
4628 
4629 /*
4630  * The /proc/slabinfo ABI
4631  */
4632 #ifdef CONFIG_SLABINFO
4633 static void print_slabinfo_header(struct seq_file *m)
4634 {
4635 	seq_puts(m, "slabinfo - version: 2.1\n");
4636 	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
4637 		"<objperslab> <pagesperslab>");
4638 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4639 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4640 	seq_putc(m, '\n');
4641 }
4642 
4643 static void *s_start(struct seq_file *m, loff_t *pos)
4644 {
4645 	loff_t n = *pos;
4646 
4647 	down_read(&slub_lock);
4648 	if (!n)
4649 		print_slabinfo_header(m);
4650 
4651 	return seq_list_start(&slab_caches, *pos);
4652 }
4653 
4654 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4655 {
4656 	return seq_list_next(p, &slab_caches, pos);
4657 }
4658 
4659 static void s_stop(struct seq_file *m, void *p)
4660 {
4661 	up_read(&slub_lock);
4662 }
4663 
4664 static int s_show(struct seq_file *m, void *p)
4665 {
4666 	unsigned long nr_partials = 0;
4667 	unsigned long nr_slabs = 0;
4668 	unsigned long nr_inuse = 0;
4669 	unsigned long nr_objs = 0;
4670 	unsigned long nr_free = 0;
4671 	struct kmem_cache *s;
4672 	int node;
4673 
4674 	s = list_entry(p, struct kmem_cache, list);
4675 
4676 	for_each_online_node(node) {
4677 		struct kmem_cache_node *n = get_node(s, node);
4678 
4679 		if (!n)
4680 			continue;
4681 
4682 		nr_partials += n->nr_partial;
4683 		nr_slabs += atomic_long_read(&n->nr_slabs);
4684 		nr_objs += atomic_long_read(&n->total_objects);
4685 		nr_free += count_partial(n, count_free);
4686 	}
4687 
4688 	nr_inuse = nr_objs - nr_free;
4689 
4690 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4691 		   nr_objs, s->size, oo_objects(s->oo),
4692 		   (1 << oo_order(s->oo)));
4693 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4694 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4695 		   0UL);
4696 	seq_putc(m, '\n');
4697 	return 0;
4698 }
4699 
4700 static const struct seq_operations slabinfo_op = {
4701 	.start = s_start,
4702 	.next = s_next,
4703 	.stop = s_stop,
4704 	.show = s_show,
4705 };
4706 
4707 static int slabinfo_open(struct inode *inode, struct file *file)
4708 {
4709 	return seq_open(file, &slabinfo_op);
4710 }
4711 
4712 static const struct file_operations proc_slabinfo_operations = {
4713 	.open = slabinfo_open,
4714 	.read = seq_read,
4715 	.llseek = seq_lseek,
4716 	.release = seq_release,
4717 };
4718 
4719 static int __init slab_proc_init(void)
4720 {
4721 	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
4722 	return 0;
4723 }
4724 module_init(slab_proc_init);
4725 #endif /* CONFIG_SLABINFO */
4726 
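/*
 * For reference, s_show() above emits one line per cache in the classic
 * slabinfo 2.1 format; an illustrative line (values made up) looks like
 *
 *	kmalloc-192  1234  1344  192  21  1 : tunables 0 0 0 : slabdata 64 64 0
 *
 * The tunables and <sharedavail> columns are always zero because SLUB has
 * no per cpu queues to tune; they are emitted only to keep the ABI
 * compatible with SLAB.
 */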