/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added to or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */
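
/*
 * For illustration: the "additional freelist" mentioned above is the
 * lockless per cpu freelist in struct kmem_cache_cpu. The allocation
 * fastpath further down pops objects from it without taking any lock,
 * roughly
 *
 *	object = c->freelist;
 *	c->freelist = object[c->offset];
 *
 * while frees arriving from other processors go through page->freelist,
 * which is normally only manipulated with slab_lock(page) held.
 */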

#define FROZEN (1 << PG_active)

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG (1 << PG_error)
#else
#define SLABDEBUG 0
#endif

static inline int SlabFrozen(struct page *page)
{
	return page->flags & FROZEN;
}

static inline void SetSlabFrozen(struct page *page)
{
	page->flags |= FROZEN;
}

static inline void ClearSlabFrozen(struct page *page)
{
	page->flags &= ~FROZEN;
}

static inline int SlabDebug(struct page *page)
{
	return page->flags & SLABDEBUG;
}

static inline void SetSlabDebug(struct page *page)
{
	page->flags |= SLABDEBUG;
}

static inline void ClearSlabDebug(struct page *page)
{
	page->flags &= ~SLABDEBUG;
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
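 *
 * For illustration: with MIN_PARTIAL 5 and MAX_PARTIAL 10, each node keeps
 * at least five partial slabs around even when they are empty, while a
 * partial list that has grown beyond ten entries is considered worth
 * sorting so that the nearly full slabs are refilled first and the nearly
 * empty ones can drain and be released.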
164 */ 165 #define MAX_PARTIAL 10 166 167 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \ 168 SLAB_POISON | SLAB_STORE_USER) 169 170 /* 171 * Set of flags that will prevent slab merging 172 */ 173 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 174 SLAB_TRACE | SLAB_DESTROY_BY_RCU) 175 176 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 177 SLAB_CACHE_DMA) 178 179 #ifndef ARCH_KMALLOC_MINALIGN 180 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 181 #endif 182 183 #ifndef ARCH_SLAB_MINALIGN 184 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 185 #endif 186 187 /* Internal SLUB flags */ 188 #define __OBJECT_POISON 0x80000000 /* Poison object */ 189 #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */ 190 191 static int kmem_size = sizeof(struct kmem_cache); 192 193 #ifdef CONFIG_SMP 194 static struct notifier_block slab_notifier; 195 #endif 196 197 static enum { 198 DOWN, /* No slab functionality available */ 199 PARTIAL, /* kmem_cache_open() works but kmalloc does not */ 200 UP, /* Everything works but does not show up in sysfs */ 201 SYSFS /* Sysfs up */ 202 } slab_state = DOWN; 203 204 /* A list of all slab caches on the system */ 205 static DECLARE_RWSEM(slub_lock); 206 static LIST_HEAD(slab_caches); 207 208 /* 209 * Tracking user of a slab. 210 */ 211 struct track { 212 void *addr; /* Called from address */ 213 int cpu; /* Was running on cpu */ 214 int pid; /* Pid context */ 215 unsigned long when; /* When did the operation occur */ 216 }; 217 218 enum track_item { TRACK_ALLOC, TRACK_FREE }; 219 220 #ifdef CONFIG_SLUB_DEBUG 221 static int sysfs_slab_add(struct kmem_cache *); 222 static int sysfs_slab_alias(struct kmem_cache *, const char *); 223 static void sysfs_slab_remove(struct kmem_cache *); 224 225 #else 226 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 227 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 228 { return 0; } 229 static inline void sysfs_slab_remove(struct kmem_cache *s) 230 { 231 kfree(s); 232 } 233 234 #endif 235 236 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si) 237 { 238 #ifdef CONFIG_SLUB_STATS 239 c->stat[si]++; 240 #endif 241 } 242 243 /******************************************************************** 244 * Core slab cache functions 245 *******************************************************************/ 246 247 int slab_is_available(void) 248 { 249 return slab_state >= UP; 250 } 251 252 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) 253 { 254 #ifdef CONFIG_NUMA 255 return s->node[node]; 256 #else 257 return &s->local_node; 258 #endif 259 } 260 261 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu) 262 { 263 #ifdef CONFIG_SMP 264 return s->cpu_slab[cpu]; 265 #else 266 return &s->cpu_slab; 267 #endif 268 } 269 270 /* Verify that a pointer has an address that is valid within a slab page */ 271 static inline int check_valid_pointer(struct kmem_cache *s, 272 struct page *page, const void *object) 273 { 274 void *base; 275 276 if (!object) 277 return 1; 278 279 base = page_address(page); 280 if (object < base || object >= base + page->objects * s->size || 281 (object - base) % s->size) { 282 return 0; 283 } 284 285 return 1; 286 } 287 288 /* 289 * Slow version of get and set free pointer. 290 * 291 * This version requires touching the cache lines of kmem_cache which 292 * we avoid to do in the fast alloc free paths. 
There we obtain the offset 293 * from the page struct. 294 */ 295 static inline void *get_freepointer(struct kmem_cache *s, void *object) 296 { 297 return *(void **)(object + s->offset); 298 } 299 300 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 301 { 302 *(void **)(object + s->offset) = fp; 303 } 304 305 /* Loop over all objects in a slab */ 306 #define for_each_object(__p, __s, __addr, __objects) \ 307 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ 308 __p += (__s)->size) 309 310 /* Scan freelist */ 311 #define for_each_free_object(__p, __s, __free) \ 312 for (__p = (__free); __p; __p = get_freepointer((__s), __p)) 313 314 /* Determine object index from a given position */ 315 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) 316 { 317 return (p - addr) / s->size; 318 } 319 320 static inline struct kmem_cache_order_objects oo_make(int order, 321 unsigned long size) 322 { 323 struct kmem_cache_order_objects x = { 324 (order << 16) + (PAGE_SIZE << order) / size 325 }; 326 327 return x; 328 } 329 330 static inline int oo_order(struct kmem_cache_order_objects x) 331 { 332 return x.x >> 16; 333 } 334 335 static inline int oo_objects(struct kmem_cache_order_objects x) 336 { 337 return x.x & ((1 << 16) - 1); 338 } 339 340 #ifdef CONFIG_SLUB_DEBUG 341 /* 342 * Debug settings: 343 */ 344 #ifdef CONFIG_SLUB_DEBUG_ON 345 static int slub_debug = DEBUG_DEFAULT_FLAGS; 346 #else 347 static int slub_debug; 348 #endif 349 350 static char *slub_debug_slabs; 351 352 /* 353 * Object debugging 354 */ 355 static void print_section(char *text, u8 *addr, unsigned int length) 356 { 357 int i, offset; 358 int newline = 1; 359 char ascii[17]; 360 361 ascii[16] = 0; 362 363 for (i = 0; i < length; i++) { 364 if (newline) { 365 printk(KERN_ERR "%8s 0x%p: ", text, addr + i); 366 newline = 0; 367 } 368 printk(KERN_CONT " %02x", addr[i]); 369 offset = i % 16; 370 ascii[offset] = isgraph(addr[i]) ? 
addr[i] : '.'; 371 if (offset == 15) { 372 printk(KERN_CONT " %s\n", ascii); 373 newline = 1; 374 } 375 } 376 if (!newline) { 377 i %= 16; 378 while (i < 16) { 379 printk(KERN_CONT " "); 380 ascii[i] = ' '; 381 i++; 382 } 383 printk(KERN_CONT " %s\n", ascii); 384 } 385 } 386 387 static struct track *get_track(struct kmem_cache *s, void *object, 388 enum track_item alloc) 389 { 390 struct track *p; 391 392 if (s->offset) 393 p = object + s->offset + sizeof(void *); 394 else 395 p = object + s->inuse; 396 397 return p + alloc; 398 } 399 400 static void set_track(struct kmem_cache *s, void *object, 401 enum track_item alloc, void *addr) 402 { 403 struct track *p; 404 405 if (s->offset) 406 p = object + s->offset + sizeof(void *); 407 else 408 p = object + s->inuse; 409 410 p += alloc; 411 if (addr) { 412 p->addr = addr; 413 p->cpu = smp_processor_id(); 414 p->pid = current->pid; 415 p->when = jiffies; 416 } else 417 memset(p, 0, sizeof(struct track)); 418 } 419 420 static void init_tracking(struct kmem_cache *s, void *object) 421 { 422 if (!(s->flags & SLAB_STORE_USER)) 423 return; 424 425 set_track(s, object, TRACK_FREE, NULL); 426 set_track(s, object, TRACK_ALLOC, NULL); 427 } 428 429 static void print_track(const char *s, struct track *t) 430 { 431 if (!t->addr) 432 return; 433 434 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n", 435 s, t->addr, jiffies - t->when, t->cpu, t->pid); 436 } 437 438 static void print_tracking(struct kmem_cache *s, void *object) 439 { 440 if (!(s->flags & SLAB_STORE_USER)) 441 return; 442 443 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); 444 print_track("Freed", get_track(s, object, TRACK_FREE)); 445 } 446 447 static void print_page_info(struct page *page) 448 { 449 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", 450 page, page->objects, page->inuse, page->freelist, page->flags); 451 452 } 453 454 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 455 { 456 va_list args; 457 char buf[100]; 458 459 va_start(args, fmt); 460 vsnprintf(buf, sizeof(buf), fmt, args); 461 va_end(args); 462 printk(KERN_ERR "========================================" 463 "=====================================\n"); 464 printk(KERN_ERR "BUG %s: %s\n", s->name, buf); 465 printk(KERN_ERR "----------------------------------------" 466 "-------------------------------------\n\n"); 467 } 468 469 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
470 { 471 va_list args; 472 char buf[100]; 473 474 va_start(args, fmt); 475 vsnprintf(buf, sizeof(buf), fmt, args); 476 va_end(args); 477 printk(KERN_ERR "FIX %s: %s\n", s->name, buf); 478 } 479 480 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 481 { 482 unsigned int off; /* Offset of last byte */ 483 u8 *addr = page_address(page); 484 485 print_tracking(s, p); 486 487 print_page_info(page); 488 489 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n", 490 p, p - addr, get_freepointer(s, p)); 491 492 if (p > addr + 16) 493 print_section("Bytes b4", p - 16, 16); 494 495 print_section("Object", p, min(s->objsize, 128)); 496 497 if (s->flags & SLAB_RED_ZONE) 498 print_section("Redzone", p + s->objsize, 499 s->inuse - s->objsize); 500 501 if (s->offset) 502 off = s->offset + sizeof(void *); 503 else 504 off = s->inuse; 505 506 if (s->flags & SLAB_STORE_USER) 507 off += 2 * sizeof(struct track); 508 509 if (off != s->size) 510 /* Beginning of the filler is the free pointer */ 511 print_section("Padding", p + off, s->size - off); 512 513 dump_stack(); 514 } 515 516 static void object_err(struct kmem_cache *s, struct page *page, 517 u8 *object, char *reason) 518 { 519 slab_bug(s, "%s", reason); 520 print_trailer(s, page, object); 521 } 522 523 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 524 { 525 va_list args; 526 char buf[100]; 527 528 va_start(args, fmt); 529 vsnprintf(buf, sizeof(buf), fmt, args); 530 va_end(args); 531 slab_bug(s, "%s", buf); 532 print_page_info(page); 533 dump_stack(); 534 } 535 536 static void init_object(struct kmem_cache *s, void *object, int active) 537 { 538 u8 *p = object; 539 540 if (s->flags & __OBJECT_POISON) { 541 memset(p, POISON_FREE, s->objsize - 1); 542 p[s->objsize - 1] = POISON_END; 543 } 544 545 if (s->flags & SLAB_RED_ZONE) 546 memset(p + s->objsize, 547 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, 548 s->inuse - s->objsize); 549 } 550 551 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) 552 { 553 while (bytes) { 554 if (*start != (u8)value) 555 return start; 556 start++; 557 bytes--; 558 } 559 return NULL; 560 } 561 562 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 563 void *from, void *to) 564 { 565 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); 566 memset(from, data, to - from); 567 } 568 569 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 570 u8 *object, char *what, 571 u8 *start, unsigned int value, unsigned int bytes) 572 { 573 u8 *fault; 574 u8 *end; 575 576 fault = check_bytes(start, value, bytes); 577 if (!fault) 578 return 1; 579 580 end = start + bytes; 581 while (end > fault && end[-1] == value) 582 end--; 583 584 slab_bug(s, "%s overwritten", what); 585 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n", 586 fault, end - 1, fault[0], value); 587 print_trailer(s, page, object); 588 589 restore_bytes(s, what, value, fault, end); 590 return 0; 591 } 592 593 /* 594 * Object layout: 595 * 596 * object address 597 * Bytes of the object to be managed. 598 * If the freepointer may overlay the object then the free 599 * pointer is the first word of the object. 600 * 601 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 602 * 0xa5 (POISON_END) 603 * 604 * object + s->objsize 605 * Padding to reach word boundary. This is also used for Redzoning. 606 * Padding is extended by another word if Redzoning is enabled and 607 * objsize == inuse. 
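 *
 * For illustration, assuming a 64-bit kernel, an objsize of 20 bytes,
 * red zoning, poisoning and SLAB_STORE_USER: the object occupies bytes
 * 0-19, the red zone pads it out to the word boundary at inuse == 24,
 * the free pointer (which may not overlay a poisoned object) sits at
 * offset 24, two struct track records for the alloc/free callers follow,
 * and the rest up to s->size is POISON_INUSE padding.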
608 * 609 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 610 * 0xcc (RED_ACTIVE) for objects in use. 611 * 612 * object + s->inuse 613 * Meta data starts here. 614 * 615 * A. Free pointer (if we cannot overwrite object on free) 616 * B. Tracking data for SLAB_STORE_USER 617 * C. Padding to reach required alignment boundary or at mininum 618 * one word if debugging is on to be able to detect writes 619 * before the word boundary. 620 * 621 * Padding is done using 0x5a (POISON_INUSE) 622 * 623 * object + s->size 624 * Nothing is used beyond s->size. 625 * 626 * If slabcaches are merged then the objsize and inuse boundaries are mostly 627 * ignored. And therefore no slab options that rely on these boundaries 628 * may be used with merged slabcaches. 629 */ 630 631 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 632 { 633 unsigned long off = s->inuse; /* The end of info */ 634 635 if (s->offset) 636 /* Freepointer is placed after the object. */ 637 off += sizeof(void *); 638 639 if (s->flags & SLAB_STORE_USER) 640 /* We also have user information there */ 641 off += 2 * sizeof(struct track); 642 643 if (s->size == off) 644 return 1; 645 646 return check_bytes_and_report(s, page, p, "Object padding", 647 p + off, POISON_INUSE, s->size - off); 648 } 649 650 /* Check the pad bytes at the end of a slab page */ 651 static int slab_pad_check(struct kmem_cache *s, struct page *page) 652 { 653 u8 *start; 654 u8 *fault; 655 u8 *end; 656 int length; 657 int remainder; 658 659 if (!(s->flags & SLAB_POISON)) 660 return 1; 661 662 start = page_address(page); 663 length = (PAGE_SIZE << compound_order(page)); 664 end = start + length; 665 remainder = length % s->size; 666 if (!remainder) 667 return 1; 668 669 fault = check_bytes(end - remainder, POISON_INUSE, remainder); 670 if (!fault) 671 return 1; 672 while (end > fault && end[-1] == POISON_INUSE) 673 end--; 674 675 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 676 print_section("Padding", end - remainder, remainder); 677 678 restore_bytes(s, "slab padding", POISON_INUSE, start, end); 679 return 0; 680 } 681 682 static int check_object(struct kmem_cache *s, struct page *page, 683 void *object, int active) 684 { 685 u8 *p = object; 686 u8 *endobject = object + s->objsize; 687 688 if (s->flags & SLAB_RED_ZONE) { 689 unsigned int red = 690 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; 691 692 if (!check_bytes_and_report(s, page, object, "Redzone", 693 endobject, red, s->inuse - s->objsize)) 694 return 0; 695 } else { 696 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { 697 check_bytes_and_report(s, page, p, "Alignment padding", 698 endobject, POISON_INUSE, s->inuse - s->objsize); 699 } 700 } 701 702 if (s->flags & SLAB_POISON) { 703 if (!active && (s->flags & __OBJECT_POISON) && 704 (!check_bytes_and_report(s, page, p, "Poison", p, 705 POISON_FREE, s->objsize - 1) || 706 !check_bytes_and_report(s, page, p, "Poison", 707 p + s->objsize - 1, POISON_END, 1))) 708 return 0; 709 /* 710 * check_pad_bytes cleans up on its own. 711 */ 712 check_pad_bytes(s, page, p); 713 } 714 715 if (!s->offset && active) 716 /* 717 * Object and freepointer overlap. Cannot check 718 * freepointer while object is allocated. 719 */ 720 return 1; 721 722 /* Check free pointer validity */ 723 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 724 object_err(s, page, p, "Freepointer corrupt"); 725 /* 726 * No choice but to zap it and thus loose the remainder 727 * of the free objects in this slab. 
May cause 728 * another error because the object count is now wrong. 729 */ 730 set_freepointer(s, p, NULL); 731 return 0; 732 } 733 return 1; 734 } 735 736 static int check_slab(struct kmem_cache *s, struct page *page) 737 { 738 int maxobj; 739 740 VM_BUG_ON(!irqs_disabled()); 741 742 if (!PageSlab(page)) { 743 slab_err(s, page, "Not a valid slab page"); 744 return 0; 745 } 746 747 maxobj = (PAGE_SIZE << compound_order(page)) / s->size; 748 if (page->objects > maxobj) { 749 slab_err(s, page, "objects %u > max %u", 750 s->name, page->objects, maxobj); 751 return 0; 752 } 753 if (page->inuse > page->objects) { 754 slab_err(s, page, "inuse %u > max %u", 755 s->name, page->inuse, page->objects); 756 return 0; 757 } 758 /* Slab_pad_check fixes things up after itself */ 759 slab_pad_check(s, page); 760 return 1; 761 } 762 763 /* 764 * Determine if a certain object on a page is on the freelist. Must hold the 765 * slab lock to guarantee that the chains are in a consistent state. 766 */ 767 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 768 { 769 int nr = 0; 770 void *fp = page->freelist; 771 void *object = NULL; 772 unsigned long max_objects; 773 774 while (fp && nr <= page->objects) { 775 if (fp == search) 776 return 1; 777 if (!check_valid_pointer(s, page, fp)) { 778 if (object) { 779 object_err(s, page, object, 780 "Freechain corrupt"); 781 set_freepointer(s, object, NULL); 782 break; 783 } else { 784 slab_err(s, page, "Freepointer corrupt"); 785 page->freelist = NULL; 786 page->inuse = page->objects; 787 slab_fix(s, "Freelist cleared"); 788 return 0; 789 } 790 break; 791 } 792 object = fp; 793 fp = get_freepointer(s, object); 794 nr++; 795 } 796 797 max_objects = (PAGE_SIZE << compound_order(page)) / s->size; 798 if (max_objects > 65535) 799 max_objects = 65535; 800 801 if (page->objects != max_objects) { 802 slab_err(s, page, "Wrong number of objects. Found %d but " 803 "should be %d", page->objects, max_objects); 804 page->objects = max_objects; 805 slab_fix(s, "Number of objects adjusted."); 806 } 807 if (page->inuse != page->objects - nr) { 808 slab_err(s, page, "Wrong object count. Counter is %d but " 809 "counted were %d", page->inuse, page->objects - nr); 810 page->inuse = page->objects - nr; 811 slab_fix(s, "Object count adjusted."); 812 } 813 return search == NULL; 814 } 815 816 static void trace(struct kmem_cache *s, struct page *page, void *object, 817 int alloc) 818 { 819 if (s->flags & SLAB_TRACE) { 820 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 821 s->name, 822 alloc ? "alloc" : "free", 823 object, page->inuse, 824 page->freelist); 825 826 if (!alloc) 827 print_section("Object", (void *)object, s->objsize); 828 829 dump_stack(); 830 } 831 } 832 833 /* 834 * Tracking of fully allocated slabs for debugging purposes. 
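 *
 * (add_full/remove_full below are only exercised when SLAB_STORE_USER
 * debugging is active; in normal operation a fully allocated slab is not
 * kept on any list.)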
835 */ 836 static void add_full(struct kmem_cache_node *n, struct page *page) 837 { 838 spin_lock(&n->list_lock); 839 list_add(&page->lru, &n->full); 840 spin_unlock(&n->list_lock); 841 } 842 843 static void remove_full(struct kmem_cache *s, struct page *page) 844 { 845 struct kmem_cache_node *n; 846 847 if (!(s->flags & SLAB_STORE_USER)) 848 return; 849 850 n = get_node(s, page_to_nid(page)); 851 852 spin_lock(&n->list_lock); 853 list_del(&page->lru); 854 spin_unlock(&n->list_lock); 855 } 856 857 /* Tracking of the number of slabs for debugging purposes */ 858 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 859 { 860 struct kmem_cache_node *n = get_node(s, node); 861 862 return atomic_long_read(&n->nr_slabs); 863 } 864 865 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 866 { 867 struct kmem_cache_node *n = get_node(s, node); 868 869 /* 870 * May be called early in order to allocate a slab for the 871 * kmem_cache_node structure. Solve the chicken-egg 872 * dilemma by deferring the increment of the count during 873 * bootstrap (see early_kmem_cache_node_alloc). 874 */ 875 if (!NUMA_BUILD || n) { 876 atomic_long_inc(&n->nr_slabs); 877 atomic_long_add(objects, &n->total_objects); 878 } 879 } 880 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 881 { 882 struct kmem_cache_node *n = get_node(s, node); 883 884 atomic_long_dec(&n->nr_slabs); 885 atomic_long_sub(objects, &n->total_objects); 886 } 887 888 /* Object debug checks for alloc/free paths */ 889 static void setup_object_debug(struct kmem_cache *s, struct page *page, 890 void *object) 891 { 892 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 893 return; 894 895 init_object(s, object, 0); 896 init_tracking(s, object); 897 } 898 899 static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 900 void *object, void *addr) 901 { 902 if (!check_slab(s, page)) 903 goto bad; 904 905 if (!on_freelist(s, page, object)) { 906 object_err(s, page, object, "Object already allocated"); 907 goto bad; 908 } 909 910 if (!check_valid_pointer(s, page, object)) { 911 object_err(s, page, object, "Freelist Pointer check fails"); 912 goto bad; 913 } 914 915 if (!check_object(s, page, object, 0)) 916 goto bad; 917 918 /* Success perform special debug activities for allocs */ 919 if (s->flags & SLAB_STORE_USER) 920 set_track(s, object, TRACK_ALLOC, addr); 921 trace(s, page, object, 1); 922 init_object(s, object, 1); 923 return 1; 924 925 bad: 926 if (PageSlab(page)) { 927 /* 928 * If this is a slab page then lets do the best we can 929 * to avoid issues in the future. Marking all objects 930 * as used avoids touching the remaining objects. 
931 */ 932 slab_fix(s, "Marking all objects used"); 933 page->inuse = page->objects; 934 page->freelist = NULL; 935 } 936 return 0; 937 } 938 939 static int free_debug_processing(struct kmem_cache *s, struct page *page, 940 void *object, void *addr) 941 { 942 if (!check_slab(s, page)) 943 goto fail; 944 945 if (!check_valid_pointer(s, page, object)) { 946 slab_err(s, page, "Invalid object pointer 0x%p", object); 947 goto fail; 948 } 949 950 if (on_freelist(s, page, object)) { 951 object_err(s, page, object, "Object already free"); 952 goto fail; 953 } 954 955 if (!check_object(s, page, object, 1)) 956 return 0; 957 958 if (unlikely(s != page->slab)) { 959 if (!PageSlab(page)) { 960 slab_err(s, page, "Attempt to free object(0x%p) " 961 "outside of slab", object); 962 } else if (!page->slab) { 963 printk(KERN_ERR 964 "SLUB <none>: no slab for object 0x%p.\n", 965 object); 966 dump_stack(); 967 } else 968 object_err(s, page, object, 969 "page slab pointer corrupt."); 970 goto fail; 971 } 972 973 /* Special debug activities for freeing objects */ 974 if (!SlabFrozen(page) && !page->freelist) 975 remove_full(s, page); 976 if (s->flags & SLAB_STORE_USER) 977 set_track(s, object, TRACK_FREE, addr); 978 trace(s, page, object, 0); 979 init_object(s, object, 0); 980 return 1; 981 982 fail: 983 slab_fix(s, "Object at 0x%p not freed", object); 984 return 0; 985 } 986 987 static int __init setup_slub_debug(char *str) 988 { 989 slub_debug = DEBUG_DEFAULT_FLAGS; 990 if (*str++ != '=' || !*str) 991 /* 992 * No options specified. Switch on full debugging. 993 */ 994 goto out; 995 996 if (*str == ',') 997 /* 998 * No options but restriction on slabs. This means full 999 * debugging for slabs matching a pattern. 1000 */ 1001 goto check_slabs; 1002 1003 slub_debug = 0; 1004 if (*str == '-') 1005 /* 1006 * Switch off all debugging measures. 1007 */ 1008 goto out; 1009 1010 /* 1011 * Determine which debug features should be switched on 1012 */ 1013 for (; *str && *str != ','; str++) { 1014 switch (tolower(*str)) { 1015 case 'f': 1016 slub_debug |= SLAB_DEBUG_FREE; 1017 break; 1018 case 'z': 1019 slub_debug |= SLAB_RED_ZONE; 1020 break; 1021 case 'p': 1022 slub_debug |= SLAB_POISON; 1023 break; 1024 case 'u': 1025 slub_debug |= SLAB_STORE_USER; 1026 break; 1027 case 't': 1028 slub_debug |= SLAB_TRACE; 1029 break; 1030 default: 1031 printk(KERN_ERR "slub_debug option '%c' " 1032 "unknown. skipped\n", *str); 1033 } 1034 } 1035 1036 check_slabs: 1037 if (*str == ',') 1038 slub_debug_slabs = str + 1; 1039 out: 1040 return 1; 1041 } 1042 1043 __setup("slub_debug", setup_slub_debug); 1044 1045 static unsigned long kmem_cache_flags(unsigned long objsize, 1046 unsigned long flags, const char *name, 1047 void (*ctor)(struct kmem_cache *, void *)) 1048 { 1049 /* 1050 * Enable debugging if selected on the kernel commandline. 
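	 *
	 * For example, booting with slub_debug=ZP,dentry would, per
	 * setup_slub_debug() above, enable red zoning and poisoning for
	 * caches whose name starts with "dentry", while a bare slub_debug
	 * applies the full DEBUG_DEFAULT_FLAGS set to every cache.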
1051 */ 1052 if (slub_debug && (!slub_debug_slabs || 1053 strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0)) 1054 flags |= slub_debug; 1055 1056 return flags; 1057 } 1058 #else 1059 static inline void setup_object_debug(struct kmem_cache *s, 1060 struct page *page, void *object) {} 1061 1062 static inline int alloc_debug_processing(struct kmem_cache *s, 1063 struct page *page, void *object, void *addr) { return 0; } 1064 1065 static inline int free_debug_processing(struct kmem_cache *s, 1066 struct page *page, void *object, void *addr) { return 0; } 1067 1068 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1069 { return 1; } 1070 static inline int check_object(struct kmem_cache *s, struct page *page, 1071 void *object, int active) { return 1; } 1072 static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 1073 static inline unsigned long kmem_cache_flags(unsigned long objsize, 1074 unsigned long flags, const char *name, 1075 void (*ctor)(struct kmem_cache *, void *)) 1076 { 1077 return flags; 1078 } 1079 #define slub_debug 0 1080 1081 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1082 { return 0; } 1083 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1084 int objects) {} 1085 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1086 int objects) {} 1087 #endif 1088 1089 /* 1090 * Slab allocation and freeing 1091 */ 1092 static inline struct page *alloc_slab_page(gfp_t flags, int node, 1093 struct kmem_cache_order_objects oo) 1094 { 1095 int order = oo_order(oo); 1096 1097 if (node == -1) 1098 return alloc_pages(flags, order); 1099 else 1100 return alloc_pages_node(node, flags, order); 1101 } 1102 1103 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1104 { 1105 struct page *page; 1106 struct kmem_cache_order_objects oo = s->oo; 1107 1108 flags |= s->allocflags; 1109 1110 page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node, 1111 oo); 1112 if (unlikely(!page)) { 1113 oo = s->min; 1114 /* 1115 * Allocation may have failed due to fragmentation. 1116 * Try a lower order alloc if possible 1117 */ 1118 page = alloc_slab_page(flags, node, oo); 1119 if (!page) 1120 return NULL; 1121 1122 stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK); 1123 } 1124 page->objects = oo_objects(oo); 1125 mod_zone_page_state(page_zone(page), 1126 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1127 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1128 1 << oo_order(oo)); 1129 1130 return page; 1131 } 1132 1133 static void setup_object(struct kmem_cache *s, struct page *page, 1134 void *object) 1135 { 1136 setup_object_debug(s, page, object); 1137 if (unlikely(s->ctor)) 1138 s->ctor(s, object); 1139 } 1140 1141 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1142 { 1143 struct page *page; 1144 void *start; 1145 void *last; 1146 void *p; 1147 1148 BUG_ON(flags & GFP_SLAB_BUG_MASK); 1149 1150 page = allocate_slab(s, 1151 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1152 if (!page) 1153 goto out; 1154 1155 inc_slabs_node(s, page_to_nid(page), page->objects); 1156 page->slab = s; 1157 page->flags |= 1 << PG_slab; 1158 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | 1159 SLAB_STORE_USER | SLAB_TRACE)) 1160 SetSlabDebug(page); 1161 1162 start = page_address(page); 1163 1164 if (unlikely(s->flags & SLAB_POISON)) 1165 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); 1166 1167 last = start; 1168 for_each_object(p, s, start, page->objects) { 1169 setup_object(s, page, last); 1170 set_freepointer(s, last, p); 1171 last = p; 1172 } 1173 setup_object(s, page, last); 1174 set_freepointer(s, last, NULL); 1175 1176 page->freelist = start; 1177 page->inuse = 0; 1178 out: 1179 return page; 1180 } 1181 1182 static void __free_slab(struct kmem_cache *s, struct page *page) 1183 { 1184 int order = compound_order(page); 1185 int pages = 1 << order; 1186 1187 if (unlikely(SlabDebug(page))) { 1188 void *p; 1189 1190 slab_pad_check(s, page); 1191 for_each_object(p, s, page_address(page), 1192 page->objects) 1193 check_object(s, page, p, 0); 1194 ClearSlabDebug(page); 1195 } 1196 1197 mod_zone_page_state(page_zone(page), 1198 (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
1199 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, 1200 -pages); 1201 1202 __ClearPageSlab(page); 1203 reset_page_mapcount(page); 1204 __free_pages(page, order); 1205 } 1206 1207 static void rcu_free_slab(struct rcu_head *h) 1208 { 1209 struct page *page; 1210 1211 page = container_of((struct list_head *)h, struct page, lru); 1212 __free_slab(page->slab, page); 1213 } 1214 1215 static void free_slab(struct kmem_cache *s, struct page *page) 1216 { 1217 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1218 /* 1219 * RCU free overloads the RCU head over the LRU 1220 */ 1221 struct rcu_head *head = (void *)&page->lru; 1222 1223 call_rcu(head, rcu_free_slab); 1224 } else 1225 __free_slab(s, page); 1226 } 1227 1228 static void discard_slab(struct kmem_cache *s, struct page *page) 1229 { 1230 dec_slabs_node(s, page_to_nid(page), page->objects); 1231 free_slab(s, page); 1232 } 1233 1234 /* 1235 * Per slab locking using the pagelock 1236 */ 1237 static __always_inline void slab_lock(struct page *page) 1238 { 1239 bit_spin_lock(PG_locked, &page->flags); 1240 } 1241 1242 static __always_inline void slab_unlock(struct page *page) 1243 { 1244 __bit_spin_unlock(PG_locked, &page->flags); 1245 } 1246 1247 static __always_inline int slab_trylock(struct page *page) 1248 { 1249 int rc = 1; 1250 1251 rc = bit_spin_trylock(PG_locked, &page->flags); 1252 return rc; 1253 } 1254 1255 /* 1256 * Management of partially allocated slabs 1257 */ 1258 static void add_partial(struct kmem_cache_node *n, 1259 struct page *page, int tail) 1260 { 1261 spin_lock(&n->list_lock); 1262 n->nr_partial++; 1263 if (tail) 1264 list_add_tail(&page->lru, &n->partial); 1265 else 1266 list_add(&page->lru, &n->partial); 1267 spin_unlock(&n->list_lock); 1268 } 1269 1270 static void remove_partial(struct kmem_cache *s, struct page *page) 1271 { 1272 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1273 1274 spin_lock(&n->list_lock); 1275 list_del(&page->lru); 1276 n->nr_partial--; 1277 spin_unlock(&n->list_lock); 1278 } 1279 1280 /* 1281 * Lock slab and remove from the partial list. 1282 * 1283 * Must hold list_lock. 1284 */ 1285 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, 1286 struct page *page) 1287 { 1288 if (slab_trylock(page)) { 1289 list_del(&page->lru); 1290 n->nr_partial--; 1291 SetSlabFrozen(page); 1292 return 1; 1293 } 1294 return 0; 1295 } 1296 1297 /* 1298 * Try to allocate a partial slab from a specific node. 1299 */ 1300 static struct page *get_partial_node(struct kmem_cache_node *n) 1301 { 1302 struct page *page; 1303 1304 /* 1305 * Racy check. If we mistakenly see no partial slabs then we 1306 * just allocate an empty slab. If we mistakenly try to get a 1307 * partial slab and there is none available then get_partials() 1308 * will return NULL. 1309 */ 1310 if (!n || !n->nr_partial) 1311 return NULL; 1312 1313 spin_lock(&n->list_lock); 1314 list_for_each_entry(page, &n->partial, lru) 1315 if (lock_and_freeze_slab(n, page)) 1316 goto out; 1317 page = NULL; 1318 out: 1319 spin_unlock(&n->list_lock); 1320 return page; 1321 } 1322 1323 /* 1324 * Get a page from somewhere. Search in increasing NUMA distances. 
1325 */ 1326 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) 1327 { 1328 #ifdef CONFIG_NUMA 1329 struct zonelist *zonelist; 1330 struct zoneref *z; 1331 struct zone *zone; 1332 enum zone_type high_zoneidx = gfp_zone(flags); 1333 struct page *page; 1334 1335 /* 1336 * The defrag ratio allows a configuration of the tradeoffs between 1337 * inter node defragmentation and node local allocations. A lower 1338 * defrag_ratio increases the tendency to do local allocations 1339 * instead of attempting to obtain partial slabs from other nodes. 1340 * 1341 * If the defrag_ratio is set to 0 then kmalloc() always 1342 * returns node local objects. If the ratio is higher then kmalloc() 1343 * may return off node objects because partial slabs are obtained 1344 * from other nodes and filled up. 1345 * 1346 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes 1347 * defrag_ratio = 1000) then every (well almost) allocation will 1348 * first attempt to defrag slab caches on other nodes. This means 1349 * scanning over all nodes to look for partial slabs which may be 1350 * expensive if we do it every time we are trying to find a slab 1351 * with available objects. 1352 */ 1353 if (!s->remote_node_defrag_ratio || 1354 get_cycles() % 1024 > s->remote_node_defrag_ratio) 1355 return NULL; 1356 1357 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1358 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1359 struct kmem_cache_node *n; 1360 1361 n = get_node(s, zone_to_nid(zone)); 1362 1363 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1364 n->nr_partial > MIN_PARTIAL) { 1365 page = get_partial_node(n); 1366 if (page) 1367 return page; 1368 } 1369 } 1370 #endif 1371 return NULL; 1372 } 1373 1374 /* 1375 * Get a partial page, lock it and return it. 1376 */ 1377 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) 1378 { 1379 struct page *page; 1380 int searchnode = (node == -1) ? numa_node_id() : node; 1381 1382 page = get_partial_node(get_node(s, searchnode)); 1383 if (page || (flags & __GFP_THISNODE)) 1384 return page; 1385 1386 return get_any_partial(s, flags); 1387 } 1388 1389 /* 1390 * Move a page back to the lists. 1391 * 1392 * Must be called with the slab lock held. 1393 * 1394 * On exit the slab lock will have been dropped. 1395 */ 1396 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) 1397 { 1398 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1399 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id()); 1400 1401 ClearSlabFrozen(page); 1402 if (page->inuse) { 1403 1404 if (page->freelist) { 1405 add_partial(n, page, tail); 1406 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD); 1407 } else { 1408 stat(c, DEACTIVATE_FULL); 1409 if (SlabDebug(page) && (s->flags & SLAB_STORE_USER)) 1410 add_full(n, page); 1411 } 1412 slab_unlock(page); 1413 } else { 1414 stat(c, DEACTIVATE_EMPTY); 1415 if (n->nr_partial < MIN_PARTIAL) { 1416 /* 1417 * Adding an empty slab to the partial slabs in order 1418 * to avoid page allocator overhead. This slab needs 1419 * to come after the other slabs with objects in 1420 * so that the others get filled first. That way the 1421 * size of the partial list stays small. 1422 * 1423 * kmem_cache_shrink can reclaim any empty slabs from 1424 * the partial list. 
1425 */ 1426 add_partial(n, page, 1); 1427 slab_unlock(page); 1428 } else { 1429 slab_unlock(page); 1430 stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB); 1431 discard_slab(s, page); 1432 } 1433 } 1434 } 1435 1436 /* 1437 * Remove the cpu slab 1438 */ 1439 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1440 { 1441 struct page *page = c->page; 1442 int tail = 1; 1443 1444 if (page->freelist) 1445 stat(c, DEACTIVATE_REMOTE_FREES); 1446 /* 1447 * Merge cpu freelist into slab freelist. Typically we get here 1448 * because both freelists are empty. So this is unlikely 1449 * to occur. 1450 */ 1451 while (unlikely(c->freelist)) { 1452 void **object; 1453 1454 tail = 0; /* Hot objects. Put the slab first */ 1455 1456 /* Retrieve object from cpu_freelist */ 1457 object = c->freelist; 1458 c->freelist = c->freelist[c->offset]; 1459 1460 /* And put onto the regular freelist */ 1461 object[c->offset] = page->freelist; 1462 page->freelist = object; 1463 page->inuse--; 1464 } 1465 c->page = NULL; 1466 unfreeze_slab(s, page, tail); 1467 } 1468 1469 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 1470 { 1471 stat(c, CPUSLAB_FLUSH); 1472 slab_lock(c->page); 1473 deactivate_slab(s, c); 1474 } 1475 1476 /* 1477 * Flush cpu slab. 1478 * 1479 * Called from IPI handler with interrupts disabled. 1480 */ 1481 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1482 { 1483 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 1484 1485 if (likely(c && c->page)) 1486 flush_slab(s, c); 1487 } 1488 1489 static void flush_cpu_slab(void *d) 1490 { 1491 struct kmem_cache *s = d; 1492 1493 __flush_cpu_slab(s, smp_processor_id()); 1494 } 1495 1496 static void flush_all(struct kmem_cache *s) 1497 { 1498 #ifdef CONFIG_SMP 1499 on_each_cpu(flush_cpu_slab, s, 1); 1500 #else 1501 unsigned long flags; 1502 1503 local_irq_save(flags); 1504 flush_cpu_slab(s); 1505 local_irq_restore(flags); 1506 #endif 1507 } 1508 1509 /* 1510 * Check if the objects in a per cpu structure fit numa 1511 * locality expectations. 1512 */ 1513 static inline int node_match(struct kmem_cache_cpu *c, int node) 1514 { 1515 #ifdef CONFIG_NUMA 1516 if (node != -1 && c->node != node) 1517 return 0; 1518 #endif 1519 return 1; 1520 } 1521 1522 /* 1523 * Slow path. The lockless freelist is empty or we need to perform 1524 * debugging duties. 1525 * 1526 * Interrupts are disabled. 1527 * 1528 * Processing is still very fast if new objects have been freed to the 1529 * regular freelist. In that case we simply take over the regular freelist 1530 * as the lockless freelist and zap the regular freelist. 1531 * 1532 * If that is not working then we fall back to the partial lists. We take the 1533 * first element of the freelist as the object to allocate now and move the 1534 * rest of the freelist to the lockless freelist. 1535 * 1536 * And if we were unable to get a new slab from the partial slab lists then 1537 * we need to allocate a new slab. This is the slowest path since it involves 1538 * a call to the page allocator and the setup of a new slab. 
1539 */ 1540 static void *__slab_alloc(struct kmem_cache *s, 1541 gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c) 1542 { 1543 void **object; 1544 struct page *new; 1545 1546 /* We handle __GFP_ZERO in the caller */ 1547 gfpflags &= ~__GFP_ZERO; 1548 1549 if (!c->page) 1550 goto new_slab; 1551 1552 slab_lock(c->page); 1553 if (unlikely(!node_match(c, node))) 1554 goto another_slab; 1555 1556 stat(c, ALLOC_REFILL); 1557 1558 load_freelist: 1559 object = c->page->freelist; 1560 if (unlikely(!object)) 1561 goto another_slab; 1562 if (unlikely(SlabDebug(c->page))) 1563 goto debug; 1564 1565 c->freelist = object[c->offset]; 1566 c->page->inuse = c->page->objects; 1567 c->page->freelist = NULL; 1568 c->node = page_to_nid(c->page); 1569 unlock_out: 1570 slab_unlock(c->page); 1571 stat(c, ALLOC_SLOWPATH); 1572 return object; 1573 1574 another_slab: 1575 deactivate_slab(s, c); 1576 1577 new_slab: 1578 new = get_partial(s, gfpflags, node); 1579 if (new) { 1580 c->page = new; 1581 stat(c, ALLOC_FROM_PARTIAL); 1582 goto load_freelist; 1583 } 1584 1585 if (gfpflags & __GFP_WAIT) 1586 local_irq_enable(); 1587 1588 new = new_slab(s, gfpflags, node); 1589 1590 if (gfpflags & __GFP_WAIT) 1591 local_irq_disable(); 1592 1593 if (new) { 1594 c = get_cpu_slab(s, smp_processor_id()); 1595 stat(c, ALLOC_SLAB); 1596 if (c->page) 1597 flush_slab(s, c); 1598 slab_lock(new); 1599 SetSlabFrozen(new); 1600 c->page = new; 1601 goto load_freelist; 1602 } 1603 return NULL; 1604 debug: 1605 if (!alloc_debug_processing(s, c->page, object, addr)) 1606 goto another_slab; 1607 1608 c->page->inuse++; 1609 c->page->freelist = object[c->offset]; 1610 c->node = -1; 1611 goto unlock_out; 1612 } 1613 1614 /* 1615 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 1616 * have the fastpath folded into their functions. So no function call 1617 * overhead for requests that can be satisfied on the fastpath. 1618 * 1619 * The fastpath works by first checking if the lockless freelist can be used. 1620 * If not then __slab_alloc is called for slow processing. 1621 * 1622 * Otherwise we can simply pick the next object from the lockless free list. 1623 */ 1624 static __always_inline void *slab_alloc(struct kmem_cache *s, 1625 gfp_t gfpflags, int node, void *addr) 1626 { 1627 void **object; 1628 struct kmem_cache_cpu *c; 1629 unsigned long flags; 1630 unsigned int objsize; 1631 1632 local_irq_save(flags); 1633 c = get_cpu_slab(s, smp_processor_id()); 1634 objsize = c->objsize; 1635 if (unlikely(!c->freelist || !node_match(c, node))) 1636 1637 object = __slab_alloc(s, gfpflags, node, addr, c); 1638 1639 else { 1640 object = c->freelist; 1641 c->freelist = object[c->offset]; 1642 stat(c, ALLOC_FASTPATH); 1643 } 1644 local_irq_restore(flags); 1645 1646 if (unlikely((gfpflags & __GFP_ZERO) && object)) 1647 memset(object, 0, objsize); 1648 1649 return object; 1650 } 1651 1652 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 1653 { 1654 return slab_alloc(s, gfpflags, -1, __builtin_return_address(0)); 1655 } 1656 EXPORT_SYMBOL(kmem_cache_alloc); 1657 1658 #ifdef CONFIG_NUMA 1659 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 1660 { 1661 return slab_alloc(s, gfpflags, node, __builtin_return_address(0)); 1662 } 1663 EXPORT_SYMBOL(kmem_cache_alloc_node); 1664 #endif 1665 1666 /* 1667 * Slow patch handling. This may still be called frequently since objects 1668 * have a longer lifetime than the cpu slabs in most processing loads. 
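 * (An object is typically allocated from the then-current cpu slab but
 * freed much later, after that slab has stopped being the cpu slab, so
 * the free misses the fastpath and ends up here.)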
1669 * 1670 * So we still attempt to reduce cache line usage. Just take the slab 1671 * lock and free the item. If there is no additional partial page 1672 * handling required then we can return immediately. 1673 */ 1674 static void __slab_free(struct kmem_cache *s, struct page *page, 1675 void *x, void *addr, unsigned int offset) 1676 { 1677 void *prior; 1678 void **object = (void *)x; 1679 struct kmem_cache_cpu *c; 1680 1681 c = get_cpu_slab(s, raw_smp_processor_id()); 1682 stat(c, FREE_SLOWPATH); 1683 slab_lock(page); 1684 1685 if (unlikely(SlabDebug(page))) 1686 goto debug; 1687 1688 checks_ok: 1689 prior = object[offset] = page->freelist; 1690 page->freelist = object; 1691 page->inuse--; 1692 1693 if (unlikely(SlabFrozen(page))) { 1694 stat(c, FREE_FROZEN); 1695 goto out_unlock; 1696 } 1697 1698 if (unlikely(!page->inuse)) 1699 goto slab_empty; 1700 1701 /* 1702 * Objects left in the slab. If it was not on the partial list before 1703 * then add it. 1704 */ 1705 if (unlikely(!prior)) { 1706 add_partial(get_node(s, page_to_nid(page)), page, 1); 1707 stat(c, FREE_ADD_PARTIAL); 1708 } 1709 1710 out_unlock: 1711 slab_unlock(page); 1712 return; 1713 1714 slab_empty: 1715 if (prior) { 1716 /* 1717 * Slab still on the partial list. 1718 */ 1719 remove_partial(s, page); 1720 stat(c, FREE_REMOVE_PARTIAL); 1721 } 1722 slab_unlock(page); 1723 stat(c, FREE_SLAB); 1724 discard_slab(s, page); 1725 return; 1726 1727 debug: 1728 if (!free_debug_processing(s, page, x, addr)) 1729 goto out_unlock; 1730 goto checks_ok; 1731 } 1732 1733 /* 1734 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 1735 * can perform fastpath freeing without additional function calls. 1736 * 1737 * The fastpath is only possible if we are freeing to the current cpu slab 1738 * of this processor. This typically the case if we have just allocated 1739 * the item before. 1740 * 1741 * If fastpath is not possible then fall back to __slab_free where we deal 1742 * with all sorts of special processing. 1743 */ 1744 static __always_inline void slab_free(struct kmem_cache *s, 1745 struct page *page, void *x, void *addr) 1746 { 1747 void **object = (void *)x; 1748 struct kmem_cache_cpu *c; 1749 unsigned long flags; 1750 1751 local_irq_save(flags); 1752 c = get_cpu_slab(s, smp_processor_id()); 1753 debug_check_no_locks_freed(object, c->objsize); 1754 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1755 debug_check_no_obj_freed(object, s->objsize); 1756 if (likely(page == c->page && c->node >= 0)) { 1757 object[c->offset] = c->freelist; 1758 c->freelist = object; 1759 stat(c, FREE_FASTPATH); 1760 } else 1761 __slab_free(s, page, x, addr, c->offset); 1762 1763 local_irq_restore(flags); 1764 } 1765 1766 void kmem_cache_free(struct kmem_cache *s, void *x) 1767 { 1768 struct page *page; 1769 1770 page = virt_to_head_page(x); 1771 1772 slab_free(s, page, x, __builtin_return_address(0)); 1773 } 1774 EXPORT_SYMBOL(kmem_cache_free); 1775 1776 /* Figure out on which slab object the object resides */ 1777 static struct page *get_object_page(const void *x) 1778 { 1779 struct page *page = virt_to_head_page(x); 1780 1781 if (!PageSlab(page)) 1782 return NULL; 1783 1784 return page; 1785 } 1786 1787 /* 1788 * Object placement in a slab is made very easy because we always start at 1789 * offset 0. If we tune the size of the object to the alignment then we can 1790 * get the required alignment by putting one properly sized object after 1791 * another. 
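 *
 * For example, 256 byte objects in a slab start at offsets 0, 256, 512
 * and so on, so every object is naturally 256 byte aligned without any
 * per object padding.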
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor has always one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * may be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less of a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	if ((PAGE_SIZE << min_order) / size > 65535)
		return get_order(size * 65535) - 1;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;

	}

	return order;
}

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
	 *
	 * First we reduce the acceptable waste in a slab. Then
	 * we reduce the minimum objects required in a slab.
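	 *
	 * A rough example, assuming 4KiB pages, 700 byte objects and a
	 * modest minimum object count: an order 0 slab would leave
	 * 4096 % 700 = 596 bytes unused, more than 1/16th of the slab, so
	 * slab_order() moves on and settles on order 1, where only
	 * 8192 % 700 = 492 bytes (less than 1/16th) are wasted on eleven
	 * objects per slab.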
1883 */ 1884 min_objects = slub_min_objects; 1885 if (!min_objects) 1886 min_objects = 4 * (fls(nr_cpu_ids) + 1); 1887 while (min_objects > 1) { 1888 fraction = 16; 1889 while (fraction >= 4) { 1890 order = slab_order(size, min_objects, 1891 slub_max_order, fraction); 1892 if (order <= slub_max_order) 1893 return order; 1894 fraction /= 2; 1895 } 1896 min_objects /= 2; 1897 } 1898 1899 /* 1900 * We were unable to place multiple objects in a slab. Now 1901 * lets see if we can place a single object there. 1902 */ 1903 order = slab_order(size, 1, slub_max_order, 1); 1904 if (order <= slub_max_order) 1905 return order; 1906 1907 /* 1908 * Doh this slab cannot be placed using slub_max_order. 1909 */ 1910 order = slab_order(size, 1, MAX_ORDER, 1); 1911 if (order <= MAX_ORDER) 1912 return order; 1913 return -ENOSYS; 1914 } 1915 1916 /* 1917 * Figure out what the alignment of the objects will be. 1918 */ 1919 static unsigned long calculate_alignment(unsigned long flags, 1920 unsigned long align, unsigned long size) 1921 { 1922 /* 1923 * If the user wants hardware cache aligned objects then follow that 1924 * suggestion if the object is sufficiently large. 1925 * 1926 * The hardware cache alignment cannot override the specified 1927 * alignment though. If that is greater then use it. 1928 */ 1929 if (flags & SLAB_HWCACHE_ALIGN) { 1930 unsigned long ralign = cache_line_size(); 1931 while (size <= ralign / 2) 1932 ralign /= 2; 1933 align = max(align, ralign); 1934 } 1935 1936 if (align < ARCH_SLAB_MINALIGN) 1937 align = ARCH_SLAB_MINALIGN; 1938 1939 return ALIGN(align, sizeof(void *)); 1940 } 1941 1942 static void init_kmem_cache_cpu(struct kmem_cache *s, 1943 struct kmem_cache_cpu *c) 1944 { 1945 c->page = NULL; 1946 c->freelist = NULL; 1947 c->node = 0; 1948 c->offset = s->offset / sizeof(void *); 1949 c->objsize = s->objsize; 1950 #ifdef CONFIG_SLUB_STATS 1951 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned)); 1952 #endif 1953 } 1954 1955 static void init_kmem_cache_node(struct kmem_cache_node *n) 1956 { 1957 n->nr_partial = 0; 1958 spin_lock_init(&n->list_lock); 1959 INIT_LIST_HEAD(&n->partial); 1960 #ifdef CONFIG_SLUB_DEBUG 1961 atomic_long_set(&n->nr_slabs, 0); 1962 INIT_LIST_HEAD(&n->full); 1963 #endif 1964 } 1965 1966 #ifdef CONFIG_SMP 1967 /* 1968 * Per cpu array for per cpu structures. 1969 * 1970 * The per cpu array places all kmem_cache_cpu structures from one processor 1971 * close together meaning that it becomes possible that multiple per cpu 1972 * structures are contained in one cacheline. This may be particularly 1973 * beneficial for the kmalloc caches. 1974 * 1975 * A desktop system typically has around 60-80 slabs. With 100 here we are 1976 * likely able to get per cpu structures for all caches from the array defined 1977 * here. We must be able to cover all kmalloc caches during bootstrap. 1978 * 1979 * If the per cpu array is exhausted then fall back to kmalloc 1980 * of individual cachelines. No sharing is possible then. 
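 *
 * For illustration: a machine with 8 cpus and 70 slab caches needs
 * 8 * 70 kmem_cache_cpu structures; each cpu hands out 70 of its 100
 * statically reserved entries and the kmalloc fallback below is never
 * needed.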
1981 */ 1982 #define NR_KMEM_CACHE_CPU 100 1983 1984 static DEFINE_PER_CPU(struct kmem_cache_cpu, 1985 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 1986 1987 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 1988 static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE; 1989 1990 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s, 1991 int cpu, gfp_t flags) 1992 { 1993 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu); 1994 1995 if (c) 1996 per_cpu(kmem_cache_cpu_free, cpu) = 1997 (void *)c->freelist; 1998 else { 1999 /* Table overflow: So allocate ourselves */ 2000 c = kmalloc_node( 2001 ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()), 2002 flags, cpu_to_node(cpu)); 2003 if (!c) 2004 return NULL; 2005 } 2006 2007 init_kmem_cache_cpu(s, c); 2008 return c; 2009 } 2010 2011 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu) 2012 { 2013 if (c < per_cpu(kmem_cache_cpu, cpu) || 2014 c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) { 2015 kfree(c); 2016 return; 2017 } 2018 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu); 2019 per_cpu(kmem_cache_cpu_free, cpu) = c; 2020 } 2021 2022 static void free_kmem_cache_cpus(struct kmem_cache *s) 2023 { 2024 int cpu; 2025 2026 for_each_online_cpu(cpu) { 2027 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2028 2029 if (c) { 2030 s->cpu_slab[cpu] = NULL; 2031 free_kmem_cache_cpu(c, cpu); 2032 } 2033 } 2034 } 2035 2036 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2037 { 2038 int cpu; 2039 2040 for_each_online_cpu(cpu) { 2041 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 2042 2043 if (c) 2044 continue; 2045 2046 c = alloc_kmem_cache_cpu(s, cpu, flags); 2047 if (!c) { 2048 free_kmem_cache_cpus(s); 2049 return 0; 2050 } 2051 s->cpu_slab[cpu] = c; 2052 } 2053 return 1; 2054 } 2055 2056 /* 2057 * Initialize the per cpu array. 2058 */ 2059 static void init_alloc_cpu_cpu(int cpu) 2060 { 2061 int i; 2062 2063 if (cpu_isset(cpu, kmem_cach_cpu_free_init_once)) 2064 return; 2065 2066 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--) 2067 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu); 2068 2069 cpu_set(cpu, kmem_cach_cpu_free_init_once); 2070 } 2071 2072 static void __init init_alloc_cpu(void) 2073 { 2074 int cpu; 2075 2076 for_each_online_cpu(cpu) 2077 init_alloc_cpu_cpu(cpu); 2078 } 2079 2080 #else 2081 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {} 2082 static inline void init_alloc_cpu(void) {} 2083 2084 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) 2085 { 2086 init_kmem_cache_cpu(s, &s->cpu_slab); 2087 return 1; 2088 } 2089 #endif 2090 2091 #ifdef CONFIG_NUMA 2092 /* 2093 * No kmalloc_node yet so do it by hand. We know that this is the first 2094 * slab on the node for this slabcache. There are no concurrent accesses 2095 * possible. 2096 * 2097 * Note that this function only works on the kmalloc_node_cache 2098 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2099 * memory on a fresh node that has no slab structures yet. 
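 *
 * In effect the first slab allocated for the kmalloc_node cache donates
 * one of its own objects to act as the kmem_cache_node for that node:
 * the object is taken directly off page->freelist below because
 * kmem_cache_alloc_node() cannot be used this early.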
2100 */ 2101 static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, 2102 int node) 2103 { 2104 struct page *page; 2105 struct kmem_cache_node *n; 2106 unsigned long flags; 2107 2108 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); 2109 2110 page = new_slab(kmalloc_caches, gfpflags, node); 2111 2112 BUG_ON(!page); 2113 if (page_to_nid(page) != node) { 2114 printk(KERN_ERR "SLUB: Unable to allocate memory from " 2115 "node %d\n", node); 2116 printk(KERN_ERR "SLUB: Allocating a useless per node structure " 2117 "in order to be able to continue\n"); 2118 } 2119 2120 n = page->freelist; 2121 BUG_ON(!n); 2122 page->freelist = get_freepointer(kmalloc_caches, n); 2123 page->inuse++; 2124 kmalloc_caches->node[node] = n; 2125 #ifdef CONFIG_SLUB_DEBUG 2126 init_object(kmalloc_caches, n, 1); 2127 init_tracking(kmalloc_caches, n); 2128 #endif 2129 init_kmem_cache_node(n); 2130 inc_slabs_node(kmalloc_caches, node, page->objects); 2131 2132 /* 2133 * lockdep requires consistent irq usage for each lock 2134 * so even though there cannot be a race this early in 2135 * the boot sequence, we still disable irqs. 2136 */ 2137 local_irq_save(flags); 2138 add_partial(n, page, 0); 2139 local_irq_restore(flags); 2140 return n; 2141 } 2142 2143 static void free_kmem_cache_nodes(struct kmem_cache *s) 2144 { 2145 int node; 2146 2147 for_each_node_state(node, N_NORMAL_MEMORY) { 2148 struct kmem_cache_node *n = s->node[node]; 2149 if (n && n != &s->local_node) 2150 kmem_cache_free(kmalloc_caches, n); 2151 s->node[node] = NULL; 2152 } 2153 } 2154 2155 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2156 { 2157 int node; 2158 int local_node; 2159 2160 if (slab_state >= UP) 2161 local_node = page_to_nid(virt_to_page(s)); 2162 else 2163 local_node = 0; 2164 2165 for_each_node_state(node, N_NORMAL_MEMORY) { 2166 struct kmem_cache_node *n; 2167 2168 if (local_node == node) 2169 n = &s->local_node; 2170 else { 2171 if (slab_state == DOWN) { 2172 n = early_kmem_cache_node_alloc(gfpflags, 2173 node); 2174 continue; 2175 } 2176 n = kmem_cache_alloc_node(kmalloc_caches, 2177 gfpflags, node); 2178 2179 if (!n) { 2180 free_kmem_cache_nodes(s); 2181 return 0; 2182 } 2183 2184 } 2185 s->node[node] = n; 2186 init_kmem_cache_node(n); 2187 } 2188 return 1; 2189 } 2190 #else 2191 static void free_kmem_cache_nodes(struct kmem_cache *s) 2192 { 2193 } 2194 2195 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2196 { 2197 init_kmem_cache_node(&s->local_node); 2198 return 1; 2199 } 2200 #endif 2201 2202 /* 2203 * calculate_sizes() determines the order and the distribution of data within 2204 * a slab object. 2205 */ 2206 static int calculate_sizes(struct kmem_cache *s, int forced_order) 2207 { 2208 unsigned long flags = s->flags; 2209 unsigned long size = s->objsize; 2210 unsigned long align = s->align; 2211 int order; 2212 2213 /* 2214 * Round up object size to the next word boundary. We can only 2215 * place the free pointer at word boundaries and this determines 2216 * the possible location of the free pointer. 2217 */ 2218 size = ALIGN(size, sizeof(void *)); 2219 2220 #ifdef CONFIG_SLUB_DEBUG 2221 /* 2222 * Determine if we can poison the object itself. If the user of 2223 * the slab may touch the object after free or before allocation 2224 * then we should never poison the object itself. 
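 *
 * For example, objects in a SLAB_DESTROY_BY_RCU cache may legitimately
 * still be read after kmem_cache_free() until the RCU grace period has
 * passed, and objects of a cache with a constructor must keep their
 * constructed state across a free/alloc cycle. In both cases writing a
 * poison pattern over the object would corrupt data that is still needed.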
2225 */ 2226 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 2227 !s->ctor) 2228 s->flags |= __OBJECT_POISON; 2229 else 2230 s->flags &= ~__OBJECT_POISON; 2231 2232 2233 /* 2234 * If we are Redzoning then check if there is some space between the 2235 * end of the object and the free pointer. If not then add an 2236 * additional word to have some bytes to store Redzone information. 2237 */ 2238 if ((flags & SLAB_RED_ZONE) && size == s->objsize) 2239 size += sizeof(void *); 2240 #endif 2241 2242 /* 2243 * With that we have determined the number of bytes in actual use 2244 * by the object. This is the potential offset to the free pointer. 2245 */ 2246 s->inuse = size; 2247 2248 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 2249 s->ctor)) { 2250 /* 2251 * Relocate free pointer after the object if it is not 2252 * permitted to overwrite the first word of the object on 2253 * kmem_cache_free. 2254 * 2255 * This is the case if we do RCU, have a constructor or 2256 * destructor or are poisoning the objects. 2257 */ 2258 s->offset = size; 2259 size += sizeof(void *); 2260 } 2261 2262 #ifdef CONFIG_SLUB_DEBUG 2263 if (flags & SLAB_STORE_USER) 2264 /* 2265 * Need to store information about allocs and frees after 2266 * the object. 2267 */ 2268 size += 2 * sizeof(struct track); 2269 2270 if (flags & SLAB_RED_ZONE) 2271 /* 2272 * Add some empty padding so that we can catch 2273 * overwrites from earlier objects rather than let 2274 * tracking information or the free pointer be 2275 * corrupted if an user writes before the start 2276 * of the object. 2277 */ 2278 size += sizeof(void *); 2279 #endif 2280 2281 /* 2282 * Determine the alignment based on various parameters that the 2283 * user specified and the dynamic determination of cache line size 2284 * on bootup. 2285 */ 2286 align = calculate_alignment(flags, align, s->objsize); 2287 2288 /* 2289 * SLUB stores one object immediately after another beginning from 2290 * offset 0. In order to align the objects we have to simply size 2291 * each object to conform to the alignment. 
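 *
 * An illustrative layout (assuming 64 bit pointers, 64 byte cache lines
 * and no debug options): a cache with a 52 byte objsize, a constructor
 * and SLAB_HWCACHE_ALIGN is first rounded up to 56 bytes, the free
 * pointer is then placed behind the object (s->offset = 56, raising the
 * size to 64) and the cache line alignment keeps s->size at 64, so the
 * objects pack exactly one per cache line with s->inuse = 56.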
2292 */ 2293 size = ALIGN(size, align); 2294 s->size = size; 2295 if (forced_order >= 0) 2296 order = forced_order; 2297 else 2298 order = calculate_order(size); 2299 2300 if (order < 0) 2301 return 0; 2302 2303 s->allocflags = 0; 2304 if (order) 2305 s->allocflags |= __GFP_COMP; 2306 2307 if (s->flags & SLAB_CACHE_DMA) 2308 s->allocflags |= SLUB_DMA; 2309 2310 if (s->flags & SLAB_RECLAIM_ACCOUNT) 2311 s->allocflags |= __GFP_RECLAIMABLE; 2312 2313 /* 2314 * Determine the number of objects per slab 2315 */ 2316 s->oo = oo_make(order, size); 2317 s->min = oo_make(get_order(size), size); 2318 if (oo_objects(s->oo) > oo_objects(s->max)) 2319 s->max = s->oo; 2320 2321 return !!oo_objects(s->oo); 2322 2323 } 2324 2325 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 2326 const char *name, size_t size, 2327 size_t align, unsigned long flags, 2328 void (*ctor)(struct kmem_cache *, void *)) 2329 { 2330 memset(s, 0, kmem_size); 2331 s->name = name; 2332 s->ctor = ctor; 2333 s->objsize = size; 2334 s->align = align; 2335 s->flags = kmem_cache_flags(size, flags, name, ctor); 2336 2337 if (!calculate_sizes(s, -1)) 2338 goto error; 2339 2340 s->refcount = 1; 2341 #ifdef CONFIG_NUMA 2342 s->remote_node_defrag_ratio = 100; 2343 #endif 2344 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) 2345 goto error; 2346 2347 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) 2348 return 1; 2349 free_kmem_cache_nodes(s); 2350 error: 2351 if (flags & SLAB_PANIC) 2352 panic("Cannot create slab %s size=%lu realsize=%u " 2353 "order=%u offset=%u flags=%lx\n", 2354 s->name, (unsigned long)size, s->size, oo_order(s->oo), 2355 s->offset, flags); 2356 return 0; 2357 } 2358 2359 /* 2360 * Check if a given pointer is valid 2361 */ 2362 int kmem_ptr_validate(struct kmem_cache *s, const void *object) 2363 { 2364 struct page *page; 2365 2366 page = get_object_page(object); 2367 2368 if (!page || s != page->slab) 2369 /* No slab or wrong slab */ 2370 return 0; 2371 2372 if (!check_valid_pointer(s, page, object)) 2373 return 0; 2374 2375 /* 2376 * We could also check if the object is on the slabs freelist. 2377 * But this would be too expensive and it seems that the main 2378 * purpose of kmem_ptr_valid() is to check if the object belongs 2379 * to a certain slab. 2380 */ 2381 return 1; 2382 } 2383 EXPORT_SYMBOL(kmem_ptr_validate); 2384 2385 /* 2386 * Determine the size of a slab object 2387 */ 2388 unsigned int kmem_cache_size(struct kmem_cache *s) 2389 { 2390 return s->objsize; 2391 } 2392 EXPORT_SYMBOL(kmem_cache_size); 2393 2394 const char *kmem_cache_name(struct kmem_cache *s) 2395 { 2396 return s->name; 2397 } 2398 EXPORT_SYMBOL(kmem_cache_name); 2399 2400 static void list_slab_objects(struct kmem_cache *s, struct page *page, 2401 const char *text) 2402 { 2403 #ifdef CONFIG_SLUB_DEBUG 2404 void *addr = page_address(page); 2405 void *p; 2406 DECLARE_BITMAP(map, page->objects); 2407 2408 bitmap_zero(map, page->objects); 2409 slab_err(s, page, "%s", text); 2410 slab_lock(page); 2411 for_each_free_object(p, s, page->freelist) 2412 set_bit(slab_index(p, s, addr), map); 2413 2414 for_each_object(p, s, addr, page->objects) { 2415 2416 if (!test_bit(slab_index(p, s, addr), map)) { 2417 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n", 2418 p, p - addr); 2419 print_tracking(s, p); 2420 } 2421 } 2422 slab_unlock(page); 2423 #endif 2424 } 2425 2426 /* 2427 * Attempt to free all partial slabs on a node. 
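 *
 * Only empty slabs can actually be discarded here. Slabs that still
 * contain objects are left on the partial list and merely reported via
 * list_slab_objects(), which makes kmem_cache_close() fail.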
2428 */ 2429 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 2430 { 2431 unsigned long flags; 2432 struct page *page, *h; 2433 2434 spin_lock_irqsave(&n->list_lock, flags); 2435 list_for_each_entry_safe(page, h, &n->partial, lru) { 2436 if (!page->inuse) { 2437 list_del(&page->lru); 2438 discard_slab(s, page); 2439 n->nr_partial--; 2440 } else { 2441 list_slab_objects(s, page, 2442 "Objects remaining on kmem_cache_close()"); 2443 } 2444 } 2445 spin_unlock_irqrestore(&n->list_lock, flags); 2446 } 2447 2448 /* 2449 * Release all resources used by a slab cache. 2450 */ 2451 static inline int kmem_cache_close(struct kmem_cache *s) 2452 { 2453 int node; 2454 2455 flush_all(s); 2456 2457 /* Attempt to free all objects */ 2458 free_kmem_cache_cpus(s); 2459 for_each_node_state(node, N_NORMAL_MEMORY) { 2460 struct kmem_cache_node *n = get_node(s, node); 2461 2462 free_partial(s, n); 2463 if (n->nr_partial || slabs_node(s, node)) 2464 return 1; 2465 } 2466 free_kmem_cache_nodes(s); 2467 return 0; 2468 } 2469 2470 /* 2471 * Close a cache and release the kmem_cache structure 2472 * (must be used for caches created using kmem_cache_create) 2473 */ 2474 void kmem_cache_destroy(struct kmem_cache *s) 2475 { 2476 down_write(&slub_lock); 2477 s->refcount--; 2478 if (!s->refcount) { 2479 list_del(&s->list); 2480 up_write(&slub_lock); 2481 if (kmem_cache_close(s)) { 2482 printk(KERN_ERR "SLUB %s: %s called for cache that " 2483 "still has objects.\n", s->name, __func__); 2484 dump_stack(); 2485 } 2486 sysfs_slab_remove(s); 2487 } else 2488 up_write(&slub_lock); 2489 } 2490 EXPORT_SYMBOL(kmem_cache_destroy); 2491 2492 /******************************************************************** 2493 * Kmalloc subsystem 2494 *******************************************************************/ 2495 2496 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned; 2497 EXPORT_SYMBOL(kmalloc_caches); 2498 2499 static int __init setup_slub_min_order(char *str) 2500 { 2501 get_option(&str, &slub_min_order); 2502 2503 return 1; 2504 } 2505 2506 __setup("slub_min_order=", setup_slub_min_order); 2507 2508 static int __init setup_slub_max_order(char *str) 2509 { 2510 get_option(&str, &slub_max_order); 2511 2512 return 1; 2513 } 2514 2515 __setup("slub_max_order=", setup_slub_max_order); 2516 2517 static int __init setup_slub_min_objects(char *str) 2518 { 2519 get_option(&str, &slub_min_objects); 2520 2521 return 1; 2522 } 2523 2524 __setup("slub_min_objects=", setup_slub_min_objects); 2525 2526 static int __init setup_slub_nomerge(char *str) 2527 { 2528 slub_nomerge = 1; 2529 return 1; 2530 } 2531 2532 __setup("slub_nomerge", setup_slub_nomerge); 2533 2534 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, 2535 const char *name, int size, gfp_t gfp_flags) 2536 { 2537 unsigned int flags = 0; 2538 2539 if (gfp_flags & SLUB_DMA) 2540 flags = SLAB_CACHE_DMA; 2541 2542 down_write(&slub_lock); 2543 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2544 flags, NULL)) 2545 goto panic; 2546 2547 list_add(&s->list, &slab_caches); 2548 up_write(&slub_lock); 2549 if (sysfs_slab_add(s)) 2550 goto panic; 2551 return s; 2552 2553 panic: 2554 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2555 } 2556 2557 #ifdef CONFIG_ZONE_DMA 2558 static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1]; 2559 2560 static void sysfs_add_func(struct work_struct *w) 2561 { 2562 struct kmem_cache *s; 2563 2564 down_write(&slub_lock); 2565 list_for_each_entry(s, 
&slab_caches, list) { 2566 if (s->flags & __SYSFS_ADD_DEFERRED) { 2567 s->flags &= ~__SYSFS_ADD_DEFERRED; 2568 sysfs_slab_add(s); 2569 } 2570 } 2571 up_write(&slub_lock); 2572 } 2573 2574 static DECLARE_WORK(sysfs_add_work, sysfs_add_func); 2575 2576 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) 2577 { 2578 struct kmem_cache *s; 2579 char *text; 2580 size_t realsize; 2581 2582 s = kmalloc_caches_dma[index]; 2583 if (s) 2584 return s; 2585 2586 /* Dynamically create dma cache */ 2587 if (flags & __GFP_WAIT) 2588 down_write(&slub_lock); 2589 else { 2590 if (!down_write_trylock(&slub_lock)) 2591 goto out; 2592 } 2593 2594 if (kmalloc_caches_dma[index]) 2595 goto unlock_out; 2596 2597 realsize = kmalloc_caches[index].objsize; 2598 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", 2599 (unsigned int)realsize); 2600 s = kmalloc(kmem_size, flags & ~SLUB_DMA); 2601 2602 if (!s || !text || !kmem_cache_open(s, flags, text, 2603 realsize, ARCH_KMALLOC_MINALIGN, 2604 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) { 2605 kfree(s); 2606 kfree(text); 2607 goto unlock_out; 2608 } 2609 2610 list_add(&s->list, &slab_caches); 2611 kmalloc_caches_dma[index] = s; 2612 2613 schedule_work(&sysfs_add_work); 2614 2615 unlock_out: 2616 up_write(&slub_lock); 2617 out: 2618 return kmalloc_caches_dma[index]; 2619 } 2620 #endif 2621 2622 /* 2623 * Conversion table for small slabs sizes / 8 to the index in the 2624 * kmalloc array. This is necessary for slabs < 192 since we have non power 2625 * of two cache sizes there. The size of larger slabs can be determined using 2626 * fls. 2627 */ 2628 static s8 size_index[24] = { 2629 3, /* 8 */ 2630 4, /* 16 */ 2631 5, /* 24 */ 2632 5, /* 32 */ 2633 6, /* 40 */ 2634 6, /* 48 */ 2635 6, /* 56 */ 2636 6, /* 64 */ 2637 1, /* 72 */ 2638 1, /* 80 */ 2639 1, /* 88 */ 2640 1, /* 96 */ 2641 7, /* 104 */ 2642 7, /* 112 */ 2643 7, /* 120 */ 2644 7, /* 128 */ 2645 2, /* 136 */ 2646 2, /* 144 */ 2647 2, /* 152 */ 2648 2, /* 160 */ 2649 2, /* 168 */ 2650 2, /* 176 */ 2651 2, /* 184 */ 2652 2 /* 192 */ 2653 }; 2654 2655 static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2656 { 2657 int index; 2658 2659 if (size <= 192) { 2660 if (!size) 2661 return ZERO_SIZE_PTR; 2662 2663 index = size_index[(size - 1) / 8]; 2664 } else 2665 index = fls(size - 1); 2666 2667 #ifdef CONFIG_ZONE_DMA 2668 if (unlikely((flags & SLUB_DMA))) 2669 return dma_kmalloc_cache(index, flags); 2670 2671 #endif 2672 return &kmalloc_caches[index]; 2673 } 2674 2675 void *__kmalloc(size_t size, gfp_t flags) 2676 { 2677 struct kmem_cache *s; 2678 2679 if (unlikely(size > PAGE_SIZE)) 2680 return kmalloc_large(size, flags); 2681 2682 s = get_slab(size, flags); 2683 2684 if (unlikely(ZERO_OR_NULL_PTR(s))) 2685 return s; 2686 2687 return slab_alloc(s, flags, -1, __builtin_return_address(0)); 2688 } 2689 EXPORT_SYMBOL(__kmalloc); 2690 2691 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2692 { 2693 struct page *page = alloc_pages_node(node, flags | __GFP_COMP, 2694 get_order(size)); 2695 2696 if (page) 2697 return page_address(page); 2698 else 2699 return NULL; 2700 } 2701 2702 #ifdef CONFIG_NUMA 2703 void *__kmalloc_node(size_t size, gfp_t flags, int node) 2704 { 2705 struct kmem_cache *s; 2706 2707 if (unlikely(size > PAGE_SIZE)) 2708 return kmalloc_large_node(size, flags, node); 2709 2710 s = get_slab(size, flags); 2711 2712 if (unlikely(ZERO_OR_NULL_PTR(s))) 2713 return s; 2714 2715 return slab_alloc(s, flags, node, __builtin_return_address(0)); 2716 } 2717 
EXPORT_SYMBOL(__kmalloc_node); 2718 #endif 2719 2720 size_t ksize(const void *object) 2721 { 2722 struct page *page; 2723 struct kmem_cache *s; 2724 2725 if (unlikely(object == ZERO_SIZE_PTR)) 2726 return 0; 2727 2728 page = virt_to_head_page(object); 2729 2730 if (unlikely(!PageSlab(page))) { 2731 WARN_ON(!PageCompound(page)); 2732 return PAGE_SIZE << compound_order(page); 2733 } 2734 s = page->slab; 2735 2736 #ifdef CONFIG_SLUB_DEBUG 2737 /* 2738 * Debugging requires use of the padding between object 2739 * and whatever may come after it. 2740 */ 2741 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) 2742 return s->objsize; 2743 2744 #endif 2745 /* 2746 * If we have the need to store the freelist pointer 2747 * back there or track user information then we can 2748 * only use the space before that information. 2749 */ 2750 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) 2751 return s->inuse; 2752 /* 2753 * Else we can use all the padding etc for the allocation 2754 */ 2755 return s->size; 2756 } 2757 EXPORT_SYMBOL(ksize); 2758 2759 void kfree(const void *x) 2760 { 2761 struct page *page; 2762 void *object = (void *)x; 2763 2764 if (unlikely(ZERO_OR_NULL_PTR(x))) 2765 return; 2766 2767 page = virt_to_head_page(x); 2768 if (unlikely(!PageSlab(page))) { 2769 BUG_ON(!PageCompound(page)); 2770 put_page(page); 2771 return; 2772 } 2773 slab_free(page->slab, page, object, __builtin_return_address(0)); 2774 } 2775 EXPORT_SYMBOL(kfree); 2776 2777 /* 2778 * kmem_cache_shrink removes empty slabs from the partial lists and sorts 2779 * the remaining slabs by the number of items in use. The slabs with the 2780 * most items in use come first. New allocations will then fill those up 2781 * and thus they can be removed from the partial lists. 2782 * 2783 * The slabs with the least items are placed last. This results in them 2784 * being allocated from last increasing the chance that the last objects 2785 * are freed in them. 2786 */ 2787 int kmem_cache_shrink(struct kmem_cache *s) 2788 { 2789 int node; 2790 int i; 2791 struct kmem_cache_node *n; 2792 struct page *page; 2793 struct page *t; 2794 int objects = oo_objects(s->max); 2795 struct list_head *slabs_by_inuse = 2796 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); 2797 unsigned long flags; 2798 2799 if (!slabs_by_inuse) 2800 return -ENOMEM; 2801 2802 flush_all(s); 2803 for_each_node_state(node, N_NORMAL_MEMORY) { 2804 n = get_node(s, node); 2805 2806 if (!n->nr_partial) 2807 continue; 2808 2809 for (i = 0; i < objects; i++) 2810 INIT_LIST_HEAD(slabs_by_inuse + i); 2811 2812 spin_lock_irqsave(&n->list_lock, flags); 2813 2814 /* 2815 * Build lists indexed by the items in use in each slab. 2816 * 2817 * Note that concurrent frees may occur while we hold the 2818 * list_lock. page->inuse here is the upper limit. 2819 */ 2820 list_for_each_entry_safe(page, t, &n->partial, lru) { 2821 if (!page->inuse && slab_trylock(page)) { 2822 /* 2823 * Must hold slab lock here because slab_free 2824 * may have freed the last object and be 2825 * waiting to release the slab. 2826 */ 2827 list_del(&page->lru); 2828 n->nr_partial--; 2829 slab_unlock(page); 2830 discard_slab(s, page); 2831 } else { 2832 list_move(&page->lru, 2833 slabs_by_inuse + page->inuse); 2834 } 2835 } 2836 2837 /* 2838 * Rebuild the partial list with the slabs filled up most 2839 * first and the least used slabs at the end. 
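 *
 * A hypothetical example: with 8 objects per slab and partial slabs
 * holding 7, 2, 7 and 5 objects, the scan above files them into the
 * buckets for inuse counts 7, 2, 7 and 5; splicing the buckets back
 * from objects - 1 down to 0 rebuilds the partial list as 7, 7, 5, 2,
 * i.e. the nearly full slabs are handed out first and the nearly empty
 * ones wait at the tail where they have the best chance to drain.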
2840 */ 2841 for (i = objects - 1; i >= 0; i--) 2842 list_splice(slabs_by_inuse + i, n->partial.prev); 2843 2844 spin_unlock_irqrestore(&n->list_lock, flags); 2845 } 2846 2847 kfree(slabs_by_inuse); 2848 return 0; 2849 } 2850 EXPORT_SYMBOL(kmem_cache_shrink); 2851 2852 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 2853 static int slab_mem_going_offline_callback(void *arg) 2854 { 2855 struct kmem_cache *s; 2856 2857 down_read(&slub_lock); 2858 list_for_each_entry(s, &slab_caches, list) 2859 kmem_cache_shrink(s); 2860 up_read(&slub_lock); 2861 2862 return 0; 2863 } 2864 2865 static void slab_mem_offline_callback(void *arg) 2866 { 2867 struct kmem_cache_node *n; 2868 struct kmem_cache *s; 2869 struct memory_notify *marg = arg; 2870 int offline_node; 2871 2872 offline_node = marg->status_change_nid; 2873 2874 /* 2875 * If the node still has available memory then we still need its 2876 * kmem_cache_node, so there is nothing to do here. 2877 */ 2878 if (offline_node < 0) 2879 return; 2880 2881 down_read(&slub_lock); 2882 list_for_each_entry(s, &slab_caches, list) { 2883 n = get_node(s, offline_node); 2884 if (n) { 2885 /* 2886 * if n->nr_slabs > 0, slabs still exist on the node 2887 * that is going down. We were unable to free them, 2888 * and the offline_pages() function shouldn't call this 2889 * callback. So, we must fail. 2890 */ 2891 BUG_ON(slabs_node(s, offline_node)); 2892 2893 s->node[offline_node] = NULL; 2894 kmem_cache_free(kmalloc_caches, n); 2895 } 2896 } 2897 up_read(&slub_lock); 2898 } 2899 2900 static int slab_mem_going_online_callback(void *arg) 2901 { 2902 struct kmem_cache_node *n; 2903 struct kmem_cache *s; 2904 struct memory_notify *marg = arg; 2905 int nid = marg->status_change_nid; 2906 int ret = 0; 2907 2908 /* 2909 * If the node's memory is already available, then kmem_cache_node is 2910 * already created. Nothing to do. 2911 */ 2912 if (nid < 0) 2913 return 0; 2914 2915 /* 2916 * We are bringing a node online. No memory is available yet. We must 2917 * allocate a kmem_cache_node structure in order to bring the node 2918 * online. 2919 */ 2920 down_read(&slub_lock); 2921 list_for_each_entry(s, &slab_caches, list) { 2922 /* 2923 * XXX: kmem_cache_alloc_node will fall back to other nodes 2924 * since memory is not yet available from the node that 2925 * is brought up.
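 *
 * That is presumably why plain kmem_cache_alloc() is used below; the
 * resulting kmem_cache_node may live on a different node, which is
 * tolerable for a rarely touched management structure.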
2926 */ 2927 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); 2928 if (!n) { 2929 ret = -ENOMEM; 2930 goto out; 2931 } 2932 init_kmem_cache_node(n); 2933 s->node[nid] = n; 2934 } 2935 out: 2936 up_read(&slub_lock); 2937 return ret; 2938 } 2939 2940 static int slab_memory_callback(struct notifier_block *self, 2941 unsigned long action, void *arg) 2942 { 2943 int ret = 0; 2944 2945 switch (action) { 2946 case MEM_GOING_ONLINE: 2947 ret = slab_mem_going_online_callback(arg); 2948 break; 2949 case MEM_GOING_OFFLINE: 2950 ret = slab_mem_going_offline_callback(arg); 2951 break; 2952 case MEM_OFFLINE: 2953 case MEM_CANCEL_ONLINE: 2954 slab_mem_offline_callback(arg); 2955 break; 2956 case MEM_ONLINE: 2957 case MEM_CANCEL_OFFLINE: 2958 break; 2959 } 2960 2961 ret = notifier_from_errno(ret); 2962 return ret; 2963 } 2964 2965 #endif /* CONFIG_MEMORY_HOTPLUG */ 2966 2967 /******************************************************************** 2968 * Basic setup of slabs 2969 *******************************************************************/ 2970 2971 void __init kmem_cache_init(void) 2972 { 2973 int i; 2974 int caches = 0; 2975 2976 init_alloc_cpu(); 2977 2978 #ifdef CONFIG_NUMA 2979 /* 2980 * Must first have the slab cache available for the allocations of the 2981 * struct kmem_cache_node's. There is special bootstrap code in 2982 * kmem_cache_open for slab_state == DOWN. 2983 */ 2984 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", 2985 sizeof(struct kmem_cache_node), GFP_KERNEL); 2986 kmalloc_caches[0].refcount = -1; 2987 caches++; 2988 2989 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 2990 #endif 2991 2992 /* Able to allocate the per node structures */ 2993 slab_state = PARTIAL; 2994 2995 /* Caches that are not of the two-to-the-power-of size */ 2996 if (KMALLOC_MIN_SIZE <= 64) { 2997 create_kmalloc_cache(&kmalloc_caches[1], 2998 "kmalloc-96", 96, GFP_KERNEL); 2999 caches++; 3000 create_kmalloc_cache(&kmalloc_caches[2], 3001 "kmalloc-192", 192, GFP_KERNEL); 3002 caches++; 3003 } 3004 3005 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) { 3006 create_kmalloc_cache(&kmalloc_caches[i], 3007 "kmalloc", 1 << i, GFP_KERNEL); 3008 caches++; 3009 } 3010 3011 3012 /* 3013 * Patch up the size_index table if we have strange large alignment 3014 * requirements for the kmalloc array. This is only the case for 3015 * MIPS it seems. The standard arches will not generate any code here. 3016 * 3017 * Largest permitted alignment is 256 bytes due to the way we 3018 * handle the index determination for the smaller caches. 3019 * 3020 * Make sure that nothing crazy happens if someone starts tinkering 3021 * around with ARCH_KMALLOC_MINALIGN 3022 */ 3023 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || 3024 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); 3025 3026 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) 3027 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW; 3028 3029 if (KMALLOC_MIN_SIZE == 128) { 3030 /* 3031 * The 192 byte sized cache is not used if the alignment 3032 * is 128 byte. Redirect kmalloc to use the 256 byte cache 3033 * instead. 3034 */ 3035 for (i = 128 + 8; i <= 192; i += 8) 3036 size_index[(i - 1) / 8] = 8; 3037 } 3038 3039 slab_state = UP; 3040 3041 /* Provide the correct kmalloc names now that the caches are up */ 3042 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) 3043 kmalloc_caches[i]. 
name = 3044 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); 3045 3046 #ifdef CONFIG_SMP 3047 register_cpu_notifier(&slab_notifier); 3048 kmem_size = offsetof(struct kmem_cache, cpu_slab) + 3049 nr_cpu_ids * sizeof(struct kmem_cache_cpu *); 3050 #else 3051 kmem_size = sizeof(struct kmem_cache); 3052 #endif 3053 3054 printk(KERN_INFO 3055 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," 3056 " CPUs=%d, Nodes=%d\n", 3057 caches, cache_line_size(), 3058 slub_min_order, slub_max_order, slub_min_objects, 3059 nr_cpu_ids, nr_node_ids); 3060 } 3061 3062 /* 3063 * Find a mergeable slab cache 3064 */ 3065 static int slab_unmergeable(struct kmem_cache *s) 3066 { 3067 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 3068 return 1; 3069 3070 if (s->ctor) 3071 return 1; 3072 3073 /* 3074 * We may have set a slab to be unmergeable during bootstrap. 3075 */ 3076 if (s->refcount < 0) 3077 return 1; 3078 3079 return 0; 3080 } 3081 3082 static struct kmem_cache *find_mergeable(size_t size, 3083 size_t align, unsigned long flags, const char *name, 3084 void (*ctor)(struct kmem_cache *, void *)) 3085 { 3086 struct kmem_cache *s; 3087 3088 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 3089 return NULL; 3090 3091 if (ctor) 3092 return NULL; 3093 3094 size = ALIGN(size, sizeof(void *)); 3095 align = calculate_alignment(flags, align, size); 3096 size = ALIGN(size, align); 3097 flags = kmem_cache_flags(size, flags, name, NULL); 3098 3099 list_for_each_entry(s, &slab_caches, list) { 3100 if (slab_unmergeable(s)) 3101 continue; 3102 3103 if (size > s->size) 3104 continue; 3105 3106 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) 3107 continue; 3108 /* 3109 * Check if alignment is compatible. 3110 * Courtesy of Adrian Drzewiecki 3111 */ 3112 if ((s->size & ~(align - 1)) != s->size) 3113 continue; 3114 3115 if (s->size - size >= sizeof(void *)) 3116 continue; 3117 3118 return s; 3119 } 3120 return NULL; 3121 } 3122 3123 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 3124 size_t align, unsigned long flags, 3125 void (*ctor)(struct kmem_cache *, void *)) 3126 { 3127 struct kmem_cache *s; 3128 3129 down_write(&slub_lock); 3130 s = find_mergeable(size, align, flags, name, ctor); 3131 if (s) { 3132 int cpu; 3133 3134 s->refcount++; 3135 /* 3136 * Adjust the object sizes so that we clear 3137 * the complete object on kzalloc. 3138 */ 3139 s->objsize = max(s->objsize, (int)size); 3140 3141 /* 3142 * And then we need to update the object size in the 3143 * per cpu structures 3144 */ 3145 for_each_online_cpu(cpu) 3146 get_cpu_slab(s, cpu)->objsize = s->objsize; 3147 3148 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 3149 up_write(&slub_lock); 3150 3151 if (sysfs_slab_alias(s, name)) 3152 goto err; 3153 return s; 3154 } 3155 3156 s = kmalloc(kmem_size, GFP_KERNEL); 3157 if (s) { 3158 if (kmem_cache_open(s, GFP_KERNEL, name, 3159 size, align, flags, ctor)) { 3160 list_add(&s->list, &slab_caches); 3161 up_write(&slub_lock); 3162 if (sysfs_slab_add(s)) 3163 goto err; 3164 return s; 3165 } 3166 kfree(s); 3167 } 3168 up_write(&slub_lock); 3169 3170 err: 3171 if (flags & SLAB_PANIC) 3172 panic("Cannot create slabcache %s\n", name); 3173 else 3174 s = NULL; 3175 return s; 3176 } 3177 EXPORT_SYMBOL(kmem_cache_create); 3178 3179 #ifdef CONFIG_SMP 3180 /* 3181 * Use the cpu notifier to insure that the cpu slabs are flushed when 3182 * necessary. 
3183 */ 3184 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3185 unsigned long action, void *hcpu) 3186 { 3187 long cpu = (long)hcpu; 3188 struct kmem_cache *s; 3189 unsigned long flags; 3190 3191 switch (action) { 3192 case CPU_UP_PREPARE: 3193 case CPU_UP_PREPARE_FROZEN: 3194 init_alloc_cpu_cpu(cpu); 3195 down_read(&slub_lock); 3196 list_for_each_entry(s, &slab_caches, list) 3197 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu, 3198 GFP_KERNEL); 3199 up_read(&slub_lock); 3200 break; 3201 3202 case CPU_UP_CANCELED: 3203 case CPU_UP_CANCELED_FROZEN: 3204 case CPU_DEAD: 3205 case CPU_DEAD_FROZEN: 3206 down_read(&slub_lock); 3207 list_for_each_entry(s, &slab_caches, list) { 3208 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3209 3210 local_irq_save(flags); 3211 __flush_cpu_slab(s, cpu); 3212 local_irq_restore(flags); 3213 free_kmem_cache_cpu(c, cpu); 3214 s->cpu_slab[cpu] = NULL; 3215 } 3216 up_read(&slub_lock); 3217 break; 3218 default: 3219 break; 3220 } 3221 return NOTIFY_OK; 3222 } 3223 3224 static struct notifier_block __cpuinitdata slab_notifier = { 3225 .notifier_call = slab_cpuup_callback 3226 }; 3227 3228 #endif 3229 3230 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller) 3231 { 3232 struct kmem_cache *s; 3233 3234 if (unlikely(size > PAGE_SIZE)) 3235 return kmalloc_large(size, gfpflags); 3236 3237 s = get_slab(size, gfpflags); 3238 3239 if (unlikely(ZERO_OR_NULL_PTR(s))) 3240 return s; 3241 3242 return slab_alloc(s, gfpflags, -1, caller); 3243 } 3244 3245 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 3246 int node, void *caller) 3247 { 3248 struct kmem_cache *s; 3249 3250 if (unlikely(size > PAGE_SIZE)) 3251 return kmalloc_large_node(size, gfpflags, node); 3252 3253 s = get_slab(size, gfpflags); 3254 3255 if (unlikely(ZERO_OR_NULL_PTR(s))) 3256 return s; 3257 3258 return slab_alloc(s, gfpflags, node, caller); 3259 } 3260 3261 #ifdef CONFIG_SLUB_DEBUG 3262 static unsigned long count_partial(struct kmem_cache_node *n, 3263 int (*get_count)(struct page *)) 3264 { 3265 unsigned long flags; 3266 unsigned long x = 0; 3267 struct page *page; 3268 3269 spin_lock_irqsave(&n->list_lock, flags); 3270 list_for_each_entry(page, &n->partial, lru) 3271 x += get_count(page); 3272 spin_unlock_irqrestore(&n->list_lock, flags); 3273 return x; 3274 } 3275 3276 static int count_inuse(struct page *page) 3277 { 3278 return page->inuse; 3279 } 3280 3281 static int count_total(struct page *page) 3282 { 3283 return page->objects; 3284 } 3285 3286 static int count_free(struct page *page) 3287 { 3288 return page->objects - page->inuse; 3289 } 3290 3291 static int validate_slab(struct kmem_cache *s, struct page *page, 3292 unsigned long *map) 3293 { 3294 void *p; 3295 void *addr = page_address(page); 3296 3297 if (!check_slab(s, page) || 3298 !on_freelist(s, page, NULL)) 3299 return 0; 3300 3301 /* Now we know that a valid freelist exists */ 3302 bitmap_zero(map, page->objects); 3303 3304 for_each_free_object(p, s, page->freelist) { 3305 set_bit(slab_index(p, s, addr), map); 3306 if (!check_object(s, page, p, 0)) 3307 return 0; 3308 } 3309 3310 for_each_object(p, s, addr, page->objects) 3311 if (!test_bit(slab_index(p, s, addr), map)) 3312 if (!check_object(s, page, p, 1)) 3313 return 0; 3314 return 1; 3315 } 3316 3317 static void validate_slab_slab(struct kmem_cache *s, struct page *page, 3318 unsigned long *map) 3319 { 3320 if (slab_trylock(page)) { 3321 validate_slab(s, page, map); 3322 slab_unlock(page); 3323 } else 3324 printk(KERN_INFO "SLUB 
%s: Skipped busy slab 0x%p\n", 3325 s->name, page); 3326 3327 if (s->flags & DEBUG_DEFAULT_FLAGS) { 3328 if (!SlabDebug(page)) 3329 printk(KERN_ERR "SLUB %s: SlabDebug not set " 3330 "on slab 0x%p\n", s->name, page); 3331 } else { 3332 if (SlabDebug(page)) 3333 printk(KERN_ERR "SLUB %s: SlabDebug set on " 3334 "slab 0x%p\n", s->name, page); 3335 } 3336 } 3337 3338 static int validate_slab_node(struct kmem_cache *s, 3339 struct kmem_cache_node *n, unsigned long *map) 3340 { 3341 unsigned long count = 0; 3342 struct page *page; 3343 unsigned long flags; 3344 3345 spin_lock_irqsave(&n->list_lock, flags); 3346 3347 list_for_each_entry(page, &n->partial, lru) { 3348 validate_slab_slab(s, page, map); 3349 count++; 3350 } 3351 if (count != n->nr_partial) 3352 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but " 3353 "counter=%ld\n", s->name, count, n->nr_partial); 3354 3355 if (!(s->flags & SLAB_STORE_USER)) 3356 goto out; 3357 3358 list_for_each_entry(page, &n->full, lru) { 3359 validate_slab_slab(s, page, map); 3360 count++; 3361 } 3362 if (count != atomic_long_read(&n->nr_slabs)) 3363 printk(KERN_ERR "SLUB: %s %ld slabs counted but " 3364 "counter=%ld\n", s->name, count, 3365 atomic_long_read(&n->nr_slabs)); 3366 3367 out: 3368 spin_unlock_irqrestore(&n->list_lock, flags); 3369 return count; 3370 } 3371 3372 static long validate_slab_cache(struct kmem_cache *s) 3373 { 3374 int node; 3375 unsigned long count = 0; 3376 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * 3377 sizeof(unsigned long), GFP_KERNEL); 3378 3379 if (!map) 3380 return -ENOMEM; 3381 3382 flush_all(s); 3383 for_each_node_state(node, N_NORMAL_MEMORY) { 3384 struct kmem_cache_node *n = get_node(s, node); 3385 3386 count += validate_slab_node(s, n, map); 3387 } 3388 kfree(map); 3389 return count; 3390 } 3391 3392 #ifdef SLUB_RESILIENCY_TEST 3393 static void resiliency_test(void) 3394 { 3395 u8 *p; 3396 3397 printk(KERN_ERR "SLUB resiliency testing\n"); 3398 printk(KERN_ERR "-----------------------\n"); 3399 printk(KERN_ERR "A. Corruption after allocation\n"); 3400 3401 p = kzalloc(16, GFP_KERNEL); 3402 p[16] = 0x12; 3403 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" 3404 " 0x12->0x%p\n\n", p + 16); 3405 3406 validate_slab_cache(kmalloc_caches + 4); 3407 3408 /* Hmmm... The next two are dangerous */ 3409 p = kzalloc(32, GFP_KERNEL); 3410 p[32 + sizeof(void *)] = 0x34; 3411 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" 3412 " 0x34 -> -0x%p\n", p); 3413 printk(KERN_ERR 3414 "If allocated object is overwritten then not detectable\n\n"); 3415 3416 validate_slab_cache(kmalloc_caches + 5); 3417 p = kzalloc(64, GFP_KERNEL); 3418 p += 64 + (get_cycles() & 0xff) * sizeof(void *); 3419 *p = 0x56; 3420 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", 3421 p); 3422 printk(KERN_ERR 3423 "If allocated object is overwritten then not detectable\n\n"); 3424 validate_slab_cache(kmalloc_caches + 6); 3425 3426 printk(KERN_ERR "\nB. Corruption after free\n"); 3427 p = kzalloc(128, GFP_KERNEL); 3428 kfree(p); 3429 *p = 0x78; 3430 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); 3431 validate_slab_cache(kmalloc_caches + 7); 3432 3433 p = kzalloc(256, GFP_KERNEL); 3434 kfree(p); 3435 p[50] = 0x9a; 3436 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", 3437 p); 3438 validate_slab_cache(kmalloc_caches + 8); 3439 3440 p = kzalloc(512, GFP_KERNEL); 3441 kfree(p); 3442 p[512] = 0xab; 3443 printk(KERN_ERR "\n3. 
kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); 3444 validate_slab_cache(kmalloc_caches + 9); 3445 } 3446 #else 3447 static void resiliency_test(void) {}; 3448 #endif 3449 3450 /* 3451 * Generate lists of code addresses where slabcache objects are allocated 3452 * and freed. 3453 */ 3454 3455 struct location { 3456 unsigned long count; 3457 void *addr; 3458 long long sum_time; 3459 long min_time; 3460 long max_time; 3461 long min_pid; 3462 long max_pid; 3463 cpumask_t cpus; 3464 nodemask_t nodes; 3465 }; 3466 3467 struct loc_track { 3468 unsigned long max; 3469 unsigned long count; 3470 struct location *loc; 3471 }; 3472 3473 static void free_loc_track(struct loc_track *t) 3474 { 3475 if (t->max) 3476 free_pages((unsigned long)t->loc, 3477 get_order(sizeof(struct location) * t->max)); 3478 } 3479 3480 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 3481 { 3482 struct location *l; 3483 int order; 3484 3485 order = get_order(sizeof(struct location) * max); 3486 3487 l = (void *)__get_free_pages(flags, order); 3488 if (!l) 3489 return 0; 3490 3491 if (t->count) { 3492 memcpy(l, t->loc, sizeof(struct location) * t->count); 3493 free_loc_track(t); 3494 } 3495 t->max = max; 3496 t->loc = l; 3497 return 1; 3498 } 3499 3500 static int add_location(struct loc_track *t, struct kmem_cache *s, 3501 const struct track *track) 3502 { 3503 long start, end, pos; 3504 struct location *l; 3505 void *caddr; 3506 unsigned long age = jiffies - track->when; 3507 3508 start = -1; 3509 end = t->count; 3510 3511 for ( ; ; ) { 3512 pos = start + (end - start + 1) / 2; 3513 3514 /* 3515 * There is nothing at "end". If we end up there 3516 * we need to add something to before end. 3517 */ 3518 if (pos == end) 3519 break; 3520 3521 caddr = t->loc[pos].addr; 3522 if (track->addr == caddr) { 3523 3524 l = &t->loc[pos]; 3525 l->count++; 3526 if (track->when) { 3527 l->sum_time += age; 3528 if (age < l->min_time) 3529 l->min_time = age; 3530 if (age > l->max_time) 3531 l->max_time = age; 3532 3533 if (track->pid < l->min_pid) 3534 l->min_pid = track->pid; 3535 if (track->pid > l->max_pid) 3536 l->max_pid = track->pid; 3537 3538 cpu_set(track->cpu, l->cpus); 3539 } 3540 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3541 return 1; 3542 } 3543 3544 if (track->addr < caddr) 3545 end = pos; 3546 else 3547 start = pos; 3548 } 3549 3550 /* 3551 * Not found. Insert new tracking element. 
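 *
 * Illustration (hypothetical addresses): if t->loc already holds the
 * sorted addresses { A, C, D } and track->addr is B with A < B < C, the
 * bisection above first probes C (pos 1, too big, end = 1), then A
 * (pos 0, too small, start = 0), and finally stops with pos == end == 1.
 * B is therefore inserted at index 1 after memmove() shifts C and D up.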
3552 */ 3553 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 3554 return 0; 3555 3556 l = t->loc + pos; 3557 if (pos < t->count) 3558 memmove(l + 1, l, 3559 (t->count - pos) * sizeof(struct location)); 3560 t->count++; 3561 l->count = 1; 3562 l->addr = track->addr; 3563 l->sum_time = age; 3564 l->min_time = age; 3565 l->max_time = age; 3566 l->min_pid = track->pid; 3567 l->max_pid = track->pid; 3568 cpus_clear(l->cpus); 3569 cpu_set(track->cpu, l->cpus); 3570 nodes_clear(l->nodes); 3571 node_set(page_to_nid(virt_to_page(track)), l->nodes); 3572 return 1; 3573 } 3574 3575 static void process_slab(struct loc_track *t, struct kmem_cache *s, 3576 struct page *page, enum track_item alloc) 3577 { 3578 void *addr = page_address(page); 3579 DECLARE_BITMAP(map, page->objects); 3580 void *p; 3581 3582 bitmap_zero(map, page->objects); 3583 for_each_free_object(p, s, page->freelist) 3584 set_bit(slab_index(p, s, addr), map); 3585 3586 for_each_object(p, s, addr, page->objects) 3587 if (!test_bit(slab_index(p, s, addr), map)) 3588 add_location(t, s, get_track(s, p, alloc)); 3589 } 3590 3591 static int list_locations(struct kmem_cache *s, char *buf, 3592 enum track_item alloc) 3593 { 3594 int len = 0; 3595 unsigned long i; 3596 struct loc_track t = { 0, 0, NULL }; 3597 int node; 3598 3599 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 3600 GFP_TEMPORARY)) 3601 return sprintf(buf, "Out of memory\n"); 3602 3603 /* Push back cpu slabs */ 3604 flush_all(s); 3605 3606 for_each_node_state(node, N_NORMAL_MEMORY) { 3607 struct kmem_cache_node *n = get_node(s, node); 3608 unsigned long flags; 3609 struct page *page; 3610 3611 if (!atomic_long_read(&n->nr_slabs)) 3612 continue; 3613 3614 spin_lock_irqsave(&n->list_lock, flags); 3615 list_for_each_entry(page, &n->partial, lru) 3616 process_slab(&t, s, page, alloc); 3617 list_for_each_entry(page, &n->full, lru) 3618 process_slab(&t, s, page, alloc); 3619 spin_unlock_irqrestore(&n->list_lock, flags); 3620 } 3621 3622 for (i = 0; i < t.count; i++) { 3623 struct location *l = &t.loc[i]; 3624 3625 if (len > PAGE_SIZE - 100) 3626 break; 3627 len += sprintf(buf + len, "%7ld ", l->count); 3628 3629 if (l->addr) 3630 len += sprint_symbol(buf + len, (unsigned long)l->addr); 3631 else 3632 len += sprintf(buf + len, "<not-available>"); 3633 3634 if (l->sum_time != l->min_time) { 3635 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3636 l->min_time, 3637 (long)div_u64(l->sum_time, l->count), 3638 l->max_time); 3639 } else 3640 len += sprintf(buf + len, " age=%ld", 3641 l->min_time); 3642 3643 if (l->min_pid != l->max_pid) 3644 len += sprintf(buf + len, " pid=%ld-%ld", 3645 l->min_pid, l->max_pid); 3646 else 3647 len += sprintf(buf + len, " pid=%ld", 3648 l->min_pid); 3649 3650 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && 3651 len < PAGE_SIZE - 60) { 3652 len += sprintf(buf + len, " cpus="); 3653 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3654 l->cpus); 3655 } 3656 3657 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && 3658 len < PAGE_SIZE - 60) { 3659 len += sprintf(buf + len, " nodes="); 3660 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, 3661 l->nodes); 3662 } 3663 3664 len += sprintf(buf + len, "\n"); 3665 } 3666 3667 free_loc_track(&t); 3668 if (!t.count) 3669 len += sprintf(buf, "No data\n"); 3670 return len; 3671 } 3672 3673 enum slab_stat_type { 3674 SL_ALL, /* All slabs */ 3675 SL_PARTIAL, /* Only partially allocated slabs */ 3676 SL_CPU, /* Only slabs used for cpu caches */ 3677 SL_OBJECTS, 
/* Determine allocated objects not slabs */ 3678 SL_TOTAL /* Determine object capacity not slabs */ 3679 }; 3680 3681 #define SO_ALL (1 << SL_ALL) 3682 #define SO_PARTIAL (1 << SL_PARTIAL) 3683 #define SO_CPU (1 << SL_CPU) 3684 #define SO_OBJECTS (1 << SL_OBJECTS) 3685 #define SO_TOTAL (1 << SL_TOTAL) 3686 3687 static ssize_t show_slab_objects(struct kmem_cache *s, 3688 char *buf, unsigned long flags) 3689 { 3690 unsigned long total = 0; 3691 int node; 3692 int x; 3693 unsigned long *nodes; 3694 unsigned long *per_cpu; 3695 3696 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); 3697 if (!nodes) 3698 return -ENOMEM; 3699 per_cpu = nodes + nr_node_ids; 3700 3701 if (flags & SO_CPU) { 3702 int cpu; 3703 3704 for_each_possible_cpu(cpu) { 3705 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu); 3706 3707 if (!c || c->node < 0) 3708 continue; 3709 3710 if (c->page) { 3711 if (flags & SO_TOTAL) 3712 x = c->page->objects; 3713 else if (flags & SO_OBJECTS) 3714 x = c->page->inuse; 3715 else 3716 x = 1; 3717 3718 total += x; 3719 nodes[c->node] += x; 3720 } 3721 per_cpu[c->node]++; 3722 } 3723 } 3724 3725 if (flags & SO_ALL) { 3726 for_each_node_state(node, N_NORMAL_MEMORY) { 3727 struct kmem_cache_node *n = get_node(s, node); 3728 3729 if (flags & SO_TOTAL) 3730 x = atomic_long_read(&n->total_objects); 3731 else if (flags & SO_OBJECTS) 3732 x = atomic_long_read(&n->total_objects) - 3733 count_partial(n, count_free); 3734 3735 else 3736 x = atomic_long_read(&n->nr_slabs); 3737 total += x; 3738 nodes[node] += x; 3739 } 3740 3741 } else if (flags & SO_PARTIAL) { 3742 for_each_node_state(node, N_NORMAL_MEMORY) { 3743 struct kmem_cache_node *n = get_node(s, node); 3744 3745 if (flags & SO_TOTAL) 3746 x = count_partial(n, count_total); 3747 else if (flags & SO_OBJECTS) 3748 x = count_partial(n, count_inuse); 3749 else 3750 x = n->nr_partial; 3751 total += x; 3752 nodes[node] += x; 3753 } 3754 } 3755 x = sprintf(buf, "%lu", total); 3756 #ifdef CONFIG_NUMA 3757 for_each_node_state(node, N_NORMAL_MEMORY) 3758 if (nodes[node]) 3759 x += sprintf(buf + x, " N%d=%lu", 3760 node, nodes[node]); 3761 #endif 3762 kfree(nodes); 3763 return x + sprintf(buf + x, "\n"); 3764 } 3765 3766 static int any_slab_objects(struct kmem_cache *s) 3767 { 3768 int node; 3769 3770 for_each_online_node(node) { 3771 struct kmem_cache_node *n = get_node(s, node); 3772 3773 if (!n) 3774 continue; 3775 3776 if (atomic_long_read(&n->total_objects)) 3777 return 1; 3778 } 3779 return 0; 3780 } 3781 3782 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 3783 #define to_slab(n) container_of(n, struct kmem_cache, kobj); 3784 3785 struct slab_attribute { 3786 struct attribute attr; 3787 ssize_t (*show)(struct kmem_cache *s, char *buf); 3788 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 3789 }; 3790 3791 #define SLAB_ATTR_RO(_name) \ 3792 static struct slab_attribute _name##_attr = __ATTR_RO(_name) 3793 3794 #define SLAB_ATTR(_name) \ 3795 static struct slab_attribute _name##_attr = \ 3796 __ATTR(_name, 0644, _name##_show, _name##_store) 3797 3798 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 3799 { 3800 return sprintf(buf, "%d\n", s->size); 3801 } 3802 SLAB_ATTR_RO(slab_size); 3803 3804 static ssize_t align_show(struct kmem_cache *s, char *buf) 3805 { 3806 return sprintf(buf, "%d\n", s->align); 3807 } 3808 SLAB_ATTR_RO(align); 3809 3810 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 3811 { 3812 return sprintf(buf, "%d\n", s->objsize); 3813 } 3814 
SLAB_ATTR_RO(object_size); 3815 3816 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 3817 { 3818 return sprintf(buf, "%d\n", oo_objects(s->oo)); 3819 } 3820 SLAB_ATTR_RO(objs_per_slab); 3821 3822 static ssize_t order_store(struct kmem_cache *s, 3823 const char *buf, size_t length) 3824 { 3825 unsigned long order; 3826 int err; 3827 3828 err = strict_strtoul(buf, 10, &order); 3829 if (err) 3830 return err; 3831 3832 if (order > slub_max_order || order < slub_min_order) 3833 return -EINVAL; 3834 3835 calculate_sizes(s, order); 3836 return length; 3837 } 3838 3839 static ssize_t order_show(struct kmem_cache *s, char *buf) 3840 { 3841 return sprintf(buf, "%d\n", oo_order(s->oo)); 3842 } 3843 SLAB_ATTR(order); 3844 3845 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 3846 { 3847 if (s->ctor) { 3848 int n = sprint_symbol(buf, (unsigned long)s->ctor); 3849 3850 return n + sprintf(buf + n, "\n"); 3851 } 3852 return 0; 3853 } 3854 SLAB_ATTR_RO(ctor); 3855 3856 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3857 { 3858 return sprintf(buf, "%d\n", s->refcount - 1); 3859 } 3860 SLAB_ATTR_RO(aliases); 3861 3862 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 3863 { 3864 return show_slab_objects(s, buf, SO_ALL); 3865 } 3866 SLAB_ATTR_RO(slabs); 3867 3868 static ssize_t partial_show(struct kmem_cache *s, char *buf) 3869 { 3870 return show_slab_objects(s, buf, SO_PARTIAL); 3871 } 3872 SLAB_ATTR_RO(partial); 3873 3874 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 3875 { 3876 return show_slab_objects(s, buf, SO_CPU); 3877 } 3878 SLAB_ATTR_RO(cpu_slabs); 3879 3880 static ssize_t objects_show(struct kmem_cache *s, char *buf) 3881 { 3882 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 3883 } 3884 SLAB_ATTR_RO(objects); 3885 3886 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 3887 { 3888 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 3889 } 3890 SLAB_ATTR_RO(objects_partial); 3891 3892 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 3893 { 3894 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 3895 } 3896 SLAB_ATTR_RO(total_objects); 3897 3898 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 3899 { 3900 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); 3901 } 3902 3903 static ssize_t sanity_checks_store(struct kmem_cache *s, 3904 const char *buf, size_t length) 3905 { 3906 s->flags &= ~SLAB_DEBUG_FREE; 3907 if (buf[0] == '1') 3908 s->flags |= SLAB_DEBUG_FREE; 3909 return length; 3910 } 3911 SLAB_ATTR(sanity_checks); 3912 3913 static ssize_t trace_show(struct kmem_cache *s, char *buf) 3914 { 3915 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 3916 } 3917 3918 static ssize_t trace_store(struct kmem_cache *s, const char *buf, 3919 size_t length) 3920 { 3921 s->flags &= ~SLAB_TRACE; 3922 if (buf[0] == '1') 3923 s->flags |= SLAB_TRACE; 3924 return length; 3925 } 3926 SLAB_ATTR(trace); 3927 3928 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 3929 { 3930 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 3931 } 3932 3933 static ssize_t reclaim_account_store(struct kmem_cache *s, 3934 const char *buf, size_t length) 3935 { 3936 s->flags &= ~SLAB_RECLAIM_ACCOUNT; 3937 if (buf[0] == '1') 3938 s->flags |= SLAB_RECLAIM_ACCOUNT; 3939 return length; 3940 } 3941 SLAB_ATTR(reclaim_account); 3942 3943 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 3944 { 3945 return sprintf(buf, "%d\n", !!(s->flags & 
SLAB_HWCACHE_ALIGN)); 3946 } 3947 SLAB_ATTR_RO(hwcache_align); 3948 3949 #ifdef CONFIG_ZONE_DMA 3950 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 3951 { 3952 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 3953 } 3954 SLAB_ATTR_RO(cache_dma); 3955 #endif 3956 3957 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 3958 { 3959 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); 3960 } 3961 SLAB_ATTR_RO(destroy_by_rcu); 3962 3963 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 3964 { 3965 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 3966 } 3967 3968 static ssize_t red_zone_store(struct kmem_cache *s, 3969 const char *buf, size_t length) 3970 { 3971 if (any_slab_objects(s)) 3972 return -EBUSY; 3973 3974 s->flags &= ~SLAB_RED_ZONE; 3975 if (buf[0] == '1') 3976 s->flags |= SLAB_RED_ZONE; 3977 calculate_sizes(s, -1); 3978 return length; 3979 } 3980 SLAB_ATTR(red_zone); 3981 3982 static ssize_t poison_show(struct kmem_cache *s, char *buf) 3983 { 3984 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); 3985 } 3986 3987 static ssize_t poison_store(struct kmem_cache *s, 3988 const char *buf, size_t length) 3989 { 3990 if (any_slab_objects(s)) 3991 return -EBUSY; 3992 3993 s->flags &= ~SLAB_POISON; 3994 if (buf[0] == '1') 3995 s->flags |= SLAB_POISON; 3996 calculate_sizes(s, -1); 3997 return length; 3998 } 3999 SLAB_ATTR(poison); 4000 4001 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 4002 { 4003 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 4004 } 4005 4006 static ssize_t store_user_store(struct kmem_cache *s, 4007 const char *buf, size_t length) 4008 { 4009 if (any_slab_objects(s)) 4010 return -EBUSY; 4011 4012 s->flags &= ~SLAB_STORE_USER; 4013 if (buf[0] == '1') 4014 s->flags |= SLAB_STORE_USER; 4015 calculate_sizes(s, -1); 4016 return length; 4017 } 4018 SLAB_ATTR(store_user); 4019 4020 static ssize_t validate_show(struct kmem_cache *s, char *buf) 4021 { 4022 return 0; 4023 } 4024 4025 static ssize_t validate_store(struct kmem_cache *s, 4026 const char *buf, size_t length) 4027 { 4028 int ret = -EINVAL; 4029 4030 if (buf[0] == '1') { 4031 ret = validate_slab_cache(s); 4032 if (ret >= 0) 4033 ret = length; 4034 } 4035 return ret; 4036 } 4037 SLAB_ATTR(validate); 4038 4039 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 4040 { 4041 return 0; 4042 } 4043 4044 static ssize_t shrink_store(struct kmem_cache *s, 4045 const char *buf, size_t length) 4046 { 4047 if (buf[0] == '1') { 4048 int rc = kmem_cache_shrink(s); 4049 4050 if (rc) 4051 return rc; 4052 } else 4053 return -EINVAL; 4054 return length; 4055 } 4056 SLAB_ATTR(shrink); 4057 4058 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 4059 { 4060 if (!(s->flags & SLAB_STORE_USER)) 4061 return -ENOSYS; 4062 return list_locations(s, buf, TRACK_ALLOC); 4063 } 4064 SLAB_ATTR_RO(alloc_calls); 4065 4066 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 4067 { 4068 if (!(s->flags & SLAB_STORE_USER)) 4069 return -ENOSYS; 4070 return list_locations(s, buf, TRACK_FREE); 4071 } 4072 SLAB_ATTR_RO(free_calls); 4073 4074 #ifdef CONFIG_NUMA 4075 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 4076 { 4077 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); 4078 } 4079 4080 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4081 const char *buf, size_t length) 4082 { 4083 unsigned long ratio; 4084 int err; 4085 4086 err = strict_strtoul(buf, 
10, &ratio); 4087 if (err) 4088 return err; 4089 4090 if (ratio < 100) 4091 s->remote_node_defrag_ratio = ratio * 10; 4092 4093 return length; 4094 } 4095 SLAB_ATTR(remote_node_defrag_ratio); 4096 #endif 4097 4098 #ifdef CONFIG_SLUB_STATS 4099 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 4100 { 4101 unsigned long sum = 0; 4102 int cpu; 4103 int len; 4104 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL); 4105 4106 if (!data) 4107 return -ENOMEM; 4108 4109 for_each_online_cpu(cpu) { 4110 unsigned x = get_cpu_slab(s, cpu)->stat[si]; 4111 4112 data[cpu] = x; 4113 sum += x; 4114 } 4115 4116 len = sprintf(buf, "%lu", sum); 4117 4118 #ifdef CONFIG_SMP 4119 for_each_online_cpu(cpu) { 4120 if (data[cpu] && len < PAGE_SIZE - 20) 4121 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); 4122 } 4123 #endif 4124 kfree(data); 4125 return len + sprintf(buf + len, "\n"); 4126 } 4127 4128 #define STAT_ATTR(si, text) \ 4129 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 4130 { \ 4131 return show_stat(s, buf, si); \ 4132 } \ 4133 SLAB_ATTR_RO(text); \ 4134 4135 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 4136 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 4137 STAT_ATTR(FREE_FASTPATH, free_fastpath); 4138 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 4139 STAT_ATTR(FREE_FROZEN, free_frozen); 4140 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 4141 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 4142 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 4143 STAT_ATTR(ALLOC_SLAB, alloc_slab); 4144 STAT_ATTR(ALLOC_REFILL, alloc_refill); 4145 STAT_ATTR(FREE_SLAB, free_slab); 4146 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 4147 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 4148 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 4149 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 4150 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 4151 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 4152 STAT_ATTR(ORDER_FALLBACK, order_fallback); 4153 #endif 4154 4155 static struct attribute *slab_attrs[] = { 4156 &slab_size_attr.attr, 4157 &object_size_attr.attr, 4158 &objs_per_slab_attr.attr, 4159 &order_attr.attr, 4160 &objects_attr.attr, 4161 &objects_partial_attr.attr, 4162 &total_objects_attr.attr, 4163 &slabs_attr.attr, 4164 &partial_attr.attr, 4165 &cpu_slabs_attr.attr, 4166 &ctor_attr.attr, 4167 &aliases_attr.attr, 4168 &align_attr.attr, 4169 &sanity_checks_attr.attr, 4170 &trace_attr.attr, 4171 &hwcache_align_attr.attr, 4172 &reclaim_account_attr.attr, 4173 &destroy_by_rcu_attr.attr, 4174 &red_zone_attr.attr, 4175 &poison_attr.attr, 4176 &store_user_attr.attr, 4177 &validate_attr.attr, 4178 &shrink_attr.attr, 4179 &alloc_calls_attr.attr, 4180 &free_calls_attr.attr, 4181 #ifdef CONFIG_ZONE_DMA 4182 &cache_dma_attr.attr, 4183 #endif 4184 #ifdef CONFIG_NUMA 4185 &remote_node_defrag_ratio_attr.attr, 4186 #endif 4187 #ifdef CONFIG_SLUB_STATS 4188 &alloc_fastpath_attr.attr, 4189 &alloc_slowpath_attr.attr, 4190 &free_fastpath_attr.attr, 4191 &free_slowpath_attr.attr, 4192 &free_frozen_attr.attr, 4193 &free_add_partial_attr.attr, 4194 &free_remove_partial_attr.attr, 4195 &alloc_from_partial_attr.attr, 4196 &alloc_slab_attr.attr, 4197 &alloc_refill_attr.attr, 4198 &free_slab_attr.attr, 4199 &cpuslab_flush_attr.attr, 4200 &deactivate_full_attr.attr, 4201 &deactivate_empty_attr.attr, 4202 &deactivate_to_head_attr.attr, 4203 &deactivate_to_tail_attr.attr, 4204 &deactivate_remote_frees_attr.attr, 4205 &order_fallback_attr.attr, 4206 #endif 4207 NULL 4208 }; 4209 4210 static 
static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&shrink_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&order_fallback_attr.attr,
#endif
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}

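/*
 * Worked example (hypothetical sizes, not taken from a running kernel):
 * for a mergeable cache with SLAB_CACHE_DMA set and s->size == 192,
 * create_unique_id() returns ":d-0000192"; for a cache with none of the
 * flags above and s->size == 1024 it returns ":0001024" (the '-'
 * separator is only emitted when flag characters precede the size).
 * The human-readable cache names then become symlinks pointing at this
 * directory, created by sysfs_slab_alias() below.
 */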
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		return err;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available, lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

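/*
 * Illustrative sketch of the layout this produces (names are examples
 * and assume cache merging is active):
 *
 *	/sys/kernel/slab/
 *		:0000192/		mergeable cache, directory named
 *					by create_unique_id()
 *		kmalloc-192 -> :0000192	alias symlink added by
 *					sysfs_slab_alias()
 *		somecache/		unmergeable cache (constructor,
 *					debug flags, ...) keeps its name
 *
 * Aliases requested before slab_sysfs_init() has run are parked on
 * alias_list and converted into symlinks once slab_state reaches SYSFS.
 */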
/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EINVAL;
}

static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#endif /* CONFIG_SLABINFO */
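/*
 * Example of the per-cache line that s_show() above produces (the
 * counts are hypothetical and the cache name is only an example):
 *
 *	kmalloc-64          1187   1216     64   64    1 : tunables    0    0    0 : slabdata     19     19      0
 *
 * <active_objs> is nr_objs - nr_free as computed in s_show().  The
 * tunables and <sharedavail> are always reported as 0 because SLUB has
 * no SLAB-style per-cpu queues to tune, and <active_slabs> is reported
 * equal to <num_slabs>.
 */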