xref: /openbmc/linux/mm/slub.c (revision f42b3800)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks and only
6  * uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/module.h>
13 #include <linux/bit_spinlock.h>
14 #include <linux/interrupt.h>
15 #include <linux/bitops.h>
16 #include <linux/slab.h>
17 #include <linux/seq_file.h>
18 #include <linux/cpu.h>
19 #include <linux/cpuset.h>
20 #include <linux/mempolicy.h>
21 #include <linux/ctype.h>
22 #include <linux/kallsyms.h>
23 #include <linux/memory.h>
24 
25 /*
26  * Lock order:
27  *   1. slab_lock(page)
28  *   2. node->list_lock
29  *
30  *   The slab_lock protects operations on the objects of a particular
31  *   slab and its metadata in the page struct. If the slab lock
32  *   has been taken then no allocations or frees can be performed
33  *   on the objects in the slab nor can the slab be added to or removed
34  *   from the partial or full lists since this would mean modifying
35  *   the page struct of the slab.
36  *
37  *   The list_lock protects the partial and full lists on each node and
38  *   the partial slab counter. If taken then no new slabs may be added to or
39  *   removed from the lists nor may the number of partial slabs be modified.
40  *   (Note that the total number of slabs is an atomic value that may be
41  *   modified without taking the list lock).
42  *
43  *   The list_lock is a centralized lock and thus we avoid taking it as
44  *   much as possible. As long as SLUB does not have to handle partial
45  *   slabs, operations can continue without any centralized lock. For example,
46  *   allocating a long series of objects that fill up slabs does not require
47  *   the list lock.
48  *
49  *   The lock order is sometimes inverted when we are trying to get a slab
50  *   off a list. We take the list_lock and then look for a page on the list
51  *   to use. While we do that objects in the slabs may be freed. We can
52  *   only operate on the slab if we have also taken the slab_lock. So we use
53  *   a slab_trylock() on the slab. If trylock was successful then no frees
54  *   can occur anymore and we can use the slab for allocations etc. If the
55  *   slab_trylock() does not succeed then frees are in progress in the slab and
56  *   we must stay away from it for a while since we may cause a bouncing
57  *   cacheline if we try to acquire the lock. So go onto the next slab.
58  *   If all pages are busy then we may allocate a new slab instead of reusing
59  *   a partial slab. A new slab has no one operating on it and thus there is
60  *   no danger of cacheline contention.
61  *
62  *   Interrupts are disabled during allocation and deallocation in order to
63  *   make the slab allocator safe to use in the context of an irq. In addition
64  *   interrupts are disabled to ensure that the processor does not change
65  *   while handling per_cpu slabs, due to kernel preemption.
66  *
67  * SLUB assigns one slab for allocation to each processor.
68  * Allocations only occur from these slabs called cpu slabs.
69  *
70  * Slabs with free elements are kept on a partial list and during regular
71  * operations no list for full slabs is used. If an object in a full slab is
72  * freed then the slab will show up again on the partial lists.
73  * We track full slabs for debugging purposes though because otherwise we
74  * cannot scan all objects.
75  *
76  * Slabs are freed when they become empty. Teardown and setup is
77  * minimal so we rely on the page allocators per cpu caches for
78  * fast frees and allocs.
79  *
80  * Overloading of page flags that are otherwise used for LRU management.
81  *
82  * PageActive 		The slab is frozen and exempt from list processing.
83  * 			This means that the slab is dedicated to a purpose
84  * 			such as satisfying allocations for a specific
85  * 			processor. Objects may be freed in the slab while
86  * 			it is frozen but slab_free will then skip the usual
87  * 			list operations. It is up to the processor holding
88  * 			the slab to integrate the slab into the slab lists
89  * 			when the slab is no longer needed.
90  *
91  * 			One use of this flag is to mark slabs that are
92  * 			used for allocations. Then such a slab becomes a cpu
93  * 			slab. The cpu slab may be equipped with an additional
94  * 			freelist that allows lockless access to
95  * 			free objects in addition to the regular freelist
96  * 			that requires the slab lock.
97  *
98  * PageError		Slab requires special handling due to debug
99  * 			options set. This moves	slab handling out of
100  * 			the fast path and disables lockless freelists.
101  */
102 
103 #define FROZEN (1 << PG_active)
104 
105 #ifdef CONFIG_SLUB_DEBUG
106 #define SLABDEBUG (1 << PG_error)
107 #else
108 #define SLABDEBUG 0
109 #endif
110 
111 static inline int SlabFrozen(struct page *page)
112 {
113 	return page->flags & FROZEN;
114 }
115 
116 static inline void SetSlabFrozen(struct page *page)
117 {
118 	page->flags |= FROZEN;
119 }
120 
121 static inline void ClearSlabFrozen(struct page *page)
122 {
123 	page->flags &= ~FROZEN;
124 }
125 
126 static inline int SlabDebug(struct page *page)
127 {
128 	return page->flags & SLABDEBUG;
129 }
130 
131 static inline void SetSlabDebug(struct page *page)
132 {
133 	page->flags |= SLABDEBUG;
134 }
135 
136 static inline void ClearSlabDebug(struct page *page)
137 {
138 	page->flags &= ~SLABDEBUG;
139 }
140 
141 /*
142  * Issues still to be resolved:
143  *
144  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
145  *
146  * - Variable sizing of the per node arrays
147  */
148 
149 /* Enable to test recovery from slab corruption on boot */
150 #undef SLUB_RESILIENCY_TEST
151 
152 #if PAGE_SHIFT <= 12
153 
154 /*
155  * Small page size. Make sure that we do not fragment memory.
156  */
157 #define DEFAULT_MAX_ORDER 1
158 #define DEFAULT_MIN_OBJECTS 4
159 
160 #else
161 
162 /*
163  * Large page machines are customarily able to handle larger
164  * page orders.
165  */
166 #define DEFAULT_MAX_ORDER 2
167 #define DEFAULT_MIN_OBJECTS 8
168 
169 #endif
170 
171 /*
172  * Minimum number of partial slabs. These will be left on the partial
173  * lists even if they are empty. kmem_cache_shrink may reclaim them.
174  */
175 #define MIN_PARTIAL 5
176 
177 /*
178  * Maximum number of desirable partial slabs.
179  * The existence of more partial slabs makes kmem_cache_shrink
180  * sort the partial list by the number of objects in them.
181  */
182 #define MAX_PARTIAL 10
183 
184 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
185 				SLAB_POISON | SLAB_STORE_USER)
186 
187 /*
188  * Set of flags that will prevent slab merging
189  */
190 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
191 		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
192 
193 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
194 		SLAB_CACHE_DMA)
195 
196 #ifndef ARCH_KMALLOC_MINALIGN
197 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
198 #endif
199 
200 #ifndef ARCH_SLAB_MINALIGN
201 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
202 #endif
203 
204 /* Internal SLUB flags */
205 #define __OBJECT_POISON		0x80000000 /* Poison object */
206 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
207 #define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
208 #define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
209 
210 /* Not all arches define cache_line_size */
211 #ifndef cache_line_size
212 #define cache_line_size()	L1_CACHE_BYTES
213 #endif
214 
215 static int kmem_size = sizeof(struct kmem_cache);
216 
217 #ifdef CONFIG_SMP
218 static struct notifier_block slab_notifier;
219 #endif
220 
221 static enum {
222 	DOWN,		/* No slab functionality available */
223 	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
224 	UP,		/* Everything works but does not show up in sysfs */
225 	SYSFS		/* Sysfs up */
226 } slab_state = DOWN;
227 
228 /* A list of all slab caches on the system */
229 static DECLARE_RWSEM(slub_lock);
230 static LIST_HEAD(slab_caches);
231 
232 /*
233  * Tracking user of a slab.
234  */
235 struct track {
236 	void *addr;		/* Called from address */
237 	int cpu;		/* Was running on cpu */
238 	int pid;		/* Pid context */
239 	unsigned long when;	/* When did the operation occur */
240 };
241 
242 enum track_item { TRACK_ALLOC, TRACK_FREE };
243 
244 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
245 static int sysfs_slab_add(struct kmem_cache *);
246 static int sysfs_slab_alias(struct kmem_cache *, const char *);
247 static void sysfs_slab_remove(struct kmem_cache *);
248 
249 #else
250 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
251 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
252 							{ return 0; }
253 static inline void sysfs_slab_remove(struct kmem_cache *s)
254 {
255 	kfree(s);
256 }
257 
258 #endif
259 
260 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
261 {
262 #ifdef CONFIG_SLUB_STATS
263 	c->stat[si]++;
264 #endif
265 }
266 
267 /********************************************************************
268  * 			Core slab cache functions
269  *******************************************************************/
270 
271 int slab_is_available(void)
272 {
273 	return slab_state >= UP;
274 }
275 
276 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
277 {
278 #ifdef CONFIG_NUMA
279 	return s->node[node];
280 #else
281 	return &s->local_node;
282 #endif
283 }
284 
285 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
286 {
287 #ifdef CONFIG_SMP
288 	return s->cpu_slab[cpu];
289 #else
290 	return &s->cpu_slab;
291 #endif
292 }
293 
294 /* Verify that a pointer has an address that is valid within a slab page */
295 static inline int check_valid_pointer(struct kmem_cache *s,
296 				struct page *page, const void *object)
297 {
298 	void *base;
299 
300 	if (!object)
301 		return 1;
302 
303 	base = page_address(page);
304 	if (object < base || object >= base + s->objects * s->size ||
305 		(object - base) % s->size) {
306 		return 0;
307 	}
308 
309 	return 1;
310 }
311 
312 /*
313  * Slow version of get and set free pointer.
314  *
315  * This version requires touching the cache lines of kmem_cache which
316  * we want to avoid in the fast alloc/free paths. There we obtain the offset
317  * from the kmem_cache_cpu structure instead.
318  */
319 static inline void *get_freepointer(struct kmem_cache *s, void *object)
320 {
321 	return *(void **)(object + s->offset);
322 }
323 
324 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
325 {
326 	*(void **)(object + s->offset) = fp;
327 }
328 
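/*
 * To illustrate (hypothetical addresses): with s->offset == 0 the free
 * pointer overlays the first word of each object, so a slab with three
 * free objects at 0x1000, 0x1040 and 0x1080 might be chained as
 *
 *	page->freelist -> 0x1000 -> 0x1040 -> 0x1080 -> NULL
 *
 * where each arrow is one get_freepointer() step. With a nonzero
 * s->offset the link is stored past the object payload instead.
 */
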
329 /* Loop over all objects in a slab */
330 #define for_each_object(__p, __s, __addr) \
331 	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
332 			__p += (__s)->size)
333 
334 /* Scan freelist */
335 #define for_each_free_object(__p, __s, __free) \
336 	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
337 
338 /* Determine object index from a given position */
339 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
340 {
341 	return (p - addr) / s->size;
342 }
343 
344 #ifdef CONFIG_SLUB_DEBUG
345 /*
346  * Debug settings:
347  */
348 #ifdef CONFIG_SLUB_DEBUG_ON
349 static int slub_debug = DEBUG_DEFAULT_FLAGS;
350 #else
351 static int slub_debug;
352 #endif
353 
354 static char *slub_debug_slabs;
355 
356 /*
357  * Object debugging
358  */
359 static void print_section(char *text, u8 *addr, unsigned int length)
360 {
361 	int i, offset;
362 	int newline = 1;
363 	char ascii[17];
364 
365 	ascii[16] = 0;
366 
367 	for (i = 0; i < length; i++) {
368 		if (newline) {
369 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
370 			newline = 0;
371 		}
372 		printk(KERN_CONT " %02x", addr[i]);
373 		offset = i % 16;
374 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
375 		if (offset == 15) {
376 			printk(KERN_CONT " %s\n", ascii);
377 			newline = 1;
378 		}
379 	}
380 	if (!newline) {
381 		i %= 16;
382 		while (i < 16) {
383 			printk(KERN_CONT "   ");
384 			ascii[i] = ' ';
385 			i++;
386 		}
387 		printk(KERN_CONT " %s\n", ascii);
388 	}
389 }
390 
391 static struct track *get_track(struct kmem_cache *s, void *object,
392 	enum track_item alloc)
393 {
394 	struct track *p;
395 
396 	if (s->offset)
397 		p = object + s->offset + sizeof(void *);
398 	else
399 		p = object + s->inuse;
400 
401 	return p + alloc;
402 }
403 
404 static void set_track(struct kmem_cache *s, void *object,
405 				enum track_item alloc, void *addr)
406 {
407 	struct track *p;
408 
409 	if (s->offset)
410 		p = object + s->offset + sizeof(void *);
411 	else
412 		p = object + s->inuse;
413 
414 	p += alloc;
415 	if (addr) {
416 		p->addr = addr;
417 		p->cpu = smp_processor_id();
418 		p->pid = current ? current->pid : -1;
419 		p->when = jiffies;
420 	} else
421 		memset(p, 0, sizeof(struct track));
422 }
423 
424 static void init_tracking(struct kmem_cache *s, void *object)
425 {
426 	if (!(s->flags & SLAB_STORE_USER))
427 		return;
428 
429 	set_track(s, object, TRACK_FREE, NULL);
430 	set_track(s, object, TRACK_ALLOC, NULL);
431 }
432 
433 static void print_track(const char *s, struct track *t)
434 {
435 	if (!t->addr)
436 		return;
437 
438 	printk(KERN_ERR "INFO: %s in ", s);
439 	__print_symbol("%s", (unsigned long)t->addr);
440 	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
441 }
442 
443 static void print_tracking(struct kmem_cache *s, void *object)
444 {
445 	if (!(s->flags & SLAB_STORE_USER))
446 		return;
447 
448 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
449 	print_track("Freed", get_track(s, object, TRACK_FREE));
450 }
451 
452 static void print_page_info(struct page *page)
453 {
454 	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
455 		page, page->inuse, page->freelist, page->flags);
456 
457 }
458 
459 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
460 {
461 	va_list args;
462 	char buf[100];
463 
464 	va_start(args, fmt);
465 	vsnprintf(buf, sizeof(buf), fmt, args);
466 	va_end(args);
467 	printk(KERN_ERR "========================================"
468 			"=====================================\n");
469 	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
470 	printk(KERN_ERR "----------------------------------------"
471 			"-------------------------------------\n\n");
472 }
473 
474 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
475 {
476 	va_list args;
477 	char buf[100];
478 
479 	va_start(args, fmt);
480 	vsnprintf(buf, sizeof(buf), fmt, args);
481 	va_end(args);
482 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
483 }
484 
485 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
486 {
487 	unsigned int off;	/* Offset of last byte */
488 	u8 *addr = page_address(page);
489 
490 	print_tracking(s, p);
491 
492 	print_page_info(page);
493 
494 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
495 			p, p - addr, get_freepointer(s, p));
496 
497 	if (p > addr + 16)
498 		print_section("Bytes b4", p - 16, 16);
499 
500 	print_section("Object", p, min(s->objsize, 128));
501 
502 	if (s->flags & SLAB_RED_ZONE)
503 		print_section("Redzone", p + s->objsize,
504 			s->inuse - s->objsize);
505 
506 	if (s->offset)
507 		off = s->offset + sizeof(void *);
508 	else
509 		off = s->inuse;
510 
511 	if (s->flags & SLAB_STORE_USER)
512 		off += 2 * sizeof(struct track);
513 
514 	if (off != s->size)
515 		/* Beginning of the filler is the free pointer */
516 		print_section("Padding", p + off, s->size - off);
517 
518 	dump_stack();
519 }
520 
521 static void object_err(struct kmem_cache *s, struct page *page,
522 			u8 *object, char *reason)
523 {
524 	slab_bug(s, reason);
525 	print_trailer(s, page, object);
526 }
527 
528 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
529 {
530 	va_list args;
531 	char buf[100];
532 
533 	va_start(args, fmt);
534 	vsnprintf(buf, sizeof(buf), fmt, args);
535 	va_end(args);
536 	slab_bug(s, "%s", buf);
537 	print_page_info(page);
538 	dump_stack();
539 }
540 
541 static void init_object(struct kmem_cache *s, void *object, int active)
542 {
543 	u8 *p = object;
544 
545 	if (s->flags & __OBJECT_POISON) {
546 		memset(p, POISON_FREE, s->objsize - 1);
547 		p[s->objsize - 1] = POISON_END;
548 	}
549 
550 	if (s->flags & SLAB_RED_ZONE)
551 		memset(p + s->objsize,
552 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
553 			s->inuse - s->objsize);
554 }
555 
556 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
557 {
558 	while (bytes) {
559 		if (*start != (u8)value)
560 			return start;
561 		start++;
562 		bytes--;
563 	}
564 	return NULL;
565 }
566 
567 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
568 						void *from, void *to)
569 {
570 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
571 	memset(from, data, to - from);
572 }
573 
574 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
575 			u8 *object, char *what,
576 			u8 *start, unsigned int value, unsigned int bytes)
577 {
578 	u8 *fault;
579 	u8 *end;
580 
581 	fault = check_bytes(start, value, bytes);
582 	if (!fault)
583 		return 1;
584 
585 	end = start + bytes;
586 	while (end > fault && end[-1] == value)
587 		end--;
588 
589 	slab_bug(s, "%s overwritten", what);
590 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
591 					fault, end - 1, fault[0], value);
592 	print_trailer(s, page, object);
593 
594 	restore_bytes(s, what, value, fault, end);
595 	return 0;
596 }
597 
598 /*
599  * Object layout:
600  *
601  * object address
602  * 	Bytes of the object to be managed.
603  * 	If the freepointer may overlay the object then the free
604  * 	pointer is the first word of the object.
605  *
606  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
607  * 	0xa5 (POISON_END)
608  *
609  * object + s->objsize
610  * 	Padding to reach word boundary. This is also used for Redzoning.
611  * 	Padding is extended by another word if Redzoning is enabled and
612  * 	objsize == inuse.
613  *
614  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
615  * 	0xcc (RED_ACTIVE) for objects in use.
616  *
617  * object + s->inuse
618  * 	Meta data starts here.
619  *
620  * 	A. Free pointer (if we cannot overwrite object on free)
621  * 	B. Tracking data for SLAB_STORE_USER
622  * 	C. Padding to reach required alignment boundary or at minimum
623  * 		one word if debugging is on to be able to detect writes
624  * 		before the word boundary.
625  *
626  *	Padding is done using 0x5a (POISON_INUSE)
627  *
628  * object + s->size
629  * 	Nothing is used beyond s->size.
630  *
631  * If slabcaches are merged then the objsize and inuse boundaries are mostly
632  * ignored, and therefore no slab options that rely on these boundaries
633  * may be used with merged slabcaches.
634  */
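
/*
 * A rough example (assuming a 64 bit machine and a hypothetical cache
 * with a 24 byte object created with SLAB_RED_ZONE | SLAB_STORE_USER):
 * the payload occupies bytes 0-23 and, since the object is not poisoned,
 * the free pointer overlays its first word; one word of red zone follows
 * (s->inuse = 32), then two struct track records for the allocating and
 * freeing callers, then one more word of padding so that writes past the
 * end can be detected. The exact offsets are computed when the cache is
 * set up and depend on the flags and the architecture.
 */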
635 
636 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
637 {
638 	unsigned long off = s->inuse;	/* The end of info */
639 
640 	if (s->offset)
641 		/* Freepointer is placed after the object. */
642 		off += sizeof(void *);
643 
644 	if (s->flags & SLAB_STORE_USER)
645 		/* We also have user information there */
646 		off += 2 * sizeof(struct track);
647 
648 	if (s->size == off)
649 		return 1;
650 
651 	return check_bytes_and_report(s, page, p, "Object padding",
652 				p + off, POISON_INUSE, s->size - off);
653 }
654 
655 static int slab_pad_check(struct kmem_cache *s, struct page *page)
656 {
657 	u8 *start;
658 	u8 *fault;
659 	u8 *end;
660 	int length;
661 	int remainder;
662 
663 	if (!(s->flags & SLAB_POISON))
664 		return 1;
665 
666 	start = page_address(page);
667 	end = start + (PAGE_SIZE << s->order);
668 	length = s->objects * s->size;
669 	remainder = end - (start + length);
670 	if (!remainder)
671 		return 1;
672 
673 	fault = check_bytes(start + length, POISON_INUSE, remainder);
674 	if (!fault)
675 		return 1;
676 	while (end > fault && end[-1] == POISON_INUSE)
677 		end--;
678 
679 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
680 	print_section("Padding", start, length);
681 
682 	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
683 	return 0;
684 }
685 
686 static int check_object(struct kmem_cache *s, struct page *page,
687 					void *object, int active)
688 {
689 	u8 *p = object;
690 	u8 *endobject = object + s->objsize;
691 
692 	if (s->flags & SLAB_RED_ZONE) {
693 		unsigned int red =
694 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
695 
696 		if (!check_bytes_and_report(s, page, object, "Redzone",
697 			endobject, red, s->inuse - s->objsize))
698 			return 0;
699 	} else {
700 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
701 			check_bytes_and_report(s, page, p, "Alignment padding",
702 				endobject, POISON_INUSE, s->inuse - s->objsize);
703 		}
704 	}
705 
706 	if (s->flags & SLAB_POISON) {
707 		if (!active && (s->flags & __OBJECT_POISON) &&
708 			(!check_bytes_and_report(s, page, p, "Poison", p,
709 					POISON_FREE, s->objsize - 1) ||
710 			 !check_bytes_and_report(s, page, p, "Poison",
711 				p + s->objsize - 1, POISON_END, 1)))
712 			return 0;
713 		/*
714 		 * check_pad_bytes cleans up on its own.
715 		 */
716 		check_pad_bytes(s, page, p);
717 	}
718 
719 	if (!s->offset && active)
720 		/*
721 		 * Object and freepointer overlap. Cannot check
722 		 * freepointer while object is allocated.
723 		 */
724 		return 1;
725 
726 	/* Check free pointer validity */
727 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
728 		object_err(s, page, p, "Freepointer corrupt");
729 		/*
730 		 * No choice but to zap it and thus lose the remainder
731 		 * of the free objects in this slab. May cause
732 		 * another error because the object count is now wrong.
733 		 */
734 		set_freepointer(s, p, NULL);
735 		return 0;
736 	}
737 	return 1;
738 }
739 
740 static int check_slab(struct kmem_cache *s, struct page *page)
741 {
742 	VM_BUG_ON(!irqs_disabled());
743 
744 	if (!PageSlab(page)) {
745 		slab_err(s, page, "Not a valid slab page");
746 		return 0;
747 	}
748 	if (page->inuse > s->objects) {
749 		slab_err(s, page, "inuse %u > max %u",
750 			page->inuse, s->objects);
751 		return 0;
752 	}
753 	/* Slab_pad_check fixes things up after itself */
754 	slab_pad_check(s, page);
755 	return 1;
756 }
757 
758 /*
759  * Determine if a certain object on a page is on the freelist. Must hold the
760  * slab lock to guarantee that the chains are in a consistent state.
761  */
762 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
763 {
764 	int nr = 0;
765 	void *fp = page->freelist;
766 	void *object = NULL;
767 
768 	while (fp && nr <= s->objects) {
769 		if (fp == search)
770 			return 1;
771 		if (!check_valid_pointer(s, page, fp)) {
772 			if (object) {
773 				object_err(s, page, object,
774 					"Freechain corrupt");
775 				set_freepointer(s, object, NULL);
776 				break;
777 			} else {
778 				slab_err(s, page, "Freepointer corrupt");
779 				page->freelist = NULL;
780 				page->inuse = s->objects;
781 				slab_fix(s, "Freelist cleared");
782 				return 0;
783 			}
784 			break;
785 		}
786 		object = fp;
787 		fp = get_freepointer(s, object);
788 		nr++;
789 	}
790 
791 	if (page->inuse != s->objects - nr) {
792 		slab_err(s, page, "Wrong object count. Counter is %d but "
793 			"%d objects were counted", page->inuse, s->objects - nr);
794 		page->inuse = s->objects - nr;
795 		slab_fix(s, "Object count adjusted.");
796 	}
797 	return search == NULL;
798 }
799 
800 static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
801 {
802 	if (s->flags & SLAB_TRACE) {
803 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
804 			s->name,
805 			alloc ? "alloc" : "free",
806 			object, page->inuse,
807 			page->freelist);
808 
809 		if (!alloc)
810 			print_section("Object", (void *)object, s->objsize);
811 
812 		dump_stack();
813 	}
814 }
815 
816 /*
817  * Tracking of fully allocated slabs for debugging purposes.
818  */
819 static void add_full(struct kmem_cache_node *n, struct page *page)
820 {
821 	spin_lock(&n->list_lock);
822 	list_add(&page->lru, &n->full);
823 	spin_unlock(&n->list_lock);
824 }
825 
826 static void remove_full(struct kmem_cache *s, struct page *page)
827 {
828 	struct kmem_cache_node *n;
829 
830 	if (!(s->flags & SLAB_STORE_USER))
831 		return;
832 
833 	n = get_node(s, page_to_nid(page));
834 
835 	spin_lock(&n->list_lock);
836 	list_del(&page->lru);
837 	spin_unlock(&n->list_lock);
838 }
839 
840 /* Tracking of the number of slabs for debugging purposes */
841 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
842 {
843 	struct kmem_cache_node *n = get_node(s, node);
844 
845 	return atomic_long_read(&n->nr_slabs);
846 }
847 
848 static inline void inc_slabs_node(struct kmem_cache *s, int node)
849 {
850 	struct kmem_cache_node *n = get_node(s, node);
851 
852 	/*
853 	 * May be called early in order to allocate a slab for the
854 	 * kmem_cache_node structure. Solve the chicken-egg
855 	 * dilemma by deferring the increment of the count during
856 	 * bootstrap (see early_kmem_cache_node_alloc).
857 	 */
858 	if (!NUMA_BUILD || n)
859 		atomic_long_inc(&n->nr_slabs);
860 }
861 static inline void dec_slabs_node(struct kmem_cache *s, int node)
862 {
863 	struct kmem_cache_node *n = get_node(s, node);
864 
865 	atomic_long_dec(&n->nr_slabs);
866 }
867 
868 /* Object debug checks for alloc/free paths */
869 static void setup_object_debug(struct kmem_cache *s, struct page *page,
870 								void *object)
871 {
872 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
873 		return;
874 
875 	init_object(s, object, 0);
876 	init_tracking(s, object);
877 }
878 
879 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
880 						void *object, void *addr)
881 {
882 	if (!check_slab(s, page))
883 		goto bad;
884 
885 	if (!on_freelist(s, page, object)) {
886 		object_err(s, page, object, "Object already allocated");
887 		goto bad;
888 	}
889 
890 	if (!check_valid_pointer(s, page, object)) {
891 		object_err(s, page, object, "Freelist Pointer check fails");
892 		goto bad;
893 	}
894 
895 	if (!check_object(s, page, object, 0))
896 		goto bad;
897 
898 	/* Success. Perform special debug activities for allocs */
899 	if (s->flags & SLAB_STORE_USER)
900 		set_track(s, object, TRACK_ALLOC, addr);
901 	trace(s, page, object, 1);
902 	init_object(s, object, 1);
903 	return 1;
904 
905 bad:
906 	if (PageSlab(page)) {
907 		/*
908 		 * If this is a slab page then let's do the best we can
909 		 * to avoid issues in the future. Marking all objects
910 		 * as used avoids touching the remaining objects.
911 		 */
912 		slab_fix(s, "Marking all objects used");
913 		page->inuse = s->objects;
914 		page->freelist = NULL;
915 	}
916 	return 0;
917 }
918 
919 static int free_debug_processing(struct kmem_cache *s, struct page *page,
920 						void *object, void *addr)
921 {
922 	if (!check_slab(s, page))
923 		goto fail;
924 
925 	if (!check_valid_pointer(s, page, object)) {
926 		slab_err(s, page, "Invalid object pointer 0x%p", object);
927 		goto fail;
928 	}
929 
930 	if (on_freelist(s, page, object)) {
931 		object_err(s, page, object, "Object already free");
932 		goto fail;
933 	}
934 
935 	if (!check_object(s, page, object, 1))
936 		return 0;
937 
938 	if (unlikely(s != page->slab)) {
939 		if (!PageSlab(page)) {
940 			slab_err(s, page, "Attempt to free object(0x%p) "
941 				"outside of slab", object);
942 		} else if (!page->slab) {
943 			printk(KERN_ERR
944 				"SLUB <none>: no slab for object 0x%p.\n",
945 						object);
946 			dump_stack();
947 		} else
948 			object_err(s, page, object,
949 					"page slab pointer corrupt.");
950 		goto fail;
951 	}
952 
953 	/* Special debug activities for freeing objects */
954 	if (!SlabFrozen(page) && !page->freelist)
955 		remove_full(s, page);
956 	if (s->flags & SLAB_STORE_USER)
957 		set_track(s, object, TRACK_FREE, addr);
958 	trace(s, page, object, 0);
959 	init_object(s, object, 0);
960 	return 1;
961 
962 fail:
963 	slab_fix(s, "Object at 0x%p not freed", object);
964 	return 0;
965 }
966 
967 static int __init setup_slub_debug(char *str)
968 {
969 	slub_debug = DEBUG_DEFAULT_FLAGS;
970 	if (*str++ != '=' || !*str)
971 		/*
972 		 * No options specified. Switch on full debugging.
973 		 */
974 		goto out;
975 
976 	if (*str == ',')
977 		/*
978 		 * No options but restriction on slabs. This means full
979 		 * debugging for slabs matching a pattern.
980 		 */
981 		goto check_slabs;
982 
983 	slub_debug = 0;
984 	if (*str == '-')
985 		/*
986 		 * Switch off all debugging measures.
987 		 */
988 		goto out;
989 
990 	/*
991 	 * Determine which debug features should be switched on
992 	 */
993 	for (; *str && *str != ','; str++) {
994 		switch (tolower(*str)) {
995 		case 'f':
996 			slub_debug |= SLAB_DEBUG_FREE;
997 			break;
998 		case 'z':
999 			slub_debug |= SLAB_RED_ZONE;
1000 			break;
1001 		case 'p':
1002 			slub_debug |= SLAB_POISON;
1003 			break;
1004 		case 'u':
1005 			slub_debug |= SLAB_STORE_USER;
1006 			break;
1007 		case 't':
1008 			slub_debug |= SLAB_TRACE;
1009 			break;
1010 		default:
1011 			printk(KERN_ERR "slub_debug option '%c' "
1012 				"unknown. skipped\n", *str);
1013 		}
1014 	}
1015 
1016 check_slabs:
1017 	if (*str == ',')
1018 		slub_debug_slabs = str + 1;
1019 out:
1020 	return 1;
1021 }
1022 
1023 __setup("slub_debug", setup_slub_debug);
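
/*
 * For example, booting with "slub_debug=FZ,dentry" enables sanity checks
 * and red zoning only for caches whose name starts with "dentry", while a
 * plain "slub_debug" switches on DEBUG_DEFAULT_FLAGS for all caches and
 * "slub_debug=-" turns debugging off entirely. (Illustrative values; any
 * combination of the option letters above may be used.)
 */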
1024 
1025 static unsigned long kmem_cache_flags(unsigned long objsize,
1026 	unsigned long flags, const char *name,
1027 	void (*ctor)(struct kmem_cache *, void *))
1028 {
1029 	/*
1030 	 * Enable debugging if selected on the kernel command line.
1031 	 */
1032 	if (slub_debug && (!slub_debug_slabs ||
1033 	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
1034 			flags |= slub_debug;
1035 
1036 	return flags;
1037 }
1038 #else
1039 static inline void setup_object_debug(struct kmem_cache *s,
1040 			struct page *page, void *object) {}
1041 
1042 static inline int alloc_debug_processing(struct kmem_cache *s,
1043 	struct page *page, void *object, void *addr) { return 0; }
1044 
1045 static inline int free_debug_processing(struct kmem_cache *s,
1046 	struct page *page, void *object, void *addr) { return 0; }
1047 
1048 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1049 			{ return 1; }
1050 static inline int check_object(struct kmem_cache *s, struct page *page,
1051 			void *object, int active) { return 1; }
1052 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1053 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1054 	unsigned long flags, const char *name,
1055 	void (*ctor)(struct kmem_cache *, void *))
1056 {
1057 	return flags;
1058 }
1059 #define slub_debug 0
1060 
1061 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1062 							{ return 0; }
1063 static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
1064 static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
1065 #endif
1066 /*
1067  * Slab allocation and freeing
1068  */
1069 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1070 {
1071 	struct page *page;
1072 	int pages = 1 << s->order;
1073 
1074 	flags |= s->allocflags;
1075 
1076 	if (node == -1)
1077 		page = alloc_pages(flags, s->order);
1078 	else
1079 		page = alloc_pages_node(node, flags, s->order);
1080 
1081 	if (!page)
1082 		return NULL;
1083 
1084 	mod_zone_page_state(page_zone(page),
1085 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1086 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1087 		pages);
1088 
1089 	return page;
1090 }
1091 
1092 static void setup_object(struct kmem_cache *s, struct page *page,
1093 				void *object)
1094 {
1095 	setup_object_debug(s, page, object);
1096 	if (unlikely(s->ctor))
1097 		s->ctor(s, object);
1098 }
1099 
1100 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1101 {
1102 	struct page *page;
1103 	void *start;
1104 	void *last;
1105 	void *p;
1106 
1107 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1108 
1109 	page = allocate_slab(s,
1110 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1111 	if (!page)
1112 		goto out;
1113 
1114 	inc_slabs_node(s, page_to_nid(page));
1115 	page->slab = s;
1116 	page->flags |= 1 << PG_slab;
1117 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1118 			SLAB_STORE_USER | SLAB_TRACE))
1119 		SetSlabDebug(page);
1120 
1121 	start = page_address(page);
1122 
1123 	if (unlikely(s->flags & SLAB_POISON))
1124 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
1125 
1126 	last = start;
1127 	for_each_object(p, s, start) {
1128 		setup_object(s, page, last);
1129 		set_freepointer(s, last, p);
1130 		last = p;
1131 	}
1132 	setup_object(s, page, last);
1133 	set_freepointer(s, last, NULL);
1134 
1135 	page->freelist = start;
1136 	page->inuse = 0;
1137 out:
1138 	return page;
1139 }
1140 
1141 static void __free_slab(struct kmem_cache *s, struct page *page)
1142 {
1143 	int pages = 1 << s->order;
1144 
1145 	if (unlikely(SlabDebug(page))) {
1146 		void *p;
1147 
1148 		slab_pad_check(s, page);
1149 		for_each_object(p, s, page_address(page))
1150 			check_object(s, page, p, 0);
1151 		ClearSlabDebug(page);
1152 	}
1153 
1154 	mod_zone_page_state(page_zone(page),
1155 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1156 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1157 		-pages);
1158 
1159 	__ClearPageSlab(page);
1160 	reset_page_mapcount(page);
1161 	__free_pages(page, s->order);
1162 }
1163 
1164 static void rcu_free_slab(struct rcu_head *h)
1165 {
1166 	struct page *page;
1167 
1168 	page = container_of((struct list_head *)h, struct page, lru);
1169 	__free_slab(page->slab, page);
1170 }
1171 
1172 static void free_slab(struct kmem_cache *s, struct page *page)
1173 {
1174 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1175 		/*
1176 		 * RCU free overloads the RCU head over the LRU
1177 		 */
1178 		struct rcu_head *head = (void *)&page->lru;
1179 
1180 		call_rcu(head, rcu_free_slab);
1181 	} else
1182 		__free_slab(s, page);
1183 }
1184 
1185 static void discard_slab(struct kmem_cache *s, struct page *page)
1186 {
1187 	dec_slabs_node(s, page_to_nid(page));
1188 	free_slab(s, page);
1189 }
1190 
1191 /*
1192  * Per slab locking using the pagelock
1193  */
1194 static __always_inline void slab_lock(struct page *page)
1195 {
1196 	bit_spin_lock(PG_locked, &page->flags);
1197 }
1198 
1199 static __always_inline void slab_unlock(struct page *page)
1200 {
1201 	__bit_spin_unlock(PG_locked, &page->flags);
1202 }
1203 
1204 static __always_inline int slab_trylock(struct page *page)
1205 {
1206 	int rc = 1;
1207 
1208 	rc = bit_spin_trylock(PG_locked, &page->flags);
1209 	return rc;
1210 }
1211 
1212 /*
1213  * Management of partially allocated slabs
1214  */
1215 static void add_partial(struct kmem_cache_node *n,
1216 				struct page *page, int tail)
1217 {
1218 	spin_lock(&n->list_lock);
1219 	n->nr_partial++;
1220 	if (tail)
1221 		list_add_tail(&page->lru, &n->partial);
1222 	else
1223 		list_add(&page->lru, &n->partial);
1224 	spin_unlock(&n->list_lock);
1225 }
1226 
1227 static void remove_partial(struct kmem_cache *s,
1228 						struct page *page)
1229 {
1230 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1231 
1232 	spin_lock(&n->list_lock);
1233 	list_del(&page->lru);
1234 	n->nr_partial--;
1235 	spin_unlock(&n->list_lock);
1236 }
1237 
1238 /*
1239  * Lock slab and remove from the partial list.
1240  *
1241  * Must hold list_lock.
1242  */
1243 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
1244 {
1245 	if (slab_trylock(page)) {
1246 		list_del(&page->lru);
1247 		n->nr_partial--;
1248 		SetSlabFrozen(page);
1249 		return 1;
1250 	}
1251 	return 0;
1252 }
1253 
1254 /*
1255  * Try to allocate a partial slab from a specific node.
1256  */
1257 static struct page *get_partial_node(struct kmem_cache_node *n)
1258 {
1259 	struct page *page;
1260 
1261 	/*
1262 	 * Racy check. If we mistakenly see no partial slabs then we
1263 	 * just allocate an empty slab. If we mistakenly try to get a
1264 	 * partial slab and there is none available then get_partial_node()
1265 	 * will return NULL.
1266 	 */
1267 	if (!n || !n->nr_partial)
1268 		return NULL;
1269 
1270 	spin_lock(&n->list_lock);
1271 	list_for_each_entry(page, &n->partial, lru)
1272 		if (lock_and_freeze_slab(n, page))
1273 			goto out;
1274 	page = NULL;
1275 out:
1276 	spin_unlock(&n->list_lock);
1277 	return page;
1278 }
1279 
1280 /*
1281  * Get a page from somewhere. Search in increasing NUMA distances.
1282  */
1283 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1284 {
1285 #ifdef CONFIG_NUMA
1286 	struct zonelist *zonelist;
1287 	struct zone **z;
1288 	struct page *page;
1289 
1290 	/*
1291 	 * The defrag ratio allows a configuration of the tradeoffs between
1292 	 * inter node defragmentation and node local allocations. A lower
1293 	 * defrag_ratio increases the tendency to do local allocations
1294 	 * instead of attempting to obtain partial slabs from other nodes.
1295 	 *
1296 	 * If the defrag_ratio is set to 0 then kmalloc() always
1297 	 * returns node local objects. If the ratio is higher then kmalloc()
1298 	 * may return off node objects because partial slabs are obtained
1299 	 * from other nodes and filled up.
1300 	 *
1301 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1302 	 * defrag_ratio = 1000) then every (well almost) allocation will
1303 	 * first attempt to defrag slab caches on other nodes. This means
1304 	 * scanning over all nodes to look for partial slabs which may be
1305 	 * expensive if we do it every time we are trying to find a slab
1306 	 * with available objects.
1307 	 */
1308 	if (!s->remote_node_defrag_ratio ||
1309 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1310 		return NULL;
1311 
1312 	zonelist = &NODE_DATA(
1313 		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
1314 	for (z = zonelist->zones; *z; z++) {
1315 		struct kmem_cache_node *n;
1316 
1317 		n = get_node(s, zone_to_nid(*z));
1318 
1319 		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
1320 				n->nr_partial > MIN_PARTIAL) {
1321 			page = get_partial_node(n);
1322 			if (page)
1323 				return page;
1324 		}
1325 	}
1326 #endif
1327 	return NULL;
1328 }
1329 
1330 /*
1331  * Get a partial page, lock it and return it.
1332  */
1333 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1334 {
1335 	struct page *page;
1336 	int searchnode = (node == -1) ? numa_node_id() : node;
1337 
1338 	page = get_partial_node(get_node(s, searchnode));
1339 	if (page || (flags & __GFP_THISNODE))
1340 		return page;
1341 
1342 	return get_any_partial(s, flags);
1343 }
1344 
1345 /*
1346  * Move a page back to the lists.
1347  *
1348  * Must be called with the slab lock held.
1349  *
1350  * On exit the slab lock will have been dropped.
1351  */
1352 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1353 {
1354 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1355 	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1356 
1357 	ClearSlabFrozen(page);
1358 	if (page->inuse) {
1359 
1360 		if (page->freelist) {
1361 			add_partial(n, page, tail);
1362 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1363 		} else {
1364 			stat(c, DEACTIVATE_FULL);
1365 			if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1366 				add_full(n, page);
1367 		}
1368 		slab_unlock(page);
1369 	} else {
1370 		stat(c, DEACTIVATE_EMPTY);
1371 		if (n->nr_partial < MIN_PARTIAL) {
1372 			/*
1373 			 * Adding an empty slab to the partial slabs in order
1374 			 * to avoid page allocator overhead. This slab needs
1375 			 * to come after the other slabs with objects in them
1376 			 * so that the others get filled first. That way the
1377 			 * size of the partial list stays small.
1378 			 *
1379 			 * kmem_cache_shrink can reclaim any empty slabs from the
1380 			 * partial list.
1381 			 */
1382 			add_partial(n, page, 1);
1383 			slab_unlock(page);
1384 		} else {
1385 			slab_unlock(page);
1386 			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
1387 			discard_slab(s, page);
1388 		}
1389 	}
1390 }
1391 
1392 /*
1393  * Remove the cpu slab
1394  */
1395 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1396 {
1397 	struct page *page = c->page;
1398 	int tail = 1;
1399 
1400 	if (page->freelist)
1401 		stat(c, DEACTIVATE_REMOTE_FREES);
1402 	/*
1403 	 * Merge cpu freelist into slab freelist. Typically we get here
1404 	 * because both freelists are empty. So this is unlikely
1405 	 * to occur.
1406 	 */
1407 	while (unlikely(c->freelist)) {
1408 		void **object;
1409 
1410 		tail = 0;	/* Hot objects. Put the slab first */
1411 
1412 		/* Retrieve object from cpu_freelist */
1413 		object = c->freelist;
1414 		c->freelist = c->freelist[c->offset];
1415 
1416 		/* And put onto the regular freelist */
1417 		object[c->offset] = page->freelist;
1418 		page->freelist = object;
1419 		page->inuse--;
1420 	}
1421 	c->page = NULL;
1422 	unfreeze_slab(s, page, tail);
1423 }
1424 
1425 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1426 {
1427 	stat(c, CPUSLAB_FLUSH);
1428 	slab_lock(c->page);
1429 	deactivate_slab(s, c);
1430 }
1431 
1432 /*
1433  * Flush cpu slab.
1434  *
1435  * Called from IPI handler with interrupts disabled.
1436  */
1437 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1438 {
1439 	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1440 
1441 	if (likely(c && c->page))
1442 		flush_slab(s, c);
1443 }
1444 
1445 static void flush_cpu_slab(void *d)
1446 {
1447 	struct kmem_cache *s = d;
1448 
1449 	__flush_cpu_slab(s, smp_processor_id());
1450 }
1451 
1452 static void flush_all(struct kmem_cache *s)
1453 {
1454 #ifdef CONFIG_SMP
1455 	on_each_cpu(flush_cpu_slab, s, 1, 1);
1456 #else
1457 	unsigned long flags;
1458 
1459 	local_irq_save(flags);
1460 	flush_cpu_slab(s);
1461 	local_irq_restore(flags);
1462 #endif
1463 }
1464 
1465 /*
1466  * Check if the objects in a per cpu structure fit numa
1467  * locality expectations.
1468  */
1469 static inline int node_match(struct kmem_cache_cpu *c, int node)
1470 {
1471 #ifdef CONFIG_NUMA
1472 	if (node != -1 && c->node != node)
1473 		return 0;
1474 #endif
1475 	return 1;
1476 }
1477 
1478 /*
1479  * Slow path. The lockless freelist is empty or we need to perform
1480  * debugging duties.
1481  *
1482  * Interrupts are disabled.
1483  *
1484  * Processing is still very fast if new objects have been freed to the
1485  * regular freelist. In that case we simply take over the regular freelist
1486  * as the lockless freelist and zap the regular freelist.
1487  *
1488  * If that is not working then we fall back to the partial lists. We take the
1489  * first element of the freelist as the object to allocate now and move the
1490  * rest of the freelist to the lockless freelist.
1491  *
1492  * And if we were unable to get a new slab from the partial slab lists then
1493  * we need to allocate a new slab. This is the slowest path since it involves
1494  * a call to the page allocator and the setup of a new slab.
1495  */
1496 static void *__slab_alloc(struct kmem_cache *s,
1497 		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
1498 {
1499 	void **object;
1500 	struct page *new;
1501 
1502 	/* We handle __GFP_ZERO in the caller */
1503 	gfpflags &= ~__GFP_ZERO;
1504 
1505 	if (!c->page)
1506 		goto new_slab;
1507 
1508 	slab_lock(c->page);
1509 	if (unlikely(!node_match(c, node)))
1510 		goto another_slab;
1511 
1512 	stat(c, ALLOC_REFILL);
1513 
1514 load_freelist:
1515 	object = c->page->freelist;
1516 	if (unlikely(!object))
1517 		goto another_slab;
1518 	if (unlikely(SlabDebug(c->page)))
1519 		goto debug;
1520 
1521 	c->freelist = object[c->offset];
1522 	c->page->inuse = s->objects;
1523 	c->page->freelist = NULL;
1524 	c->node = page_to_nid(c->page);
1525 unlock_out:
1526 	slab_unlock(c->page);
1527 	stat(c, ALLOC_SLOWPATH);
1528 	return object;
1529 
1530 another_slab:
1531 	deactivate_slab(s, c);
1532 
1533 new_slab:
1534 	new = get_partial(s, gfpflags, node);
1535 	if (new) {
1536 		c->page = new;
1537 		stat(c, ALLOC_FROM_PARTIAL);
1538 		goto load_freelist;
1539 	}
1540 
1541 	if (gfpflags & __GFP_WAIT)
1542 		local_irq_enable();
1543 
1544 	new = new_slab(s, gfpflags, node);
1545 
1546 	if (gfpflags & __GFP_WAIT)
1547 		local_irq_disable();
1548 
1549 	if (new) {
1550 		c = get_cpu_slab(s, smp_processor_id());
1551 		stat(c, ALLOC_SLAB);
1552 		if (c->page)
1553 			flush_slab(s, c);
1554 		slab_lock(new);
1555 		SetSlabFrozen(new);
1556 		c->page = new;
1557 		goto load_freelist;
1558 	}
1559 
1560 	/*
1561 	 * No memory available.
1562 	 *
1563 	 * If the slab uses higher order allocs but the object is
1564 	 * smaller than a page size then we can fall back in emergencies
1565 	 * to the page allocator via kmalloc_large. The page allocator may
1566 	 * have failed to obtain a higher order page and we can try to
1567 	 * allocate a single page if the object fits into a single page.
1568 	 * That is only possible if certain conditions are met that are being
1569 	 * checked when a slab is created.
1570 	 */
1571 	if (!(gfpflags & __GFP_NORETRY) &&
1572 				(s->flags & __PAGE_ALLOC_FALLBACK)) {
1573 		if (gfpflags & __GFP_WAIT)
1574 			local_irq_enable();
1575 		object = kmalloc_large(s->objsize, gfpflags);
1576 		if (gfpflags & __GFP_WAIT)
1577 			local_irq_disable();
1578 		return object;
1579 	}
1580 	return NULL;
1581 debug:
1582 	if (!alloc_debug_processing(s, c->page, object, addr))
1583 		goto another_slab;
1584 
1585 	c->page->inuse++;
1586 	c->page->freelist = object[c->offset];
1587 	c->node = -1;
1588 	goto unlock_out;
1589 }
1590 
1591 /*
1592  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1593  * have the fastpath folded into their functions. So no function call
1594  * overhead for requests that can be satisfied on the fastpath.
1595  *
1596  * The fastpath works by first checking if the lockless freelist can be used.
1597  * If not then __slab_alloc is called for slow processing.
1598  *
1599  * Otherwise we can simply pick the next object from the lockless free list.
1600  */
1601 static __always_inline void *slab_alloc(struct kmem_cache *s,
1602 		gfp_t gfpflags, int node, void *addr)
1603 {
1604 	void **object;
1605 	struct kmem_cache_cpu *c;
1606 	unsigned long flags;
1607 
1608 	local_irq_save(flags);
1609 	c = get_cpu_slab(s, smp_processor_id());
1610 	if (unlikely(!c->freelist || !node_match(c, node)))
1611 
1612 		object = __slab_alloc(s, gfpflags, node, addr, c);
1613 
1614 	else {
1615 		object = c->freelist;
1616 		c->freelist = object[c->offset];
1617 		stat(c, ALLOC_FASTPATH);
1618 	}
1619 	local_irq_restore(flags);
1620 
1621 	if (unlikely((gfpflags & __GFP_ZERO) && object))
1622 		memset(object, 0, c->objsize);
1623 
1624 	return object;
1625 }
1626 
1627 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1628 {
1629 	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1630 }
1631 EXPORT_SYMBOL(kmem_cache_alloc);
1632 
1633 #ifdef CONFIG_NUMA
1634 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1635 {
1636 	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1637 }
1638 EXPORT_SYMBOL(kmem_cache_alloc_node);
1639 #endif
1640 
1641 /*
1642  * Slow path handling. This may still be called frequently since objects
1643  * have a longer lifetime than the cpu slabs in most processing loads.
1644  *
1645  * So we still attempt to reduce cache line usage. Just take the slab
1646  * lock and free the item. If there is no additional partial page
1647  * handling required then we can return immediately.
1648  */
1649 static void __slab_free(struct kmem_cache *s, struct page *page,
1650 				void *x, void *addr, unsigned int offset)
1651 {
1652 	void *prior;
1653 	void **object = (void *)x;
1654 	struct kmem_cache_cpu *c;
1655 
1656 	c = get_cpu_slab(s, raw_smp_processor_id());
1657 	stat(c, FREE_SLOWPATH);
1658 	slab_lock(page);
1659 
1660 	if (unlikely(SlabDebug(page)))
1661 		goto debug;
1662 
1663 checks_ok:
1664 	prior = object[offset] = page->freelist;
1665 	page->freelist = object;
1666 	page->inuse--;
1667 
1668 	if (unlikely(SlabFrozen(page))) {
1669 		stat(c, FREE_FROZEN);
1670 		goto out_unlock;
1671 	}
1672 
1673 	if (unlikely(!page->inuse))
1674 		goto slab_empty;
1675 
1676 	/*
1677 	 * Objects left in the slab. If it was not on the partial list before
1678 	 * then add it.
1679 	 */
1680 	if (unlikely(!prior)) {
1681 		add_partial(get_node(s, page_to_nid(page)), page, 1);
1682 		stat(c, FREE_ADD_PARTIAL);
1683 	}
1684 
1685 out_unlock:
1686 	slab_unlock(page);
1687 	return;
1688 
1689 slab_empty:
1690 	if (prior) {
1691 		/*
1692 		 * Slab still on the partial list.
1693 		 */
1694 		remove_partial(s, page);
1695 		stat(c, FREE_REMOVE_PARTIAL);
1696 	}
1697 	slab_unlock(page);
1698 	stat(c, FREE_SLAB);
1699 	discard_slab(s, page);
1700 	return;
1701 
1702 debug:
1703 	if (!free_debug_processing(s, page, x, addr))
1704 		goto out_unlock;
1705 	goto checks_ok;
1706 }
1707 
1708 /*
1709  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1710  * can perform fastpath freeing without additional function calls.
1711  *
1712  * The fastpath is only possible if we are freeing to the current cpu slab
1713  * of this processor. This is typically the case if we have just allocated
1714  * the item before.
1715  *
1716  * If fastpath is not possible then fall back to __slab_free where we deal
1717  * with all sorts of special processing.
1718  */
1719 static __always_inline void slab_free(struct kmem_cache *s,
1720 			struct page *page, void *x, void *addr)
1721 {
1722 	void **object = (void *)x;
1723 	struct kmem_cache_cpu *c;
1724 	unsigned long flags;
1725 
1726 	local_irq_save(flags);
1727 	c = get_cpu_slab(s, smp_processor_id());
1728 	debug_check_no_locks_freed(object, c->objsize);
1729 	if (likely(page == c->page && c->node >= 0)) {
1730 		object[c->offset] = c->freelist;
1731 		c->freelist = object;
1732 		stat(c, FREE_FASTPATH);
1733 	} else
1734 		__slab_free(s, page, x, addr, c->offset);
1735 
1736 	local_irq_restore(flags);
1737 }
1738 
1739 void kmem_cache_free(struct kmem_cache *s, void *x)
1740 {
1741 	struct page *page;
1742 
1743 	page = virt_to_head_page(x);
1744 
1745 	slab_free(s, page, x, __builtin_return_address(0));
1746 }
1747 EXPORT_SYMBOL(kmem_cache_free);
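
/*
 * Typical usage from the rest of the kernel (a sketch; "struct foo" and
 * "foo_cachep" are made-up names):
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					SLAB_HWCACHE_ALIGN, NULL);
 *	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *
 * Both calls normally stay on the lockless per cpu fastpaths above; only
 * refilling the cpu slab or freeing to a slab that is not the current
 * cpu slab drops into the slow paths.
 */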
1748 
1749 /* Figure out on which slab page the object resides */
1750 static struct page *get_object_page(const void *x)
1751 {
1752 	struct page *page = virt_to_head_page(x);
1753 
1754 	if (!PageSlab(page))
1755 		return NULL;
1756 
1757 	return page;
1758 }
1759 
1760 /*
1761  * Object placement in a slab is made very easy because we always start at
1762  * offset 0. If we tune the size of the object to the alignment then we can
1763  * get the required alignment by putting one properly sized object after
1764  * another.
1765  *
1766  * Notice that the allocation order determines the sizes of the per cpu
1767  * caches. Each processor always has one slab available for allocations.
1768  * Increasing the allocation order reduces the number of times that slabs
1769  * must be moved on and off the partial lists and is therefore a factor in
1770  * locking overhead.
1771  */
1772 
1773 /*
1774  * Minimum / Maximum order of slab pages. This influences locking overhead
1775  * and slab fragmentation. A higher order reduces the number of partial slabs
1776  * and increases the number of allocations possible without having to
1777  * take the list_lock.
1778  */
1779 static int slub_min_order;
1780 static int slub_max_order = DEFAULT_MAX_ORDER;
1781 static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1782 
1783 /*
1784  * Merge control. If this is set then no merging of slab caches will occur.
1785  * (Could be removed. This was introduced to pacify the merge skeptics.)
1786  */
1787 static int slub_nomerge;
1788 
1789 /*
1790  * Calculate the order of allocation given a slab object size.
1791  *
1792  * The order of allocation has significant impact on performance and other
1793  * system components. Generally order 0 allocations should be preferred since
1794  * order 0 does not cause fragmentation in the page allocator. Larger objects
1795  * be problematic to put into order 0 slabs because there may be too much
1796  * unused space left. We go to a higher order if more than 1/8th of the slab
1797  * would be wasted.
1798  *
1799  * In order to reach satisfactory performance we must ensure that a minimum
1800  * number of objects is in one slab. Otherwise we may generate too much
1801  * activity on the partial lists which requires taking the list_lock. This is
1802  * less a concern for large slabs though which are rarely used.
1803  *
1804  * slub_max_order specifies the order where we begin to stop considering the
1805  * number of objects in a slab as critical. If we reach slub_max_order then
1806  * we try to keep the page order as low as possible. So we accept more waste
1807  * of space in favor of a small page order.
1808  *
1809  * Higher order allocations also allow the placement of more objects in a
1810  * slab and thereby reduce object handling overhead. If the user has
1811  * requested a higher minimum order then we start with that one instead of
1812  * the smallest order which will fit the object.
1813  */
1814 static inline int slab_order(int size, int min_objects,
1815 				int max_order, int fract_leftover)
1816 {
1817 	int order;
1818 	int rem;
1819 	int min_order = slub_min_order;
1820 
1821 	for (order = max(min_order,
1822 				fls(min_objects * size - 1) - PAGE_SHIFT);
1823 			order <= max_order; order++) {
1824 
1825 		unsigned long slab_size = PAGE_SIZE << order;
1826 
1827 		if (slab_size < min_objects * size)
1828 			continue;
1829 
1830 		rem = slab_size % size;
1831 
1832 		if (rem <= slab_size / fract_leftover)
1833 			break;
1834 
1835 	}
1836 
1837 	return order;
1838 }
1839 
1840 static inline int calculate_order(int size)
1841 {
1842 	int order;
1843 	int min_objects;
1844 	int fraction;
1845 
1846 	/*
1847 	 * Attempt to find best configuration for a slab. This
1848 	 * works by first attempting to generate a layout with
1849 	 * the best configuration and backing off gradually.
1850 	 *
1851 	 * First we reduce the acceptable waste in a slab. Then
1852 	 * we reduce the minimum objects required in a slab.
1853 	 */
1854 	min_objects = slub_min_objects;
1855 	while (min_objects > 1) {
1856 		fraction = 8;
1857 		while (fraction >= 4) {
1858 			order = slab_order(size, min_objects,
1859 						slub_max_order, fraction);
1860 			if (order <= slub_max_order)
1861 				return order;
1862 			fraction /= 2;
1863 		}
1864 		min_objects /= 2;
1865 	}
1866 
1867 	/*
1868 	 * We were unable to place multiple objects in a slab. Now
1869 	 * lets see if we can place a single object there.
1870 	 */
1871 	order = slab_order(size, 1, slub_max_order, 1);
1872 	if (order <= slub_max_order)
1873 		return order;
1874 
1875 	/*
1876 	 * Doh this slab cannot be placed using slub_max_order.
1877 	 */
1878 	order = slab_order(size, 1, MAX_ORDER, 1);
1879 	if (order <= MAX_ORDER)
1880 		return order;
1881 	return -ENOSYS;
1882 }
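
/*
 * Worked example (assuming 4K pages, slub_min_objects = 8 and a
 * hypothetical 192 byte object): slab_order() starts at order 0, which
 * holds 21 objects and wastes 4096 - 21 * 192 = 64 bytes; that is well
 * under 1/8th of the slab, so order 0 is chosen. Only if no order up to
 * slub_max_order satisfies the waste and object count constraints does
 * calculate_order() fall back to a single object per slab.
 */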
1883 
1884 /*
1885  * Figure out what the alignment of the objects will be.
1886  */
1887 static unsigned long calculate_alignment(unsigned long flags,
1888 		unsigned long align, unsigned long size)
1889 {
1890 	/*
1891 	 * If the user wants hardware cache aligned objects then follow that
1892 	 * suggestion if the object is sufficiently large.
1893 	 *
1894 	 * The hardware cache alignment cannot override the specified
1895 	 * alignment though. If that is greater then use it.
1896 	 */
1897 	if (flags & SLAB_HWCACHE_ALIGN) {
1898 		unsigned long ralign = cache_line_size();
1899 		while (size <= ralign / 2)
1900 			ralign /= 2;
1901 		align = max(align, ralign);
1902 	}
1903 
1904 	if (align < ARCH_SLAB_MINALIGN)
1905 		align = ARCH_SLAB_MINALIGN;
1906 
1907 	return ALIGN(align, sizeof(void *));
1908 }
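
/*
 * Example (assuming cache_line_size() == 64, which is hardware
 * dependent): a 20 byte object with SLAB_HWCACHE_ALIGN halves ralign
 * from 64 to 32 (20 <= 32 but 20 > 16), so such objects end up 32 byte
 * aligned rather than on a full cache line, subject to the caller
 * supplied align and ARCH_SLAB_MINALIGN.
 */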
1909 
1910 static void init_kmem_cache_cpu(struct kmem_cache *s,
1911 			struct kmem_cache_cpu *c)
1912 {
1913 	c->page = NULL;
1914 	c->freelist = NULL;
1915 	c->node = 0;
1916 	c->offset = s->offset / sizeof(void *);
1917 	c->objsize = s->objsize;
1918 #ifdef CONFIG_SLUB_STATS
1919 	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1920 #endif
1921 }
1922 
1923 static void init_kmem_cache_node(struct kmem_cache_node *n)
1924 {
1925 	n->nr_partial = 0;
1926 	spin_lock_init(&n->list_lock);
1927 	INIT_LIST_HEAD(&n->partial);
1928 #ifdef CONFIG_SLUB_DEBUG
1929 	atomic_long_set(&n->nr_slabs, 0);
1930 	INIT_LIST_HEAD(&n->full);
1931 #endif
1932 }
1933 
1934 #ifdef CONFIG_SMP
1935 /*
1936  * Per cpu array for per cpu structures.
1937  *
1938  * The per cpu array places all kmem_cache_cpu structures from one processor
1939  * close together meaning that it becomes possible that multiple per cpu
1940  * structures are contained in one cacheline. This may be particularly
1941  * beneficial for the kmalloc caches.
1942  *
1943  * A desktop system typically has around 60-80 slabs. With 100 here we are
1944  * A desktop system typically has around 60-80 slab caches. With 100 here we are
1945  * here. We must be able to cover all kmalloc caches during bootstrap.
1946  *
1947  * If the per cpu array is exhausted then fall back to kmalloc
1948  * of individual cachelines. No sharing is possible then.
1949  */
1950 #define NR_KMEM_CACHE_CPU 100
1951 
1952 static DEFINE_PER_CPU(struct kmem_cache_cpu,
1953 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1954 
1955 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1956 static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
1957 
1958 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1959 							int cpu, gfp_t flags)
1960 {
1961 	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
1962 
1963 	if (c)
1964 		per_cpu(kmem_cache_cpu_free, cpu) =
1965 				(void *)c->freelist;
1966 	else {
1967 		/* Table overflow: So allocate ourselves */
1968 		c = kmalloc_node(
1969 			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
1970 			flags, cpu_to_node(cpu));
1971 		if (!c)
1972 			return NULL;
1973 	}
1974 
1975 	init_kmem_cache_cpu(s, c);
1976 	return c;
1977 }
1978 
1979 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
1980 {
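	/*
	 * Entries that belong to the static per cpu array are recognized
	 * by their address range and are pushed back onto the
	 * kmem_cache_cpu_free list, reusing c->freelist as the link.
	 * Anything outside that range was kmalloc'ed and is kfree'd.
	 */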
1981 	if (c < per_cpu(kmem_cache_cpu, cpu) ||
1982 			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
1983 		kfree(c);
1984 		return;
1985 	}
1986 	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
1987 	per_cpu(kmem_cache_cpu_free, cpu) = c;
1988 }
1989 
1990 static void free_kmem_cache_cpus(struct kmem_cache *s)
1991 {
1992 	int cpu;
1993 
1994 	for_each_online_cpu(cpu) {
1995 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1996 
1997 		if (c) {
1998 			s->cpu_slab[cpu] = NULL;
1999 			free_kmem_cache_cpu(c, cpu);
2000 		}
2001 	}
2002 }
2003 
2004 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2005 {
2006 	int cpu;
2007 
2008 	for_each_online_cpu(cpu) {
2009 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2010 
2011 		if (c)
2012 			continue;
2013 
2014 		c = alloc_kmem_cache_cpu(s, cpu, flags);
2015 		if (!c) {
2016 			free_kmem_cache_cpus(s);
2017 			return 0;
2018 		}
2019 		s->cpu_slab[cpu] = c;
2020 	}
2021 	return 1;
2022 }
2023 
2024 /*
2025  * Initialize the per cpu array.
2026  */
2027 static void init_alloc_cpu_cpu(int cpu)
2028 {
2029 	int i;
2030 
2031 	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
2032 		return;
2033 
2034 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2035 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2036 
2037 	cpu_set(cpu, kmem_cach_cpu_free_init_once);
2038 }
2039 
2040 static void __init init_alloc_cpu(void)
2041 {
2042 	int cpu;
2043 
2044 	for_each_online_cpu(cpu)
2045 		init_alloc_cpu_cpu(cpu);
2046 }
2047 
2048 #else
2049 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2050 static inline void init_alloc_cpu(void) {}
2051 
2052 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2053 {
2054 	init_kmem_cache_cpu(s, &s->cpu_slab);
2055 	return 1;
2056 }
2057 #endif
2058 
2059 #ifdef CONFIG_NUMA
2060 /*
2061  * No kmalloc_node yet so do it by hand. We know that this is the first
2062  * slab on the node for this slabcache. There are no concurrent accesses
2063  * possible.
2064  *
2065  * Note that this function only works with the cache used to allocate the
2066  * kmem_cache_node structures (kmalloc_caches[0]). This is used for bootstrapping
2067  * memory on a fresh node that has no slab structures yet.
2068  */
2069 static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2070 							   int node)
2071 {
2072 	struct page *page;
2073 	struct kmem_cache_node *n;
2074 	unsigned long flags;
2075 
2076 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2077 
2078 	page = new_slab(kmalloc_caches, gfpflags, node);
2079 
2080 	BUG_ON(!page);
2081 	if (page_to_nid(page) != node) {
2082 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2083 				"node %d\n", node);
2084 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2085 				"in order to be able to continue\n");
2086 	}
2087 
2088 	n = page->freelist;
2089 	BUG_ON(!n);
2090 	page->freelist = get_freepointer(kmalloc_caches, n);
2091 	page->inuse++;
2092 	kmalloc_caches->node[node] = n;
2093 #ifdef CONFIG_SLUB_DEBUG
2094 	init_object(kmalloc_caches, n, 1);
2095 	init_tracking(kmalloc_caches, n);
2096 #endif
2097 	init_kmem_cache_node(n);
2098 	inc_slabs_node(kmalloc_caches, node);
2099 
2100 	/*
2101 	 * lockdep requires consistent irq usage for each lock
2102 	 * so even though there cannot be a race this early in
2103 	 * the boot sequence, we still disable irqs.
2104 	 */
2105 	local_irq_save(flags);
2106 	add_partial(n, page, 0);
2107 	local_irq_restore(flags);
2108 	return n;
2109 }
2110 
2111 static void free_kmem_cache_nodes(struct kmem_cache *s)
2112 {
2113 	int node;
2114 
2115 	for_each_node_state(node, N_NORMAL_MEMORY) {
2116 		struct kmem_cache_node *n = s->node[node];
2117 		if (n && n != &s->local_node)
2118 			kmem_cache_free(kmalloc_caches, n);
2119 		s->node[node] = NULL;
2120 	}
2121 }
2122 
2123 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2124 {
2125 	int node;
2126 	int local_node;
2127 
2128 	if (slab_state >= UP)
2129 		local_node = page_to_nid(virt_to_page(s));
2130 	else
2131 		local_node = 0;
2132 
2133 	for_each_node_state(node, N_NORMAL_MEMORY) {
2134 		struct kmem_cache_node *n;
2135 
2136 		if (local_node == node)
2137 			n = &s->local_node;
2138 		else {
2139 			if (slab_state == DOWN) {
2140 				n = early_kmem_cache_node_alloc(gfpflags,
2141 								node);
2142 				continue;
2143 			}
2144 			n = kmem_cache_alloc_node(kmalloc_caches,
2145 							gfpflags, node);
2146 
2147 			if (!n) {
2148 				free_kmem_cache_nodes(s);
2149 				return 0;
2150 			}
2151 
2152 		}
2153 		s->node[node] = n;
2154 		init_kmem_cache_node(n);
2155 	}
2156 	return 1;
2157 }
2158 #else
2159 static void free_kmem_cache_nodes(struct kmem_cache *s)
2160 {
2161 }
2162 
2163 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2164 {
2165 	init_kmem_cache_node(&s->local_node);
2166 	return 1;
2167 }
2168 #endif
2169 
2170 /*
2171  * calculate_sizes() determines the order and the distribution of data within
2172  * a slab object.
2173  */
2174 static int calculate_sizes(struct kmem_cache *s)
2175 {
2176 	unsigned long flags = s->flags;
2177 	unsigned long size = s->objsize;
2178 	unsigned long align = s->align;
2179 
2180 	/*
2181 	 * Round up object size to the next word boundary. We can only
2182 	 * place the free pointer at word boundaries and this determines
2183 	 * the possible location of the free pointer.
2184 	 */
2185 	size = ALIGN(size, sizeof(void *));
2186 
2187 #ifdef CONFIG_SLUB_DEBUG
2188 	/*
2189 	 * Determine if we can poison the object itself. If the user of
2190 	 * the slab may touch the object after free or before allocation
2191 	 * then we should never poison the object itself.
2192 	 */
2193 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2194 			!s->ctor)
2195 		s->flags |= __OBJECT_POISON;
2196 	else
2197 		s->flags &= ~__OBJECT_POISON;
2198 
2199 
2200 	/*
2201 	 * If we are Redzoning then check if there is some space between the
2202 	 * end of the object and the free pointer. If not then add an
2203 	 * additional word to have some bytes to store Redzone information.
2204 	 */
2205 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2206 		size += sizeof(void *);
2207 #endif
2208 
2209 	/*
2210 	 * With that we have determined the number of bytes in actual use
2211 	 * by the object. This is the potential offset to the free pointer.
2212 	 */
2213 	s->inuse = size;
2214 
2215 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2216 		s->ctor)) {
2217 		/*
2218 		 * Relocate free pointer after the object if it is not
2219 		 * permitted to overwrite the first word of the object on
2220 		 * kmem_cache_free.
2221 		 *
2222 		 * This is the case if we do RCU, have a constructor or
2223 		 * are poisoning the objects.
2224 		 */
2225 		s->offset = size;
2226 		size += sizeof(void *);
2227 	}
2228 
2229 #ifdef CONFIG_SLUB_DEBUG
2230 	if (flags & SLAB_STORE_USER)
2231 		/*
2232 		 * Need to store information about allocs and frees after
2233 		 * the object.
2234 		 */
2235 		size += 2 * sizeof(struct track);
2236 
2237 	if (flags & SLAB_RED_ZONE)
2238 		/*
2239 		 * Add some empty padding so that we can catch
2240 		 * overwrites from earlier objects rather than let
2241 		 * tracking information or the free pointer be
2242 		 * corrupted if a user writes before the start
2243 		 * of the object.
2244 		 */
2245 		size += sizeof(void *);
2246 #endif
2247 
2248 	/*
2249 	 * Determine the alignment based on various parameters that the
2250 	 * user specified and the dynamic determination of cache line size
2251 	 * on bootup.
2252 	 */
2253 	align = calculate_alignment(flags, align, s->objsize);
2254 
2255 	/*
2256 	 * SLUB stores one object immediately after another beginning from
2257 	 * offset 0. In order to align the objects we have to simply size
2258 	 * each object to conform to the alignment.
2259 	 */
2260 	size = ALIGN(size, align);
2261 	s->size = size;
2262 
2263 	s->allocflags = 0;
2264 	if ((flags & __KMALLOC_CACHE) &&
2265 			PAGE_SIZE / size < slub_min_objects) {
2266 		/*
2267 		 * Kmalloc cache that would not have enough objects in
2268 		 * an order 0 page. Kmalloc slabs can fall back to
2269 		 * page allocator order 0 allocations, so take a reasonably
2270 		 * large order that allows us a good number of objects.
2271 		 */
2272 		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
2273 		s->flags |= __PAGE_ALLOC_FALLBACK;
2274 		s->allocflags |= __GFP_NOWARN;
2275 	} else
2276 		s->order = calculate_order(size);
2277 
2278 	if (s->order < 0)
2279 		return 0;
2280 
2281 	if (s->order)
2282 		s->allocflags |= __GFP_COMP;
2283 
2284 	if (s->flags & SLAB_CACHE_DMA)
2285 		s->allocflags |= SLUB_DMA;
2286 
2287 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2288 		s->allocflags |= __GFP_RECLAIMABLE;
2289 
2290 	/*
2291 	 * Determine the number of objects per slab
2292 	 */
2293 	s->objects = (PAGE_SIZE << s->order) / size;
2294 
2295 	return !!s->objects;
2296 
2297 }
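
/*
 * Layout example (a sketch only; assumes a 64 bit build, no constructor,
 * none of the debug or RCU flags and an alignment of at most 32 bytes):
 * an objsize of 30 is rounded up to 32. The first word of a free object
 * may be overwritten, so the freelist pointer stays at offset 0 and
 * s->inuse == s->size == 32, which gives 128 objects if an order 0 slab
 * is used and PAGE_SIZE == 4096.
 */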
2298 
2299 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2300 		const char *name, size_t size,
2301 		size_t align, unsigned long flags,
2302 		void (*ctor)(struct kmem_cache *, void *))
2303 {
2304 	memset(s, 0, kmem_size);
2305 	s->name = name;
2306 	s->ctor = ctor;
2307 	s->objsize = size;
2308 	s->align = align;
2309 	s->flags = kmem_cache_flags(size, flags, name, ctor);
2310 
2311 	if (!calculate_sizes(s))
2312 		goto error;
2313 
2314 	s->refcount = 1;
2315 #ifdef CONFIG_NUMA
2316 	s->remote_node_defrag_ratio = 100;
2317 #endif
2318 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2319 		goto error;
2320 
2321 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2322 		return 1;
2323 	free_kmem_cache_nodes(s);
2324 error:
2325 	if (flags & SLAB_PANIC)
2326 		panic("Cannot create slab %s size=%lu realsize=%u "
2327 			"order=%u offset=%u flags=%lx\n",
2328 			s->name, (unsigned long)size, s->size, s->order,
2329 			s->offset, flags);
2330 	return 0;
2331 }
2332 
2333 /*
2334  * Check if a given pointer is valid
2335  */
2336 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2337 {
2338 	struct page *page;
2339 
2340 	page = get_object_page(object);
2341 
2342 	if (!page || s != page->slab)
2343 		/* No slab or wrong slab */
2344 		return 0;
2345 
2346 	if (!check_valid_pointer(s, page, object))
2347 		return 0;
2348 
2349 	/*
2350 	 * We could also check if the object is on the slab's freelist.
2351 	 * But this would be too expensive and it seems that the main
2352 	 * purpose of kmem_ptr_validate() is to check if the object belongs
2353 	 * to a certain slab.
2354 	 */
2355 	return 1;
2356 }
2357 EXPORT_SYMBOL(kmem_ptr_validate);
2358 
2359 /*
2360  * Determine the size of a slab object
2361  */
2362 unsigned int kmem_cache_size(struct kmem_cache *s)
2363 {
2364 	return s->objsize;
2365 }
2366 EXPORT_SYMBOL(kmem_cache_size);
2367 
2368 const char *kmem_cache_name(struct kmem_cache *s)
2369 {
2370 	return s->name;
2371 }
2372 EXPORT_SYMBOL(kmem_cache_name);
2373 
2374 /*
2375  * Attempt to free all slabs on a node. Return the number of slabs we
2376  * were unable to free.
2377  */
2378 static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2379 			struct list_head *list)
2380 {
2381 	int slabs_inuse = 0;
2382 	unsigned long flags;
2383 	struct page *page, *h;
2384 
2385 	spin_lock_irqsave(&n->list_lock, flags);
2386 	list_for_each_entry_safe(page, h, list, lru)
2387 		if (!page->inuse) {
2388 			list_del(&page->lru);
2389 			discard_slab(s, page);
2390 		} else
2391 			slabs_inuse++;
2392 	spin_unlock_irqrestore(&n->list_lock, flags);
2393 	return slabs_inuse;
2394 }
2395 
2396 /*
2397  * Release all resources used by a slab cache.
2398  */
2399 static inline int kmem_cache_close(struct kmem_cache *s)
2400 {
2401 	int node;
2402 
2403 	flush_all(s);
2404 
2405 	/* Attempt to free all objects */
2406 	free_kmem_cache_cpus(s);
2407 	for_each_node_state(node, N_NORMAL_MEMORY) {
2408 		struct kmem_cache_node *n = get_node(s, node);
2409 
2410 		n->nr_partial -= free_list(s, n, &n->partial);
2411 		if (slabs_node(s, node))
2412 			return 1;
2413 	}
2414 	free_kmem_cache_nodes(s);
2415 	return 0;
2416 }
2417 
2418 /*
2419  * Close a cache and release the kmem_cache structure
2420  * (must be used for caches created using kmem_cache_create)
2421  */
2422 void kmem_cache_destroy(struct kmem_cache *s)
2423 {
2424 	down_write(&slub_lock);
2425 	s->refcount--;
2426 	if (!s->refcount) {
2427 		list_del(&s->list);
2428 		up_write(&slub_lock);
2429 		if (kmem_cache_close(s))
2430 			WARN_ON(1);
2431 		sysfs_slab_remove(s);
2432 	} else
2433 		up_write(&slub_lock);
2434 }
2435 EXPORT_SYMBOL(kmem_cache_destroy);
2436 
2437 /********************************************************************
2438  *		Kmalloc subsystem
2439  *******************************************************************/
2440 
2441 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
2442 EXPORT_SYMBOL(kmalloc_caches);
2443 
2444 static int __init setup_slub_min_order(char *str)
2445 {
2446 	get_option(&str, &slub_min_order);
2447 
2448 	return 1;
2449 }
2450 
2451 __setup("slub_min_order=", setup_slub_min_order);
2452 
2453 static int __init setup_slub_max_order(char *str)
2454 {
2455 	get_option(&str, &slub_max_order);
2456 
2457 	return 1;
2458 }
2459 
2460 __setup("slub_max_order=", setup_slub_max_order);
2461 
2462 static int __init setup_slub_min_objects(char *str)
2463 {
2464 	get_option(&str, &slub_min_objects);
2465 
2466 	return 1;
2467 }
2468 
2469 __setup("slub_min_objects=", setup_slub_min_objects);
2470 
2471 static int __init setup_slub_nomerge(char *str)
2472 {
2473 	slub_nomerge = 1;
2474 	return 1;
2475 }
2476 
2477 __setup("slub_nomerge", setup_slub_nomerge);
2478 
2479 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2480 		const char *name, int size, gfp_t gfp_flags)
2481 {
2482 	unsigned int flags = 0;
2483 
2484 	if (gfp_flags & SLUB_DMA)
2485 		flags = SLAB_CACHE_DMA;
2486 
2487 	down_write(&slub_lock);
2488 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2489 			flags | __KMALLOC_CACHE, NULL))
2490 		goto panic;
2491 
2492 	list_add(&s->list, &slab_caches);
2493 	up_write(&slub_lock);
2494 	if (sysfs_slab_add(s))
2495 		goto panic;
2496 	return s;
2497 
2498 panic:
2499 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2500 }
2501 
2502 #ifdef CONFIG_ZONE_DMA
2503 static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
2504 
2505 static void sysfs_add_func(struct work_struct *w)
2506 {
2507 	struct kmem_cache *s;
2508 
2509 	down_write(&slub_lock);
2510 	list_for_each_entry(s, &slab_caches, list) {
2511 		if (s->flags & __SYSFS_ADD_DEFERRED) {
2512 			s->flags &= ~__SYSFS_ADD_DEFERRED;
2513 			sysfs_slab_add(s);
2514 		}
2515 	}
2516 	up_write(&slub_lock);
2517 }
2518 
2519 static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2520 
2521 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2522 {
2523 	struct kmem_cache *s;
2524 	char *text;
2525 	size_t realsize;
2526 
2527 	s = kmalloc_caches_dma[index];
2528 	if (s)
2529 		return s;
2530 
2531 	/* Dynamically create dma cache */
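	/*
	 * Callers that cannot sleep (no __GFP_WAIT) must not block on
	 * slub_lock, so only try to take it. If the trylock fails we
	 * return whatever is in kmalloc_caches_dma[index], possibly NULL,
	 * and the caller's allocation simply fails this time.
	 */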
2532 	if (flags & __GFP_WAIT)
2533 		down_write(&slub_lock);
2534 	else {
2535 		if (!down_write_trylock(&slub_lock))
2536 			goto out;
2537 	}
2538 
2539 	if (kmalloc_caches_dma[index])
2540 		goto unlock_out;
2541 
2542 	realsize = kmalloc_caches[index].objsize;
2543 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2544 			 (unsigned int)realsize);
2545 	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2546 
2547 	if (!s || !text || !kmem_cache_open(s, flags, text,
2548 			realsize, ARCH_KMALLOC_MINALIGN,
2549 			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2550 		kfree(s);
2551 		kfree(text);
2552 		goto unlock_out;
2553 	}
2554 
2555 	list_add(&s->list, &slab_caches);
2556 	kmalloc_caches_dma[index] = s;
2557 
2558 	schedule_work(&sysfs_add_work);
2559 
2560 unlock_out:
2561 	up_write(&slub_lock);
2562 out:
2563 	return kmalloc_caches_dma[index];
2564 }
2565 #endif
2566 
2567 /*
2568  * Conversion table from small slab sizes (divided by 8) to the index in the
2569  * kmalloc array. This is necessary for slabs up to 192 bytes since we have
2570  * non-power-of-two cache sizes there. The index of larger slabs can be
2571  * determined using fls.
2572  */
2573 static s8 size_index[24] = {
2574 	3,	/* 8 */
2575 	4,	/* 16 */
2576 	5,	/* 24 */
2577 	5,	/* 32 */
2578 	6,	/* 40 */
2579 	6,	/* 48 */
2580 	6,	/* 56 */
2581 	6,	/* 64 */
2582 	1,	/* 72 */
2583 	1,	/* 80 */
2584 	1,	/* 88 */
2585 	1,	/* 96 */
2586 	7,	/* 104 */
2587 	7,	/* 112 */
2588 	7,	/* 120 */
2589 	7,	/* 128 */
2590 	2,	/* 136 */
2591 	2,	/* 144 */
2592 	2,	/* 152 */
2593 	2,	/* 160 */
2594 	2,	/* 168 */
2595 	2,	/* 176 */
2596 	2,	/* 184 */
2597 	2	/* 192 */
2598 };
2599 
2600 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2601 {
2602 	int index;
2603 
2604 	if (size <= 192) {
2605 		if (!size)
2606 			return ZERO_SIZE_PTR;
2607 
2608 		index = size_index[(size - 1) / 8];
2609 	} else
2610 		index = fls(size - 1);
2611 
2612 #ifdef CONFIG_ZONE_DMA
2613 	if (unlikely((flags & SLUB_DMA)))
2614 		return dma_kmalloc_cache(index, flags);
2615 
2616 #endif
2617 	return &kmalloc_caches[index];
2618 }
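
/*
 * Example lookups (illustrative): kmalloc(100) uses
 * size_index[(100 - 1) / 8] == size_index[12] == 7, i.e. the 128 byte
 * cache, while kmalloc(600) takes the fls() path: fls(599) == 10, so
 * the 1024 byte cache is used. With CONFIG_ZONE_DMA the DMA variants
 * are created lazily by dma_kmalloc_cache() on first use.
 */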
2619 
2620 void *__kmalloc(size_t size, gfp_t flags)
2621 {
2622 	struct kmem_cache *s;
2623 
2624 	if (unlikely(size > PAGE_SIZE))
2625 		return kmalloc_large(size, flags);
2626 
2627 	s = get_slab(size, flags);
2628 
2629 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2630 		return s;
2631 
2632 	return slab_alloc(s, flags, -1, __builtin_return_address(0));
2633 }
2634 EXPORT_SYMBOL(__kmalloc);
2635 
2636 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2637 {
2638 	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2639 						get_order(size));
2640 
2641 	if (page)
2642 		return page_address(page);
2643 	else
2644 		return NULL;
2645 }
2646 
2647 #ifdef CONFIG_NUMA
2648 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2649 {
2650 	struct kmem_cache *s;
2651 
2652 	if (unlikely(size > PAGE_SIZE))
2653 		return kmalloc_large_node(size, flags, node);
2654 
2655 	s = get_slab(size, flags);
2656 
2657 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2658 		return s;
2659 
2660 	return slab_alloc(s, flags, node, __builtin_return_address(0));
2661 }
2662 EXPORT_SYMBOL(__kmalloc_node);
2663 #endif
2664 
2665 size_t ksize(const void *object)
2666 {
2667 	struct page *page;
2668 	struct kmem_cache *s;
2669 
2670 	if (unlikely(object == ZERO_SIZE_PTR))
2671 		return 0;
2672 
2673 	page = virt_to_head_page(object);
2674 
2675 	if (unlikely(!PageSlab(page)))
2676 		return PAGE_SIZE << compound_order(page);
2677 
2678 	s = page->slab;
2679 
2680 #ifdef CONFIG_SLUB_DEBUG
2681 	/*
2682 	 * Debugging requires use of the padding between object
2683 	 * and whatever may come after it.
2684 	 */
2685 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2686 		return s->objsize;
2687 
2688 #endif
2689 	/*
2690 	 * If we need to store the freelist pointer back there
2691 	 * or track user information then we can only use the
2692 	 * space before that information.
2693 	 */
2694 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2695 		return s->inuse;
2696 	/*
2697 	 * Else we can use all the padding etc for the allocation
2698 	 */
2699 	return s->size;
2700 }
2701 EXPORT_SYMBOL(ksize);
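
/*
 * For instance (assuming no debug options and a cache without
 * SLAB_DESTROY_BY_RCU or SLAB_STORE_USER): ksize() of a kmalloc(100)
 * allocation reports the full 128 bytes of the backing cache, so the
 * caller may use the padding beyond the requested size.
 */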
2702 
2703 void kfree(const void *x)
2704 {
2705 	struct page *page;
2706 	void *object = (void *)x;
2707 
2708 	if (unlikely(ZERO_OR_NULL_PTR(x)))
2709 		return;
2710 
2711 	page = virt_to_head_page(x);
2712 	if (unlikely(!PageSlab(page))) {
2713 		put_page(page);
2714 		return;
2715 	}
2716 	slab_free(page->slab, page, object, __builtin_return_address(0));
2717 }
2718 EXPORT_SYMBOL(kfree);
2719 
2720 /*
2721  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2722  * the remaining slabs by the number of items in use. The slabs with the
2723  * most items in use come first. New allocations will then fill those up
2724  * and thus they can be removed from the partial lists.
2725  *
2726  * The slabs with the least items are placed last. This results in them
2727  * being allocated from last, increasing the chance that their remaining
2728  * objects are freed and the slabs can eventually be discarded.
2729  */
2730 int kmem_cache_shrink(struct kmem_cache *s)
2731 {
2732 	int node;
2733 	int i;
2734 	struct kmem_cache_node *n;
2735 	struct page *page;
2736 	struct page *t;
2737 	struct list_head *slabs_by_inuse =
2738 		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
2739 	unsigned long flags;
2740 
2741 	if (!slabs_by_inuse)
2742 		return -ENOMEM;
2743 
2744 	flush_all(s);
2745 	for_each_node_state(node, N_NORMAL_MEMORY) {
2746 		n = get_node(s, node);
2747 
2748 		if (!n->nr_partial)
2749 			continue;
2750 
2751 		for (i = 0; i < s->objects; i++)
2752 			INIT_LIST_HEAD(slabs_by_inuse + i);
2753 
2754 		spin_lock_irqsave(&n->list_lock, flags);
2755 
2756 		/*
2757 		 * Build lists indexed by the items in use in each slab.
2758 		 *
2759 		 * Note that concurrent frees may occur while we hold the
2760 		 * list_lock. page->inuse here is the upper limit.
2761 		 */
2762 		list_for_each_entry_safe(page, t, &n->partial, lru) {
2763 			if (!page->inuse && slab_trylock(page)) {
2764 				/*
2765 				 * Must hold slab lock here because slab_free
2766 				 * may have freed the last object and be
2767 				 * waiting to release the slab.
2768 				 */
2769 				list_del(&page->lru);
2770 				n->nr_partial--;
2771 				slab_unlock(page);
2772 				discard_slab(s, page);
2773 			} else {
2774 				list_move(&page->lru,
2775 				slabs_by_inuse + page->inuse);
2776 			}
2777 		}
2778 
2779 		/*
2780 		 * Rebuild the partial list with the slabs filled up most
2781 		 * first and the least used slabs at the end.
2782 		 */
2783 		for (i = s->objects - 1; i >= 0; i--)
2784 			list_splice(slabs_by_inuse + i, n->partial.prev);
2785 
2786 		spin_unlock_irqrestore(&n->list_lock, flags);
2787 	}
2788 
2789 	kfree(slabs_by_inuse);
2790 	return 0;
2791 }
2792 EXPORT_SYMBOL(kmem_cache_shrink);
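
/*
 * Sketch of the effect (hypothetical numbers): if a node's partial list
 * holds slabs with inuse counts 0, 7 and 3, the empty slab is discarded
 * (assuming it can be locked) and the list is rebuilt as 7 followed by 3,
 * since the buckets are spliced back in descending inuse order.
 */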
2793 
2794 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2795 static int slab_mem_going_offline_callback(void *arg)
2796 {
2797 	struct kmem_cache *s;
2798 
2799 	down_read(&slub_lock);
2800 	list_for_each_entry(s, &slab_caches, list)
2801 		kmem_cache_shrink(s);
2802 	up_read(&slub_lock);
2803 
2804 	return 0;
2805 }
2806 
2807 static void slab_mem_offline_callback(void *arg)
2808 {
2809 	struct kmem_cache_node *n;
2810 	struct kmem_cache *s;
2811 	struct memory_notify *marg = arg;
2812 	int offline_node;
2813 
2814 	offline_node = marg->status_change_nid;
2815 
2816 	/*
2817 	 * If the node still has available memory then we still need its
2818 	 * kmem_cache_node structure, so there is nothing to do here.
2819 	 */
2820 	if (offline_node < 0)
2821 		return;
2822 
2823 	down_read(&slub_lock);
2824 	list_for_each_entry(s, &slab_caches, list) {
2825 		n = get_node(s, offline_node);
2826 		if (n) {
2827 			/*
2828 			 * if n->nr_slabs > 0, slabs still exist on the node
2829 			 * that is going down. We were unable to free them,
2830 			 * and the offline_pages() function shouldn't call this
2831 			 * callback. So, we must fail.
2832 			 */
2833 			BUG_ON(slabs_node(s, offline_node));
2834 
2835 			s->node[offline_node] = NULL;
2836 			kmem_cache_free(kmalloc_caches, n);
2837 		}
2838 	}
2839 	up_read(&slub_lock);
2840 }
2841 
2842 static int slab_mem_going_online_callback(void *arg)
2843 {
2844 	struct kmem_cache_node *n;
2845 	struct kmem_cache *s;
2846 	struct memory_notify *marg = arg;
2847 	int nid = marg->status_change_nid;
2848 	int ret = 0;
2849 
2850 	/*
2851 	 * If the node's memory is already available, then kmem_cache_node is
2852 	 * already created. Nothing to do.
2853 	 */
2854 	if (nid < 0)
2855 		return 0;
2856 
2857 	/*
2858 	 * We are bringing a node online. No memory is available yet. We must
2859 	 * allocate a kmem_cache_node structure in order to bring the node
2860 	 * online.
2861 	 */
2862 	down_read(&slub_lock);
2863 	list_for_each_entry(s, &slab_caches, list) {
2864 		/*
2865 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
2866 		 *      since memory is not yet available from the node that
2867 		 *      is brought up.
2868 		 */
2869 		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2870 		if (!n) {
2871 			ret = -ENOMEM;
2872 			goto out;
2873 		}
2874 		init_kmem_cache_node(n);
2875 		s->node[nid] = n;
2876 	}
2877 out:
2878 	up_read(&slub_lock);
2879 	return ret;
2880 }
2881 
2882 static int slab_memory_callback(struct notifier_block *self,
2883 				unsigned long action, void *arg)
2884 {
2885 	int ret = 0;
2886 
2887 	switch (action) {
2888 	case MEM_GOING_ONLINE:
2889 		ret = slab_mem_going_online_callback(arg);
2890 		break;
2891 	case MEM_GOING_OFFLINE:
2892 		ret = slab_mem_going_offline_callback(arg);
2893 		break;
2894 	case MEM_OFFLINE:
2895 	case MEM_CANCEL_ONLINE:
2896 		slab_mem_offline_callback(arg);
2897 		break;
2898 	case MEM_ONLINE:
2899 	case MEM_CANCEL_OFFLINE:
2900 		break;
2901 	}
2902 
2903 	ret = notifier_from_errno(ret);
2904 	return ret;
2905 }
2906 
2907 #endif /* CONFIG_MEMORY_HOTPLUG */
2908 
2909 /********************************************************************
2910  *			Basic setup of slabs
2911  *******************************************************************/
2912 
2913 void __init kmem_cache_init(void)
2914 {
2915 	int i;
2916 	int caches = 0;
2917 
2918 	init_alloc_cpu();
2919 
2920 #ifdef CONFIG_NUMA
2921 	/*
2922 	 * Must first have the slab cache available for the allocations of the
2923 	 * struct kmem_cache_node's. There is special bootstrap code in
2924 	 * kmem_cache_open for slab_state == DOWN.
2925 	 */
2926 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2927 		sizeof(struct kmem_cache_node), GFP_KERNEL);
2928 	kmalloc_caches[0].refcount = -1;
2929 	caches++;
2930 
2931 	hotplug_memory_notifier(slab_memory_callback, 1);
2932 #endif
2933 
2934 	/* Able to allocate the per node structures */
2935 	slab_state = PARTIAL;
2936 
2937 	/* Caches that are not of a power-of-two size */
2938 	if (KMALLOC_MIN_SIZE <= 64) {
2939 		create_kmalloc_cache(&kmalloc_caches[1],
2940 				"kmalloc-96", 96, GFP_KERNEL);
2941 		caches++;
2942 	}
2943 	if (KMALLOC_MIN_SIZE <= 128) {
2944 		create_kmalloc_cache(&kmalloc_caches[2],
2945 				"kmalloc-192", 192, GFP_KERNEL);
2946 		caches++;
2947 	}
2948 
2949 	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
2950 		create_kmalloc_cache(&kmalloc_caches[i],
2951 			"kmalloc", 1 << i, GFP_KERNEL);
2952 		caches++;
2953 	}
2954 
2955 
2956 	/*
2957 	 * Patch up the size_index table if we have strange large alignment
2958 	 * requirements for the kmalloc array. This is only the case for
2959 	 * MIPS it seems. The standard arches will not generate any code here.
2960 	 *
2961 	 * Largest permitted alignment is 256 bytes due to the way we
2962 	 * handle the index determination for the smaller caches.
2963 	 *
2964 	 * Make sure that nothing crazy happens if someone starts tinkering
2965 	 * around with ARCH_KMALLOC_MINALIGN
2966 	 */
2967 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2968 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
2969 
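	/*
	 * Example (hypothetical): on an architecture where
	 * KMALLOC_MIN_SIZE is 64, KMALLOC_SHIFT_LOW is 6, so the loop
	 * below redirects the entries for sizes 8..56 to the 64 byte
	 * cache; the power of two caches below that size are never
	 * created by the loop above.
	 */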
2970 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
2971 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
2972 
2973 	slab_state = UP;
2974 
2975 	/* Provide the correct kmalloc names now that the caches are up */
2976 	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
2977 		kmalloc_caches[i].name =
2978 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
2979 
2980 #ifdef CONFIG_SMP
2981 	register_cpu_notifier(&slab_notifier);
2982 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
2983 				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
2984 #else
2985 	kmem_size = sizeof(struct kmem_cache);
2986 #endif
2987 
2988 	printk(KERN_INFO
2989 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
2990 		" CPUs=%d, Nodes=%d\n",
2991 		caches, cache_line_size(),
2992 		slub_min_order, slub_max_order, slub_min_objects,
2993 		nr_cpu_ids, nr_node_ids);
2994 }
2995 
2996 /*
2997  * Find a mergeable slab cache
2998  */
2999 static int slab_unmergeable(struct kmem_cache *s)
3000 {
3001 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3002 		return 1;
3003 
3004 	if ((s->flags & __PAGE_ALLOC_FALLBACK))
3005 		return 1;
3006 
3007 	if (s->ctor)
3008 		return 1;
3009 
3010 	/*
3011 	 * We may have set a slab to be unmergeable during bootstrap.
3012 	 */
3013 	if (s->refcount < 0)
3014 		return 1;
3015 
3016 	return 0;
3017 }
3018 
3019 static struct kmem_cache *find_mergeable(size_t size,
3020 		size_t align, unsigned long flags, const char *name,
3021 		void (*ctor)(struct kmem_cache *, void *))
3022 {
3023 	struct kmem_cache *s;
3024 
3025 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3026 		return NULL;
3027 
3028 	if (ctor)
3029 		return NULL;
3030 
3031 	size = ALIGN(size, sizeof(void *));
3032 	align = calculate_alignment(flags, align, size);
3033 	size = ALIGN(size, align);
3034 	flags = kmem_cache_flags(size, flags, name, NULL);
3035 
3036 	list_for_each_entry(s, &slab_caches, list) {
3037 		if (slab_unmergeable(s))
3038 			continue;
3039 
3040 		if (size > s->size)
3041 			continue;
3042 
3043 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3044 				continue;
3045 		/*
3046 		 * Check if alignment is compatible.
3047 		 * Courtesy of Adrian Drzewiecki
3048 		 */
3049 		if ((s->size & ~(align - 1)) != s->size)
3050 			continue;
3051 
3052 		if (s->size - size >= sizeof(void *))
3053 			continue;
3054 
3055 		return s;
3056 	}
3057 	return NULL;
3058 }
3059 
3060 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3061 		size_t align, unsigned long flags,
3062 		void (*ctor)(struct kmem_cache *, void *))
3063 {
3064 	struct kmem_cache *s;
3065 
3066 	down_write(&slub_lock);
3067 	s = find_mergeable(size, align, flags, name, ctor);
3068 	if (s) {
3069 		int cpu;
3070 
3071 		s->refcount++;
3072 		/*
3073 		 * Adjust the object sizes so that we clear
3074 		 * the complete object on kzalloc.
3075 		 */
3076 		s->objsize = max(s->objsize, (int)size);
3077 
3078 		/*
3079 		 * And then we need to update the object size in the
3080 		 * per cpu structures
3081 		 */
3082 		for_each_online_cpu(cpu)
3083 			get_cpu_slab(s, cpu)->objsize = s->objsize;
3084 
3085 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3086 		up_write(&slub_lock);
3087 
3088 		if (sysfs_slab_alias(s, name))
3089 			goto err;
3090 		return s;
3091 	}
3092 
3093 	s = kmalloc(kmem_size, GFP_KERNEL);
3094 	if (s) {
3095 		if (kmem_cache_open(s, GFP_KERNEL, name,
3096 				size, align, flags, ctor)) {
3097 			list_add(&s->list, &slab_caches);
3098 			up_write(&slub_lock);
3099 			if (sysfs_slab_add(s))
3100 				goto err;
3101 			return s;
3102 		}
3103 		kfree(s);
3104 	}
3105 	up_write(&slub_lock);
3106 
3107 err:
3108 	if (flags & SLAB_PANIC)
3109 		panic("Cannot create slabcache %s\n", name);
3110 	else
3111 		s = NULL;
3112 	return s;
3113 }
3114 EXPORT_SYMBOL(kmem_cache_create);
3115 
3116 #ifdef CONFIG_SMP
3117 /*
3118  * Use the cpu notifier to ensure that the cpu slabs are flushed when
3119  * necessary.
3120  */
3121 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3122 		unsigned long action, void *hcpu)
3123 {
3124 	long cpu = (long)hcpu;
3125 	struct kmem_cache *s;
3126 	unsigned long flags;
3127 
3128 	switch (action) {
3129 	case CPU_UP_PREPARE:
3130 	case CPU_UP_PREPARE_FROZEN:
3131 		init_alloc_cpu_cpu(cpu);
3132 		down_read(&slub_lock);
3133 		list_for_each_entry(s, &slab_caches, list)
3134 			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3135 							GFP_KERNEL);
3136 		up_read(&slub_lock);
3137 		break;
3138 
3139 	case CPU_UP_CANCELED:
3140 	case CPU_UP_CANCELED_FROZEN:
3141 	case CPU_DEAD:
3142 	case CPU_DEAD_FROZEN:
3143 		down_read(&slub_lock);
3144 		list_for_each_entry(s, &slab_caches, list) {
3145 			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3146 
3147 			local_irq_save(flags);
3148 			__flush_cpu_slab(s, cpu);
3149 			local_irq_restore(flags);
3150 			free_kmem_cache_cpu(c, cpu);
3151 			s->cpu_slab[cpu] = NULL;
3152 		}
3153 		up_read(&slub_lock);
3154 		break;
3155 	default:
3156 		break;
3157 	}
3158 	return NOTIFY_OK;
3159 }
3160 
3161 static struct notifier_block __cpuinitdata slab_notifier = {
3162 	.notifier_call = slab_cpuup_callback
3163 };
3164 
3165 #endif
3166 
3167 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
3168 {
3169 	struct kmem_cache *s;
3170 
3171 	if (unlikely(size > PAGE_SIZE))
3172 		return kmalloc_large(size, gfpflags);
3173 
3174 	s = get_slab(size, gfpflags);
3175 
3176 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3177 		return s;
3178 
3179 	return slab_alloc(s, gfpflags, -1, caller);
3180 }
3181 
3182 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3183 					int node, void *caller)
3184 {
3185 	struct kmem_cache *s;
3186 
3187 	if (unlikely(size > PAGE_SIZE))
3188 		return kmalloc_large_node(size, gfpflags, node);
3189 
3190 	s = get_slab(size, gfpflags);
3191 
3192 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3193 		return s;
3194 
3195 	return slab_alloc(s, gfpflags, node, caller);
3196 }
3197 
3198 #if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
3199 static unsigned long count_partial(struct kmem_cache_node *n)
3200 {
3201 	unsigned long flags;
3202 	unsigned long x = 0;
3203 	struct page *page;
3204 
3205 	spin_lock_irqsave(&n->list_lock, flags);
3206 	list_for_each_entry(page, &n->partial, lru)
3207 		x += page->inuse;
3208 	spin_unlock_irqrestore(&n->list_lock, flags);
3209 	return x;
3210 }
3211 #endif
3212 
3213 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
3214 static int validate_slab(struct kmem_cache *s, struct page *page,
3215 						unsigned long *map)
3216 {
3217 	void *p;
3218 	void *addr = page_address(page);
3219 
3220 	if (!check_slab(s, page) ||
3221 			!on_freelist(s, page, NULL))
3222 		return 0;
3223 
3224 	/* Now we know that a valid freelist exists */
3225 	bitmap_zero(map, s->objects);
3226 
3227 	for_each_free_object(p, s, page->freelist) {
3228 		set_bit(slab_index(p, s, addr), map);
3229 		if (!check_object(s, page, p, 0))
3230 			return 0;
3231 	}
3232 
3233 	for_each_object(p, s, addr)
3234 		if (!test_bit(slab_index(p, s, addr), map))
3235 			if (!check_object(s, page, p, 1))
3236 				return 0;
3237 	return 1;
3238 }
3239 
3240 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3241 						unsigned long *map)
3242 {
3243 	if (slab_trylock(page)) {
3244 		validate_slab(s, page, map);
3245 		slab_unlock(page);
3246 	} else
3247 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3248 			s->name, page);
3249 
3250 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3251 		if (!SlabDebug(page))
3252 			printk(KERN_ERR "SLUB %s: SlabDebug not set "
3253 				"on slab 0x%p\n", s->name, page);
3254 	} else {
3255 		if (SlabDebug(page))
3256 			printk(KERN_ERR "SLUB %s: SlabDebug set on "
3257 				"slab 0x%p\n", s->name, page);
3258 	}
3259 }
3260 
3261 static int validate_slab_node(struct kmem_cache *s,
3262 		struct kmem_cache_node *n, unsigned long *map)
3263 {
3264 	unsigned long count = 0;
3265 	struct page *page;
3266 	unsigned long flags;
3267 
3268 	spin_lock_irqsave(&n->list_lock, flags);
3269 
3270 	list_for_each_entry(page, &n->partial, lru) {
3271 		validate_slab_slab(s, page, map);
3272 		count++;
3273 	}
3274 	if (count != n->nr_partial)
3275 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3276 			"counter=%ld\n", s->name, count, n->nr_partial);
3277 
3278 	if (!(s->flags & SLAB_STORE_USER))
3279 		goto out;
3280 
3281 	list_for_each_entry(page, &n->full, lru) {
3282 		validate_slab_slab(s, page, map);
3283 		count++;
3284 	}
3285 	if (count != atomic_long_read(&n->nr_slabs))
3286 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3287 			"counter=%ld\n", s->name, count,
3288 			atomic_long_read(&n->nr_slabs));
3289 
3290 out:
3291 	spin_unlock_irqrestore(&n->list_lock, flags);
3292 	return count;
3293 }
3294 
3295 static long validate_slab_cache(struct kmem_cache *s)
3296 {
3297 	int node;
3298 	unsigned long count = 0;
3299 	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
3300 				sizeof(unsigned long), GFP_KERNEL);
3301 
3302 	if (!map)
3303 		return -ENOMEM;
3304 
3305 	flush_all(s);
3306 	for_each_node_state(node, N_NORMAL_MEMORY) {
3307 		struct kmem_cache_node *n = get_node(s, node);
3308 
3309 		count += validate_slab_node(s, n, map);
3310 	}
3311 	kfree(map);
3312 	return count;
3313 }
3314 
3315 #ifdef SLUB_RESILIENCY_TEST
3316 static void resiliency_test(void)
3317 {
3318 	u8 *p;
3319 
3320 	printk(KERN_ERR "SLUB resiliency testing\n");
3321 	printk(KERN_ERR "-----------------------\n");
3322 	printk(KERN_ERR "A. Corruption after allocation\n");
3323 
3324 	p = kzalloc(16, GFP_KERNEL);
3325 	p[16] = 0x12;
3326 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3327 			" 0x12->0x%p\n\n", p + 16);
3328 
3329 	validate_slab_cache(kmalloc_caches + 4);
3330 
3331 	/* Hmmm... The next two are dangerous */
3332 	p = kzalloc(32, GFP_KERNEL);
3333 	p[32 + sizeof(void *)] = 0x34;
3334 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3335 			" 0x34 -> -0x%p\n", p);
3336 	printk(KERN_ERR
3337 		"If allocated object is overwritten then not detectable\n\n");
3338 
3339 	validate_slab_cache(kmalloc_caches + 5);
3340 	p = kzalloc(64, GFP_KERNEL);
3341 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3342 	*p = 0x56;
3343 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3344 									p);
3345 	printk(KERN_ERR
3346 		"If allocated object is overwritten then not detectable\n\n");
3347 	validate_slab_cache(kmalloc_caches + 6);
3348 
3349 	printk(KERN_ERR "\nB. Corruption after free\n");
3350 	p = kzalloc(128, GFP_KERNEL);
3351 	kfree(p);
3352 	*p = 0x78;
3353 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3354 	validate_slab_cache(kmalloc_caches + 7);
3355 
3356 	p = kzalloc(256, GFP_KERNEL);
3357 	kfree(p);
3358 	p[50] = 0x9a;
3359 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3360 			p);
3361 	validate_slab_cache(kmalloc_caches + 8);
3362 
3363 	p = kzalloc(512, GFP_KERNEL);
3364 	kfree(p);
3365 	p[512] = 0xab;
3366 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3367 	validate_slab_cache(kmalloc_caches + 9);
3368 }
3369 #else
3370 static void resiliency_test(void) {};
3371 #endif
3372 
3373 /*
3374  * Generate lists of code addresses where slabcache objects are allocated
3375  * and freed.
3376  */
3377 
3378 struct location {
3379 	unsigned long count;
3380 	void *addr;
3381 	long long sum_time;
3382 	long min_time;
3383 	long max_time;
3384 	long min_pid;
3385 	long max_pid;
3386 	cpumask_t cpus;
3387 	nodemask_t nodes;
3388 };
3389 
3390 struct loc_track {
3391 	unsigned long max;
3392 	unsigned long count;
3393 	struct location *loc;
3394 };
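
/*
 * The loc array is kept sorted by caller address; add_location() below
 * does a binary search to find an existing entry and inserts new ones
 * in place with memmove(), doubling the array when it fills up.
 */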
3395 
3396 static void free_loc_track(struct loc_track *t)
3397 {
3398 	if (t->max)
3399 		free_pages((unsigned long)t->loc,
3400 			get_order(sizeof(struct location) * t->max));
3401 }
3402 
3403 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3404 {
3405 	struct location *l;
3406 	int order;
3407 
3408 	order = get_order(sizeof(struct location) * max);
3409 
3410 	l = (void *)__get_free_pages(flags, order);
3411 	if (!l)
3412 		return 0;
3413 
3414 	if (t->count) {
3415 		memcpy(l, t->loc, sizeof(struct location) * t->count);
3416 		free_loc_track(t);
3417 	}
3418 	t->max = max;
3419 	t->loc = l;
3420 	return 1;
3421 }
3422 
3423 static int add_location(struct loc_track *t, struct kmem_cache *s,
3424 				const struct track *track)
3425 {
3426 	long start, end, pos;
3427 	struct location *l;
3428 	void *caddr;
3429 	unsigned long age = jiffies - track->when;
3430 
3431 	start = -1;
3432 	end = t->count;
3433 
3434 	for ( ; ; ) {
3435 		pos = start + (end - start + 1) / 2;
3436 
3437 		/*
3438 		 * There is nothing at "end". If we end up there
3439 		 * we need to insert something before end.
3440 		 */
3441 		if (pos == end)
3442 			break;
3443 
3444 		caddr = t->loc[pos].addr;
3445 		if (track->addr == caddr) {
3446 
3447 			l = &t->loc[pos];
3448 			l->count++;
3449 			if (track->when) {
3450 				l->sum_time += age;
3451 				if (age < l->min_time)
3452 					l->min_time = age;
3453 				if (age > l->max_time)
3454 					l->max_time = age;
3455 
3456 				if (track->pid < l->min_pid)
3457 					l->min_pid = track->pid;
3458 				if (track->pid > l->max_pid)
3459 					l->max_pid = track->pid;
3460 
3461 				cpu_set(track->cpu, l->cpus);
3462 			}
3463 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3464 			return 1;
3465 		}
3466 
3467 		if (track->addr < caddr)
3468 			end = pos;
3469 		else
3470 			start = pos;
3471 	}
3472 
3473 	/*
3474 	 * Not found. Insert new tracking element.
3475 	 */
3476 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3477 		return 0;
3478 
3479 	l = t->loc + pos;
3480 	if (pos < t->count)
3481 		memmove(l + 1, l,
3482 			(t->count - pos) * sizeof(struct location));
3483 	t->count++;
3484 	l->count = 1;
3485 	l->addr = track->addr;
3486 	l->sum_time = age;
3487 	l->min_time = age;
3488 	l->max_time = age;
3489 	l->min_pid = track->pid;
3490 	l->max_pid = track->pid;
3491 	cpus_clear(l->cpus);
3492 	cpu_set(track->cpu, l->cpus);
3493 	nodes_clear(l->nodes);
3494 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3495 	return 1;
3496 }
3497 
3498 static void process_slab(struct loc_track *t, struct kmem_cache *s,
3499 		struct page *page, enum track_item alloc)
3500 {
3501 	void *addr = page_address(page);
3502 	DECLARE_BITMAP(map, s->objects);
3503 	void *p;
3504 
3505 	bitmap_zero(map, s->objects);
3506 	for_each_free_object(p, s, page->freelist)
3507 		set_bit(slab_index(p, s, addr), map);
3508 
3509 	for_each_object(p, s, addr)
3510 		if (!test_bit(slab_index(p, s, addr), map))
3511 			add_location(t, s, get_track(s, p, alloc));
3512 }
3513 
3514 static int list_locations(struct kmem_cache *s, char *buf,
3515 					enum track_item alloc)
3516 {
3517 	int len = 0;
3518 	unsigned long i;
3519 	struct loc_track t = { 0, 0, NULL };
3520 	int node;
3521 
3522 	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3523 			GFP_TEMPORARY))
3524 		return sprintf(buf, "Out of memory\n");
3525 
3526 	/* Push back cpu slabs */
3527 	flush_all(s);
3528 
3529 	for_each_node_state(node, N_NORMAL_MEMORY) {
3530 		struct kmem_cache_node *n = get_node(s, node);
3531 		unsigned long flags;
3532 		struct page *page;
3533 
3534 		if (!atomic_long_read(&n->nr_slabs))
3535 			continue;
3536 
3537 		spin_lock_irqsave(&n->list_lock, flags);
3538 		list_for_each_entry(page, &n->partial, lru)
3539 			process_slab(&t, s, page, alloc);
3540 		list_for_each_entry(page, &n->full, lru)
3541 			process_slab(&t, s, page, alloc);
3542 		spin_unlock_irqrestore(&n->list_lock, flags);
3543 	}
3544 
3545 	for (i = 0; i < t.count; i++) {
3546 		struct location *l = &t.loc[i];
3547 
3548 		if (len > PAGE_SIZE - 100)
3549 			break;
3550 		len += sprintf(buf + len, "%7ld ", l->count);
3551 
3552 		if (l->addr)
3553 			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3554 		else
3555 			len += sprintf(buf + len, "<not-available>");
3556 
3557 		if (l->sum_time != l->min_time) {
3558 			unsigned long remainder;
3559 
3560 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3561 			l->min_time,
3562 			div_long_long_rem(l->sum_time, l->count, &remainder),
3563 			l->max_time);
3564 		} else
3565 			len += sprintf(buf + len, " age=%ld",
3566 				l->min_time);
3567 
3568 		if (l->min_pid != l->max_pid)
3569 			len += sprintf(buf + len, " pid=%ld-%ld",
3570 				l->min_pid, l->max_pid);
3571 		else
3572 			len += sprintf(buf + len, " pid=%ld",
3573 				l->min_pid);
3574 
3575 		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
3576 				len < PAGE_SIZE - 60) {
3577 			len += sprintf(buf + len, " cpus=");
3578 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3579 					l->cpus);
3580 		}
3581 
3582 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3583 				len < PAGE_SIZE - 60) {
3584 			len += sprintf(buf + len, " nodes=");
3585 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3586 					l->nodes);
3587 		}
3588 
3589 		len += sprintf(buf + len, "\n");
3590 	}
3591 
3592 	free_loc_track(&t);
3593 	if (!t.count)
3594 		len += sprintf(buf, "No data\n");
3595 	return len;
3596 }
3597 
3598 enum slab_stat_type {
3599 	SL_FULL,
3600 	SL_PARTIAL,
3601 	SL_CPU,
3602 	SL_OBJECTS
3603 };
3604 
3605 #define SO_FULL		(1 << SL_FULL)
3606 #define SO_PARTIAL	(1 << SL_PARTIAL)
3607 #define SO_CPU		(1 << SL_CPU)
3608 #define SO_OBJECTS	(1 << SL_OBJECTS)
3609 
3610 static ssize_t show_slab_objects(struct kmem_cache *s,
3611 			    char *buf, unsigned long flags)
3612 {
3613 	unsigned long total = 0;
3614 	int cpu;
3615 	int node;
3616 	int x;
3617 	unsigned long *nodes;
3618 	unsigned long *per_cpu;
3619 
3620 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3621 	if (!nodes)
3622 		return -ENOMEM;
3623 	per_cpu = nodes + nr_node_ids;
3624 
3625 	for_each_possible_cpu(cpu) {
3626 		struct page *page;
3627 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3628 
3629 		if (!c)
3630 			continue;
3631 
3632 		page = c->page;
3633 		node = c->node;
3634 		if (node < 0)
3635 			continue;
3636 		if (page) {
3637 			if (flags & SO_CPU) {
3638 				if (flags & SO_OBJECTS)
3639 					x = page->inuse;
3640 				else
3641 					x = 1;
3642 				total += x;
3643 				nodes[node] += x;
3644 			}
3645 			per_cpu[node]++;
3646 		}
3647 	}
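
	/*
	 * per_cpu[node] now holds the number of cpu slabs on each node.
	 * It is subtracted below so that SO_FULL only counts slabs that
	 * are neither per cpu nor on a partial list.
	 */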
3648 
3649 	for_each_node_state(node, N_NORMAL_MEMORY) {
3650 		struct kmem_cache_node *n = get_node(s, node);
3651 
3652 		if (flags & SO_PARTIAL) {
3653 			if (flags & SO_OBJECTS)
3654 				x = count_partial(n);
3655 			else
3656 				x = n->nr_partial;
3657 			total += x;
3658 			nodes[node] += x;
3659 		}
3660 
3661 		if (flags & SO_FULL) {
3662 			int full_slabs = atomic_long_read(&n->nr_slabs)
3663 					- per_cpu[node]
3664 					- n->nr_partial;
3665 
3666 			if (flags & SO_OBJECTS)
3667 				x = full_slabs * s->objects;
3668 			else
3669 				x = full_slabs;
3670 			total += x;
3671 			nodes[node] += x;
3672 		}
3673 	}
3674 
3675 	x = sprintf(buf, "%lu", total);
3676 #ifdef CONFIG_NUMA
3677 	for_each_node_state(node, N_NORMAL_MEMORY)
3678 		if (nodes[node])
3679 			x += sprintf(buf + x, " N%d=%lu",
3680 					node, nodes[node]);
3681 #endif
3682 	kfree(nodes);
3683 	return x + sprintf(buf + x, "\n");
3684 }
3685 
3686 static int any_slab_objects(struct kmem_cache *s)
3687 {
3688 	int node;
3689 	int cpu;
3690 
3691 	for_each_possible_cpu(cpu) {
3692 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3693 
3694 		if (c && c->page)
3695 			return 1;
3696 	}
3697 
3698 	for_each_online_node(node) {
3699 		struct kmem_cache_node *n = get_node(s, node);
3700 
3701 		if (!n)
3702 			continue;
3703 
3704 		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
3705 			return 1;
3706 	}
3707 	return 0;
3708 }
3709 
3710 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3711 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
3712 
3713 struct slab_attribute {
3714 	struct attribute attr;
3715 	ssize_t (*show)(struct kmem_cache *s, char *buf);
3716 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3717 };
3718 
3719 #define SLAB_ATTR_RO(_name) \
3720 	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3721 
3722 #define SLAB_ATTR(_name) \
3723 	static struct slab_attribute _name##_attr =  \
3724 	__ATTR(_name, 0644, _name##_show, _name##_store)
3725 
3726 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3727 {
3728 	return sprintf(buf, "%d\n", s->size);
3729 }
3730 SLAB_ATTR_RO(slab_size);
3731 
3732 static ssize_t align_show(struct kmem_cache *s, char *buf)
3733 {
3734 	return sprintf(buf, "%d\n", s->align);
3735 }
3736 SLAB_ATTR_RO(align);
3737 
3738 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3739 {
3740 	return sprintf(buf, "%d\n", s->objsize);
3741 }
3742 SLAB_ATTR_RO(object_size);
3743 
3744 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3745 {
3746 	return sprintf(buf, "%d\n", s->objects);
3747 }
3748 SLAB_ATTR_RO(objs_per_slab);
3749 
3750 static ssize_t order_show(struct kmem_cache *s, char *buf)
3751 {
3752 	return sprintf(buf, "%d\n", s->order);
3753 }
3754 SLAB_ATTR_RO(order);
3755 
3756 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3757 {
3758 	if (s->ctor) {
3759 		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3760 
3761 		return n + sprintf(buf + n, "\n");
3762 	}
3763 	return 0;
3764 }
3765 SLAB_ATTR_RO(ctor);
3766 
3767 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3768 {
3769 	return sprintf(buf, "%d\n", s->refcount - 1);
3770 }
3771 SLAB_ATTR_RO(aliases);
3772 
3773 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3774 {
3775 	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
3776 }
3777 SLAB_ATTR_RO(slabs);
3778 
3779 static ssize_t partial_show(struct kmem_cache *s, char *buf)
3780 {
3781 	return show_slab_objects(s, buf, SO_PARTIAL);
3782 }
3783 SLAB_ATTR_RO(partial);
3784 
3785 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3786 {
3787 	return show_slab_objects(s, buf, SO_CPU);
3788 }
3789 SLAB_ATTR_RO(cpu_slabs);
3790 
3791 static ssize_t objects_show(struct kmem_cache *s, char *buf)
3792 {
3793 	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
3794 }
3795 SLAB_ATTR_RO(objects);
3796 
3797 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3798 {
3799 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3800 }
3801 
3802 static ssize_t sanity_checks_store(struct kmem_cache *s,
3803 				const char *buf, size_t length)
3804 {
3805 	s->flags &= ~SLAB_DEBUG_FREE;
3806 	if (buf[0] == '1')
3807 		s->flags |= SLAB_DEBUG_FREE;
3808 	return length;
3809 }
3810 SLAB_ATTR(sanity_checks);
3811 
3812 static ssize_t trace_show(struct kmem_cache *s, char *buf)
3813 {
3814 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3815 }
3816 
3817 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3818 							size_t length)
3819 {
3820 	s->flags &= ~SLAB_TRACE;
3821 	if (buf[0] == '1')
3822 		s->flags |= SLAB_TRACE;
3823 	return length;
3824 }
3825 SLAB_ATTR(trace);
3826 
3827 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3828 {
3829 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3830 }
3831 
3832 static ssize_t reclaim_account_store(struct kmem_cache *s,
3833 				const char *buf, size_t length)
3834 {
3835 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3836 	if (buf[0] == '1')
3837 		s->flags |= SLAB_RECLAIM_ACCOUNT;
3838 	return length;
3839 }
3840 SLAB_ATTR(reclaim_account);
3841 
3842 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3843 {
3844 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
3845 }
3846 SLAB_ATTR_RO(hwcache_align);
3847 
3848 #ifdef CONFIG_ZONE_DMA
3849 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3850 {
3851 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3852 }
3853 SLAB_ATTR_RO(cache_dma);
3854 #endif
3855 
3856 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3857 {
3858 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3859 }
3860 SLAB_ATTR_RO(destroy_by_rcu);
3861 
3862 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3863 {
3864 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3865 }
3866 
3867 static ssize_t red_zone_store(struct kmem_cache *s,
3868 				const char *buf, size_t length)
3869 {
3870 	if (any_slab_objects(s))
3871 		return -EBUSY;
3872 
3873 	s->flags &= ~SLAB_RED_ZONE;
3874 	if (buf[0] == '1')
3875 		s->flags |= SLAB_RED_ZONE;
3876 	calculate_sizes(s);
3877 	return length;
3878 }
3879 SLAB_ATTR(red_zone);
3880 
3881 static ssize_t poison_show(struct kmem_cache *s, char *buf)
3882 {
3883 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3884 }
3885 
3886 static ssize_t poison_store(struct kmem_cache *s,
3887 				const char *buf, size_t length)
3888 {
3889 	if (any_slab_objects(s))
3890 		return -EBUSY;
3891 
3892 	s->flags &= ~SLAB_POISON;
3893 	if (buf[0] == '1')
3894 		s->flags |= SLAB_POISON;
3895 	calculate_sizes(s);
3896 	return length;
3897 }
3898 SLAB_ATTR(poison);
3899 
3900 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3901 {
3902 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3903 }
3904 
3905 static ssize_t store_user_store(struct kmem_cache *s,
3906 				const char *buf, size_t length)
3907 {
3908 	if (any_slab_objects(s))
3909 		return -EBUSY;
3910 
3911 	s->flags &= ~SLAB_STORE_USER;
3912 	if (buf[0] == '1')
3913 		s->flags |= SLAB_STORE_USER;
3914 	calculate_sizes(s);
3915 	return length;
3916 }
3917 SLAB_ATTR(store_user);
3918 
3919 static ssize_t validate_show(struct kmem_cache *s, char *buf)
3920 {
3921 	return 0;
3922 }
3923 
3924 static ssize_t validate_store(struct kmem_cache *s,
3925 			const char *buf, size_t length)
3926 {
3927 	int ret = -EINVAL;
3928 
3929 	if (buf[0] == '1') {
3930 		ret = validate_slab_cache(s);
3931 		if (ret >= 0)
3932 			ret = length;
3933 	}
3934 	return ret;
3935 }
3936 SLAB_ATTR(validate);
3937 
3938 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
3939 {
3940 	return 0;
3941 }
3942 
3943 static ssize_t shrink_store(struct kmem_cache *s,
3944 			const char *buf, size_t length)
3945 {
3946 	if (buf[0] == '1') {
3947 		int rc = kmem_cache_shrink(s);
3948 
3949 		if (rc)
3950 			return rc;
3951 	} else
3952 		return -EINVAL;
3953 	return length;
3954 }
3955 SLAB_ATTR(shrink);
3956 
3957 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3958 {
3959 	if (!(s->flags & SLAB_STORE_USER))
3960 		return -ENOSYS;
3961 	return list_locations(s, buf, TRACK_ALLOC);
3962 }
3963 SLAB_ATTR_RO(alloc_calls);
3964 
3965 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3966 {
3967 	if (!(s->flags & SLAB_STORE_USER))
3968 		return -ENOSYS;
3969 	return list_locations(s, buf, TRACK_FREE);
3970 }
3971 SLAB_ATTR_RO(free_calls);
3972 
3973 #ifdef CONFIG_NUMA
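/*
 * remote_node_defrag_ratio is exposed as a percentage (0-100) but stored
 * scaled by ten, hence the division in the show routine and the
 * multiplication in the store routine below.
 */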
3974 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
3975 {
3976 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
3977 }
3978 
3979 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
3980 				const char *buf, size_t length)
3981 {
3982 	int n = simple_strtoul(buf, NULL, 10);
3983 
3984 	if (n <= 100)
3985 		s->remote_node_defrag_ratio = n * 10;
3986 	return length;
3987 }
3988 SLAB_ATTR(remote_node_defrag_ratio);
3989 #endif
3990 
3991 #ifdef CONFIG_SLUB_STATS
3992 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
3993 {
3994 	unsigned long sum  = 0;
3995 	int cpu;
3996 	int len;
3997 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
3998 
3999 	if (!data)
4000 		return -ENOMEM;
4001 
4002 	for_each_online_cpu(cpu) {
4003 		unsigned x = get_cpu_slab(s, cpu)->stat[si];
4004 
4005 		data[cpu] = x;
4006 		sum += x;
4007 	}
4008 
4009 	len = sprintf(buf, "%lu", sum);
4010 
4011 #ifdef CONFIG_SMP
4012 	for_each_online_cpu(cpu) {
4013 		if (data[cpu] && len < PAGE_SIZE - 20)
4014 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4015 	}
4016 #endif
4017 	kfree(data);
4018 	return len + sprintf(buf + len, "\n");
4019 }
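
/*
 * The resulting file contains the total followed, on SMP, by one
 * " C<cpu>=<count>" entry per CPU with a non-zero count, e.g.
 * (illustrative numbers):
 *
 *	4532 C0=2107 C1=2425
 */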
4020 
4021 #define STAT_ATTR(si, text) 					\
4022 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4023 {								\
4024 	return show_stat(s, buf, si);				\
4025 }								\
4026 SLAB_ATTR_RO(text);
4027 
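/*
 * Each STAT_ATTR() use below expands to a read-only attribute; for example
 * STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) becomes:
 *
 *	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
 *	{
 *		return show_stat(s, buf, ALLOC_FASTPATH);
 *	}
 *	SLAB_ATTR_RO(alloc_fastpath);
 */
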
4028 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4029 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4030 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4031 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4032 STAT_ATTR(FREE_FROZEN, free_frozen);
4033 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4034 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4035 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4036 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4037 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4038 STAT_ATTR(FREE_SLAB, free_slab);
4039 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4040 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4041 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4042 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4043 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4044 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4045 
4046 #endif
4047 
4048 static struct attribute *slab_attrs[] = {
4049 	&slab_size_attr.attr,
4050 	&object_size_attr.attr,
4051 	&objs_per_slab_attr.attr,
4052 	&order_attr.attr,
4053 	&objects_attr.attr,
4054 	&slabs_attr.attr,
4055 	&partial_attr.attr,
4056 	&cpu_slabs_attr.attr,
4057 	&ctor_attr.attr,
4058 	&aliases_attr.attr,
4059 	&align_attr.attr,
4060 	&sanity_checks_attr.attr,
4061 	&trace_attr.attr,
4062 	&hwcache_align_attr.attr,
4063 	&reclaim_account_attr.attr,
4064 	&destroy_by_rcu_attr.attr,
4065 	&red_zone_attr.attr,
4066 	&poison_attr.attr,
4067 	&store_user_attr.attr,
4068 	&validate_attr.attr,
4069 	&shrink_attr.attr,
4070 	&alloc_calls_attr.attr,
4071 	&free_calls_attr.attr,
4072 #ifdef CONFIG_ZONE_DMA
4073 	&cache_dma_attr.attr,
4074 #endif
4075 #ifdef CONFIG_NUMA
4076 	&remote_node_defrag_ratio_attr.attr,
4077 #endif
4078 #ifdef CONFIG_SLUB_STATS
4079 	&alloc_fastpath_attr.attr,
4080 	&alloc_slowpath_attr.attr,
4081 	&free_fastpath_attr.attr,
4082 	&free_slowpath_attr.attr,
4083 	&free_frozen_attr.attr,
4084 	&free_add_partial_attr.attr,
4085 	&free_remove_partial_attr.attr,
4086 	&alloc_from_partial_attr.attr,
4087 	&alloc_slab_attr.attr,
4088 	&alloc_refill_attr.attr,
4089 	&free_slab_attr.attr,
4090 	&cpuslab_flush_attr.attr,
4091 	&deactivate_full_attr.attr,
4092 	&deactivate_empty_attr.attr,
4093 	&deactivate_to_head_attr.attr,
4094 	&deactivate_to_tail_attr.attr,
4095 	&deactivate_remote_frees_attr.attr,
4096 #endif
4097 	NULL
4098 };
4099 
4100 static struct attribute_group slab_attr_group = {
4101 	.attrs = slab_attrs,
4102 };
4103 
4104 static ssize_t slab_attr_show(struct kobject *kobj,
4105 				struct attribute *attr,
4106 				char *buf)
4107 {
4108 	struct slab_attribute *attribute;
4109 	struct kmem_cache *s;
4110 	int err;
4111 
4112 	attribute = to_slab_attr(attr);
4113 	s = to_slab(kobj);
4114 
4115 	if (!attribute->show)
4116 		return -EIO;
4117 
4118 	err = attribute->show(s, buf);
4119 
4120 	return err;
4121 }
4122 
4123 static ssize_t slab_attr_store(struct kobject *kobj,
4124 				struct attribute *attr,
4125 				const char *buf, size_t len)
4126 {
4127 	struct slab_attribute *attribute;
4128 	struct kmem_cache *s;
4129 	int err;
4130 
4131 	attribute = to_slab_attr(attr);
4132 	s = to_slab(kobj);
4133 
4134 	if (!attribute->store)
4135 		return -EIO;
4136 
4137 	err = attribute->store(s, buf, len);
4138 
4139 	return err;
4140 }
4141 
4142 static void kmem_cache_release(struct kobject *kobj)
4143 {
4144 	struct kmem_cache *s = to_slab(kobj);
4145 
4146 	kfree(s);
4147 }
4148 
4149 static struct sysfs_ops slab_sysfs_ops = {
4150 	.show = slab_attr_show,
4151 	.store = slab_attr_store,
4152 };
4153 
4154 static struct kobj_type slab_ktype = {
4155 	.sysfs_ops = &slab_sysfs_ops,
4156 	.release = kmem_cache_release
4157 };
4158 
4159 static int uevent_filter(struct kset *kset, struct kobject *kobj)
4160 {
4161 	struct kobj_type *ktype = get_ktype(kobj);
4162 
4163 	if (ktype == &slab_ktype)
4164 		return 1;
4165 	return 0;
4166 }
4167 
4168 static struct kset_uevent_ops slab_uevent_ops = {
4169 	.filter = uevent_filter,
4170 };
4171 
4172 static struct kset *slab_kset;
4173 
4174 #define ID_STR_LENGTH 64
4175 
4176 /* Create a unique string id for a slab cache:
4177  *
4178  * Format	:[flags-]size
4179  */
4180 static char *create_unique_id(struct kmem_cache *s)
4181 {
4182 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4183 	char *p = name;
4184 
4185 	BUG_ON(!name);
4186 
4187 	*p++ = ':';
4188 	/*
4189 	 * First flags affecting slabcache operations. We will only
4190 	 * get here for aliasable slabs so we do not need to support
4191 	 * too many flags. The flags here must cover all flags that
4192 	 * are matched during merging to guarantee that the id is
4193 	 * unique.
4194 	 */
4195 	if (s->flags & SLAB_CACHE_DMA)
4196 		*p++ = 'd';
4197 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4198 		*p++ = 'a';
4199 	if (s->flags & SLAB_DEBUG_FREE)
4200 		*p++ = 'F';
4201 	if (p != name + 1)
4202 		*p++ = '-';
4203 	p += sprintf(p, "%07d", s->size);
4204 	BUG_ON(p > name + ID_STR_LENGTH - 1);
4205 	return name;
4206 }
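
/*
 * Examples of the ids this produces (sizes illustrative): a DMA cache with
 * object size 192 yields ":d-0000192", while a cache with none of the
 * listed flags and size 4096 yields ":0004096".
 */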
4207 
4208 static int sysfs_slab_add(struct kmem_cache *s)
4209 {
4210 	int err;
4211 	const char *name;
4212 	int unmergeable;
4213 
4214 	if (slab_state < SYSFS)
4215 		/* Defer until later */
4216 		return 0;
4217 
4218 	unmergeable = slab_unmergeable(s);
4219 	if (unmergeable) {
4220 		/*
4221 		 * Slabcache can never be merged so we can use the name proper.
4222 		 * This is typically the case for debug situations. In that
4223 		 * case we can catch duplicate names easily.
4224 		 */
4225 		sysfs_remove_link(&slab_kset->kobj, s->name);
4226 		name = s->name;
4227 	} else {
4228 		/*
4229 		 * Create a unique name for the slab as a target
4230 		 * for the symlinks.
4231 		 */
4232 		name = create_unique_id(s);
4233 	}
4234 
4235 	s->kobj.kset = slab_kset;
4236 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4237 	if (err) {
4238 		kobject_put(&s->kobj);
4239 		return err;
4240 	}
4241 
4242 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4243 	if (err)
4244 		return err;
4245 	kobject_uevent(&s->kobj, KOBJ_ADD);
4246 	if (!unmergeable) {
4247 		/* Setup first alias */
4248 		sysfs_slab_alias(s, s->name);
4249 		kfree(name);
4250 	}
4251 	return 0;
4252 }
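
/*
 * Net effect in sysfs: an unmergeable cache gets a directory under its own
 * name, while merged caches share a single directory named by
 * create_unique_id() with one symlink per aliased cache name.
 */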
4253 
4254 static void sysfs_slab_remove(struct kmem_cache *s)
4255 {
4256 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4257 	kobject_del(&s->kobj);
4258 	kobject_put(&s->kobj);
4259 }
4260 
4261 /*
4262  * Need to buffer aliases during bootup until sysfs becomes
4263  * available lest we lose that information.
4264  */
4265 struct saved_alias {
4266 	struct kmem_cache *s;
4267 	const char *name;
4268 	struct saved_alias *next;
4269 };
4270 
4271 static struct saved_alias *alias_list;
4272 
4273 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4274 {
4275 	struct saved_alias *al;
4276 
4277 	if (slab_state == SYSFS) {
4278 		/*
4279 		 * If we have a leftover link then remove it.
4280 		 */
4281 		sysfs_remove_link(&slab_kset->kobj, name);
4282 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4283 	}
4284 
4285 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4286 	if (!al)
4287 		return -ENOMEM;
4288 
4289 	al->s = s;
4290 	al->name = name;
4291 	al->next = alias_list;
4292 	alias_list = al;
4293 	return 0;
4294 }
4295 
4296 static int __init slab_sysfs_init(void)
4297 {
4298 	struct kmem_cache *s;
4299 	int err;
4300 
4301 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4302 	if (!slab_kset) {
4303 		printk(KERN_ERR "Cannot register slab subsystem.\n");
4304 		return -ENOSYS;
4305 	}
4306 
4307 	slab_state = SYSFS;
4308 
4309 	list_for_each_entry(s, &slab_caches, list) {
4310 		err = sysfs_slab_add(s);
4311 		if (err)
4312 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4313 						" to sysfs\n", s->name);
4314 	}
4315 
4316 	while (alias_list) {
4317 		struct saved_alias *al = alias_list;
4318 
4319 		alias_list = alias_list->next;
4320 		err = sysfs_slab_alias(al->s, al->name);
4321 		if (err)
4322 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4323 					" %s to sysfs\n", al->name);
4324 		kfree(al);
4325 	}
4326 
4327 	resiliency_test();
4328 	return 0;
4329 }
4330 
4331 __initcall(slab_sysfs_init);
4332 #endif
4333 
4334 /*
4335  * The /proc/slabinfo ABI
4336  */
4337 #ifdef CONFIG_SLABINFO
4338 
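/*
 * SLUB has no equivalent of the SLAB tunables (limit/batchcount/sharedfactor),
 * so writes to /proc/slabinfo are rejected and the tunables columns printed
 * by s_show() are always zero.
 */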
4339 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4340                        size_t count, loff_t *ppos)
4341 {
4342 	return -EINVAL;
4343 }
4344 
4345 
4346 static void print_slabinfo_header(struct seq_file *m)
4347 {
4348 	seq_puts(m, "slabinfo - version: 2.1\n");
4349 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4350 		 "<objperslab> <pagesperslab>");
4351 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4352 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4353 	seq_putc(m, '\n');
4354 }
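
/*
 * A resulting /proc/slabinfo entry looks roughly like this (values
 * illustrative): a cache of 64-byte objects, 64 objects per one-page slab,
 * 34 slabs, all full:
 *
 * kmalloc-64          2176   2176     64   64    1 : tunables    0    0    0 : slabdata     34     34      0
 */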
4355 
4356 static void *s_start(struct seq_file *m, loff_t *pos)
4357 {
4358 	loff_t n = *pos;
4359 
4360 	down_read(&slub_lock);
4361 	if (!n)
4362 		print_slabinfo_header(m);
4363 
4364 	return seq_list_start(&slab_caches, *pos);
4365 }
4366 
4367 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4368 {
4369 	return seq_list_next(p, &slab_caches, pos);
4370 }
4371 
4372 static void s_stop(struct seq_file *m, void *p)
4373 {
4374 	up_read(&slub_lock);
4375 }
4376 
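/*
 * One output line per cache. The object counts are an estimate: objects on
 * partial slabs are counted exactly via count_partial(), while all other
 * slabs (including cpu slabs) are assumed to be completely full.
 */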
4377 static int s_show(struct seq_file *m, void *p)
4378 {
4379 	unsigned long nr_partials = 0;
4380 	unsigned long nr_slabs = 0;
4381 	unsigned long nr_inuse = 0;
4382 	unsigned long nr_objs;
4383 	struct kmem_cache *s;
4384 	int node;
4385 
4386 	s = list_entry(p, struct kmem_cache, list);
4387 
4388 	for_each_online_node(node) {
4389 		struct kmem_cache_node *n = get_node(s, node);
4390 
4391 		if (!n)
4392 			continue;
4393 
4394 		nr_partials += n->nr_partial;
4395 		nr_slabs += atomic_long_read(&n->nr_slabs);
4396 		nr_inuse += count_partial(n);
4397 	}
4398 
4399 	nr_objs = nr_slabs * s->objects;
4400 	nr_inuse += (nr_slabs - nr_partials) * s->objects;
4401 
4402 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4403 		   nr_objs, s->size, s->objects, (1 << s->order));
4404 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4405 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4406 		   0UL);
4407 	seq_putc(m, '\n');
4408 	return 0;
4409 }
4410 
4411 const struct seq_operations slabinfo_op = {
4412 	.start = s_start,
4413 	.next = s_next,
4414 	.stop = s_stop,
4415 	.show = s_show,
4416 };
4417 
4418 #endif /* CONFIG_SLABINFO */
4419