xref: /openbmc/linux/mm/slub.c (revision a1e58bbd)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks and only
6  * uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/module.h>
13 #include <linux/bit_spinlock.h>
14 #include <linux/interrupt.h>
15 #include <linux/bitops.h>
16 #include <linux/slab.h>
17 #include <linux/seq_file.h>
18 #include <linux/cpu.h>
19 #include <linux/cpuset.h>
20 #include <linux/mempolicy.h>
21 #include <linux/ctype.h>
22 #include <linux/kallsyms.h>
23 #include <linux/memory.h>
24 
25 /*
26  * Lock order:
27  *   1. slab_lock(page)
28  *   2. node->list_lock
29  *
30  *   The slab_lock protects operations on the object of a particular
31  *   slab and its metadata in the page struct. If the slab lock
32  *   has been taken then no allocations nor frees can be performed
33  *   on the objects in the slab nor can the slab be added or removed
34  *   from the partial or full lists since this would mean modifying
35  *   the page_struct of the slab.
36  *
37  *   The list_lock protects the partial and full list on each node and
38  *   the partial slab counter. If taken then no new slabs may be added or
39  *   removed from the lists, nor may the number of partial slabs be modified.
40  *   (Note that the total number of slabs is an atomic value that may be
41  *   modified without taking the list lock).
42  *
43  *   The list_lock is a centralized lock and thus we avoid taking it as
44  *   much as possible. As long as SLUB does not have to handle partial
45  *   slabs, operations can continue without any centralized lock. F.e.
46  *   allocating a long series of objects that fill up slabs does not require
47  *   the list lock.
48  *
49  *   The lock order is sometimes inverted when we are trying to get a slab
50  *   off a list. We take the list_lock and then look for a page on the list
51  *   to use. While we do that objects in the slabs may be freed. We can
52  *   only operate on the slab if we have also taken the slab_lock. So we use
53  *   a slab_trylock() on the slab. If trylock was successful then no frees
54  *   can occur anymore and we can use the slab for allocations etc. If the
55  *   slab_trylock() does not succeed then frees are in progress in the slab and
56  *   we must stay away from it for a while since we may cause a bouncing
57  *   cacheline if we try to acquire the lock. So go onto the next slab.
58  *   If all pages are busy then we may allocate a new slab instead of reusing
59  *   a partial slab. A new slab has no one operating on it and thus there is
60  *   no danger of cacheline contention.
61  *
62  *   Interrupts are disabled during allocation and deallocation in order to
63  *   make the slab allocator safe to use in the context of an irq. In addition
64  *   interrupts are disabled to ensure that the processor does not change
65  *   while handling per_cpu slabs, due to kernel preemption.
66  *
67  * SLUB assigns one slab for allocation to each processor.
68  * Allocations only occur from these slabs called cpu slabs.
69  *
70  * Slabs with free elements are kept on a partial list and during regular
71  * operations no list for full slabs is used. If an object in a full slab is
72  * freed then the slab will show up again on the partial lists.
73  * We track full slabs for debugging purposes though because otherwise we
74  * cannot scan all objects.
75  *
76  * Slabs are freed when they become empty. Teardown and setup is
77  * minimal so we rely on the page allocators per cpu caches for
78  * fast frees and allocs.
79  *
80  * Overloading of page flags that are otherwise used for LRU management.
81  *
82  * PageActive 		The slab is frozen and exempt from list processing.
83  * 			This means that the slab is dedicated to a purpose
84  * 			such as satisfying allocations for a specific
85  * 			processor. Objects may be freed in the slab while
86  * 			it is frozen but slab_free will then skip the usual
87  * 			list operations. It is up to the processor holding
88  * 			the slab to integrate the slab into the slab lists
89  * 			when the slab is no longer needed.
90  *
91  * 			One use of this flag is to mark slabs that are
92  * 			used for allocations. Then such a slab becomes a cpu
93  * 			slab. The cpu slab may be equipped with an additional
94  * 			freelist that allows lockless access to
95  * 			free objects in addition to the regular freelist
96  * 			that requires the slab lock.
97  *
98  * PageError		Slab requires special handling due to debug
99  * 			options set. This moves slab handling out of
100  * 			the fast path and disables lockless freelists.
101  */
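
/*
 * A minimal sketch of the inverted lock order described above (the real
 * code is in get_partial_node() and lock_and_freeze_slab() further down):
 * the list_lock is taken first and individual slabs are only trylocked,
 * so a busy slab is simply skipped instead of risking a deadlock or a
 * bouncing cacheline.
 *
 *	spin_lock(&n->list_lock);
 *	list_for_each_entry(page, &n->partial, lru)
 *		if (slab_trylock(page)) {
 *			... remove from the partial list, freeze, use ...
 *			break;
 *		}
 *	spin_unlock(&n->list_lock);
 */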
102 
103 #define FROZEN (1 << PG_active)
104 
105 #ifdef CONFIG_SLUB_DEBUG
106 #define SLABDEBUG (1 << PG_error)
107 #else
108 #define SLABDEBUG 0
109 #endif
110 
111 static inline int SlabFrozen(struct page *page)
112 {
113 	return page->flags & FROZEN;
114 }
115 
116 static inline void SetSlabFrozen(struct page *page)
117 {
118 	page->flags |= FROZEN;
119 }
120 
121 static inline void ClearSlabFrozen(struct page *page)
122 {
123 	page->flags &= ~FROZEN;
124 }
125 
126 static inline int SlabDebug(struct page *page)
127 {
128 	return page->flags & SLABDEBUG;
129 }
130 
131 static inline void SetSlabDebug(struct page *page)
132 {
133 	page->flags |= SLABDEBUG;
134 }
135 
136 static inline void ClearSlabDebug(struct page *page)
137 {
138 	page->flags &= ~SLABDEBUG;
139 }
140 
141 /*
142  * Issues still to be resolved:
143  *
144  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
145  *
146  * - Variable sizing of the per node arrays
147  */
148 
149 /* Enable to test recovery from slab corruption on boot */
150 #undef SLUB_RESILIENCY_TEST
151 
152 #if PAGE_SHIFT <= 12
153 
154 /*
155  * Small page size. Make sure that we do not fragment memory
156  */
157 #define DEFAULT_MAX_ORDER 1
158 #define DEFAULT_MIN_OBJECTS 4
159 
160 #else
161 
162 /*
163  * Large page machines are customarily able to handle larger
164  * page orders.
165  */
166 #define DEFAULT_MAX_ORDER 2
167 #define DEFAULT_MIN_OBJECTS 8
168 
169 #endif
170 
171 /*
172  * Minimum number of partial slabs. These will be left on the partial
173  * lists even if they are empty. kmem_cache_shrink may reclaim them.
174  */
175 #define MIN_PARTIAL 5
176 
177 /*
178  * Maximum number of desirable partial slabs.
179  * The existence of more partial slabs makes kmem_cache_shrink
180  * sort the partial list by the number of objects in use.
181  */
182 #define MAX_PARTIAL 10
183 
184 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
185 				SLAB_POISON | SLAB_STORE_USER)
186 
187 /*
188  * Set of flags that will prevent slab merging
189  */
190 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
191 		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
192 
193 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
194 		SLAB_CACHE_DMA)
195 
196 #ifndef ARCH_KMALLOC_MINALIGN
197 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
198 #endif
199 
200 #ifndef ARCH_SLAB_MINALIGN
201 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
202 #endif
203 
204 /* Internal SLUB flags */
205 #define __OBJECT_POISON		0x80000000 /* Poison object */
206 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
207 #define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
208 #define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
209 
210 /* Not all arches define cache_line_size */
211 #ifndef cache_line_size
212 #define cache_line_size()	L1_CACHE_BYTES
213 #endif
214 
215 static int kmem_size = sizeof(struct kmem_cache);
216 
217 #ifdef CONFIG_SMP
218 static struct notifier_block slab_notifier;
219 #endif
220 
221 static enum {
222 	DOWN,		/* No slab functionality available */
223 	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
224 	UP,		/* Everything works but does not show up in sysfs */
225 	SYSFS		/* Sysfs up */
226 } slab_state = DOWN;
227 
228 /* A list of all slab caches on the system */
229 static DECLARE_RWSEM(slub_lock);
230 static LIST_HEAD(slab_caches);
231 
232 /*
233  * Tracking user of a slab.
234  */
235 struct track {
236 	void *addr;		/* Called from address */
237 	int cpu;		/* Was running on cpu */
238 	int pid;		/* Pid context */
239 	unsigned long when;	/* When did the operation occur */
240 };
241 
242 enum track_item { TRACK_ALLOC, TRACK_FREE };
243 
244 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
245 static int sysfs_slab_add(struct kmem_cache *);
246 static int sysfs_slab_alias(struct kmem_cache *, const char *);
247 static void sysfs_slab_remove(struct kmem_cache *);
248 
249 #else
250 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
251 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
252 							{ return 0; }
253 static inline void sysfs_slab_remove(struct kmem_cache *s)
254 {
255 	kfree(s);
256 }
257 
258 #endif
259 
260 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
261 {
262 #ifdef CONFIG_SLUB_STATS
263 	c->stat[si]++;
264 #endif
265 }
266 
267 /********************************************************************
268  * 			Core slab cache functions
269  *******************************************************************/
270 
271 int slab_is_available(void)
272 {
273 	return slab_state >= UP;
274 }
275 
276 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
277 {
278 #ifdef CONFIG_NUMA
279 	return s->node[node];
280 #else
281 	return &s->local_node;
282 #endif
283 }
284 
285 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
286 {
287 #ifdef CONFIG_SMP
288 	return s->cpu_slab[cpu];
289 #else
290 	return &s->cpu_slab;
291 #endif
292 }
293 
294 /* Verify that a pointer has an address that is valid within a slab page */
295 static inline int check_valid_pointer(struct kmem_cache *s,
296 				struct page *page, const void *object)
297 {
298 	void *base;
299 
300 	if (!object)
301 		return 1;
302 
303 	base = page_address(page);
304 	if (object < base || object >= base + s->objects * s->size ||
305 		(object - base) % s->size) {
306 		return 0;
307 	}
308 
309 	return 1;
310 }
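
/*
 * Worked example with hypothetical numbers: for s->size == 64 and
 * s->objects == 64 the only valid object addresses within a slab page are
 * base, base + 64, ..., base + 63 * 64.  Anything outside that range, or
 * not a multiple of s->size from base, is rejected above.
 */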
311 
312 /*
313  * Slow version of get and set free pointer.
314  *
315  * This version requires touching the cache lines of kmem_cache which
316  * we avoid to do in the fast alloc free paths. There we obtain the offset
317  * from the page struct.
318  */
319 static inline void *get_freepointer(struct kmem_cache *s, void *object)
320 {
321 	return *(void **)(object + s->offset);
322 }
323 
324 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
325 {
326 	*(void **)(object + s->offset) = fp;
327 }
328 
329 /* Loop over all objects in a slab */
330 #define for_each_object(__p, __s, __addr) \
331 	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
332 			__p += (__s)->size)
333 
334 /* Scan freelist */
335 #define for_each_free_object(__p, __s, __free) \
336 	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
337 
338 /* Determine object index from a given position */
339 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
340 {
341 	return (p - addr) / s->size;
342 }
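
/*
 * A small sketch (not part of slub.c) showing how the freelist helpers and
 * iterators above compose; it assumes a hypothetical caller that already
 * holds the slab lock so the chain cannot change underneath it:
 *
 *	static int count_free(struct kmem_cache *s, struct page *page)
 *	{
 *		void *p;
 *		int nr = 0;
 *
 *		for_each_free_object(p, s, page->freelist)
 *			nr++;
 *		return nr;
 *	}
 */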
343 
344 #ifdef CONFIG_SLUB_DEBUG
345 /*
346  * Debug settings:
347  */
348 #ifdef CONFIG_SLUB_DEBUG_ON
349 static int slub_debug = DEBUG_DEFAULT_FLAGS;
350 #else
351 static int slub_debug;
352 #endif
353 
354 static char *slub_debug_slabs;
355 
356 /*
357  * Object debugging
358  */
359 static void print_section(char *text, u8 *addr, unsigned int length)
360 {
361 	int i, offset;
362 	int newline = 1;
363 	char ascii[17];
364 
365 	ascii[16] = 0;
366 
367 	for (i = 0; i < length; i++) {
368 		if (newline) {
369 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
370 			newline = 0;
371 		}
372 		printk(KERN_CONT " %02x", addr[i]);
373 		offset = i % 16;
374 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
375 		if (offset == 15) {
376 			printk(KERN_CONT " %s\n", ascii);
377 			newline = 1;
378 		}
379 	}
380 	if (!newline) {
381 		i %= 16;
382 		while (i < 16) {
383 			printk(KERN_CONT "   ");
384 			ascii[i] = ' ';
385 			i++;
386 		}
387 		printk(KERN_CONT " %s\n", ascii);
388 	}
389 }
390 
391 static struct track *get_track(struct kmem_cache *s, void *object,
392 	enum track_item alloc)
393 {
394 	struct track *p;
395 
396 	if (s->offset)
397 		p = object + s->offset + sizeof(void *);
398 	else
399 		p = object + s->inuse;
400 
401 	return p + alloc;
402 }
403 
404 static void set_track(struct kmem_cache *s, void *object,
405 				enum track_item alloc, void *addr)
406 {
407 	struct track *p;
408 
409 	if (s->offset)
410 		p = object + s->offset + sizeof(void *);
411 	else
412 		p = object + s->inuse;
413 
414 	p += alloc;
415 	if (addr) {
416 		p->addr = addr;
417 		p->cpu = smp_processor_id();
418 		p->pid = current ? current->pid : -1;
419 		p->when = jiffies;
420 	} else
421 		memset(p, 0, sizeof(struct track));
422 }
423 
424 static void init_tracking(struct kmem_cache *s, void *object)
425 {
426 	if (!(s->flags & SLAB_STORE_USER))
427 		return;
428 
429 	set_track(s, object, TRACK_FREE, NULL);
430 	set_track(s, object, TRACK_ALLOC, NULL);
431 }
432 
433 static void print_track(const char *s, struct track *t)
434 {
435 	if (!t->addr)
436 		return;
437 
438 	printk(KERN_ERR "INFO: %s in ", s);
439 	__print_symbol("%s", (unsigned long)t->addr);
440 	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
441 }
442 
443 static void print_tracking(struct kmem_cache *s, void *object)
444 {
445 	if (!(s->flags & SLAB_STORE_USER))
446 		return;
447 
448 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
449 	print_track("Freed", get_track(s, object, TRACK_FREE));
450 }
451 
452 static void print_page_info(struct page *page)
453 {
454 	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
455 		page, page->inuse, page->freelist, page->flags);
456 
457 }
458 
459 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
460 {
461 	va_list args;
462 	char buf[100];
463 
464 	va_start(args, fmt);
465 	vsnprintf(buf, sizeof(buf), fmt, args);
466 	va_end(args);
467 	printk(KERN_ERR "========================================"
468 			"=====================================\n");
469 	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
470 	printk(KERN_ERR "----------------------------------------"
471 			"-------------------------------------\n\n");
472 }
473 
474 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
475 {
476 	va_list args;
477 	char buf[100];
478 
479 	va_start(args, fmt);
480 	vsnprintf(buf, sizeof(buf), fmt, args);
481 	va_end(args);
482 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
483 }
484 
485 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
486 {
487 	unsigned int off;	/* Offset of last byte */
488 	u8 *addr = page_address(page);
489 
490 	print_tracking(s, p);
491 
492 	print_page_info(page);
493 
494 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
495 			p, p - addr, get_freepointer(s, p));
496 
497 	if (p > addr + 16)
498 		print_section("Bytes b4", p - 16, 16);
499 
500 	print_section("Object", p, min(s->objsize, 128));
501 
502 	if (s->flags & SLAB_RED_ZONE)
503 		print_section("Redzone", p + s->objsize,
504 			s->inuse - s->objsize);
505 
506 	if (s->offset)
507 		off = s->offset + sizeof(void *);
508 	else
509 		off = s->inuse;
510 
511 	if (s->flags & SLAB_STORE_USER)
512 		off += 2 * sizeof(struct track);
513 
514 	if (off != s->size)
515 		/* Beginning of the filler is the free pointer */
516 		print_section("Padding", p + off, s->size - off);
517 
518 	dump_stack();
519 }
520 
521 static void object_err(struct kmem_cache *s, struct page *page,
522 			u8 *object, char *reason)
523 {
524 	slab_bug(s, reason);
525 	print_trailer(s, page, object);
526 }
527 
528 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
529 {
530 	va_list args;
531 	char buf[100];
532 
533 	va_start(args, fmt);
534 	vsnprintf(buf, sizeof(buf), fmt, args);
535 	va_end(args);
536 	slab_bug(s, fmt);
537 	print_page_info(page);
538 	dump_stack();
539 }
540 
541 static void init_object(struct kmem_cache *s, void *object, int active)
542 {
543 	u8 *p = object;
544 
545 	if (s->flags & __OBJECT_POISON) {
546 		memset(p, POISON_FREE, s->objsize - 1);
547 		p[s->objsize - 1] = POISON_END;
548 	}
549 
550 	if (s->flags & SLAB_RED_ZONE)
551 		memset(p + s->objsize,
552 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
553 			s->inuse - s->objsize);
554 }
555 
556 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
557 {
558 	while (bytes) {
559 		if (*start != (u8)value)
560 			return start;
561 		start++;
562 		bytes--;
563 	}
564 	return NULL;
565 }
566 
567 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
568 						void *from, void *to)
569 {
570 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
571 	memset(from, data, to - from);
572 }
573 
574 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
575 			u8 *object, char *what,
576 			u8 *start, unsigned int value, unsigned int bytes)
577 {
578 	u8 *fault;
579 	u8 *end;
580 
581 	fault = check_bytes(start, value, bytes);
582 	if (!fault)
583 		return 1;
584 
585 	end = start + bytes;
586 	while (end > fault && end[-1] == value)
587 		end--;
588 
589 	slab_bug(s, "%s overwritten", what);
590 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
591 					fault, end - 1, fault[0], value);
592 	print_trailer(s, page, object);
593 
594 	restore_bytes(s, what, value, fault, end);
595 	return 0;
596 }
597 
598 /*
599  * Object layout:
600  *
601  * object address
602  * 	Bytes of the object to be managed.
603  * 	If the freepointer may overlay the object then the free
604  * 	pointer is the first word of the object.
605  *
606  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
607  * 	0xa5 (POISON_END)
608  *
609  * object + s->objsize
610  * 	Padding to reach word boundary. This is also used for Redzoning.
611  * 	Padding is extended by another word if Redzoning is enabled and
612  * 	objsize == inuse.
613  *
614  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
615  * 	0xcc (RED_ACTIVE) for objects in use.
616  *
617  * object + s->inuse
618  * 	Meta data starts here.
619  *
620  * 	A. Free pointer (if we cannot overwrite object on free)
621  * 	B. Tracking data for SLAB_STORE_USER
622  * 	C. Padding to reach required alignment boundary or at minimum
623  * 		one word if debugging is on to be able to detect writes
624  * 		before the word boundary.
625  *
626  *	Padding is done using 0x5a (POISON_INUSE)
627  *
628  * object + s->size
629  * 	Nothing is used beyond s->size.
630  *
631  * If slabcaches are merged then the objsize and inuse boundaries are mostly
632  * ignored. And therefore no slab options that rely on these boundaries
633  * may be used with merged slabcaches.
634  */
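
/*
 * A worked instance of the layout above, using hypothetical numbers and
 * assuming a 64-bit build (sizeof(void *) == 8, sizeof(struct track) == 24):
 * objsize == 24, red zoning and SLAB_STORE_USER enabled, no constructor, so
 * the free pointer overlays the object and s->offset == 0.
 *
 *	bytes  0..23	object payload (0x6b/0xa5 poison while free)
 *	bytes 24..31	red zone word (0xbb inactive, 0xcc active); inuse == 32
 *	bytes 32..79	two struct track records (TRACK_ALLOC, TRACK_FREE)
 *	bytes 80..	0x5a filler up to s->size, if alignment requires it
 */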
635 
636 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
637 {
638 	unsigned long off = s->inuse;	/* The end of info */
639 
640 	if (s->offset)
641 		/* Freepointer is placed after the object. */
642 		off += sizeof(void *);
643 
644 	if (s->flags & SLAB_STORE_USER)
645 		/* We also have user information there */
646 		off += 2 * sizeof(struct track);
647 
648 	if (s->size == off)
649 		return 1;
650 
651 	return check_bytes_and_report(s, page, p, "Object padding",
652 				p + off, POISON_INUSE, s->size - off);
653 }
654 
655 static int slab_pad_check(struct kmem_cache *s, struct page *page)
656 {
657 	u8 *start;
658 	u8 *fault;
659 	u8 *end;
660 	int length;
661 	int remainder;
662 
663 	if (!(s->flags & SLAB_POISON))
664 		return 1;
665 
666 	start = page_address(page);
667 	end = start + (PAGE_SIZE << s->order);
668 	length = s->objects * s->size;
669 	remainder = end - (start + length);
670 	if (!remainder)
671 		return 1;
672 
673 	fault = check_bytes(start + length, POISON_INUSE, remainder);
674 	if (!fault)
675 		return 1;
676 	while (end > fault && end[-1] == POISON_INUSE)
677 		end--;
678 
679 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
680 	print_section("Padding", start, length);
681 
682 	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
683 	return 0;
684 }
685 
686 static int check_object(struct kmem_cache *s, struct page *page,
687 					void *object, int active)
688 {
689 	u8 *p = object;
690 	u8 *endobject = object + s->objsize;
691 
692 	if (s->flags & SLAB_RED_ZONE) {
693 		unsigned int red =
694 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
695 
696 		if (!check_bytes_and_report(s, page, object, "Redzone",
697 			endobject, red, s->inuse - s->objsize))
698 			return 0;
699 	} else {
700 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
701 			check_bytes_and_report(s, page, p, "Alignment padding",
702 				endobject, POISON_INUSE, s->inuse - s->objsize);
703 		}
704 	}
705 
706 	if (s->flags & SLAB_POISON) {
707 		if (!active && (s->flags & __OBJECT_POISON) &&
708 			(!check_bytes_and_report(s, page, p, "Poison", p,
709 					POISON_FREE, s->objsize - 1) ||
710 			 !check_bytes_and_report(s, page, p, "Poison",
711 				p + s->objsize - 1, POISON_END, 1)))
712 			return 0;
713 		/*
714 		 * check_pad_bytes cleans up on its own.
715 		 */
716 		check_pad_bytes(s, page, p);
717 	}
718 
719 	if (!s->offset && active)
720 		/*
721 		 * Object and freepointer overlap. Cannot check
722 		 * freepointer while object is allocated.
723 		 */
724 		return 1;
725 
726 	/* Check free pointer validity */
727 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
728 		object_err(s, page, p, "Freepointer corrupt");
729 		/*
730 		 * No choice but to zap it and thus lose the remainder
731 		 * of the free objects in this slab. May cause
732 		 * another error because the object count is now wrong.
733 		 */
734 		set_freepointer(s, p, NULL);
735 		return 0;
736 	}
737 	return 1;
738 }
739 
740 static int check_slab(struct kmem_cache *s, struct page *page)
741 {
742 	VM_BUG_ON(!irqs_disabled());
743 
744 	if (!PageSlab(page)) {
745 		slab_err(s, page, "Not a valid slab page");
746 		return 0;
747 	}
748 	if (page->inuse > s->objects) {
749 		slab_err(s, page, "inuse %u > max %u",
750 			page->inuse, s->objects);
751 		return 0;
752 	}
753 	/* Slab_pad_check fixes things up after itself */
754 	slab_pad_check(s, page);
755 	return 1;
756 }
757 
758 /*
759  * Determine if a certain object on a page is on the freelist. Must hold the
760  * slab lock to guarantee that the chains are in a consistent state.
761  */
762 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
763 {
764 	int nr = 0;
765 	void *fp = page->freelist;
766 	void *object = NULL;
767 
768 	while (fp && nr <= s->objects) {
769 		if (fp == search)
770 			return 1;
771 		if (!check_valid_pointer(s, page, fp)) {
772 			if (object) {
773 				object_err(s, page, object,
774 					"Freechain corrupt");
775 				set_freepointer(s, object, NULL);
776 				break;
777 			} else {
778 				slab_err(s, page, "Freepointer corrupt");
779 				page->freelist = NULL;
780 				page->inuse = s->objects;
781 				slab_fix(s, "Freelist cleared");
782 				return 0;
783 			}
784 			break;
785 		}
786 		object = fp;
787 		fp = get_freepointer(s, object);
788 		nr++;
789 	}
790 
791 	if (page->inuse != s->objects - nr) {
792 		slab_err(s, page, "Wrong object count. Counter is %d but "
793 			"counted were %d", page->inuse, s->objects - nr);
794 		page->inuse = s->objects - nr;
795 		slab_fix(s, "Object count adjusted.");
796 	}
797 	return search == NULL;
798 }
799 
800 static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
801 {
802 	if (s->flags & SLAB_TRACE) {
803 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
804 			s->name,
805 			alloc ? "alloc" : "free",
806 			object, page->inuse,
807 			page->freelist);
808 
809 		if (!alloc)
810 			print_section("Object", (void *)object, s->objsize);
811 
812 		dump_stack();
813 	}
814 }
815 
816 /*
817  * Tracking of fully allocated slabs for debugging purposes.
818  */
819 static void add_full(struct kmem_cache_node *n, struct page *page)
820 {
821 	spin_lock(&n->list_lock);
822 	list_add(&page->lru, &n->full);
823 	spin_unlock(&n->list_lock);
824 }
825 
826 static void remove_full(struct kmem_cache *s, struct page *page)
827 {
828 	struct kmem_cache_node *n;
829 
830 	if (!(s->flags & SLAB_STORE_USER))
831 		return;
832 
833 	n = get_node(s, page_to_nid(page));
834 
835 	spin_lock(&n->list_lock);
836 	list_del(&page->lru);
837 	spin_unlock(&n->list_lock);
838 }
839 
840 static void setup_object_debug(struct kmem_cache *s, struct page *page,
841 								void *object)
842 {
843 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
844 		return;
845 
846 	init_object(s, object, 0);
847 	init_tracking(s, object);
848 }
849 
850 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
851 						void *object, void *addr)
852 {
853 	if (!check_slab(s, page))
854 		goto bad;
855 
856 	if (!on_freelist(s, page, object)) {
857 		object_err(s, page, object, "Object already allocated");
858 		goto bad;
859 	}
860 
861 	if (!check_valid_pointer(s, page, object)) {
862 		object_err(s, page, object, "Freelist Pointer check fails");
863 		goto bad;
864 	}
865 
866 	if (!check_object(s, page, object, 0))
867 		goto bad;
868 
869 	/* Success. Perform special debug activities for allocs */
870 	if (s->flags & SLAB_STORE_USER)
871 		set_track(s, object, TRACK_ALLOC, addr);
872 	trace(s, page, object, 1);
873 	init_object(s, object, 1);
874 	return 1;
875 
876 bad:
877 	if (PageSlab(page)) {
878 		/*
879 		 * If this is a slab page then let's do the best we can
880 		 * to avoid issues in the future. Marking all objects
881 		 * as used avoids touching the remaining objects.
882 		 */
883 		slab_fix(s, "Marking all objects used");
884 		page->inuse = s->objects;
885 		page->freelist = NULL;
886 	}
887 	return 0;
888 }
889 
890 static int free_debug_processing(struct kmem_cache *s, struct page *page,
891 						void *object, void *addr)
892 {
893 	if (!check_slab(s, page))
894 		goto fail;
895 
896 	if (!check_valid_pointer(s, page, object)) {
897 		slab_err(s, page, "Invalid object pointer 0x%p", object);
898 		goto fail;
899 	}
900 
901 	if (on_freelist(s, page, object)) {
902 		object_err(s, page, object, "Object already free");
903 		goto fail;
904 	}
905 
906 	if (!check_object(s, page, object, 1))
907 		return 0;
908 
909 	if (unlikely(s != page->slab)) {
910 		if (!PageSlab(page)) {
911 			slab_err(s, page, "Attempt to free object(0x%p) "
912 				"outside of slab", object);
913 		} else if (!page->slab) {
914 			printk(KERN_ERR
915 				"SLUB <none>: no slab for object 0x%p.\n",
916 						object);
917 			dump_stack();
918 		} else
919 			object_err(s, page, object,
920 					"page slab pointer corrupt.");
921 		goto fail;
922 	}
923 
924 	/* Special debug activities for freeing objects */
925 	if (!SlabFrozen(page) && !page->freelist)
926 		remove_full(s, page);
927 	if (s->flags & SLAB_STORE_USER)
928 		set_track(s, object, TRACK_FREE, addr);
929 	trace(s, page, object, 0);
930 	init_object(s, object, 0);
931 	return 1;
932 
933 fail:
934 	slab_fix(s, "Object at 0x%p not freed", object);
935 	return 0;
936 }
937 
938 static int __init setup_slub_debug(char *str)
939 {
940 	slub_debug = DEBUG_DEFAULT_FLAGS;
941 	if (*str++ != '=' || !*str)
942 		/*
943 		 * No options specified. Switch on full debugging.
944 		 */
945 		goto out;
946 
947 	if (*str == ',')
948 		/*
949 		 * No options but restriction on slabs. This means full
950 		 * debugging for slabs matching a pattern.
951 		 */
952 		goto check_slabs;
953 
954 	slub_debug = 0;
955 	if (*str == '-')
956 		/*
957 		 * Switch off all debugging measures.
958 		 */
959 		goto out;
960 
961 	/*
962 	 * Determine which debug features should be switched on
963 	 */
964 	for (; *str && *str != ','; str++) {
965 		switch (tolower(*str)) {
966 		case 'f':
967 			slub_debug |= SLAB_DEBUG_FREE;
968 			break;
969 		case 'z':
970 			slub_debug |= SLAB_RED_ZONE;
971 			break;
972 		case 'p':
973 			slub_debug |= SLAB_POISON;
974 			break;
975 		case 'u':
976 			slub_debug |= SLAB_STORE_USER;
977 			break;
978 		case 't':
979 			slub_debug |= SLAB_TRACE;
980 			break;
981 		default:
982 			printk(KERN_ERR "slub_debug option '%c' "
983 				"unknown. skipped\n", *str);
984 		}
985 	}
986 
987 check_slabs:
988 	if (*str == ',')
989 		slub_debug_slabs = str + 1;
990 out:
991 	return 1;
992 }
993 
994 __setup("slub_debug", setup_slub_debug);
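
/*
 * Examples of the boot parameter parsed above (the part after the comma is
 * matched as a prefix of the cache name):
 *
 *	slub_debug		all debug options for all caches
 *	slub_debug=FZ		SLAB_DEBUG_FREE + SLAB_RED_ZONE for all caches
 *	slub_debug=,dentry	all debug options, only for caches whose name
 *				starts with "dentry"
 *	slub_debug=ZP,kmalloc-	red zoning and poisoning for the kmalloc
 *				caches only
 *	slub_debug=-		switch all debugging off
 */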
995 
996 static unsigned long kmem_cache_flags(unsigned long objsize,
997 	unsigned long flags, const char *name,
998 	void (*ctor)(struct kmem_cache *, void *))
999 {
1000 	/*
1001 	 * Enable debugging if selected on the kernel commandline.
1002 	 */
1003 	if (slub_debug && (!slub_debug_slabs ||
1004 	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
1005 			flags |= slub_debug;
1006 
1007 	return flags;
1008 }
1009 #else
1010 static inline void setup_object_debug(struct kmem_cache *s,
1011 			struct page *page, void *object) {}
1012 
1013 static inline int alloc_debug_processing(struct kmem_cache *s,
1014 	struct page *page, void *object, void *addr) { return 0; }
1015 
1016 static inline int free_debug_processing(struct kmem_cache *s,
1017 	struct page *page, void *object, void *addr) { return 0; }
1018 
1019 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1020 			{ return 1; }
1021 static inline int check_object(struct kmem_cache *s, struct page *page,
1022 			void *object, int active) { return 1; }
1023 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1024 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1025 	unsigned long flags, const char *name,
1026 	void (*ctor)(struct kmem_cache *, void *))
1027 {
1028 	return flags;
1029 }
1030 #define slub_debug 0
1031 #endif
1032 /*
1033  * Slab allocation and freeing
1034  */
1035 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1036 {
1037 	struct page *page;
1038 	int pages = 1 << s->order;
1039 
1040 	flags |= s->allocflags;
1041 
1042 	if (node == -1)
1043 		page = alloc_pages(flags, s->order);
1044 	else
1045 		page = alloc_pages_node(node, flags, s->order);
1046 
1047 	if (!page)
1048 		return NULL;
1049 
1050 	mod_zone_page_state(page_zone(page),
1051 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1052 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1053 		pages);
1054 
1055 	return page;
1056 }
1057 
1058 static void setup_object(struct kmem_cache *s, struct page *page,
1059 				void *object)
1060 {
1061 	setup_object_debug(s, page, object);
1062 	if (unlikely(s->ctor))
1063 		s->ctor(s, object);
1064 }
1065 
1066 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1067 {
1068 	struct page *page;
1069 	struct kmem_cache_node *n;
1070 	void *start;
1071 	void *last;
1072 	void *p;
1073 
1074 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1075 
1076 	page = allocate_slab(s,
1077 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1078 	if (!page)
1079 		goto out;
1080 
1081 	n = get_node(s, page_to_nid(page));
1082 	if (n)
1083 		atomic_long_inc(&n->nr_slabs);
1084 	page->slab = s;
1085 	page->flags |= 1 << PG_slab;
1086 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1087 			SLAB_STORE_USER | SLAB_TRACE))
1088 		SetSlabDebug(page);
1089 
1090 	start = page_address(page);
1091 
1092 	if (unlikely(s->flags & SLAB_POISON))
1093 		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
1094 
1095 	last = start;
1096 	for_each_object(p, s, start) {
1097 		setup_object(s, page, last);
1098 		set_freepointer(s, last, p);
1099 		last = p;
1100 	}
1101 	setup_object(s, page, last);
1102 	set_freepointer(s, last, NULL);
1103 
1104 	page->freelist = start;
1105 	page->inuse = 0;
1106 out:
1107 	return page;
1108 }
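
/*
 * After the loop above, a new slab with, say, three objects A, B and C at
 * start, start + size and start + 2 * size comes back fully free and in
 * address order:
 *
 *	page->freelist -> A -> B -> C -> NULL,	page->inuse == 0
 */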
1109 
1110 static void __free_slab(struct kmem_cache *s, struct page *page)
1111 {
1112 	int pages = 1 << s->order;
1113 
1114 	if (unlikely(SlabDebug(page))) {
1115 		void *p;
1116 
1117 		slab_pad_check(s, page);
1118 		for_each_object(p, s, page_address(page))
1119 			check_object(s, page, p, 0);
1120 		ClearSlabDebug(page);
1121 	}
1122 
1123 	mod_zone_page_state(page_zone(page),
1124 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1125 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1126 		-pages);
1127 
1128 	__free_pages(page, s->order);
1129 }
1130 
1131 static void rcu_free_slab(struct rcu_head *h)
1132 {
1133 	struct page *page;
1134 
1135 	page = container_of((struct list_head *)h, struct page, lru);
1136 	__free_slab(page->slab, page);
1137 }
1138 
1139 static void free_slab(struct kmem_cache *s, struct page *page)
1140 {
1141 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1142 		/*
1143 		 * RCU free overloads the RCU head over the LRU
1144 		 */
1145 		struct rcu_head *head = (void *)&page->lru;
1146 
1147 		call_rcu(head, rcu_free_slab);
1148 	} else
1149 		__free_slab(s, page);
1150 }
1151 
1152 static void discard_slab(struct kmem_cache *s, struct page *page)
1153 {
1154 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1155 
1156 	atomic_long_dec(&n->nr_slabs);
1157 	reset_page_mapcount(page);
1158 	__ClearPageSlab(page);
1159 	free_slab(s, page);
1160 }
1161 
1162 /*
1163  * Per slab locking using the pagelock
1164  */
1165 static __always_inline void slab_lock(struct page *page)
1166 {
1167 	bit_spin_lock(PG_locked, &page->flags);
1168 }
1169 
1170 static __always_inline void slab_unlock(struct page *page)
1171 {
1172 	__bit_spin_unlock(PG_locked, &page->flags);
1173 }
1174 
1175 static __always_inline int slab_trylock(struct page *page)
1176 {
1177 	int rc = 1;
1178 
1179 	rc = bit_spin_trylock(PG_locked, &page->flags);
1180 	return rc;
1181 }
1182 
1183 /*
1184  * Management of partially allocated slabs
1185  */
1186 static void add_partial(struct kmem_cache_node *n,
1187 				struct page *page, int tail)
1188 {
1189 	spin_lock(&n->list_lock);
1190 	n->nr_partial++;
1191 	if (tail)
1192 		list_add_tail(&page->lru, &n->partial);
1193 	else
1194 		list_add(&page->lru, &n->partial);
1195 	spin_unlock(&n->list_lock);
1196 }
1197 
1198 static void remove_partial(struct kmem_cache *s,
1199 						struct page *page)
1200 {
1201 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1202 
1203 	spin_lock(&n->list_lock);
1204 	list_del(&page->lru);
1205 	n->nr_partial--;
1206 	spin_unlock(&n->list_lock);
1207 }
1208 
1209 /*
1210  * Lock slab and remove from the partial list.
1211  *
1212  * Must hold list_lock.
1213  */
1214 static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
1215 {
1216 	if (slab_trylock(page)) {
1217 		list_del(&page->lru);
1218 		n->nr_partial--;
1219 		SetSlabFrozen(page);
1220 		return 1;
1221 	}
1222 	return 0;
1223 }
1224 
1225 /*
1226  * Try to allocate a partial slab from a specific node.
1227  */
1228 static struct page *get_partial_node(struct kmem_cache_node *n)
1229 {
1230 	struct page *page;
1231 
1232 	/*
1233 	 * Racy check. If we mistakenly see no partial slabs then we
1234 	 * just allocate an empty slab. If we mistakenly try to get a
1235 	 * partial slab and there is none available then get_partial_node()
1236 	 * will return NULL.
1237 	 */
1238 	if (!n || !n->nr_partial)
1239 		return NULL;
1240 
1241 	spin_lock(&n->list_lock);
1242 	list_for_each_entry(page, &n->partial, lru)
1243 		if (lock_and_freeze_slab(n, page))
1244 			goto out;
1245 	page = NULL;
1246 out:
1247 	spin_unlock(&n->list_lock);
1248 	return page;
1249 }
1250 
1251 /*
1252  * Get a page from somewhere. Search in increasing NUMA distances.
1253  */
1254 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1255 {
1256 #ifdef CONFIG_NUMA
1257 	struct zonelist *zonelist;
1258 	struct zone **z;
1259 	struct page *page;
1260 
1261 	/*
1262 	 * The defrag ratio allows a configuration of the tradeoffs between
1263 	 * inter node defragmentation and node local allocations. A lower
1264 	 * defrag_ratio increases the tendency to do local allocations
1265 	 * instead of attempting to obtain partial slabs from other nodes.
1266 	 *
1267 	 * If the defrag_ratio is set to 0 then kmalloc() always
1268 	 * returns node local objects. If the ratio is higher then kmalloc()
1269 	 * may return off node objects because partial slabs are obtained
1270 	 * from other nodes and filled up.
1271 	 *
1272 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1273 	 * defrag_ratio = 1000) then every (well almost) allocation will
1274 	 * first attempt to defrag slab caches on other nodes. This means
1275 	 * scanning over all nodes to look for partial slabs which may be
1276 	 * expensive if we do it every time we are trying to find a slab
1277 	 * with available objects.
1278 	 */
1279 	if (!s->remote_node_defrag_ratio ||
1280 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1281 		return NULL;
1282 
1283 	zonelist = &NODE_DATA(
1284 		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
1285 	for (z = zonelist->zones; *z; z++) {
1286 		struct kmem_cache_node *n;
1287 
1288 		n = get_node(s, zone_to_nid(*z));
1289 
1290 		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
1291 				n->nr_partial > MIN_PARTIAL) {
1292 			page = get_partial_node(n);
1293 			if (page)
1294 				return page;
1295 		}
1296 	}
1297 #endif
1298 	return NULL;
1299 }
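
/*
 * Worked example for the ratio check above: remote_node_defrag_ratio runs
 * from 0 to 1000 (the sysfs file takes a percentage and scales it by 10, as
 * the comment notes).  A remote scan is attempted when
 * get_cycles() % 1024 <= ratio, i.e. roughly ratio/1024 of the slow path
 * refills: a ratio of 100 scans remote nodes about 10% of the time, while
 * 1000 does so nearly always.
 */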
1300 
1301 /*
1302  * Get a partial page, lock it and return it.
1303  */
1304 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1305 {
1306 	struct page *page;
1307 	int searchnode = (node == -1) ? numa_node_id() : node;
1308 
1309 	page = get_partial_node(get_node(s, searchnode));
1310 	if (page || (flags & __GFP_THISNODE))
1311 		return page;
1312 
1313 	return get_any_partial(s, flags);
1314 }
1315 
1316 /*
1317  * Move a page back to the lists.
1318  *
1319  * Must be called with the slab lock held.
1320  *
1321  * On exit the slab lock will have been dropped.
1322  */
1323 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1324 {
1325 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1326 	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1327 
1328 	ClearSlabFrozen(page);
1329 	if (page->inuse) {
1330 
1331 		if (page->freelist) {
1332 			add_partial(n, page, tail);
1333 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1334 		} else {
1335 			stat(c, DEACTIVATE_FULL);
1336 			if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1337 				add_full(n, page);
1338 		}
1339 		slab_unlock(page);
1340 	} else {
1341 		stat(c, DEACTIVATE_EMPTY);
1342 		if (n->nr_partial < MIN_PARTIAL) {
1343 			/*
1344 			 * Adding an empty slab to the partial slabs in order
1345 			 * to avoid page allocator overhead. This slab needs
1346 			 * to come after the other slabs with objects in
1347 			 * so that the others get filled first. That way the
1348 			 * size of the partial list stays small.
1349 			 *
1350 			 * kmem_cache_shrink can reclaim any empty slabs from the
1351 			 * partial list.
1352 			 */
1353 			add_partial(n, page, 1);
1354 			slab_unlock(page);
1355 		} else {
1356 			slab_unlock(page);
1357 			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
1358 			discard_slab(s, page);
1359 		}
1360 	}
1361 }
1362 
1363 /*
1364  * Remove the cpu slab
1365  */
1366 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1367 {
1368 	struct page *page = c->page;
1369 	int tail = 1;
1370 
1371 	if (page->freelist)
1372 		stat(c, DEACTIVATE_REMOTE_FREES);
1373 	/*
1374 	 * Merge cpu freelist into slab freelist. Typically we get here
1375 	 * because both freelists are empty. So this is unlikely
1376 	 * to occur.
1377 	 */
1378 	while (unlikely(c->freelist)) {
1379 		void **object;
1380 
1381 		tail = 0;	/* Hot objects. Put the slab first */
1382 
1383 		/* Retrieve object from cpu_freelist */
1384 		object = c->freelist;
1385 		c->freelist = c->freelist[c->offset];
1386 
1387 		/* And put onto the regular freelist */
1388 		object[c->offset] = page->freelist;
1389 		page->freelist = object;
1390 		page->inuse--;
1391 	}
1392 	c->page = NULL;
1393 	unfreeze_slab(s, page, tail);
1394 }
1395 
1396 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1397 {
1398 	stat(c, CPUSLAB_FLUSH);
1399 	slab_lock(c->page);
1400 	deactivate_slab(s, c);
1401 }
1402 
1403 /*
1404  * Flush cpu slab.
1405  *
1406  * Called from IPI handler with interrupts disabled.
1407  */
1408 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1409 {
1410 	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1411 
1412 	if (likely(c && c->page))
1413 		flush_slab(s, c);
1414 }
1415 
1416 static void flush_cpu_slab(void *d)
1417 {
1418 	struct kmem_cache *s = d;
1419 
1420 	__flush_cpu_slab(s, smp_processor_id());
1421 }
1422 
1423 static void flush_all(struct kmem_cache *s)
1424 {
1425 #ifdef CONFIG_SMP
1426 	on_each_cpu(flush_cpu_slab, s, 1, 1);
1427 #else
1428 	unsigned long flags;
1429 
1430 	local_irq_save(flags);
1431 	flush_cpu_slab(s);
1432 	local_irq_restore(flags);
1433 #endif
1434 }
1435 
1436 /*
1437  * Check if the objects in a per cpu structure fit numa
1438  * locality expectations.
1439  */
1440 static inline int node_match(struct kmem_cache_cpu *c, int node)
1441 {
1442 #ifdef CONFIG_NUMA
1443 	if (node != -1 && c->node != node)
1444 		return 0;
1445 #endif
1446 	return 1;
1447 }
1448 
1449 /*
1450  * Slow path. The lockless freelist is empty or we need to perform
1451  * debugging duties.
1452  *
1453  * Interrupts are disabled.
1454  *
1455  * Processing is still very fast if new objects have been freed to the
1456  * regular freelist. In that case we simply take over the regular freelist
1457  * as the lockless freelist and zap the regular freelist.
1458  *
1459  * If that is not working then we fall back to the partial lists. We take the
1460  * first element of the freelist as the object to allocate now and move the
1461  * rest of the freelist to the lockless freelist.
1462  *
1463  * And if we were unable to get a new slab from the partial slab lists then
1464  * we need to allocate a new slab. This is the slowest path since it involves
1465  * a call to the page allocator and the setup of a new slab.
1466  */
1467 static void *__slab_alloc(struct kmem_cache *s,
1468 		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
1469 {
1470 	void **object;
1471 	struct page *new;
1472 
1473 	/* We handle __GFP_ZERO in the caller */
1474 	gfpflags &= ~__GFP_ZERO;
1475 
1476 	if (!c->page)
1477 		goto new_slab;
1478 
1479 	slab_lock(c->page);
1480 	if (unlikely(!node_match(c, node)))
1481 		goto another_slab;
1482 
1483 	stat(c, ALLOC_REFILL);
1484 
1485 load_freelist:
1486 	object = c->page->freelist;
1487 	if (unlikely(!object))
1488 		goto another_slab;
1489 	if (unlikely(SlabDebug(c->page)))
1490 		goto debug;
1491 
1492 	c->freelist = object[c->offset];
1493 	c->page->inuse = s->objects;
1494 	c->page->freelist = NULL;
1495 	c->node = page_to_nid(c->page);
1496 unlock_out:
1497 	slab_unlock(c->page);
1498 	stat(c, ALLOC_SLOWPATH);
1499 	return object;
1500 
1501 another_slab:
1502 	deactivate_slab(s, c);
1503 
1504 new_slab:
1505 	new = get_partial(s, gfpflags, node);
1506 	if (new) {
1507 		c->page = new;
1508 		stat(c, ALLOC_FROM_PARTIAL);
1509 		goto load_freelist;
1510 	}
1511 
1512 	if (gfpflags & __GFP_WAIT)
1513 		local_irq_enable();
1514 
1515 	new = new_slab(s, gfpflags, node);
1516 
1517 	if (gfpflags & __GFP_WAIT)
1518 		local_irq_disable();
1519 
1520 	if (new) {
1521 		c = get_cpu_slab(s, smp_processor_id());
1522 		stat(c, ALLOC_SLAB);
1523 		if (c->page)
1524 			flush_slab(s, c);
1525 		slab_lock(new);
1526 		SetSlabFrozen(new);
1527 		c->page = new;
1528 		goto load_freelist;
1529 	}
1530 
1531 	/*
1532 	 * No memory available.
1533 	 *
1534 	 * If the slab uses higher order allocs but the object is
1535 	 * smaller than a page size then we can fall back in emergencies
1536 	 * to the page allocator via kmalloc_large. The page allocator may
1537 	 * have failed to obtain a higher order page and we can try to
1538 	 * allocate a single page if the object fits into a single page.
1539 	 * That is only possible if certain conditions are met that are being
1540 	 * checked when a slab is created.
1541 	 */
1542 	if (!(gfpflags & __GFP_NORETRY) &&
1543 				(s->flags & __PAGE_ALLOC_FALLBACK)) {
1544 		if (gfpflags & __GFP_WAIT)
1545 			local_irq_enable();
1546 		object = kmalloc_large(s->objsize, gfpflags);
1547 		if (gfpflags & __GFP_WAIT)
1548 			local_irq_disable();
1549 		return object;
1550 	}
1551 	return NULL;
1552 debug:
1553 	if (!alloc_debug_processing(s, c->page, object, addr))
1554 		goto another_slab;
1555 
1556 	c->page->inuse++;
1557 	c->page->freelist = object[c->offset];
1558 	c->node = -1;
1559 	goto unlock_out;
1560 }
1561 
1562 /*
1563  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1564  * have the fastpath folded into their functions. So no function call
1565  * overhead for requests that can be satisfied on the fastpath.
1566  *
1567  * The fastpath works by first checking if the lockless freelist can be used.
1568  * If not then __slab_alloc is called for slow processing.
1569  *
1570  * Otherwise we can simply pick the next object from the lockless free list.
1571  */
1572 static __always_inline void *slab_alloc(struct kmem_cache *s,
1573 		gfp_t gfpflags, int node, void *addr)
1574 {
1575 	void **object;
1576 	struct kmem_cache_cpu *c;
1577 	unsigned long flags;
1578 
1579 	local_irq_save(flags);
1580 	c = get_cpu_slab(s, smp_processor_id());
1581 	if (unlikely(!c->freelist || !node_match(c, node)))
1582 
1583 		object = __slab_alloc(s, gfpflags, node, addr, c);
1584 
1585 	else {
1586 		object = c->freelist;
1587 		c->freelist = object[c->offset];
1588 		stat(c, ALLOC_FASTPATH);
1589 	}
1590 	local_irq_restore(flags);
1591 
1592 	if (unlikely((gfpflags & __GFP_ZERO) && object))
1593 		memset(object, 0, c->objsize);
1594 
1595 	return object;
1596 }
1597 
1598 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1599 {
1600 	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1601 }
1602 EXPORT_SYMBOL(kmem_cache_alloc);
1603 
1604 #ifdef CONFIG_NUMA
1605 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1606 {
1607 	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1608 }
1609 EXPORT_SYMBOL(kmem_cache_alloc_node);
1610 #endif
1611 
1612 /*
1613  * Slow path handling. This may still be called frequently since objects
1614  * have a longer lifetime than the cpu slabs in most processing loads.
1615  *
1616  * So we still attempt to reduce cache line usage. Just take the slab
1617  * lock and free the item. If there is no additional partial page
1618  * handling required then we can return immediately.
1619  */
1620 static void __slab_free(struct kmem_cache *s, struct page *page,
1621 				void *x, void *addr, unsigned int offset)
1622 {
1623 	void *prior;
1624 	void **object = (void *)x;
1625 	struct kmem_cache_cpu *c;
1626 
1627 	c = get_cpu_slab(s, raw_smp_processor_id());
1628 	stat(c, FREE_SLOWPATH);
1629 	slab_lock(page);
1630 
1631 	if (unlikely(SlabDebug(page)))
1632 		goto debug;
1633 
1634 checks_ok:
1635 	prior = object[offset] = page->freelist;
1636 	page->freelist = object;
1637 	page->inuse--;
1638 
1639 	if (unlikely(SlabFrozen(page))) {
1640 		stat(c, FREE_FROZEN);
1641 		goto out_unlock;
1642 	}
1643 
1644 	if (unlikely(!page->inuse))
1645 		goto slab_empty;
1646 
1647 	/*
1648 	 * Objects left in the slab. If it was not on the partial list before
1649 	 * then add it.
1650 	 */
1651 	if (unlikely(!prior)) {
1652 		add_partial(get_node(s, page_to_nid(page)), page, 1);
1653 		stat(c, FREE_ADD_PARTIAL);
1654 	}
1655 
1656 out_unlock:
1657 	slab_unlock(page);
1658 	return;
1659 
1660 slab_empty:
1661 	if (prior) {
1662 		/*
1663 		 * Slab still on the partial list.
1664 		 */
1665 		remove_partial(s, page);
1666 		stat(c, FREE_REMOVE_PARTIAL);
1667 	}
1668 	slab_unlock(page);
1669 	stat(c, FREE_SLAB);
1670 	discard_slab(s, page);
1671 	return;
1672 
1673 debug:
1674 	if (!free_debug_processing(s, page, x, addr))
1675 		goto out_unlock;
1676 	goto checks_ok;
1677 }
1678 
1679 /*
1680  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1681  * can perform fastpath freeing without additional function calls.
1682  *
1683  * The fastpath is only possible if we are freeing to the current cpu slab
1684  * of this processor. This is typically the case if we have just allocated
1685  * the item before.
1686  *
1687  * If fastpath is not possible then fall back to __slab_free where we deal
1688  * with all sorts of special processing.
1689  */
1690 static __always_inline void slab_free(struct kmem_cache *s,
1691 			struct page *page, void *x, void *addr)
1692 {
1693 	void **object = (void *)x;
1694 	struct kmem_cache_cpu *c;
1695 	unsigned long flags;
1696 
1697 	local_irq_save(flags);
1698 	c = get_cpu_slab(s, smp_processor_id());
1699 	debug_check_no_locks_freed(object, c->objsize);
1700 	if (likely(page == c->page && c->node >= 0)) {
1701 		object[c->offset] = c->freelist;
1702 		c->freelist = object;
1703 		stat(c, FREE_FASTPATH);
1704 	} else
1705 		__slab_free(s, page, x, addr, c->offset);
1706 
1707 	local_irq_restore(flags);
1708 }
1709 
1710 void kmem_cache_free(struct kmem_cache *s, void *x)
1711 {
1712 	struct page *page;
1713 
1714 	page = virt_to_head_page(x);
1715 
1716 	slab_free(s, page, x, __builtin_return_address(0));
1717 }
1718 EXPORT_SYMBOL(kmem_cache_free);
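
/*
 * A minimal caller-side sketch (hypothetical cache and struct, not part of
 * slub.c) of the interfaces exported above:
 *
 *	struct widget { int id; struct list_head link; };
 *	static struct kmem_cache *widget_cache;
 *
 *	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
 *					0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct widget *w = kmem_cache_alloc(widget_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(widget_cache, w);
 *	kmem_cache_destroy(widget_cache);
 *
 * Repeated allocation and freeing of such objects normally stays on the
 * per cpu freelist fast paths above and rarely touches the list_lock.
 */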
1719 
1720 /* Figure out on which slab object the object resides */
1721 static struct page *get_object_page(const void *x)
1722 {
1723 	struct page *page = virt_to_head_page(x);
1724 
1725 	if (!PageSlab(page))
1726 		return NULL;
1727 
1728 	return page;
1729 }
1730 
1731 /*
1732  * Object placement in a slab is made very easy because we always start at
1733  * offset 0. If we tune the size of the object to the alignment then we can
1734  * get the required alignment by putting one properly sized object after
1735  * another.
1736  *
1737  * Notice that the allocation order determines the sizes of the per cpu
1738  * caches. Each processor has always one slab available for allocations.
1739  * Increasing the allocation order reduces the number of times that slabs
1740  * must be moved on and off the partial lists and is therefore a factor in
1741  * locking overhead.
1742  */
1743 
1744 /*
1745  * Minimum / Maximum order of slab pages. This influences locking overhead
1746  * and slab fragmentation. A higher order reduces the number of partial slabs
1747  * and increases the number of allocations possible without having to
1748  * take the list_lock.
1749  */
1750 static int slub_min_order;
1751 static int slub_max_order = DEFAULT_MAX_ORDER;
1752 static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1753 
1754 /*
1755  * Merge control. If this is set then no merging of slab caches will occur.
1756  * (Could be removed. This was introduced to pacify the merge skeptics.)
1757  */
1758 static int slub_nomerge;
1759 
1760 /*
1761  * Calculate the order of allocation given a slab object size.
1762  *
1763  * The order of allocation has significant impact on performance and other
1764  * system components. Generally order 0 allocations should be preferred since
1765  * order 0 does not cause fragmentation in the page allocator. Larger objects
1766  * can be problematic to put into order 0 slabs because there may be too much
1767  * unused space left. We go to a higher order if more than 1/8th of the slab
1768  * would be wasted.
1769  *
1770  * In order to reach satisfactory performance we must ensure that a minimum
1771  * number of objects is in one slab. Otherwise we may generate too much
1772  * activity on the partial lists which requires taking the list_lock. This is
1773  * less of a concern for large slabs, though, which are rarely used.
1774  *
1775  * slub_max_order specifies the order where we begin to stop considering the
1776  * number of objects in a slab as critical. If we reach slub_max_order then
1777  * we try to keep the page order as low as possible. So we accept more waste
1778  * of space in favor of a small page order.
1779  *
1780  * Higher order allocations also allow the placement of more objects in a
1781  * slab and thereby reduce object handling overhead. If the user has
1782  * requested a higher mininum order then we start with that one instead of
1783  * the smallest order which will fit the object.
1784  */
1785 static inline int slab_order(int size, int min_objects,
1786 				int max_order, int fract_leftover)
1787 {
1788 	int order;
1789 	int rem;
1790 	int min_order = slub_min_order;
1791 
1792 	for (order = max(min_order,
1793 				fls(min_objects * size - 1) - PAGE_SHIFT);
1794 			order <= max_order; order++) {
1795 
1796 		unsigned long slab_size = PAGE_SIZE << order;
1797 
1798 		if (slab_size < min_objects * size)
1799 			continue;
1800 
1801 		rem = slab_size % size;
1802 
1803 		if (rem <= slab_size / fract_leftover)
1804 			break;
1805 
1806 	}
1807 
1808 	return order;
1809 }
1810 
1811 static inline int calculate_order(int size)
1812 {
1813 	int order;
1814 	int min_objects;
1815 	int fraction;
1816 
1817 	/*
1818 	 * Attempt to find best configuration for a slab. This
1819 	 * works by first attempting to generate a layout with
1820 	 * the best configuration and backing off gradually.
1821 	 *
1822 	 * First we reduce the acceptable waste in a slab. Then
1823 	 * we reduce the minimum objects required in a slab.
1824 	 */
1825 	min_objects = slub_min_objects;
1826 	while (min_objects > 1) {
1827 		fraction = 8;
1828 		while (fraction >= 4) {
1829 			order = slab_order(size, min_objects,
1830 						slub_max_order, fraction);
1831 			if (order <= slub_max_order)
1832 				return order;
1833 			fraction /= 2;
1834 		}
1835 		min_objects /= 2;
1836 	}
1837 
1838 	/*
1839 	 * We were unable to place multiple objects in a slab. Now
1840 	 * let's see if we can place a single object there.
1841 	 */
1842 	order = slab_order(size, 1, slub_max_order, 1);
1843 	if (order <= slub_max_order)
1844 		return order;
1845 
1846 	/*
1847 	 * Doh this slab cannot be placed using slub_max_order.
1848 	 */
1849 	order = slab_order(size, 1, MAX_ORDER, 1);
1850 	if (order <= MAX_ORDER)
1851 		return order;
1852 	return -ENOSYS;
1853 }
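
/*
 * Worked example of the 1/8th waste rule, assuming 4 KiB pages: a 192 byte
 * object fits 21 times into an order 0 slab with 64 bytes left over, and
 * 64 <= 4096 / 8, so order 0 is acceptable once min_objects is satisfied.
 * A 1600 byte object leaves 896 bytes at order 0, more than the 512 byte
 * limit, so a higher order (or a relaxed fraction) is tried instead.
 */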
1854 
1855 /*
1856  * Figure out what the alignment of the objects will be.
1857  */
1858 static unsigned long calculate_alignment(unsigned long flags,
1859 		unsigned long align, unsigned long size)
1860 {
1861 	/*
1862 	 * If the user wants hardware cache aligned objects then follow that
1863 	 * suggestion if the object is sufficiently large.
1864 	 *
1865 	 * The hardware cache alignment cannot override the specified
1866 	 * alignment though. If that is greater then use it.
1867 	 */
1868 	if (flags & SLAB_HWCACHE_ALIGN) {
1869 		unsigned long ralign = cache_line_size();
1870 		while (size <= ralign / 2)
1871 			ralign /= 2;
1872 		align = max(align, ralign);
1873 	}
1874 
1875 	if (align < ARCH_SLAB_MINALIGN)
1876 		align = ARCH_SLAB_MINALIGN;
1877 
1878 	return ALIGN(align, sizeof(void *));
1879 }
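
/*
 * Worked example for calculate_alignment(), with hypothetical numbers:
 * SLAB_HWCACHE_ALIGN, a 64 byte cache line and a 20 byte object.  ralign is
 * halved while the object still fits in half of it (64 -> 32, since
 * 20 <= 32 but 20 > 16), so the object is aligned to 32 bytes instead of a
 * full cache line and small objects do not waste most of the line.
 */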
1880 
1881 static void init_kmem_cache_cpu(struct kmem_cache *s,
1882 			struct kmem_cache_cpu *c)
1883 {
1884 	c->page = NULL;
1885 	c->freelist = NULL;
1886 	c->node = 0;
1887 	c->offset = s->offset / sizeof(void *);
1888 	c->objsize = s->objsize;
1889 }
1890 
1891 static void init_kmem_cache_node(struct kmem_cache_node *n)
1892 {
1893 	n->nr_partial = 0;
1894 	atomic_long_set(&n->nr_slabs, 0);
1895 	spin_lock_init(&n->list_lock);
1896 	INIT_LIST_HEAD(&n->partial);
1897 #ifdef CONFIG_SLUB_DEBUG
1898 	INIT_LIST_HEAD(&n->full);
1899 #endif
1900 }
1901 
1902 #ifdef CONFIG_SMP
1903 /*
1904  * Per cpu array for per cpu structures.
1905  *
1906  * The per cpu array places all kmem_cache_cpu structures from one processor
1907  * close together meaning that it becomes possible that multiple per cpu
1908  * structures are contained in one cacheline. This may be particularly
1909  * beneficial for the kmalloc caches.
1910  *
1911  * A desktop system typically has around 60-80 slabs. With 100 here we are
1912  * likely able to get per cpu structures for all caches from the array defined
1913  * here. We must be able to cover all kmalloc caches during bootstrap.
1914  *
1915  * If the per cpu array is exhausted then fall back to kmalloc
1916  * of individual cachelines. No sharing is possible then.
1917  */
1918 #define NR_KMEM_CACHE_CPU 100
1919 
1920 static DEFINE_PER_CPU(struct kmem_cache_cpu,
1921 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1922 
1923 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1924 static cpumask_t kmem_cache_cpu_free_init_once = CPU_MASK_NONE;
1925 
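/*
 * Unused entries of the per cpu array are kept on a per cpu free list
 * headed by kmem_cache_cpu_free. free_kmem_cache_cpu() pushes an entry by
 * storing the old list head in c->freelist; alloc_kmem_cache_cpu() pops
 * the head and follows c->freelist to find the next free entry.
 */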
1926 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1927 							int cpu, gfp_t flags)
1928 {
1929 	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
1930 
1931 	if (c)
1932 		per_cpu(kmem_cache_cpu_free, cpu) =
1933 				(void *)c->freelist;
1934 	else {
1935 		/* Table overflow: So allocate ourselves */
1936 		c = kmalloc_node(
1937 			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
1938 			flags, cpu_to_node(cpu));
1939 		if (!c)
1940 			return NULL;
1941 	}
1942 
1943 	init_kmem_cache_cpu(s, c);
1944 	return c;
1945 }
1946 
1947 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
1948 {
1949 	if (c < per_cpu(kmem_cache_cpu, cpu) ||
1950 			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
1951 		kfree(c);
1952 		return;
1953 	}
1954 	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
1955 	per_cpu(kmem_cache_cpu_free, cpu) = c;
1956 }
1957 
1958 static void free_kmem_cache_cpus(struct kmem_cache *s)
1959 {
1960 	int cpu;
1961 
1962 	for_each_online_cpu(cpu) {
1963 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1964 
1965 		if (c) {
1966 			s->cpu_slab[cpu] = NULL;
1967 			free_kmem_cache_cpu(c, cpu);
1968 		}
1969 	}
1970 }
1971 
1972 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
1973 {
1974 	int cpu;
1975 
1976 	for_each_online_cpu(cpu) {
1977 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1978 
1979 		if (c)
1980 			continue;
1981 
1982 		c = alloc_kmem_cache_cpu(s, cpu, flags);
1983 		if (!c) {
1984 			free_kmem_cache_cpus(s);
1985 			return 0;
1986 		}
1987 		s->cpu_slab[cpu] = c;
1988 	}
1989 	return 1;
1990 }
1991 
1992 /*
1993  * Initialize the per cpu array.
1994  */
1995 static void init_alloc_cpu_cpu(int cpu)
1996 {
1997 	int i;
1998 
1999 	if (cpu_isset(cpu, kmem_cache_cpu_free_init_once))
2000 		return;
2001 
2002 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2003 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2004 
2005 	cpu_set(cpu, kmem_cache_cpu_free_init_once);
2006 }
2007 
2008 static void __init init_alloc_cpu(void)
2009 {
2010 	int cpu;
2011 
2012 	for_each_online_cpu(cpu)
2013 		init_alloc_cpu_cpu(cpu);
2014 }
2015 
2016 #else
2017 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2018 static inline void init_alloc_cpu(void) {}
2019 
2020 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2021 {
2022 	init_kmem_cache_cpu(s, &s->cpu_slab);
2023 	return 1;
2024 }
2025 #endif
2026 
2027 #ifdef CONFIG_NUMA
2028 /*
2029  * No kmalloc_node yet so do it by hand. We know that this is the first
2030  * slab on the node for this slabcache. There are no concurrent accesses
2031  * possible.
2032  *
2033  * Note that this function only works on the kmalloc_node_cache
2034  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2035  * memory on a fresh node that has no slab structures yet.
2036  */
2037 static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2038 							   int node)
2039 {
2040 	struct page *page;
2041 	struct kmem_cache_node *n;
2042 	unsigned long flags;
2043 
2044 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2045 
2046 	page = new_slab(kmalloc_caches, gfpflags, node);
2047 
2048 	BUG_ON(!page);
2049 	if (page_to_nid(page) != node) {
2050 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2051 				"node %d\n", node);
2052 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2053 				"in order to be able to continue\n");
2054 	}
2055 
2056 	n = page->freelist;
2057 	BUG_ON(!n);
2058 	page->freelist = get_freepointer(kmalloc_caches, n);
2059 	page->inuse++;
2060 	kmalloc_caches->node[node] = n;
2061 #ifdef CONFIG_SLUB_DEBUG
2062 	init_object(kmalloc_caches, n, 1);
2063 	init_tracking(kmalloc_caches, n);
2064 #endif
2065 	init_kmem_cache_node(n);
2066 	atomic_long_inc(&n->nr_slabs);
2067 
2068 	/*
2069 	 * lockdep requires consistent irq usage for each lock
2070 	 * so even though there cannot be a race this early in
2071 	 * the boot sequence, we still disable irqs.
2072 	 */
2073 	local_irq_save(flags);
2074 	add_partial(n, page, 0);
2075 	local_irq_restore(flags);
2076 	return n;
2077 }
2078 
2079 static void free_kmem_cache_nodes(struct kmem_cache *s)
2080 {
2081 	int node;
2082 
2083 	for_each_node_state(node, N_NORMAL_MEMORY) {
2084 		struct kmem_cache_node *n = s->node[node];
2085 		if (n && n != &s->local_node)
2086 			kmem_cache_free(kmalloc_caches, n);
2087 		s->node[node] = NULL;
2088 	}
2089 }
2090 
2091 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2092 {
2093 	int node;
2094 	int local_node;
2095 
2096 	if (slab_state >= UP)
2097 		local_node = page_to_nid(virt_to_page(s));
2098 	else
2099 		local_node = 0;
2100 
2101 	for_each_node_state(node, N_NORMAL_MEMORY) {
2102 		struct kmem_cache_node *n;
2103 
2104 		if (local_node == node)
2105 			n = &s->local_node;
2106 		else {
2107 			if (slab_state == DOWN) {
2108 				n = early_kmem_cache_node_alloc(gfpflags,
2109 								node);
2110 				continue;
2111 			}
2112 			n = kmem_cache_alloc_node(kmalloc_caches,
2113 							gfpflags, node);
2114 
2115 			if (!n) {
2116 				free_kmem_cache_nodes(s);
2117 				return 0;
2118 			}
2119 
2120 		}
2121 		s->node[node] = n;
2122 		init_kmem_cache_node(n);
2123 	}
2124 	return 1;
2125 }
2126 #else
2127 static void free_kmem_cache_nodes(struct kmem_cache *s)
2128 {
2129 }
2130 
2131 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2132 {
2133 	init_kmem_cache_node(&s->local_node);
2134 	return 1;
2135 }
2136 #endif
2137 
2138 /*
2139  * calculate_sizes() determines the order and the distribution of data within
2140  * a slab object.
2141  */
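/*
 * Roughly, the resulting layout of an object is, in this order: the object
 * itself, an optional red zone word, the relocated free pointer (when it may
 * not overlay the start of the object), two struct track entries when
 * SLAB_STORE_USER is set, and a trailing padding word for redzoning, all
 * rounded up to the calculated alignment.
 */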
2142 static int calculate_sizes(struct kmem_cache *s)
2143 {
2144 	unsigned long flags = s->flags;
2145 	unsigned long size = s->objsize;
2146 	unsigned long align = s->align;
2147 
2148 	/*
2149 	 * Round up object size to the next word boundary. We can only
2150 	 * place the free pointer at word boundaries and this determines
2151 	 * the possible location of the free pointer.
2152 	 */
2153 	size = ALIGN(size, sizeof(void *));
2154 
2155 #ifdef CONFIG_SLUB_DEBUG
2156 	/*
2157 	 * Determine if we can poison the object itself. If the user of
2158 	 * the slab may touch the object after free or before allocation
2159 	 * then we should never poison the object itself.
2160 	 */
2161 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2162 			!s->ctor)
2163 		s->flags |= __OBJECT_POISON;
2164 	else
2165 		s->flags &= ~__OBJECT_POISON;
2166 
2167 
2168 	/*
2169 	 * If we are Redzoning then check if there is some space between the
2170 	 * end of the object and the free pointer. If not then add an
2171 	 * additional word to have some bytes to store Redzone information.
2172 	 */
2173 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2174 		size += sizeof(void *);
2175 #endif
2176 
2177 	/*
2178 	 * With that we have determined the number of bytes in actual use
2179 	 * by the object. This is the potential offset to the free pointer.
2180 	 */
2181 	s->inuse = size;
2182 
2183 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2184 		s->ctor)) {
2185 		/*
2186 		 * Relocate free pointer after the object if it is not
2187 		 * permitted to overwrite the first word of the object on
2188 		 * kmem_cache_free.
2189 		 *
2190 		 * This is the case if we do RCU, have a constructor or
2191 		 * destructor or are poisoning the objects.
2192 		 */
2193 		s->offset = size;
2194 		size += sizeof(void *);
2195 	}
2196 
2197 #ifdef CONFIG_SLUB_DEBUG
2198 	if (flags & SLAB_STORE_USER)
2199 		/*
2200 		 * Need to store information about allocs and frees after
2201 		 * the object.
2202 		 */
2203 		size += 2 * sizeof(struct track);
2204 
2205 	if (flags & SLAB_RED_ZONE)
2206 		/*
2207 		 * Add some empty padding so that we can catch
2208 		 * overwrites from earlier objects rather than let
2209 		 * tracking information or the free pointer be
2210 		 * corrupted if a user writes before the start
2211 		 * of the object.
2212 		 */
2213 		size += sizeof(void *);
2214 #endif
2215 
2216 	/*
2217 	 * Determine the alignment based on various parameters that the
2218 	 * user specified and the dynamic determination of cache line size
2219 	 * on bootup.
2220 	 */
2221 	align = calculate_alignment(flags, align, s->objsize);
2222 
2223 	/*
2224 	 * SLUB stores one object immediately after another beginning from
2225 	 * offset 0. In order to align the objects we have to simply size
2226 	 * each object to conform to the alignment.
2227 	 */
2228 	size = ALIGN(size, align);
2229 	s->size = size;
2230 
2231 	if ((flags & __KMALLOC_CACHE) &&
2232 			PAGE_SIZE / size < slub_min_objects) {
2233 		/*
2234 		 * Kmalloc cache that would not have enough objects in
2235 		 * an order 0 page. Kmalloc slabs can fall back to
2236 		 * page allocator order 0 allocs so take a reasonably large
2237 		 * order that will allow us a good number of objects.
2238 		 */
2239 		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
2240 		s->flags |= __PAGE_ALLOC_FALLBACK;
2241 		s->allocflags |= __GFP_NOWARN;
2242 	} else
2243 		s->order = calculate_order(size);
2244 
2245 	if (s->order < 0)
2246 		return 0;
2247 
2248 	s->allocflags &= __GFP_NOWARN;	/* keep __GFP_NOWARN set by the fallback path above */
2249 	if (s->order)
2250 		s->allocflags |= __GFP_COMP;
2251 
2252 	if (s->flags & SLAB_CACHE_DMA)
2253 		s->allocflags |= SLUB_DMA;
2254 
2255 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2256 		s->allocflags |= __GFP_RECLAIMABLE;
2257 
2258 	/*
2259 	 * Determine the number of objects per slab
2260 	 */
2261 	s->objects = (PAGE_SIZE << s->order) / size;
2262 
2263 	return !!s->objects;
2264 
2265 }
2266 
2267 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2268 		const char *name, size_t size,
2269 		size_t align, unsigned long flags,
2270 		void (*ctor)(struct kmem_cache *, void *))
2271 {
2272 	memset(s, 0, kmem_size);
2273 	s->name = name;
2274 	s->ctor = ctor;
2275 	s->objsize = size;
2276 	s->align = align;
2277 	s->flags = kmem_cache_flags(size, flags, name, ctor);
2278 
2279 	if (!calculate_sizes(s))
2280 		goto error;
2281 
2282 	s->refcount = 1;
2283 #ifdef CONFIG_NUMA
2284 	s->remote_node_defrag_ratio = 100;
2285 #endif
2286 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2287 		goto error;
2288 
2289 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2290 		return 1;
2291 	free_kmem_cache_nodes(s);
2292 error:
2293 	if (flags & SLAB_PANIC)
2294 		panic("Cannot create slab %s size=%lu realsize=%u "
2295 			"order=%u offset=%u flags=%lx\n",
2296 			s->name, (unsigned long)size, s->size, s->order,
2297 			s->offset, flags);
2298 	return 0;
2299 }
2300 
2301 /*
2302  * Check if a given pointer is valid
2303  */
2304 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2305 {
2306 	struct page *page;
2307 
2308 	page = get_object_page(object);
2309 
2310 	if (!page || s != page->slab)
2311 		/* No slab or wrong slab */
2312 		return 0;
2313 
2314 	if (!check_valid_pointer(s, page, object))
2315 		return 0;
2316 
2317 	/*
2318 	 * We could also check if the object is on the slabs freelist.
2319 	 * But this would be too expensive and it seems that the main
2320 	 * purpose of kmem_ptr_validate() is to check if the object belongs
2321 	 * to a certain slab.
2322 	 */
2323 	return 1;
2324 }
2325 EXPORT_SYMBOL(kmem_ptr_validate);
2326 
2327 /*
2328  * Determine the size of a slab object
2329  */
2330 unsigned int kmem_cache_size(struct kmem_cache *s)
2331 {
2332 	return s->objsize;
2333 }
2334 EXPORT_SYMBOL(kmem_cache_size);
2335 
2336 const char *kmem_cache_name(struct kmem_cache *s)
2337 {
2338 	return s->name;
2339 }
2340 EXPORT_SYMBOL(kmem_cache_name);
2341 
2342 /*
2343  * Attempt to free all slabs on a node. Return the number of slabs we
2344  * were unable to free.
2345  */
2346 static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2347 			struct list_head *list)
2348 {
2349 	int slabs_inuse = 0;
2350 	unsigned long flags;
2351 	struct page *page, *h;
2352 
2353 	spin_lock_irqsave(&n->list_lock, flags);
2354 	list_for_each_entry_safe(page, h, list, lru)
2355 		if (!page->inuse) {
2356 			list_del(&page->lru);
2357 			discard_slab(s, page);
2358 		} else
2359 			slabs_inuse++;
2360 	spin_unlock_irqrestore(&n->list_lock, flags);
2361 	return slabs_inuse;
2362 }
2363 
2364 /*
2365  * Release all resources used by a slab cache.
2366  */
2367 static inline int kmem_cache_close(struct kmem_cache *s)
2368 {
2369 	int node;
2370 
2371 	flush_all(s);
2372 
2373 	/* Attempt to free all objects */
2374 	free_kmem_cache_cpus(s);
2375 	for_each_node_state(node, N_NORMAL_MEMORY) {
2376 		struct kmem_cache_node *n = get_node(s, node);
2377 
2378 		n->nr_partial -= free_list(s, n, &n->partial);
2379 		if (atomic_long_read(&n->nr_slabs))
2380 			return 1;
2381 	}
2382 	free_kmem_cache_nodes(s);
2383 	return 0;
2384 }
2385 
2386 /*
2387  * Close a cache and release the kmem_cache structure
2388  * (must be used for caches created using kmem_cache_create)
2389  */
2390 void kmem_cache_destroy(struct kmem_cache *s)
2391 {
2392 	down_write(&slub_lock);
2393 	s->refcount--;
2394 	if (!s->refcount) {
2395 		list_del(&s->list);
2396 		up_write(&slub_lock);
2397 		if (kmem_cache_close(s))
2398 			WARN_ON(1);
2399 		sysfs_slab_remove(s);
2400 	} else
2401 		up_write(&slub_lock);
2402 }
2403 EXPORT_SYMBOL(kmem_cache_destroy);
2404 
2405 /********************************************************************
2406  *		Kmalloc subsystem
2407  *******************************************************************/
2408 
2409 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
2410 EXPORT_SYMBOL(kmalloc_caches);
2411 
2412 #ifdef CONFIG_ZONE_DMA
2413 static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
2414 #endif
2415 
2416 static int __init setup_slub_min_order(char *str)
2417 {
2418 	get_option(&str, &slub_min_order);
2419 
2420 	return 1;
2421 }
2422 
2423 __setup("slub_min_order=", setup_slub_min_order);
2424 
2425 static int __init setup_slub_max_order(char *str)
2426 {
2427 	get_option(&str, &slub_max_order);
2428 
2429 	return 1;
2430 }
2431 
2432 __setup("slub_max_order=", setup_slub_max_order);
2433 
2434 static int __init setup_slub_min_objects(char *str)
2435 {
2436 	get_option(&str, &slub_min_objects);
2437 
2438 	return 1;
2439 }
2440 
2441 __setup("slub_min_objects=", setup_slub_min_objects);
2442 
2443 static int __init setup_slub_nomerge(char *str)
2444 {
2445 	slub_nomerge = 1;
2446 	return 1;
2447 }
2448 
2449 __setup("slub_nomerge", setup_slub_nomerge);
2450 
2451 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2452 		const char *name, int size, gfp_t gfp_flags)
2453 {
2454 	unsigned int flags = 0;
2455 
2456 	if (gfp_flags & SLUB_DMA)
2457 		flags = SLAB_CACHE_DMA;
2458 
2459 	down_write(&slub_lock);
2460 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2461 			flags | __KMALLOC_CACHE, NULL))
2462 		goto panic;
2463 
2464 	list_add(&s->list, &slab_caches);
2465 	up_write(&slub_lock);
2466 	if (sysfs_slab_add(s))
2467 		goto panic;
2468 	return s;
2469 
2470 panic:
2471 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2472 }
2473 
2474 #ifdef CONFIG_ZONE_DMA
2475 
2476 static void sysfs_add_func(struct work_struct *w)
2477 {
2478 	struct kmem_cache *s;
2479 
2480 	down_write(&slub_lock);
2481 	list_for_each_entry(s, &slab_caches, list) {
2482 		if (s->flags & __SYSFS_ADD_DEFERRED) {
2483 			s->flags &= ~__SYSFS_ADD_DEFERRED;
2484 			sysfs_slab_add(s);
2485 		}
2486 	}
2487 	up_write(&slub_lock);
2488 }
2489 
2490 static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2491 
2492 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2493 {
2494 	struct kmem_cache *s;
2495 	char *text;
2496 	size_t realsize;
2497 
2498 	s = kmalloc_caches_dma[index];
2499 	if (s)
2500 		return s;
2501 
2502 	/* Dynamically create dma cache */
2503 	if (flags & __GFP_WAIT)
2504 		down_write(&slub_lock);
2505 	else {
2506 		if (!down_write_trylock(&slub_lock))
2507 			goto out;
2508 	}
2509 
2510 	if (kmalloc_caches_dma[index])
2511 		goto unlock_out;
2512 
2513 	realsize = kmalloc_caches[index].objsize;
2514 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2515 			 (unsigned int)realsize);
2516 	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2517 
2518 	if (!s || !text || !kmem_cache_open(s, flags, text,
2519 			realsize, ARCH_KMALLOC_MINALIGN,
2520 			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2521 		kfree(s);
2522 		kfree(text);
2523 		goto unlock_out;
2524 	}
2525 
2526 	list_add(&s->list, &slab_caches);
2527 	kmalloc_caches_dma[index] = s;
2528 
2529 	schedule_work(&sysfs_add_work);
2530 
2531 unlock_out:
2532 	up_write(&slub_lock);
2533 out:
2534 	return kmalloc_caches_dma[index];
2535 }
2536 #endif
2537 
2538 /*
2539  * Conversion table for small slab sizes / 8 to the index in the
2540  * kmalloc array. This is necessary for slabs <= 192 since we have non power
2541  * of two cache sizes there. The size of larger slabs can be determined using
2542  * fls.
2543  */
2544 static s8 size_index[24] = {
2545 	3,	/* 8 */
2546 	4,	/* 16 */
2547 	5,	/* 24 */
2548 	5,	/* 32 */
2549 	6,	/* 40 */
2550 	6,	/* 48 */
2551 	6,	/* 56 */
2552 	6,	/* 64 */
2553 	1,	/* 72 */
2554 	1,	/* 80 */
2555 	1,	/* 88 */
2556 	1,	/* 96 */
2557 	7,	/* 104 */
2558 	7,	/* 112 */
2559 	7,	/* 120 */
2560 	7,	/* 128 */
2561 	2,	/* 136 */
2562 	2,	/* 144 */
2563 	2,	/* 152 */
2564 	2,	/* 160 */
2565 	2,	/* 168 */
2566 	2,	/* 176 */
2567 	2,	/* 184 */
2568 	2	/* 192 */
2569 };
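
/*
 * Example lookups: kmalloc(30) uses size_index[(30 - 1) / 8] == size_index[3]
 * == 5, the 32 byte cache, and kmalloc(100) uses size_index[12] == 7, the
 * 128 byte cache. Sizes above 192 use fls() instead, so kmalloc(1000) maps
 * to index fls(999) == 10, the 1024 byte cache.
 */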
2570 
2571 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2572 {
2573 	int index;
2574 
2575 	if (size <= 192) {
2576 		if (!size)
2577 			return ZERO_SIZE_PTR;
2578 
2579 		index = size_index[(size - 1) / 8];
2580 	} else
2581 		index = fls(size - 1);
2582 
2583 #ifdef CONFIG_ZONE_DMA
2584 	if (unlikely((flags & SLUB_DMA)))
2585 		return dma_kmalloc_cache(index, flags);
2586 
2587 #endif
2588 	return &kmalloc_caches[index];
2589 }
2590 
2591 void *__kmalloc(size_t size, gfp_t flags)
2592 {
2593 	struct kmem_cache *s;
2594 
2595 	if (unlikely(size > PAGE_SIZE))
2596 		return kmalloc_large(size, flags);
2597 
2598 	s = get_slab(size, flags);
2599 
2600 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2601 		return s;
2602 
2603 	return slab_alloc(s, flags, -1, __builtin_return_address(0));
2604 }
2605 EXPORT_SYMBOL(__kmalloc);
2606 
2607 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2608 {
2609 	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2610 						get_order(size));
2611 
2612 	if (page)
2613 		return page_address(page);
2614 	else
2615 		return NULL;
2616 }
2617 
2618 #ifdef CONFIG_NUMA
2619 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2620 {
2621 	struct kmem_cache *s;
2622 
2623 	if (unlikely(size > PAGE_SIZE))
2624 		return kmalloc_large_node(size, flags, node);
2625 
2626 	s = get_slab(size, flags);
2627 
2628 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2629 		return s;
2630 
2631 	return slab_alloc(s, flags, node, __builtin_return_address(0));
2632 }
2633 EXPORT_SYMBOL(__kmalloc_node);
2634 #endif
2635 
2636 size_t ksize(const void *object)
2637 {
2638 	struct page *page;
2639 	struct kmem_cache *s;
2640 
2641 	if (unlikely(object == ZERO_SIZE_PTR))
2642 		return 0;
2643 
2644 	page = virt_to_head_page(object);
2645 
2646 	if (unlikely(!PageSlab(page)))
2647 		return PAGE_SIZE << compound_order(page);
2648 
2649 	s = page->slab;
2650 
2651 #ifdef CONFIG_SLUB_DEBUG
2652 	/*
2653 	 * Debugging requires use of the padding between object
2654 	 * and whatever may come after it.
2655 	 */
2656 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2657 		return s->objsize;
2658 
2659 #endif
2660 	/*
2661 	 * If we have the need to store the freelist pointer
2662 	 * back there or track user information then we can
2663 	 * only use the space before that information.
2664 	 */
2665 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2666 		return s->inuse;
2667 	/*
2668 	 * Else we can use all the padding etc for the allocation
2669 	 */
2670 	return s->size;
2671 }
2672 EXPORT_SYMBOL(ksize);
2673 
2674 void kfree(const void *x)
2675 {
2676 	struct page *page;
2677 	void *object = (void *)x;
2678 
2679 	if (unlikely(ZERO_OR_NULL_PTR(x)))
2680 		return;
2681 
2682 	page = virt_to_head_page(x);
2683 	if (unlikely(!PageSlab(page))) {
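		/*
		 * Not a slab page: this came straight from the page
		 * allocator via the large kmalloc path, so just drop the
		 * page reference.
		 */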
2684 		put_page(page);
2685 		return;
2686 	}
2687 	slab_free(page->slab, page, object, __builtin_return_address(0));
2688 }
2689 EXPORT_SYMBOL(kfree);
2690 
2691 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
2692 static unsigned long count_partial(struct kmem_cache_node *n)
2693 {
2694 	unsigned long flags;
2695 	unsigned long x = 0;
2696 	struct page *page;
2697 
2698 	spin_lock_irqsave(&n->list_lock, flags);
2699 	list_for_each_entry(page, &n->partial, lru)
2700 		x += page->inuse;
2701 	spin_unlock_irqrestore(&n->list_lock, flags);
2702 	return x;
2703 }
2704 #endif
2705 
2706 /*
2707  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2708  * the remaining slabs by the number of items in use. The slabs with the
2709  * most items in use come first. New allocations will then fill those up
2710  * and thus they can be removed from the partial lists.
2711  *
2712  * The slabs with the least items are placed last. This results in them
2713  * being allocated from last, increasing the chance that the remaining
2714  * objects in them get freed so that the slabs can eventually be discarded.
2715  */
2716 int kmem_cache_shrink(struct kmem_cache *s)
2717 {
2718 	int node;
2719 	int i;
2720 	struct kmem_cache_node *n;
2721 	struct page *page;
2722 	struct page *t;
2723 	struct list_head *slabs_by_inuse =
2724 		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
2725 	unsigned long flags;
2726 
2727 	if (!slabs_by_inuse)
2728 		return -ENOMEM;
2729 
2730 	flush_all(s);
2731 	for_each_node_state(node, N_NORMAL_MEMORY) {
2732 		n = get_node(s, node);
2733 
2734 		if (!n->nr_partial)
2735 			continue;
2736 
2737 		for (i = 0; i < s->objects; i++)
2738 			INIT_LIST_HEAD(slabs_by_inuse + i);
2739 
2740 		spin_lock_irqsave(&n->list_lock, flags);
2741 
2742 		/*
2743 		 * Build lists indexed by the items in use in each slab.
2744 		 *
2745 		 * Note that concurrent frees may occur while we hold the
2746 		 * list_lock. page->inuse here is the upper limit.
2747 		 */
2748 		list_for_each_entry_safe(page, t, &n->partial, lru) {
2749 			if (!page->inuse && slab_trylock(page)) {
2750 				/*
2751 				 * Must hold slab lock here because slab_free
2752 				 * may have freed the last object and be
2753 				 * waiting to release the slab.
2754 				 */
2755 				list_del(&page->lru);
2756 				n->nr_partial--;
2757 				slab_unlock(page);
2758 				discard_slab(s, page);
2759 			} else {
2760 				list_move(&page->lru,
2761 				slabs_by_inuse + page->inuse);
2762 			}
2763 		}
2764 
2765 		/*
2766 		 * Rebuild the partial list with the slabs filled up most
2767 		 * first and the least used slabs at the end.
2768 		 */
2769 		for (i = s->objects - 1; i >= 0; i--)
2770 			list_splice(slabs_by_inuse + i, n->partial.prev);
2771 
2772 		spin_unlock_irqrestore(&n->list_lock, flags);
2773 	}
2774 
2775 	kfree(slabs_by_inuse);
2776 	return 0;
2777 }
2778 EXPORT_SYMBOL(kmem_cache_shrink);
2779 
2780 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2781 static int slab_mem_going_offline_callback(void *arg)
2782 {
2783 	struct kmem_cache *s;
2784 
2785 	down_read(&slub_lock);
2786 	list_for_each_entry(s, &slab_caches, list)
2787 		kmem_cache_shrink(s);
2788 	up_read(&slub_lock);
2789 
2790 	return 0;
2791 }
2792 
2793 static void slab_mem_offline_callback(void *arg)
2794 {
2795 	struct kmem_cache_node *n;
2796 	struct kmem_cache *s;
2797 	struct memory_notify *marg = arg;
2798 	int offline_node;
2799 
2800 	offline_node = marg->status_change_nid;
2801 
2802 	/*
2803 	 * If the node still has available memory, we still need its
2804 	 * kmem_cache_node, so there is nothing to free here.
2805 	 */
2806 	if (offline_node < 0)
2807 		return;
2808 
2809 	down_read(&slub_lock);
2810 	list_for_each_entry(s, &slab_caches, list) {
2811 		n = get_node(s, offline_node);
2812 		if (n) {
2813 			/*
2814 			 * if n->nr_slabs > 0, slabs still exist on the node
2815 			 * that is going down. We were unable to free them,
2816 			 * and the offline_pages() function shouldn't call this
2817 			 * callback. So, we must fail.
2818 			 */
2819 			BUG_ON(atomic_long_read(&n->nr_slabs));
2820 
2821 			s->node[offline_node] = NULL;
2822 			kmem_cache_free(kmalloc_caches, n);
2823 		}
2824 	}
2825 	up_read(&slub_lock);
2826 }
2827 
2828 static int slab_mem_going_online_callback(void *arg)
2829 {
2830 	struct kmem_cache_node *n;
2831 	struct kmem_cache *s;
2832 	struct memory_notify *marg = arg;
2833 	int nid = marg->status_change_nid;
2834 	int ret = 0;
2835 
2836 	/*
2837 	 * If the node's memory is already available, then kmem_cache_node is
2838 	 * already created. Nothing to do.
2839 	 */
2840 	if (nid < 0)
2841 		return 0;
2842 
2843 	/*
2844 	 * We are bringing a node online. No memory is available yet. We must
2845 	 * allocate a kmem_cache_node structure in order to bring the node
2846 	 * online.
2847 	 */
2848 	down_read(&slub_lock);
2849 	list_for_each_entry(s, &slab_caches, list) {
2850 		/*
2851 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
2852 		 *      since memory is not yet available from the node that
2853 		 *      is brought up.
2854 		 */
2855 		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2856 		if (!n) {
2857 			ret = -ENOMEM;
2858 			goto out;
2859 		}
2860 		init_kmem_cache_node(n);
2861 		s->node[nid] = n;
2862 	}
2863 out:
2864 	up_read(&slub_lock);
2865 	return ret;
2866 }
2867 
2868 static int slab_memory_callback(struct notifier_block *self,
2869 				unsigned long action, void *arg)
2870 {
2871 	int ret = 0;
2872 
2873 	switch (action) {
2874 	case MEM_GOING_ONLINE:
2875 		ret = slab_mem_going_online_callback(arg);
2876 		break;
2877 	case MEM_GOING_OFFLINE:
2878 		ret = slab_mem_going_offline_callback(arg);
2879 		break;
2880 	case MEM_OFFLINE:
2881 	case MEM_CANCEL_ONLINE:
2882 		slab_mem_offline_callback(arg);
2883 		break;
2884 	case MEM_ONLINE:
2885 	case MEM_CANCEL_OFFLINE:
2886 		break;
2887 	}
2888 
2889 	ret = notifier_from_errno(ret);
2890 	return ret;
2891 }
2892 
2893 #endif /* CONFIG_MEMORY_HOTPLUG */
2894 
2895 /********************************************************************
2896  *			Basic setup of slabs
2897  *******************************************************************/
2898 
2899 void __init kmem_cache_init(void)
2900 {
2901 	int i;
2902 	int caches = 0;
2903 
2904 	init_alloc_cpu();
2905 
2906 #ifdef CONFIG_NUMA
2907 	/*
2908 	 * Must first have the slab cache available for the allocations of the
2909 	 * struct kmem_cache_node's. There is special bootstrap code in
2910 	 * kmem_cache_open for slab_state == DOWN.
2911 	 */
2912 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2913 		sizeof(struct kmem_cache_node), GFP_KERNEL);
2914 	kmalloc_caches[0].refcount = -1;
2915 	caches++;
2916 
2917 	hotplug_memory_notifier(slab_memory_callback, 1);
2918 #endif
2919 
2920 	/* Able to allocate the per node structures */
2921 	slab_state = PARTIAL;
2922 
2923 	/* Caches that are not of the two-to-the-power-of size */
2924 	if (KMALLOC_MIN_SIZE <= 64) {
2925 		create_kmalloc_cache(&kmalloc_caches[1],
2926 				"kmalloc-96", 96, GFP_KERNEL);
2927 		caches++;
2928 	}
2929 	if (KMALLOC_MIN_SIZE <= 128) {
2930 		create_kmalloc_cache(&kmalloc_caches[2],
2931 				"kmalloc-192", 192, GFP_KERNEL);
2932 		caches++;
2933 	}
2934 
2935 	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
2936 		create_kmalloc_cache(&kmalloc_caches[i],
2937 			"kmalloc", 1 << i, GFP_KERNEL);
2938 		caches++;
2939 	}
2940 
2941 
2942 	/*
2943 	 * Patch up the size_index table if we have strange large alignment
2944 	 * requirements for the kmalloc array. This is only the case for
2945 	 * MIPS it seems. The standard arches will not generate any code here.
2946 	 *
2947 	 * Largest permitted alignment is 256 bytes due to the way we
2948 	 * handle the index determination for the smaller caches.
2949 	 *
2950 	 * Make sure that nothing crazy happens if someone starts tinkering
2951 	 * around with ARCH_KMALLOC_MINALIGN
2952 	 */
2953 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2954 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
2955 
2956 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
2957 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
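	/*
	 * For example, if the minimum alignment forces KMALLOC_MIN_SIZE up
	 * to 64, the loop above repoints the table entries for sizes up to
	 * 56 at index KMALLOC_SHIFT_LOW, so that every small kmalloc request
	 * falls into the smallest cache that is still properly aligned.
	 */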
2958 
2959 	slab_state = UP;
2960 
2961 	/* Provide the correct kmalloc names now that the caches are up */
2962 	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
2963 		kmalloc_caches[i].name =
2964 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
2965 
2966 #ifdef CONFIG_SMP
2967 	register_cpu_notifier(&slab_notifier);
2968 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
2969 				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
2970 #else
2971 	kmem_size = sizeof(struct kmem_cache);
2972 #endif
2973 
2974 	printk(KERN_INFO
2975 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
2976 		" CPUs=%d, Nodes=%d\n",
2977 		caches, cache_line_size(),
2978 		slub_min_order, slub_max_order, slub_min_objects,
2979 		nr_cpu_ids, nr_node_ids);
2980 }
2981 
2982 /*
2983  * Find a mergeable slab cache
2984  */
2985 static int slab_unmergeable(struct kmem_cache *s)
2986 {
2987 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
2988 		return 1;
2989 
2990 	if ((s->flags & __PAGE_ALLOC_FALLBACK))
2991 		return 1;
2992 
2993 	if (s->ctor)
2994 		return 1;
2995 
2996 	/*
2997 	 * We may have set a slab to be unmergeable during bootstrap.
2998 	 */
2999 	if (s->refcount < 0)
3000 		return 1;
3001 
3002 	return 0;
3003 }
3004 
3005 static struct kmem_cache *find_mergeable(size_t size,
3006 		size_t align, unsigned long flags, const char *name,
3007 		void (*ctor)(struct kmem_cache *, void *))
3008 {
3009 	struct kmem_cache *s;
3010 
3011 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3012 		return NULL;
3013 
3014 	if (ctor)
3015 		return NULL;
3016 
3017 	size = ALIGN(size, sizeof(void *));
3018 	align = calculate_alignment(flags, align, size);
3019 	size = ALIGN(size, align);
3020 	flags = kmem_cache_flags(size, flags, name, NULL);
3021 
3022 	list_for_each_entry(s, &slab_caches, list) {
3023 		if (slab_unmergeable(s))
3024 			continue;
3025 
3026 		if (size > s->size)
3027 			continue;
3028 
3029 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3030 				continue;
3031 		/*
3032 		 * Check if alignment is compatible.
3033 		 * Courtesy of Adrian Drzewiecki
3034 		 */
3035 		if ((s->size & ~(align - 1)) != s->size)
3036 			continue;
3037 
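		/*
		 * Reject caches that are so much bigger than the requested
		 * size that at least one word per object would be wasted.
		 */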
3038 		if (s->size - size >= sizeof(void *))
3039 			continue;
3040 
3041 		return s;
3042 	}
3043 	return NULL;
3044 }
3045 
3046 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3047 		size_t align, unsigned long flags,
3048 		void (*ctor)(struct kmem_cache *, void *))
3049 {
3050 	struct kmem_cache *s;
3051 
3052 	down_write(&slub_lock);
3053 	s = find_mergeable(size, align, flags, name, ctor);
3054 	if (s) {
3055 		int cpu;
3056 
3057 		s->refcount++;
3058 		/*
3059 		 * Adjust the object sizes so that we clear
3060 		 * the complete object on kzalloc.
3061 		 */
3062 		s->objsize = max(s->objsize, (int)size);
3063 
3064 		/*
3065 		 * And then we need to update the object size in the
3066 		 * per cpu structures
3067 		 */
3068 		for_each_online_cpu(cpu)
3069 			get_cpu_slab(s, cpu)->objsize = s->objsize;
3070 
3071 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3072 		up_write(&slub_lock);
3073 
3074 		if (sysfs_slab_alias(s, name))
3075 			goto err;
3076 		return s;
3077 	}
3078 
3079 	s = kmalloc(kmem_size, GFP_KERNEL);
3080 	if (s) {
3081 		if (kmem_cache_open(s, GFP_KERNEL, name,
3082 				size, align, flags, ctor)) {
3083 			list_add(&s->list, &slab_caches);
3084 			up_write(&slub_lock);
3085 			if (sysfs_slab_add(s))
3086 				goto err;
3087 			return s;
3088 		}
3089 		kfree(s);
3090 	}
3091 	up_write(&slub_lock);
3092 
3093 err:
3094 	if (flags & SLAB_PANIC)
3095 		panic("Cannot create slabcache %s\n", name);
3096 	else
3097 		s = NULL;
3098 	return s;
3099 }
3100 EXPORT_SYMBOL(kmem_cache_create);
3101 
3102 #ifdef CONFIG_SMP
3103 /*
3104  * Use the cpu notifier to ensure that the cpu slabs are flushed when
3105  * necessary.
3106  */
3107 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3108 		unsigned long action, void *hcpu)
3109 {
3110 	long cpu = (long)hcpu;
3111 	struct kmem_cache *s;
3112 	unsigned long flags;
3113 
3114 	switch (action) {
3115 	case CPU_UP_PREPARE:
3116 	case CPU_UP_PREPARE_FROZEN:
3117 		init_alloc_cpu_cpu(cpu);
3118 		down_read(&slub_lock);
3119 		list_for_each_entry(s, &slab_caches, list)
3120 			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3121 							GFP_KERNEL);
3122 		up_read(&slub_lock);
3123 		break;
3124 
3125 	case CPU_UP_CANCELED:
3126 	case CPU_UP_CANCELED_FROZEN:
3127 	case CPU_DEAD:
3128 	case CPU_DEAD_FROZEN:
3129 		down_read(&slub_lock);
3130 		list_for_each_entry(s, &slab_caches, list) {
3131 			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3132 
3133 			local_irq_save(flags);
3134 			__flush_cpu_slab(s, cpu);
3135 			local_irq_restore(flags);
3136 			free_kmem_cache_cpu(c, cpu);
3137 			s->cpu_slab[cpu] = NULL;
3138 		}
3139 		up_read(&slub_lock);
3140 		break;
3141 	default:
3142 		break;
3143 	}
3144 	return NOTIFY_OK;
3145 }
3146 
3147 static struct notifier_block __cpuinitdata slab_notifier = {
3148 	.notifier_call = slab_cpuup_callback
3149 };
3150 
3151 #endif
3152 
3153 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
3154 {
3155 	struct kmem_cache *s;
3156 
3157 	if (unlikely(size > PAGE_SIZE))
3158 		return kmalloc_large(size, gfpflags);
3159 
3160 	s = get_slab(size, gfpflags);
3161 
3162 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3163 		return s;
3164 
3165 	return slab_alloc(s, gfpflags, -1, caller);
3166 }
3167 
3168 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3169 					int node, void *caller)
3170 {
3171 	struct kmem_cache *s;
3172 
3173 	if (unlikely(size > PAGE_SIZE))
3174 		return kmalloc_large_node(size, gfpflags, node);
3175 
3176 	s = get_slab(size, gfpflags);
3177 
3178 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3179 		return s;
3180 
3181 	return slab_alloc(s, gfpflags, node, caller);
3182 }
3183 
3184 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
3185 static int validate_slab(struct kmem_cache *s, struct page *page,
3186 						unsigned long *map)
3187 {
3188 	void *p;
3189 	void *addr = page_address(page);
3190 
3191 	if (!check_slab(s, page) ||
3192 			!on_freelist(s, page, NULL))
3193 		return 0;
3194 
3195 	/* Now we know that a valid freelist exists */
3196 	bitmap_zero(map, s->objects);
3197 
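	/*
	 * First pass: mark every object on the freelist in the bitmap and
	 * check it. Second pass: check every object that was not marked,
	 * i.e. every allocated object.
	 */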
3198 	for_each_free_object(p, s, page->freelist) {
3199 		set_bit(slab_index(p, s, addr), map);
3200 		if (!check_object(s, page, p, 0))
3201 			return 0;
3202 	}
3203 
3204 	for_each_object(p, s, addr)
3205 		if (!test_bit(slab_index(p, s, addr), map))
3206 			if (!check_object(s, page, p, 1))
3207 				return 0;
3208 	return 1;
3209 }
3210 
3211 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3212 						unsigned long *map)
3213 {
3214 	if (slab_trylock(page)) {
3215 		validate_slab(s, page, map);
3216 		slab_unlock(page);
3217 	} else
3218 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3219 			s->name, page);
3220 
3221 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3222 		if (!SlabDebug(page))
3223 			printk(KERN_ERR "SLUB %s: SlabDebug not set "
3224 				"on slab 0x%p\n", s->name, page);
3225 	} else {
3226 		if (SlabDebug(page))
3227 			printk(KERN_ERR "SLUB %s: SlabDebug set on "
3228 				"slab 0x%p\n", s->name, page);
3229 	}
3230 }
3231 
3232 static int validate_slab_node(struct kmem_cache *s,
3233 		struct kmem_cache_node *n, unsigned long *map)
3234 {
3235 	unsigned long count = 0;
3236 	struct page *page;
3237 	unsigned long flags;
3238 
3239 	spin_lock_irqsave(&n->list_lock, flags);
3240 
3241 	list_for_each_entry(page, &n->partial, lru) {
3242 		validate_slab_slab(s, page, map);
3243 		count++;
3244 	}
3245 	if (count != n->nr_partial)
3246 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3247 			"counter=%ld\n", s->name, count, n->nr_partial);
3248 
3249 	if (!(s->flags & SLAB_STORE_USER))
3250 		goto out;
3251 
3252 	list_for_each_entry(page, &n->full, lru) {
3253 		validate_slab_slab(s, page, map);
3254 		count++;
3255 	}
3256 	if (count != atomic_long_read(&n->nr_slabs))
3257 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3258 			"counter=%ld\n", s->name, count,
3259 			atomic_long_read(&n->nr_slabs));
3260 
3261 out:
3262 	spin_unlock_irqrestore(&n->list_lock, flags);
3263 	return count;
3264 }
3265 
3266 static long validate_slab_cache(struct kmem_cache *s)
3267 {
3268 	int node;
3269 	unsigned long count = 0;
3270 	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
3271 				sizeof(unsigned long), GFP_KERNEL);
3272 
3273 	if (!map)
3274 		return -ENOMEM;
3275 
3276 	flush_all(s);
3277 	for_each_node_state(node, N_NORMAL_MEMORY) {
3278 		struct kmem_cache_node *n = get_node(s, node);
3279 
3280 		count += validate_slab_node(s, n, map);
3281 	}
3282 	kfree(map);
3283 	return count;
3284 }
3285 
3286 #ifdef SLUB_RESILIENCY_TEST
3287 static void resiliency_test(void)
3288 {
3289 	u8 *p;
3290 
3291 	printk(KERN_ERR "SLUB resiliency testing\n");
3292 	printk(KERN_ERR "-----------------------\n");
3293 	printk(KERN_ERR "A. Corruption after allocation\n");
3294 
3295 	p = kzalloc(16, GFP_KERNEL);
3296 	p[16] = 0x12;
3297 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3298 			" 0x12->0x%p\n\n", p + 16);
3299 
3300 	validate_slab_cache(kmalloc_caches + 4);
3301 
3302 	/* Hmmm... The next two are dangerous */
3303 	p = kzalloc(32, GFP_KERNEL);
3304 	p[32 + sizeof(void *)] = 0x34;
3305 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3306 			" 0x34 -> 0x%p\n", p);
3307 	printk(KERN_ERR
3308 		"If allocated object is overwritten then not detectable\n\n");
3309 
3310 	validate_slab_cache(kmalloc_caches + 5);
3311 	p = kzalloc(64, GFP_KERNEL);
3312 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3313 	*p = 0x56;
3314 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3315 									p);
3316 	printk(KERN_ERR
3317 		"If allocated object is overwritten then not detectable\n\n");
3318 	validate_slab_cache(kmalloc_caches + 6);
3319 
3320 	printk(KERN_ERR "\nB. Corruption after free\n");
3321 	p = kzalloc(128, GFP_KERNEL);
3322 	kfree(p);
3323 	*p = 0x78;
3324 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3325 	validate_slab_cache(kmalloc_caches + 7);
3326 
3327 	p = kzalloc(256, GFP_KERNEL);
3328 	kfree(p);
3329 	p[50] = 0x9a;
3330 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3331 			p);
3332 	validate_slab_cache(kmalloc_caches + 8);
3333 
3334 	p = kzalloc(512, GFP_KERNEL);
3335 	kfree(p);
3336 	p[512] = 0xab;
3337 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3338 	validate_slab_cache(kmalloc_caches + 9);
3339 }
3340 #else
3341 static void resiliency_test(void) {}
3342 #endif
3343 
3344 /*
3345  * Generate lists of code addresses where slabcache objects are allocated
3346  * and freed.
3347  */
3348 
3349 struct location {
3350 	unsigned long count;
3351 	void *addr;
3352 	long long sum_time;
3353 	long min_time;
3354 	long max_time;
3355 	long min_pid;
3356 	long max_pid;
3357 	cpumask_t cpus;
3358 	nodemask_t nodes;
3359 };
3360 
3361 struct loc_track {
3362 	unsigned long max;
3363 	unsigned long count;
3364 	struct location *loc;
3365 };
3366 
3367 static void free_loc_track(struct loc_track *t)
3368 {
3369 	if (t->max)
3370 		free_pages((unsigned long)t->loc,
3371 			get_order(sizeof(struct location) * t->max));
3372 }
3373 
3374 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3375 {
3376 	struct location *l;
3377 	int order;
3378 
3379 	order = get_order(sizeof(struct location) * max);
3380 
3381 	l = (void *)__get_free_pages(flags, order);
3382 	if (!l)
3383 		return 0;
3384 
3385 	if (t->count) {
3386 		memcpy(l, t->loc, sizeof(struct location) * t->count);
3387 		free_loc_track(t);
3388 	}
3389 	t->max = max;
3390 	t->loc = l;
3391 	return 1;
3392 }
3393 
3394 static int add_location(struct loc_track *t, struct kmem_cache *s,
3395 				const struct track *track)
3396 {
3397 	long start, end, pos;
3398 	struct location *l;
3399 	void *caddr;
3400 	unsigned long age = jiffies - track->when;
3401 
3402 	start = -1;
3403 	end = t->count;
3404 
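	/*
	 * Binary search for track->addr in the address sorted location
	 * array. If the address is not found, pos ends up at the slot
	 * where a new entry has to be inserted to keep the array sorted.
	 */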
3405 	for ( ; ; ) {
3406 		pos = start + (end - start + 1) / 2;
3407 
3408 		/*
3409 		 * There is nothing at "end". If we end up there
3410 		 * we need to insert the new element before end.
3411 		 */
3412 		if (pos == end)
3413 			break;
3414 
3415 		caddr = t->loc[pos].addr;
3416 		if (track->addr == caddr) {
3417 
3418 			l = &t->loc[pos];
3419 			l->count++;
3420 			if (track->when) {
3421 				l->sum_time += age;
3422 				if (age < l->min_time)
3423 					l->min_time = age;
3424 				if (age > l->max_time)
3425 					l->max_time = age;
3426 
3427 				if (track->pid < l->min_pid)
3428 					l->min_pid = track->pid;
3429 				if (track->pid > l->max_pid)
3430 					l->max_pid = track->pid;
3431 
3432 				cpu_set(track->cpu, l->cpus);
3433 			}
3434 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3435 			return 1;
3436 		}
3437 
3438 		if (track->addr < caddr)
3439 			end = pos;
3440 		else
3441 			start = pos;
3442 	}
3443 
3444 	/*
3445 	 * Not found. Insert new tracking element.
3446 	 */
3447 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3448 		return 0;
3449 
3450 	l = t->loc + pos;
3451 	if (pos < t->count)
3452 		memmove(l + 1, l,
3453 			(t->count - pos) * sizeof(struct location));
3454 	t->count++;
3455 	l->count = 1;
3456 	l->addr = track->addr;
3457 	l->sum_time = age;
3458 	l->min_time = age;
3459 	l->max_time = age;
3460 	l->min_pid = track->pid;
3461 	l->max_pid = track->pid;
3462 	cpus_clear(l->cpus);
3463 	cpu_set(track->cpu, l->cpus);
3464 	nodes_clear(l->nodes);
3465 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3466 	return 1;
3467 }
3468 
3469 static void process_slab(struct loc_track *t, struct kmem_cache *s,
3470 		struct page *page, enum track_item alloc)
3471 {
3472 	void *addr = page_address(page);
3473 	DECLARE_BITMAP(map, s->objects);
3474 	void *p;
3475 
3476 	bitmap_zero(map, s->objects);
3477 	for_each_free_object(p, s, page->freelist)
3478 		set_bit(slab_index(p, s, addr), map);
3479 
3480 	for_each_object(p, s, addr)
3481 		if (!test_bit(slab_index(p, s, addr), map))
3482 			add_location(t, s, get_track(s, p, alloc));
3483 }
3484 
3485 static int list_locations(struct kmem_cache *s, char *buf,
3486 					enum track_item alloc)
3487 {
3488 	int len = 0;
3489 	unsigned long i;
3490 	struct loc_track t = { 0, 0, NULL };
3491 	int node;
3492 
3493 	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3494 			GFP_TEMPORARY))
3495 		return sprintf(buf, "Out of memory\n");
3496 
3497 	/* Push back cpu slabs */
3498 	flush_all(s);
3499 
3500 	for_each_node_state(node, N_NORMAL_MEMORY) {
3501 		struct kmem_cache_node *n = get_node(s, node);
3502 		unsigned long flags;
3503 		struct page *page;
3504 
3505 		if (!atomic_long_read(&n->nr_slabs))
3506 			continue;
3507 
3508 		spin_lock_irqsave(&n->list_lock, flags);
3509 		list_for_each_entry(page, &n->partial, lru)
3510 			process_slab(&t, s, page, alloc);
3511 		list_for_each_entry(page, &n->full, lru)
3512 			process_slab(&t, s, page, alloc);
3513 		spin_unlock_irqrestore(&n->list_lock, flags);
3514 	}
3515 
3516 	for (i = 0; i < t.count; i++) {
3517 		struct location *l = &t.loc[i];
3518 
3519 		if (len > PAGE_SIZE - 100)
3520 			break;
3521 		len += sprintf(buf + len, "%7ld ", l->count);
3522 
3523 		if (l->addr)
3524 			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3525 		else
3526 			len += sprintf(buf + len, "<not-available>");
3527 
3528 		if (l->sum_time != l->min_time) {
3529 			unsigned long remainder;
3530 
3531 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3532 			l->min_time,
3533 			div_long_long_rem(l->sum_time, l->count, &remainder),
3534 			l->max_time);
3535 		} else
3536 			len += sprintf(buf + len, " age=%ld",
3537 				l->min_time);
3538 
3539 		if (l->min_pid != l->max_pid)
3540 			len += sprintf(buf + len, " pid=%ld-%ld",
3541 				l->min_pid, l->max_pid);
3542 		else
3543 			len += sprintf(buf + len, " pid=%ld",
3544 				l->min_pid);
3545 
3546 		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
3547 				len < PAGE_SIZE - 60) {
3548 			len += sprintf(buf + len, " cpus=");
3549 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3550 					l->cpus);
3551 		}
3552 
3553 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3554 				len < PAGE_SIZE - 60) {
3555 			len += sprintf(buf + len, " nodes=");
3556 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3557 					l->nodes);
3558 		}
3559 
3560 		len += sprintf(buf + len, "\n");
3561 	}
3562 
3563 	free_loc_track(&t);
3564 	if (!t.count)
3565 		len += sprintf(buf, "No data\n");
3566 	return len;
3567 }
3568 
3569 enum slab_stat_type {
3570 	SL_FULL,
3571 	SL_PARTIAL,
3572 	SL_CPU,
3573 	SL_OBJECTS
3574 };
3575 
3576 #define SO_FULL		(1 << SL_FULL)
3577 #define SO_PARTIAL	(1 << SL_PARTIAL)
3578 #define SO_CPU		(1 << SL_CPU)
3579 #define SO_OBJECTS	(1 << SL_OBJECTS)
3580 
3581 static ssize_t show_slab_objects(struct kmem_cache *s,
3582 			    char *buf, unsigned long flags)
3583 {
3584 	unsigned long total = 0;
3585 	int cpu;
3586 	int node;
3587 	int x;
3588 	unsigned long *nodes;
3589 	unsigned long *per_cpu;
3590 
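	/*
	 * nodes[] and per_cpu[] share a single allocation: the first
	 * nr_node_ids entries count objects/slabs per node, the second
	 * nr_node_ids entries count cpu slabs per node so that they can be
	 * subtracted when computing the number of full slabs.
	 */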
3591 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3592 	if (!nodes)
3593 		return -ENOMEM;
3594 	per_cpu = nodes + nr_node_ids;
3595 
3596 	for_each_possible_cpu(cpu) {
3597 		struct page *page;
3598 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3599 
3600 		if (!c)
3601 			continue;
3602 
3603 		page = c->page;
3604 		node = c->node;
3605 		if (node < 0)
3606 			continue;
3607 		if (page) {
3608 			if (flags & SO_CPU) {
3609 				if (flags & SO_OBJECTS)
3610 					x = page->inuse;
3611 				else
3612 					x = 1;
3613 				total += x;
3614 				nodes[node] += x;
3615 			}
3616 			per_cpu[node]++;
3617 		}
3618 	}
3619 
3620 	for_each_node_state(node, N_NORMAL_MEMORY) {
3621 		struct kmem_cache_node *n = get_node(s, node);
3622 
3623 		if (flags & SO_PARTIAL) {
3624 			if (flags & SO_OBJECTS)
3625 				x = count_partial(n);
3626 			else
3627 				x = n->nr_partial;
3628 			total += x;
3629 			nodes[node] += x;
3630 		}
3631 
3632 		if (flags & SO_FULL) {
3633 			int full_slabs = atomic_long_read(&n->nr_slabs)
3634 					- per_cpu[node]
3635 					- n->nr_partial;
3636 
3637 			if (flags & SO_OBJECTS)
3638 				x = full_slabs * s->objects;
3639 			else
3640 				x = full_slabs;
3641 			total += x;
3642 			nodes[node] += x;
3643 		}
3644 	}
3645 
3646 	x = sprintf(buf, "%lu", total);
3647 #ifdef CONFIG_NUMA
3648 	for_each_node_state(node, N_NORMAL_MEMORY)
3649 		if (nodes[node])
3650 			x += sprintf(buf + x, " N%d=%lu",
3651 					node, nodes[node]);
3652 #endif
3653 	kfree(nodes);
3654 	return x + sprintf(buf + x, "\n");
3655 }
3656 
3657 static int any_slab_objects(struct kmem_cache *s)
3658 {
3659 	int node;
3660 	int cpu;
3661 
3662 	for_each_possible_cpu(cpu) {
3663 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3664 
3665 		if (c && c->page)
3666 			return 1;
3667 	}
3668 
3669 	for_each_online_node(node) {
3670 		struct kmem_cache_node *n = get_node(s, node);
3671 
3672 		if (!n)
3673 			continue;
3674 
3675 		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
3676 			return 1;
3677 	}
3678 	return 0;
3679 }
3680 
3681 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3682 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
3683 
3684 struct slab_attribute {
3685 	struct attribute attr;
3686 	ssize_t (*show)(struct kmem_cache *s, char *buf);
3687 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3688 };
3689 
3690 #define SLAB_ATTR_RO(_name) \
3691 	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3692 
3693 #define SLAB_ATTR(_name) \
3694 	static struct slab_attribute _name##_attr =  \
3695 	__ATTR(_name, 0644, _name##_show, _name##_store)
3696 
3697 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3698 {
3699 	return sprintf(buf, "%d\n", s->size);
3700 }
3701 SLAB_ATTR_RO(slab_size);
3702 
3703 static ssize_t align_show(struct kmem_cache *s, char *buf)
3704 {
3705 	return sprintf(buf, "%d\n", s->align);
3706 }
3707 SLAB_ATTR_RO(align);
3708 
3709 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3710 {
3711 	return sprintf(buf, "%d\n", s->objsize);
3712 }
3713 SLAB_ATTR_RO(object_size);
3714 
3715 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3716 {
3717 	return sprintf(buf, "%d\n", s->objects);
3718 }
3719 SLAB_ATTR_RO(objs_per_slab);
3720 
3721 static ssize_t order_show(struct kmem_cache *s, char *buf)
3722 {
3723 	return sprintf(buf, "%d\n", s->order);
3724 }
3725 SLAB_ATTR_RO(order);
3726 
3727 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3728 {
3729 	if (s->ctor) {
3730 		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3731 
3732 		return n + sprintf(buf + n, "\n");
3733 	}
3734 	return 0;
3735 }
3736 SLAB_ATTR_RO(ctor);
3737 
3738 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3739 {
3740 	return sprintf(buf, "%d\n", s->refcount - 1);
3741 }
3742 SLAB_ATTR_RO(aliases);
3743 
3744 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3745 {
3746 	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
3747 }
3748 SLAB_ATTR_RO(slabs);
3749 
3750 static ssize_t partial_show(struct kmem_cache *s, char *buf)
3751 {
3752 	return show_slab_objects(s, buf, SO_PARTIAL);
3753 }
3754 SLAB_ATTR_RO(partial);
3755 
3756 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3757 {
3758 	return show_slab_objects(s, buf, SO_CPU);
3759 }
3760 SLAB_ATTR_RO(cpu_slabs);
3761 
3762 static ssize_t objects_show(struct kmem_cache *s, char *buf)
3763 {
3764 	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
3765 }
3766 SLAB_ATTR_RO(objects);
3767 
3768 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3769 {
3770 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3771 }
3772 
3773 static ssize_t sanity_checks_store(struct kmem_cache *s,
3774 				const char *buf, size_t length)
3775 {
3776 	s->flags &= ~SLAB_DEBUG_FREE;
3777 	if (buf[0] == '1')
3778 		s->flags |= SLAB_DEBUG_FREE;
3779 	return length;
3780 }
3781 SLAB_ATTR(sanity_checks);
3782 
3783 static ssize_t trace_show(struct kmem_cache *s, char *buf)
3784 {
3785 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3786 }
3787 
3788 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3789 							size_t length)
3790 {
3791 	s->flags &= ~SLAB_TRACE;
3792 	if (buf[0] == '1')
3793 		s->flags |= SLAB_TRACE;
3794 	return length;
3795 }
3796 SLAB_ATTR(trace);
3797 
3798 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3799 {
3800 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3801 }
3802 
3803 static ssize_t reclaim_account_store(struct kmem_cache *s,
3804 				const char *buf, size_t length)
3805 {
3806 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3807 	if (buf[0] == '1')
3808 		s->flags |= SLAB_RECLAIM_ACCOUNT;
3809 	return length;
3810 }
3811 SLAB_ATTR(reclaim_account);
3812 
3813 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3814 {
3815 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
3816 }
3817 SLAB_ATTR_RO(hwcache_align);
3818 
3819 #ifdef CONFIG_ZONE_DMA
3820 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3821 {
3822 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3823 }
3824 SLAB_ATTR_RO(cache_dma);
3825 #endif
3826 
3827 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3828 {
3829 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3830 }
3831 SLAB_ATTR_RO(destroy_by_rcu);
3832 
3833 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3834 {
3835 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3836 }
3837 
3838 static ssize_t red_zone_store(struct kmem_cache *s,
3839 				const char *buf, size_t length)
3840 {
3841 	if (any_slab_objects(s))
3842 		return -EBUSY;
3843 
3844 	s->flags &= ~SLAB_RED_ZONE;
3845 	if (buf[0] == '1')
3846 		s->flags |= SLAB_RED_ZONE;
3847 	calculate_sizes(s);
3848 	return length;
3849 }
3850 SLAB_ATTR(red_zone);
3851 
3852 static ssize_t poison_show(struct kmem_cache *s, char *buf)
3853 {
3854 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3855 }
3856 
3857 static ssize_t poison_store(struct kmem_cache *s,
3858 				const char *buf, size_t length)
3859 {
3860 	if (any_slab_objects(s))
3861 		return -EBUSY;
3862 
3863 	s->flags &= ~SLAB_POISON;
3864 	if (buf[0] == '1')
3865 		s->flags |= SLAB_POISON;
3866 	calculate_sizes(s);
3867 	return length;
3868 }
3869 SLAB_ATTR(poison);
3870 
3871 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3872 {
3873 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3874 }
3875 
3876 static ssize_t store_user_store(struct kmem_cache *s,
3877 				const char *buf, size_t length)
3878 {
3879 	if (any_slab_objects(s))
3880 		return -EBUSY;
3881 
3882 	s->flags &= ~SLAB_STORE_USER;
3883 	if (buf[0] == '1')
3884 		s->flags |= SLAB_STORE_USER;
3885 	calculate_sizes(s);
3886 	return length;
3887 }
3888 SLAB_ATTR(store_user);
3889 
3890 static ssize_t validate_show(struct kmem_cache *s, char *buf)
3891 {
3892 	return 0;
3893 }
3894 
3895 static ssize_t validate_store(struct kmem_cache *s,
3896 			const char *buf, size_t length)
3897 {
3898 	int ret = -EINVAL;
3899 
3900 	if (buf[0] == '1') {
3901 		ret = validate_slab_cache(s);
3902 		if (ret >= 0)
3903 			ret = length;
3904 	}
3905 	return ret;
3906 }
3907 SLAB_ATTR(validate);
3908 
3909 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
3910 {
3911 	return 0;
3912 }
3913 
3914 static ssize_t shrink_store(struct kmem_cache *s,
3915 			const char *buf, size_t length)
3916 {
3917 	if (buf[0] == '1') {
3918 		int rc = kmem_cache_shrink(s);
3919 
3920 		if (rc)
3921 			return rc;
3922 	} else
3923 		return -EINVAL;
3924 	return length;
3925 }
3926 SLAB_ATTR(shrink);
3927 
3928 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3929 {
3930 	if (!(s->flags & SLAB_STORE_USER))
3931 		return -ENOSYS;
3932 	return list_locations(s, buf, TRACK_ALLOC);
3933 }
3934 SLAB_ATTR_RO(alloc_calls);
3935 
3936 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3937 {
3938 	if (!(s->flags & SLAB_STORE_USER))
3939 		return -ENOSYS;
3940 	return list_locations(s, buf, TRACK_FREE);
3941 }
3942 SLAB_ATTR_RO(free_calls);
3943 
3944 #ifdef CONFIG_NUMA
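/*
 * The ratio is stored internally scaled by a factor of 10: _show divides
 * it back down and _store only accepts values below 100.
 */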
3945 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
3946 {
3947 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
3948 }
3949 
3950 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
3951 				const char *buf, size_t length)
3952 {
3953 	int n = simple_strtoul(buf, NULL, 10);
3954 
3955 	if (n < 100)
3956 		s->remote_node_defrag_ratio = n * 10;
3957 	return length;
3958 }
3959 SLAB_ATTR(remote_node_defrag_ratio);
3960 #endif
3961 
3962 #ifdef CONFIG_SLUB_STATS
3963 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
3964 {
3965 	unsigned long sum  = 0;
3966 	int cpu;
3967 	int len;
3968 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
3969 
3970 	if (!data)
3971 		return -ENOMEM;
3972 
3973 	for_each_online_cpu(cpu) {
3974 		unsigned x = get_cpu_slab(s, cpu)->stat[si];
3975 
3976 		data[cpu] = x;
3977 		sum += x;
3978 	}
3979 
3980 	len = sprintf(buf, "%lu", sum);
3981 
3982 	for_each_online_cpu(cpu) {
3983 		if (data[cpu] && len < PAGE_SIZE - 20)
3984 			len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
3985 	}
3986 	kfree(data);
3987 	return len + sprintf(buf + len, "\n");
3988 }
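/*
 * show_stat() emits the sum over all online CPUs first and then a
 * " c<cpu>=<count>" pair for every CPU with a non-zero count, as long
 * as it fits in the page-sized buffer. Example output (made-up
 * numbers):
 *
 *	4981 c0=2510 c1=2471
 */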
3989 
3990 #define STAT_ATTR(si, text) 					\
3991 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
3992 {								\
3993 	return show_stat(s, buf, si);				\
3994 }								\
3995 SLAB_ATTR_RO(text);
3996 
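/*
 * Each STAT_ATTR() invocation below expands to one read-only sysfs
 * attribute. For instance, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath)
 * produces roughly:
 *
 *	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
 *	{
 *		return show_stat(s, buf, ALLOC_FASTPATH);
 *	}
 *	SLAB_ATTR_RO(alloc_fastpath);
 */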
3997 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
3998 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
3999 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4000 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4001 STAT_ATTR(FREE_FROZEN, free_frozen);
4002 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4003 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4004 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4005 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4006 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4007 STAT_ATTR(FREE_SLAB, free_slab);
4008 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4009 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4010 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4011 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4012 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4013 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4014 
4015 #endif
4016 
4017 static struct attribute *slab_attrs[] = {
4018 	&slab_size_attr.attr,
4019 	&object_size_attr.attr,
4020 	&objs_per_slab_attr.attr,
4021 	&order_attr.attr,
4022 	&objects_attr.attr,
4023 	&slabs_attr.attr,
4024 	&partial_attr.attr,
4025 	&cpu_slabs_attr.attr,
4026 	&ctor_attr.attr,
4027 	&aliases_attr.attr,
4028 	&align_attr.attr,
4029 	&sanity_checks_attr.attr,
4030 	&trace_attr.attr,
4031 	&hwcache_align_attr.attr,
4032 	&reclaim_account_attr.attr,
4033 	&destroy_by_rcu_attr.attr,
4034 	&red_zone_attr.attr,
4035 	&poison_attr.attr,
4036 	&store_user_attr.attr,
4037 	&validate_attr.attr,
4038 	&shrink_attr.attr,
4039 	&alloc_calls_attr.attr,
4040 	&free_calls_attr.attr,
4041 #ifdef CONFIG_ZONE_DMA
4042 	&cache_dma_attr.attr,
4043 #endif
4044 #ifdef CONFIG_NUMA
4045 	&remote_node_defrag_ratio_attr.attr,
4046 #endif
4047 #ifdef CONFIG_SLUB_STATS
4048 	&alloc_fastpath_attr.attr,
4049 	&alloc_slowpath_attr.attr,
4050 	&free_fastpath_attr.attr,
4051 	&free_slowpath_attr.attr,
4052 	&free_frozen_attr.attr,
4053 	&free_add_partial_attr.attr,
4054 	&free_remove_partial_attr.attr,
4055 	&alloc_from_partial_attr.attr,
4056 	&alloc_slab_attr.attr,
4057 	&alloc_refill_attr.attr,
4058 	&free_slab_attr.attr,
4059 	&cpuslab_flush_attr.attr,
4060 	&deactivate_full_attr.attr,
4061 	&deactivate_empty_attr.attr,
4062 	&deactivate_to_head_attr.attr,
4063 	&deactivate_to_tail_attr.attr,
4064 	&deactivate_remote_frees_attr.attr,
4065 #endif
4066 	NULL
4067 };
4068 
4069 static struct attribute_group slab_attr_group = {
4070 	.attrs = slab_attrs,
4071 };
4072 
4073 static ssize_t slab_attr_show(struct kobject *kobj,
4074 				struct attribute *attr,
4075 				char *buf)
4076 {
4077 	struct slab_attribute *attribute;
4078 	struct kmem_cache *s;
4079 	int err;
4080 
4081 	attribute = to_slab_attr(attr);
4082 	s = to_slab(kobj);
4083 
4084 	if (!attribute->show)
4085 		return -EIO;
4086 
4087 	err = attribute->show(s, buf);
4088 
4089 	return err;
4090 }
4091 
4092 static ssize_t slab_attr_store(struct kobject *kobj,
4093 				struct attribute *attr,
4094 				const char *buf, size_t len)
4095 {
4096 	struct slab_attribute *attribute;
4097 	struct kmem_cache *s;
4098 	int err;
4099 
4100 	attribute = to_slab_attr(attr);
4101 	s = to_slab(kobj);
4102 
4103 	if (!attribute->store)
4104 		return -EIO;
4105 
4106 	err = attribute->store(s, buf, len);
4107 
4108 	return err;
4109 }
4110 
4111 static void kmem_cache_release(struct kobject *kobj)
4112 {
4113 	struct kmem_cache *s = to_slab(kobj);
4114 
4115 	kfree(s);
4116 }
4117 
4118 static struct sysfs_ops slab_sysfs_ops = {
4119 	.show = slab_attr_show,
4120 	.store = slab_attr_store,
4121 };
4122 
4123 static struct kobj_type slab_ktype = {
4124 	.sysfs_ops = &slab_sysfs_ops,
4125 	.release = kmem_cache_release
4126 };
4127 
4128 static int uevent_filter(struct kset *kset, struct kobject *kobj)
4129 {
4130 	struct kobj_type *ktype = get_ktype(kobj);
4131 
4132 	if (ktype == &slab_ktype)
4133 		return 1;
4134 	return 0;
4135 }
4136 
4137 static struct kset_uevent_ops slab_uevent_ops = {
4138 	.filter = uevent_filter,
4139 };
4140 
4141 static struct kset *slab_kset;
4142 
4143 #define ID_STR_LENGTH 64
4144 
4145 /* Create a unique string id for a slab cache:
4146  *
4147  * Format	:[flags-]size
4148  */
4149 static char *create_unique_id(struct kmem_cache *s)
4150 {
4151 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4152 	char *p = name;
4153 
4154 	BUG_ON(!name);
4155 
4156 	*p++ = ':';
4157 	/*
4158 	 * First, flags that affect slabcache operations. We only get
4159 	 * here for aliasable slabs, so we do not need to support too
4160 	 * many flags. The flags here must cover all the flags that are
4161 	 * matched during merging, to guarantee that the generated id
4162 	 * is unique.
4163 	 */
4164 	if (s->flags & SLAB_CACHE_DMA)
4165 		*p++ = 'd';
4166 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4167 		*p++ = 'a';
4168 	if (s->flags & SLAB_DEBUG_FREE)
4169 		*p++ = 'F';
4170 	if (p != name + 1)
4171 		*p++ = '-';
4172 	p += sprintf(p, "%07d", s->size);
4173 	BUG_ON(p > name + ID_STR_LENGTH - 1);
4174 	return name;
4175 }
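/*
 * Example (hypothetical cache): a DMA cache with reclaim accounting and
 * an s->size of 192 would get the id ":da-0000192"; a cache with none
 * of the matched flags would get just ":0000192".
 */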
4176 
4177 static int sysfs_slab_add(struct kmem_cache *s)
4178 {
4179 	int err;
4180 	const char *name;
4181 	int unmergeable;
4182 
4183 	if (slab_state < SYSFS)
4184 		/* Defer until later */
4185 		return 0;
4186 
4187 	unmergeable = slab_unmergeable(s);
4188 	if (unmergeable) {
4189 		/*
4190 		 * Slabcache can never be merged so we can use the name proper.
4191 		 * This is typically the case in debug situations, where it
4192 		 * also lets us catch duplicate names easily.
4193 		 */
4194 		sysfs_remove_link(&slab_kset->kobj, s->name);
4195 		name = s->name;
4196 	} else {
4197 		/*
4198 		 * Create a unique name for the slab as a target
4199 		 * for the symlinks.
4200 		 */
4201 		name = create_unique_id(s);
4202 	}
4203 
4204 	s->kobj.kset = slab_kset;
4205 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4206 	if (err) {
4207 		kobject_put(&s->kobj);
4208 		return err;
4209 	}
4210 
4211 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4212 	if (err)
4213 		return err;
4214 	kobject_uevent(&s->kobj, KOBJ_ADD);
4215 	if (!unmergeable) {
4216 		/* Setup first alias */
4217 		sysfs_slab_alias(s, s->name);
4218 		kfree(name);
4219 	}
4220 	return 0;
4221 }
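/*
 * The net effect (given the "slab" kset registered in slab_sysfs_init()
 * below) is one directory per real cache under /sys/kernel/slab/, named
 * either after the cache itself (unmergeable caches) or after the unique
 * id string, plus one symlink per alias created via sysfs_slab_alias().
 */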
4222 
4223 static void sysfs_slab_remove(struct kmem_cache *s)
4224 {
4225 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4226 	kobject_del(&s->kobj);
4227 	kobject_put(&s->kobj);
4228 }
4229 
4230 /*
4231  * Need to buffer aliases during bootup until sysfs becomes
4232  * available lest we lose that information.
4233  */
4234 struct saved_alias {
4235 	struct kmem_cache *s;
4236 	const char *name;
4237 	struct saved_alias *next;
4238 };
4239 
4240 static struct saved_alias *alias_list;
4241 
4242 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4243 {
4244 	struct saved_alias *al;
4245 
4246 	if (slab_state == SYSFS) {
4247 		/*
4248 		 * If we have a leftover link then remove it.
4249 		 */
4250 		sysfs_remove_link(&slab_kset->kobj, name);
4251 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4252 	}
4253 
4254 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4255 	if (!al)
4256 		return -ENOMEM;
4257 
4258 	al->s = s;
4259 	al->name = name;
4260 	al->next = alias_list;
4261 	alias_list = al;
4262 	return 0;
4263 }
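/*
 * Aliases requested before slab_state reaches SYSFS are queued on
 * alias_list and replayed by the loop in slab_sysfs_init(); once sysfs
 * is up, an alias becomes a plain symlink immediately.
 */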
4264 
4265 static int __init slab_sysfs_init(void)
4266 {
4267 	struct kmem_cache *s;
4268 	int err;
4269 
4270 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4271 	if (!slab_kset) {
4272 		printk(KERN_ERR "Cannot register slab subsystem.\n");
4273 		return -ENOSYS;
4274 	}
4275 
4276 	slab_state = SYSFS;
4277 
4278 	list_for_each_entry(s, &slab_caches, list) {
4279 		err = sysfs_slab_add(s);
4280 		if (err)
4281 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4282 						" to sysfs\n", s->name);
4283 	}
4284 
4285 	while (alias_list) {
4286 		struct saved_alias *al = alias_list;
4287 
4288 		alias_list = alias_list->next;
4289 		err = sysfs_slab_alias(al->s, al->name);
4290 		if (err)
4291 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4292 					" %s to sysfs\n", al->name);
4293 		kfree(al);
4294 	}
4295 
4296 	resiliency_test();
4297 	return 0;
4298 }
4299 
4300 __initcall(slab_sysfs_init);
4301 #endif
4302 
4303 /*
4304  * The /proc/slabinfo ABI
4305  */
4306 #ifdef CONFIG_SLABINFO
4307 
4308 ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4309                        size_t count, loff_t *ppos)
4310 {
4311 	return -EINVAL;
4312 }
4313 
4314 
4315 static void print_slabinfo_header(struct seq_file *m)
4316 {
4317 	seq_puts(m, "slabinfo - version: 2.1\n");
4318 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4319 		 "<objperslab> <pagesperslab>");
4320 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4321 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4322 	seq_putc(m, '\n');
4323 }
4324 
4325 static void *s_start(struct seq_file *m, loff_t *pos)
4326 {
4327 	loff_t n = *pos;
4328 
4329 	down_read(&slub_lock);
4330 	if (!n)
4331 		print_slabinfo_header(m);
4332 
4333 	return seq_list_start(&slab_caches, *pos);
4334 }
4335 
4336 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4337 {
4338 	return seq_list_next(p, &slab_caches, pos);
4339 }
4340 
4341 static void s_stop(struct seq_file *m, void *p)
4342 {
4343 	up_read(&slub_lock);
4344 }
4345 
4346 static int s_show(struct seq_file *m, void *p)
4347 {
4348 	unsigned long nr_partials = 0;
4349 	unsigned long nr_slabs = 0;
4350 	unsigned long nr_inuse = 0;
4351 	unsigned long nr_objs;
4352 	struct kmem_cache *s;
4353 	int node;
4354 
4355 	s = list_entry(p, struct kmem_cache, list);
4356 
4357 	for_each_online_node(node) {
4358 		struct kmem_cache_node *n = get_node(s, node);
4359 
4360 		if (!n)
4361 			continue;
4362 
4363 		nr_partials += n->nr_partial;
4364 		nr_slabs += atomic_long_read(&n->nr_slabs);
4365 		nr_inuse += count_partial(n);
4366 	}
4367 
4368 	nr_objs = nr_slabs * s->objects;
4369 	nr_inuse += (nr_slabs - nr_partials) * s->objects;
4370 
4371 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4372 		   nr_objs, s->size, s->objects, (1 << s->order));
4373 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4374 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4375 		   0UL);
4376 	seq_putc(m, '\n');
4377 	return 0;
4378 }
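/*
 * A line produced by the format strings above looks roughly like this
 * (numbers are illustrative):
 *
 *	kmalloc-64        12800  12800     64   64    1 : tunables 0 0 0 : slabdata 200 200 0
 *
 * The tunables and sharedavail columns are fixed at zero because SLUB
 * has no per-cpu queues to tune; they exist only to keep the
 * /proc/slabinfo ABI intact.
 */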
4379 
4380 const struct seq_operations slabinfo_op = {
4381 	.start = s_start,
4382 	.next = s_next,
4383 	.stop = s_stop,
4384 	.show = s_show,
4385 };
4386 
4387 #endif /* CONFIG_SLABINFO */
4388