xref: /openbmc/linux/mm/slub.c (revision 82ced6fd)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks and only
6  * uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/swap.h> /* struct reclaim_state */
13 #include <linux/module.h>
14 #include <linux/bit_spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/bitops.h>
17 #include <linux/slab.h>
18 #include <linux/proc_fs.h>
19 #include <linux/seq_file.h>
20 #include <trace/kmemtrace.h>
21 #include <linux/cpu.h>
22 #include <linux/cpuset.h>
23 #include <linux/mempolicy.h>
24 #include <linux/ctype.h>
25 #include <linux/debugobjects.h>
26 #include <linux/kallsyms.h>
27 #include <linux/memory.h>
28 #include <linux/math64.h>
29 #include <linux/fault-inject.h>
30 
31 /*
32  * Lock order:
33  *   1. slab_lock(page)
34  *   2. slab->list_lock
35  *
36  *   The slab_lock protects operations on the object of a particular
37  *   slab and its metadata in the page struct. If the slab lock
38  *   has been taken then no allocations nor frees can be performed
39  *   on the objects in the slab nor can the slab be added or removed
40  *   from the partial or full lists since this would mean modifying
41  *   the page_struct of the slab.
42  *
43  *   The list_lock protects the partial and full list on each node and
44  *   the partial slab counter. If taken then no new slabs may be added or
45  *   removed from the lists nor make the number of partial slabs be modified.
46  *   (Note that the total number of slabs is an atomic value that may be
47  *   modified without taking the list lock).
48  *
49  *   The list_lock is a centralized lock and thus we avoid taking it as
50  *   much as possible. As long as SLUB does not have to handle partial
51  *   slabs, operations can continue without any centralized lock. F.e.
52  *   allocating a long series of objects that fill up slabs does not require
53  *   the list lock.
54  *
55  *   The lock order is sometimes inverted when we are trying to get a slab
56  *   off a list. We take the list_lock and then look for a page on the list
57  *   to use. While we do that objects in the slabs may be freed. We can
58  *   only operate on the slab if we have also taken the slab_lock. So we use
59  *   a slab_trylock() on the slab. If trylock was successful then no frees
60  *   can occur anymore and we can use the slab for allocations etc. If the
61  *   slab_trylock() does not succeed then frees are in progress in the slab and
62  *   we must stay away from it for a while since we may cause a bouncing
63  *   cacheline if we try to acquire the lock. So go onto the next slab.
64  *   If all pages are busy then we may allocate a new slab instead of reusing
65  *   a partial slab. A new slab has noone operating on it and thus there is
66  *   no danger of cacheline contention.
67  *
68  *   Interrupts are disabled during allocation and deallocation in order to
69  *   make the slab allocator safe to use in the context of an irq. In addition
70  *   interrupts are disabled to ensure that the processor does not change
71  *   while handling per_cpu slabs, due to kernel preemption.
72  *
73  * SLUB assigns one slab for allocation to each processor.
74  * Allocations only occur from these slabs called cpu slabs.
75  *
76  * Slabs with free elements are kept on a partial list and during regular
77  * operations no list for full slabs is used. If an object in a full slab is
78  * freed then the slab will show up again on the partial lists.
79  * We track full slabs for debugging purposes though because otherwise we
80  * cannot scan all objects.
81  *
82  * Slabs are freed when they become empty. Teardown and setup is
83  * minimal so we rely on the page allocators per cpu caches for
84  * fast frees and allocs.
85  *
86  * Overloading of page flags that are otherwise used for LRU management.
87  *
88  * PageActive 		The slab is frozen and exempt from list processing.
89  * 			This means that the slab is dedicated to a purpose
90  * 			such as satisfying allocations for a specific
91  * 			processor. Objects may be freed in the slab while
92  * 			it is frozen but slab_free will then skip the usual
93  * 			list operations. It is up to the processor holding
94  * 			the slab to integrate the slab into the slab lists
95  * 			when the slab is no longer needed.
96  *
97  * 			One use of this flag is to mark slabs that are
98  * 			used for allocations. Then such a slab becomes a cpu
99  * 			slab. The cpu slab may be equipped with an additional
100  * 			freelist that allows lockless access to
101  * 			free objects in addition to the regular freelist
102  * 			that requires the slab lock.
103  *
104  * PageError		Slab requires special handling due to debug
105  * 			options set. This moves	slab handling out of
106  * 			the fast path and disables lockless freelists.
107  */
108 
109 #ifdef CONFIG_SLUB_DEBUG
110 #define SLABDEBUG 1
111 #else
112 #define SLABDEBUG 0
113 #endif
114 
115 /*
116  * Issues still to be resolved:
117  *
118  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
119  *
120  * - Variable sizing of the per node arrays
121  */
122 
123 /* Enable to test recovery from slab corruption on boot */
124 #undef SLUB_RESILIENCY_TEST
125 
126 /*
127  * Mininum number of partial slabs. These will be left on the partial
128  * lists even if they are empty. kmem_cache_shrink may reclaim them.
129  */
130 #define MIN_PARTIAL 5
131 
132 /*
133  * Maximum number of desirable partial slabs.
134  * The existence of more partial slabs makes kmem_cache_shrink
135  * sort the partial list by the number of objects in the.
136  */
137 #define MAX_PARTIAL 10
138 
139 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
140 				SLAB_POISON | SLAB_STORE_USER)
141 
142 /*
143  * Set of flags that will prevent slab merging
144  */
145 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
146 		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
147 
148 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
149 		SLAB_CACHE_DMA)
150 
151 #ifndef ARCH_KMALLOC_MINALIGN
152 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
153 #endif
154 
155 #ifndef ARCH_SLAB_MINALIGN
156 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
157 #endif
158 
159 #define OO_SHIFT	16
160 #define OO_MASK		((1 << OO_SHIFT) - 1)
161 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
162 
163 /* Internal SLUB flags */
164 #define __OBJECT_POISON		0x80000000 /* Poison object */
165 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
166 
167 static int kmem_size = sizeof(struct kmem_cache);
168 
169 #ifdef CONFIG_SMP
170 static struct notifier_block slab_notifier;
171 #endif
172 
173 static enum {
174 	DOWN,		/* No slab functionality available */
175 	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
176 	UP,		/* Everything works but does not show up in sysfs */
177 	SYSFS		/* Sysfs up */
178 } slab_state = DOWN;
179 
180 /* A list of all slab caches on the system */
181 static DECLARE_RWSEM(slub_lock);
182 static LIST_HEAD(slab_caches);
183 
184 /*
185  * Tracking user of a slab.
186  */
187 struct track {
188 	unsigned long addr;	/* Called from address */
189 	int cpu;		/* Was running on cpu */
190 	int pid;		/* Pid context */
191 	unsigned long when;	/* When did the operation occur */
192 };
193 
194 enum track_item { TRACK_ALLOC, TRACK_FREE };
195 
196 #ifdef CONFIG_SLUB_DEBUG
197 static int sysfs_slab_add(struct kmem_cache *);
198 static int sysfs_slab_alias(struct kmem_cache *, const char *);
199 static void sysfs_slab_remove(struct kmem_cache *);
200 
201 #else
202 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
203 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
204 							{ return 0; }
205 static inline void sysfs_slab_remove(struct kmem_cache *s)
206 {
207 	kfree(s);
208 }
209 
210 #endif
211 
212 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
213 {
214 #ifdef CONFIG_SLUB_STATS
215 	c->stat[si]++;
216 #endif
217 }
218 
219 /********************************************************************
220  * 			Core slab cache functions
221  *******************************************************************/
222 
223 int slab_is_available(void)
224 {
225 	return slab_state >= UP;
226 }
227 
228 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
229 {
230 #ifdef CONFIG_NUMA
231 	return s->node[node];
232 #else
233 	return &s->local_node;
234 #endif
235 }
236 
237 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
238 {
239 #ifdef CONFIG_SMP
240 	return s->cpu_slab[cpu];
241 #else
242 	return &s->cpu_slab;
243 #endif
244 }
245 
246 /* Verify that a pointer has an address that is valid within a slab page */
247 static inline int check_valid_pointer(struct kmem_cache *s,
248 				struct page *page, const void *object)
249 {
250 	void *base;
251 
252 	if (!object)
253 		return 1;
254 
255 	base = page_address(page);
256 	if (object < base || object >= base + page->objects * s->size ||
257 		(object - base) % s->size) {
258 		return 0;
259 	}
260 
261 	return 1;
262 }
263 
264 /*
265  * Slow version of get and set free pointer.
266  *
267  * This version requires touching the cache lines of kmem_cache which
268  * we avoid to do in the fast alloc free paths. There we obtain the offset
269  * from the page struct.
270  */
271 static inline void *get_freepointer(struct kmem_cache *s, void *object)
272 {
273 	return *(void **)(object + s->offset);
274 }
275 
276 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
277 {
278 	*(void **)(object + s->offset) = fp;
279 }
280 
281 /* Loop over all objects in a slab */
282 #define for_each_object(__p, __s, __addr, __objects) \
283 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
284 			__p += (__s)->size)
285 
286 /* Scan freelist */
287 #define for_each_free_object(__p, __s, __free) \
288 	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
289 
290 /* Determine object index from a given position */
291 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
292 {
293 	return (p - addr) / s->size;
294 }
295 
296 static inline struct kmem_cache_order_objects oo_make(int order,
297 						unsigned long size)
298 {
299 	struct kmem_cache_order_objects x = {
300 		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
301 	};
302 
303 	return x;
304 }
305 
306 static inline int oo_order(struct kmem_cache_order_objects x)
307 {
308 	return x.x >> OO_SHIFT;
309 }
310 
311 static inline int oo_objects(struct kmem_cache_order_objects x)
312 {
313 	return x.x & OO_MASK;
314 }
315 
316 #ifdef CONFIG_SLUB_DEBUG
317 /*
318  * Debug settings:
319  */
320 #ifdef CONFIG_SLUB_DEBUG_ON
321 static int slub_debug = DEBUG_DEFAULT_FLAGS;
322 #else
323 static int slub_debug;
324 #endif
325 
326 static char *slub_debug_slabs;
327 
328 /*
329  * Object debugging
330  */
331 static void print_section(char *text, u8 *addr, unsigned int length)
332 {
333 	int i, offset;
334 	int newline = 1;
335 	char ascii[17];
336 
337 	ascii[16] = 0;
338 
339 	for (i = 0; i < length; i++) {
340 		if (newline) {
341 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
342 			newline = 0;
343 		}
344 		printk(KERN_CONT " %02x", addr[i]);
345 		offset = i % 16;
346 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
347 		if (offset == 15) {
348 			printk(KERN_CONT " %s\n", ascii);
349 			newline = 1;
350 		}
351 	}
352 	if (!newline) {
353 		i %= 16;
354 		while (i < 16) {
355 			printk(KERN_CONT "   ");
356 			ascii[i] = ' ';
357 			i++;
358 		}
359 		printk(KERN_CONT " %s\n", ascii);
360 	}
361 }
362 
363 static struct track *get_track(struct kmem_cache *s, void *object,
364 	enum track_item alloc)
365 {
366 	struct track *p;
367 
368 	if (s->offset)
369 		p = object + s->offset + sizeof(void *);
370 	else
371 		p = object + s->inuse;
372 
373 	return p + alloc;
374 }
375 
376 static void set_track(struct kmem_cache *s, void *object,
377 			enum track_item alloc, unsigned long addr)
378 {
379 	struct track *p = get_track(s, object, alloc);
380 
381 	if (addr) {
382 		p->addr = addr;
383 		p->cpu = smp_processor_id();
384 		p->pid = current->pid;
385 		p->when = jiffies;
386 	} else
387 		memset(p, 0, sizeof(struct track));
388 }
389 
390 static void init_tracking(struct kmem_cache *s, void *object)
391 {
392 	if (!(s->flags & SLAB_STORE_USER))
393 		return;
394 
395 	set_track(s, object, TRACK_FREE, 0UL);
396 	set_track(s, object, TRACK_ALLOC, 0UL);
397 }
398 
399 static void print_track(const char *s, struct track *t)
400 {
401 	if (!t->addr)
402 		return;
403 
404 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
405 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
406 }
407 
408 static void print_tracking(struct kmem_cache *s, void *object)
409 {
410 	if (!(s->flags & SLAB_STORE_USER))
411 		return;
412 
413 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
414 	print_track("Freed", get_track(s, object, TRACK_FREE));
415 }
416 
417 static void print_page_info(struct page *page)
418 {
419 	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
420 		page, page->objects, page->inuse, page->freelist, page->flags);
421 
422 }
423 
424 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
425 {
426 	va_list args;
427 	char buf[100];
428 
429 	va_start(args, fmt);
430 	vsnprintf(buf, sizeof(buf), fmt, args);
431 	va_end(args);
432 	printk(KERN_ERR "========================================"
433 			"=====================================\n");
434 	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
435 	printk(KERN_ERR "----------------------------------------"
436 			"-------------------------------------\n\n");
437 }
438 
439 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
440 {
441 	va_list args;
442 	char buf[100];
443 
444 	va_start(args, fmt);
445 	vsnprintf(buf, sizeof(buf), fmt, args);
446 	va_end(args);
447 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
448 }
449 
450 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
451 {
452 	unsigned int off;	/* Offset of last byte */
453 	u8 *addr = page_address(page);
454 
455 	print_tracking(s, p);
456 
457 	print_page_info(page);
458 
459 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
460 			p, p - addr, get_freepointer(s, p));
461 
462 	if (p > addr + 16)
463 		print_section("Bytes b4", p - 16, 16);
464 
465 	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
466 
467 	if (s->flags & SLAB_RED_ZONE)
468 		print_section("Redzone", p + s->objsize,
469 			s->inuse - s->objsize);
470 
471 	if (s->offset)
472 		off = s->offset + sizeof(void *);
473 	else
474 		off = s->inuse;
475 
476 	if (s->flags & SLAB_STORE_USER)
477 		off += 2 * sizeof(struct track);
478 
479 	if (off != s->size)
480 		/* Beginning of the filler is the free pointer */
481 		print_section("Padding", p + off, s->size - off);
482 
483 	dump_stack();
484 }
485 
486 static void object_err(struct kmem_cache *s, struct page *page,
487 			u8 *object, char *reason)
488 {
489 	slab_bug(s, "%s", reason);
490 	print_trailer(s, page, object);
491 }
492 
493 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
494 {
495 	va_list args;
496 	char buf[100];
497 
498 	va_start(args, fmt);
499 	vsnprintf(buf, sizeof(buf), fmt, args);
500 	va_end(args);
501 	slab_bug(s, "%s", buf);
502 	print_page_info(page);
503 	dump_stack();
504 }
505 
506 static void init_object(struct kmem_cache *s, void *object, int active)
507 {
508 	u8 *p = object;
509 
510 	if (s->flags & __OBJECT_POISON) {
511 		memset(p, POISON_FREE, s->objsize - 1);
512 		p[s->objsize - 1] = POISON_END;
513 	}
514 
515 	if (s->flags & SLAB_RED_ZONE)
516 		memset(p + s->objsize,
517 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
518 			s->inuse - s->objsize);
519 }
520 
521 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
522 {
523 	while (bytes) {
524 		if (*start != (u8)value)
525 			return start;
526 		start++;
527 		bytes--;
528 	}
529 	return NULL;
530 }
531 
532 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
533 						void *from, void *to)
534 {
535 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
536 	memset(from, data, to - from);
537 }
538 
539 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
540 			u8 *object, char *what,
541 			u8 *start, unsigned int value, unsigned int bytes)
542 {
543 	u8 *fault;
544 	u8 *end;
545 
546 	fault = check_bytes(start, value, bytes);
547 	if (!fault)
548 		return 1;
549 
550 	end = start + bytes;
551 	while (end > fault && end[-1] == value)
552 		end--;
553 
554 	slab_bug(s, "%s overwritten", what);
555 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
556 					fault, end - 1, fault[0], value);
557 	print_trailer(s, page, object);
558 
559 	restore_bytes(s, what, value, fault, end);
560 	return 0;
561 }
562 
563 /*
564  * Object layout:
565  *
566  * object address
567  * 	Bytes of the object to be managed.
568  * 	If the freepointer may overlay the object then the free
569  * 	pointer is the first word of the object.
570  *
571  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
572  * 	0xa5 (POISON_END)
573  *
574  * object + s->objsize
575  * 	Padding to reach word boundary. This is also used for Redzoning.
576  * 	Padding is extended by another word if Redzoning is enabled and
577  * 	objsize == inuse.
578  *
579  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
580  * 	0xcc (RED_ACTIVE) for objects in use.
581  *
582  * object + s->inuse
583  * 	Meta data starts here.
584  *
585  * 	A. Free pointer (if we cannot overwrite object on free)
586  * 	B. Tracking data for SLAB_STORE_USER
587  * 	C. Padding to reach required alignment boundary or at mininum
588  * 		one word if debugging is on to be able to detect writes
589  * 		before the word boundary.
590  *
591  *	Padding is done using 0x5a (POISON_INUSE)
592  *
593  * object + s->size
594  * 	Nothing is used beyond s->size.
595  *
596  * If slabcaches are merged then the objsize and inuse boundaries are mostly
597  * ignored. And therefore no slab options that rely on these boundaries
598  * may be used with merged slabcaches.
599  */
600 
601 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
602 {
603 	unsigned long off = s->inuse;	/* The end of info */
604 
605 	if (s->offset)
606 		/* Freepointer is placed after the object. */
607 		off += sizeof(void *);
608 
609 	if (s->flags & SLAB_STORE_USER)
610 		/* We also have user information there */
611 		off += 2 * sizeof(struct track);
612 
613 	if (s->size == off)
614 		return 1;
615 
616 	return check_bytes_and_report(s, page, p, "Object padding",
617 				p + off, POISON_INUSE, s->size - off);
618 }
619 
620 /* Check the pad bytes at the end of a slab page */
621 static int slab_pad_check(struct kmem_cache *s, struct page *page)
622 {
623 	u8 *start;
624 	u8 *fault;
625 	u8 *end;
626 	int length;
627 	int remainder;
628 
629 	if (!(s->flags & SLAB_POISON))
630 		return 1;
631 
632 	start = page_address(page);
633 	length = (PAGE_SIZE << compound_order(page));
634 	end = start + length;
635 	remainder = length % s->size;
636 	if (!remainder)
637 		return 1;
638 
639 	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
640 	if (!fault)
641 		return 1;
642 	while (end > fault && end[-1] == POISON_INUSE)
643 		end--;
644 
645 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
646 	print_section("Padding", end - remainder, remainder);
647 
648 	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
649 	return 0;
650 }
651 
652 static int check_object(struct kmem_cache *s, struct page *page,
653 					void *object, int active)
654 {
655 	u8 *p = object;
656 	u8 *endobject = object + s->objsize;
657 
658 	if (s->flags & SLAB_RED_ZONE) {
659 		unsigned int red =
660 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
661 
662 		if (!check_bytes_and_report(s, page, object, "Redzone",
663 			endobject, red, s->inuse - s->objsize))
664 			return 0;
665 	} else {
666 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
667 			check_bytes_and_report(s, page, p, "Alignment padding",
668 				endobject, POISON_INUSE, s->inuse - s->objsize);
669 		}
670 	}
671 
672 	if (s->flags & SLAB_POISON) {
673 		if (!active && (s->flags & __OBJECT_POISON) &&
674 			(!check_bytes_and_report(s, page, p, "Poison", p,
675 					POISON_FREE, s->objsize - 1) ||
676 			 !check_bytes_and_report(s, page, p, "Poison",
677 				p + s->objsize - 1, POISON_END, 1)))
678 			return 0;
679 		/*
680 		 * check_pad_bytes cleans up on its own.
681 		 */
682 		check_pad_bytes(s, page, p);
683 	}
684 
685 	if (!s->offset && active)
686 		/*
687 		 * Object and freepointer overlap. Cannot check
688 		 * freepointer while object is allocated.
689 		 */
690 		return 1;
691 
692 	/* Check free pointer validity */
693 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
694 		object_err(s, page, p, "Freepointer corrupt");
695 		/*
696 		 * No choice but to zap it and thus lose the remainder
697 		 * of the free objects in this slab. May cause
698 		 * another error because the object count is now wrong.
699 		 */
700 		set_freepointer(s, p, NULL);
701 		return 0;
702 	}
703 	return 1;
704 }
705 
706 static int check_slab(struct kmem_cache *s, struct page *page)
707 {
708 	int maxobj;
709 
710 	VM_BUG_ON(!irqs_disabled());
711 
712 	if (!PageSlab(page)) {
713 		slab_err(s, page, "Not a valid slab page");
714 		return 0;
715 	}
716 
717 	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
718 	if (page->objects > maxobj) {
719 		slab_err(s, page, "objects %u > max %u",
720 			s->name, page->objects, maxobj);
721 		return 0;
722 	}
723 	if (page->inuse > page->objects) {
724 		slab_err(s, page, "inuse %u > max %u",
725 			s->name, page->inuse, page->objects);
726 		return 0;
727 	}
728 	/* Slab_pad_check fixes things up after itself */
729 	slab_pad_check(s, page);
730 	return 1;
731 }
732 
733 /*
734  * Determine if a certain object on a page is on the freelist. Must hold the
735  * slab lock to guarantee that the chains are in a consistent state.
736  */
737 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
738 {
739 	int nr = 0;
740 	void *fp = page->freelist;
741 	void *object = NULL;
742 	unsigned long max_objects;
743 
744 	while (fp && nr <= page->objects) {
745 		if (fp == search)
746 			return 1;
747 		if (!check_valid_pointer(s, page, fp)) {
748 			if (object) {
749 				object_err(s, page, object,
750 					"Freechain corrupt");
751 				set_freepointer(s, object, NULL);
752 				break;
753 			} else {
754 				slab_err(s, page, "Freepointer corrupt");
755 				page->freelist = NULL;
756 				page->inuse = page->objects;
757 				slab_fix(s, "Freelist cleared");
758 				return 0;
759 			}
760 			break;
761 		}
762 		object = fp;
763 		fp = get_freepointer(s, object);
764 		nr++;
765 	}
766 
767 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
768 	if (max_objects > MAX_OBJS_PER_PAGE)
769 		max_objects = MAX_OBJS_PER_PAGE;
770 
771 	if (page->objects != max_objects) {
772 		slab_err(s, page, "Wrong number of objects. Found %d but "
773 			"should be %d", page->objects, max_objects);
774 		page->objects = max_objects;
775 		slab_fix(s, "Number of objects adjusted.");
776 	}
777 	if (page->inuse != page->objects - nr) {
778 		slab_err(s, page, "Wrong object count. Counter is %d but "
779 			"counted were %d", page->inuse, page->objects - nr);
780 		page->inuse = page->objects - nr;
781 		slab_fix(s, "Object count adjusted.");
782 	}
783 	return search == NULL;
784 }
785 
786 static void trace(struct kmem_cache *s, struct page *page, void *object,
787 								int alloc)
788 {
789 	if (s->flags & SLAB_TRACE) {
790 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
791 			s->name,
792 			alloc ? "alloc" : "free",
793 			object, page->inuse,
794 			page->freelist);
795 
796 		if (!alloc)
797 			print_section("Object", (void *)object, s->objsize);
798 
799 		dump_stack();
800 	}
801 }
802 
803 /*
804  * Tracking of fully allocated slabs for debugging purposes.
805  */
806 static void add_full(struct kmem_cache_node *n, struct page *page)
807 {
808 	spin_lock(&n->list_lock);
809 	list_add(&page->lru, &n->full);
810 	spin_unlock(&n->list_lock);
811 }
812 
813 static void remove_full(struct kmem_cache *s, struct page *page)
814 {
815 	struct kmem_cache_node *n;
816 
817 	if (!(s->flags & SLAB_STORE_USER))
818 		return;
819 
820 	n = get_node(s, page_to_nid(page));
821 
822 	spin_lock(&n->list_lock);
823 	list_del(&page->lru);
824 	spin_unlock(&n->list_lock);
825 }
826 
827 /* Tracking of the number of slabs for debugging purposes */
828 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
829 {
830 	struct kmem_cache_node *n = get_node(s, node);
831 
832 	return atomic_long_read(&n->nr_slabs);
833 }
834 
835 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
836 {
837 	struct kmem_cache_node *n = get_node(s, node);
838 
839 	/*
840 	 * May be called early in order to allocate a slab for the
841 	 * kmem_cache_node structure. Solve the chicken-egg
842 	 * dilemma by deferring the increment of the count during
843 	 * bootstrap (see early_kmem_cache_node_alloc).
844 	 */
845 	if (!NUMA_BUILD || n) {
846 		atomic_long_inc(&n->nr_slabs);
847 		atomic_long_add(objects, &n->total_objects);
848 	}
849 }
850 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
851 {
852 	struct kmem_cache_node *n = get_node(s, node);
853 
854 	atomic_long_dec(&n->nr_slabs);
855 	atomic_long_sub(objects, &n->total_objects);
856 }
857 
858 /* Object debug checks for alloc/free paths */
859 static void setup_object_debug(struct kmem_cache *s, struct page *page,
860 								void *object)
861 {
862 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
863 		return;
864 
865 	init_object(s, object, 0);
866 	init_tracking(s, object);
867 }
868 
869 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
870 					void *object, unsigned long addr)
871 {
872 	if (!check_slab(s, page))
873 		goto bad;
874 
875 	if (!on_freelist(s, page, object)) {
876 		object_err(s, page, object, "Object already allocated");
877 		goto bad;
878 	}
879 
880 	if (!check_valid_pointer(s, page, object)) {
881 		object_err(s, page, object, "Freelist Pointer check fails");
882 		goto bad;
883 	}
884 
885 	if (!check_object(s, page, object, 0))
886 		goto bad;
887 
888 	/* Success perform special debug activities for allocs */
889 	if (s->flags & SLAB_STORE_USER)
890 		set_track(s, object, TRACK_ALLOC, addr);
891 	trace(s, page, object, 1);
892 	init_object(s, object, 1);
893 	return 1;
894 
895 bad:
896 	if (PageSlab(page)) {
897 		/*
898 		 * If this is a slab page then lets do the best we can
899 		 * to avoid issues in the future. Marking all objects
900 		 * as used avoids touching the remaining objects.
901 		 */
902 		slab_fix(s, "Marking all objects used");
903 		page->inuse = page->objects;
904 		page->freelist = NULL;
905 	}
906 	return 0;
907 }
908 
909 static int free_debug_processing(struct kmem_cache *s, struct page *page,
910 					void *object, unsigned long addr)
911 {
912 	if (!check_slab(s, page))
913 		goto fail;
914 
915 	if (!check_valid_pointer(s, page, object)) {
916 		slab_err(s, page, "Invalid object pointer 0x%p", object);
917 		goto fail;
918 	}
919 
920 	if (on_freelist(s, page, object)) {
921 		object_err(s, page, object, "Object already free");
922 		goto fail;
923 	}
924 
925 	if (!check_object(s, page, object, 1))
926 		return 0;
927 
928 	if (unlikely(s != page->slab)) {
929 		if (!PageSlab(page)) {
930 			slab_err(s, page, "Attempt to free object(0x%p) "
931 				"outside of slab", object);
932 		} else if (!page->slab) {
933 			printk(KERN_ERR
934 				"SLUB <none>: no slab for object 0x%p.\n",
935 						object);
936 			dump_stack();
937 		} else
938 			object_err(s, page, object,
939 					"page slab pointer corrupt.");
940 		goto fail;
941 	}
942 
943 	/* Special debug activities for freeing objects */
944 	if (!PageSlubFrozen(page) && !page->freelist)
945 		remove_full(s, page);
946 	if (s->flags & SLAB_STORE_USER)
947 		set_track(s, object, TRACK_FREE, addr);
948 	trace(s, page, object, 0);
949 	init_object(s, object, 0);
950 	return 1;
951 
952 fail:
953 	slab_fix(s, "Object at 0x%p not freed", object);
954 	return 0;
955 }
956 
957 static int __init setup_slub_debug(char *str)
958 {
959 	slub_debug = DEBUG_DEFAULT_FLAGS;
960 	if (*str++ != '=' || !*str)
961 		/*
962 		 * No options specified. Switch on full debugging.
963 		 */
964 		goto out;
965 
966 	if (*str == ',')
967 		/*
968 		 * No options but restriction on slabs. This means full
969 		 * debugging for slabs matching a pattern.
970 		 */
971 		goto check_slabs;
972 
973 	slub_debug = 0;
974 	if (*str == '-')
975 		/*
976 		 * Switch off all debugging measures.
977 		 */
978 		goto out;
979 
980 	/*
981 	 * Determine which debug features should be switched on
982 	 */
983 	for (; *str && *str != ','; str++) {
984 		switch (tolower(*str)) {
985 		case 'f':
986 			slub_debug |= SLAB_DEBUG_FREE;
987 			break;
988 		case 'z':
989 			slub_debug |= SLAB_RED_ZONE;
990 			break;
991 		case 'p':
992 			slub_debug |= SLAB_POISON;
993 			break;
994 		case 'u':
995 			slub_debug |= SLAB_STORE_USER;
996 			break;
997 		case 't':
998 			slub_debug |= SLAB_TRACE;
999 			break;
1000 		default:
1001 			printk(KERN_ERR "slub_debug option '%c' "
1002 				"unknown. skipped\n", *str);
1003 		}
1004 	}
1005 
1006 check_slabs:
1007 	if (*str == ',')
1008 		slub_debug_slabs = str + 1;
1009 out:
1010 	return 1;
1011 }
1012 
1013 __setup("slub_debug", setup_slub_debug);
1014 
1015 static unsigned long kmem_cache_flags(unsigned long objsize,
1016 	unsigned long flags, const char *name,
1017 	void (*ctor)(void *))
1018 {
1019 	/*
1020 	 * Enable debugging if selected on the kernel commandline.
1021 	 */
1022 	if (slub_debug && (!slub_debug_slabs ||
1023 	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
1024 			flags |= slub_debug;
1025 
1026 	return flags;
1027 }
1028 #else
1029 static inline void setup_object_debug(struct kmem_cache *s,
1030 			struct page *page, void *object) {}
1031 
1032 static inline int alloc_debug_processing(struct kmem_cache *s,
1033 	struct page *page, void *object, unsigned long addr) { return 0; }
1034 
1035 static inline int free_debug_processing(struct kmem_cache *s,
1036 	struct page *page, void *object, unsigned long addr) { return 0; }
1037 
1038 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1039 			{ return 1; }
1040 static inline int check_object(struct kmem_cache *s, struct page *page,
1041 			void *object, int active) { return 1; }
1042 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1043 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1044 	unsigned long flags, const char *name,
1045 	void (*ctor)(void *))
1046 {
1047 	return flags;
1048 }
1049 #define slub_debug 0
1050 
1051 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1052 							{ return 0; }
1053 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1054 							int objects) {}
1055 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1056 							int objects) {}
1057 #endif
1058 
1059 /*
1060  * Slab allocation and freeing
1061  */
1062 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1063 					struct kmem_cache_order_objects oo)
1064 {
1065 	int order = oo_order(oo);
1066 
1067 	if (node == -1)
1068 		return alloc_pages(flags, order);
1069 	else
1070 		return alloc_pages_node(node, flags, order);
1071 }
1072 
1073 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1074 {
1075 	struct page *page;
1076 	struct kmem_cache_order_objects oo = s->oo;
1077 
1078 	flags |= s->allocflags;
1079 
1080 	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
1081 									oo);
1082 	if (unlikely(!page)) {
1083 		oo = s->min;
1084 		/*
1085 		 * Allocation may have failed due to fragmentation.
1086 		 * Try a lower order alloc if possible
1087 		 */
1088 		page = alloc_slab_page(flags, node, oo);
1089 		if (!page)
1090 			return NULL;
1091 
1092 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
1093 	}
1094 	page->objects = oo_objects(oo);
1095 	mod_zone_page_state(page_zone(page),
1096 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1097 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1098 		1 << oo_order(oo));
1099 
1100 	return page;
1101 }
1102 
1103 static void setup_object(struct kmem_cache *s, struct page *page,
1104 				void *object)
1105 {
1106 	setup_object_debug(s, page, object);
1107 	if (unlikely(s->ctor))
1108 		s->ctor(object);
1109 }
1110 
1111 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1112 {
1113 	struct page *page;
1114 	void *start;
1115 	void *last;
1116 	void *p;
1117 
1118 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1119 
1120 	page = allocate_slab(s,
1121 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1122 	if (!page)
1123 		goto out;
1124 
1125 	inc_slabs_node(s, page_to_nid(page), page->objects);
1126 	page->slab = s;
1127 	page->flags |= 1 << PG_slab;
1128 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1129 			SLAB_STORE_USER | SLAB_TRACE))
1130 		__SetPageSlubDebug(page);
1131 
1132 	start = page_address(page);
1133 
1134 	if (unlikely(s->flags & SLAB_POISON))
1135 		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1136 
1137 	last = start;
1138 	for_each_object(p, s, start, page->objects) {
1139 		setup_object(s, page, last);
1140 		set_freepointer(s, last, p);
1141 		last = p;
1142 	}
1143 	setup_object(s, page, last);
1144 	set_freepointer(s, last, NULL);
1145 
1146 	page->freelist = start;
1147 	page->inuse = 0;
1148 out:
1149 	return page;
1150 }
1151 
1152 static void __free_slab(struct kmem_cache *s, struct page *page)
1153 {
1154 	int order = compound_order(page);
1155 	int pages = 1 << order;
1156 
1157 	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
1158 		void *p;
1159 
1160 		slab_pad_check(s, page);
1161 		for_each_object(p, s, page_address(page),
1162 						page->objects)
1163 			check_object(s, page, p, 0);
1164 		__ClearPageSlubDebug(page);
1165 	}
1166 
1167 	mod_zone_page_state(page_zone(page),
1168 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1169 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1170 		-pages);
1171 
1172 	__ClearPageSlab(page);
1173 	reset_page_mapcount(page);
1174 	if (current->reclaim_state)
1175 		current->reclaim_state->reclaimed_slab += pages;
1176 	__free_pages(page, order);
1177 }
1178 
1179 static void rcu_free_slab(struct rcu_head *h)
1180 {
1181 	struct page *page;
1182 
1183 	page = container_of((struct list_head *)h, struct page, lru);
1184 	__free_slab(page->slab, page);
1185 }
1186 
1187 static void free_slab(struct kmem_cache *s, struct page *page)
1188 {
1189 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1190 		/*
1191 		 * RCU free overloads the RCU head over the LRU
1192 		 */
1193 		struct rcu_head *head = (void *)&page->lru;
1194 
1195 		call_rcu(head, rcu_free_slab);
1196 	} else
1197 		__free_slab(s, page);
1198 }
1199 
1200 static void discard_slab(struct kmem_cache *s, struct page *page)
1201 {
1202 	dec_slabs_node(s, page_to_nid(page), page->objects);
1203 	free_slab(s, page);
1204 }
1205 
1206 /*
1207  * Per slab locking using the pagelock
1208  */
1209 static __always_inline void slab_lock(struct page *page)
1210 {
1211 	bit_spin_lock(PG_locked, &page->flags);
1212 }
1213 
1214 static __always_inline void slab_unlock(struct page *page)
1215 {
1216 	__bit_spin_unlock(PG_locked, &page->flags);
1217 }
1218 
1219 static __always_inline int slab_trylock(struct page *page)
1220 {
1221 	int rc = 1;
1222 
1223 	rc = bit_spin_trylock(PG_locked, &page->flags);
1224 	return rc;
1225 }
1226 
1227 /*
1228  * Management of partially allocated slabs
1229  */
1230 static void add_partial(struct kmem_cache_node *n,
1231 				struct page *page, int tail)
1232 {
1233 	spin_lock(&n->list_lock);
1234 	n->nr_partial++;
1235 	if (tail)
1236 		list_add_tail(&page->lru, &n->partial);
1237 	else
1238 		list_add(&page->lru, &n->partial);
1239 	spin_unlock(&n->list_lock);
1240 }
1241 
1242 static void remove_partial(struct kmem_cache *s, struct page *page)
1243 {
1244 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1245 
1246 	spin_lock(&n->list_lock);
1247 	list_del(&page->lru);
1248 	n->nr_partial--;
1249 	spin_unlock(&n->list_lock);
1250 }
1251 
1252 /*
1253  * Lock slab and remove from the partial list.
1254  *
1255  * Must hold list_lock.
1256  */
1257 static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1258 							struct page *page)
1259 {
1260 	if (slab_trylock(page)) {
1261 		list_del(&page->lru);
1262 		n->nr_partial--;
1263 		__SetPageSlubFrozen(page);
1264 		return 1;
1265 	}
1266 	return 0;
1267 }
1268 
1269 /*
1270  * Try to allocate a partial slab from a specific node.
1271  */
1272 static struct page *get_partial_node(struct kmem_cache_node *n)
1273 {
1274 	struct page *page;
1275 
1276 	/*
1277 	 * Racy check. If we mistakenly see no partial slabs then we
1278 	 * just allocate an empty slab. If we mistakenly try to get a
1279 	 * partial slab and there is none available then get_partials()
1280 	 * will return NULL.
1281 	 */
1282 	if (!n || !n->nr_partial)
1283 		return NULL;
1284 
1285 	spin_lock(&n->list_lock);
1286 	list_for_each_entry(page, &n->partial, lru)
1287 		if (lock_and_freeze_slab(n, page))
1288 			goto out;
1289 	page = NULL;
1290 out:
1291 	spin_unlock(&n->list_lock);
1292 	return page;
1293 }
1294 
1295 /*
1296  * Get a page from somewhere. Search in increasing NUMA distances.
1297  */
1298 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1299 {
1300 #ifdef CONFIG_NUMA
1301 	struct zonelist *zonelist;
1302 	struct zoneref *z;
1303 	struct zone *zone;
1304 	enum zone_type high_zoneidx = gfp_zone(flags);
1305 	struct page *page;
1306 
1307 	/*
1308 	 * The defrag ratio allows a configuration of the tradeoffs between
1309 	 * inter node defragmentation and node local allocations. A lower
1310 	 * defrag_ratio increases the tendency to do local allocations
1311 	 * instead of attempting to obtain partial slabs from other nodes.
1312 	 *
1313 	 * If the defrag_ratio is set to 0 then kmalloc() always
1314 	 * returns node local objects. If the ratio is higher then kmalloc()
1315 	 * may return off node objects because partial slabs are obtained
1316 	 * from other nodes and filled up.
1317 	 *
1318 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1319 	 * defrag_ratio = 1000) then every (well almost) allocation will
1320 	 * first attempt to defrag slab caches on other nodes. This means
1321 	 * scanning over all nodes to look for partial slabs which may be
1322 	 * expensive if we do it every time we are trying to find a slab
1323 	 * with available objects.
1324 	 */
1325 	if (!s->remote_node_defrag_ratio ||
1326 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1327 		return NULL;
1328 
1329 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1330 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1331 		struct kmem_cache_node *n;
1332 
1333 		n = get_node(s, zone_to_nid(zone));
1334 
1335 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1336 				n->nr_partial > s->min_partial) {
1337 			page = get_partial_node(n);
1338 			if (page)
1339 				return page;
1340 		}
1341 	}
1342 #endif
1343 	return NULL;
1344 }
1345 
1346 /*
1347  * Get a partial page, lock it and return it.
1348  */
1349 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1350 {
1351 	struct page *page;
1352 	int searchnode = (node == -1) ? numa_node_id() : node;
1353 
1354 	page = get_partial_node(get_node(s, searchnode));
1355 	if (page || (flags & __GFP_THISNODE))
1356 		return page;
1357 
1358 	return get_any_partial(s, flags);
1359 }
1360 
1361 /*
1362  * Move a page back to the lists.
1363  *
1364  * Must be called with the slab lock held.
1365  *
1366  * On exit the slab lock will have been dropped.
1367  */
1368 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1369 {
1370 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1371 	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1372 
1373 	__ClearPageSlubFrozen(page);
1374 	if (page->inuse) {
1375 
1376 		if (page->freelist) {
1377 			add_partial(n, page, tail);
1378 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1379 		} else {
1380 			stat(c, DEACTIVATE_FULL);
1381 			if (SLABDEBUG && PageSlubDebug(page) &&
1382 						(s->flags & SLAB_STORE_USER))
1383 				add_full(n, page);
1384 		}
1385 		slab_unlock(page);
1386 	} else {
1387 		stat(c, DEACTIVATE_EMPTY);
1388 		if (n->nr_partial < s->min_partial) {
1389 			/*
1390 			 * Adding an empty slab to the partial slabs in order
1391 			 * to avoid page allocator overhead. This slab needs
1392 			 * to come after the other slabs with objects in
1393 			 * so that the others get filled first. That way the
1394 			 * size of the partial list stays small.
1395 			 *
1396 			 * kmem_cache_shrink can reclaim any empty slabs from
1397 			 * the partial list.
1398 			 */
1399 			add_partial(n, page, 1);
1400 			slab_unlock(page);
1401 		} else {
1402 			slab_unlock(page);
1403 			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
1404 			discard_slab(s, page);
1405 		}
1406 	}
1407 }
1408 
1409 /*
1410  * Remove the cpu slab
1411  */
1412 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1413 {
1414 	struct page *page = c->page;
1415 	int tail = 1;
1416 
1417 	if (page->freelist)
1418 		stat(c, DEACTIVATE_REMOTE_FREES);
1419 	/*
1420 	 * Merge cpu freelist into slab freelist. Typically we get here
1421 	 * because both freelists are empty. So this is unlikely
1422 	 * to occur.
1423 	 */
1424 	while (unlikely(c->freelist)) {
1425 		void **object;
1426 
1427 		tail = 0;	/* Hot objects. Put the slab first */
1428 
1429 		/* Retrieve object from cpu_freelist */
1430 		object = c->freelist;
1431 		c->freelist = c->freelist[c->offset];
1432 
1433 		/* And put onto the regular freelist */
1434 		object[c->offset] = page->freelist;
1435 		page->freelist = object;
1436 		page->inuse--;
1437 	}
1438 	c->page = NULL;
1439 	unfreeze_slab(s, page, tail);
1440 }
1441 
1442 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1443 {
1444 	stat(c, CPUSLAB_FLUSH);
1445 	slab_lock(c->page);
1446 	deactivate_slab(s, c);
1447 }
1448 
1449 /*
1450  * Flush cpu slab.
1451  *
1452  * Called from IPI handler with interrupts disabled.
1453  */
1454 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1455 {
1456 	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1457 
1458 	if (likely(c && c->page))
1459 		flush_slab(s, c);
1460 }
1461 
1462 static void flush_cpu_slab(void *d)
1463 {
1464 	struct kmem_cache *s = d;
1465 
1466 	__flush_cpu_slab(s, smp_processor_id());
1467 }
1468 
1469 static void flush_all(struct kmem_cache *s)
1470 {
1471 	on_each_cpu(flush_cpu_slab, s, 1);
1472 }
1473 
1474 /*
1475  * Check if the objects in a per cpu structure fit numa
1476  * locality expectations.
1477  */
1478 static inline int node_match(struct kmem_cache_cpu *c, int node)
1479 {
1480 #ifdef CONFIG_NUMA
1481 	if (node != -1 && c->node != node)
1482 		return 0;
1483 #endif
1484 	return 1;
1485 }
1486 
1487 /*
1488  * Slow path. The lockless freelist is empty or we need to perform
1489  * debugging duties.
1490  *
1491  * Interrupts are disabled.
1492  *
1493  * Processing is still very fast if new objects have been freed to the
1494  * regular freelist. In that case we simply take over the regular freelist
1495  * as the lockless freelist and zap the regular freelist.
1496  *
1497  * If that is not working then we fall back to the partial lists. We take the
1498  * first element of the freelist as the object to allocate now and move the
1499  * rest of the freelist to the lockless freelist.
1500  *
1501  * And if we were unable to get a new slab from the partial slab lists then
1502  * we need to allocate a new slab. This is the slowest path since it involves
1503  * a call to the page allocator and the setup of a new slab.
1504  */
1505 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1506 			  unsigned long addr, struct kmem_cache_cpu *c)
1507 {
1508 	void **object;
1509 	struct page *new;
1510 
1511 	/* We handle __GFP_ZERO in the caller */
1512 	gfpflags &= ~__GFP_ZERO;
1513 
1514 	if (!c->page)
1515 		goto new_slab;
1516 
1517 	slab_lock(c->page);
1518 	if (unlikely(!node_match(c, node)))
1519 		goto another_slab;
1520 
1521 	stat(c, ALLOC_REFILL);
1522 
1523 load_freelist:
1524 	object = c->page->freelist;
1525 	if (unlikely(!object))
1526 		goto another_slab;
1527 	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
1528 		goto debug;
1529 
1530 	c->freelist = object[c->offset];
1531 	c->page->inuse = c->page->objects;
1532 	c->page->freelist = NULL;
1533 	c->node = page_to_nid(c->page);
1534 unlock_out:
1535 	slab_unlock(c->page);
1536 	stat(c, ALLOC_SLOWPATH);
1537 	return object;
1538 
1539 another_slab:
1540 	deactivate_slab(s, c);
1541 
1542 new_slab:
1543 	new = get_partial(s, gfpflags, node);
1544 	if (new) {
1545 		c->page = new;
1546 		stat(c, ALLOC_FROM_PARTIAL);
1547 		goto load_freelist;
1548 	}
1549 
1550 	if (gfpflags & __GFP_WAIT)
1551 		local_irq_enable();
1552 
1553 	new = new_slab(s, gfpflags, node);
1554 
1555 	if (gfpflags & __GFP_WAIT)
1556 		local_irq_disable();
1557 
1558 	if (new) {
1559 		c = get_cpu_slab(s, smp_processor_id());
1560 		stat(c, ALLOC_SLAB);
1561 		if (c->page)
1562 			flush_slab(s, c);
1563 		slab_lock(new);
1564 		__SetPageSlubFrozen(new);
1565 		c->page = new;
1566 		goto load_freelist;
1567 	}
1568 	return NULL;
1569 debug:
1570 	if (!alloc_debug_processing(s, c->page, object, addr))
1571 		goto another_slab;
1572 
1573 	c->page->inuse++;
1574 	c->page->freelist = object[c->offset];
1575 	c->node = -1;
1576 	goto unlock_out;
1577 }
1578 
1579 /*
1580  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1581  * have the fastpath folded into their functions. So no function call
1582  * overhead for requests that can be satisfied on the fastpath.
1583  *
1584  * The fastpath works by first checking if the lockless freelist can be used.
1585  * If not then __slab_alloc is called for slow processing.
1586  *
1587  * Otherwise we can simply pick the next object from the lockless free list.
1588  */
1589 static __always_inline void *slab_alloc(struct kmem_cache *s,
1590 		gfp_t gfpflags, int node, unsigned long addr)
1591 {
1592 	void **object;
1593 	struct kmem_cache_cpu *c;
1594 	unsigned long flags;
1595 	unsigned int objsize;
1596 
1597 	lockdep_trace_alloc(gfpflags);
1598 	might_sleep_if(gfpflags & __GFP_WAIT);
1599 
1600 	if (should_failslab(s->objsize, gfpflags))
1601 		return NULL;
1602 
1603 	local_irq_save(flags);
1604 	c = get_cpu_slab(s, smp_processor_id());
1605 	objsize = c->objsize;
1606 	if (unlikely(!c->freelist || !node_match(c, node)))
1607 
1608 		object = __slab_alloc(s, gfpflags, node, addr, c);
1609 
1610 	else {
1611 		object = c->freelist;
1612 		c->freelist = object[c->offset];
1613 		stat(c, ALLOC_FASTPATH);
1614 	}
1615 	local_irq_restore(flags);
1616 
1617 	if (unlikely((gfpflags & __GFP_ZERO) && object))
1618 		memset(object, 0, objsize);
1619 
1620 	return object;
1621 }
1622 
1623 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1624 {
1625 	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
1626 
1627 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
1628 
1629 	return ret;
1630 }
1631 EXPORT_SYMBOL(kmem_cache_alloc);
1632 
1633 #ifdef CONFIG_KMEMTRACE
1634 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
1635 {
1636 	return slab_alloc(s, gfpflags, -1, _RET_IP_);
1637 }
1638 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
1639 #endif
1640 
1641 #ifdef CONFIG_NUMA
1642 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1643 {
1644 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
1645 
1646 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
1647 				    s->objsize, s->size, gfpflags, node);
1648 
1649 	return ret;
1650 }
1651 EXPORT_SYMBOL(kmem_cache_alloc_node);
1652 #endif
1653 
1654 #ifdef CONFIG_KMEMTRACE
1655 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
1656 				    gfp_t gfpflags,
1657 				    int node)
1658 {
1659 	return slab_alloc(s, gfpflags, node, _RET_IP_);
1660 }
1661 EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
1662 #endif
1663 
1664 /*
1665  * Slow patch handling. This may still be called frequently since objects
1666  * have a longer lifetime than the cpu slabs in most processing loads.
1667  *
1668  * So we still attempt to reduce cache line usage. Just take the slab
1669  * lock and free the item. If there is no additional partial page
1670  * handling required then we can return immediately.
1671  */
1672 static void __slab_free(struct kmem_cache *s, struct page *page,
1673 			void *x, unsigned long addr, unsigned int offset)
1674 {
1675 	void *prior;
1676 	void **object = (void *)x;
1677 	struct kmem_cache_cpu *c;
1678 
1679 	c = get_cpu_slab(s, raw_smp_processor_id());
1680 	stat(c, FREE_SLOWPATH);
1681 	slab_lock(page);
1682 
1683 	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
1684 		goto debug;
1685 
1686 checks_ok:
1687 	prior = object[offset] = page->freelist;
1688 	page->freelist = object;
1689 	page->inuse--;
1690 
1691 	if (unlikely(PageSlubFrozen(page))) {
1692 		stat(c, FREE_FROZEN);
1693 		goto out_unlock;
1694 	}
1695 
1696 	if (unlikely(!page->inuse))
1697 		goto slab_empty;
1698 
1699 	/*
1700 	 * Objects left in the slab. If it was not on the partial list before
1701 	 * then add it.
1702 	 */
1703 	if (unlikely(!prior)) {
1704 		add_partial(get_node(s, page_to_nid(page)), page, 1);
1705 		stat(c, FREE_ADD_PARTIAL);
1706 	}
1707 
1708 out_unlock:
1709 	slab_unlock(page);
1710 	return;
1711 
1712 slab_empty:
1713 	if (prior) {
1714 		/*
1715 		 * Slab still on the partial list.
1716 		 */
1717 		remove_partial(s, page);
1718 		stat(c, FREE_REMOVE_PARTIAL);
1719 	}
1720 	slab_unlock(page);
1721 	stat(c, FREE_SLAB);
1722 	discard_slab(s, page);
1723 	return;
1724 
1725 debug:
1726 	if (!free_debug_processing(s, page, x, addr))
1727 		goto out_unlock;
1728 	goto checks_ok;
1729 }
1730 
1731 /*
1732  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1733  * can perform fastpath freeing without additional function calls.
1734  *
1735  * The fastpath is only possible if we are freeing to the current cpu slab
1736  * of this processor. This typically the case if we have just allocated
1737  * the item before.
1738  *
1739  * If fastpath is not possible then fall back to __slab_free where we deal
1740  * with all sorts of special processing.
1741  */
1742 static __always_inline void slab_free(struct kmem_cache *s,
1743 			struct page *page, void *x, unsigned long addr)
1744 {
1745 	void **object = (void *)x;
1746 	struct kmem_cache_cpu *c;
1747 	unsigned long flags;
1748 
1749 	local_irq_save(flags);
1750 	c = get_cpu_slab(s, smp_processor_id());
1751 	debug_check_no_locks_freed(object, c->objsize);
1752 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1753 		debug_check_no_obj_freed(object, c->objsize);
1754 	if (likely(page == c->page && c->node >= 0)) {
1755 		object[c->offset] = c->freelist;
1756 		c->freelist = object;
1757 		stat(c, FREE_FASTPATH);
1758 	} else
1759 		__slab_free(s, page, x, addr, c->offset);
1760 
1761 	local_irq_restore(flags);
1762 }
1763 
1764 void kmem_cache_free(struct kmem_cache *s, void *x)
1765 {
1766 	struct page *page;
1767 
1768 	page = virt_to_head_page(x);
1769 
1770 	slab_free(s, page, x, _RET_IP_);
1771 
1772 	trace_kmem_cache_free(_RET_IP_, x);
1773 }
1774 EXPORT_SYMBOL(kmem_cache_free);
1775 
1776 /* Figure out on which slab page the object resides */
1777 static struct page *get_object_page(const void *x)
1778 {
1779 	struct page *page = virt_to_head_page(x);
1780 
1781 	if (!PageSlab(page))
1782 		return NULL;
1783 
1784 	return page;
1785 }
1786 
1787 /*
1788  * Object placement in a slab is made very easy because we always start at
1789  * offset 0. If we tune the size of the object to the alignment then we can
1790  * get the required alignment by putting one properly sized object after
1791  * another.
1792  *
1793  * Notice that the allocation order determines the sizes of the per cpu
1794  * caches. Each processor has always one slab available for allocations.
1795  * Increasing the allocation order reduces the number of times that slabs
1796  * must be moved on and off the partial lists and is therefore a factor in
1797  * locking overhead.
1798  */
1799 
1800 /*
1801  * Mininum / Maximum order of slab pages. This influences locking overhead
1802  * and slab fragmentation. A higher order reduces the number of partial slabs
1803  * and increases the number of allocations possible without having to
1804  * take the list_lock.
1805  */
1806 static int slub_min_order;
1807 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
1808 static int slub_min_objects;
1809 
1810 /*
1811  * Merge control. If this is set then no merging of slab caches will occur.
1812  * (Could be removed. This was introduced to pacify the merge skeptics.)
1813  */
1814 static int slub_nomerge;
1815 
1816 /*
1817  * Calculate the order of allocation given an slab object size.
1818  *
1819  * The order of allocation has significant impact on performance and other
1820  * system components. Generally order 0 allocations should be preferred since
1821  * order 0 does not cause fragmentation in the page allocator. Larger objects
1822  * be problematic to put into order 0 slabs because there may be too much
1823  * unused space left. We go to a higher order if more than 1/16th of the slab
1824  * would be wasted.
1825  *
1826  * In order to reach satisfactory performance we must ensure that a minimum
1827  * number of objects is in one slab. Otherwise we may generate too much
1828  * activity on the partial lists which requires taking the list_lock. This is
1829  * less a concern for large slabs though which are rarely used.
1830  *
1831  * slub_max_order specifies the order where we begin to stop considering the
1832  * number of objects in a slab as critical. If we reach slub_max_order then
1833  * we try to keep the page order as low as possible. So we accept more waste
1834  * of space in favor of a small page order.
1835  *
1836  * Higher order allocations also allow the placement of more objects in a
1837  * slab and thereby reduce object handling overhead. If the user has
1838  * requested a higher mininum order then we start with that one instead of
1839  * the smallest order which will fit the object.
1840  */
1841 static inline int slab_order(int size, int min_objects,
1842 				int max_order, int fract_leftover)
1843 {
1844 	int order;
1845 	int rem;
1846 	int min_order = slub_min_order;
1847 
1848 	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
1849 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
1850 
1851 	for (order = max(min_order,
1852 				fls(min_objects * size - 1) - PAGE_SHIFT);
1853 			order <= max_order; order++) {
1854 
1855 		unsigned long slab_size = PAGE_SIZE << order;
1856 
1857 		if (slab_size < min_objects * size)
1858 			continue;
1859 
1860 		rem = slab_size % size;
1861 
1862 		if (rem <= slab_size / fract_leftover)
1863 			break;
1864 
1865 	}
1866 
1867 	return order;
1868 }
1869 
1870 static inline int calculate_order(int size)
1871 {
1872 	int order;
1873 	int min_objects;
1874 	int fraction;
1875 	int max_objects;
1876 
1877 	/*
1878 	 * Attempt to find best configuration for a slab. This
1879 	 * works by first attempting to generate a layout with
1880 	 * the best configuration and backing off gradually.
1881 	 *
1882 	 * First we reduce the acceptable waste in a slab. Then
1883 	 * we reduce the minimum objects required in a slab.
1884 	 */
1885 	min_objects = slub_min_objects;
1886 	if (!min_objects)
1887 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
1888 	max_objects = (PAGE_SIZE << slub_max_order)/size;
1889 	min_objects = min(min_objects, max_objects);
1890 
1891 	while (min_objects > 1) {
1892 		fraction = 16;
1893 		while (fraction >= 4) {
1894 			order = slab_order(size, min_objects,
1895 						slub_max_order, fraction);
1896 			if (order <= slub_max_order)
1897 				return order;
1898 			fraction /= 2;
1899 		}
1900 		min_objects --;
1901 	}
1902 
1903 	/*
1904 	 * We were unable to place multiple objects in a slab. Now
1905 	 * lets see if we can place a single object there.
1906 	 */
1907 	order = slab_order(size, 1, slub_max_order, 1);
1908 	if (order <= slub_max_order)
1909 		return order;
1910 
1911 	/*
1912 	 * Doh this slab cannot be placed using slub_max_order.
1913 	 */
1914 	order = slab_order(size, 1, MAX_ORDER, 1);
1915 	if (order < MAX_ORDER)
1916 		return order;
1917 	return -ENOSYS;
1918 }
1919 
1920 /*
1921  * Figure out what the alignment of the objects will be.
1922  */
1923 static unsigned long calculate_alignment(unsigned long flags,
1924 		unsigned long align, unsigned long size)
1925 {
1926 	/*
1927 	 * If the user wants hardware cache aligned objects then follow that
1928 	 * suggestion if the object is sufficiently large.
1929 	 *
1930 	 * The hardware cache alignment cannot override the specified
1931 	 * alignment though. If that is greater then use it.
1932 	 */
1933 	if (flags & SLAB_HWCACHE_ALIGN) {
1934 		unsigned long ralign = cache_line_size();
1935 		while (size <= ralign / 2)
1936 			ralign /= 2;
1937 		align = max(align, ralign);
1938 	}
1939 
1940 	if (align < ARCH_SLAB_MINALIGN)
1941 		align = ARCH_SLAB_MINALIGN;
1942 
1943 	return ALIGN(align, sizeof(void *));
1944 }
1945 
1946 static void init_kmem_cache_cpu(struct kmem_cache *s,
1947 			struct kmem_cache_cpu *c)
1948 {
1949 	c->page = NULL;
1950 	c->freelist = NULL;
1951 	c->node = 0;
1952 	c->offset = s->offset / sizeof(void *);
1953 	c->objsize = s->objsize;
1954 #ifdef CONFIG_SLUB_STATS
1955 	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1956 #endif
1957 }
1958 
1959 static void
1960 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
1961 {
1962 	n->nr_partial = 0;
1963 	spin_lock_init(&n->list_lock);
1964 	INIT_LIST_HEAD(&n->partial);
1965 #ifdef CONFIG_SLUB_DEBUG
1966 	atomic_long_set(&n->nr_slabs, 0);
1967 	atomic_long_set(&n->total_objects, 0);
1968 	INIT_LIST_HEAD(&n->full);
1969 #endif
1970 }
1971 
1972 #ifdef CONFIG_SMP
1973 /*
1974  * Per cpu array for per cpu structures.
1975  *
1976  * The per cpu array places all kmem_cache_cpu structures from one processor
1977  * close together meaning that it becomes possible that multiple per cpu
1978  * structures are contained in one cacheline. This may be particularly
1979  * beneficial for the kmalloc caches.
1980  *
1981  * A desktop system typically has around 60-80 slabs. With 100 here we are
1982  * likely able to get per cpu structures for all caches from the array defined
1983  * here. We must be able to cover all kmalloc caches during bootstrap.
1984  *
1985  * If the per cpu array is exhausted then fall back to kmalloc
1986  * of individual cachelines. No sharing is possible then.
1987  */
1988 #define NR_KMEM_CACHE_CPU 100
1989 
1990 static DEFINE_PER_CPU(struct kmem_cache_cpu,
1991 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1992 
1993 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1994 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
1995 
1996 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1997 							int cpu, gfp_t flags)
1998 {
1999 	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
2000 
2001 	if (c)
2002 		per_cpu(kmem_cache_cpu_free, cpu) =
2003 				(void *)c->freelist;
2004 	else {
2005 		/* Table overflow: So allocate ourselves */
2006 		c = kmalloc_node(
2007 			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
2008 			flags, cpu_to_node(cpu));
2009 		if (!c)
2010 			return NULL;
2011 	}
2012 
2013 	init_kmem_cache_cpu(s, c);
2014 	return c;
2015 }
2016 
2017 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
2018 {
2019 	if (c < per_cpu(kmem_cache_cpu, cpu) ||
2020 			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
2021 		kfree(c);
2022 		return;
2023 	}
2024 	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
2025 	per_cpu(kmem_cache_cpu_free, cpu) = c;
2026 }
2027 
2028 static void free_kmem_cache_cpus(struct kmem_cache *s)
2029 {
2030 	int cpu;
2031 
2032 	for_each_online_cpu(cpu) {
2033 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2034 
2035 		if (c) {
2036 			s->cpu_slab[cpu] = NULL;
2037 			free_kmem_cache_cpu(c, cpu);
2038 		}
2039 	}
2040 }
2041 
2042 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2043 {
2044 	int cpu;
2045 
2046 	for_each_online_cpu(cpu) {
2047 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2048 
2049 		if (c)
2050 			continue;
2051 
2052 		c = alloc_kmem_cache_cpu(s, cpu, flags);
2053 		if (!c) {
2054 			free_kmem_cache_cpus(s);
2055 			return 0;
2056 		}
2057 		s->cpu_slab[cpu] = c;
2058 	}
2059 	return 1;
2060 }
2061 
2062 /*
2063  * Initialize the per cpu array.
2064  */
2065 static void init_alloc_cpu_cpu(int cpu)
2066 {
2067 	int i;
2068 
2069 	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
2070 		return;
2071 
2072 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2073 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2074 
2075 	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
2076 }
2077 
2078 static void __init init_alloc_cpu(void)
2079 {
2080 	int cpu;
2081 
2082 	for_each_online_cpu(cpu)
2083 		init_alloc_cpu_cpu(cpu);
2084   }
2085 
2086 #else
2087 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2088 static inline void init_alloc_cpu(void) {}
2089 
2090 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2091 {
2092 	init_kmem_cache_cpu(s, &s->cpu_slab);
2093 	return 1;
2094 }
2095 #endif
2096 
2097 #ifdef CONFIG_NUMA
2098 /*
2099  * No kmalloc_node yet so do it by hand. We know that this is the first
2100  * slab on the node for this slabcache. There are no concurrent accesses
2101  * possible.
2102  *
2103  * Note that this function only works on the kmalloc_node_cache
2104  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2105  * memory on a fresh node that has no slab structures yet.
2106  */
2107 static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
2108 {
2109 	struct page *page;
2110 	struct kmem_cache_node *n;
2111 	unsigned long flags;
2112 
2113 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2114 
2115 	page = new_slab(kmalloc_caches, gfpflags, node);
2116 
2117 	BUG_ON(!page);
2118 	if (page_to_nid(page) != node) {
2119 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2120 				"node %d\n", node);
2121 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2122 				"in order to be able to continue\n");
2123 	}
2124 
2125 	n = page->freelist;
2126 	BUG_ON(!n);
2127 	page->freelist = get_freepointer(kmalloc_caches, n);
2128 	page->inuse++;
2129 	kmalloc_caches->node[node] = n;
2130 #ifdef CONFIG_SLUB_DEBUG
2131 	init_object(kmalloc_caches, n, 1);
2132 	init_tracking(kmalloc_caches, n);
2133 #endif
2134 	init_kmem_cache_node(n, kmalloc_caches);
2135 	inc_slabs_node(kmalloc_caches, node, page->objects);
2136 
2137 	/*
2138 	 * lockdep requires consistent irq usage for each lock
2139 	 * so even though there cannot be a race this early in
2140 	 * the boot sequence, we still disable irqs.
2141 	 */
2142 	local_irq_save(flags);
2143 	add_partial(n, page, 0);
2144 	local_irq_restore(flags);
2145 }
2146 
2147 static void free_kmem_cache_nodes(struct kmem_cache *s)
2148 {
2149 	int node;
2150 
2151 	for_each_node_state(node, N_NORMAL_MEMORY) {
2152 		struct kmem_cache_node *n = s->node[node];
2153 		if (n && n != &s->local_node)
2154 			kmem_cache_free(kmalloc_caches, n);
2155 		s->node[node] = NULL;
2156 	}
2157 }
2158 
2159 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2160 {
2161 	int node;
2162 	int local_node;
2163 
2164 	if (slab_state >= UP)
2165 		local_node = page_to_nid(virt_to_page(s));
2166 	else
2167 		local_node = 0;
2168 
2169 	for_each_node_state(node, N_NORMAL_MEMORY) {
2170 		struct kmem_cache_node *n;
2171 
2172 		if (local_node == node)
2173 			n = &s->local_node;
2174 		else {
2175 			if (slab_state == DOWN) {
2176 				early_kmem_cache_node_alloc(gfpflags, node);
2177 				continue;
2178 			}
2179 			n = kmem_cache_alloc_node(kmalloc_caches,
2180 							gfpflags, node);
2181 
2182 			if (!n) {
2183 				free_kmem_cache_nodes(s);
2184 				return 0;
2185 			}
2186 
2187 		}
2188 		s->node[node] = n;
2189 		init_kmem_cache_node(n, s);
2190 	}
2191 	return 1;
2192 }
2193 #else
2194 static void free_kmem_cache_nodes(struct kmem_cache *s)
2195 {
2196 }
2197 
2198 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2199 {
2200 	init_kmem_cache_node(&s->local_node, s);
2201 	return 1;
2202 }
2203 #endif
2204 
2205 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2206 {
2207 	if (min < MIN_PARTIAL)
2208 		min = MIN_PARTIAL;
2209 	else if (min > MAX_PARTIAL)
2210 		min = MAX_PARTIAL;
2211 	s->min_partial = min;
2212 }
2213 
2214 /*
2215  * calculate_sizes() determines the order and the distribution of data within
2216  * a slab object.
2217  */
2218 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2219 {
2220 	unsigned long flags = s->flags;
2221 	unsigned long size = s->objsize;
2222 	unsigned long align = s->align;
2223 	int order;
2224 
2225 	/*
2226 	 * Round up object size to the next word boundary. We can only
2227 	 * place the free pointer at word boundaries and this determines
2228 	 * the possible location of the free pointer.
2229 	 */
2230 	size = ALIGN(size, sizeof(void *));
2231 
2232 #ifdef CONFIG_SLUB_DEBUG
2233 	/*
2234 	 * Determine if we can poison the object itself. If the user of
2235 	 * the slab may touch the object after free or before allocation
2236 	 * then we should never poison the object itself.
2237 	 */
2238 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2239 			!s->ctor)
2240 		s->flags |= __OBJECT_POISON;
2241 	else
2242 		s->flags &= ~__OBJECT_POISON;
2243 
2244 
2245 	/*
2246 	 * If we are Redzoning then check if there is some space between the
2247 	 * end of the object and the free pointer. If not then add an
2248 	 * additional word to have some bytes to store Redzone information.
2249 	 */
2250 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2251 		size += sizeof(void *);
2252 #endif
2253 
2254 	/*
2255 	 * With that we have determined the number of bytes in actual use
2256 	 * by the object. This is the potential offset to the free pointer.
2257 	 */
2258 	s->inuse = size;
2259 
2260 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2261 		s->ctor)) {
2262 		/*
2263 		 * Relocate free pointer after the object if it is not
2264 		 * permitted to overwrite the first word of the object on
2265 		 * kmem_cache_free.
2266 		 *
2267 		 * This is the case if we do RCU, have a constructor or
2268 		 * destructor or are poisoning the objects.
2269 		 */
2270 		s->offset = size;
2271 		size += sizeof(void *);
2272 	}
2273 
2274 #ifdef CONFIG_SLUB_DEBUG
2275 	if (flags & SLAB_STORE_USER)
2276 		/*
2277 		 * Need to store information about allocs and frees after
2278 		 * the object.
2279 		 */
2280 		size += 2 * sizeof(struct track);
2281 
2282 	if (flags & SLAB_RED_ZONE)
2283 		/*
2284 		 * Add some empty padding so that we can catch
2285 		 * overwrites from earlier objects rather than let
2286 		 * tracking information or the free pointer be
2287 		 * corrupted if a user writes before the start
2288 		 * of the object.
2289 		 */
2290 		size += sizeof(void *);
2291 #endif
2292 
2293 	/*
2294 	 * Determine the alignment based on various parameters that the
2295 	 * user specified and the dynamic determination of cache line size
2296 	 * on bootup.
2297 	 */
2298 	align = calculate_alignment(flags, align, s->objsize);
2299 
2300 	/*
2301 	 * SLUB stores one object immediately after another beginning from
2302 	 * offset 0. In order to align the objects we have to simply size
2303 	 * each object to conform to the alignment.
2304 	 */
2305 	size = ALIGN(size, align);
2306 	s->size = size;
2307 	if (forced_order >= 0)
2308 		order = forced_order;
2309 	else
2310 		order = calculate_order(size);
2311 
2312 	if (order < 0)
2313 		return 0;
2314 
2315 	s->allocflags = 0;
2316 	if (order)
2317 		s->allocflags |= __GFP_COMP;
2318 
2319 	if (s->flags & SLAB_CACHE_DMA)
2320 		s->allocflags |= SLUB_DMA;
2321 
2322 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2323 		s->allocflags |= __GFP_RECLAIMABLE;
2324 
2325 	/*
2326 	 * Determine the number of objects per slab
2327 	 */
2328 	s->oo = oo_make(order, size);
2329 	s->min = oo_make(get_order(size), size);
2330 	if (oo_objects(s->oo) > oo_objects(s->max))
2331 		s->max = s->oo;
2332 
2333 	return !!oo_objects(s->oo);
2334 
2335 }
2336 
2337 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2338 		const char *name, size_t size,
2339 		size_t align, unsigned long flags,
2340 		void (*ctor)(void *))
2341 {
2342 	memset(s, 0, kmem_size);
2343 	s->name = name;
2344 	s->ctor = ctor;
2345 	s->objsize = size;
2346 	s->align = align;
2347 	s->flags = kmem_cache_flags(size, flags, name, ctor);
2348 
2349 	if (!calculate_sizes(s, -1))
2350 		goto error;
2351 
2352 	/*
2353 	 * The larger the object size is, the more pages we want on the partial
2354 	 * list to avoid pounding the page allocator excessively.
2355 	 */
2356 	set_min_partial(s, ilog2(s->size));
2357 	s->refcount = 1;
2358 #ifdef CONFIG_NUMA
2359 	s->remote_node_defrag_ratio = 1000;
2360 #endif
2361 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2362 		goto error;
2363 
2364 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2365 		return 1;
2366 	free_kmem_cache_nodes(s);
2367 error:
2368 	if (flags & SLAB_PANIC)
2369 		panic("Cannot create slab %s size=%lu realsize=%u "
2370 			"order=%u offset=%u flags=%lx\n",
2371 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
2372 			s->offset, flags);
2373 	return 0;
2374 }
2375 
2376 /*
2377  * Check if a given pointer is valid
2378  */
2379 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2380 {
2381 	struct page *page;
2382 
2383 	page = get_object_page(object);
2384 
2385 	if (!page || s != page->slab)
2386 		/* No slab or wrong slab */
2387 		return 0;
2388 
2389 	if (!check_valid_pointer(s, page, object))
2390 		return 0;
2391 
2392 	/*
2393 	 * We could also check if the object is on the slabs freelist.
2394 	 * But this would be too expensive and it seems that the main
2395 	 * purpose of kmem_ptr_valid() is to check if the object belongs
2396 	 * to a certain slab.
2397 	 */
2398 	return 1;
2399 }
2400 EXPORT_SYMBOL(kmem_ptr_validate);
2401 
2402 /*
2403  * Determine the size of a slab object
2404  */
2405 unsigned int kmem_cache_size(struct kmem_cache *s)
2406 {
2407 	return s->objsize;
2408 }
2409 EXPORT_SYMBOL(kmem_cache_size);
2410 
2411 const char *kmem_cache_name(struct kmem_cache *s)
2412 {
2413 	return s->name;
2414 }
2415 EXPORT_SYMBOL(kmem_cache_name);
2416 
2417 static void list_slab_objects(struct kmem_cache *s, struct page *page,
2418 							const char *text)
2419 {
2420 #ifdef CONFIG_SLUB_DEBUG
2421 	void *addr = page_address(page);
2422 	void *p;
2423 	DECLARE_BITMAP(map, page->objects);
2424 
2425 	bitmap_zero(map, page->objects);
2426 	slab_err(s, page, "%s", text);
2427 	slab_lock(page);
2428 	for_each_free_object(p, s, page->freelist)
2429 		set_bit(slab_index(p, s, addr), map);
2430 
2431 	for_each_object(p, s, addr, page->objects) {
2432 
2433 		if (!test_bit(slab_index(p, s, addr), map)) {
2434 			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2435 							p, p - addr);
2436 			print_tracking(s, p);
2437 		}
2438 	}
2439 	slab_unlock(page);
2440 #endif
2441 }
2442 
2443 /*
2444  * Attempt to free all partial slabs on a node.
2445  */
2446 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
2447 {
2448 	unsigned long flags;
2449 	struct page *page, *h;
2450 
2451 	spin_lock_irqsave(&n->list_lock, flags);
2452 	list_for_each_entry_safe(page, h, &n->partial, lru) {
2453 		if (!page->inuse) {
2454 			list_del(&page->lru);
2455 			discard_slab(s, page);
2456 			n->nr_partial--;
2457 		} else {
2458 			list_slab_objects(s, page,
2459 				"Objects remaining on kmem_cache_close()");
2460 		}
2461 	}
2462 	spin_unlock_irqrestore(&n->list_lock, flags);
2463 }
2464 
2465 /*
2466  * Release all resources used by a slab cache.
2467  */
2468 static inline int kmem_cache_close(struct kmem_cache *s)
2469 {
2470 	int node;
2471 
2472 	flush_all(s);
2473 
2474 	/* Attempt to free all objects */
2475 	free_kmem_cache_cpus(s);
2476 	for_each_node_state(node, N_NORMAL_MEMORY) {
2477 		struct kmem_cache_node *n = get_node(s, node);
2478 
2479 		free_partial(s, n);
2480 		if (n->nr_partial || slabs_node(s, node))
2481 			return 1;
2482 	}
2483 	free_kmem_cache_nodes(s);
2484 	return 0;
2485 }
2486 
2487 /*
2488  * Close a cache and release the kmem_cache structure
2489  * (must be used for caches created using kmem_cache_create)
2490  */
2491 void kmem_cache_destroy(struct kmem_cache *s)
2492 {
2493 	down_write(&slub_lock);
2494 	s->refcount--;
2495 	if (!s->refcount) {
2496 		list_del(&s->list);
2497 		up_write(&slub_lock);
2498 		if (kmem_cache_close(s)) {
2499 			printk(KERN_ERR "SLUB %s: %s called for cache that "
2500 				"still has objects.\n", s->name, __func__);
2501 			dump_stack();
2502 		}
2503 		sysfs_slab_remove(s);
2504 	} else
2505 		up_write(&slub_lock);
2506 }
2507 EXPORT_SYMBOL(kmem_cache_destroy);
2508 
2509 /********************************************************************
2510  *		Kmalloc subsystem
2511  *******************************************************************/
2512 
2513 struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
2514 EXPORT_SYMBOL(kmalloc_caches);
2515 
2516 static int __init setup_slub_min_order(char *str)
2517 {
2518 	get_option(&str, &slub_min_order);
2519 
2520 	return 1;
2521 }
2522 
2523 __setup("slub_min_order=", setup_slub_min_order);
2524 
2525 static int __init setup_slub_max_order(char *str)
2526 {
2527 	get_option(&str, &slub_max_order);
2528 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
2529 
2530 	return 1;
2531 }
2532 
2533 __setup("slub_max_order=", setup_slub_max_order);
2534 
2535 static int __init setup_slub_min_objects(char *str)
2536 {
2537 	get_option(&str, &slub_min_objects);
2538 
2539 	return 1;
2540 }
2541 
2542 __setup("slub_min_objects=", setup_slub_min_objects);
2543 
2544 static int __init setup_slub_nomerge(char *str)
2545 {
2546 	slub_nomerge = 1;
2547 	return 1;
2548 }
2549 
2550 __setup("slub_nomerge", setup_slub_nomerge);
2551 
2552 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2553 		const char *name, int size, gfp_t gfp_flags)
2554 {
2555 	unsigned int flags = 0;
2556 
2557 	if (gfp_flags & SLUB_DMA)
2558 		flags = SLAB_CACHE_DMA;
2559 
2560 	down_write(&slub_lock);
2561 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2562 								flags, NULL))
2563 		goto panic;
2564 
2565 	list_add(&s->list, &slab_caches);
2566 	up_write(&slub_lock);
2567 	if (sysfs_slab_add(s))
2568 		goto panic;
2569 	return s;
2570 
2571 panic:
2572 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2573 }
2574 
2575 #ifdef CONFIG_ZONE_DMA
2576 static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
2577 
2578 static void sysfs_add_func(struct work_struct *w)
2579 {
2580 	struct kmem_cache *s;
2581 
2582 	down_write(&slub_lock);
2583 	list_for_each_entry(s, &slab_caches, list) {
2584 		if (s->flags & __SYSFS_ADD_DEFERRED) {
2585 			s->flags &= ~__SYSFS_ADD_DEFERRED;
2586 			sysfs_slab_add(s);
2587 		}
2588 	}
2589 	up_write(&slub_lock);
2590 }
2591 
2592 static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2593 
2594 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2595 {
2596 	struct kmem_cache *s;
2597 	char *text;
2598 	size_t realsize;
2599 
2600 	s = kmalloc_caches_dma[index];
2601 	if (s)
2602 		return s;
2603 
2604 	/* Dynamically create dma cache */
2605 	if (flags & __GFP_WAIT)
2606 		down_write(&slub_lock);
2607 	else {
2608 		if (!down_write_trylock(&slub_lock))
2609 			goto out;
2610 	}
2611 
2612 	if (kmalloc_caches_dma[index])
2613 		goto unlock_out;
2614 
2615 	realsize = kmalloc_caches[index].objsize;
2616 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2617 			 (unsigned int)realsize);
2618 	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2619 
2620 	if (!s || !text || !kmem_cache_open(s, flags, text,
2621 			realsize, ARCH_KMALLOC_MINALIGN,
2622 			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2623 		kfree(s);
2624 		kfree(text);
2625 		goto unlock_out;
2626 	}
2627 
2628 	list_add(&s->list, &slab_caches);
2629 	kmalloc_caches_dma[index] = s;
2630 
2631 	schedule_work(&sysfs_add_work);
2632 
2633 unlock_out:
2634 	up_write(&slub_lock);
2635 out:
2636 	return kmalloc_caches_dma[index];
2637 }
2638 #endif
2639 
2640 /*
2641  * Conversion table for small slabs sizes / 8 to the index in the
2642  * kmalloc array. This is necessary for slabs < 192 since we have non power
2643  * of two cache sizes there. The size of larger slabs can be determined using
2644  * fls.
2645  */
2646 static s8 size_index[24] = {
2647 	3,	/* 8 */
2648 	4,	/* 16 */
2649 	5,	/* 24 */
2650 	5,	/* 32 */
2651 	6,	/* 40 */
2652 	6,	/* 48 */
2653 	6,	/* 56 */
2654 	6,	/* 64 */
2655 	1,	/* 72 */
2656 	1,	/* 80 */
2657 	1,	/* 88 */
2658 	1,	/* 96 */
2659 	7,	/* 104 */
2660 	7,	/* 112 */
2661 	7,	/* 120 */
2662 	7,	/* 128 */
2663 	2,	/* 136 */
2664 	2,	/* 144 */
2665 	2,	/* 152 */
2666 	2,	/* 160 */
2667 	2,	/* 168 */
2668 	2,	/* 176 */
2669 	2,	/* 184 */
2670 	2	/* 192 */
2671 };
2672 
2673 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2674 {
2675 	int index;
2676 
2677 	if (size <= 192) {
2678 		if (!size)
2679 			return ZERO_SIZE_PTR;
2680 
2681 		index = size_index[(size - 1) / 8];
2682 	} else
2683 		index = fls(size - 1);
2684 
2685 #ifdef CONFIG_ZONE_DMA
2686 	if (unlikely((flags & SLUB_DMA)))
2687 		return dma_kmalloc_cache(index, flags);
2688 
2689 #endif
2690 	return &kmalloc_caches[index];
2691 }
2692 
2693 void *__kmalloc(size_t size, gfp_t flags)
2694 {
2695 	struct kmem_cache *s;
2696 	void *ret;
2697 
2698 	if (unlikely(size > SLUB_MAX_SIZE))
2699 		return kmalloc_large(size, flags);
2700 
2701 	s = get_slab(size, flags);
2702 
2703 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2704 		return s;
2705 
2706 	ret = slab_alloc(s, flags, -1, _RET_IP_);
2707 
2708 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
2709 
2710 	return ret;
2711 }
2712 EXPORT_SYMBOL(__kmalloc);
2713 
2714 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2715 {
2716 	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2717 						get_order(size));
2718 
2719 	if (page)
2720 		return page_address(page);
2721 	else
2722 		return NULL;
2723 }
2724 
2725 #ifdef CONFIG_NUMA
2726 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2727 {
2728 	struct kmem_cache *s;
2729 	void *ret;
2730 
2731 	if (unlikely(size > SLUB_MAX_SIZE)) {
2732 		ret = kmalloc_large_node(size, flags, node);
2733 
2734 		trace_kmalloc_node(_RET_IP_, ret,
2735 				   size, PAGE_SIZE << get_order(size),
2736 				   flags, node);
2737 
2738 		return ret;
2739 	}
2740 
2741 	s = get_slab(size, flags);
2742 
2743 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2744 		return s;
2745 
2746 	ret = slab_alloc(s, flags, node, _RET_IP_);
2747 
2748 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
2749 
2750 	return ret;
2751 }
2752 EXPORT_SYMBOL(__kmalloc_node);
2753 #endif
2754 
2755 size_t ksize(const void *object)
2756 {
2757 	struct page *page;
2758 	struct kmem_cache *s;
2759 
2760 	if (unlikely(object == ZERO_SIZE_PTR))
2761 		return 0;
2762 
2763 	page = virt_to_head_page(object);
2764 
2765 	if (unlikely(!PageSlab(page))) {
2766 		WARN_ON(!PageCompound(page));
2767 		return PAGE_SIZE << compound_order(page);
2768 	}
2769 	s = page->slab;
2770 
2771 #ifdef CONFIG_SLUB_DEBUG
2772 	/*
2773 	 * Debugging requires use of the padding between object
2774 	 * and whatever may come after it.
2775 	 */
2776 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2777 		return s->objsize;
2778 
2779 #endif
2780 	/*
2781 	 * If we have the need to store the freelist pointer
2782 	 * back there or track user information then we can
2783 	 * only use the space before that information.
2784 	 */
2785 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2786 		return s->inuse;
2787 	/*
2788 	 * Else we can use all the padding etc for the allocation
2789 	 */
2790 	return s->size;
2791 }
2792 EXPORT_SYMBOL(ksize);
2793 
2794 void kfree(const void *x)
2795 {
2796 	struct page *page;
2797 	void *object = (void *)x;
2798 
2799 	trace_kfree(_RET_IP_, x);
2800 
2801 	if (unlikely(ZERO_OR_NULL_PTR(x)))
2802 		return;
2803 
2804 	page = virt_to_head_page(x);
2805 	if (unlikely(!PageSlab(page))) {
2806 		BUG_ON(!PageCompound(page));
2807 		put_page(page);
2808 		return;
2809 	}
2810 	slab_free(page->slab, page, object, _RET_IP_);
2811 }
2812 EXPORT_SYMBOL(kfree);
2813 
2814 /*
2815  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2816  * the remaining slabs by the number of items in use. The slabs with the
2817  * most items in use come first. New allocations will then fill those up
2818  * and thus they can be removed from the partial lists.
2819  *
2820  * The slabs with the least items are placed last. This results in them
2821  * being allocated from last increasing the chance that the last objects
2822  * are freed in them.
2823  */
2824 int kmem_cache_shrink(struct kmem_cache *s)
2825 {
2826 	int node;
2827 	int i;
2828 	struct kmem_cache_node *n;
2829 	struct page *page;
2830 	struct page *t;
2831 	int objects = oo_objects(s->max);
2832 	struct list_head *slabs_by_inuse =
2833 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2834 	unsigned long flags;
2835 
2836 	if (!slabs_by_inuse)
2837 		return -ENOMEM;
2838 
2839 	flush_all(s);
2840 	for_each_node_state(node, N_NORMAL_MEMORY) {
2841 		n = get_node(s, node);
2842 
2843 		if (!n->nr_partial)
2844 			continue;
2845 
2846 		for (i = 0; i < objects; i++)
2847 			INIT_LIST_HEAD(slabs_by_inuse + i);
2848 
2849 		spin_lock_irqsave(&n->list_lock, flags);
2850 
2851 		/*
2852 		 * Build lists indexed by the items in use in each slab.
2853 		 *
2854 		 * Note that concurrent frees may occur while we hold the
2855 		 * list_lock. page->inuse here is the upper limit.
2856 		 */
2857 		list_for_each_entry_safe(page, t, &n->partial, lru) {
2858 			if (!page->inuse && slab_trylock(page)) {
2859 				/*
2860 				 * Must hold slab lock here because slab_free
2861 				 * may have freed the last object and be
2862 				 * waiting to release the slab.
2863 				 */
2864 				list_del(&page->lru);
2865 				n->nr_partial--;
2866 				slab_unlock(page);
2867 				discard_slab(s, page);
2868 			} else {
2869 				list_move(&page->lru,
2870 				slabs_by_inuse + page->inuse);
2871 			}
2872 		}
2873 
2874 		/*
2875 		 * Rebuild the partial list with the slabs filled up most
2876 		 * first and the least used slabs at the end.
2877 		 */
2878 		for (i = objects - 1; i >= 0; i--)
2879 			list_splice(slabs_by_inuse + i, n->partial.prev);
2880 
2881 		spin_unlock_irqrestore(&n->list_lock, flags);
2882 	}
2883 
2884 	kfree(slabs_by_inuse);
2885 	return 0;
2886 }
2887 EXPORT_SYMBOL(kmem_cache_shrink);
2888 
2889 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2890 static int slab_mem_going_offline_callback(void *arg)
2891 {
2892 	struct kmem_cache *s;
2893 
2894 	down_read(&slub_lock);
2895 	list_for_each_entry(s, &slab_caches, list)
2896 		kmem_cache_shrink(s);
2897 	up_read(&slub_lock);
2898 
2899 	return 0;
2900 }
2901 
2902 static void slab_mem_offline_callback(void *arg)
2903 {
2904 	struct kmem_cache_node *n;
2905 	struct kmem_cache *s;
2906 	struct memory_notify *marg = arg;
2907 	int offline_node;
2908 
2909 	offline_node = marg->status_change_nid;
2910 
2911 	/*
2912 	 * If the node still has available memory. we need kmem_cache_node
2913 	 * for it yet.
2914 	 */
2915 	if (offline_node < 0)
2916 		return;
2917 
2918 	down_read(&slub_lock);
2919 	list_for_each_entry(s, &slab_caches, list) {
2920 		n = get_node(s, offline_node);
2921 		if (n) {
2922 			/*
2923 			 * if n->nr_slabs > 0, slabs still exist on the node
2924 			 * that is going down. We were unable to free them,
2925 			 * and offline_pages() function shoudn't call this
2926 			 * callback. So, we must fail.
2927 			 */
2928 			BUG_ON(slabs_node(s, offline_node));
2929 
2930 			s->node[offline_node] = NULL;
2931 			kmem_cache_free(kmalloc_caches, n);
2932 		}
2933 	}
2934 	up_read(&slub_lock);
2935 }
2936 
2937 static int slab_mem_going_online_callback(void *arg)
2938 {
2939 	struct kmem_cache_node *n;
2940 	struct kmem_cache *s;
2941 	struct memory_notify *marg = arg;
2942 	int nid = marg->status_change_nid;
2943 	int ret = 0;
2944 
2945 	/*
2946 	 * If the node's memory is already available, then kmem_cache_node is
2947 	 * already created. Nothing to do.
2948 	 */
2949 	if (nid < 0)
2950 		return 0;
2951 
2952 	/*
2953 	 * We are bringing a node online. No memory is available yet. We must
2954 	 * allocate a kmem_cache_node structure in order to bring the node
2955 	 * online.
2956 	 */
2957 	down_read(&slub_lock);
2958 	list_for_each_entry(s, &slab_caches, list) {
2959 		/*
2960 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
2961 		 *      since memory is not yet available from the node that
2962 		 *      is brought up.
2963 		 */
2964 		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2965 		if (!n) {
2966 			ret = -ENOMEM;
2967 			goto out;
2968 		}
2969 		init_kmem_cache_node(n, s);
2970 		s->node[nid] = n;
2971 	}
2972 out:
2973 	up_read(&slub_lock);
2974 	return ret;
2975 }
2976 
2977 static int slab_memory_callback(struct notifier_block *self,
2978 				unsigned long action, void *arg)
2979 {
2980 	int ret = 0;
2981 
2982 	switch (action) {
2983 	case MEM_GOING_ONLINE:
2984 		ret = slab_mem_going_online_callback(arg);
2985 		break;
2986 	case MEM_GOING_OFFLINE:
2987 		ret = slab_mem_going_offline_callback(arg);
2988 		break;
2989 	case MEM_OFFLINE:
2990 	case MEM_CANCEL_ONLINE:
2991 		slab_mem_offline_callback(arg);
2992 		break;
2993 	case MEM_ONLINE:
2994 	case MEM_CANCEL_OFFLINE:
2995 		break;
2996 	}
2997 	if (ret)
2998 		ret = notifier_from_errno(ret);
2999 	else
3000 		ret = NOTIFY_OK;
3001 	return ret;
3002 }
3003 
3004 #endif /* CONFIG_MEMORY_HOTPLUG */
3005 
3006 /********************************************************************
3007  *			Basic setup of slabs
3008  *******************************************************************/
3009 
3010 void __init kmem_cache_init(void)
3011 {
3012 	int i;
3013 	int caches = 0;
3014 
3015 	init_alloc_cpu();
3016 
3017 #ifdef CONFIG_NUMA
3018 	/*
3019 	 * Must first have the slab cache available for the allocations of the
3020 	 * struct kmem_cache_node's. There is special bootstrap code in
3021 	 * kmem_cache_open for slab_state == DOWN.
3022 	 */
3023 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
3024 		sizeof(struct kmem_cache_node), GFP_KERNEL);
3025 	kmalloc_caches[0].refcount = -1;
3026 	caches++;
3027 
3028 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3029 #endif
3030 
3031 	/* Able to allocate the per node structures */
3032 	slab_state = PARTIAL;
3033 
3034 	/* Caches that are not of the two-to-the-power-of size */
3035 	if (KMALLOC_MIN_SIZE <= 64) {
3036 		create_kmalloc_cache(&kmalloc_caches[1],
3037 				"kmalloc-96", 96, GFP_KERNEL);
3038 		caches++;
3039 		create_kmalloc_cache(&kmalloc_caches[2],
3040 				"kmalloc-192", 192, GFP_KERNEL);
3041 		caches++;
3042 	}
3043 
3044 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3045 		create_kmalloc_cache(&kmalloc_caches[i],
3046 			"kmalloc", 1 << i, GFP_KERNEL);
3047 		caches++;
3048 	}
3049 
3050 
3051 	/*
3052 	 * Patch up the size_index table if we have strange large alignment
3053 	 * requirements for the kmalloc array. This is only the case for
3054 	 * MIPS it seems. The standard arches will not generate any code here.
3055 	 *
3056 	 * Largest permitted alignment is 256 bytes due to the way we
3057 	 * handle the index determination for the smaller caches.
3058 	 *
3059 	 * Make sure that nothing crazy happens if someone starts tinkering
3060 	 * around with ARCH_KMALLOC_MINALIGN
3061 	 */
3062 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3063 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3064 
3065 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
3066 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
3067 
3068 	if (KMALLOC_MIN_SIZE == 128) {
3069 		/*
3070 		 * The 192 byte sized cache is not used if the alignment
3071 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3072 		 * instead.
3073 		 */
3074 		for (i = 128 + 8; i <= 192; i += 8)
3075 			size_index[(i - 1) / 8] = 8;
3076 	}
3077 
3078 	slab_state = UP;
3079 
3080 	/* Provide the correct kmalloc names now that the caches are up */
3081 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
3082 		kmalloc_caches[i]. name =
3083 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
3084 
3085 #ifdef CONFIG_SMP
3086 	register_cpu_notifier(&slab_notifier);
3087 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
3088 				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
3089 #else
3090 	kmem_size = sizeof(struct kmem_cache);
3091 #endif
3092 
3093 	printk(KERN_INFO
3094 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3095 		" CPUs=%d, Nodes=%d\n",
3096 		caches, cache_line_size(),
3097 		slub_min_order, slub_max_order, slub_min_objects,
3098 		nr_cpu_ids, nr_node_ids);
3099 }
3100 
3101 /*
3102  * Find a mergeable slab cache
3103  */
3104 static int slab_unmergeable(struct kmem_cache *s)
3105 {
3106 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3107 		return 1;
3108 
3109 	if (s->ctor)
3110 		return 1;
3111 
3112 	/*
3113 	 * We may have set a slab to be unmergeable during bootstrap.
3114 	 */
3115 	if (s->refcount < 0)
3116 		return 1;
3117 
3118 	return 0;
3119 }
3120 
3121 static struct kmem_cache *find_mergeable(size_t size,
3122 		size_t align, unsigned long flags, const char *name,
3123 		void (*ctor)(void *))
3124 {
3125 	struct kmem_cache *s;
3126 
3127 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3128 		return NULL;
3129 
3130 	if (ctor)
3131 		return NULL;
3132 
3133 	size = ALIGN(size, sizeof(void *));
3134 	align = calculate_alignment(flags, align, size);
3135 	size = ALIGN(size, align);
3136 	flags = kmem_cache_flags(size, flags, name, NULL);
3137 
3138 	list_for_each_entry(s, &slab_caches, list) {
3139 		if (slab_unmergeable(s))
3140 			continue;
3141 
3142 		if (size > s->size)
3143 			continue;
3144 
3145 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3146 				continue;
3147 		/*
3148 		 * Check if alignment is compatible.
3149 		 * Courtesy of Adrian Drzewiecki
3150 		 */
3151 		if ((s->size & ~(align - 1)) != s->size)
3152 			continue;
3153 
3154 		if (s->size - size >= sizeof(void *))
3155 			continue;
3156 
3157 		return s;
3158 	}
3159 	return NULL;
3160 }
3161 
3162 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3163 		size_t align, unsigned long flags, void (*ctor)(void *))
3164 {
3165 	struct kmem_cache *s;
3166 
3167 	down_write(&slub_lock);
3168 	s = find_mergeable(size, align, flags, name, ctor);
3169 	if (s) {
3170 		int cpu;
3171 
3172 		s->refcount++;
3173 		/*
3174 		 * Adjust the object sizes so that we clear
3175 		 * the complete object on kzalloc.
3176 		 */
3177 		s->objsize = max(s->objsize, (int)size);
3178 
3179 		/*
3180 		 * And then we need to update the object size in the
3181 		 * per cpu structures
3182 		 */
3183 		for_each_online_cpu(cpu)
3184 			get_cpu_slab(s, cpu)->objsize = s->objsize;
3185 
3186 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3187 		up_write(&slub_lock);
3188 
3189 		if (sysfs_slab_alias(s, name)) {
3190 			down_write(&slub_lock);
3191 			s->refcount--;
3192 			up_write(&slub_lock);
3193 			goto err;
3194 		}
3195 		return s;
3196 	}
3197 
3198 	s = kmalloc(kmem_size, GFP_KERNEL);
3199 	if (s) {
3200 		if (kmem_cache_open(s, GFP_KERNEL, name,
3201 				size, align, flags, ctor)) {
3202 			list_add(&s->list, &slab_caches);
3203 			up_write(&slub_lock);
3204 			if (sysfs_slab_add(s)) {
3205 				down_write(&slub_lock);
3206 				list_del(&s->list);
3207 				up_write(&slub_lock);
3208 				kfree(s);
3209 				goto err;
3210 			}
3211 			return s;
3212 		}
3213 		kfree(s);
3214 	}
3215 	up_write(&slub_lock);
3216 
3217 err:
3218 	if (flags & SLAB_PANIC)
3219 		panic("Cannot create slabcache %s\n", name);
3220 	else
3221 		s = NULL;
3222 	return s;
3223 }
3224 EXPORT_SYMBOL(kmem_cache_create);
3225 
3226 #ifdef CONFIG_SMP
3227 /*
3228  * Use the cpu notifier to insure that the cpu slabs are flushed when
3229  * necessary.
3230  */
3231 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3232 		unsigned long action, void *hcpu)
3233 {
3234 	long cpu = (long)hcpu;
3235 	struct kmem_cache *s;
3236 	unsigned long flags;
3237 
3238 	switch (action) {
3239 	case CPU_UP_PREPARE:
3240 	case CPU_UP_PREPARE_FROZEN:
3241 		init_alloc_cpu_cpu(cpu);
3242 		down_read(&slub_lock);
3243 		list_for_each_entry(s, &slab_caches, list)
3244 			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3245 							GFP_KERNEL);
3246 		up_read(&slub_lock);
3247 		break;
3248 
3249 	case CPU_UP_CANCELED:
3250 	case CPU_UP_CANCELED_FROZEN:
3251 	case CPU_DEAD:
3252 	case CPU_DEAD_FROZEN:
3253 		down_read(&slub_lock);
3254 		list_for_each_entry(s, &slab_caches, list) {
3255 			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3256 
3257 			local_irq_save(flags);
3258 			__flush_cpu_slab(s, cpu);
3259 			local_irq_restore(flags);
3260 			free_kmem_cache_cpu(c, cpu);
3261 			s->cpu_slab[cpu] = NULL;
3262 		}
3263 		up_read(&slub_lock);
3264 		break;
3265 	default:
3266 		break;
3267 	}
3268 	return NOTIFY_OK;
3269 }
3270 
3271 static struct notifier_block __cpuinitdata slab_notifier = {
3272 	.notifier_call = slab_cpuup_callback
3273 };
3274 
3275 #endif
3276 
3277 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3278 {
3279 	struct kmem_cache *s;
3280 	void *ret;
3281 
3282 	if (unlikely(size > SLUB_MAX_SIZE))
3283 		return kmalloc_large(size, gfpflags);
3284 
3285 	s = get_slab(size, gfpflags);
3286 
3287 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3288 		return s;
3289 
3290 	ret = slab_alloc(s, gfpflags, -1, caller);
3291 
3292 	/* Honor the call site pointer we recieved. */
3293 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
3294 
3295 	return ret;
3296 }
3297 
3298 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3299 					int node, unsigned long caller)
3300 {
3301 	struct kmem_cache *s;
3302 	void *ret;
3303 
3304 	if (unlikely(size > SLUB_MAX_SIZE))
3305 		return kmalloc_large_node(size, gfpflags, node);
3306 
3307 	s = get_slab(size, gfpflags);
3308 
3309 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3310 		return s;
3311 
3312 	ret = slab_alloc(s, gfpflags, node, caller);
3313 
3314 	/* Honor the call site pointer we recieved. */
3315 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
3316 
3317 	return ret;
3318 }
3319 
3320 #ifdef CONFIG_SLUB_DEBUG
3321 static unsigned long count_partial(struct kmem_cache_node *n,
3322 					int (*get_count)(struct page *))
3323 {
3324 	unsigned long flags;
3325 	unsigned long x = 0;
3326 	struct page *page;
3327 
3328 	spin_lock_irqsave(&n->list_lock, flags);
3329 	list_for_each_entry(page, &n->partial, lru)
3330 		x += get_count(page);
3331 	spin_unlock_irqrestore(&n->list_lock, flags);
3332 	return x;
3333 }
3334 
3335 static int count_inuse(struct page *page)
3336 {
3337 	return page->inuse;
3338 }
3339 
3340 static int count_total(struct page *page)
3341 {
3342 	return page->objects;
3343 }
3344 
3345 static int count_free(struct page *page)
3346 {
3347 	return page->objects - page->inuse;
3348 }
3349 
3350 static int validate_slab(struct kmem_cache *s, struct page *page,
3351 						unsigned long *map)
3352 {
3353 	void *p;
3354 	void *addr = page_address(page);
3355 
3356 	if (!check_slab(s, page) ||
3357 			!on_freelist(s, page, NULL))
3358 		return 0;
3359 
3360 	/* Now we know that a valid freelist exists */
3361 	bitmap_zero(map, page->objects);
3362 
3363 	for_each_free_object(p, s, page->freelist) {
3364 		set_bit(slab_index(p, s, addr), map);
3365 		if (!check_object(s, page, p, 0))
3366 			return 0;
3367 	}
3368 
3369 	for_each_object(p, s, addr, page->objects)
3370 		if (!test_bit(slab_index(p, s, addr), map))
3371 			if (!check_object(s, page, p, 1))
3372 				return 0;
3373 	return 1;
3374 }
3375 
3376 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3377 						unsigned long *map)
3378 {
3379 	if (slab_trylock(page)) {
3380 		validate_slab(s, page, map);
3381 		slab_unlock(page);
3382 	} else
3383 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3384 			s->name, page);
3385 
3386 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3387 		if (!PageSlubDebug(page))
3388 			printk(KERN_ERR "SLUB %s: SlubDebug not set "
3389 				"on slab 0x%p\n", s->name, page);
3390 	} else {
3391 		if (PageSlubDebug(page))
3392 			printk(KERN_ERR "SLUB %s: SlubDebug set on "
3393 				"slab 0x%p\n", s->name, page);
3394 	}
3395 }
3396 
3397 static int validate_slab_node(struct kmem_cache *s,
3398 		struct kmem_cache_node *n, unsigned long *map)
3399 {
3400 	unsigned long count = 0;
3401 	struct page *page;
3402 	unsigned long flags;
3403 
3404 	spin_lock_irqsave(&n->list_lock, flags);
3405 
3406 	list_for_each_entry(page, &n->partial, lru) {
3407 		validate_slab_slab(s, page, map);
3408 		count++;
3409 	}
3410 	if (count != n->nr_partial)
3411 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3412 			"counter=%ld\n", s->name, count, n->nr_partial);
3413 
3414 	if (!(s->flags & SLAB_STORE_USER))
3415 		goto out;
3416 
3417 	list_for_each_entry(page, &n->full, lru) {
3418 		validate_slab_slab(s, page, map);
3419 		count++;
3420 	}
3421 	if (count != atomic_long_read(&n->nr_slabs))
3422 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3423 			"counter=%ld\n", s->name, count,
3424 			atomic_long_read(&n->nr_slabs));
3425 
3426 out:
3427 	spin_unlock_irqrestore(&n->list_lock, flags);
3428 	return count;
3429 }
3430 
3431 static long validate_slab_cache(struct kmem_cache *s)
3432 {
3433 	int node;
3434 	unsigned long count = 0;
3435 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3436 				sizeof(unsigned long), GFP_KERNEL);
3437 
3438 	if (!map)
3439 		return -ENOMEM;
3440 
3441 	flush_all(s);
3442 	for_each_node_state(node, N_NORMAL_MEMORY) {
3443 		struct kmem_cache_node *n = get_node(s, node);
3444 
3445 		count += validate_slab_node(s, n, map);
3446 	}
3447 	kfree(map);
3448 	return count;
3449 }
3450 
3451 #ifdef SLUB_RESILIENCY_TEST
3452 static void resiliency_test(void)
3453 {
3454 	u8 *p;
3455 
3456 	printk(KERN_ERR "SLUB resiliency testing\n");
3457 	printk(KERN_ERR "-----------------------\n");
3458 	printk(KERN_ERR "A. Corruption after allocation\n");
3459 
3460 	p = kzalloc(16, GFP_KERNEL);
3461 	p[16] = 0x12;
3462 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3463 			" 0x12->0x%p\n\n", p + 16);
3464 
3465 	validate_slab_cache(kmalloc_caches + 4);
3466 
3467 	/* Hmmm... The next two are dangerous */
3468 	p = kzalloc(32, GFP_KERNEL);
3469 	p[32 + sizeof(void *)] = 0x34;
3470 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3471 			" 0x34 -> -0x%p\n", p);
3472 	printk(KERN_ERR
3473 		"If allocated object is overwritten then not detectable\n\n");
3474 
3475 	validate_slab_cache(kmalloc_caches + 5);
3476 	p = kzalloc(64, GFP_KERNEL);
3477 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3478 	*p = 0x56;
3479 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3480 									p);
3481 	printk(KERN_ERR
3482 		"If allocated object is overwritten then not detectable\n\n");
3483 	validate_slab_cache(kmalloc_caches + 6);
3484 
3485 	printk(KERN_ERR "\nB. Corruption after free\n");
3486 	p = kzalloc(128, GFP_KERNEL);
3487 	kfree(p);
3488 	*p = 0x78;
3489 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3490 	validate_slab_cache(kmalloc_caches + 7);
3491 
3492 	p = kzalloc(256, GFP_KERNEL);
3493 	kfree(p);
3494 	p[50] = 0x9a;
3495 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3496 			p);
3497 	validate_slab_cache(kmalloc_caches + 8);
3498 
3499 	p = kzalloc(512, GFP_KERNEL);
3500 	kfree(p);
3501 	p[512] = 0xab;
3502 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3503 	validate_slab_cache(kmalloc_caches + 9);
3504 }
3505 #else
3506 static void resiliency_test(void) {};
3507 #endif
3508 
3509 /*
3510  * Generate lists of code addresses where slabcache objects are allocated
3511  * and freed.
3512  */
3513 
3514 struct location {
3515 	unsigned long count;
3516 	unsigned long addr;
3517 	long long sum_time;
3518 	long min_time;
3519 	long max_time;
3520 	long min_pid;
3521 	long max_pid;
3522 	DECLARE_BITMAP(cpus, NR_CPUS);
3523 	nodemask_t nodes;
3524 };
3525 
3526 struct loc_track {
3527 	unsigned long max;
3528 	unsigned long count;
3529 	struct location *loc;
3530 };
3531 
3532 static void free_loc_track(struct loc_track *t)
3533 {
3534 	if (t->max)
3535 		free_pages((unsigned long)t->loc,
3536 			get_order(sizeof(struct location) * t->max));
3537 }
3538 
3539 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3540 {
3541 	struct location *l;
3542 	int order;
3543 
3544 	order = get_order(sizeof(struct location) * max);
3545 
3546 	l = (void *)__get_free_pages(flags, order);
3547 	if (!l)
3548 		return 0;
3549 
3550 	if (t->count) {
3551 		memcpy(l, t->loc, sizeof(struct location) * t->count);
3552 		free_loc_track(t);
3553 	}
3554 	t->max = max;
3555 	t->loc = l;
3556 	return 1;
3557 }
3558 
3559 static int add_location(struct loc_track *t, struct kmem_cache *s,
3560 				const struct track *track)
3561 {
3562 	long start, end, pos;
3563 	struct location *l;
3564 	unsigned long caddr;
3565 	unsigned long age = jiffies - track->when;
3566 
3567 	start = -1;
3568 	end = t->count;
3569 
3570 	for ( ; ; ) {
3571 		pos = start + (end - start + 1) / 2;
3572 
3573 		/*
3574 		 * There is nothing at "end". If we end up there
3575 		 * we need to add something to before end.
3576 		 */
3577 		if (pos == end)
3578 			break;
3579 
3580 		caddr = t->loc[pos].addr;
3581 		if (track->addr == caddr) {
3582 
3583 			l = &t->loc[pos];
3584 			l->count++;
3585 			if (track->when) {
3586 				l->sum_time += age;
3587 				if (age < l->min_time)
3588 					l->min_time = age;
3589 				if (age > l->max_time)
3590 					l->max_time = age;
3591 
3592 				if (track->pid < l->min_pid)
3593 					l->min_pid = track->pid;
3594 				if (track->pid > l->max_pid)
3595 					l->max_pid = track->pid;
3596 
3597 				cpumask_set_cpu(track->cpu,
3598 						to_cpumask(l->cpus));
3599 			}
3600 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3601 			return 1;
3602 		}
3603 
3604 		if (track->addr < caddr)
3605 			end = pos;
3606 		else
3607 			start = pos;
3608 	}
3609 
3610 	/*
3611 	 * Not found. Insert new tracking element.
3612 	 */
3613 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3614 		return 0;
3615 
3616 	l = t->loc + pos;
3617 	if (pos < t->count)
3618 		memmove(l + 1, l,
3619 			(t->count - pos) * sizeof(struct location));
3620 	t->count++;
3621 	l->count = 1;
3622 	l->addr = track->addr;
3623 	l->sum_time = age;
3624 	l->min_time = age;
3625 	l->max_time = age;
3626 	l->min_pid = track->pid;
3627 	l->max_pid = track->pid;
3628 	cpumask_clear(to_cpumask(l->cpus));
3629 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
3630 	nodes_clear(l->nodes);
3631 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3632 	return 1;
3633 }
3634 
3635 static void process_slab(struct loc_track *t, struct kmem_cache *s,
3636 		struct page *page, enum track_item alloc)
3637 {
3638 	void *addr = page_address(page);
3639 	DECLARE_BITMAP(map, page->objects);
3640 	void *p;
3641 
3642 	bitmap_zero(map, page->objects);
3643 	for_each_free_object(p, s, page->freelist)
3644 		set_bit(slab_index(p, s, addr), map);
3645 
3646 	for_each_object(p, s, addr, page->objects)
3647 		if (!test_bit(slab_index(p, s, addr), map))
3648 			add_location(t, s, get_track(s, p, alloc));
3649 }
3650 
3651 static int list_locations(struct kmem_cache *s, char *buf,
3652 					enum track_item alloc)
3653 {
3654 	int len = 0;
3655 	unsigned long i;
3656 	struct loc_track t = { 0, 0, NULL };
3657 	int node;
3658 
3659 	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3660 			GFP_TEMPORARY))
3661 		return sprintf(buf, "Out of memory\n");
3662 
3663 	/* Push back cpu slabs */
3664 	flush_all(s);
3665 
3666 	for_each_node_state(node, N_NORMAL_MEMORY) {
3667 		struct kmem_cache_node *n = get_node(s, node);
3668 		unsigned long flags;
3669 		struct page *page;
3670 
3671 		if (!atomic_long_read(&n->nr_slabs))
3672 			continue;
3673 
3674 		spin_lock_irqsave(&n->list_lock, flags);
3675 		list_for_each_entry(page, &n->partial, lru)
3676 			process_slab(&t, s, page, alloc);
3677 		list_for_each_entry(page, &n->full, lru)
3678 			process_slab(&t, s, page, alloc);
3679 		spin_unlock_irqrestore(&n->list_lock, flags);
3680 	}
3681 
3682 	for (i = 0; i < t.count; i++) {
3683 		struct location *l = &t.loc[i];
3684 
3685 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
3686 			break;
3687 		len += sprintf(buf + len, "%7ld ", l->count);
3688 
3689 		if (l->addr)
3690 			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3691 		else
3692 			len += sprintf(buf + len, "<not-available>");
3693 
3694 		if (l->sum_time != l->min_time) {
3695 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3696 				l->min_time,
3697 				(long)div_u64(l->sum_time, l->count),
3698 				l->max_time);
3699 		} else
3700 			len += sprintf(buf + len, " age=%ld",
3701 				l->min_time);
3702 
3703 		if (l->min_pid != l->max_pid)
3704 			len += sprintf(buf + len, " pid=%ld-%ld",
3705 				l->min_pid, l->max_pid);
3706 		else
3707 			len += sprintf(buf + len, " pid=%ld",
3708 				l->min_pid);
3709 
3710 		if (num_online_cpus() > 1 &&
3711 				!cpumask_empty(to_cpumask(l->cpus)) &&
3712 				len < PAGE_SIZE - 60) {
3713 			len += sprintf(buf + len, " cpus=");
3714 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3715 						 to_cpumask(l->cpus));
3716 		}
3717 
3718 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3719 				len < PAGE_SIZE - 60) {
3720 			len += sprintf(buf + len, " nodes=");
3721 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3722 					l->nodes);
3723 		}
3724 
3725 		len += sprintf(buf + len, "\n");
3726 	}
3727 
3728 	free_loc_track(&t);
3729 	if (!t.count)
3730 		len += sprintf(buf, "No data\n");
3731 	return len;
3732 }
3733 
3734 enum slab_stat_type {
3735 	SL_ALL,			/* All slabs */
3736 	SL_PARTIAL,		/* Only partially allocated slabs */
3737 	SL_CPU,			/* Only slabs used for cpu caches */
3738 	SL_OBJECTS,		/* Determine allocated objects not slabs */
3739 	SL_TOTAL		/* Determine object capacity not slabs */
3740 };
3741 
3742 #define SO_ALL		(1 << SL_ALL)
3743 #define SO_PARTIAL	(1 << SL_PARTIAL)
3744 #define SO_CPU		(1 << SL_CPU)
3745 #define SO_OBJECTS	(1 << SL_OBJECTS)
3746 #define SO_TOTAL	(1 << SL_TOTAL)
3747 
3748 static ssize_t show_slab_objects(struct kmem_cache *s,
3749 			    char *buf, unsigned long flags)
3750 {
3751 	unsigned long total = 0;
3752 	int node;
3753 	int x;
3754 	unsigned long *nodes;
3755 	unsigned long *per_cpu;
3756 
3757 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3758 	if (!nodes)
3759 		return -ENOMEM;
3760 	per_cpu = nodes + nr_node_ids;
3761 
3762 	if (flags & SO_CPU) {
3763 		int cpu;
3764 
3765 		for_each_possible_cpu(cpu) {
3766 			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3767 
3768 			if (!c || c->node < 0)
3769 				continue;
3770 
3771 			if (c->page) {
3772 					if (flags & SO_TOTAL)
3773 						x = c->page->objects;
3774 				else if (flags & SO_OBJECTS)
3775 					x = c->page->inuse;
3776 				else
3777 					x = 1;
3778 
3779 				total += x;
3780 				nodes[c->node] += x;
3781 			}
3782 			per_cpu[c->node]++;
3783 		}
3784 	}
3785 
3786 	if (flags & SO_ALL) {
3787 		for_each_node_state(node, N_NORMAL_MEMORY) {
3788 			struct kmem_cache_node *n = get_node(s, node);
3789 
3790 		if (flags & SO_TOTAL)
3791 			x = atomic_long_read(&n->total_objects);
3792 		else if (flags & SO_OBJECTS)
3793 			x = atomic_long_read(&n->total_objects) -
3794 				count_partial(n, count_free);
3795 
3796 			else
3797 				x = atomic_long_read(&n->nr_slabs);
3798 			total += x;
3799 			nodes[node] += x;
3800 		}
3801 
3802 	} else if (flags & SO_PARTIAL) {
3803 		for_each_node_state(node, N_NORMAL_MEMORY) {
3804 			struct kmem_cache_node *n = get_node(s, node);
3805 
3806 			if (flags & SO_TOTAL)
3807 				x = count_partial(n, count_total);
3808 			else if (flags & SO_OBJECTS)
3809 				x = count_partial(n, count_inuse);
3810 			else
3811 				x = n->nr_partial;
3812 			total += x;
3813 			nodes[node] += x;
3814 		}
3815 	}
3816 	x = sprintf(buf, "%lu", total);
3817 #ifdef CONFIG_NUMA
3818 	for_each_node_state(node, N_NORMAL_MEMORY)
3819 		if (nodes[node])
3820 			x += sprintf(buf + x, " N%d=%lu",
3821 					node, nodes[node]);
3822 #endif
3823 	kfree(nodes);
3824 	return x + sprintf(buf + x, "\n");
3825 }
3826 
3827 static int any_slab_objects(struct kmem_cache *s)
3828 {
3829 	int node;
3830 
3831 	for_each_online_node(node) {
3832 		struct kmem_cache_node *n = get_node(s, node);
3833 
3834 		if (!n)
3835 			continue;
3836 
3837 		if (atomic_long_read(&n->total_objects))
3838 			return 1;
3839 	}
3840 	return 0;
3841 }
3842 
3843 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3844 #define to_slab(n) container_of(n, struct kmem_cache, kobj);
3845 
3846 struct slab_attribute {
3847 	struct attribute attr;
3848 	ssize_t (*show)(struct kmem_cache *s, char *buf);
3849 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3850 };
3851 
3852 #define SLAB_ATTR_RO(_name) \
3853 	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3854 
3855 #define SLAB_ATTR(_name) \
3856 	static struct slab_attribute _name##_attr =  \
3857 	__ATTR(_name, 0644, _name##_show, _name##_store)
3858 
3859 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3860 {
3861 	return sprintf(buf, "%d\n", s->size);
3862 }
3863 SLAB_ATTR_RO(slab_size);
3864 
3865 static ssize_t align_show(struct kmem_cache *s, char *buf)
3866 {
3867 	return sprintf(buf, "%d\n", s->align);
3868 }
3869 SLAB_ATTR_RO(align);
3870 
3871 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3872 {
3873 	return sprintf(buf, "%d\n", s->objsize);
3874 }
3875 SLAB_ATTR_RO(object_size);
3876 
3877 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3878 {
3879 	return sprintf(buf, "%d\n", oo_objects(s->oo));
3880 }
3881 SLAB_ATTR_RO(objs_per_slab);
3882 
3883 static ssize_t order_store(struct kmem_cache *s,
3884 				const char *buf, size_t length)
3885 {
3886 	unsigned long order;
3887 	int err;
3888 
3889 	err = strict_strtoul(buf, 10, &order);
3890 	if (err)
3891 		return err;
3892 
3893 	if (order > slub_max_order || order < slub_min_order)
3894 		return -EINVAL;
3895 
3896 	calculate_sizes(s, order);
3897 	return length;
3898 }
3899 
3900 static ssize_t order_show(struct kmem_cache *s, char *buf)
3901 {
3902 	return sprintf(buf, "%d\n", oo_order(s->oo));
3903 }
3904 SLAB_ATTR(order);
3905 
3906 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
3907 {
3908 	return sprintf(buf, "%lu\n", s->min_partial);
3909 }
3910 
3911 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
3912 				 size_t length)
3913 {
3914 	unsigned long min;
3915 	int err;
3916 
3917 	err = strict_strtoul(buf, 10, &min);
3918 	if (err)
3919 		return err;
3920 
3921 	set_min_partial(s, min);
3922 	return length;
3923 }
3924 SLAB_ATTR(min_partial);
3925 
3926 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3927 {
3928 	if (s->ctor) {
3929 		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3930 
3931 		return n + sprintf(buf + n, "\n");
3932 	}
3933 	return 0;
3934 }
3935 SLAB_ATTR_RO(ctor);
3936 
3937 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3938 {
3939 	return sprintf(buf, "%d\n", s->refcount - 1);
3940 }
3941 SLAB_ATTR_RO(aliases);
3942 
3943 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3944 {
3945 	return show_slab_objects(s, buf, SO_ALL);
3946 }
3947 SLAB_ATTR_RO(slabs);
3948 
3949 static ssize_t partial_show(struct kmem_cache *s, char *buf)
3950 {
3951 	return show_slab_objects(s, buf, SO_PARTIAL);
3952 }
3953 SLAB_ATTR_RO(partial);
3954 
3955 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3956 {
3957 	return show_slab_objects(s, buf, SO_CPU);
3958 }
3959 SLAB_ATTR_RO(cpu_slabs);
3960 
3961 static ssize_t objects_show(struct kmem_cache *s, char *buf)
3962 {
3963 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
3964 }
3965 SLAB_ATTR_RO(objects);
3966 
3967 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
3968 {
3969 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
3970 }
3971 SLAB_ATTR_RO(objects_partial);
3972 
3973 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
3974 {
3975 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
3976 }
3977 SLAB_ATTR_RO(total_objects);
3978 
3979 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3980 {
3981 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3982 }
3983 
3984 static ssize_t sanity_checks_store(struct kmem_cache *s,
3985 				const char *buf, size_t length)
3986 {
3987 	s->flags &= ~SLAB_DEBUG_FREE;
3988 	if (buf[0] == '1')
3989 		s->flags |= SLAB_DEBUG_FREE;
3990 	return length;
3991 }
3992 SLAB_ATTR(sanity_checks);
3993 
3994 static ssize_t trace_show(struct kmem_cache *s, char *buf)
3995 {
3996 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3997 }
3998 
3999 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4000 							size_t length)
4001 {
4002 	s->flags &= ~SLAB_TRACE;
4003 	if (buf[0] == '1')
4004 		s->flags |= SLAB_TRACE;
4005 	return length;
4006 }
4007 SLAB_ATTR(trace);
4008 
4009 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4010 {
4011 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4012 }
4013 
4014 static ssize_t reclaim_account_store(struct kmem_cache *s,
4015 				const char *buf, size_t length)
4016 {
4017 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4018 	if (buf[0] == '1')
4019 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4020 	return length;
4021 }
4022 SLAB_ATTR(reclaim_account);
4023 
4024 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4025 {
4026 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4027 }
4028 SLAB_ATTR_RO(hwcache_align);
4029 
4030 #ifdef CONFIG_ZONE_DMA
4031 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4032 {
4033 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4034 }
4035 SLAB_ATTR_RO(cache_dma);
4036 #endif
4037 
4038 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4039 {
4040 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4041 }
4042 SLAB_ATTR_RO(destroy_by_rcu);
4043 
4044 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4045 {
4046 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4047 }
4048 
4049 static ssize_t red_zone_store(struct kmem_cache *s,
4050 				const char *buf, size_t length)
4051 {
4052 	if (any_slab_objects(s))
4053 		return -EBUSY;
4054 
4055 	s->flags &= ~SLAB_RED_ZONE;
4056 	if (buf[0] == '1')
4057 		s->flags |= SLAB_RED_ZONE;
4058 	calculate_sizes(s, -1);
4059 	return length;
4060 }
4061 SLAB_ATTR(red_zone);
4062 
4063 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4064 {
4065 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4066 }
4067 
4068 static ssize_t poison_store(struct kmem_cache *s,
4069 				const char *buf, size_t length)
4070 {
4071 	if (any_slab_objects(s))
4072 		return -EBUSY;
4073 
4074 	s->flags &= ~SLAB_POISON;
4075 	if (buf[0] == '1')
4076 		s->flags |= SLAB_POISON;
4077 	calculate_sizes(s, -1);
4078 	return length;
4079 }
4080 SLAB_ATTR(poison);
4081 
4082 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4083 {
4084 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4085 }
4086 
4087 static ssize_t store_user_store(struct kmem_cache *s,
4088 				const char *buf, size_t length)
4089 {
4090 	if (any_slab_objects(s))
4091 		return -EBUSY;
4092 
4093 	s->flags &= ~SLAB_STORE_USER;
4094 	if (buf[0] == '1')
4095 		s->flags |= SLAB_STORE_USER;
4096 	calculate_sizes(s, -1);
4097 	return length;
4098 }
4099 SLAB_ATTR(store_user);
4100 
4101 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4102 {
4103 	return 0;
4104 }
4105 
4106 static ssize_t validate_store(struct kmem_cache *s,
4107 			const char *buf, size_t length)
4108 {
4109 	int ret = -EINVAL;
4110 
4111 	if (buf[0] == '1') {
4112 		ret = validate_slab_cache(s);
4113 		if (ret >= 0)
4114 			ret = length;
4115 	}
4116 	return ret;
4117 }
4118 SLAB_ATTR(validate);
4119 
4120 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4121 {
4122 	return 0;
4123 }
4124 
4125 static ssize_t shrink_store(struct kmem_cache *s,
4126 			const char *buf, size_t length)
4127 {
4128 	if (buf[0] == '1') {
4129 		int rc = kmem_cache_shrink(s);
4130 
4131 		if (rc)
4132 			return rc;
4133 	} else
4134 		return -EINVAL;
4135 	return length;
4136 }
4137 SLAB_ATTR(shrink);
4138 
4139 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4140 {
4141 	if (!(s->flags & SLAB_STORE_USER))
4142 		return -ENOSYS;
4143 	return list_locations(s, buf, TRACK_ALLOC);
4144 }
4145 SLAB_ATTR_RO(alloc_calls);
4146 
4147 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4148 {
4149 	if (!(s->flags & SLAB_STORE_USER))
4150 		return -ENOSYS;
4151 	return list_locations(s, buf, TRACK_FREE);
4152 }
4153 SLAB_ATTR_RO(free_calls);
4154 
4155 #ifdef CONFIG_NUMA
4156 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4157 {
4158 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4159 }
4160 
4161 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4162 				const char *buf, size_t length)
4163 {
4164 	unsigned long ratio;
4165 	int err;
4166 
4167 	err = strict_strtoul(buf, 10, &ratio);
4168 	if (err)
4169 		return err;
4170 
4171 	if (ratio <= 100)
4172 		s->remote_node_defrag_ratio = ratio * 10;
4173 
4174 	return length;
4175 }
4176 SLAB_ATTR(remote_node_defrag_ratio);
4177 #endif
4178 
4179 #ifdef CONFIG_SLUB_STATS
4180 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4181 {
4182 	unsigned long sum  = 0;
4183 	int cpu;
4184 	int len;
4185 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4186 
4187 	if (!data)
4188 		return -ENOMEM;
4189 
4190 	for_each_online_cpu(cpu) {
4191 		unsigned x = get_cpu_slab(s, cpu)->stat[si];
4192 
4193 		data[cpu] = x;
4194 		sum += x;
4195 	}
4196 
4197 	len = sprintf(buf, "%lu", sum);
4198 
4199 #ifdef CONFIG_SMP
4200 	for_each_online_cpu(cpu) {
4201 		if (data[cpu] && len < PAGE_SIZE - 20)
4202 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4203 	}
4204 #endif
4205 	kfree(data);
4206 	return len + sprintf(buf + len, "\n");
4207 }
4208 
4209 #define STAT_ATTR(si, text) 					\
4210 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4211 {								\
4212 	return show_stat(s, buf, si);				\
4213 }								\
4214 SLAB_ATTR_RO(text);						\
4215 
4216 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4217 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4218 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4219 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4220 STAT_ATTR(FREE_FROZEN, free_frozen);
4221 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4222 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4223 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4224 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4225 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4226 STAT_ATTR(FREE_SLAB, free_slab);
4227 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4228 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4229 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4230 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4231 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4232 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4233 STAT_ATTR(ORDER_FALLBACK, order_fallback);
4234 #endif
4235 
4236 static struct attribute *slab_attrs[] = {
4237 	&slab_size_attr.attr,
4238 	&object_size_attr.attr,
4239 	&objs_per_slab_attr.attr,
4240 	&order_attr.attr,
4241 	&min_partial_attr.attr,
4242 	&objects_attr.attr,
4243 	&objects_partial_attr.attr,
4244 	&total_objects_attr.attr,
4245 	&slabs_attr.attr,
4246 	&partial_attr.attr,
4247 	&cpu_slabs_attr.attr,
4248 	&ctor_attr.attr,
4249 	&aliases_attr.attr,
4250 	&align_attr.attr,
4251 	&sanity_checks_attr.attr,
4252 	&trace_attr.attr,
4253 	&hwcache_align_attr.attr,
4254 	&reclaim_account_attr.attr,
4255 	&destroy_by_rcu_attr.attr,
4256 	&red_zone_attr.attr,
4257 	&poison_attr.attr,
4258 	&store_user_attr.attr,
4259 	&validate_attr.attr,
4260 	&shrink_attr.attr,
4261 	&alloc_calls_attr.attr,
4262 	&free_calls_attr.attr,
4263 #ifdef CONFIG_ZONE_DMA
4264 	&cache_dma_attr.attr,
4265 #endif
4266 #ifdef CONFIG_NUMA
4267 	&remote_node_defrag_ratio_attr.attr,
4268 #endif
4269 #ifdef CONFIG_SLUB_STATS
4270 	&alloc_fastpath_attr.attr,
4271 	&alloc_slowpath_attr.attr,
4272 	&free_fastpath_attr.attr,
4273 	&free_slowpath_attr.attr,
4274 	&free_frozen_attr.attr,
4275 	&free_add_partial_attr.attr,
4276 	&free_remove_partial_attr.attr,
4277 	&alloc_from_partial_attr.attr,
4278 	&alloc_slab_attr.attr,
4279 	&alloc_refill_attr.attr,
4280 	&free_slab_attr.attr,
4281 	&cpuslab_flush_attr.attr,
4282 	&deactivate_full_attr.attr,
4283 	&deactivate_empty_attr.attr,
4284 	&deactivate_to_head_attr.attr,
4285 	&deactivate_to_tail_attr.attr,
4286 	&deactivate_remote_frees_attr.attr,
4287 	&order_fallback_attr.attr,
4288 #endif
4289 	NULL
4290 };
4291 
4292 static struct attribute_group slab_attr_group = {
4293 	.attrs = slab_attrs,
4294 };
4295 
4296 static ssize_t slab_attr_show(struct kobject *kobj,
4297 				struct attribute *attr,
4298 				char *buf)
4299 {
4300 	struct slab_attribute *attribute;
4301 	struct kmem_cache *s;
4302 	int err;
4303 
4304 	attribute = to_slab_attr(attr);
4305 	s = to_slab(kobj);
4306 
4307 	if (!attribute->show)
4308 		return -EIO;
4309 
4310 	err = attribute->show(s, buf);
4311 
4312 	return err;
4313 }
4314 
4315 static ssize_t slab_attr_store(struct kobject *kobj,
4316 				struct attribute *attr,
4317 				const char *buf, size_t len)
4318 {
4319 	struct slab_attribute *attribute;
4320 	struct kmem_cache *s;
4321 	int err;
4322 
4323 	attribute = to_slab_attr(attr);
4324 	s = to_slab(kobj);
4325 
4326 	if (!attribute->store)
4327 		return -EIO;
4328 
4329 	err = attribute->store(s, buf, len);
4330 
4331 	return err;
4332 }
4333 
4334 static void kmem_cache_release(struct kobject *kobj)
4335 {
4336 	struct kmem_cache *s = to_slab(kobj);
4337 
4338 	kfree(s);
4339 }
4340 
4341 static struct sysfs_ops slab_sysfs_ops = {
4342 	.show = slab_attr_show,
4343 	.store = slab_attr_store,
4344 };
4345 
4346 static struct kobj_type slab_ktype = {
4347 	.sysfs_ops = &slab_sysfs_ops,
4348 	.release = kmem_cache_release
4349 };
4350 
4351 static int uevent_filter(struct kset *kset, struct kobject *kobj)
4352 {
4353 	struct kobj_type *ktype = get_ktype(kobj);
4354 
4355 	if (ktype == &slab_ktype)
4356 		return 1;
4357 	return 0;
4358 }
4359 
4360 static struct kset_uevent_ops slab_uevent_ops = {
4361 	.filter = uevent_filter,
4362 };
4363 
4364 static struct kset *slab_kset;
4365 
4366 #define ID_STR_LENGTH 64
4367 
4368 /* Create a unique string id for a slab cache:
4369  *
4370  * Format	:[flags-]size
4371  */
4372 static char *create_unique_id(struct kmem_cache *s)
4373 {
4374 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4375 	char *p = name;
4376 
4377 	BUG_ON(!name);
4378 
4379 	*p++ = ':';
4380 	/*
4381 	 * First flags affecting slabcache operations. We will only
4382 	 * get here for aliasable slabs so we do not need to support
4383 	 * too many flags. The flags here must cover all flags that
4384 	 * are matched during merging to guarantee that the id is
4385 	 * unique.
4386 	 */
4387 	if (s->flags & SLAB_CACHE_DMA)
4388 		*p++ = 'd';
4389 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4390 		*p++ = 'a';
4391 	if (s->flags & SLAB_DEBUG_FREE)
4392 		*p++ = 'F';
4393 	if (p != name + 1)
4394 		*p++ = '-';
4395 	p += sprintf(p, "%07d", s->size);
4396 	BUG_ON(p > name + ID_STR_LENGTH - 1);
4397 	return name;
4398 }
4399 
4400 static int sysfs_slab_add(struct kmem_cache *s)
4401 {
4402 	int err;
4403 	const char *name;
4404 	int unmergeable;
4405 
4406 	if (slab_state < SYSFS)
4407 		/* Defer until later */
4408 		return 0;
4409 
4410 	unmergeable = slab_unmergeable(s);
4411 	if (unmergeable) {
4412 		/*
4413 		 * Slabcache can never be merged so we can use the name proper.
4414 		 * This is typically the case for debug situations. In that
4415 		 * case we can catch duplicate names easily.
4416 		 */
4417 		sysfs_remove_link(&slab_kset->kobj, s->name);
4418 		name = s->name;
4419 	} else {
4420 		/*
4421 		 * Create a unique name for the slab as a target
4422 		 * for the symlinks.
4423 		 */
4424 		name = create_unique_id(s);
4425 	}
4426 
4427 	s->kobj.kset = slab_kset;
4428 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4429 	if (err) {
4430 		kobject_put(&s->kobj);
4431 		return err;
4432 	}
4433 
4434 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4435 	if (err)
4436 		return err;
4437 	kobject_uevent(&s->kobj, KOBJ_ADD);
4438 	if (!unmergeable) {
4439 		/* Setup first alias */
4440 		sysfs_slab_alias(s, s->name);
4441 		kfree(name);
4442 	}
4443 	return 0;
4444 }
4445 
4446 static void sysfs_slab_remove(struct kmem_cache *s)
4447 {
4448 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4449 	kobject_del(&s->kobj);
4450 	kobject_put(&s->kobj);
4451 }
4452 
4453 /*
4454  * Need to buffer aliases during bootup until sysfs becomes
4455  * available lest we lose that information.
4456  */
4457 struct saved_alias {
4458 	struct kmem_cache *s;
4459 	const char *name;
4460 	struct saved_alias *next;
4461 };
4462 
4463 static struct saved_alias *alias_list;
4464 
4465 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4466 {
4467 	struct saved_alias *al;
4468 
4469 	if (slab_state == SYSFS) {
4470 		/*
4471 		 * If we have a leftover link then remove it.
4472 		 */
4473 		sysfs_remove_link(&slab_kset->kobj, name);
4474 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4475 	}
4476 
4477 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4478 	if (!al)
4479 		return -ENOMEM;
4480 
4481 	al->s = s;
4482 	al->name = name;
4483 	al->next = alias_list;
4484 	alias_list = al;
4485 	return 0;
4486 }
4487 
4488 static int __init slab_sysfs_init(void)
4489 {
4490 	struct kmem_cache *s;
4491 	int err;
4492 
4493 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4494 	if (!slab_kset) {
4495 		printk(KERN_ERR "Cannot register slab subsystem.\n");
4496 		return -ENOSYS;
4497 	}
4498 
4499 	slab_state = SYSFS;
4500 
4501 	list_for_each_entry(s, &slab_caches, list) {
4502 		err = sysfs_slab_add(s);
4503 		if (err)
4504 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4505 						" to sysfs\n", s->name);
4506 	}
4507 
4508 	while (alias_list) {
4509 		struct saved_alias *al = alias_list;
4510 
4511 		alias_list = alias_list->next;
4512 		err = sysfs_slab_alias(al->s, al->name);
4513 		if (err)
4514 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4515 					" %s to sysfs\n", s->name);
4516 		kfree(al);
4517 	}
4518 
4519 	resiliency_test();
4520 	return 0;
4521 }
4522 
4523 __initcall(slab_sysfs_init);
4524 #endif
4525 
4526 /*
4527  * The /proc/slabinfo ABI
4528  */
4529 #ifdef CONFIG_SLABINFO
4530 static void print_slabinfo_header(struct seq_file *m)
4531 {
4532 	seq_puts(m, "slabinfo - version: 2.1\n");
4533 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4534 		 "<objperslab> <pagesperslab>");
4535 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4536 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4537 	seq_putc(m, '\n');
4538 }
4539 
4540 static void *s_start(struct seq_file *m, loff_t *pos)
4541 {
4542 	loff_t n = *pos;
4543 
4544 	down_read(&slub_lock);
4545 	if (!n)
4546 		print_slabinfo_header(m);
4547 
4548 	return seq_list_start(&slab_caches, *pos);
4549 }
4550 
4551 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4552 {
4553 	return seq_list_next(p, &slab_caches, pos);
4554 }
4555 
4556 static void s_stop(struct seq_file *m, void *p)
4557 {
4558 	up_read(&slub_lock);
4559 }
4560 
4561 static int s_show(struct seq_file *m, void *p)
4562 {
4563 	unsigned long nr_partials = 0;
4564 	unsigned long nr_slabs = 0;
4565 	unsigned long nr_inuse = 0;
4566 	unsigned long nr_objs = 0;
4567 	unsigned long nr_free = 0;
4568 	struct kmem_cache *s;
4569 	int node;
4570 
4571 	s = list_entry(p, struct kmem_cache, list);
4572 
4573 	for_each_online_node(node) {
4574 		struct kmem_cache_node *n = get_node(s, node);
4575 
4576 		if (!n)
4577 			continue;
4578 
4579 		nr_partials += n->nr_partial;
4580 		nr_slabs += atomic_long_read(&n->nr_slabs);
4581 		nr_objs += atomic_long_read(&n->total_objects);
4582 		nr_free += count_partial(n, count_free);
4583 	}
4584 
4585 	nr_inuse = nr_objs - nr_free;
4586 
4587 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4588 		   nr_objs, s->size, oo_objects(s->oo),
4589 		   (1 << oo_order(s->oo)));
4590 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4591 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4592 		   0UL);
4593 	seq_putc(m, '\n');
4594 	return 0;
4595 }
4596 
4597 static const struct seq_operations slabinfo_op = {
4598 	.start = s_start,
4599 	.next = s_next,
4600 	.stop = s_stop,
4601 	.show = s_show,
4602 };
4603 
4604 static int slabinfo_open(struct inode *inode, struct file *file)
4605 {
4606 	return seq_open(file, &slabinfo_op);
4607 }
4608 
4609 static const struct file_operations proc_slabinfo_operations = {
4610 	.open		= slabinfo_open,
4611 	.read		= seq_read,
4612 	.llseek		= seq_lseek,
4613 	.release	= seq_release,
4614 };
4615 
4616 static int __init slab_proc_init(void)
4617 {
4618 	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
4619 	return 0;
4620 }
4621 module_init(slab_proc_init);
4622 #endif /* CONFIG_SLABINFO */
4623