1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks and only
6  * uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/swap.h> /* struct reclaim_state */
13 #include <linux/module.h>
14 #include <linux/bit_spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/bitops.h>
17 #include <linux/slab.h>
18 #include <linux/proc_fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/kmemtrace.h>
21 #include <linux/cpu.h>
22 #include <linux/cpuset.h>
23 #include <linux/kmemleak.h>
24 #include <linux/mempolicy.h>
25 #include <linux/ctype.h>
26 #include <linux/debugobjects.h>
27 #include <linux/kallsyms.h>
28 #include <linux/memory.h>
29 #include <linux/math64.h>
30 #include <linux/fault-inject.h>
31 
32 /*
33  * Lock order:
34  *   1. slab_lock(page)
35  *   2. node->list_lock
36  *
37  *   The slab_lock protects operations on the objects of a particular
38  *   slab and its metadata in the page struct. If the slab lock
39  *   has been taken then no allocations nor frees can be performed
40  *   on the objects in the slab nor can the slab be added to or removed
41  *   from the partial or full lists since this would mean modifying
42  *   the page struct of the slab.
43  *
44  *   The list_lock protects the partial and full list on each node and
45  *   the partial slab counter. If taken then no new slabs may be added or
46  *   removed from the lists nor can the number of partial slabs be modified.
47  *   (Note that the total number of slabs is an atomic value that may be
48  *   modified without taking the list lock).
49  *
50  *   The list_lock is a centralized lock and thus we avoid taking it as
51  *   much as possible. As long as SLUB does not have to handle partial
52  *   slabs, operations can continue without any centralized lock. E.g.
53  *   allocating a long series of objects that fill up slabs does not require
54  *   the list lock.
55  *
56  *   The lock order is sometimes inverted when we are trying to get a slab
57  *   off a list. We take the list_lock and then look for a page on the list
58  *   to use. While we do that objects in the slabs may be freed. We can
59  *   only operate on the slab if we have also taken the slab_lock. So we use
60  *   a slab_trylock() on the slab. If trylock was successful then no frees
61  *   can occur anymore and we can use the slab for allocations etc. If the
62  *   slab_trylock() does not succeed then frees are in progress in the slab and
63  *   we must stay away from it for a while since we may cause a bouncing
64  *   cacheline if we try to acquire the lock. So go onto the next slab.
65  *   If all pages are busy then we may allocate a new slab instead of reusing
66  *   a partial slab. A new slab has no one operating on it and thus there is
67  *   no danger of cacheline contention.
68  *
69  *   Interrupts are disabled during allocation and deallocation in order to
70  *   make the slab allocator safe to use in the context of an irq. In addition
71  *   interrupts are disabled to ensure that the processor does not change
72  *   while handling per_cpu slabs, due to kernel preemption.
73  *
74  * SLUB assigns one slab for allocation to each processor.
75  * Allocations only occur from these slabs called cpu slabs.
76  *
77  * Slabs with free elements are kept on a partial list and during regular
78  * operations no list for full slabs is used. If an object in a full slab is
79  * freed then the slab will show up again on the partial lists.
80  * We track full slabs for debugging purposes though because otherwise we
81  * cannot scan all objects.
82  *
83  * Slabs are freed when they become empty. Teardown and setup are
84  * minimal, so we rely on the page allocator's per cpu caches for
85  * fast frees and allocs.
86  *
87  * Overloading of page flags that are otherwise used for LRU management.
88  *
89  * PageActive 		The slab is frozen and exempt from list processing.
90  * 			This means that the slab is dedicated to a purpose
91  * 			such as satisfying allocations for a specific
92  * 			processor. Objects may be freed in the slab while
93  * 			it is frozen but slab_free will then skip the usual
94  * 			list operations. It is up to the processor holding
95  * 			the slab to integrate the slab into the slab lists
96  * 			when the slab is no longer needed.
97  *
98  * 			One use of this flag is to mark slabs that are
99  * 			used for allocations. Then such a slab becomes a cpu
100  * 			slab. The cpu slab may be equipped with an additional
101  * 			freelist that allows lockless access to
102  * 			free objects in addition to the regular freelist
103  * 			that requires the slab lock.
104  *
105  * PageError		Slab requires special handling due to debug
106  * 			options set. This moves slab handling out of
107  * 			the fast path and disables lockless freelists.
108  */
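
/*
 * Illustrative sketch (not part of the allocator): the inverted lock
 * order described above, in the spirit of get_partial_node() further
 * down. "n" is assumed to be a kmem_cache_node. A slab we fail to
 * trylock has frees in progress, so we skip it rather than bounce its
 * cacheline:
 *
 *	spin_lock(&n->list_lock);
 *	list_for_each_entry(page, &n->partial, lru) {
 *		if (slab_trylock(page)) {
 *			... the slab is now ours; no frees can occur ...
 *			break;
 *		}
 *		... busy slab: move on to the next one ...
 *	}
 *	spin_unlock(&n->list_lock);
 */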
109 
110 #ifdef CONFIG_SLUB_DEBUG
111 #define SLABDEBUG 1
112 #else
113 #define SLABDEBUG 0
114 #endif
115 
116 /*
117  * Issues still to be resolved:
118  *
119  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
120  *
121  * - Variable sizing of the per node arrays
122  */
123 
124 /* Enable to test recovery from slab corruption on boot */
125 #undef SLUB_RESILIENCY_TEST
126 
127 /*
128  * Minimum number of partial slabs. These will be left on the partial
129  * lists even if they are empty. kmem_cache_shrink may reclaim them.
130  */
131 #define MIN_PARTIAL 5
132 
133 /*
134  * Maximum number of desirable partial slabs.
135  * The existence of more partial slabs makes kmem_cache_shrink
136  * sort the partial list by the number of objects in use.
137  */
138 #define MAX_PARTIAL 10
139 
140 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
141 				SLAB_POISON | SLAB_STORE_USER)
142 
143 /*
144  * Set of flags that will prevent slab merging
145  */
146 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
147 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
148 
149 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
150 		SLAB_CACHE_DMA)
151 
152 #ifndef ARCH_KMALLOC_MINALIGN
153 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
154 #endif
155 
156 #ifndef ARCH_SLAB_MINALIGN
157 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
158 #endif
159 
160 #define OO_SHIFT	16
161 #define OO_MASK		((1 << OO_SHIFT) - 1)
162 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
163 
164 /* Internal SLUB flags */
165 #define __OBJECT_POISON		0x80000000 /* Poison object */
166 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
167 
168 static int kmem_size = sizeof(struct kmem_cache);
169 
170 #ifdef CONFIG_SMP
171 static struct notifier_block slab_notifier;
172 #endif
173 
174 static enum {
175 	DOWN,		/* No slab functionality available */
176 	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
177 	UP,		/* Everything works but does not show up in sysfs */
178 	SYSFS		/* Sysfs up */
179 } slab_state = DOWN;
180 
181 /* A list of all slab caches on the system */
182 static DECLARE_RWSEM(slub_lock);
183 static LIST_HEAD(slab_caches);
184 
185 /*
186  * Tracking user of a slab.
187  */
188 struct track {
189 	unsigned long addr;	/* Called from address */
190 	int cpu;		/* Was running on cpu */
191 	int pid;		/* Pid context */
192 	unsigned long when;	/* When did the operation occur */
193 };
194 
195 enum track_item { TRACK_ALLOC, TRACK_FREE };
196 
197 #ifdef CONFIG_SLUB_DEBUG
198 static int sysfs_slab_add(struct kmem_cache *);
199 static int sysfs_slab_alias(struct kmem_cache *, const char *);
200 static void sysfs_slab_remove(struct kmem_cache *);
201 
202 #else
203 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
204 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
205 							{ return 0; }
206 static inline void sysfs_slab_remove(struct kmem_cache *s)
207 {
208 	kfree(s);
209 }
210 
211 #endif
212 
213 static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
214 {
215 #ifdef CONFIG_SLUB_STATS
216 	c->stat[si]++;
217 #endif
218 }
219 
220 /********************************************************************
221  * 			Core slab cache functions
222  *******************************************************************/
223 
224 int slab_is_available(void)
225 {
226 	return slab_state >= UP;
227 }
228 
229 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
230 {
231 #ifdef CONFIG_NUMA
232 	return s->node[node];
233 #else
234 	return &s->local_node;
235 #endif
236 }
237 
238 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
239 {
240 #ifdef CONFIG_SMP
241 	return s->cpu_slab[cpu];
242 #else
243 	return &s->cpu_slab;
244 #endif
245 }
246 
247 /* Verify that a pointer has an address that is valid within a slab page */
248 static inline int check_valid_pointer(struct kmem_cache *s,
249 				struct page *page, const void *object)
250 {
251 	void *base;
252 
253 	if (!object)
254 		return 1;
255 
256 	base = page_address(page);
257 	if (object < base || object >= base + page->objects * s->size ||
258 		(object - base) % s->size) {
259 		return 0;
260 	}
261 
262 	return 1;
263 }
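
/*
 * Example (hypothetical numbers): with s->size == 256 and
 * page->objects == 16, the only valid object pointers are
 * base + 0, base + 256, ..., base + 15 * 256. A pointer such as
 * base + 100 fails the modulo test and base + 16 * 256 fails the
 * range test, so both are rejected above.
 */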
264 
265 /*
266  * Slow version of get and set free pointer.
267  *
268  * This version requires touching the cache lines of kmem_cache which
269  * we avoid touching in the fast alloc/free paths. There we obtain the offset
270  * from the page struct.
271  */
272 static inline void *get_freepointer(struct kmem_cache *s, void *object)
273 {
274 	return *(void **)(object + s->offset);
275 }
276 
277 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
278 {
279 	*(void **)(object + s->offset) = fp;
280 }
281 
282 /* Loop over all objects in a slab */
283 #define for_each_object(__p, __s, __addr, __objects) \
284 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
285 			__p += (__s)->size)
286 
287 /* Scan freelist */
288 #define for_each_free_object(__p, __s, __free) \
289 	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
290 
291 /* Determine object index from a given position */
292 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
293 {
294 	return (p - addr) / s->size;
295 }
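
/*
 * Usage sketch (not part of this file): counting total and free
 * objects in a slab with the iterators above. "page" is assumed to be
 * a slab page of cache "s" held stable, e.g. under slab_lock:
 *
 *	void *p;
 *	int nr_total = 0, nr_free = 0;
 *
 *	for_each_object(p, s, page_address(page), page->objects)
 *		nr_total++;
 *	for_each_free_object(p, s, page->freelist)
 *		nr_free++;
 */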
296 
297 static inline struct kmem_cache_order_objects oo_make(int order,
298 						unsigned long size)
299 {
300 	struct kmem_cache_order_objects x = {
301 		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
302 	};
303 
304 	return x;
305 }
306 
307 static inline int oo_order(struct kmem_cache_order_objects x)
308 {
309 	return x.x >> OO_SHIFT;
310 }
311 
312 static inline int oo_objects(struct kmem_cache_order_objects x)
313 {
314 	return x.x & OO_MASK;
315 }
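
/*
 * Worked example (assuming 4K pages): for order 1 and size 256,
 * oo_make() stores (1 << 16) + (8192 / 256) = 0x10020 in a single
 * word. oo_order() then recovers 1 and oo_objects() recovers 32.
 */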
316 
317 #ifdef CONFIG_SLUB_DEBUG
318 /*
319  * Debug settings:
320  */
321 #ifdef CONFIG_SLUB_DEBUG_ON
322 static int slub_debug = DEBUG_DEFAULT_FLAGS;
323 #else
324 static int slub_debug;
325 #endif
326 
327 static char *slub_debug_slabs;
328 
329 /*
330  * Object debugging
331  */
332 static void print_section(char *text, u8 *addr, unsigned int length)
333 {
334 	int i, offset;
335 	int newline = 1;
336 	char ascii[17];
337 
338 	ascii[16] = 0;
339 
340 	for (i = 0; i < length; i++) {
341 		if (newline) {
342 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
343 			newline = 0;
344 		}
345 		printk(KERN_CONT " %02x", addr[i]);
346 		offset = i % 16;
347 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
348 		if (offset == 15) {
349 			printk(KERN_CONT " %s\n", ascii);
350 			newline = 1;
351 		}
352 	}
353 	if (!newline) {
354 		i %= 16;
355 		while (i < 16) {
356 			printk(KERN_CONT "   ");
357 			ascii[i] = ' ';
358 			i++;
359 		}
360 		printk(KERN_CONT " %s\n", ascii);
361 	}
362 }
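
/*
 * Sample line of output from the above (address and contents are
 * illustrative): sixteen hex bytes followed by their ASCII rendering,
 * with non-printable bytes shown as '.':
 *
 *	Object 0xffff88003c34a000:  6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
 */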
363 
364 static struct track *get_track(struct kmem_cache *s, void *object,
365 	enum track_item alloc)
366 {
367 	struct track *p;
368 
369 	if (s->offset)
370 		p = object + s->offset + sizeof(void *);
371 	else
372 		p = object + s->inuse;
373 
374 	return p + alloc;
375 }
376 
377 static void set_track(struct kmem_cache *s, void *object,
378 			enum track_item alloc, unsigned long addr)
379 {
380 	struct track *p = get_track(s, object, alloc);
381 
382 	if (addr) {
383 		p->addr = addr;
384 		p->cpu = smp_processor_id();
385 		p->pid = current->pid;
386 		p->when = jiffies;
387 	} else
388 		memset(p, 0, sizeof(struct track));
389 }
390 
391 static void init_tracking(struct kmem_cache *s, void *object)
392 {
393 	if (!(s->flags & SLAB_STORE_USER))
394 		return;
395 
396 	set_track(s, object, TRACK_FREE, 0UL);
397 	set_track(s, object, TRACK_ALLOC, 0UL);
398 }
399 
400 static void print_track(const char *s, struct track *t)
401 {
402 	if (!t->addr)
403 		return;
404 
405 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
406 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
407 }
408 
409 static void print_tracking(struct kmem_cache *s, void *object)
410 {
411 	if (!(s->flags & SLAB_STORE_USER))
412 		return;
413 
414 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
415 	print_track("Freed", get_track(s, object, TRACK_FREE));
416 }
417 
418 static void print_page_info(struct page *page)
419 {
420 	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
421 		page, page->objects, page->inuse, page->freelist, page->flags);
423 }
424 
425 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
426 {
427 	va_list args;
428 	char buf[100];
429 
430 	va_start(args, fmt);
431 	vsnprintf(buf, sizeof(buf), fmt, args);
432 	va_end(args);
433 	printk(KERN_ERR "========================================"
434 			"=====================================\n");
435 	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
436 	printk(KERN_ERR "----------------------------------------"
437 			"-------------------------------------\n\n");
438 }
439 
440 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
441 {
442 	va_list args;
443 	char buf[100];
444 
445 	va_start(args, fmt);
446 	vsnprintf(buf, sizeof(buf), fmt, args);
447 	va_end(args);
448 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
449 }
450 
451 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
452 {
453 	unsigned int off;	/* Offset of last byte */
454 	u8 *addr = page_address(page);
455 
456 	print_tracking(s, p);
457 
458 	print_page_info(page);
459 
460 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
461 			p, p - addr, get_freepointer(s, p));
462 
463 	if (p > addr + 16)
464 		print_section("Bytes b4", p - 16, 16);
465 
466 	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
467 
468 	if (s->flags & SLAB_RED_ZONE)
469 		print_section("Redzone", p + s->objsize,
470 			s->inuse - s->objsize);
471 
472 	if (s->offset)
473 		off = s->offset + sizeof(void *);
474 	else
475 		off = s->inuse;
476 
477 	if (s->flags & SLAB_STORE_USER)
478 		off += 2 * sizeof(struct track);
479 
480 	if (off != s->size)
481 		/* Beginning of the filler is the free pointer */
482 		print_section("Padding", p + off, s->size - off);
483 
484 	dump_stack();
485 }
486 
487 static void object_err(struct kmem_cache *s, struct page *page,
488 			u8 *object, char *reason)
489 {
490 	slab_bug(s, "%s", reason);
491 	print_trailer(s, page, object);
492 }
493 
494 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
495 {
496 	va_list args;
497 	char buf[100];
498 
499 	va_start(args, fmt);
500 	vsnprintf(buf, sizeof(buf), fmt, args);
501 	va_end(args);
502 	slab_bug(s, "%s", buf);
503 	print_page_info(page);
504 	dump_stack();
505 }
506 
507 static void init_object(struct kmem_cache *s, void *object, int active)
508 {
509 	u8 *p = object;
510 
511 	if (s->flags & __OBJECT_POISON) {
512 		memset(p, POISON_FREE, s->objsize - 1);
513 		p[s->objsize - 1] = POISON_END;
514 	}
515 
516 	if (s->flags & SLAB_RED_ZONE)
517 		memset(p + s->objsize,
518 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
519 			s->inuse - s->objsize);
520 }
521 
522 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
523 {
524 	while (bytes) {
525 		if (*start != (u8)value)
526 			return start;
527 		start++;
528 		bytes--;
529 	}
530 	return NULL;
531 }
532 
533 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
534 						void *from, void *to)
535 {
536 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
537 	memset(from, data, to - from);
538 }
539 
540 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
541 			u8 *object, char *what,
542 			u8 *start, unsigned int value, unsigned int bytes)
543 {
544 	u8 *fault;
545 	u8 *end;
546 
547 	fault = check_bytes(start, value, bytes);
548 	if (!fault)
549 		return 1;
550 
551 	end = start + bytes;
552 	while (end > fault && end[-1] == value)
553 		end--;
554 
555 	slab_bug(s, "%s overwritten", what);
556 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
557 					fault, end - 1, fault[0], value);
558 	print_trailer(s, page, object);
559 
560 	restore_bytes(s, what, value, fault, end);
561 	return 0;
562 }
563 
564 /*
565  * Object layout:
566  *
567  * object address
568  * 	Bytes of the object to be managed.
569  * 	If the freepointer may overlay the object then the free
570  * 	pointer is the first word of the object.
571  *
572  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
573  * 	0xa5 (POISON_END)
574  *
575  * object + s->objsize
576  * 	Padding to reach word boundary. This is also used for Redzoning.
577  * 	Padding is extended by another word if Redzoning is enabled and
578  * 	objsize == inuse.
579  *
580  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
581  * 	0xcc (RED_ACTIVE) for objects in use.
582  *
583  * object + s->inuse
584  * 	Meta data starts here.
585  *
586  * 	A. Free pointer (if we cannot overwrite object on free)
587  * 	B. Tracking data for SLAB_STORE_USER
588  * 	C. Padding to reach required alignment boundary or at minimum
589  * 		one word if debugging is on to be able to detect writes
590  * 		before the word boundary.
591  *
592  *	Padding is done using 0x5a (POISON_INUSE)
593  *
594  * object + s->size
595  * 	Nothing is used beyond s->size.
596  *
597  * If slabcaches are merged then the objsize and inuse boundaries are mostly
598  * ignored, and therefore no slab options that rely on these boundaries
599  * may be used with merged slabcaches.
600  */
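
/*
 * One plausible layout under the rules above (illustrative only,
 * assuming a 64-bit kernel, objsize = 24, poisoning, red zoning and
 * SLAB_STORE_USER, so the free pointer cannot overlay the object):
 *
 *	bytes  0..23	object (0x6b poison, last byte 0xa5 when free)
 *	bytes 24..31	red zone word (0xbb inactive / 0xcc active); inuse = 32
 *	bytes 32..39	free pointer (s->offset = 32)
 *	bytes 40..87	two struct track entries (alloc and free)
 *
 * followed by any padding needed to reach the required alignment,
 * giving s->size. The exact offsets of a real cache are computed by
 * calculate_sizes().
 */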
601 
602 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
603 {
604 	unsigned long off = s->inuse;	/* The end of info */
605 
606 	if (s->offset)
607 		/* Freepointer is placed after the object. */
608 		off += sizeof(void *);
609 
610 	if (s->flags & SLAB_STORE_USER)
611 		/* We also have user information there */
612 		off += 2 * sizeof(struct track);
613 
614 	if (s->size == off)
615 		return 1;
616 
617 	return check_bytes_and_report(s, page, p, "Object padding",
618 				p + off, POISON_INUSE, s->size - off);
619 }
620 
621 /* Check the pad bytes at the end of a slab page */
622 static int slab_pad_check(struct kmem_cache *s, struct page *page)
623 {
624 	u8 *start;
625 	u8 *fault;
626 	u8 *end;
627 	int length;
628 	int remainder;
629 
630 	if (!(s->flags & SLAB_POISON))
631 		return 1;
632 
633 	start = page_address(page);
634 	length = (PAGE_SIZE << compound_order(page));
635 	end = start + length;
636 	remainder = length % s->size;
637 	if (!remainder)
638 		return 1;
639 
640 	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
641 	if (!fault)
642 		return 1;
643 	while (end > fault && end[-1] == POISON_INUSE)
644 		end--;
645 
646 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
647 	print_section("Padding", end - remainder, remainder);
648 
649 	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
650 	return 0;
651 }
652 
653 static int check_object(struct kmem_cache *s, struct page *page,
654 					void *object, int active)
655 {
656 	u8 *p = object;
657 	u8 *endobject = object + s->objsize;
658 
659 	if (s->flags & SLAB_RED_ZONE) {
660 		unsigned int red =
661 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
662 
663 		if (!check_bytes_and_report(s, page, object, "Redzone",
664 			endobject, red, s->inuse - s->objsize))
665 			return 0;
666 	} else {
667 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
668 			check_bytes_and_report(s, page, p, "Alignment padding",
669 				endobject, POISON_INUSE, s->inuse - s->objsize);
670 		}
671 	}
672 
673 	if (s->flags & SLAB_POISON) {
674 		if (!active && (s->flags & __OBJECT_POISON) &&
675 			(!check_bytes_and_report(s, page, p, "Poison", p,
676 					POISON_FREE, s->objsize - 1) ||
677 			 !check_bytes_and_report(s, page, p, "Poison",
678 				p + s->objsize - 1, POISON_END, 1)))
679 			return 0;
680 		/*
681 		 * check_pad_bytes cleans up on its own.
682 		 */
683 		check_pad_bytes(s, page, p);
684 	}
685 
686 	if (!s->offset && active)
687 		/*
688 		 * Object and freepointer overlap. Cannot check
689 		 * freepointer while object is allocated.
690 		 */
691 		return 1;
692 
693 	/* Check free pointer validity */
694 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
695 		object_err(s, page, p, "Freepointer corrupt");
696 		/*
697 		 * No choice but to zap it and thus lose the remainder
698 		 * of the free objects in this slab. May cause
699 		 * another error because the object count is now wrong.
700 		 */
701 		set_freepointer(s, p, NULL);
702 		return 0;
703 	}
704 	return 1;
705 }
706 
707 static int check_slab(struct kmem_cache *s, struct page *page)
708 {
709 	int maxobj;
710 
711 	VM_BUG_ON(!irqs_disabled());
712 
713 	if (!PageSlab(page)) {
714 		slab_err(s, page, "Not a valid slab page");
715 		return 0;
716 	}
717 
718 	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
719 	if (page->objects > maxobj) {
720 		slab_err(s, page, "objects %u > max %u",
721 			page->objects, maxobj);
722 		return 0;
723 	}
724 	if (page->inuse > page->objects) {
725 		slab_err(s, page, "inuse %u > max %u",
726 			page->inuse, page->objects);
727 		return 0;
728 	}
729 	/* Slab_pad_check fixes things up after itself */
730 	slab_pad_check(s, page);
731 	return 1;
732 }
733 
734 /*
735  * Determine if a certain object on a page is on the freelist. Must hold the
736  * slab lock to guarantee that the chains are in a consistent state.
737  */
738 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
739 {
740 	int nr = 0;
741 	void *fp = page->freelist;
742 	void *object = NULL;
743 	unsigned long max_objects;
744 
745 	while (fp && nr <= page->objects) {
746 		if (fp == search)
747 			return 1;
748 		if (!check_valid_pointer(s, page, fp)) {
749 			if (object) {
750 				object_err(s, page, object,
751 					"Freechain corrupt");
752 				set_freepointer(s, object, NULL);
753 				break;
754 			} else {
755 				slab_err(s, page, "Freepointer corrupt");
756 				page->freelist = NULL;
757 				page->inuse = page->objects;
758 				slab_fix(s, "Freelist cleared");
759 				return 0;
760 			}
762 		}
763 		object = fp;
764 		fp = get_freepointer(s, object);
765 		nr++;
766 	}
767 
768 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
769 	if (max_objects > MAX_OBJS_PER_PAGE)
770 		max_objects = MAX_OBJS_PER_PAGE;
771 
772 	if (page->objects != max_objects) {
773 		slab_err(s, page, "Wrong number of objects. Found %d but "
774 			"should be %d", page->objects, max_objects);
775 		page->objects = max_objects;
776 		slab_fix(s, "Number of objects adjusted.");
777 	}
778 	if (page->inuse != page->objects - nr) {
779 		slab_err(s, page, "Wrong object count. Counter is %d but "
780 			"counted were %d", page->inuse, page->objects - nr);
781 		page->inuse = page->objects - nr;
782 		slab_fix(s, "Object count adjusted.");
783 	}
784 	return search == NULL;
785 }
786 
787 static void trace(struct kmem_cache *s, struct page *page, void *object,
788 								int alloc)
789 {
790 	if (s->flags & SLAB_TRACE) {
791 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
792 			s->name,
793 			alloc ? "alloc" : "free",
794 			object, page->inuse,
795 			page->freelist);
796 
797 		if (!alloc)
798 			print_section("Object", (void *)object, s->objsize);
799 
800 		dump_stack();
801 	}
802 }
803 
804 /*
805  * Tracking of fully allocated slabs for debugging purposes.
806  */
807 static void add_full(struct kmem_cache_node *n, struct page *page)
808 {
809 	spin_lock(&n->list_lock);
810 	list_add(&page->lru, &n->full);
811 	spin_unlock(&n->list_lock);
812 }
813 
814 static void remove_full(struct kmem_cache *s, struct page *page)
815 {
816 	struct kmem_cache_node *n;
817 
818 	if (!(s->flags & SLAB_STORE_USER))
819 		return;
820 
821 	n = get_node(s, page_to_nid(page));
822 
823 	spin_lock(&n->list_lock);
824 	list_del(&page->lru);
825 	spin_unlock(&n->list_lock);
826 }
827 
828 /* Tracking of the number of slabs for debugging purposes */
829 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
830 {
831 	struct kmem_cache_node *n = get_node(s, node);
832 
833 	return atomic_long_read(&n->nr_slabs);
834 }
835 
836 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
837 {
838 	struct kmem_cache_node *n = get_node(s, node);
839 
840 	/*
841 	 * May be called early in order to allocate a slab for the
842 	 * kmem_cache_node structure. Solve the chicken-egg
843 	 * dilemma by deferring the increment of the count during
844 	 * bootstrap (see early_kmem_cache_node_alloc).
845 	 */
846 	if (!NUMA_BUILD || n) {
847 		atomic_long_inc(&n->nr_slabs);
848 		atomic_long_add(objects, &n->total_objects);
849 	}
850 }
851 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
852 {
853 	struct kmem_cache_node *n = get_node(s, node);
854 
855 	atomic_long_dec(&n->nr_slabs);
856 	atomic_long_sub(objects, &n->total_objects);
857 }
858 
859 /* Object debug checks for alloc/free paths */
860 static void setup_object_debug(struct kmem_cache *s, struct page *page,
861 								void *object)
862 {
863 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
864 		return;
865 
866 	init_object(s, object, 0);
867 	init_tracking(s, object);
868 }
869 
870 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
871 					void *object, unsigned long addr)
872 {
873 	if (!check_slab(s, page))
874 		goto bad;
875 
876 	if (!on_freelist(s, page, object)) {
877 		object_err(s, page, object, "Object already allocated");
878 		goto bad;
879 	}
880 
881 	if (!check_valid_pointer(s, page, object)) {
882 		object_err(s, page, object, "Freelist Pointer check fails");
883 		goto bad;
884 	}
885 
886 	if (!check_object(s, page, object, 0))
887 		goto bad;
888 
889 	/* Success. Perform special debug activities for allocs */
890 	if (s->flags & SLAB_STORE_USER)
891 		set_track(s, object, TRACK_ALLOC, addr);
892 	trace(s, page, object, 1);
893 	init_object(s, object, 1);
894 	return 1;
895 
896 bad:
897 	if (PageSlab(page)) {
898 		/*
899 		 * If this is a slab page then let's do the best we can
900 		 * to avoid issues in the future. Marking all objects
901 		 * as used avoids touching the remaining objects.
902 		 */
903 		slab_fix(s, "Marking all objects used");
904 		page->inuse = page->objects;
905 		page->freelist = NULL;
906 	}
907 	return 0;
908 }
909 
910 static int free_debug_processing(struct kmem_cache *s, struct page *page,
911 					void *object, unsigned long addr)
912 {
913 	if (!check_slab(s, page))
914 		goto fail;
915 
916 	if (!check_valid_pointer(s, page, object)) {
917 		slab_err(s, page, "Invalid object pointer 0x%p", object);
918 		goto fail;
919 	}
920 
921 	if (on_freelist(s, page, object)) {
922 		object_err(s, page, object, "Object already free");
923 		goto fail;
924 	}
925 
926 	if (!check_object(s, page, object, 1))
927 		return 0;
928 
929 	if (unlikely(s != page->slab)) {
930 		if (!PageSlab(page)) {
931 			slab_err(s, page, "Attempt to free object(0x%p) "
932 				"outside of slab", object);
933 		} else if (!page->slab) {
934 			printk(KERN_ERR
935 				"SLUB <none>: no slab for object 0x%p.\n",
936 						object);
937 			dump_stack();
938 		} else
939 			object_err(s, page, object,
940 					"page slab pointer corrupt.");
941 		goto fail;
942 	}
943 
944 	/* Special debug activities for freeing objects */
945 	if (!PageSlubFrozen(page) && !page->freelist)
946 		remove_full(s, page);
947 	if (s->flags & SLAB_STORE_USER)
948 		set_track(s, object, TRACK_FREE, addr);
949 	trace(s, page, object, 0);
950 	init_object(s, object, 0);
951 	return 1;
952 
953 fail:
954 	slab_fix(s, "Object at 0x%p not freed", object);
955 	return 0;
956 }
957 
958 static int __init setup_slub_debug(char *str)
959 {
960 	slub_debug = DEBUG_DEFAULT_FLAGS;
961 	if (*str++ != '=' || !*str)
962 		/*
963 		 * No options specified. Switch on full debugging.
964 		 */
965 		goto out;
966 
967 	if (*str == ',')
968 		/*
969 		 * No options but restriction on slabs. This means full
970 		 * debugging for slabs matching a pattern.
971 		 */
972 		goto check_slabs;
973 
974 	slub_debug = 0;
975 	if (*str == '-')
976 		/*
977 		 * Switch off all debugging measures.
978 		 */
979 		goto out;
980 
981 	/*
982 	 * Determine which debug features should be switched on
983 	 */
984 	for (; *str && *str != ','; str++) {
985 		switch (tolower(*str)) {
986 		case 'f':
987 			slub_debug |= SLAB_DEBUG_FREE;
988 			break;
989 		case 'z':
990 			slub_debug |= SLAB_RED_ZONE;
991 			break;
992 		case 'p':
993 			slub_debug |= SLAB_POISON;
994 			break;
995 		case 'u':
996 			slub_debug |= SLAB_STORE_USER;
997 			break;
998 		case 't':
999 			slub_debug |= SLAB_TRACE;
1000 			break;
1001 		default:
1002 			printk(KERN_ERR "slub_debug option '%c' "
1003 				"unknown. skipped\n", *str);
1004 		}
1005 	}
1006 
1007 check_slabs:
1008 	if (*str == ',')
1009 		slub_debug_slabs = str + 1;
1010 out:
1011 	return 1;
1012 }
1013 
1014 __setup("slub_debug", setup_slub_debug);
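
/*
 * Boot parameter examples for the parser above:
 *
 *	slub_debug		enable all debug options on all slabs
 *	slub_debug=FZ		sanity checks (F) and red zoning (Z) everywhere
 *	slub_debug=,dentry	full debugging for the dentry cache only
 *	slub_debug=F,dentry	sanity checks for the dentry cache only
 *	slub_debug=-		switch all debugging off
 */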
1015 
1016 static unsigned long kmem_cache_flags(unsigned long objsize,
1017 	unsigned long flags, const char *name,
1018 	void (*ctor)(void *))
1019 {
1020 	/*
1021 	 * Enable debugging if selected on the kernel commandline.
1022 	 */
1023 	if (slub_debug && (!slub_debug_slabs ||
1024 	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
1025 			flags |= slub_debug;
1026 
1027 	return flags;
1028 }
1029 #else
1030 static inline void setup_object_debug(struct kmem_cache *s,
1031 			struct page *page, void *object) {}
1032 
1033 static inline int alloc_debug_processing(struct kmem_cache *s,
1034 	struct page *page, void *object, unsigned long addr) { return 0; }
1035 
1036 static inline int free_debug_processing(struct kmem_cache *s,
1037 	struct page *page, void *object, unsigned long addr) { return 0; }
1038 
1039 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1040 			{ return 1; }
1041 static inline int check_object(struct kmem_cache *s, struct page *page,
1042 			void *object, int active) { return 1; }
1043 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1044 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1045 	unsigned long flags, const char *name,
1046 	void (*ctor)(void *))
1047 {
1048 	return flags;
1049 }
1050 #define slub_debug 0
1051 
1052 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1053 							{ return 0; }
1054 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1055 							int objects) {}
1056 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1057 							int objects) {}
1058 #endif
1059 
1060 /*
1061  * Slab allocation and freeing
1062  */
1063 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1064 					struct kmem_cache_order_objects oo)
1065 {
1066 	int order = oo_order(oo);
1067 
1068 	if (node == -1)
1069 		return alloc_pages(flags, order);
1070 	else
1071 		return alloc_pages_node(node, flags, order);
1072 }
1073 
1074 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1075 {
1076 	struct page *page;
1077 	struct kmem_cache_order_objects oo = s->oo;
1078 
1079 	flags |= s->allocflags;
1080 
1081 	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
1082 									oo);
1083 	if (unlikely(!page)) {
1084 		oo = s->min;
1085 		/*
1086 		 * Allocation may have failed due to fragmentation.
1087 		 * Try a lower order alloc if possible
1088 		 */
1089 		page = alloc_slab_page(flags, node, oo);
1090 		if (!page)
1091 			return NULL;
1092 
1093 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
1094 	}
1095 	page->objects = oo_objects(oo);
1096 	mod_zone_page_state(page_zone(page),
1097 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1098 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1099 		1 << oo_order(oo));
1100 
1101 	return page;
1102 }
1103 
1104 static void setup_object(struct kmem_cache *s, struct page *page,
1105 				void *object)
1106 {
1107 	setup_object_debug(s, page, object);
1108 	if (unlikely(s->ctor))
1109 		s->ctor(object);
1110 }
1111 
1112 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1113 {
1114 	struct page *page;
1115 	void *start;
1116 	void *last;
1117 	void *p;
1118 
1119 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1120 
1121 	page = allocate_slab(s,
1122 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1123 	if (!page)
1124 		goto out;
1125 
1126 	inc_slabs_node(s, page_to_nid(page), page->objects);
1127 	page->slab = s;
1128 	page->flags |= 1 << PG_slab;
1129 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1130 			SLAB_STORE_USER | SLAB_TRACE))
1131 		__SetPageSlubDebug(page);
1132 
1133 	start = page_address(page);
1134 
1135 	if (unlikely(s->flags & SLAB_POISON))
1136 		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1137 
1138 	last = start;
1139 	for_each_object(p, s, start, page->objects) {
1140 		setup_object(s, page, last);
1141 		set_freepointer(s, last, p);
1142 		last = p;
1143 	}
1144 	setup_object(s, page, last);
1145 	set_freepointer(s, last, NULL);
1146 
1147 	page->freelist = start;
1148 	page->inuse = 0;
1149 out:
1150 	return page;
1151 }
1152 
1153 static void __free_slab(struct kmem_cache *s, struct page *page)
1154 {
1155 	int order = compound_order(page);
1156 	int pages = 1 << order;
1157 
1158 	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
1159 		void *p;
1160 
1161 		slab_pad_check(s, page);
1162 		for_each_object(p, s, page_address(page),
1163 						page->objects)
1164 			check_object(s, page, p, 0);
1165 		__ClearPageSlubDebug(page);
1166 	}
1167 
1168 	mod_zone_page_state(page_zone(page),
1169 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1170 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1171 		-pages);
1172 
1173 	__ClearPageSlab(page);
1174 	reset_page_mapcount(page);
1175 	if (current->reclaim_state)
1176 		current->reclaim_state->reclaimed_slab += pages;
1177 	__free_pages(page, order);
1178 }
1179 
1180 static void rcu_free_slab(struct rcu_head *h)
1181 {
1182 	struct page *page;
1183 
1184 	page = container_of((struct list_head *)h, struct page, lru);
1185 	__free_slab(page->slab, page);
1186 }
1187 
1188 static void free_slab(struct kmem_cache *s, struct page *page)
1189 {
1190 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1191 		/*
1192 		 * RCU free overloads the RCU head over the LRU
1193 		 */
1194 		struct rcu_head *head = (void *)&page->lru;
1195 
1196 		call_rcu(head, rcu_free_slab);
1197 	} else
1198 		__free_slab(s, page);
1199 }
1200 
1201 static void discard_slab(struct kmem_cache *s, struct page *page)
1202 {
1203 	dec_slabs_node(s, page_to_nid(page), page->objects);
1204 	free_slab(s, page);
1205 }
1206 
1207 /*
1208  * Per slab locking using the pagelock
1209  */
1210 static __always_inline void slab_lock(struct page *page)
1211 {
1212 	bit_spin_lock(PG_locked, &page->flags);
1213 }
1214 
1215 static __always_inline void slab_unlock(struct page *page)
1216 {
1217 	__bit_spin_unlock(PG_locked, &page->flags);
1218 }
1219 
1220 static __always_inline int slab_trylock(struct page *page)
1221 {
1222 	return bit_spin_trylock(PG_locked, &page->flags);
1226 }
1227 
1228 /*
1229  * Management of partially allocated slabs
1230  */
1231 static void add_partial(struct kmem_cache_node *n,
1232 				struct page *page, int tail)
1233 {
1234 	spin_lock(&n->list_lock);
1235 	n->nr_partial++;
1236 	if (tail)
1237 		list_add_tail(&page->lru, &n->partial);
1238 	else
1239 		list_add(&page->lru, &n->partial);
1240 	spin_unlock(&n->list_lock);
1241 }
1242 
1243 static void remove_partial(struct kmem_cache *s, struct page *page)
1244 {
1245 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1246 
1247 	spin_lock(&n->list_lock);
1248 	list_del(&page->lru);
1249 	n->nr_partial--;
1250 	spin_unlock(&n->list_lock);
1251 }
1252 
1253 /*
1254  * Lock slab and remove from the partial list.
1255  *
1256  * Must hold list_lock.
1257  */
1258 static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1259 							struct page *page)
1260 {
1261 	if (slab_trylock(page)) {
1262 		list_del(&page->lru);
1263 		n->nr_partial--;
1264 		__SetPageSlubFrozen(page);
1265 		return 1;
1266 	}
1267 	return 0;
1268 }
1269 
1270 /*
1271  * Try to allocate a partial slab from a specific node.
1272  */
1273 static struct page *get_partial_node(struct kmem_cache_node *n)
1274 {
1275 	struct page *page;
1276 
1277 	/*
1278 	 * Racy check. If we mistakenly see no partial slabs then we
1279 	 * just allocate an empty slab. If we mistakenly try to get a
1280 	 * partial slab and there is none available then get_partial_node()
1281 	 * will return NULL.
1282 	 */
1283 	if (!n || !n->nr_partial)
1284 		return NULL;
1285 
1286 	spin_lock(&n->list_lock);
1287 	list_for_each_entry(page, &n->partial, lru)
1288 		if (lock_and_freeze_slab(n, page))
1289 			goto out;
1290 	page = NULL;
1291 out:
1292 	spin_unlock(&n->list_lock);
1293 	return page;
1294 }
1295 
1296 /*
1297  * Get a page from somewhere. Search in increasing NUMA distances.
1298  */
1299 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1300 {
1301 #ifdef CONFIG_NUMA
1302 	struct zonelist *zonelist;
1303 	struct zoneref *z;
1304 	struct zone *zone;
1305 	enum zone_type high_zoneidx = gfp_zone(flags);
1306 	struct page *page;
1307 
1308 	/*
1309 	 * The defrag ratio allows a configuration of the tradeoffs between
1310 	 * inter node defragmentation and node local allocations. A lower
1311 	 * defrag_ratio increases the tendency to do local allocations
1312 	 * instead of attempting to obtain partial slabs from other nodes.
1313 	 *
1314 	 * If the defrag_ratio is set to 0 then kmalloc() always
1315 	 * returns node local objects. If the ratio is higher, then kmalloc()
1316 	 * may return off node objects because partial slabs are obtained
1317 	 * from other nodes and filled up.
1318 	 *
1319 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1320 	 * (which makes the stored ratio 1000) then every (well almost) allocation
1321 	 * first attempt to defrag slab caches on other nodes. This means
1322 	 * scanning over all nodes to look for partial slabs which may be
1323 	 * expensive if we do it every time we are trying to find a slab
1324 	 * with available objects.
1325 	 */
1326 	if (!s->remote_node_defrag_ratio ||
1327 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1328 		return NULL;
1329 
1330 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1331 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1332 		struct kmem_cache_node *n;
1333 
1334 		n = get_node(s, zone_to_nid(zone));
1335 
1336 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1337 				n->nr_partial > s->min_partial) {
1338 			page = get_partial_node(n);
1339 			if (page)
1340 				return page;
1341 		}
1342 	}
1343 #endif
1344 	return NULL;
1345 }
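
/*
 * Worked example for the ratio check above: writing 10 to
 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores 100 in
 * s->remote_node_defrag_ratio. "get_cycles() % 1024 > 100" is then
 * true roughly 90% of the time, so only about one in ten allocations
 * attempts the remote-node scan; a stored ratio of 1000 (sysfs value
 * 100) makes the scan happen almost every time.
 */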
1346 
1347 /*
1348  * Get a partial page, lock it and return it.
1349  */
1350 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1351 {
1352 	struct page *page;
1353 	int searchnode = (node == -1) ? numa_node_id() : node;
1354 
1355 	page = get_partial_node(get_node(s, searchnode));
1356 	if (page || (flags & __GFP_THISNODE))
1357 		return page;
1358 
1359 	return get_any_partial(s, flags);
1360 }
1361 
1362 /*
1363  * Move a page back to the lists.
1364  *
1365  * Must be called with the slab lock held.
1366  *
1367  * On exit the slab lock will have been dropped.
1368  */
1369 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1370 {
1371 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1372 	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1373 
1374 	__ClearPageSlubFrozen(page);
1375 	if (page->inuse) {
1376 
1377 		if (page->freelist) {
1378 			add_partial(n, page, tail);
1379 			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1380 		} else {
1381 			stat(c, DEACTIVATE_FULL);
1382 			if (SLABDEBUG && PageSlubDebug(page) &&
1383 						(s->flags & SLAB_STORE_USER))
1384 				add_full(n, page);
1385 		}
1386 		slab_unlock(page);
1387 	} else {
1388 		stat(c, DEACTIVATE_EMPTY);
1389 		if (n->nr_partial < s->min_partial) {
1390 			/*
1391 			 * Adding an empty slab to the partial slabs in order
1392 			 * to avoid page allocator overhead. This slab needs
1393 			 * to come after the other slabs with objects in them
1394 			 * so that the others get filled first. That way the
1395 			 * size of the partial list stays small.
1396 			 *
1397 			 * kmem_cache_shrink can reclaim any empty slabs from
1398 			 * the partial list.
1399 			 */
1400 			add_partial(n, page, 1);
1401 			slab_unlock(page);
1402 		} else {
1403 			slab_unlock(page);
1404 			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
1405 			discard_slab(s, page);
1406 		}
1407 	}
1408 }
1409 
1410 /*
1411  * Remove the cpu slab
1412  */
1413 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1414 {
1415 	struct page *page = c->page;
1416 	int tail = 1;
1417 
1418 	if (page->freelist)
1419 		stat(c, DEACTIVATE_REMOTE_FREES);
1420 	/*
1421 	 * Merge cpu freelist into slab freelist. Typically we get here
1422 	 * because both freelists are empty. So this is unlikely
1423 	 * to occur.
1424 	 */
1425 	while (unlikely(c->freelist)) {
1426 		void **object;
1427 
1428 		tail = 0;	/* Hot objects. Put the slab first */
1429 
1430 		/* Retrieve object from cpu_freelist */
1431 		object = c->freelist;
1432 		c->freelist = c->freelist[c->offset];
1433 
1434 		/* And put onto the regular freelist */
1435 		object[c->offset] = page->freelist;
1436 		page->freelist = object;
1437 		page->inuse--;
1438 	}
1439 	c->page = NULL;
1440 	unfreeze_slab(s, page, tail);
1441 }
1442 
1443 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1444 {
1445 	stat(c, CPUSLAB_FLUSH);
1446 	slab_lock(c->page);
1447 	deactivate_slab(s, c);
1448 }
1449 
1450 /*
1451  * Flush cpu slab.
1452  *
1453  * Called from IPI handler with interrupts disabled.
1454  */
1455 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1456 {
1457 	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1458 
1459 	if (likely(c && c->page))
1460 		flush_slab(s, c);
1461 }
1462 
1463 static void flush_cpu_slab(void *d)
1464 {
1465 	struct kmem_cache *s = d;
1466 
1467 	__flush_cpu_slab(s, smp_processor_id());
1468 }
1469 
1470 static void flush_all(struct kmem_cache *s)
1471 {
1472 	on_each_cpu(flush_cpu_slab, s, 1);
1473 }
1474 
1475 /*
1476  * Check if the objects in a per cpu structure fit numa
1477  * locality expectations.
1478  */
1479 static inline int node_match(struct kmem_cache_cpu *c, int node)
1480 {
1481 #ifdef CONFIG_NUMA
1482 	if (node != -1 && c->node != node)
1483 		return 0;
1484 #endif
1485 	return 1;
1486 }
1487 
1488 /*
1489  * Slow path. The lockless freelist is empty or we need to perform
1490  * debugging duties.
1491  *
1492  * Interrupts are disabled.
1493  *
1494  * Processing is still very fast if new objects have been freed to the
1495  * regular freelist. In that case we simply take over the regular freelist
1496  * as the lockless freelist and zap the regular freelist.
1497  *
1498  * If that is not working then we fall back to the partial lists. We take the
1499  * first element of the freelist as the object to allocate now and move the
1500  * rest of the freelist to the lockless freelist.
1501  *
1502  * And if we were unable to get a new slab from the partial slab lists then
1503  * we need to allocate a new slab. This is the slowest path since it involves
1504  * a call to the page allocator and the setup of a new slab.
1505  */
1506 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1507 			  unsigned long addr, struct kmem_cache_cpu *c)
1508 {
1509 	void **object;
1510 	struct page *new;
1511 
1512 	/* We handle __GFP_ZERO in the caller */
1513 	gfpflags &= ~__GFP_ZERO;
1514 
1515 	if (!c->page)
1516 		goto new_slab;
1517 
1518 	slab_lock(c->page);
1519 	if (unlikely(!node_match(c, node)))
1520 		goto another_slab;
1521 
1522 	stat(c, ALLOC_REFILL);
1523 
1524 load_freelist:
1525 	object = c->page->freelist;
1526 	if (unlikely(!object))
1527 		goto another_slab;
1528 	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
1529 		goto debug;
1530 
1531 	c->freelist = object[c->offset];
1532 	c->page->inuse = c->page->objects;
1533 	c->page->freelist = NULL;
1534 	c->node = page_to_nid(c->page);
1535 unlock_out:
1536 	slab_unlock(c->page);
1537 	stat(c, ALLOC_SLOWPATH);
1538 	return object;
1539 
1540 another_slab:
1541 	deactivate_slab(s, c);
1542 
1543 new_slab:
1544 	new = get_partial(s, gfpflags, node);
1545 	if (new) {
1546 		c->page = new;
1547 		stat(c, ALLOC_FROM_PARTIAL);
1548 		goto load_freelist;
1549 	}
1550 
1551 	if (gfpflags & __GFP_WAIT)
1552 		local_irq_enable();
1553 
1554 	new = new_slab(s, gfpflags, node);
1555 
1556 	if (gfpflags & __GFP_WAIT)
1557 		local_irq_disable();
1558 
1559 	if (new) {
1560 		c = get_cpu_slab(s, smp_processor_id());
1561 		stat(c, ALLOC_SLAB);
1562 		if (c->page)
1563 			flush_slab(s, c);
1564 		slab_lock(new);
1565 		__SetPageSlubFrozen(new);
1566 		c->page = new;
1567 		goto load_freelist;
1568 	}
1569 	return NULL;
1570 debug:
1571 	if (!alloc_debug_processing(s, c->page, object, addr))
1572 		goto another_slab;
1573 
1574 	c->page->inuse++;
1575 	c->page->freelist = object[c->offset];
1576 	c->node = -1;
1577 	goto unlock_out;
1578 }
1579 
1580 /*
1581  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1582  * have the fastpath folded into their functions. So no function call
1583  * overhead for requests that can be satisfied on the fastpath.
1584  *
1585  * The fastpath works by first checking if the lockless freelist can be used.
1586  * If not then __slab_alloc is called for slow processing.
1587  *
1588  * Otherwise we can simply pick the next object from the lockless free list.
1589  */
1590 static __always_inline void *slab_alloc(struct kmem_cache *s,
1591 		gfp_t gfpflags, int node, unsigned long addr)
1592 {
1593 	void **object;
1594 	struct kmem_cache_cpu *c;
1595 	unsigned long flags;
1596 	unsigned int objsize;
1597 
1598 	lockdep_trace_alloc(gfpflags);
1599 	might_sleep_if(gfpflags & __GFP_WAIT);
1600 
1601 	if (should_failslab(s->objsize, gfpflags))
1602 		return NULL;
1603 
1604 	local_irq_save(flags);
1605 	c = get_cpu_slab(s, smp_processor_id());
1606 	objsize = c->objsize;
1607 	if (unlikely(!c->freelist || !node_match(c, node)))
1608 		object = __slab_alloc(s, gfpflags, node, addr, c);
1609 	else {
1612 		object = c->freelist;
1613 		c->freelist = object[c->offset];
1614 		stat(c, ALLOC_FASTPATH);
1615 	}
1616 	local_irq_restore(flags);
1617 
1618 	if (unlikely((gfpflags & __GFP_ZERO) && object))
1619 		memset(object, 0, objsize);
1620 
1621 	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
1622 	return object;
1623 }
1624 
1625 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1626 {
1627 	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
1628 
1629 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
1630 
1631 	return ret;
1632 }
1633 EXPORT_SYMBOL(kmem_cache_alloc);
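
/*
 * Usage sketch (cache and structure names are hypothetical):
 *
 *	struct kmem_cache *my_cache;
 *	struct my_obj *obj;
 *
 *	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *				     0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 */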
1634 
1635 #ifdef CONFIG_KMEMTRACE
1636 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
1637 {
1638 	return slab_alloc(s, gfpflags, -1, _RET_IP_);
1639 }
1640 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
1641 #endif
1642 
1643 #ifdef CONFIG_NUMA
1644 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1645 {
1646 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
1647 
1648 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
1649 				    s->objsize, s->size, gfpflags, node);
1650 
1651 	return ret;
1652 }
1653 EXPORT_SYMBOL(kmem_cache_alloc_node);
1654 #endif
1655 
1656 #ifdef CONFIG_KMEMTRACE
1657 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
1658 				    gfp_t gfpflags,
1659 				    int node)
1660 {
1661 	return slab_alloc(s, gfpflags, node, _RET_IP_);
1662 }
1663 EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
1664 #endif
1665 
1666 /*
1667  * Slow patch handling. This may still be called frequently since objects
1668  * have a longer lifetime than the cpu slabs in most processing loads.
1669  *
1670  * So we still attempt to reduce cache line usage. Just take the slab
1671  * lock and free the item. If there is no additional partial page
1672  * handling required then we can return immediately.
1673  */
1674 static void __slab_free(struct kmem_cache *s, struct page *page,
1675 			void *x, unsigned long addr, unsigned int offset)
1676 {
1677 	void *prior;
1678 	void **object = (void *)x;
1679 	struct kmem_cache_cpu *c;
1680 
1681 	c = get_cpu_slab(s, raw_smp_processor_id());
1682 	stat(c, FREE_SLOWPATH);
1683 	slab_lock(page);
1684 
1685 	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
1686 		goto debug;
1687 
1688 checks_ok:
1689 	prior = object[offset] = page->freelist;
1690 	page->freelist = object;
1691 	page->inuse--;
1692 
1693 	if (unlikely(PageSlubFrozen(page))) {
1694 		stat(c, FREE_FROZEN);
1695 		goto out_unlock;
1696 	}
1697 
1698 	if (unlikely(!page->inuse))
1699 		goto slab_empty;
1700 
1701 	/*
1702 	 * Objects left in the slab. If it was not on the partial list before
1703 	 * then add it.
1704 	 */
1705 	if (unlikely(!prior)) {
1706 		add_partial(get_node(s, page_to_nid(page)), page, 1);
1707 		stat(c, FREE_ADD_PARTIAL);
1708 	}
1709 
1710 out_unlock:
1711 	slab_unlock(page);
1712 	return;
1713 
1714 slab_empty:
1715 	if (prior) {
1716 		/*
1717 		 * Slab still on the partial list.
1718 		 */
1719 		remove_partial(s, page);
1720 		stat(c, FREE_REMOVE_PARTIAL);
1721 	}
1722 	slab_unlock(page);
1723 	stat(c, FREE_SLAB);
1724 	discard_slab(s, page);
1725 	return;
1726 
1727 debug:
1728 	if (!free_debug_processing(s, page, x, addr))
1729 		goto out_unlock;
1730 	goto checks_ok;
1731 }
1732 
1733 /*
1734  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1735  * can perform fastpath freeing without additional function calls.
1736  *
1737  * The fastpath is only possible if we are freeing to the current cpu slab
1738  * of this processor. This is typically the case if we have just allocated
1739  * the item before.
1740  *
1741  * If fastpath is not possible then fall back to __slab_free where we deal
1742  * with all sorts of special processing.
1743  */
1744 static __always_inline void slab_free(struct kmem_cache *s,
1745 			struct page *page, void *x, unsigned long addr)
1746 {
1747 	void **object = (void *)x;
1748 	struct kmem_cache_cpu *c;
1749 	unsigned long flags;
1750 
1751 	kmemleak_free_recursive(x, s->flags);
1752 	local_irq_save(flags);
1753 	c = get_cpu_slab(s, smp_processor_id());
1754 	debug_check_no_locks_freed(object, c->objsize);
1755 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1756 		debug_check_no_obj_freed(object, c->objsize);
1757 	if (likely(page == c->page && c->node >= 0)) {
1758 		object[c->offset] = c->freelist;
1759 		c->freelist = object;
1760 		stat(c, FREE_FASTPATH);
1761 	} else
1762 		__slab_free(s, page, x, addr, c->offset);
1763 
1764 	local_irq_restore(flags);
1765 }
1766 
1767 void kmem_cache_free(struct kmem_cache *s, void *x)
1768 {
1769 	struct page *page;
1770 
1771 	page = virt_to_head_page(x);
1772 
1773 	slab_free(s, page, x, _RET_IP_);
1774 
1775 	trace_kmem_cache_free(_RET_IP_, x);
1776 }
1777 EXPORT_SYMBOL(kmem_cache_free);
1778 
1779 /* Figure out on which slab page the object resides */
1780 static struct page *get_object_page(const void *x)
1781 {
1782 	struct page *page = virt_to_head_page(x);
1783 
1784 	if (!PageSlab(page))
1785 		return NULL;
1786 
1787 	return page;
1788 }
1789 
1790 /*
1791  * Object placement in a slab is made very easy because we always start at
1792  * offset 0. If we tune the size of the object to the alignment then we can
1793  * get the required alignment by putting one properly sized object after
1794  * another.
1795  *
1796  * Notice that the allocation order determines the sizes of the per cpu
1797  * caches. Each processor has always one slab available for allocations.
1798  * Increasing the allocation order reduces the number of times that slabs
1799  * must be moved on and off the partial lists and is therefore a factor in
1800  * locking overhead.
1801  */
1802 
1803 /*
1804  * Minimum / Maximum order of slab pages. This influences locking overhead
1805  * and slab fragmentation. A higher order reduces the number of partial slabs
1806  * and increases the number of allocations possible without having to
1807  * take the list_lock.
1808  */
1809 static int slub_min_order;
1810 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
1811 static int slub_min_objects;
1812 
1813 /*
1814  * Merge control. If this is set then no merging of slab caches will occur.
1815  * (Could be removed. This was introduced to pacify the merge skeptics.)
1816  */
1817 static int slub_nomerge;
1818 
1819 /*
1820  * Calculate the order of allocation given a slab object size.
1821  *
1822  * The order of allocation has significant impact on performance and other
1823  * system components. Generally order 0 allocations should be preferred since
1824  * order 0 does not cause fragmentation in the page allocator. Larger objects
1825  * can be problematic to put into order 0 slabs because there may be too much
1826  * unused space left. We go to a higher order if more than 1/16th of the slab
1827  * would be wasted.
1828  *
1829  * In order to reach satisfactory performance we must ensure that a minimum
1830  * number of objects is in one slab. Otherwise we may generate too much
1831  * activity on the partial lists which requires taking the list_lock. This is
1832  * less a concern for large slabs though which are rarely used.
1833  *
1834  * slub_max_order specifies the order where we begin to stop considering the
1835  * number of objects in a slab as critical. If we reach slub_max_order then
1836  * we try to keep the page order as low as possible. So we accept more waste
1837  * of space in favor of a small page order.
1838  *
1839  * Higher order allocations also allow the placement of more objects in a
1840  * slab and thereby reduce object handling overhead. If the user has
1841  * requested a higher minimum order then we start with that one instead of
1842  * the smallest order which will fit the object.
1843  */
1844 static inline int slab_order(int size, int min_objects,
1845 				int max_order, int fract_leftover)
1846 {
1847 	int order;
1848 	int rem;
1849 	int min_order = slub_min_order;
1850 
1851 	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
1852 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
1853 
1854 	for (order = max(min_order,
1855 				fls(min_objects * size - 1) - PAGE_SHIFT);
1856 			order <= max_order; order++) {
1857 
1858 		unsigned long slab_size = PAGE_SIZE << order;
1859 
1860 		if (slab_size < min_objects * size)
1861 			continue;
1862 
1863 		rem = slab_size % size;
1864 
1865 		if (rem <= slab_size / fract_leftover)
1866 			break;
1867 
1868 	}
1869 
1870 	return order;
1871 }
1872 
1873 static inline int calculate_order(int size)
1874 {
1875 	int order;
1876 	int min_objects;
1877 	int fraction;
1878 	int max_objects;
1879 
1880 	/*
1881 	 * Attempt to find best configuration for a slab. This
1882 	 * works by first attempting to generate a layout with
1883 	 * the best configuration and backing off gradually.
1884 	 *
1885 	 * First we reduce the acceptable waste in a slab. Then
1886 	 * we reduce the minimum objects required in a slab.
1887 	 */
1888 	min_objects = slub_min_objects;
1889 	if (!min_objects)
1890 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
1891 	max_objects = (PAGE_SIZE << slub_max_order) / size;
1892 	min_objects = min(min_objects, max_objects);
1893 
1894 	while (min_objects > 1) {
1895 		fraction = 16;
1896 		while (fraction >= 4) {
1897 			order = slab_order(size, min_objects,
1898 						slub_max_order, fraction);
1899 			if (order <= slub_max_order)
1900 				return order;
1901 			fraction /= 2;
1902 		}
1903 		min_objects--;
1904 	}
1905 
1906 	/*
1907 	 * We were unable to place multiple objects in a slab. Now
1908 	 * let's see if we can place a single object there.
1909 	 */
1910 	order = slab_order(size, 1, slub_max_order, 1);
1911 	if (order <= slub_max_order)
1912 		return order;
1913 
1914 	/*
1915 	 * Doh, this slab cannot be placed using slub_max_order.
1916 	 */
1917 	order = slab_order(size, 1, MAX_ORDER, 1);
1918 	if (order < MAX_ORDER)
1919 		return order;
1920 	return -ENOSYS;
1921 }
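
/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096,
 * slub_max_order == 3 and 4 possible cpus): for size = 256,
 * min_objects defaults to 4 * (fls(4) + 1) = 16. The first call to
 * slab_order(256, 16, 3, 16) already succeeds at order 0 because 16
 * objects exactly fill a 4096 byte page with no leftover, so no
 * backoff of the fraction or of min_objects is needed.
 */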
1922 
1923 /*
1924  * Figure out what the alignment of the objects will be.
1925  */
1926 static unsigned long calculate_alignment(unsigned long flags,
1927 		unsigned long align, unsigned long size)
1928 {
1929 	/*
1930 	 * If the user wants hardware cache aligned objects then follow that
1931 	 * suggestion if the object is sufficiently large.
1932 	 *
1933 	 * The hardware cache alignment cannot override the specified
1934 	 * alignment though. If that is greater then use it.
1935 	 */
1936 	if (flags & SLAB_HWCACHE_ALIGN) {
1937 		unsigned long ralign = cache_line_size();
1938 		while (size <= ralign / 2)
1939 			ralign /= 2;
1940 		align = max(align, ralign);
1941 	}
1942 
1943 	if (align < ARCH_SLAB_MINALIGN)
1944 		align = ARCH_SLAB_MINALIGN;
1945 
1946 	return ALIGN(align, sizeof(void *));
1947 }
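
/*
 * Worked example (illustrative only, assuming a 64 byte cache line and
 * 8 byte pointers): for a 20 byte object with SLAB_HWCACHE_ALIGN the
 * loop halves ralign from 64 to 32 (since 20 <= 32 but 20 > 16), so
 * the object gets 32 byte alignment. Two such objects may share a
 * cache line but no object ever straddles one.
 */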
1948 
1949 static void init_kmem_cache_cpu(struct kmem_cache *s,
1950 			struct kmem_cache_cpu *c)
1951 {
1952 	c->page = NULL;
1953 	c->freelist = NULL;
1954 	c->node = 0;
1955 	c->offset = s->offset / sizeof(void *);
1956 	c->objsize = s->objsize;
1957 #ifdef CONFIG_SLUB_STATS
1958 	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1959 #endif
1960 }
1961 
1962 static void
1963 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
1964 {
1965 	n->nr_partial = 0;
1966 	spin_lock_init(&n->list_lock);
1967 	INIT_LIST_HEAD(&n->partial);
1968 #ifdef CONFIG_SLUB_DEBUG
1969 	atomic_long_set(&n->nr_slabs, 0);
1970 	atomic_long_set(&n->total_objects, 0);
1971 	INIT_LIST_HEAD(&n->full);
1972 #endif
1973 }
1974 
1975 #ifdef CONFIG_SMP
1976 /*
1977  * Per cpu array for per cpu structures.
1978  *
1979  * The per cpu array places all kmem_cache_cpu structures from one processor
1980  * close together, so that multiple per cpu structures may share a
1981  * cacheline. This may be particularly
1982  * beneficial for the kmalloc caches.
1983  *
1984  * A desktop system typically has around 60-80 slabs. With 100 here we are
1985  * likely able to get per cpu structures for all caches from the array defined
1986  * here. We must be able to cover all kmalloc caches during bootstrap.
1987  *
1988  * If the per cpu array is exhausted then fall back to kmalloc
1989  * of individual cachelines. No sharing is possible then.
1990  */
1991 #define NR_KMEM_CACHE_CPU 100
1992 
1993 static DEFINE_PER_CPU(struct kmem_cache_cpu,
1994 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1995 
1996 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1997 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
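
/*
 * Note: free entries of the array are threaded through their freelist
 * field. per_cpu(kmem_cache_cpu_free, cpu) points to the first unused
 * kmem_cache_cpu structure and each structure's freelist field points
 * to the next, forming a simple per cpu LIFO of recyclable structures.
 */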
1998 
1999 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
2000 							int cpu, gfp_t flags)
2001 {
2002 	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
2003 
2004 	if (c)
2005 		per_cpu(kmem_cache_cpu_free, cpu) =
2006 				(void *)c->freelist;
2007 	else {
2008 		/* Table overflow: So allocate ourselves */
2009 		c = kmalloc_node(
2010 			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
2011 			flags, cpu_to_node(cpu));
2012 		if (!c)
2013 			return NULL;
2014 	}
2015 
2016 	init_kmem_cache_cpu(s, c);
2017 	return c;
2018 }
2019 
2020 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
2021 {
2022 	if (c < per_cpu(kmem_cache_cpu, cpu) ||
2023 			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
2024 		kfree(c);
2025 		return;
2026 	}
2027 	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
2028 	per_cpu(kmem_cache_cpu_free, cpu) = c;
2029 }
2030 
2031 static void free_kmem_cache_cpus(struct kmem_cache *s)
2032 {
2033 	int cpu;
2034 
2035 	for_each_online_cpu(cpu) {
2036 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2037 
2038 		if (c) {
2039 			s->cpu_slab[cpu] = NULL;
2040 			free_kmem_cache_cpu(c, cpu);
2041 		}
2042 	}
2043 }
2044 
2045 static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2046 {
2047 	int cpu;
2048 
2049 	for_each_online_cpu(cpu) {
2050 		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2051 
2052 		if (c)
2053 			continue;
2054 
2055 		c = alloc_kmem_cache_cpu(s, cpu, flags);
2056 		if (!c) {
2057 			free_kmem_cache_cpus(s);
2058 			return 0;
2059 		}
2060 		s->cpu_slab[cpu] = c;
2061 	}
2062 	return 1;
2063 }
2064 
2065 /*
2066  * Initialize the per cpu array.
2067  */
2068 static void init_alloc_cpu_cpu(int cpu)
2069 {
2070 	int i;
2071 
2072 	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
2073 		return;
2074 
2075 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2076 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2077 
2078 	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
2079 }
2080 
2081 static void __init init_alloc_cpu(void)
2082 {
2083 	int cpu;
2084 
2085 	for_each_online_cpu(cpu)
2086 		init_alloc_cpu_cpu(cpu);
2087 }
2088 
2089 #else
2090 static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2091 static inline void init_alloc_cpu(void) {}
2092 
2093 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2094 {
2095 	init_kmem_cache_cpu(s, &s->cpu_slab);
2096 	return 1;
2097 }
2098 #endif
2099 
2100 #ifdef CONFIG_NUMA
2101 /*
2102  * No kmalloc_node yet so do it by hand. We know that this is the first
2103  * slab on the node for this slabcache. There are no concurrent accesses
2104  * possible.
2105  *
2106  * Note that this function only works on the kmalloc_node_cache itself,
2107  * i.e. when allocating its own node structures. This is used for bootstrapping
2108  * memory on a fresh node that has no slab structures yet.
2109  */
2110 static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
2111 {
2112 	struct page *page;
2113 	struct kmem_cache_node *n;
2114 	unsigned long flags;
2115 
2116 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2117 
2118 	page = new_slab(kmalloc_caches, gfpflags, node);
2119 
2120 	BUG_ON(!page);
2121 	if (page_to_nid(page) != node) {
2122 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2123 				"node %d\n", node);
2124 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2125 				"in order to be able to continue\n");
2126 	}
2127 
2128 	n = page->freelist;
2129 	BUG_ON(!n);
2130 	page->freelist = get_freepointer(kmalloc_caches, n);
2131 	page->inuse++;
2132 	kmalloc_caches->node[node] = n;
2133 #ifdef CONFIG_SLUB_DEBUG
2134 	init_object(kmalloc_caches, n, 1);
2135 	init_tracking(kmalloc_caches, n);
2136 #endif
2137 	init_kmem_cache_node(n, kmalloc_caches);
2138 	inc_slabs_node(kmalloc_caches, node, page->objects);
2139 
2140 	/*
2141 	 * lockdep requires consistent irq usage for each lock
2142 	 * so even though there cannot be a race this early in
2143 	 * the boot sequence, we still disable irqs.
2144 	 */
2145 	local_irq_save(flags);
2146 	add_partial(n, page, 0);
2147 	local_irq_restore(flags);
2148 }
2149 
2150 static void free_kmem_cache_nodes(struct kmem_cache *s)
2151 {
2152 	int node;
2153 
2154 	for_each_node_state(node, N_NORMAL_MEMORY) {
2155 		struct kmem_cache_node *n = s->node[node];
2156 		if (n && n != &s->local_node)
2157 			kmem_cache_free(kmalloc_caches, n);
2158 		s->node[node] = NULL;
2159 	}
2160 }
2161 
2162 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2163 {
2164 	int node;
2165 	int local_node;
2166 
2167 	if (slab_state >= UP)
2168 		local_node = page_to_nid(virt_to_page(s));
2169 	else
2170 		local_node = 0;
2171 
2172 	for_each_node_state(node, N_NORMAL_MEMORY) {
2173 		struct kmem_cache_node *n;
2174 
2175 		if (local_node == node)
2176 			n = &s->local_node;
2177 		else {
2178 			if (slab_state == DOWN) {
2179 				early_kmem_cache_node_alloc(gfpflags, node);
2180 				continue;
2181 			}
2182 			n = kmem_cache_alloc_node(kmalloc_caches,
2183 							gfpflags, node);
2184 
2185 			if (!n) {
2186 				free_kmem_cache_nodes(s);
2187 				return 0;
2188 			}
2189 
2190 		}
2191 		s->node[node] = n;
2192 		init_kmem_cache_node(n, s);
2193 	}
2194 	return 1;
2195 }
2196 #else
2197 static void free_kmem_cache_nodes(struct kmem_cache *s)
2198 {
2199 }
2200 
2201 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2202 {
2203 	init_kmem_cache_node(&s->local_node, s);
2204 	return 1;
2205 }
2206 #endif
2207 
2208 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2209 {
2210 	if (min < MIN_PARTIAL)
2211 		min = MIN_PARTIAL;
2212 	else if (min > MAX_PARTIAL)
2213 		min = MAX_PARTIAL;
2214 	s->min_partial = min;
2215 }
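
/*
 * Example (illustrative only): kmem_cache_open() passes ilog2(s->size),
 * so a 32 byte cache requests a minimum of 5 partial slabs while an
 * 8192 byte cache requests 13, which is then clamped to MAX_PARTIAL.
 */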
2216 
2217 /*
2218  * calculate_sizes() determines the order and the distribution of data within
2219  * a slab object.
2220  */
2221 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2222 {
2223 	unsigned long flags = s->flags;
2224 	unsigned long size = s->objsize;
2225 	unsigned long align = s->align;
2226 	int order;
2227 
2228 	/*
2229 	 * Round up object size to the next word boundary. We can only
2230 	 * place the free pointer at word boundaries and this determines
2231 	 * the possible location of the free pointer.
2232 	 */
2233 	size = ALIGN(size, sizeof(void *));
2234 
2235 #ifdef CONFIG_SLUB_DEBUG
2236 	/*
2237 	 * Determine if we can poison the object itself. If the user of
2238 	 * the slab may touch the object after free or before allocation
2239 	 * then we should never poison the object itself.
2240 	 */
2241 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2242 			!s->ctor)
2243 		s->flags |= __OBJECT_POISON;
2244 	else
2245 		s->flags &= ~__OBJECT_POISON;
2246 
2248 	/*
2249 	 * If we are Redzoning then check if there is some space between the
2250 	 * end of the object and the free pointer. If not then add an
2251 	 * additional word to have some bytes to store Redzone information.
2252 	 */
2253 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2254 		size += sizeof(void *);
2255 #endif
2256 
2257 	/*
2258 	 * With that we have determined the number of bytes in actual use
2259 	 * by the object. This is the potential offset to the free pointer.
2260 	 */
2261 	s->inuse = size;
2262 
2263 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2264 		s->ctor)) {
2265 		/*
2266 		 * Relocate free pointer after the object if it is not
2267 		 * permitted to overwrite the first word of the object on
2268 		 * kmem_cache_free.
2269 		 *
2270 		 * This is the case if we do RCU, have a constructor or
2271 		 * destructor or are poisoning the objects.
2272 		 */
2273 		s->offset = size;
2274 		size += sizeof(void *);
2275 	}
2276 
2277 #ifdef CONFIG_SLUB_DEBUG
2278 	if (flags & SLAB_STORE_USER)
2279 		/*
2280 		 * Need to store information about allocs and frees after
2281 		 * the object.
2282 		 */
2283 		size += 2 * sizeof(struct track);
2284 
2285 	if (flags & SLAB_RED_ZONE)
2286 		/*
2287 		 * Add some empty padding so that we can catch
2288 		 * overwrites from earlier objects rather than let
2289 		 * tracking information or the free pointer be
2290 		 * corrupted if a user writes before the start
2291 		 * of the object.
2292 		 */
2293 		size += sizeof(void *);
2294 #endif
2295 
2296 	/*
2297 	 * Determine the alignment based on various parameters that the
2298 	 * user specified and the dynamic determination of cache line size
2299 	 * on bootup.
2300 	 */
2301 	align = calculate_alignment(flags, align, s->objsize);
2302 
2303 	/*
2304 	 * SLUB stores one object immediately after another beginning from
2305 	 * offset 0. In order to align the objects we have to simply size
2306 	 * each object to conform to the alignment.
2307 	 */
2308 	size = ALIGN(size, align);
2309 	s->size = size;
2310 	if (forced_order >= 0)
2311 		order = forced_order;
2312 	else
2313 		order = calculate_order(size);
2314 
2315 	if (order < 0)
2316 		return 0;
2317 
2318 	s->allocflags = 0;
2319 	if (order)
2320 		s->allocflags |= __GFP_COMP;
2321 
2322 	if (s->flags & SLAB_CACHE_DMA)
2323 		s->allocflags |= SLUB_DMA;
2324 
2325 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2326 		s->allocflags |= __GFP_RECLAIMABLE;
2327 
2328 	/*
2329 	 * Determine the number of objects per slab
2330 	 */
2331 	s->oo = oo_make(order, size);
2332 	s->min = oo_make(get_order(size), size);
2333 	if (oo_objects(s->oo) > oo_objects(s->max))
2334 		s->max = s->oo;
2335 
2336 	return !!oo_objects(s->oo);
2337 
2338 }
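
/*
 * Layout example (illustrative only, assuming 8 byte pointers and no
 * debug flags): a cache with objsize = 30 and no ctor is rounded up to
 * size = 32 and inuse = 32. Since neither RCU, poisoning nor a ctor is
 * in use, the free pointer overlays the first word of a free object
 * (s->offset == 0) and the final object size stays 32. With SLAB_POISON
 * set instead, the free pointer would be relocated to offset 32 and the
 * object would grow to 40 bytes before alignment.
 */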
2339 
2340 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2341 		const char *name, size_t size,
2342 		size_t align, unsigned long flags,
2343 		void (*ctor)(void *))
2344 {
2345 	memset(s, 0, kmem_size);
2346 	s->name = name;
2347 	s->ctor = ctor;
2348 	s->objsize = size;
2349 	s->align = align;
2350 	s->flags = kmem_cache_flags(size, flags, name, ctor);
2351 
2352 	if (!calculate_sizes(s, -1))
2353 		goto error;
2354 
2355 	/*
2356 	 * The larger the object size is, the more pages we want on the partial
2357 	 * list to avoid pounding the page allocator excessively.
2358 	 */
2359 	set_min_partial(s, ilog2(s->size));
2360 	s->refcount = 1;
2361 #ifdef CONFIG_NUMA
2362 	s->remote_node_defrag_ratio = 1000;
2363 #endif
2364 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2365 		goto error;
2366 
2367 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2368 		return 1;
2369 	free_kmem_cache_nodes(s);
2370 error:
2371 	if (flags & SLAB_PANIC)
2372 		panic("Cannot create slab %s size=%lu realsize=%u "
2373 			"order=%u offset=%u flags=%lx\n",
2374 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
2375 			s->offset, flags);
2376 	return 0;
2377 }
2378 
2379 /*
2380  * Check if a given pointer is valid
2381  */
2382 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2383 {
2384 	struct page *page;
2385 
2386 	page = get_object_page(object);
2387 
2388 	if (!page || s != page->slab)
2389 		/* No slab or wrong slab */
2390 		return 0;
2391 
2392 	if (!check_valid_pointer(s, page, object))
2393 		return 0;
2394 
2395 	/*
2396 	 * We could also check if the object is on the slab's freelist.
2397 	 * But this would be too expensive and it seems that the main
2398 	 * purpose of kmem_ptr_validate() is to check if the object belongs
2399 	 * to a certain slab.
2400 	 */
2401 	return 1;
2402 }
2403 EXPORT_SYMBOL(kmem_ptr_validate);
2404 
2405 /*
2406  * Determine the size of a slab object
2407  */
2408 unsigned int kmem_cache_size(struct kmem_cache *s)
2409 {
2410 	return s->objsize;
2411 }
2412 EXPORT_SYMBOL(kmem_cache_size);
2413 
2414 const char *kmem_cache_name(struct kmem_cache *s)
2415 {
2416 	return s->name;
2417 }
2418 EXPORT_SYMBOL(kmem_cache_name);
2419 
2420 static void list_slab_objects(struct kmem_cache *s, struct page *page,
2421 							const char *text)
2422 {
2423 #ifdef CONFIG_SLUB_DEBUG
2424 	void *addr = page_address(page);
2425 	void *p;
2426 	DECLARE_BITMAP(map, page->objects);
2427 
2428 	bitmap_zero(map, page->objects);
2429 	slab_err(s, page, "%s", text);
2430 	slab_lock(page);
2431 	for_each_free_object(p, s, page->freelist)
2432 		set_bit(slab_index(p, s, addr), map);
2433 
2434 	for_each_object(p, s, addr, page->objects) {
2435 
2436 		if (!test_bit(slab_index(p, s, addr), map)) {
2437 			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2438 							p, p - addr);
2439 			print_tracking(s, p);
2440 		}
2441 	}
2442 	slab_unlock(page);
2443 #endif
2444 }
2445 
2446 /*
2447  * Attempt to free all partial slabs on a node.
2448  */
2449 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
2450 {
2451 	unsigned long flags;
2452 	struct page *page, *h;
2453 
2454 	spin_lock_irqsave(&n->list_lock, flags);
2455 	list_for_each_entry_safe(page, h, &n->partial, lru) {
2456 		if (!page->inuse) {
2457 			list_del(&page->lru);
2458 			discard_slab(s, page);
2459 			n->nr_partial--;
2460 		} else {
2461 			list_slab_objects(s, page,
2462 				"Objects remaining on kmem_cache_close()");
2463 		}
2464 	}
2465 	spin_unlock_irqrestore(&n->list_lock, flags);
2466 }
2467 
2468 /*
2469  * Release all resources used by a slab cache.
2470  */
2471 static inline int kmem_cache_close(struct kmem_cache *s)
2472 {
2473 	int node;
2474 
2475 	flush_all(s);
2476 
2477 	/* Attempt to free all objects */
2478 	free_kmem_cache_cpus(s);
2479 	for_each_node_state(node, N_NORMAL_MEMORY) {
2480 		struct kmem_cache_node *n = get_node(s, node);
2481 
2482 		free_partial(s, n);
2483 		if (n->nr_partial || slabs_node(s, node))
2484 			return 1;
2485 	}
2486 	free_kmem_cache_nodes(s);
2487 	return 0;
2488 }
2489 
2490 /*
2491  * Close a cache and release the kmem_cache structure
2492  * (must be used for caches created using kmem_cache_create)
2493  */
2494 void kmem_cache_destroy(struct kmem_cache *s)
2495 {
2496 	down_write(&slub_lock);
2497 	s->refcount--;
2498 	if (!s->refcount) {
2499 		list_del(&s->list);
2500 		up_write(&slub_lock);
2501 		if (kmem_cache_close(s)) {
2502 			printk(KERN_ERR "SLUB %s: %s called for cache that "
2503 				"still has objects.\n", s->name, __func__);
2504 			dump_stack();
2505 		}
2506 		sysfs_slab_remove(s);
2507 	} else
2508 		up_write(&slub_lock);
2509 }
2510 EXPORT_SYMBOL(kmem_cache_destroy);
2511 
2512 /********************************************************************
2513  *		Kmalloc subsystem
2514  *******************************************************************/
2515 
2516 struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
2517 EXPORT_SYMBOL(kmalloc_caches);
2518 
2519 static int __init setup_slub_min_order(char *str)
2520 {
2521 	get_option(&str, &slub_min_order);
2522 
2523 	return 1;
2524 }
2525 
2526 __setup("slub_min_order=", setup_slub_min_order);
2527 
2528 static int __init setup_slub_max_order(char *str)
2529 {
2530 	get_option(&str, &slub_max_order);
2531 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
2532 
2533 	return 1;
2534 }
2535 
2536 __setup("slub_max_order=", setup_slub_max_order);
2537 
2538 static int __init setup_slub_min_objects(char *str)
2539 {
2540 	get_option(&str, &slub_min_objects);
2541 
2542 	return 1;
2543 }
2544 
2545 __setup("slub_min_objects=", setup_slub_min_objects);
2546 
2547 static int __init setup_slub_nomerge(char *str)
2548 {
2549 	slub_nomerge = 1;
2550 	return 1;
2551 }
2552 
2553 __setup("slub_nomerge", setup_slub_nomerge);
2554 
2555 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2556 		const char *name, int size, gfp_t gfp_flags)
2557 {
2558 	unsigned int flags = 0;
2559 
2560 	if (gfp_flags & SLUB_DMA)
2561 		flags = SLAB_CACHE_DMA;
2562 
2563 	/*
2564 	 * This function is called with IRQs disabled during early boot on a
2565 	 * single CPU so there's no need to take slub_lock here.
2566 	 */
2567 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2568 								flags, NULL))
2569 		goto panic;
2570 
2571 	list_add(&s->list, &slab_caches);
2572 
2573 	if (sysfs_slab_add(s))
2574 		goto panic;
2575 	return s;
2576 
2577 panic:
2578 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2579 }
2580 
2581 #ifdef CONFIG_ZONE_DMA
2582 static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
2583 
2584 static void sysfs_add_func(struct work_struct *w)
2585 {
2586 	struct kmem_cache *s;
2587 
2588 	down_write(&slub_lock);
2589 	list_for_each_entry(s, &slab_caches, list) {
2590 		if (s->flags & __SYSFS_ADD_DEFERRED) {
2591 			s->flags &= ~__SYSFS_ADD_DEFERRED;
2592 			sysfs_slab_add(s);
2593 		}
2594 	}
2595 	up_write(&slub_lock);
2596 }
2597 
2598 static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2599 
2600 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2601 {
2602 	struct kmem_cache *s;
2603 	char *text;
2604 	size_t realsize;
2605 
2606 	s = kmalloc_caches_dma[index];
2607 	if (s)
2608 		return s;
2609 
2610 	/* Dynamically create dma cache */
2611 	if (flags & __GFP_WAIT)
2612 		down_write(&slub_lock);
2613 	else {
2614 		if (!down_write_trylock(&slub_lock))
2615 			goto out;
2616 	}
2617 
2618 	if (kmalloc_caches_dma[index])
2619 		goto unlock_out;
2620 
2621 	realsize = kmalloc_caches[index].objsize;
2622 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2623 			 (unsigned int)realsize);
2624 	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2625 
2626 	if (!s || !text || !kmem_cache_open(s, flags, text,
2627 			realsize, ARCH_KMALLOC_MINALIGN,
2628 			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2629 		kfree(s);
2630 		kfree(text);
2631 		goto unlock_out;
2632 	}
2633 
2634 	list_add(&s->list, &slab_caches);
2635 	kmalloc_caches_dma[index] = s;
2636 
2637 	schedule_work(&sysfs_add_work);
2638 
2639 unlock_out:
2640 	up_write(&slub_lock);
2641 out:
2642 	return kmalloc_caches_dma[index];
2643 }
2644 #endif
2645 
2646 /*
2647  * Conversion table for small slab sizes / 8 to the index in the
2648  * kmalloc array. This is necessary for slabs < 192 since we have non power
2649  * of two cache sizes there. The size of larger slabs can be determined using
2650  * fls.
2651  */
2652 static s8 size_index[24] = {
2653 	3,	/* 8 */
2654 	4,	/* 16 */
2655 	5,	/* 24 */
2656 	5,	/* 32 */
2657 	6,	/* 40 */
2658 	6,	/* 48 */
2659 	6,	/* 56 */
2660 	6,	/* 64 */
2661 	1,	/* 72 */
2662 	1,	/* 80 */
2663 	1,	/* 88 */
2664 	1,	/* 96 */
2665 	7,	/* 104 */
2666 	7,	/* 112 */
2667 	7,	/* 120 */
2668 	7,	/* 128 */
2669 	2,	/* 136 */
2670 	2,	/* 144 */
2671 	2,	/* 152 */
2672 	2,	/* 160 */
2673 	2,	/* 168 */
2674 	2,	/* 176 */
2675 	2,	/* 184 */
2676 	2	/* 192 */
2677 };
2678 
2679 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2680 {
2681 	int index;
2682 
2683 	if (size <= 192) {
2684 		if (!size)
2685 			return ZERO_SIZE_PTR;
2686 
2687 		index = size_index[(size - 1) / 8];
2688 	} else
2689 		index = fls(size - 1);
2690 
2691 #ifdef CONFIG_ZONE_DMA
2692 	if (unlikely((flags & SLUB_DMA)))
2693 		return dma_kmalloc_cache(index, flags);
2694 
2695 #endif
2696 	return &kmalloc_caches[index];
2697 }
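
/*
 * Worked example (illustrative only): kmalloc(100) computes
 * size_index[(100 - 1) / 8] = size_index[12] = 7 and thus uses
 * kmalloc_caches[7], the 128 byte cache. kmalloc(200) is above 192,
 * so index = fls(199) = 8 selects the 256 byte cache.
 */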
2698 
2699 void *__kmalloc(size_t size, gfp_t flags)
2700 {
2701 	struct kmem_cache *s;
2702 	void *ret;
2703 
2704 	if (unlikely(size > SLUB_MAX_SIZE))
2705 		return kmalloc_large(size, flags);
2706 
2707 	s = get_slab(size, flags);
2708 
2709 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2710 		return s;
2711 
2712 	ret = slab_alloc(s, flags, -1, _RET_IP_);
2713 
2714 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
2715 
2716 	return ret;
2717 }
2718 EXPORT_SYMBOL(__kmalloc);
2719 
2720 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2721 {
2722 	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2723 						get_order(size));
2724 
2725 	if (page)
2726 		return page_address(page);
2727 	else
2728 		return NULL;
2729 }
2730 
2731 #ifdef CONFIG_NUMA
2732 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2733 {
2734 	struct kmem_cache *s;
2735 	void *ret;
2736 
2737 	if (unlikely(size > SLUB_MAX_SIZE)) {
2738 		ret = kmalloc_large_node(size, flags, node);
2739 
2740 		trace_kmalloc_node(_RET_IP_, ret,
2741 				   size, PAGE_SIZE << get_order(size),
2742 				   flags, node);
2743 
2744 		return ret;
2745 	}
2746 
2747 	s = get_slab(size, flags);
2748 
2749 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2750 		return s;
2751 
2752 	ret = slab_alloc(s, flags, node, _RET_IP_);
2753 
2754 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
2755 
2756 	return ret;
2757 }
2758 EXPORT_SYMBOL(__kmalloc_node);
2759 #endif
2760 
2761 size_t ksize(const void *object)
2762 {
2763 	struct page *page;
2764 	struct kmem_cache *s;
2765 
2766 	if (unlikely(object == ZERO_SIZE_PTR))
2767 		return 0;
2768 
2769 	page = virt_to_head_page(object);
2770 
2771 	if (unlikely(!PageSlab(page))) {
2772 		WARN_ON(!PageCompound(page));
2773 		return PAGE_SIZE << compound_order(page);
2774 	}
2775 	s = page->slab;
2776 
2777 #ifdef CONFIG_SLUB_DEBUG
2778 	/*
2779 	 * Debugging requires use of the padding between object
2780 	 * and whatever may come after it.
2781 	 */
2782 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2783 		return s->objsize;
2784 
2785 #endif
2786 	/*
2787 	 * If we have the need to store the freelist pointer
2788 	 * back there or track user information then we can
2789 	 * only use the space before that information.
2790 	 */
2791 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2792 		return s->inuse;
2793 	/*
2794 	 * Else we can use all the padding etc for the allocation
2795 	 */
2796 	return s->size;
2797 }
2798 EXPORT_SYMBOL(ksize);
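
/*
 * Example (illustrative only, assuming no debug flags): for
 * p = kmalloc(100), ksize(p) returns 128 because the allocation was
 * served from the 128 byte cache and the caller may use the full
 * rounded-up size.
 */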
2799 
2800 void kfree(const void *x)
2801 {
2802 	struct page *page;
2803 	void *object = (void *)x;
2804 
2805 	trace_kfree(_RET_IP_, x);
2806 
2807 	if (unlikely(ZERO_OR_NULL_PTR(x)))
2808 		return;
2809 
2810 	page = virt_to_head_page(x);
2811 	if (unlikely(!PageSlab(page))) {
2812 		BUG_ON(!PageCompound(page));
2813 		put_page(page);
2814 		return;
2815 	}
2816 	slab_free(page->slab, page, object, _RET_IP_);
2817 }
2818 EXPORT_SYMBOL(kfree);
2819 
2820 /*
2821  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2822  * the remaining slabs by the number of items in use. The slabs with the
2823  * most items in use come first. New allocations will then fill those up
2824  * and thus they can be removed from the partial lists.
2825  *
2826  * The slabs with the least items are placed last. This results in them
2827  * being allocated from last, increasing the chance that their remaining
2828  * objects are freed and the slabs can eventually be discarded.
2829  */
2830 int kmem_cache_shrink(struct kmem_cache *s)
2831 {
2832 	int node;
2833 	int i;
2834 	struct kmem_cache_node *n;
2835 	struct page *page;
2836 	struct page *t;
2837 	int objects = oo_objects(s->max);
2838 	struct list_head *slabs_by_inuse =
2839 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2840 	unsigned long flags;
2841 
2842 	if (!slabs_by_inuse)
2843 		return -ENOMEM;
2844 
2845 	flush_all(s);
2846 	for_each_node_state(node, N_NORMAL_MEMORY) {
2847 		n = get_node(s, node);
2848 
2849 		if (!n->nr_partial)
2850 			continue;
2851 
2852 		for (i = 0; i < objects; i++)
2853 			INIT_LIST_HEAD(slabs_by_inuse + i);
2854 
2855 		spin_lock_irqsave(&n->list_lock, flags);
2856 
2857 		/*
2858 		 * Build lists indexed by the items in use in each slab.
2859 		 *
2860 		 * Note that concurrent frees may occur while we hold the
2861 		 * list_lock. page->inuse here is the upper limit.
2862 		 */
2863 		list_for_each_entry_safe(page, t, &n->partial, lru) {
2864 			if (!page->inuse && slab_trylock(page)) {
2865 				/*
2866 				 * Must hold slab lock here because slab_free
2867 				 * may have freed the last object and be
2868 				 * waiting to release the slab.
2869 				 */
2870 				list_del(&page->lru);
2871 				n->nr_partial--;
2872 				slab_unlock(page);
2873 				discard_slab(s, page);
2874 			} else {
2875 				list_move(&page->lru,
2876 				slabs_by_inuse + page->inuse);
2877 			}
2878 		}
2879 
2880 		/*
2881 		 * Rebuild the partial list with the slabs filled up most
2882 		 * first and the least used slabs at the end.
2883 		 */
2884 		for (i = objects - 1; i >= 0; i--)
2885 			list_splice(slabs_by_inuse + i, n->partial.prev);
2886 
2887 		spin_unlock_irqrestore(&n->list_lock, flags);
2888 	}
2889 
2890 	kfree(slabs_by_inuse);
2891 	return 0;
2892 }
2893 EXPORT_SYMBOL(kmem_cache_shrink);
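
/*
 * Usage sketch (illustrative only; my_cache is a hypothetical cache):
 * after releasing a large batch of objects, a cache owner can return
 * now-empty slabs to the page allocator with
 *
 *	err = kmem_cache_shrink(my_cache);
 *
 * A nonzero return (-ENOMEM) only means the temporary sort array could
 * not be allocated; the cache itself is left intact.
 */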
2894 
2895 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2896 static int slab_mem_going_offline_callback(void *arg)
2897 {
2898 	struct kmem_cache *s;
2899 
2900 	down_read(&slub_lock);
2901 	list_for_each_entry(s, &slab_caches, list)
2902 		kmem_cache_shrink(s);
2903 	up_read(&slub_lock);
2904 
2905 	return 0;
2906 }
2907 
2908 static void slab_mem_offline_callback(void *arg)
2909 {
2910 	struct kmem_cache_node *n;
2911 	struct kmem_cache *s;
2912 	struct memory_notify *marg = arg;
2913 	int offline_node;
2914 
2915 	offline_node = marg->status_change_nid;
2916 
2917 	/*
2918 	 * If the node still has available memory then we still need its
2919 	 * kmem_cache_node, so there is nothing to do.
2920 	 */
2921 	if (offline_node < 0)
2922 		return;
2923 
2924 	down_read(&slub_lock);
2925 	list_for_each_entry(s, &slab_caches, list) {
2926 		n = get_node(s, offline_node);
2927 		if (n) {
2928 			/*
2929 			 * if n->nr_slabs > 0, slabs still exist on the node
2930 			 * that is going down. We were unable to free them,
2931 			 * and the offline_pages() function shouldn't call this
2932 			 * callback. So, we must fail.
2933 			 */
2934 			BUG_ON(slabs_node(s, offline_node));
2935 
2936 			s->node[offline_node] = NULL;
2937 			kmem_cache_free(kmalloc_caches, n);
2938 		}
2939 	}
2940 	up_read(&slub_lock);
2941 }
2942 
2943 static int slab_mem_going_online_callback(void *arg)
2944 {
2945 	struct kmem_cache_node *n;
2946 	struct kmem_cache *s;
2947 	struct memory_notify *marg = arg;
2948 	int nid = marg->status_change_nid;
2949 	int ret = 0;
2950 
2951 	/*
2952 	 * If the node's memory is already available, then kmem_cache_node is
2953 	 * already created. Nothing to do.
2954 	 */
2955 	if (nid < 0)
2956 		return 0;
2957 
2958 	/*
2959 	 * We are bringing a node online. No memory is available yet. We must
2960 	 * allocate a kmem_cache_node structure in order to bring the node
2961 	 * online.
2962 	 */
2963 	down_read(&slub_lock);
2964 	list_for_each_entry(s, &slab_caches, list) {
2965 		/*
2966 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
2967 		 *      since memory is not yet available from the node that
2968 		 *      is brought up.
2969 		 */
2970 		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2971 		if (!n) {
2972 			ret = -ENOMEM;
2973 			goto out;
2974 		}
2975 		init_kmem_cache_node(n, s);
2976 		s->node[nid] = n;
2977 	}
2978 out:
2979 	up_read(&slub_lock);
2980 	return ret;
2981 }
2982 
2983 static int slab_memory_callback(struct notifier_block *self,
2984 				unsigned long action, void *arg)
2985 {
2986 	int ret = 0;
2987 
2988 	switch (action) {
2989 	case MEM_GOING_ONLINE:
2990 		ret = slab_mem_going_online_callback(arg);
2991 		break;
2992 	case MEM_GOING_OFFLINE:
2993 		ret = slab_mem_going_offline_callback(arg);
2994 		break;
2995 	case MEM_OFFLINE:
2996 	case MEM_CANCEL_ONLINE:
2997 		slab_mem_offline_callback(arg);
2998 		break;
2999 	case MEM_ONLINE:
3000 	case MEM_CANCEL_OFFLINE:
3001 		break;
3002 	}
3003 	if (ret)
3004 		ret = notifier_from_errno(ret);
3005 	else
3006 		ret = NOTIFY_OK;
3007 	return ret;
3008 }
3009 
3010 #endif /* CONFIG_MEMORY_HOTPLUG */
3011 
3012 /********************************************************************
3013  *			Basic setup of slabs
3014  *******************************************************************/
3015 
3016 void __init kmem_cache_init(void)
3017 {
3018 	int i;
3019 	int caches = 0;
3020 
3021 	init_alloc_cpu();
3022 
3023 #ifdef CONFIG_NUMA
3024 	/*
3025 	 * Must first have the slab cache available for the allocations of the
3026 	 * struct kmem_cache_node's. There is special bootstrap code in
3027 	 * kmem_cache_open for slab_state == DOWN.
3028 	 */
3029 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
3030 		sizeof(struct kmem_cache_node), GFP_NOWAIT);
3031 	kmalloc_caches[0].refcount = -1;
3032 	caches++;
3033 
3034 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3035 #endif
3036 
3037 	/* Able to allocate the per node structures */
3038 	slab_state = PARTIAL;
3039 
3040 	/* Caches that are not of power-of-two size */
3041 	if (KMALLOC_MIN_SIZE <= 64) {
3042 		create_kmalloc_cache(&kmalloc_caches[1],
3043 				"kmalloc-96", 96, GFP_NOWAIT);
3044 		caches++;
3045 		create_kmalloc_cache(&kmalloc_caches[2],
3046 				"kmalloc-192", 192, GFP_NOWAIT);
3047 		caches++;
3048 	}
3049 
3050 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3051 		create_kmalloc_cache(&kmalloc_caches[i],
3052 			"kmalloc", 1 << i, GFP_NOWAIT);
3053 		caches++;
3054 	}
3055 
3056 
3057 	/*
3058 	 * Patch up the size_index table if we have strange large alignment
3059 	 * requirements for the kmalloc array. This is only the case for
3060 	 * MIPS it seems. The standard arches will not generate any code here.
3061 	 *
3062 	 * Largest permitted alignment is 256 bytes due to the way we
3063 	 * handle the index determination for the smaller caches.
3064 	 *
3065 	 * Make sure that nothing crazy happens if someone starts tinkering
3066 	 * around with ARCH_KMALLOC_MINALIGN
3067 	 */
3068 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3069 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3070 
3071 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
3072 		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
3073 
3074 	if (KMALLOC_MIN_SIZE == 128) {
3075 		/*
3076 		 * The 192 byte sized cache is not used if the alignment
3077 		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
3078 		 * instead.
3079 		 */
3080 		for (i = 128 + 8; i <= 192; i += 8)
3081 			size_index[(i - 1) / 8] = 8;
3082 	}
3083 
3084 	slab_state = UP;
3085 
3086 	/* Provide the correct kmalloc names now that the caches are up */
3087 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
3088 		kmalloc_caches[i].name =
3089 			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3090 
3091 #ifdef CONFIG_SMP
3092 	register_cpu_notifier(&slab_notifier);
3093 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
3094 				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
3095 #else
3096 	kmem_size = sizeof(struct kmem_cache);
3097 #endif
3098 
3099 	printk(KERN_INFO
3100 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3101 		" CPUs=%d, Nodes=%d\n",
3102 		caches, cache_line_size(),
3103 		slub_min_order, slub_max_order, slub_min_objects,
3104 		nr_cpu_ids, nr_node_ids);
3105 }
3106 
3107 /*
3108  * Find a mergeable slab cache
3109  */
3110 static int slab_unmergeable(struct kmem_cache *s)
3111 {
3112 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3113 		return 1;
3114 
3115 	if (s->ctor)
3116 		return 1;
3117 
3118 	/*
3119 	 * We may have set a slab to be unmergeable during bootstrap.
3120 	 */
3121 	if (s->refcount < 0)
3122 		return 1;
3123 
3124 	return 0;
3125 }
3126 
3127 static struct kmem_cache *find_mergeable(size_t size,
3128 		size_t align, unsigned long flags, const char *name,
3129 		void (*ctor)(void *))
3130 {
3131 	struct kmem_cache *s;
3132 
3133 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3134 		return NULL;
3135 
3136 	if (ctor)
3137 		return NULL;
3138 
3139 	size = ALIGN(size, sizeof(void *));
3140 	align = calculate_alignment(flags, align, size);
3141 	size = ALIGN(size, align);
3142 	flags = kmem_cache_flags(size, flags, name, NULL);
3143 
3144 	list_for_each_entry(s, &slab_caches, list) {
3145 		if (slab_unmergeable(s))
3146 			continue;
3147 
3148 		if (size > s->size)
3149 			continue;
3150 
3151 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3152 				continue;
3153 		/*
3154 		 * Check if alignment is compatible.
3155 		 * Courtesy of Adrian Drzewiecki
3156 		 */
3157 		if ((s->size & ~(align - 1)) != s->size)
3158 			continue;
3159 
3160 		if (s->size - size >= sizeof(void *))
3161 			continue;
3162 
3163 		return s;
3164 	}
3165 	return NULL;
3166 }
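
/*
 * Merge example (illustrative only, assuming 8 byte pointers and no
 * debug flags): a cache created for a 60 byte object with default flags
 * and no ctor computes size = 64, which matches the existing 64 byte
 * kmalloc cache exactly (same flags, compatible alignment, less than
 * sizeof(void *) wasted), so the new cache becomes an alias of it.
 */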
3167 
3168 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3169 		size_t align, unsigned long flags, void (*ctor)(void *))
3170 {
3171 	struct kmem_cache *s;
3172 
3173 	down_write(&slub_lock);
3174 	s = find_mergeable(size, align, flags, name, ctor);
3175 	if (s) {
3176 		int cpu;
3177 
3178 		s->refcount++;
3179 		/*
3180 		 * Adjust the object sizes so that we clear
3181 		 * the complete object on kzalloc.
3182 		 */
3183 		s->objsize = max(s->objsize, (int)size);
3184 
3185 		/*
3186 		 * And then we need to update the object size in the
3187 		 * per cpu structures
3188 		 */
3189 		for_each_online_cpu(cpu)
3190 			get_cpu_slab(s, cpu)->objsize = s->objsize;
3191 
3192 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3193 		up_write(&slub_lock);
3194 
3195 		if (sysfs_slab_alias(s, name)) {
3196 			down_write(&slub_lock);
3197 			s->refcount--;
3198 			up_write(&slub_lock);
3199 			goto err;
3200 		}
3201 		return s;
3202 	}
3203 
3204 	s = kmalloc(kmem_size, GFP_KERNEL);
3205 	if (s) {
3206 		if (kmem_cache_open(s, GFP_KERNEL, name,
3207 				size, align, flags, ctor)) {
3208 			list_add(&s->list, &slab_caches);
3209 			up_write(&slub_lock);
3210 			if (sysfs_slab_add(s)) {
3211 				down_write(&slub_lock);
3212 				list_del(&s->list);
3213 				up_write(&slub_lock);
3214 				kfree(s);
3215 				goto err;
3216 			}
3217 			return s;
3218 		}
3219 		kfree(s);
3220 	}
3221 	up_write(&slub_lock);
3222 
3223 err:
3224 	if (flags & SLAB_PANIC)
3225 		panic("Cannot create slabcache %s\n", name);
3226 	else
3227 		s = NULL;
3228 	return s;
3229 }
3230 EXPORT_SYMBOL(kmem_cache_create);
3231 
3232 #ifdef CONFIG_SMP
3233 /*
3234  * Use the cpu notifier to ensure that the cpu slabs are flushed when
3235  * necessary.
3236  */
3237 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3238 		unsigned long action, void *hcpu)
3239 {
3240 	long cpu = (long)hcpu;
3241 	struct kmem_cache *s;
3242 	unsigned long flags;
3243 
3244 	switch (action) {
3245 	case CPU_UP_PREPARE:
3246 	case CPU_UP_PREPARE_FROZEN:
3247 		init_alloc_cpu_cpu(cpu);
3248 		down_read(&slub_lock);
3249 		list_for_each_entry(s, &slab_caches, list)
3250 			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3251 							GFP_KERNEL);
3252 		up_read(&slub_lock);
3253 		break;
3254 
3255 	case CPU_UP_CANCELED:
3256 	case CPU_UP_CANCELED_FROZEN:
3257 	case CPU_DEAD:
3258 	case CPU_DEAD_FROZEN:
3259 		down_read(&slub_lock);
3260 		list_for_each_entry(s, &slab_caches, list) {
3261 			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3262 
3263 			local_irq_save(flags);
3264 			__flush_cpu_slab(s, cpu);
3265 			local_irq_restore(flags);
3266 			free_kmem_cache_cpu(c, cpu);
3267 			s->cpu_slab[cpu] = NULL;
3268 		}
3269 		up_read(&slub_lock);
3270 		break;
3271 	default:
3272 		break;
3273 	}
3274 	return NOTIFY_OK;
3275 }
3276 
3277 static struct notifier_block __cpuinitdata slab_notifier = {
3278 	.notifier_call = slab_cpuup_callback
3279 };
3280 
3281 #endif
3282 
3283 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3284 {
3285 	struct kmem_cache *s;
3286 	void *ret;
3287 
3288 	if (unlikely(size > SLUB_MAX_SIZE))
3289 		return kmalloc_large(size, gfpflags);
3290 
3291 	s = get_slab(size, gfpflags);
3292 
3293 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3294 		return s;
3295 
3296 	ret = slab_alloc(s, gfpflags, -1, caller);
3297 
3298 	/* Honor the call site pointer we received. */
3299 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
3300 
3301 	return ret;
3302 }
3303 
3304 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3305 					int node, unsigned long caller)
3306 {
3307 	struct kmem_cache *s;
3308 	void *ret;
3309 
3310 	if (unlikely(size > SLUB_MAX_SIZE))
3311 		return kmalloc_large_node(size, gfpflags, node);
3312 
3313 	s = get_slab(size, gfpflags);
3314 
3315 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3316 		return s;
3317 
3318 	ret = slab_alloc(s, gfpflags, node, caller);
3319 
3320 	/* Honor the call site pointer we received. */
3321 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
3322 
3323 	return ret;
3324 }
3325 
3326 #ifdef CONFIG_SLUB_DEBUG
3327 static unsigned long count_partial(struct kmem_cache_node *n,
3328 					int (*get_count)(struct page *))
3329 {
3330 	unsigned long flags;
3331 	unsigned long x = 0;
3332 	struct page *page;
3333 
3334 	spin_lock_irqsave(&n->list_lock, flags);
3335 	list_for_each_entry(page, &n->partial, lru)
3336 		x += get_count(page);
3337 	spin_unlock_irqrestore(&n->list_lock, flags);
3338 	return x;
3339 }
3340 
3341 static int count_inuse(struct page *page)
3342 {
3343 	return page->inuse;
3344 }
3345 
3346 static int count_total(struct page *page)
3347 {
3348 	return page->objects;
3349 }
3350 
3351 static int count_free(struct page *page)
3352 {
3353 	return page->objects - page->inuse;
3354 }
3355 
3356 static int validate_slab(struct kmem_cache *s, struct page *page,
3357 						unsigned long *map)
3358 {
3359 	void *p;
3360 	void *addr = page_address(page);
3361 
3362 	if (!check_slab(s, page) ||
3363 			!on_freelist(s, page, NULL))
3364 		return 0;
3365 
3366 	/* Now we know that a valid freelist exists */
3367 	bitmap_zero(map, page->objects);
3368 
3369 	for_each_free_object(p, s, page->freelist) {
3370 		set_bit(slab_index(p, s, addr), map);
3371 		if (!check_object(s, page, p, 0))
3372 			return 0;
3373 	}
3374 
3375 	for_each_object(p, s, addr, page->objects)
3376 		if (!test_bit(slab_index(p, s, addr), map))
3377 			if (!check_object(s, page, p, 1))
3378 				return 0;
3379 	return 1;
3380 }
3381 
3382 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3383 						unsigned long *map)
3384 {
3385 	if (slab_trylock(page)) {
3386 		validate_slab(s, page, map);
3387 		slab_unlock(page);
3388 	} else
3389 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3390 			s->name, page);
3391 
3392 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3393 		if (!PageSlubDebug(page))
3394 			printk(KERN_ERR "SLUB %s: SlubDebug not set "
3395 				"on slab 0x%p\n", s->name, page);
3396 	} else {
3397 		if (PageSlubDebug(page))
3398 			printk(KERN_ERR "SLUB %s: SlubDebug set on "
3399 				"slab 0x%p\n", s->name, page);
3400 	}
3401 }
3402 
3403 static int validate_slab_node(struct kmem_cache *s,
3404 		struct kmem_cache_node *n, unsigned long *map)
3405 {
3406 	unsigned long count = 0;
3407 	struct page *page;
3408 	unsigned long flags;
3409 
3410 	spin_lock_irqsave(&n->list_lock, flags);
3411 
3412 	list_for_each_entry(page, &n->partial, lru) {
3413 		validate_slab_slab(s, page, map);
3414 		count++;
3415 	}
3416 	if (count != n->nr_partial)
3417 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3418 			"counter=%ld\n", s->name, count, n->nr_partial);
3419 
3420 	if (!(s->flags & SLAB_STORE_USER))
3421 		goto out;
3422 
3423 	list_for_each_entry(page, &n->full, lru) {
3424 		validate_slab_slab(s, page, map);
3425 		count++;
3426 	}
3427 	if (count != atomic_long_read(&n->nr_slabs))
3428 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3429 			"counter=%ld\n", s->name, count,
3430 			atomic_long_read(&n->nr_slabs));
3431 
3432 out:
3433 	spin_unlock_irqrestore(&n->list_lock, flags);
3434 	return count;
3435 }
3436 
3437 static long validate_slab_cache(struct kmem_cache *s)
3438 {
3439 	int node;
3440 	unsigned long count = 0;
3441 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3442 				sizeof(unsigned long), GFP_KERNEL);
3443 
3444 	if (!map)
3445 		return -ENOMEM;
3446 
3447 	flush_all(s);
3448 	for_each_node_state(node, N_NORMAL_MEMORY) {
3449 		struct kmem_cache_node *n = get_node(s, node);
3450 
3451 		count += validate_slab_node(s, n, map);
3452 	}
3453 	kfree(map);
3454 	return count;
3455 }
3456 
3457 #ifdef SLUB_RESILIENCY_TEST
3458 static void resiliency_test(void)
3459 {
3460 	u8 *p;
3461 
3462 	printk(KERN_ERR "SLUB resiliency testing\n");
3463 	printk(KERN_ERR "-----------------------\n");
3464 	printk(KERN_ERR "A. Corruption after allocation\n");
3465 
3466 	p = kzalloc(16, GFP_KERNEL);
3467 	p[16] = 0x12;
3468 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3469 			" 0x12->0x%p\n\n", p + 16);
3470 
3471 	validate_slab_cache(kmalloc_caches + 4);
3472 
3473 	/* Hmmm... The next two are dangerous */
3474 	p = kzalloc(32, GFP_KERNEL);
3475 	p[32 + sizeof(void *)] = 0x34;
3476 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3477 			" 0x34 -> -0x%p\n", p);
3478 	printk(KERN_ERR
3479 		"If allocated object is overwritten then not detectable\n\n");
3480 
3481 	validate_slab_cache(kmalloc_caches + 5);
3482 	p = kzalloc(64, GFP_KERNEL);
3483 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3484 	*p = 0x56;
3485 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3486 									p);
3487 	printk(KERN_ERR
3488 		"If allocated object is overwritten then not detectable\n\n");
3489 	validate_slab_cache(kmalloc_caches + 6);
3490 
3491 	printk(KERN_ERR "\nB. Corruption after free\n");
3492 	p = kzalloc(128, GFP_KERNEL);
3493 	kfree(p);
3494 	*p = 0x78;
3495 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3496 	validate_slab_cache(kmalloc_caches + 7);
3497 
3498 	p = kzalloc(256, GFP_KERNEL);
3499 	kfree(p);
3500 	p[50] = 0x9a;
3501 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3502 			p);
3503 	validate_slab_cache(kmalloc_caches + 8);
3504 
3505 	p = kzalloc(512, GFP_KERNEL);
3506 	kfree(p);
3507 	p[512] = 0xab;
3508 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3509 	validate_slab_cache(kmalloc_caches + 9);
3510 }
3511 #else
3512 static void resiliency_test(void) {}
3513 #endif
3514 
3515 /*
3516  * Generate lists of code addresses where slabcache objects are allocated
3517  * and freed.
3518  */
3519 
3520 struct location {
3521 	unsigned long count;
3522 	unsigned long addr;
3523 	long long sum_time;
3524 	long min_time;
3525 	long max_time;
3526 	long min_pid;
3527 	long max_pid;
3528 	DECLARE_BITMAP(cpus, NR_CPUS);
3529 	nodemask_t nodes;
3530 };
3531 
3532 struct loc_track {
3533 	unsigned long max;
3534 	unsigned long count;
3535 	struct location *loc;
3536 };
3537 
3538 static void free_loc_track(struct loc_track *t)
3539 {
3540 	if (t->max)
3541 		free_pages((unsigned long)t->loc,
3542 			get_order(sizeof(struct location) * t->max));
3543 }
3544 
3545 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3546 {
3547 	struct location *l;
3548 	int order;
3549 
3550 	order = get_order(sizeof(struct location) * max);
3551 
3552 	l = (void *)__get_free_pages(flags, order);
3553 	if (!l)
3554 		return 0;
3555 
3556 	if (t->count) {
3557 		memcpy(l, t->loc, sizeof(struct location) * t->count);
3558 		free_loc_track(t);
3559 	}
3560 	t->max = max;
3561 	t->loc = l;
3562 	return 1;
3563 }
3564 
3565 static int add_location(struct loc_track *t, struct kmem_cache *s,
3566 				const struct track *track)
3567 {
3568 	long start, end, pos;
3569 	struct location *l;
3570 	unsigned long caddr;
3571 	unsigned long age = jiffies - track->when;
3572 
3573 	start = -1;
3574 	end = t->count;
3575 
3576 	for ( ; ; ) {
3577 		pos = start + (end - start + 1) / 2;
3578 
3579 		/*
3580 		 * There is nothing at "end". If we end up there
3581 		 * we need to insert the new element before "end".
3582 		 */
3583 		if (pos == end)
3584 			break;
3585 
3586 		caddr = t->loc[pos].addr;
3587 		if (track->addr == caddr) {
3588 
3589 			l = &t->loc[pos];
3590 			l->count++;
3591 			if (track->when) {
3592 				l->sum_time += age;
3593 				if (age < l->min_time)
3594 					l->min_time = age;
3595 				if (age > l->max_time)
3596 					l->max_time = age;
3597 
3598 				if (track->pid < l->min_pid)
3599 					l->min_pid = track->pid;
3600 				if (track->pid > l->max_pid)
3601 					l->max_pid = track->pid;
3602 
3603 				cpumask_set_cpu(track->cpu,
3604 						to_cpumask(l->cpus));
3605 			}
3606 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3607 			return 1;
3608 		}
3609 
3610 		if (track->addr < caddr)
3611 			end = pos;
3612 		else
3613 			start = pos;
3614 	}
3615 
3616 	/*
3617 	 * Not found. Insert new tracking element.
3618 	 */
3619 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3620 		return 0;
3621 
3622 	l = t->loc + pos;
3623 	if (pos < t->count)
3624 		memmove(l + 1, l,
3625 			(t->count - pos) * sizeof(struct location));
3626 	t->count++;
3627 	l->count = 1;
3628 	l->addr = track->addr;
3629 	l->sum_time = age;
3630 	l->min_time = age;
3631 	l->max_time = age;
3632 	l->min_pid = track->pid;
3633 	l->max_pid = track->pid;
3634 	cpumask_clear(to_cpumask(l->cpus));
3635 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
3636 	nodes_clear(l->nodes);
3637 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3638 	return 1;
3639 }
3640 
3641 static void process_slab(struct loc_track *t, struct kmem_cache *s,
3642 		struct page *page, enum track_item alloc)
3643 {
3644 	void *addr = page_address(page);
3645 	DECLARE_BITMAP(map, page->objects);
3646 	void *p;
3647 
3648 	bitmap_zero(map, page->objects);
3649 	for_each_free_object(p, s, page->freelist)
3650 		set_bit(slab_index(p, s, addr), map);
3651 
3652 	for_each_object(p, s, addr, page->objects)
3653 		if (!test_bit(slab_index(p, s, addr), map))
3654 			add_location(t, s, get_track(s, p, alloc));
3655 }
3656 
3657 static int list_locations(struct kmem_cache *s, char *buf,
3658 					enum track_item alloc)
3659 {
3660 	int len = 0;
3661 	unsigned long i;
3662 	struct loc_track t = { 0, 0, NULL };
3663 	int node;
3664 
3665 	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3666 			GFP_TEMPORARY))
3667 		return sprintf(buf, "Out of memory\n");
3668 
3669 	/* Push back cpu slabs */
3670 	flush_all(s);
3671 
3672 	for_each_node_state(node, N_NORMAL_MEMORY) {
3673 		struct kmem_cache_node *n = get_node(s, node);
3674 		unsigned long flags;
3675 		struct page *page;
3676 
3677 		if (!atomic_long_read(&n->nr_slabs))
3678 			continue;
3679 
3680 		spin_lock_irqsave(&n->list_lock, flags);
3681 		list_for_each_entry(page, &n->partial, lru)
3682 			process_slab(&t, s, page, alloc);
3683 		list_for_each_entry(page, &n->full, lru)
3684 			process_slab(&t, s, page, alloc);
3685 		spin_unlock_irqrestore(&n->list_lock, flags);
3686 	}
3687 
3688 	for (i = 0; i < t.count; i++) {
3689 		struct location *l = &t.loc[i];
3690 
3691 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
3692 			break;
3693 		len += sprintf(buf + len, "%7ld ", l->count);
3694 
3695 		if (l->addr)
3696 			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3697 		else
3698 			len += sprintf(buf + len, "<not-available>");
3699 
3700 		if (l->sum_time != l->min_time) {
3701 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3702 				l->min_time,
3703 				(long)div_u64(l->sum_time, l->count),
3704 				l->max_time);
3705 		} else
3706 			len += sprintf(buf + len, " age=%ld",
3707 				l->min_time);
3708 
3709 		if (l->min_pid != l->max_pid)
3710 			len += sprintf(buf + len, " pid=%ld-%ld",
3711 				l->min_pid, l->max_pid);
3712 		else
3713 			len += sprintf(buf + len, " pid=%ld",
3714 				l->min_pid);
3715 
3716 		if (num_online_cpus() > 1 &&
3717 				!cpumask_empty(to_cpumask(l->cpus)) &&
3718 				len < PAGE_SIZE - 60) {
3719 			len += sprintf(buf + len, " cpus=");
3720 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3721 						 to_cpumask(l->cpus));
3722 		}
3723 
3724 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3725 				len < PAGE_SIZE - 60) {
3726 			len += sprintf(buf + len, " nodes=");
3727 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3728 					l->nodes);
3729 		}
3730 
3731 		len += sprintf(buf + len, "\n");
3732 	}
3733 
3734 	free_loc_track(&t);
3735 	if (!t.count)
3736 		len += sprintf(buf, "No data\n");
3737 	return len;
3738 }
3739 
3740 enum slab_stat_type {
3741 	SL_ALL,			/* All slabs */
3742 	SL_PARTIAL,		/* Only partially allocated slabs */
3743 	SL_CPU,			/* Only slabs used for cpu caches */
3744 	SL_OBJECTS,		/* Determine allocated objects not slabs */
3745 	SL_TOTAL		/* Determine object capacity not slabs */
3746 };
3747 
3748 #define SO_ALL		(1 << SL_ALL)
3749 #define SO_PARTIAL	(1 << SL_PARTIAL)
3750 #define SO_CPU		(1 << SL_CPU)
3751 #define SO_OBJECTS	(1 << SL_OBJECTS)
3752 #define SO_TOTAL	(1 << SL_TOTAL)
3753 
3754 static ssize_t show_slab_objects(struct kmem_cache *s,
3755 			    char *buf, unsigned long flags)
3756 {
3757 	unsigned long total = 0;
3758 	int node;
3759 	int x;
3760 	unsigned long *nodes;
3761 	unsigned long *per_cpu;
3762 
3763 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3764 	if (!nodes)
3765 		return -ENOMEM;
3766 	per_cpu = nodes + nr_node_ids;
3767 
3768 	if (flags & SO_CPU) {
3769 		int cpu;
3770 
3771 		for_each_possible_cpu(cpu) {
3772 			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3773 
3774 			if (!c || c->node < 0)
3775 				continue;
3776 
3777 			if (c->page) {
3778 				if (flags & SO_TOTAL)
3779 					x = c->page->objects;
3780 				else if (flags & SO_OBJECTS)
3781 					x = c->page->inuse;
3782 				else
3783 					x = 1;
3784 
3785 				total += x;
3786 				nodes[c->node] += x;
3787 			}
3788 			per_cpu[c->node]++;
3789 		}
3790 	}
3791 
3792 	if (flags & SO_ALL) {
3793 		for_each_node_state(node, N_NORMAL_MEMORY) {
3794 			struct kmem_cache_node *n = get_node(s, node);
3795 
3796 			if (flags & SO_TOTAL)
3797 				x = atomic_long_read(&n->total_objects);
3798 			else if (flags & SO_OBJECTS)
3799 				x = atomic_long_read(&n->total_objects) -
3800 					count_partial(n, count_free);
3802 			else
3803 				x = atomic_long_read(&n->nr_slabs);
3804 			total += x;
3805 			nodes[node] += x;
3806 		}
3807 
3808 	} else if (flags & SO_PARTIAL) {
3809 		for_each_node_state(node, N_NORMAL_MEMORY) {
3810 			struct kmem_cache_node *n = get_node(s, node);
3811 
3812 			if (flags & SO_TOTAL)
3813 				x = count_partial(n, count_total);
3814 			else if (flags & SO_OBJECTS)
3815 				x = count_partial(n, count_inuse);
3816 			else
3817 				x = n->nr_partial;
3818 			total += x;
3819 			nodes[node] += x;
3820 		}
3821 	}
3822 	x = sprintf(buf, "%lu", total);
3823 #ifdef CONFIG_NUMA
3824 	for_each_node_state(node, N_NORMAL_MEMORY)
3825 		if (nodes[node])
3826 			x += sprintf(buf + x, " N%d=%lu",
3827 					node, nodes[node]);
3828 #endif
3829 	kfree(nodes);
3830 	return x + sprintf(buf + x, "\n");
3831 }
3832 
3833 static int any_slab_objects(struct kmem_cache *s)
3834 {
3835 	int node;
3836 
3837 	for_each_online_node(node) {
3838 		struct kmem_cache_node *n = get_node(s, node);
3839 
3840 		if (!n)
3841 			continue;
3842 
3843 		if (atomic_long_read(&n->total_objects))
3844 			return 1;
3845 	}
3846 	return 0;
3847 }
3848 
3849 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3850 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
3851 
3852 struct slab_attribute {
3853 	struct attribute attr;
3854 	ssize_t (*show)(struct kmem_cache *s, char *buf);
3855 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3856 };
3857 
3858 #define SLAB_ATTR_RO(_name) \
3859 	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3860 
3861 #define SLAB_ATTR(_name) \
3862 	static struct slab_attribute _name##_attr =  \
3863 	__ATTR(_name, 0644, _name##_show, _name##_store)
3864 
3865 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3866 {
3867 	return sprintf(buf, "%d\n", s->size);
3868 }
3869 SLAB_ATTR_RO(slab_size);
3870 
3871 static ssize_t align_show(struct kmem_cache *s, char *buf)
3872 {
3873 	return sprintf(buf, "%d\n", s->align);
3874 }
3875 SLAB_ATTR_RO(align);
3876 
3877 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3878 {
3879 	return sprintf(buf, "%d\n", s->objsize);
3880 }
3881 SLAB_ATTR_RO(object_size);
3882 
3883 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3884 {
3885 	return sprintf(buf, "%d\n", oo_objects(s->oo));
3886 }
3887 SLAB_ATTR_RO(objs_per_slab);
3888 
3889 static ssize_t order_store(struct kmem_cache *s,
3890 				const char *buf, size_t length)
3891 {
3892 	unsigned long order;
3893 	int err;
3894 
3895 	err = strict_strtoul(buf, 10, &order);
3896 	if (err)
3897 		return err;
3898 
3899 	if (order > slub_max_order || order < slub_min_order)
3900 		return -EINVAL;
3901 
3902 	calculate_sizes(s, order);
3903 	return length;
3904 }
3905 
3906 static ssize_t order_show(struct kmem_cache *s, char *buf)
3907 {
3908 	return sprintf(buf, "%d\n", oo_order(s->oo));
3909 }
3910 SLAB_ATTR(order);
3911 
3912 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
3913 {
3914 	return sprintf(buf, "%lu\n", s->min_partial);
3915 }
3916 
3917 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
3918 				 size_t length)
3919 {
3920 	unsigned long min;
3921 	int err;
3922 
3923 	err = strict_strtoul(buf, 10, &min);
3924 	if (err)
3925 		return err;
3926 
3927 	set_min_partial(s, min);
3928 	return length;
3929 }
3930 SLAB_ATTR(min_partial);
3931 
3932 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3933 {
3934 	if (s->ctor) {
3935 		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3936 
3937 		return n + sprintf(buf + n, "\n");
3938 	}
3939 	return 0;
3940 }
3941 SLAB_ATTR_RO(ctor);
3942 
3943 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3944 {
3945 	return sprintf(buf, "%d\n", s->refcount - 1);
3946 }
3947 SLAB_ATTR_RO(aliases);
3948 
3949 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3950 {
3951 	return show_slab_objects(s, buf, SO_ALL);
3952 }
3953 SLAB_ATTR_RO(slabs);
3954 
3955 static ssize_t partial_show(struct kmem_cache *s, char *buf)
3956 {
3957 	return show_slab_objects(s, buf, SO_PARTIAL);
3958 }
3959 SLAB_ATTR_RO(partial);
3960 
3961 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3962 {
3963 	return show_slab_objects(s, buf, SO_CPU);
3964 }
3965 SLAB_ATTR_RO(cpu_slabs);
3966 
3967 static ssize_t objects_show(struct kmem_cache *s, char *buf)
3968 {
3969 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
3970 }
3971 SLAB_ATTR_RO(objects);
3972 
3973 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
3974 {
3975 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
3976 }
3977 SLAB_ATTR_RO(objects_partial);
3978 
3979 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
3980 {
3981 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
3982 }
3983 SLAB_ATTR_RO(total_objects);
3984 
3985 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3986 {
3987 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3988 }
3989 
3990 static ssize_t sanity_checks_store(struct kmem_cache *s,
3991 				const char *buf, size_t length)
3992 {
3993 	s->flags &= ~SLAB_DEBUG_FREE;
3994 	if (buf[0] == '1')
3995 		s->flags |= SLAB_DEBUG_FREE;
3996 	return length;
3997 }
3998 SLAB_ATTR(sanity_checks);
3999 
4000 static ssize_t trace_show(struct kmem_cache *s, char *buf)
4001 {
4002 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4003 }
4004 
4005 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4006 							size_t length)
4007 {
4008 	s->flags &= ~SLAB_TRACE;
4009 	if (buf[0] == '1')
4010 		s->flags |= SLAB_TRACE;
4011 	return length;
4012 }
4013 SLAB_ATTR(trace);
4014 
4015 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4016 {
4017 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4018 }
4019 
4020 static ssize_t reclaim_account_store(struct kmem_cache *s,
4021 				const char *buf, size_t length)
4022 {
4023 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4024 	if (buf[0] == '1')
4025 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4026 	return length;
4027 }
4028 SLAB_ATTR(reclaim_account);
4029 
4030 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4031 {
4032 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4033 }
4034 SLAB_ATTR_RO(hwcache_align);
4035 
4036 #ifdef CONFIG_ZONE_DMA
4037 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4038 {
4039 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4040 }
4041 SLAB_ATTR_RO(cache_dma);
4042 #endif
4043 
4044 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4045 {
4046 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4047 }
4048 SLAB_ATTR_RO(destroy_by_rcu);
4049 
4050 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4051 {
4052 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4053 }
4054 
4055 static ssize_t red_zone_store(struct kmem_cache *s,
4056 				const char *buf, size_t length)
4057 {
4058 	if (any_slab_objects(s))
4059 		return -EBUSY;
4060 
4061 	s->flags &= ~SLAB_RED_ZONE;
4062 	if (buf[0] == '1')
4063 		s->flags |= SLAB_RED_ZONE;
4064 	calculate_sizes(s, -1);
4065 	return length;
4066 }
4067 SLAB_ATTR(red_zone);
4068 
4069 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4070 {
4071 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4072 }
4073 
4074 static ssize_t poison_store(struct kmem_cache *s,
4075 				const char *buf, size_t length)
4076 {
4077 	if (any_slab_objects(s))
4078 		return -EBUSY;
4079 
4080 	s->flags &= ~SLAB_POISON;
4081 	if (buf[0] == '1')
4082 		s->flags |= SLAB_POISON;
4083 	calculate_sizes(s, -1);
4084 	return length;
4085 }
4086 SLAB_ATTR(poison);
4087 
4088 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4089 {
4090 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4091 }
4092 
4093 static ssize_t store_user_store(struct kmem_cache *s,
4094 				const char *buf, size_t length)
4095 {
4096 	if (any_slab_objects(s))
4097 		return -EBUSY;
4098 
4099 	s->flags &= ~SLAB_STORE_USER;
4100 	if (buf[0] == '1')
4101 		s->flags |= SLAB_STORE_USER;
4102 	calculate_sizes(s, -1);
4103 	return length;
4104 }
4105 SLAB_ATTR(store_user);
4106 
4107 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4108 {
4109 	return 0;
4110 }
4111 
4112 static ssize_t validate_store(struct kmem_cache *s,
4113 			const char *buf, size_t length)
4114 {
4115 	int ret = -EINVAL;
4116 
4117 	if (buf[0] == '1') {
4118 		ret = validate_slab_cache(s);
4119 		if (ret >= 0)
4120 			ret = length;
4121 	}
4122 	return ret;
4123 }
4124 SLAB_ATTR(validate);
4125 
4126 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4127 {
4128 	return 0;
4129 }
4130 
4131 static ssize_t shrink_store(struct kmem_cache *s,
4132 			const char *buf, size_t length)
4133 {
4134 	if (buf[0] == '1') {
4135 		int rc = kmem_cache_shrink(s);
4136 
4137 		if (rc)
4138 			return rc;
4139 	} else
4140 		return -EINVAL;
4141 	return length;
4142 }
4143 SLAB_ATTR(shrink);
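
/*
 * Usage sketch (cache name hypothetical): only input beginning with
 * '1' is accepted, so
 *
 *	echo 1 > /sys/kernel/slab/dentry/shrink
 *
 * runs kmem_cache_shrink() on the cache; anything else yields -EINVAL.
 */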
4144 
4145 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4146 {
4147 	if (!(s->flags & SLAB_STORE_USER))
4148 		return -ENOSYS;
4149 	return list_locations(s, buf, TRACK_ALLOC);
4150 }
4151 SLAB_ATTR_RO(alloc_calls);
4152 
4153 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4154 {
4155 	if (!(s->flags & SLAB_STORE_USER))
4156 		return -ENOSYS;
4157 	return list_locations(s, buf, TRACK_FREE);
4158 }
4159 SLAB_ATTR_RO(free_calls);
4160 
4161 #ifdef CONFIG_NUMA
4162 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4163 {
4164 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4165 }
4166 
4167 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4168 				const char *buf, size_t length)
4169 {
4170 	unsigned long ratio;
4171 	int err;
4172 
4173 	err = strict_strtoul(buf, 10, &ratio);
4174 	if (err)
4175 		return err;
4176 
4177 	if (ratio <= 100)
4178 		s->remote_node_defrag_ratio = ratio * 10;
4179 
4180 	return length;
4181 }
4182 SLAB_ATTR(remote_node_defrag_ratio);
4183 #endif
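
/*
 * Note the factor of ten above: the ratio is kept internally in tenths
 * of a percent. Writing a hypothetical "20" to remote_node_defrag_ratio
 * stores 200 in s->remote_node_defrag_ratio, and reading the file
 * prints "20" again.
 */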
4184 
4185 #ifdef CONFIG_SLUB_STATS
4186 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4187 {
4188 	unsigned long sum  = 0;
4189 	int cpu;
4190 	int len;
4191 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4192 
4193 	if (!data)
4194 		return -ENOMEM;
4195 
4196 	for_each_online_cpu(cpu) {
4197 		unsigned x = get_cpu_slab(s, cpu)->stat[si];
4198 
4199 		data[cpu] = x;
4200 		sum += x;
4201 	}
4202 
4203 	len = sprintf(buf, "%lu", sum);
4204 
4205 #ifdef CONFIG_SMP
4206 	for_each_online_cpu(cpu) {
4207 		if (data[cpu] && len < PAGE_SIZE - 20)
4208 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4209 	}
4210 #endif
4211 	kfree(data);
4212 	return len + sprintf(buf + len, "\n");
4213 }
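
/*
 * Illustrative only (counts invented): the buffer built above might
 * read "1523 C0=810 C3=713\n" -- the sum across all CPUs, followed by
 * one " C<cpu>=<count>" pair per CPU with a nonzero count, clamped so
 * the line cannot overflow PAGE_SIZE.
 */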
4214 
4215 #define STAT_ATTR(si, text) 					\
4216 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4217 {								\
4218 	return show_stat(s, buf, si);				\
4219 }								\
4220 SLAB_ATTR_RO(text)
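
/*
 * Expansion sketch: STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) emits
 * roughly
 *
 *	static ssize_t alloc_fastpath_show(struct kmem_cache *s,
 *						char *buf)
 *	{
 *		return show_stat(s, buf, ALLOC_FASTPATH);
 *	}
 *	SLAB_ATTR_RO(alloc_fastpath);
 *
 * i.e. one read-only sysfs file per event counter.
 */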
4221 
4222 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4223 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4224 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4225 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4226 STAT_ATTR(FREE_FROZEN, free_frozen);
4227 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4228 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4229 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4230 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4231 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4232 STAT_ATTR(FREE_SLAB, free_slab);
4233 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4234 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4235 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4236 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4237 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4238 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4239 STAT_ATTR(ORDER_FALLBACK, order_fallback);
4240 #endif
4241 
4242 static struct attribute *slab_attrs[] = {
4243 	&slab_size_attr.attr,
4244 	&object_size_attr.attr,
4245 	&objs_per_slab_attr.attr,
4246 	&order_attr.attr,
4247 	&min_partial_attr.attr,
4248 	&objects_attr.attr,
4249 	&objects_partial_attr.attr,
4250 	&total_objects_attr.attr,
4251 	&slabs_attr.attr,
4252 	&partial_attr.attr,
4253 	&cpu_slabs_attr.attr,
4254 	&ctor_attr.attr,
4255 	&aliases_attr.attr,
4256 	&align_attr.attr,
4257 	&sanity_checks_attr.attr,
4258 	&trace_attr.attr,
4259 	&hwcache_align_attr.attr,
4260 	&reclaim_account_attr.attr,
4261 	&destroy_by_rcu_attr.attr,
4262 	&red_zone_attr.attr,
4263 	&poison_attr.attr,
4264 	&store_user_attr.attr,
4265 	&validate_attr.attr,
4266 	&shrink_attr.attr,
4267 	&alloc_calls_attr.attr,
4268 	&free_calls_attr.attr,
4269 #ifdef CONFIG_ZONE_DMA
4270 	&cache_dma_attr.attr,
4271 #endif
4272 #ifdef CONFIG_NUMA
4273 	&remote_node_defrag_ratio_attr.attr,
4274 #endif
4275 #ifdef CONFIG_SLUB_STATS
4276 	&alloc_fastpath_attr.attr,
4277 	&alloc_slowpath_attr.attr,
4278 	&free_fastpath_attr.attr,
4279 	&free_slowpath_attr.attr,
4280 	&free_frozen_attr.attr,
4281 	&free_add_partial_attr.attr,
4282 	&free_remove_partial_attr.attr,
4283 	&alloc_from_partial_attr.attr,
4284 	&alloc_slab_attr.attr,
4285 	&alloc_refill_attr.attr,
4286 	&free_slab_attr.attr,
4287 	&cpuslab_flush_attr.attr,
4288 	&deactivate_full_attr.attr,
4289 	&deactivate_empty_attr.attr,
4290 	&deactivate_to_head_attr.attr,
4291 	&deactivate_to_tail_attr.attr,
4292 	&deactivate_remote_frees_attr.attr,
4293 	&order_fallback_attr.attr,
4294 #endif
4295 	NULL
4296 };
4297 
4298 static struct attribute_group slab_attr_group = {
4299 	.attrs = slab_attrs,
4300 };
4301 
4302 static ssize_t slab_attr_show(struct kobject *kobj,
4303 				struct attribute *attr,
4304 				char *buf)
4305 {
4306 	struct slab_attribute *attribute;
4307 	struct kmem_cache *s;
4308 	int err;
4309 
4310 	attribute = to_slab_attr(attr);
4311 	s = to_slab(kobj);
4312 
4313 	if (!attribute->show)
4314 		return -EIO;
4315 
4316 	err = attribute->show(s, buf);
4317 
4318 	return err;
4319 }
4320 
4321 static ssize_t slab_attr_store(struct kobject *kobj,
4322 				struct attribute *attr,
4323 				const char *buf, size_t len)
4324 {
4325 	struct slab_attribute *attribute;
4326 	struct kmem_cache *s;
4327 	int err;
4328 
4329 	attribute = to_slab_attr(attr);
4330 	s = to_slab(kobj);
4331 
4332 	if (!attribute->store)
4333 		return -EIO;
4334 
4335 	err = attribute->store(s, buf, len);
4336 
4337 	return err;
4338 }
4339 
4340 static void kmem_cache_release(struct kobject *kobj)
4341 {
4342 	struct kmem_cache *s = to_slab(kobj);
4343 
4344 	kfree(s);
4345 }
4346 
4347 static struct sysfs_ops slab_sysfs_ops = {
4348 	.show = slab_attr_show,
4349 	.store = slab_attr_store,
4350 };
4351 
4352 static struct kobj_type slab_ktype = {
4353 	.sysfs_ops = &slab_sysfs_ops,
4354 	.release = kmem_cache_release
4355 };
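
/*
 * Dispatch sketch: a read of /sys/kernel/slab/<cache>/<attr> lands in
 * slab_attr_show(), which uses container_of() (via to_slab() and
 * to_slab_attr()) to recover the kmem_cache and slab_attribute from
 * the kobject and attribute, then forwards to the ->show() method;
 * writes take the symmetric ->store() path.
 */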
4356 
4357 static int uevent_filter(struct kset *kset, struct kobject *kobj)
4358 {
4359 	struct kobj_type *ktype = get_ktype(kobj);
4360 
4361 	if (ktype == &slab_ktype)
4362 		return 1;
4363 	return 0;
4364 }
4365 
4366 static struct kset_uevent_ops slab_uevent_ops = {
4367 	.filter = uevent_filter,
4368 };
4369 
4370 static struct kset *slab_kset;
4371 
4372 #define ID_STR_LENGTH 64
4373 
4374 /*
4375  * Create a unique string id for a slab cache:
4376  * Format	:[flags-]size
4377  */
4378 static char *create_unique_id(struct kmem_cache *s)
4379 {
4380 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4381 	char *p = name;
4382 
4383 	BUG_ON(!name);
4384 
4385 	*p++ = ':';
4386 	/*
4387 	 * First flags affecting slabcache operations. We will only
4388 	 * get here for aliasable slabs so we do not need to support
4389 	 * too many flags. The flags here must cover all flags that
4390 	 * are matched during merging to guarantee that the id is
4391 	 * unique.
4392 	 */
4393 	if (s->flags & SLAB_CACHE_DMA)
4394 		*p++ = 'd';
4395 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4396 		*p++ = 'a';
4397 	if (s->flags & SLAB_DEBUG_FREE)
4398 		*p++ = 'F';
4399 	if (p != name + 1)
4400 		*p++ = '-';
4401 	p += sprintf(p, "%07d", s->size);
4402 	BUG_ON(p > name + ID_STR_LENGTH - 1);
4403 	return name;
4404 }
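
/*
 * Illustrative only (sizes hypothetical): a DMA cache of object size
 * 192 gets the id ":d-0000192"; a cache with none of the flags above
 * skips the '-' separator and becomes plain ":0000192".
 */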
4405 
4406 static int sysfs_slab_add(struct kmem_cache *s)
4407 {
4408 	int err;
4409 	const char *name;
4410 	int unmergeable;
4411 
4412 	if (slab_state < SYSFS)
4413 		/* Defer until later */
4414 		return 0;
4415 
4416 	unmergeable = slab_unmergeable(s);
4417 	if (unmergeable) {
4418 		/*
4419 		 * Slabcache can never be merged so we can use the name proper.
4420 		 * This is typically the case for debug situations. In that
4421 		 * case we can catch duplicate names easily.
4422 		 */
4423 		sysfs_remove_link(&slab_kset->kobj, s->name);
4424 		name = s->name;
4425 	} else {
4426 		/*
4427 		 * Create a unique name for the slab as a target
4428 		 * for the symlinks.
4429 		 */
4430 		name = create_unique_id(s);
4431 	}
4432 
4433 	s->kobj.kset = slab_kset;
4434 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
4435 	if (err) {
4436 		kobject_put(&s->kobj);
4437 		return err;
4438 	}
4439 
4440 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4441 	if (err)
4442 		return err;
4443 	kobject_uevent(&s->kobj, KOBJ_ADD);
4444 	if (!unmergeable) {
4445 		/* Setup first alias */
4446 		sysfs_slab_alias(s, s->name);
4447 		kfree(name);
4448 	}
4449 	return 0;
4450 }
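
/*
 * Net effect, as a hypothetical listing: a mergeable 192-byte cache
 * appears under its unique id with the human-readable name attached as
 * a symlink,
 *
 *	/sys/kernel/slab/:0000192
 *	/sys/kernel/slab/kmalloc-192 -> :0000192
 *
 * while unmergeable (typically debug) caches keep their proper name.
 */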
4451 
4452 static void sysfs_slab_remove(struct kmem_cache *s)
4453 {
4454 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4455 	kobject_del(&s->kobj);
4456 	kobject_put(&s->kobj);
4457 }
4458 
4459 /*
4460  * Need to buffer aliases during bootup until sysfs becomes
4461  * available lest we lose that information.
4462  */
4463 struct saved_alias {
4464 	struct kmem_cache *s;
4465 	const char *name;
4466 	struct saved_alias *next;
4467 };
4468 
4469 static struct saved_alias *alias_list;
4470 
4471 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4472 {
4473 	struct saved_alias *al;
4474 
4475 	if (slab_state == SYSFS) {
4476 		/*
4477 		 * If we have a leftover link then remove it.
4478 		 */
4479 		sysfs_remove_link(&slab_kset->kobj, name);
4480 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4481 	}
4482 
4483 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4484 	if (!al)
4485 		return -ENOMEM;
4486 
4487 	al->s = s;
4488 	al->name = name;
4489 	al->next = alias_list;
4490 	alias_list = al;
4491 	return 0;
4492 }
4493 
4494 static int __init slab_sysfs_init(void)
4495 {
4496 	struct kmem_cache *s;
4497 	int err;
4498 
4499 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4500 	if (!slab_kset) {
4501 		printk(KERN_ERR "Cannot register slab subsystem.\n");
4502 		return -ENOSYS;
4503 	}
4504 
4505 	slab_state = SYSFS;
4506 
4507 	list_for_each_entry(s, &slab_caches, list) {
4508 		err = sysfs_slab_add(s);
4509 		if (err)
4510 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4511 						" to sysfs\n", s->name);
4512 	}
4513 
4514 	while (alias_list) {
4515 		struct saved_alias *al = alias_list;
4516 
4517 		alias_list = alias_list->next;
4518 		err = sysfs_slab_alias(al->s, al->name);
4519 		if (err)
4520 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4521 					" %s to sysfs\n", al->name);
4522 		kfree(al);
4523 	}
4524 
4525 	resiliency_test();
4526 	return 0;
4527 }
4528 
4529 __initcall(slab_sysfs_init);
4530 #endif
4531 
4532 /*
4533  * The /proc/slabinfo ABI
4534  */
4535 #ifdef CONFIG_SLABINFO
4536 static void print_slabinfo_header(struct seq_file *m)
4537 {
4538 	seq_puts(m, "slabinfo - version: 2.1\n");
4539 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4540 		 "<objperslab> <pagesperslab>");
4541 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4542 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4543 	seq_putc(m, '\n');
4544 }
4545 
4546 static void *s_start(struct seq_file *m, loff_t *pos)
4547 {
4548 	loff_t n = *pos;
4549 
4550 	down_read(&slub_lock);
4551 	if (!n)
4552 		print_slabinfo_header(m);
4553 
4554 	return seq_list_start(&slab_caches, *pos);
4555 }
4556 
4557 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4558 {
4559 	return seq_list_next(p, &slab_caches, pos);
4560 }
4561 
4562 static void s_stop(struct seq_file *m, void *p)
4563 {
4564 	up_read(&slub_lock);
4565 }
4566 
4567 static int s_show(struct seq_file *m, void *p)
4568 {
4569 	unsigned long nr_partials = 0;
4570 	unsigned long nr_slabs = 0;
4571 	unsigned long nr_inuse = 0;
4572 	unsigned long nr_objs = 0;
4573 	unsigned long nr_free = 0;
4574 	struct kmem_cache *s;
4575 	int node;
4576 
4577 	s = list_entry(p, struct kmem_cache, list);
4578 
4579 	for_each_online_node(node) {
4580 		struct kmem_cache_node *n = get_node(s, node);
4581 
4582 		if (!n)
4583 			continue;
4584 
4585 		nr_partials += n->nr_partial;
4586 		nr_slabs += atomic_long_read(&n->nr_slabs);
4587 		nr_objs += atomic_long_read(&n->total_objects);
4588 		nr_free += count_partial(n, count_free);
4589 	}
4590 
4591 	nr_inuse = nr_objs - nr_free;
4592 
4593 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4594 		   nr_objs, s->size, oo_objects(s->oo),
4595 		   (1 << oo_order(s->oo)));
4596 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4597 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4598 		   0UL);
4599 	seq_putc(m, '\n');
4600 	return 0;
4601 }
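
/*
 * Illustrative only (all numbers invented): a resulting /proc/slabinfo
 * row might look like
 *
 * dentry	 10031 10269 192 21 1 : tunables 0 0 0 : slabdata 489 489 0
 *
 * SLUB has no tunables or shared arrays, so those columns are always
 * zero, and every slab is reported as active.
 */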
4602 
4603 static const struct seq_operations slabinfo_op = {
4604 	.start = s_start,
4605 	.next = s_next,
4606 	.stop = s_stop,
4607 	.show = s_show,
4608 };
4609 
4610 static int slabinfo_open(struct inode *inode, struct file *file)
4611 {
4612 	return seq_open(file, &slabinfo_op);
4613 }
4614 
4615 static const struct file_operations proc_slabinfo_operations = {
4616 	.open		= slabinfo_open,
4617 	.read		= seq_read,
4618 	.llseek		= seq_lseek,
4619 	.release	= seq_release,
4620 };
4621 
4622 static int __init slab_proc_init(void)
4623 {
4624 	proc_create("slabinfo", S_IWUSR|S_IRUGO, NULL, &proc_slabinfo_operations);
4625 	return 0;
4626 }
4627 module_init(slab_proc_init);
4628 #endif /* CONFIG_SLABINFO */
4629