xref: /openbmc/linux/mm/slub.c (revision e4c0d0e2)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks and only
6  * uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/swap.h> /* struct reclaim_state */
13 #include <linux/module.h>
14 #include <linux/bit_spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/bitops.h>
17 #include <linux/slab.h>
18 #include <linux/proc_fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/kmemcheck.h>
21 #include <linux/cpu.h>
22 #include <linux/cpuset.h>
23 #include <linux/mempolicy.h>
24 #include <linux/ctype.h>
25 #include <linux/debugobjects.h>
26 #include <linux/kallsyms.h>
27 #include <linux/memory.h>
28 #include <linux/math64.h>
29 #include <linux/fault-inject.h>
30 
31 #include <trace/events/kmem.h>
32 
33 /*
34  * Lock order:
35  *   1. slab_lock(page)
36  *   2. slab->list_lock
37  *
38  *   The slab_lock protects operations on the object of a particular
39  *   slab and its metadata in the page struct. If the slab lock
40  *   has been taken then no allocations nor frees can be performed
41  *   on the objects in the slab nor can the slab be added or removed
42  *   from the partial or full lists since this would mean modifying
43  *   the page struct of the slab.
44  *
45  *   The list_lock protects the partial and full list on each node and
46  *   the partial slab counter. If taken then no new slabs may be added or
47  *   removed from the lists nor can the number of partial slabs be modified.
48  *   (Note that the total number of slabs is an atomic value that may be
49  *   modified without taking the list lock).
50  *
51  *   The list_lock is a centralized lock and thus we avoid taking it as
52  *   much as possible. As long as SLUB does not have to handle partial
53  *   slabs, operations can continue without any centralized lock. E.g.
54  *   allocating a long series of objects that fill up slabs does not require
55  *   the list lock.
56  *
57  *   The lock order is sometimes inverted when we are trying to get a slab
58  *   off a list. We take the list_lock and then look for a page on the list
59  *   to use. While we do that objects in the slabs may be freed. We can
60  *   only operate on the slab if we have also taken the slab_lock. So we use
61  *   a slab_trylock() on the slab. If trylock was successful then no frees
62  *   can occur anymore and we can use the slab for allocations etc. If the
63  *   slab_trylock() does not succeed then frees are in progress in the slab and
64  *   we must stay away from it for a while since we may cause a bouncing
65  *   cacheline if we try to acquire the lock. So go onto the next slab.
66  *   If all pages are busy then we may allocate a new slab instead of reusing
67  *   a partial slab. A new slab has no one operating on it and thus there is
68  *   no danger of cacheline contention.
69  *
70  *   Interrupts are disabled during allocation and deallocation in order to
71  *   make the slab allocator safe to use in the context of an irq. In addition
72  *   interrupts are disabled to ensure that the processor does not change
73  *   while handling per_cpu slabs, due to kernel preemption.
74  *
75  * SLUB assigns one slab for allocation to each processor.
76  * Allocations only occur from these slabs called cpu slabs.
77  *
78  * Slabs with free elements are kept on a partial list and during regular
79  * operations no list for full slabs is used. If an object in a full slab is
80  * freed then the slab will show up again on the partial lists.
81  * We track full slabs for debugging purposes though because otherwise we
82  * cannot scan all objects.
83  *
84  * Slabs are freed when they become empty. Teardown and setup is
85  * minimal so we rely on the page allocators per cpu caches for
86  * fast frees and allocs.
87  *
88  * Overloading of page flags that are otherwise used for LRU management.
89  *
90  * PageActive 		The slab is frozen and exempt from list processing.
91  * 			This means that the slab is dedicated to a purpose
92  * 			such as satisfying allocations for a specific
93  * 			processor. Objects may be freed in the slab while
94  * 			it is frozen but slab_free will then skip the usual
95  * 			list operations. It is up to the processor holding
96  * 			the slab to integrate the slab into the slab lists
97  * 			when the slab is no longer needed.
98  *
99  * 			One use of this flag is to mark slabs that are
100  * 			used for allocations. Then such a slab becomes a cpu
101  * 			slab. The cpu slab may be equipped with an additional
102  * 			freelist that allows lockless access to
103  * 			free objects in addition to the regular freelist
104  * 			that requires the slab lock.
105  *
106  * PageError		Slab requires special handling due to debug
107  * 			options set. This moves	slab handling out of
108  * 			the fast path and disables lockless freelists.
109  */
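/*
 * Illustrative sketch (editor's note, not allocator code) of the lock
 * order inversion described above: scan the partial list under the
 * list_lock and slab_trylock() each slab rather than spinning on it.
 * Names mirror the helpers defined further down in this file.
 *
 *	spin_lock(&n->list_lock);
 *	list_for_each_entry(page, &n->partial, lru)
 *		if (slab_trylock(page))
 *			goto got_slab;	// frees now excluded
 *	spin_unlock(&n->list_lock);
 *
 * A failed trylock means frees are in progress in that slab, so we
 * skip it instead of bouncing its cacheline.
 */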
110 
111 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
112 		SLAB_TRACE | SLAB_DEBUG_FREE)
113 
114 static inline int kmem_cache_debug(struct kmem_cache *s)
115 {
116 #ifdef CONFIG_SLUB_DEBUG
117 	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
118 #else
119 	return 0;
120 #endif
121 }
122 
123 /*
124  * Issues still to be resolved:
125  *
126  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
127  *
128  * - Variable sizing of the per node arrays
129  */
130 
131 /* Enable to test recovery from slab corruption on boot */
132 #undef SLUB_RESILIENCY_TEST
133 
134 /*
135  * Minimum number of partial slabs. These will be left on the partial
136  * lists even if they are empty. kmem_cache_shrink may reclaim them.
137  */
138 #define MIN_PARTIAL 5
139 
140 /*
141  * Maximum number of desirable partial slabs.
142  * The existence of more partial slabs makes kmem_cache_shrink
143  * sort the partial list by the number of objects in them.
144  */
145 #define MAX_PARTIAL 10
146 
147 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
148 				SLAB_POISON | SLAB_STORE_USER)
149 
150 /*
151  * Debugging flags that require metadata to be stored in the slab.  These get
152  * disabled when slub_debug=O is used and a cache's min order increases with
153  * metadata.
154  */
155 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
156 
157 /*
158  * Set of flags that will prevent slab merging
159  */
160 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
161 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
162 		SLAB_FAILSLAB)
163 
164 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
165 		SLAB_CACHE_DMA | SLAB_NOTRACK)
166 
167 #define OO_SHIFT	16
168 #define OO_MASK		((1 << OO_SHIFT) - 1)
169 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
170 
171 /* Internal SLUB flags */
172 #define __OBJECT_POISON		0x80000000UL /* Poison object */
173 
174 static int kmem_size = sizeof(struct kmem_cache);
175 
176 #ifdef CONFIG_SMP
177 static struct notifier_block slab_notifier;
178 #endif
179 
180 static enum {
181 	DOWN,		/* No slab functionality available */
182 	PARTIAL,	/* Kmem_cache_node works */
183 	UP,		/* Everything works but does not show up in sysfs */
184 	SYSFS		/* Sysfs up */
185 } slab_state = DOWN;
186 
187 /* A list of all slab caches on the system */
188 static DECLARE_RWSEM(slub_lock);
189 static LIST_HEAD(slab_caches);
190 
191 /*
192  * Tracking user of a slab.
193  */
194 struct track {
195 	unsigned long addr;	/* Called from address */
196 	int cpu;		/* Was running on cpu */
197 	int pid;		/* Pid context */
198 	unsigned long when;	/* When did the operation occur */
199 };
200 
201 enum track_item { TRACK_ALLOC, TRACK_FREE };
202 
203 #ifdef CONFIG_SYSFS
204 static int sysfs_slab_add(struct kmem_cache *);
205 static int sysfs_slab_alias(struct kmem_cache *, const char *);
206 static void sysfs_slab_remove(struct kmem_cache *);
207 
208 #else
209 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
210 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
211 							{ return 0; }
212 static inline void sysfs_slab_remove(struct kmem_cache *s)
213 {
214 	kfree(s->name);
215 	kfree(s);
216 }
217 
218 #endif
219 
220 static inline void stat(const struct kmem_cache *s, enum stat_item si)
221 {
222 #ifdef CONFIG_SLUB_STATS
223 	__this_cpu_inc(s->cpu_slab->stat[si]);
224 #endif
225 }
226 
227 /********************************************************************
228  * 			Core slab cache functions
229  *******************************************************************/
230 
231 int slab_is_available(void)
232 {
233 	return slab_state >= UP;
234 }
235 
236 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
237 {
238 	return s->node[node];
239 }
240 
241 /* Verify that a pointer has an address that is valid within a slab page */
242 static inline int check_valid_pointer(struct kmem_cache *s,
243 				struct page *page, const void *object)
244 {
245 	void *base;
246 
247 	if (!object)
248 		return 1;
249 
250 	base = page_address(page);
251 	if (object < base || object >= base + page->objects * s->size ||
252 		(object - base) % s->size) {
253 		return 0;
254 	}
255 
256 	return 1;
257 }
258 
259 static inline void *get_freepointer(struct kmem_cache *s, void *object)
260 {
261 	return *(void **)(object + s->offset);
262 }
263 
264 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
265 {
266 	void *p;
267 
268 #ifdef CONFIG_DEBUG_PAGEALLOC
269 	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
270 #else
271 	p = get_freepointer(s, object);
272 #endif
273 	return p;
274 }
275 
276 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
277 {
278 	*(void **)(object + s->offset) = fp;
279 }
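/*
 * Illustrative sketch (editor's note): the free pointer sits at
 * s->offset inside each free object, so a slab's freelist is simply a
 * singly linked list threaded through the free objects themselves:
 *
 *	page->freelist -> objA -> objB -> NULL
 *
 * Counting the free objects is one dependent load per hop:
 *
 *	for (p = page->freelist; p; p = get_freepointer(s, p))
 *		nr_free++;
 */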
280 
281 /* Loop over all objects in a slab */
282 #define for_each_object(__p, __s, __addr, __objects) \
283 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
284 			__p += (__s)->size)
285 
286 /* Determine object index from a given position */
287 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
288 {
289 	return (p - addr) / s->size;
290 }
291 
292 static inline size_t slab_ksize(const struct kmem_cache *s)
293 {
294 #ifdef CONFIG_SLUB_DEBUG
295 	/*
296 	 * Debugging requires use of the padding between object
297 	 * and whatever may come after it.
298 	 */
299 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
300 		return s->objsize;
301 
302 #endif
303 	/*
304 	 * If we have the need to store the freelist pointer
305 	 * back there or track user information then we can
306 	 * only use the space before that information.
307 	 */
308 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
309 		return s->inuse;
310 	/*
311 	 * Else we can use all the padding etc for the allocation
312 	 */
313 	return s->size;
314 }
315 
316 static inline int order_objects(int order, unsigned long size, int reserved)
317 {
318 	return ((PAGE_SIZE << order) - reserved) / size;
319 }
320 
321 static inline struct kmem_cache_order_objects oo_make(int order,
322 		unsigned long size, int reserved)
323 {
324 	struct kmem_cache_order_objects x = {
325 		(order << OO_SHIFT) + order_objects(order, size, reserved)
326 	};
327 
328 	return x;
329 }
330 
331 static inline int oo_order(struct kmem_cache_order_objects x)
332 {
333 	return x.x >> OO_SHIFT;
334 }
335 
336 static inline int oo_objects(struct kmem_cache_order_objects x)
337 {
338 	return x.x & OO_MASK;
339 }
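/*
 * Worked example (editor's note, assumes PAGE_SIZE == 4096): for
 * order 1 pages, 256 byte objects and no reserved bytes,
 *
 *	order_objects(1, 256, 0) == 8192 / 256 == 32
 *	oo_make(1, 256, 0).x     == (1 << OO_SHIFT) + 32 == 0x10020
 *
 * so oo_order() recovers 1 from the bits above OO_SHIFT and
 * oo_objects() recovers 32 from the low OO_MASK bits.
 */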
340 
341 #ifdef CONFIG_SLUB_DEBUG
342 /*
343  * Determine a map of the free objects on a page (set bit == free).
344  *
345  * Slab lock or node list_lock must be held to guarantee that the page does
346  * not vanish from under us.
347  */
348 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
349 {
350 	void *p;
351 	void *addr = page_address(page);
352 
353 	for (p = page->freelist; p; p = get_freepointer(s, p))
354 		set_bit(slab_index(p, s, addr), map);
355 }
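/*
 * Typical use of get_map() (editor's sketch; the caller provides a
 * bitmap with at least page->objects bits):
 *
 *	void *addr = page_address(page);
 *	void *p;
 *
 *	bitmap_zero(map, page->objects);
 *	get_map(s, page, map);
 *	for_each_object(p, s, addr, page->objects)
 *		if (!test_bit(slab_index(p, s, addr), map))
 *			...;	// p is allocated, not on the freelist
 */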
356 
357 /*
358  * Debug settings:
359  */
360 #ifdef CONFIG_SLUB_DEBUG_ON
361 static int slub_debug = DEBUG_DEFAULT_FLAGS;
362 #else
363 static int slub_debug;
364 #endif
365 
366 static char *slub_debug_slabs;
367 static int disable_higher_order_debug;
368 
369 /*
370  * Object debugging
371  */
372 static void print_section(char *text, u8 *addr, unsigned int length)
373 {
374 	int i, offset;
375 	int newline = 1;
376 	char ascii[17];
377 
378 	ascii[16] = 0;
379 
380 	for (i = 0; i < length; i++) {
381 		if (newline) {
382 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
383 			newline = 0;
384 		}
385 		printk(KERN_CONT " %02x", addr[i]);
386 		offset = i % 16;
387 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
388 		if (offset == 15) {
389 			printk(KERN_CONT " %s\n", ascii);
390 			newline = 1;
391 		}
392 	}
393 	if (!newline) {
394 		i %= 16;
395 		while (i < 16) {
396 			printk(KERN_CONT "   ");
397 			ascii[i] = ' ';
398 			i++;
399 		}
400 		printk(KERN_CONT " %s\n", ascii);
401 	}
402 }
403 
404 static struct track *get_track(struct kmem_cache *s, void *object,
405 	enum track_item alloc)
406 {
407 	struct track *p;
408 
409 	if (s->offset)
410 		p = object + s->offset + sizeof(void *);
411 	else
412 		p = object + s->inuse;
413 
414 	return p + alloc;
415 }
416 
417 static void set_track(struct kmem_cache *s, void *object,
418 			enum track_item alloc, unsigned long addr)
419 {
420 	struct track *p = get_track(s, object, alloc);
421 
422 	if (addr) {
423 		p->addr = addr;
424 		p->cpu = smp_processor_id();
425 		p->pid = current->pid;
426 		p->when = jiffies;
427 	} else
428 		memset(p, 0, sizeof(struct track));
429 }
430 
431 static void init_tracking(struct kmem_cache *s, void *object)
432 {
433 	if (!(s->flags & SLAB_STORE_USER))
434 		return;
435 
436 	set_track(s, object, TRACK_FREE, 0UL);
437 	set_track(s, object, TRACK_ALLOC, 0UL);
438 }
439 
440 static void print_track(const char *s, struct track *t)
441 {
442 	if (!t->addr)
443 		return;
444 
445 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
446 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
447 }
448 
449 static void print_tracking(struct kmem_cache *s, void *object)
450 {
451 	if (!(s->flags & SLAB_STORE_USER))
452 		return;
453 
454 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
455 	print_track("Freed", get_track(s, object, TRACK_FREE));
456 }
457 
458 static void print_page_info(struct page *page)
459 {
460 	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
461 		page, page->objects, page->inuse, page->freelist, page->flags);
462 
463 }
464 
465 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
466 {
467 	va_list args;
468 	char buf[100];
469 
470 	va_start(args, fmt);
471 	vsnprintf(buf, sizeof(buf), fmt, args);
472 	va_end(args);
473 	printk(KERN_ERR "========================================"
474 			"=====================================\n");
475 	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
476 	printk(KERN_ERR "----------------------------------------"
477 			"-------------------------------------\n\n");
478 }
479 
480 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
481 {
482 	va_list args;
483 	char buf[100];
484 
485 	va_start(args, fmt);
486 	vsnprintf(buf, sizeof(buf), fmt, args);
487 	va_end(args);
488 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
489 }
490 
491 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
492 {
493 	unsigned int off;	/* Offset of last byte */
494 	u8 *addr = page_address(page);
495 
496 	print_tracking(s, p);
497 
498 	print_page_info(page);
499 
500 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
501 			p, p - addr, get_freepointer(s, p));
502 
503 	if (p > addr + 16)
504 		print_section("Bytes b4", p - 16, 16);
505 
506 	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
507 
508 	if (s->flags & SLAB_RED_ZONE)
509 		print_section("Redzone", p + s->objsize,
510 			s->inuse - s->objsize);
511 
512 	if (s->offset)
513 		off = s->offset + sizeof(void *);
514 	else
515 		off = s->inuse;
516 
517 	if (s->flags & SLAB_STORE_USER)
518 		off += 2 * sizeof(struct track);
519 
520 	if (off != s->size)
521 		/* Beginning of the filler is the free pointer */
522 		print_section("Padding", p + off, s->size - off);
523 
524 	dump_stack();
525 }
526 
527 static void object_err(struct kmem_cache *s, struct page *page,
528 			u8 *object, char *reason)
529 {
530 	slab_bug(s, "%s", reason);
531 	print_trailer(s, page, object);
532 }
533 
534 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
535 {
536 	va_list args;
537 	char buf[100];
538 
539 	va_start(args, fmt);
540 	vsnprintf(buf, sizeof(buf), fmt, args);
541 	va_end(args);
542 	slab_bug(s, "%s", buf);
543 	print_page_info(page);
544 	dump_stack();
545 }
546 
547 static void init_object(struct kmem_cache *s, void *object, u8 val)
548 {
549 	u8 *p = object;
550 
551 	if (s->flags & __OBJECT_POISON) {
552 		memset(p, POISON_FREE, s->objsize - 1);
553 		p[s->objsize - 1] = POISON_END;
554 	}
555 
556 	if (s->flags & SLAB_RED_ZONE)
557 		memset(p + s->objsize, val, s->inuse - s->objsize);
558 }
559 
560 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
561 {
562 	while (bytes) {
563 		if (*start != (u8)value)
564 			return start;
565 		start++;
566 		bytes--;
567 	}
568 	return NULL;
569 }
570 
571 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
572 						void *from, void *to)
573 {
574 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
575 	memset(from, data, to - from);
576 }
577 
578 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
579 			u8 *object, char *what,
580 			u8 *start, unsigned int value, unsigned int bytes)
581 {
582 	u8 *fault;
583 	u8 *end;
584 
585 	fault = check_bytes(start, value, bytes);
586 	if (!fault)
587 		return 1;
588 
589 	end = start + bytes;
590 	while (end > fault && end[-1] == value)
591 		end--;
592 
593 	slab_bug(s, "%s overwritten", what);
594 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
595 					fault, end - 1, fault[0], value);
596 	print_trailer(s, page, object);
597 
598 	restore_bytes(s, what, value, fault, end);
599 	return 0;
600 }
601 
602 /*
603  * Object layout:
604  *
605  * object address
606  * 	Bytes of the object to be managed.
607  * 	If the freepointer may overlay the object then the free
608  * 	pointer is the first word of the object.
609  *
610  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
611  * 	0xa5 (POISON_END)
612  *
613  * object + s->objsize
614  * 	Padding to reach word boundary. This is also used for Redzoning.
615  * 	Padding is extended by another word if Redzoning is enabled and
616  * 	objsize == inuse.
617  *
618  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
619  * 	0xcc (RED_ACTIVE) for objects in use.
620  *
621  * object + s->inuse
622  * 	Meta data starts here.
623  *
624  * 	A. Free pointer (if we cannot overwrite object on free)
625  * 	B. Tracking data for SLAB_STORE_USER
626  * 	C. Padding to reach required alignment boundary or at minimum
627  * 		one word if debugging is on to be able to detect writes
628  * 		before the word boundary.
629  *
630  *	Padding is done using 0x5a (POISON_INUSE)
631  *
632  * object + s->size
633  * 	Nothing is used beyond s->size.
634  *
635  * If slabcaches are merged then the objsize and inuse boundaries are mostly
636  * ignored, and therefore no slab options that rely on these boundaries
637  * may be used with merged slabcaches.
638  */
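/*
 * Worked example (editor's note; hypothetical 64 bit cache with no
 * constructor): objsize = 24 with SLAB_RED_ZONE | SLAB_POISON |
 * SLAB_STORE_USER gives roughly
 *
 *	  0..23	object (0x6b poison, last byte 0xa5 while free)
 *	 24..31	red zone word			(s->inuse  = 32)
 *	 32..39	free pointer			(s->offset = 32)
 *	 40..87	two struct track records (alloc and free)
 *	 88..95	trailing padding word		(s->size   = 96)
 *
 * Exact offsets depend on alignment and architecture; this only shows
 * how the regions described above line up.
 */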
639 
640 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
641 {
642 	unsigned long off = s->inuse;	/* The end of info */
643 
644 	if (s->offset)
645 		/* Freepointer is placed after the object. */
646 		off += sizeof(void *);
647 
648 	if (s->flags & SLAB_STORE_USER)
649 		/* We also have user information there */
650 		off += 2 * sizeof(struct track);
651 
652 	if (s->size == off)
653 		return 1;
654 
655 	return check_bytes_and_report(s, page, p, "Object padding",
656 				p + off, POISON_INUSE, s->size - off);
657 }
658 
659 /* Check the pad bytes at the end of a slab page */
660 static int slab_pad_check(struct kmem_cache *s, struct page *page)
661 {
662 	u8 *start;
663 	u8 *fault;
664 	u8 *end;
665 	int length;
666 	int remainder;
667 
668 	if (!(s->flags & SLAB_POISON))
669 		return 1;
670 
671 	start = page_address(page);
672 	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
673 	end = start + length;
674 	remainder = length % s->size;
675 	if (!remainder)
676 		return 1;
677 
678 	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
679 	if (!fault)
680 		return 1;
681 	while (end > fault && end[-1] == POISON_INUSE)
682 		end--;
683 
684 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
685 	print_section("Padding", end - remainder, remainder);
686 
687 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
688 	return 0;
689 }
690 
691 static int check_object(struct kmem_cache *s, struct page *page,
692 					void *object, u8 val)
693 {
694 	u8 *p = object;
695 	u8 *endobject = object + s->objsize;
696 
697 	if (s->flags & SLAB_RED_ZONE) {
698 		if (!check_bytes_and_report(s, page, object, "Redzone",
699 			endobject, val, s->inuse - s->objsize))
700 			return 0;
701 	} else {
702 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
703 			check_bytes_and_report(s, page, p, "Alignment padding",
704 				endobject, POISON_INUSE, s->inuse - s->objsize);
705 		}
706 	}
707 
708 	if (s->flags & SLAB_POISON) {
709 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
710 			(!check_bytes_and_report(s, page, p, "Poison", p,
711 					POISON_FREE, s->objsize - 1) ||
712 			 !check_bytes_and_report(s, page, p, "Poison",
713 				p + s->objsize - 1, POISON_END, 1)))
714 			return 0;
715 		/*
716 		 * check_pad_bytes cleans up on its own.
717 		 */
718 		check_pad_bytes(s, page, p);
719 	}
720 
721 	if (!s->offset && val == SLUB_RED_ACTIVE)
722 		/*
723 		 * Object and freepointer overlap. Cannot check
724 		 * freepointer while object is allocated.
725 		 */
726 		return 1;
727 
728 	/* Check free pointer validity */
729 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
730 		object_err(s, page, p, "Freepointer corrupt");
731 		/*
732 		 * No choice but to zap it and thus lose the remainder
733 		 * of the free objects in this slab. May cause
734 		 * another error because the object count is now wrong.
735 		 */
736 		set_freepointer(s, p, NULL);
737 		return 0;
738 	}
739 	return 1;
740 }
741 
742 static int check_slab(struct kmem_cache *s, struct page *page)
743 {
744 	int maxobj;
745 
746 	VM_BUG_ON(!irqs_disabled());
747 
748 	if (!PageSlab(page)) {
749 		slab_err(s, page, "Not a valid slab page");
750 		return 0;
751 	}
752 
753 	maxobj = order_objects(compound_order(page), s->size, s->reserved);
754 	if (page->objects > maxobj) {
755 		slab_err(s, page, "objects %u > max %u",
756 			page->objects, maxobj);
757 		return 0;
758 	}
759 	if (page->inuse > page->objects) {
760 		slab_err(s, page, "inuse %u > max %u",
761 			page->inuse, page->objects);
762 		return 0;
763 	}
764 	/* Slab_pad_check fixes things up after itself */
765 	slab_pad_check(s, page);
766 	return 1;
767 }
768 
769 /*
770  * Determine if a certain object on a page is on the freelist. Must hold the
771  * slab lock to guarantee that the chains are in a consistent state.
772  */
773 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
774 {
775 	int nr = 0;
776 	void *fp = page->freelist;
777 	void *object = NULL;
778 	unsigned long max_objects;
779 
780 	while (fp && nr <= page->objects) {
781 		if (fp == search)
782 			return 1;
783 		if (!check_valid_pointer(s, page, fp)) {
784 			if (object) {
785 				object_err(s, page, object,
786 					"Freechain corrupt");
787 				set_freepointer(s, object, NULL);
788 				break;
789 			} else {
790 				slab_err(s, page, "Freepointer corrupt");
791 				page->freelist = NULL;
792 				page->inuse = page->objects;
793 				slab_fix(s, "Freelist cleared");
794 				return 0;
795 			}
797 		}
798 		object = fp;
799 		fp = get_freepointer(s, object);
800 		nr++;
801 	}
802 
803 	max_objects = order_objects(compound_order(page), s->size, s->reserved);
804 	if (max_objects > MAX_OBJS_PER_PAGE)
805 		max_objects = MAX_OBJS_PER_PAGE;
806 
807 	if (page->objects != max_objects) {
808 		slab_err(s, page, "Wrong number of objects. Found %d but "
809 			"should be %d", page->objects, max_objects);
810 		page->objects = max_objects;
811 		slab_fix(s, "Number of objects adjusted.");
812 	}
813 	if (page->inuse != page->objects - nr) {
814 		slab_err(s, page, "Wrong object count. Counter is %d but "
815 			"counted were %d", page->inuse, page->objects - nr);
816 		page->inuse = page->objects - nr;
817 		slab_fix(s, "Object count adjusted.");
818 	}
819 	return search == NULL;
820 }
821 
822 static void trace(struct kmem_cache *s, struct page *page, void *object,
823 								int alloc)
824 {
825 	if (s->flags & SLAB_TRACE) {
826 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
827 			s->name,
828 			alloc ? "alloc" : "free",
829 			object, page->inuse,
830 			page->freelist);
831 
832 		if (!alloc)
833 			print_section("Object", (void *)object, s->objsize);
834 
835 		dump_stack();
836 	}
837 }
838 
839 /*
840  * Hooks for other subsystems that check memory allocations. In a typical
841  * production configuration these hooks all should produce no code at all.
842  */
843 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
844 {
845 	flags &= gfp_allowed_mask;
846 	lockdep_trace_alloc(flags);
847 	might_sleep_if(flags & __GFP_WAIT);
848 
849 	return should_failslab(s->objsize, flags, s->flags);
850 }
851 
852 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
853 {
854 	flags &= gfp_allowed_mask;
855 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
856 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
857 }
858 
859 static inline void slab_free_hook(struct kmem_cache *s, void *x)
860 {
861 	kmemleak_free_recursive(x, s->flags);
862 
863 	/*
864 	 * The trouble is that we may no longer disable interrupts in the
865 	 * fast path, so in order to make the debug calls that expect irqs
866 	 * to be disabled we need to disable interrupts temporarily.
867 	 */
868 #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
869 	{
870 		unsigned long flags;
871 
872 		local_irq_save(flags);
873 		kmemcheck_slab_free(s, x, s->objsize);
874 		debug_check_no_locks_freed(x, s->objsize);
875 		local_irq_restore(flags);
876 	}
877 #endif
878 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
879 		debug_check_no_obj_freed(x, s->objsize);
880 }
881 
882 /*
883  * Tracking of fully allocated slabs for debugging purposes.
884  */
885 static void add_full(struct kmem_cache_node *n, struct page *page)
886 {
887 	spin_lock(&n->list_lock);
888 	list_add(&page->lru, &n->full);
889 	spin_unlock(&n->list_lock);
890 }
891 
892 static void remove_full(struct kmem_cache *s, struct page *page)
893 {
894 	struct kmem_cache_node *n;
895 
896 	if (!(s->flags & SLAB_STORE_USER))
897 		return;
898 
899 	n = get_node(s, page_to_nid(page));
900 
901 	spin_lock(&n->list_lock);
902 	list_del(&page->lru);
903 	spin_unlock(&n->list_lock);
904 }
905 
906 /* Tracking of the number of slabs for debugging purposes */
907 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
908 {
909 	struct kmem_cache_node *n = get_node(s, node);
910 
911 	return atomic_long_read(&n->nr_slabs);
912 }
913 
914 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
915 {
916 	return atomic_long_read(&n->nr_slabs);
917 }
918 
919 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
920 {
921 	struct kmem_cache_node *n = get_node(s, node);
922 
923 	/*
924 	 * May be called early in order to allocate a slab for the
925 	 * kmem_cache_node structure. Solve the chicken-egg
926 	 * dilemma by deferring the increment of the count during
927 	 * bootstrap (see early_kmem_cache_node_alloc).
928 	 */
929 	if (n) {
930 		atomic_long_inc(&n->nr_slabs);
931 		atomic_long_add(objects, &n->total_objects);
932 	}
933 }
934 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
935 {
936 	struct kmem_cache_node *n = get_node(s, node);
937 
938 	atomic_long_dec(&n->nr_slabs);
939 	atomic_long_sub(objects, &n->total_objects);
940 }
941 
942 /* Object debug checks for alloc/free paths */
943 static void setup_object_debug(struct kmem_cache *s, struct page *page,
944 								void *object)
945 {
946 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
947 		return;
948 
949 	init_object(s, object, SLUB_RED_INACTIVE);
950 	init_tracking(s, object);
951 }
952 
953 static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
954 					void *object, unsigned long addr)
955 {
956 	if (!check_slab(s, page))
957 		goto bad;
958 
959 	if (!on_freelist(s, page, object)) {
960 		object_err(s, page, object, "Object already allocated");
961 		goto bad;
962 	}
963 
964 	if (!check_valid_pointer(s, page, object)) {
965 		object_err(s, page, object, "Freelist Pointer check fails");
966 		goto bad;
967 	}
968 
969 	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
970 		goto bad;
971 
972 	/* Success. Perform special debug activities for allocs */
973 	if (s->flags & SLAB_STORE_USER)
974 		set_track(s, object, TRACK_ALLOC, addr);
975 	trace(s, page, object, 1);
976 	init_object(s, object, SLUB_RED_ACTIVE);
977 	return 1;
978 
979 bad:
980 	if (PageSlab(page)) {
981 		/*
982 		 * If this is a slab page then let's do the best we can
983 		 * to avoid issues in the future. Marking all objects
984 		 * as used avoids touching the remaining objects.
985 		 */
986 		slab_fix(s, "Marking all objects used");
987 		page->inuse = page->objects;
988 		page->freelist = NULL;
989 	}
990 	return 0;
991 }
992 
993 static noinline int free_debug_processing(struct kmem_cache *s,
994 		 struct page *page, void *object, unsigned long addr)
995 {
996 	if (!check_slab(s, page))
997 		goto fail;
998 
999 	if (!check_valid_pointer(s, page, object)) {
1000 		slab_err(s, page, "Invalid object pointer 0x%p", object);
1001 		goto fail;
1002 	}
1003 
1004 	if (on_freelist(s, page, object)) {
1005 		object_err(s, page, object, "Object already free");
1006 		goto fail;
1007 	}
1008 
1009 	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1010 		return 0;
1011 
1012 	if (unlikely(s != page->slab)) {
1013 		if (!PageSlab(page)) {
1014 			slab_err(s, page, "Attempt to free object(0x%p) "
1015 				"outside of slab", object);
1016 		} else if (!page->slab) {
1017 			printk(KERN_ERR
1018 				"SLUB <none>: no slab for object 0x%p.\n",
1019 						object);
1020 			dump_stack();
1021 		} else
1022 			object_err(s, page, object,
1023 					"page slab pointer corrupt.");
1024 		goto fail;
1025 	}
1026 
1027 	/* Special debug activities for freeing objects */
1028 	if (!PageSlubFrozen(page) && !page->freelist)
1029 		remove_full(s, page);
1030 	if (s->flags & SLAB_STORE_USER)
1031 		set_track(s, object, TRACK_FREE, addr);
1032 	trace(s, page, object, 0);
1033 	init_object(s, object, SLUB_RED_INACTIVE);
1034 	return 1;
1035 
1036 fail:
1037 	slab_fix(s, "Object at 0x%p not freed", object);
1038 	return 0;
1039 }
1040 
1041 static int __init setup_slub_debug(char *str)
1042 {
1043 	slub_debug = DEBUG_DEFAULT_FLAGS;
1044 	if (*str++ != '=' || !*str)
1045 		/*
1046 		 * No options specified. Switch on full debugging.
1047 		 */
1048 		goto out;
1049 
1050 	if (*str == ',')
1051 		/*
1052 		 * No options but restriction on slabs. This means full
1053 		 * debugging for slabs matching a pattern.
1054 		 */
1055 		goto check_slabs;
1056 
1057 	if (tolower(*str) == 'o') {
1058 		/*
1059 		 * Avoid enabling debugging on caches if their minimum order
1060 		 * would increase as a result.
1061 		 */
1062 		disable_higher_order_debug = 1;
1063 		goto out;
1064 	}
1065 
1066 	slub_debug = 0;
1067 	if (*str == '-')
1068 		/*
1069 		 * Switch off all debugging measures.
1070 		 */
1071 		goto out;
1072 
1073 	/*
1074 	 * Determine which debug features should be switched on
1075 	 */
1076 	for (; *str && *str != ','; str++) {
1077 		switch (tolower(*str)) {
1078 		case 'f':
1079 			slub_debug |= SLAB_DEBUG_FREE;
1080 			break;
1081 		case 'z':
1082 			slub_debug |= SLAB_RED_ZONE;
1083 			break;
1084 		case 'p':
1085 			slub_debug |= SLAB_POISON;
1086 			break;
1087 		case 'u':
1088 			slub_debug |= SLAB_STORE_USER;
1089 			break;
1090 		case 't':
1091 			slub_debug |= SLAB_TRACE;
1092 			break;
1093 		case 'a':
1094 			slub_debug |= SLAB_FAILSLAB;
1095 			break;
1096 		default:
1097 			printk(KERN_ERR "slub_debug option '%c' "
1098 				"unknown. skipped\n", *str);
1099 		}
1100 	}
1101 
1102 check_slabs:
1103 	if (*str == ',')
1104 		slub_debug_slabs = str + 1;
1105 out:
1106 	return 1;
1107 }
1108 
1109 __setup("slub_debug", setup_slub_debug);
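/*
 * Example command lines accepted by the parser above (editor's note):
 *
 *	slub_debug		enable all default debug options
 *	slub_debug=ZP		red zoning plus poisoning for all caches
 *	slub_debug=U,dentry	user tracking for the dentry cache only
 *	slub_debug=O		skip debugging where it would raise min order
 *	slub_debug=-		switch all debugging off
 */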
1110 
1111 static unsigned long kmem_cache_flags(unsigned long objsize,
1112 	unsigned long flags, const char *name,
1113 	void (*ctor)(void *))
1114 {
1115 	/*
1116 	 * Enable debugging if selected on the kernel commandline.
1117 	 */
1118 	if (slub_debug && (!slub_debug_slabs ||
1119 		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1120 		flags |= slub_debug;
1121 
1122 	return flags;
1123 }
1124 #else
1125 static inline void setup_object_debug(struct kmem_cache *s,
1126 			struct page *page, void *object) {}
1127 
1128 static inline int alloc_debug_processing(struct kmem_cache *s,
1129 	struct page *page, void *object, unsigned long addr) { return 0; }
1130 
1131 static inline int free_debug_processing(struct kmem_cache *s,
1132 	struct page *page, void *object, unsigned long addr) { return 0; }
1133 
1134 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1135 			{ return 1; }
1136 static inline int check_object(struct kmem_cache *s, struct page *page,
1137 			void *object, u8 val) { return 1; }
1138 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1139 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1140 	unsigned long flags, const char *name,
1141 	void (*ctor)(void *))
1142 {
1143 	return flags;
1144 }
1145 #define slub_debug 0
1146 
1147 #define disable_higher_order_debug 0
1148 
1149 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1150 							{ return 0; }
1151 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1152 							{ return 0; }
1153 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1154 							int objects) {}
1155 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1156 							int objects) {}
1157 
1158 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1159 							{ return 0; }
1160 
1161 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1162 		void *object) {}
1163 
1164 static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1165 
1166 #endif /* CONFIG_SLUB_DEBUG */
1167 
1168 /*
1169  * Slab allocation and freeing
1170  */
1171 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1172 					struct kmem_cache_order_objects oo)
1173 {
1174 	int order = oo_order(oo);
1175 
1176 	flags |= __GFP_NOTRACK;
1177 
1178 	if (node == NUMA_NO_NODE)
1179 		return alloc_pages(flags, order);
1180 	else
1181 		return alloc_pages_exact_node(node, flags, order);
1182 }
1183 
1184 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1185 {
1186 	struct page *page;
1187 	struct kmem_cache_order_objects oo = s->oo;
1188 	gfp_t alloc_gfp;
1189 
1190 	flags |= s->allocflags;
1191 
1192 	/*
1193 	 * Let the initial higher-order allocation fail under memory pressure
1194 	 * so we fall back to the minimum order allocation.
1195 	 */
1196 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1197 
1198 	page = alloc_slab_page(alloc_gfp, node, oo);
1199 	if (unlikely(!page)) {
1200 		oo = s->min;
1201 		/*
1202 		 * Allocation may have failed due to fragmentation.
1203 		 * Try a lower order alloc if possible
1204 		 */
1205 		page = alloc_slab_page(flags, node, oo);
1206 		if (!page)
1207 			return NULL;
1208 
1209 		stat(s, ORDER_FALLBACK);
1210 	}
1211 
1212 	if (kmemcheck_enabled
1213 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1214 		int pages = 1 << oo_order(oo);
1215 
1216 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1217 
1218 		/*
1219 		 * Objects from caches that have a constructor don't get
1220 		 * cleared when they're allocated, so we need to do it here.
1221 		 */
1222 		if (s->ctor)
1223 			kmemcheck_mark_uninitialized_pages(page, pages);
1224 		else
1225 			kmemcheck_mark_unallocated_pages(page, pages);
1226 	}
1227 
1228 	page->objects = oo_objects(oo);
1229 	mod_zone_page_state(page_zone(page),
1230 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1231 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1232 		1 << oo_order(oo));
1233 
1234 	return page;
1235 }
1236 
1237 static void setup_object(struct kmem_cache *s, struct page *page,
1238 				void *object)
1239 {
1240 	setup_object_debug(s, page, object);
1241 	if (unlikely(s->ctor))
1242 		s->ctor(object);
1243 }
1244 
1245 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1246 {
1247 	struct page *page;
1248 	void *start;
1249 	void *last;
1250 	void *p;
1251 
1252 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1253 
1254 	page = allocate_slab(s,
1255 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1256 	if (!page)
1257 		goto out;
1258 
1259 	inc_slabs_node(s, page_to_nid(page), page->objects);
1260 	page->slab = s;
1261 	page->flags |= 1 << PG_slab;
1262 
1263 	start = page_address(page);
1264 
1265 	if (unlikely(s->flags & SLAB_POISON))
1266 		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1267 
1268 	last = start;
1269 	for_each_object(p, s, start, page->objects) {
1270 		setup_object(s, page, last);
1271 		set_freepointer(s, last, p);
1272 		last = p;
1273 	}
1274 	setup_object(s, page, last);
1275 	set_freepointer(s, last, NULL);
1276 
1277 	page->freelist = start;
1278 	page->inuse = 0;
1279 out:
1280 	return page;
1281 }
1282 
1283 static void __free_slab(struct kmem_cache *s, struct page *page)
1284 {
1285 	int order = compound_order(page);
1286 	int pages = 1 << order;
1287 
1288 	if (kmem_cache_debug(s)) {
1289 		void *p;
1290 
1291 		slab_pad_check(s, page);
1292 		for_each_object(p, s, page_address(page),
1293 						page->objects)
1294 			check_object(s, page, p, SLUB_RED_INACTIVE);
1295 	}
1296 
1297 	kmemcheck_free_shadow(page, compound_order(page));
1298 
1299 	mod_zone_page_state(page_zone(page),
1300 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1301 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1302 		-pages);
1303 
1304 	__ClearPageSlab(page);
1305 	reset_page_mapcount(page);
1306 	if (current->reclaim_state)
1307 		current->reclaim_state->reclaimed_slab += pages;
1308 	__free_pages(page, order);
1309 }
1310 
1311 #define need_reserve_slab_rcu						\
1312 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1313 
1314 static void rcu_free_slab(struct rcu_head *h)
1315 {
1316 	struct page *page;
1317 
1318 	if (need_reserve_slab_rcu)
1319 		page = virt_to_head_page(h);
1320 	else
1321 		page = container_of((struct list_head *)h, struct page, lru);
1322 
1323 	__free_slab(page->slab, page);
1324 }
1325 
1326 static void free_slab(struct kmem_cache *s, struct page *page)
1327 {
1328 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1329 		struct rcu_head *head;
1330 
1331 		if (need_reserve_slab_rcu) {
1332 			int order = compound_order(page);
1333 			int offset = (PAGE_SIZE << order) - s->reserved;
1334 
1335 			VM_BUG_ON(s->reserved != sizeof(*head));
1336 			head = page_address(page) + offset;
1337 		} else {
1338 			/*
1339 			 * RCU free overloads the RCU head over the LRU
1340 			 */
1341 			head = (void *)&page->lru;
1342 		}
1343 
1344 		call_rcu(head, rcu_free_slab);
1345 	} else
1346 		__free_slab(s, page);
1347 }
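/*
 * Sketch of the reserved-tail case above (editor's note): for an
 * order 0 slab with s->reserved == sizeof(struct rcu_head), the head
 * ends up in the last bytes of the page itself,
 *
 *	head = page_address(page) + PAGE_SIZE - sizeof(struct rcu_head);
 *
 * which is exactly the space order_objects() excluded from the object
 * count, so no live object can overlap it.
 */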
1348 
1349 static void discard_slab(struct kmem_cache *s, struct page *page)
1350 {
1351 	dec_slabs_node(s, page_to_nid(page), page->objects);
1352 	free_slab(s, page);
1353 }
1354 
1355 /*
1356  * Per slab locking using the pagelock
1357  */
1358 static __always_inline void slab_lock(struct page *page)
1359 {
1360 	bit_spin_lock(PG_locked, &page->flags);
1361 }
1362 
1363 static __always_inline void slab_unlock(struct page *page)
1364 {
1365 	__bit_spin_unlock(PG_locked, &page->flags);
1366 }
1367 
1368 static __always_inline int slab_trylock(struct page *page)
1369 {
1370 	int rc = 1;
1371 
1372 	rc = bit_spin_trylock(PG_locked, &page->flags);
1373 	return rc;
1374 }
1375 
1376 /*
1377  * Management of partially allocated slabs
1378  */
1379 static void add_partial(struct kmem_cache_node *n,
1380 				struct page *page, int tail)
1381 {
1382 	spin_lock(&n->list_lock);
1383 	n->nr_partial++;
1384 	if (tail)
1385 		list_add_tail(&page->lru, &n->partial);
1386 	else
1387 		list_add(&page->lru, &n->partial);
1388 	spin_unlock(&n->list_lock);
1389 }
1390 
1391 static inline void __remove_partial(struct kmem_cache_node *n,
1392 					struct page *page)
1393 {
1394 	list_del(&page->lru);
1395 	n->nr_partial--;
1396 }
1397 
1398 static void remove_partial(struct kmem_cache *s, struct page *page)
1399 {
1400 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1401 
1402 	spin_lock(&n->list_lock);
1403 	__remove_partial(n, page);
1404 	spin_unlock(&n->list_lock);
1405 }
1406 
1407 /*
1408  * Lock slab and remove from the partial list.
1409  *
1410  * Must hold list_lock.
1411  */
1412 static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1413 							struct page *page)
1414 {
1415 	if (slab_trylock(page)) {
1416 		__remove_partial(n, page);
1417 		__SetPageSlubFrozen(page);
1418 		return 1;
1419 	}
1420 	return 0;
1421 }
1422 
1423 /*
1424  * Try to allocate a partial slab from a specific node.
1425  */
1426 static struct page *get_partial_node(struct kmem_cache_node *n)
1427 {
1428 	struct page *page;
1429 
1430 	/*
1431 	 * Racy check. If we mistakenly see no partial slabs then we
1432 	 * just allocate an empty slab. If we mistakenly try to get a
1433 	 * partial slab and there is none available then get_partial_node()
1434 	 * will return NULL.
1435 	 */
1436 	if (!n || !n->nr_partial)
1437 		return NULL;
1438 
1439 	spin_lock(&n->list_lock);
1440 	list_for_each_entry(page, &n->partial, lru)
1441 		if (lock_and_freeze_slab(n, page))
1442 			goto out;
1443 	page = NULL;
1444 out:
1445 	spin_unlock(&n->list_lock);
1446 	return page;
1447 }
1448 
1449 /*
1450  * Get a page from somewhere. Search in increasing NUMA distances.
1451  */
1452 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1453 {
1454 #ifdef CONFIG_NUMA
1455 	struct zonelist *zonelist;
1456 	struct zoneref *z;
1457 	struct zone *zone;
1458 	enum zone_type high_zoneidx = gfp_zone(flags);
1459 	struct page *page;
1460 
1461 	/*
1462 	 * The defrag ratio allows a configuration of the tradeoffs between
1463 	 * inter node defragmentation and node local allocations. A lower
1464 	 * defrag_ratio increases the tendency to do local allocations
1465 	 * instead of attempting to obtain partial slabs from other nodes.
1466 	 *
1467 	 * If the defrag_ratio is set to 0 then kmalloc() always
1468 	 * returns node local objects. If the ratio is higher then kmalloc()
1469 	 * may return off node objects because partial slabs are obtained
1470 	 * from other nodes and filled up.
1471 	 *
1472 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1473 	 * defrag_ratio = 1000) then every (well almost) allocation will
1474 	 * first attempt to defrag slab caches on other nodes. This means
1475 	 * scanning over all nodes to look for partial slabs which may be
1476 	 * expensive if we do it every time we are trying to find a slab
1477 	 * with available objects.
1478 	 */
1479 	if (!s->remote_node_defrag_ratio ||
1480 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1481 		return NULL;
1482 
1483 	get_mems_allowed();
1484 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1485 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1486 		struct kmem_cache_node *n;
1487 
1488 		n = get_node(s, zone_to_nid(zone));
1489 
1490 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1491 				n->nr_partial > s->min_partial) {
1492 			page = get_partial_node(n);
1493 			if (page) {
1494 				put_mems_allowed();
1495 				return page;
1496 			}
1497 		}
1498 	}
1499 	put_mems_allowed();
1500 #endif
1501 	return NULL;
1502 }
1503 
1504 /*
1505  * Get a partial page, lock it and return it.
1506  */
1507 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1508 {
1509 	struct page *page;
1510 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
1511 
1512 	page = get_partial_node(get_node(s, searchnode));
1513 	if (page || node != NUMA_NO_NODE)
1514 		return page;
1515 
1516 	return get_any_partial(s, flags);
1517 }
1518 
1519 /*
1520  * Move a page back to the lists.
1521  *
1522  * Must be called with the slab lock held.
1523  *
1524  * On exit the slab lock will have been dropped.
1525  */
1526 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1527 	__releases(bitlock)
1528 {
1529 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1530 
1531 	__ClearPageSlubFrozen(page);
1532 	if (page->inuse) {
1533 
1534 		if (page->freelist) {
1535 			add_partial(n, page, tail);
1536 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1537 		} else {
1538 			stat(s, DEACTIVATE_FULL);
1539 			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
1540 				add_full(n, page);
1541 		}
1542 		slab_unlock(page);
1543 	} else {
1544 		stat(s, DEACTIVATE_EMPTY);
1545 		if (n->nr_partial < s->min_partial) {
1546 			/*
1547 			 * Adding an empty slab to the partial slabs in order
1548 			 * to avoid page allocator overhead. This slab needs
1549 			 * to come after the other slabs with objects in them
1550 			 * so that the others get filled first. That way the
1551 			 * size of the partial list stays small.
1552 			 *
1553 			 * kmem_cache_shrink can reclaim any empty slabs from
1554 			 * the partial list.
1555 			 */
1556 			add_partial(n, page, 1);
1557 			slab_unlock(page);
1558 		} else {
1559 			slab_unlock(page);
1560 			stat(s, FREE_SLAB);
1561 			discard_slab(s, page);
1562 		}
1563 	}
1564 }
1565 
1566 #ifdef CONFIG_PREEMPT
1567 /*
1568  * Calculate the next globally unique transaction for disambiguation
1569  * during cmpxchg. The transactions start with the cpu number and are then
1570  * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
1571  */
1572 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1573 #else
1574 /*
1575  * No preemption supported, therefore there is also no need to check
1576  * for different cpus.
1577  */
1578 #define TID_STEP 1
1579 #endif
1580 
1581 static inline unsigned long next_tid(unsigned long tid)
1582 {
1583 	return tid + TID_STEP;
1584 }
1585 
1586 static inline unsigned int tid_to_cpu(unsigned long tid)
1587 {
1588 	return tid % TID_STEP;
1589 }
1590 
1591 static inline unsigned long tid_to_event(unsigned long tid)
1592 {
1593 	return tid / TID_STEP;
1594 }
1595 
1596 static inline unsigned int init_tid(int cpu)
1597 {
1598 	return cpu;
1599 }
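/*
 * Worked example (editor's note, assuming CONFIG_PREEMPT and
 * CONFIG_NR_CPUS == 4, hence TID_STEP == 4): cpu 2's tids run
 * 2, 6, 10, ... so that
 *
 *	tid_to_cpu(10)   == 10 % 4 == 2
 *	tid_to_event(10) == 10 / 4 == 2
 *
 * and tids generated on different cpus can never collide.
 */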
1600 
1601 static inline void note_cmpxchg_failure(const char *n,
1602 		const struct kmem_cache *s, unsigned long tid)
1603 {
1604 #ifdef SLUB_DEBUG_CMPXCHG
1605 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1606 
1607 	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1608 
1609 #ifdef CONFIG_PREEMPT
1610 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1611 		printk("due to cpu change %d -> %d\n",
1612 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
1613 	else
1614 #endif
1615 	if (tid_to_event(tid) != tid_to_event(actual_tid))
1616 		printk("due to cpu running other code. Event %ld->%ld\n",
1617 			tid_to_event(tid), tid_to_event(actual_tid));
1618 	else
1619 		printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1620 			actual_tid, tid, next_tid(tid));
1621 #endif
1622 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1623 }
1624 
1625 void init_kmem_cache_cpus(struct kmem_cache *s)
1626 {
1627 	int cpu;
1628 
1629 	for_each_possible_cpu(cpu)
1630 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1631 }
1632 /*
1633  * Remove the cpu slab
1634  */
1635 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1636 	__releases(bitlock)
1637 {
1638 	struct page *page = c->page;
1639 	int tail = 1;
1640 
1641 	if (page->freelist)
1642 		stat(s, DEACTIVATE_REMOTE_FREES);
1643 	/*
1644 	 * Merge cpu freelist into slab freelist. Typically we get here
1645 	 * because both freelists are empty. So this is unlikely
1646 	 * to occur.
1647 	 */
1648 	while (unlikely(c->freelist)) {
1649 		void **object;
1650 
1651 		tail = 0;	/* Hot objects. Put the slab first */
1652 
1653 		/* Retrieve object from cpu_freelist */
1654 		object = c->freelist;
1655 		c->freelist = get_freepointer(s, c->freelist);
1656 
1657 		/* And put onto the regular freelist */
1658 		set_freepointer(s, object, page->freelist);
1659 		page->freelist = object;
1660 		page->inuse--;
1661 	}
1662 	c->page = NULL;
1663 	c->tid = next_tid(c->tid);
1664 	unfreeze_slab(s, page, tail);
1665 }
1666 
1667 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1668 {
1669 	stat(s, CPUSLAB_FLUSH);
1670 	slab_lock(c->page);
1671 	deactivate_slab(s, c);
1672 }
1673 
1674 /*
1675  * Flush cpu slab.
1676  *
1677  * Called from IPI handler with interrupts disabled.
1678  */
1679 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1680 {
1681 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
1682 
1683 	if (likely(c && c->page))
1684 		flush_slab(s, c);
1685 }
1686 
1687 static void flush_cpu_slab(void *d)
1688 {
1689 	struct kmem_cache *s = d;
1690 
1691 	__flush_cpu_slab(s, smp_processor_id());
1692 }
1693 
1694 static void flush_all(struct kmem_cache *s)
1695 {
1696 	on_each_cpu(flush_cpu_slab, s, 1);
1697 }
1698 
1699 /*
1700  * Check if the objects in a per cpu structure fit numa
1701  * locality expectations.
1702  */
1703 static inline int node_match(struct kmem_cache_cpu *c, int node)
1704 {
1705 #ifdef CONFIG_NUMA
1706 	if (node != NUMA_NO_NODE && c->node != node)
1707 		return 0;
1708 #endif
1709 	return 1;
1710 }
1711 
1712 static int count_free(struct page *page)
1713 {
1714 	return page->objects - page->inuse;
1715 }
1716 
1717 static unsigned long count_partial(struct kmem_cache_node *n,
1718 					int (*get_count)(struct page *))
1719 {
1720 	unsigned long flags;
1721 	unsigned long x = 0;
1722 	struct page *page;
1723 
1724 	spin_lock_irqsave(&n->list_lock, flags);
1725 	list_for_each_entry(page, &n->partial, lru)
1726 		x += get_count(page);
1727 	spin_unlock_irqrestore(&n->list_lock, flags);
1728 	return x;
1729 }
1730 
1731 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
1732 {
1733 #ifdef CONFIG_SLUB_DEBUG
1734 	return atomic_long_read(&n->total_objects);
1735 #else
1736 	return 0;
1737 #endif
1738 }
1739 
1740 static noinline void
1741 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
1742 {
1743 	int node;
1744 
1745 	printk(KERN_WARNING
1746 		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1747 		nid, gfpflags);
1748 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
1749 		"default order: %d, min order: %d\n", s->name, s->objsize,
1750 		s->size, oo_order(s->oo), oo_order(s->min));
1751 
1752 	if (oo_order(s->min) > get_order(s->objsize))
1753 		printk(KERN_WARNING "  %s debugging increased min order, use "
1754 		       "slub_debug=O to disable.\n", s->name);
1755 
1756 	for_each_online_node(node) {
1757 		struct kmem_cache_node *n = get_node(s, node);
1758 		unsigned long nr_slabs;
1759 		unsigned long nr_objs;
1760 		unsigned long nr_free;
1761 
1762 		if (!n)
1763 			continue;
1764 
1765 		nr_free  = count_partial(n, count_free);
1766 		nr_slabs = node_nr_slabs(n);
1767 		nr_objs  = node_nr_objs(n);
1768 
1769 		printk(KERN_WARNING
1770 			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
1771 			node, nr_slabs, nr_objs, nr_free);
1772 	}
1773 }
1774 
1775 /*
1776  * Slow path. The lockless freelist is empty or we need to perform
1777  * debugging duties.
1778  *
1779  * Interrupts are disabled.
1780  *
1781  * Processing is still very fast if new objects have been freed to the
1782  * regular freelist. In that case we simply take over the regular freelist
1783  * as the lockless freelist and zap the regular freelist.
1784  *
1785  * If that is not working then we fall back to the partial lists. We take the
1786  * first element of the freelist as the object to allocate now and move the
1787  * rest of the freelist to the lockless freelist.
1788  *
1789  * And if we were unable to get a new slab from the partial slab lists then
1790  * we need to allocate a new slab. This is the slowest path since it involves
1791  * a call to the page allocator and the setup of a new slab.
1792  */
1793 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1794 			  unsigned long addr, struct kmem_cache_cpu *c)
1795 {
1796 	void **object;
1797 	struct page *page;
1798 	unsigned long flags;
1799 
1800 	local_irq_save(flags);
1801 #ifdef CONFIG_PREEMPT
1802 	/*
1803 	 * We may have been preempted and rescheduled on a different
1804 	 * cpu before disabling interrupts. Need to reload cpu area
1805 	 * pointer.
1806 	 */
1807 	c = this_cpu_ptr(s->cpu_slab);
1808 #endif
1809 
1810 	/* We handle __GFP_ZERO in the caller */
1811 	gfpflags &= ~__GFP_ZERO;
1812 
1813 	page = c->page;
1814 	if (!page)
1815 		goto new_slab;
1816 
1817 	slab_lock(page);
1818 	if (unlikely(!node_match(c, node)))
1819 		goto another_slab;
1820 
1821 	stat(s, ALLOC_REFILL);
1822 
1823 load_freelist:
1824 	object = page->freelist;
1825 	if (unlikely(!object))
1826 		goto another_slab;
1827 	if (kmem_cache_debug(s))
1828 		goto debug;
1829 
1830 	c->freelist = get_freepointer(s, object);
1831 	page->inuse = page->objects;
1832 	page->freelist = NULL;
1833 
1834 	slab_unlock(page);
1835 	c->tid = next_tid(c->tid);
1836 	local_irq_restore(flags);
1837 	stat(s, ALLOC_SLOWPATH);
1838 	return object;
1839 
1840 another_slab:
1841 	deactivate_slab(s, c);
1842 
1843 new_slab:
1844 	page = get_partial(s, gfpflags, node);
1845 	if (page) {
1846 		stat(s, ALLOC_FROM_PARTIAL);
1847 		c->node = page_to_nid(page);
1848 		c->page = page;
1849 		goto load_freelist;
1850 	}
1851 
1852 	gfpflags &= gfp_allowed_mask;
1853 	if (gfpflags & __GFP_WAIT)
1854 		local_irq_enable();
1855 
1856 	page = new_slab(s, gfpflags, node);
1857 
1858 	if (gfpflags & __GFP_WAIT)
1859 		local_irq_disable();
1860 
1861 	if (page) {
1862 		c = __this_cpu_ptr(s->cpu_slab);
1863 		stat(s, ALLOC_SLAB);
1864 		if (c->page)
1865 			flush_slab(s, c);
1866 
1867 		slab_lock(page);
1868 		__SetPageSlubFrozen(page);
1869 		c->node = page_to_nid(page);
1870 		c->page = page;
1871 		goto load_freelist;
1872 	}
1873 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
1874 		slab_out_of_memory(s, gfpflags, node);
1875 	local_irq_restore(flags);
1876 	return NULL;
1877 debug:
1878 	if (!alloc_debug_processing(s, page, object, addr))
1879 		goto another_slab;
1880 
1881 	page->inuse++;
1882 	page->freelist = get_freepointer(s, object);
1883 	deactivate_slab(s, c);
1884 	c->page = NULL;
1885 	c->node = NUMA_NO_NODE;
1886 	local_irq_restore(flags);
1887 	return object;
1888 }
1889 
1890 /*
1891  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1892  * have the fastpath folded into their functions. So no function call
1893  * overhead for requests that can be satisfied on the fastpath.
1894  *
1895  * The fastpath works by first checking if the lockless freelist can be used.
1896  * If not then __slab_alloc is called for slow processing.
1897  *
1898  * Otherwise we can simply pick the next object from the lockless free list.
1899  */
1900 static __always_inline void *slab_alloc(struct kmem_cache *s,
1901 		gfp_t gfpflags, int node, unsigned long addr)
1902 {
1903 	void **object;
1904 	struct kmem_cache_cpu *c;
1905 	unsigned long tid;
1906 
1907 	if (slab_pre_alloc_hook(s, gfpflags))
1908 		return NULL;
1909 
1910 redo:
1911 
1912 	/*
1913 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
1914 	 * enabled. We may switch back and forth between cpus while
1915 	 * reading from one cpu area. That does not matter as long
1916 	 * as we end up on the original cpu again when doing the cmpxchg.
1917 	 */
1918 	c = __this_cpu_ptr(s->cpu_slab);
1919 
1920 	/*
1921 	 * The transaction ids are globally unique per cpu and per operation on
1922 	 * a per cpu queue. Thus it can be guaranteed that the cmpxchg_double
1923 	 * occurs on the right processor and that there was no operation on the
1924 	 * linked list in between.
1925 	 */
1926 	tid = c->tid;
1927 	barrier();
1928 
1929 	object = c->freelist;
1930 	if (unlikely(!object || !node_match(c, node)))
1931 
1932 		object = __slab_alloc(s, gfpflags, node, addr, c);
1933 
1934 	else {
1935 		/*
1936 		 * The cmpxchg will only match if there was no additional
1937 		 * operation and if we are on the right processor.
1938 		 *
1939 		 * The cmpxchg does the following atomically (without lock semantics!)
1940 		 * 1. Relocate first pointer to the current per cpu area.
1941 		 * 2. Verify that tid and freelist have not been changed.
1942 		 * 3. If they were not changed, replace tid and freelist.
1943 		 *
1944 		 * Since this is without lock semantics the protection is only against
1945 		 * code executing on this cpu, *not* against access by other cpus.
1946 		 */
1947 		if (unlikely(!irqsafe_cpu_cmpxchg_double(
1948 				s->cpu_slab->freelist, s->cpu_slab->tid,
1949 				object, tid,
1950 				get_freepointer_safe(s, object), next_tid(tid)))) {
1951 
1952 			note_cmpxchg_failure("slab_alloc", s, tid);
1953 			goto redo;
1954 		}
1955 		stat(s, ALLOC_FASTPATH);
1956 	}
1957 
1958 	if (unlikely(gfpflags & __GFP_ZERO) && object)
1959 		memset(object, 0, s->objsize);
1960 
1961 	slab_post_alloc_hook(s, gfpflags, object);
1962 
1963 	return object;
1964 }
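
/*
 * Illustrative interleaving (comment only; not code used by the
 * allocator): the ABA problem that the tid guards against. With a plain
 * cmpxchg on the freelist alone, this sequence on one cpu would go
 * undetected:
 *
 *	object = c->freelist;			object is A, next is X
 *	<interrupt: A is allocated, X is allocated, A is freed again.
 *	 c->freelist points to A once more, but A's free pointer now
 *	 references a different object than X>
 *	cmpxchg(&c->freelist, A, X);		succeeds on stale data
 *
 * Since every allocation and free moves the tid forward via next_tid(),
 * the cmpxchg_double in slab_alloc() also compares the tid, fails in
 * this scenario and retries via the redo label.
 */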
1965 
1966 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1967 {
1968 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
1969 
1970 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
1971 
1972 	return ret;
1973 }
1974 EXPORT_SYMBOL(kmem_cache_alloc);
1975 
1976 #ifdef CONFIG_TRACING
1977 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
1978 {
1979 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
1980 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
1981 	return ret;
1982 }
1983 EXPORT_SYMBOL(kmem_cache_alloc_trace);
1984 
1985 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
1986 {
1987 	void *ret = kmalloc_order(size, flags, order);
1988 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
1989 	return ret;
1990 }
1991 EXPORT_SYMBOL(kmalloc_order_trace);
1992 #endif
1993 
1994 #ifdef CONFIG_NUMA
1995 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1996 {
1997 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
1998 
1999 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2000 				    s->objsize, s->size, gfpflags, node);
2001 
2002 	return ret;
2003 }
2004 EXPORT_SYMBOL(kmem_cache_alloc_node);
2005 
2006 #ifdef CONFIG_TRACING
2007 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2008 				    gfp_t gfpflags,
2009 				    int node, size_t size)
2010 {
2011 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2012 
2013 	trace_kmalloc_node(_RET_IP_, ret,
2014 			   size, s->size, gfpflags, node);
2015 	return ret;
2016 }
2017 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2018 #endif
2019 #endif
2020 
2021 /*
2022  * Slow path handling. This may still be called frequently since objects
2023  * have a longer lifetime than the cpu slabs in most processing loads.
2024  *
2025  * So we still attempt to reduce cache line usage. Just take the slab
2026  * lock and free the item. If there is no additional partial page
2027  * handling required then we can return immediately.
2028  */
2029 static void __slab_free(struct kmem_cache *s, struct page *page,
2030 			void *x, unsigned long addr)
2031 {
2032 	void *prior;
2033 	void **object = (void *)x;
2034 	unsigned long flags;
2035 
2036 	local_irq_save(flags);
2037 	slab_lock(page);
2038 	stat(s, FREE_SLOWPATH);
2039 
2040 	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
2041 		goto out_unlock;
2042 
2043 	prior = page->freelist;
2044 	set_freepointer(s, object, prior);
2045 	page->freelist = object;
2046 	page->inuse--;
2047 
2048 	if (unlikely(PageSlubFrozen(page))) {
2049 		stat(s, FREE_FROZEN);
2050 		goto out_unlock;
2051 	}
2052 
2053 	if (unlikely(!page->inuse))
2054 		goto slab_empty;
2055 
2056 	/*
2057 	 * Objects left in the slab. If it was not on the partial list before
2058 	 * then add it.
2059 	 */
2060 	if (unlikely(!prior)) {
2061 		add_partial(get_node(s, page_to_nid(page)), page, 1);
2062 		stat(s, FREE_ADD_PARTIAL);
2063 	}
2064 
2065 out_unlock:
2066 	slab_unlock(page);
2067 	local_irq_restore(flags);
2068 	return;
2069 
2070 slab_empty:
2071 	if (prior) {
2072 		/*
2073 		 * Slab still on the partial list.
2074 		 */
2075 		remove_partial(s, page);
2076 		stat(s, FREE_REMOVE_PARTIAL);
2077 	}
2078 	slab_unlock(page);
2079 	local_irq_restore(flags);
2080 	stat(s, FREE_SLAB);
2081 	discard_slab(s, page);
2082 }
2083 
2084 /*
2085  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2086  * can perform fastpath freeing without additional function calls.
2087  *
2088  * The fastpath is only possible if we are freeing to the current cpu slab
2089  * of this processor. This is typically the case if we have just allocated
2090  * the item before.
2091  *
2092  * If fastpath is not possible then fall back to __slab_free where we deal
2093  * with all sorts of special processing.
2094  */
2095 static __always_inline void slab_free(struct kmem_cache *s,
2096 			struct page *page, void *x, unsigned long addr)
2097 {
2098 	void **object = (void *)x;
2099 	struct kmem_cache_cpu *c;
2100 	unsigned long tid;
2101 
2102 	slab_free_hook(s, x);
2103 
2104 redo:
2105 
2106 	/*
2107 	 * Determine the current cpu's per cpu slab.
2108 	 * The cpu may change afterward. However that does not matter since
2109 	 * data is retrieved via this pointer. If we are on the same cpu
2110 	 * during the cmpxchg then the free will succeed.
2111 	 */
2112 	c = __this_cpu_ptr(s->cpu_slab);
2113 
2114 	tid = c->tid;
2115 	barrier();
2116 
2117 	if (likely(page == c->page)) {
2118 		set_freepointer(s, object, c->freelist);
2119 
2120 		if (unlikely(!irqsafe_cpu_cmpxchg_double(
2121 				s->cpu_slab->freelist, s->cpu_slab->tid,
2122 				c->freelist, tid,
2123 				object, next_tid(tid)))) {
2124 
2125 			note_cmpxchg_failure("slab_free", s, tid);
2126 			goto redo;
2127 		}
2128 		stat(s, FREE_FASTPATH);
2129 	} else
2130 		__slab_free(s, page, x, addr);
2131 
2132 }
2133 
2134 void kmem_cache_free(struct kmem_cache *s, void *x)
2135 {
2136 	struct page *page;
2137 
2138 	page = virt_to_head_page(x);
2139 
2140 	slab_free(s, page, x, _RET_IP_);
2141 
2142 	trace_kmem_cache_free(_RET_IP_, x);
2143 }
2144 EXPORT_SYMBOL(kmem_cache_free);
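
/*
 * Illustrative usage from a caller's point of view (comment only;
 * "struct my_obj" and my_cache are hypothetical):
 *
 *	struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *				     0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 *
 * Freeing an object that was just allocated on the same cpu typically
 * takes the cmpxchg fastpath above; everything else goes through
 * __slab_free().
 */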
2145 
2146 /*
2147  * Object placement in a slab is made very easy because we always start at
2148  * offset 0. If we tune the size of the object to the alignment then we can
2149  * get the required alignment by putting one properly sized object after
2150  * another.
2151  *
2152  * Notice that the allocation order determines the sizes of the per cpu
2153  * caches. Each processor always has one slab available for allocations.
2154  * Increasing the allocation order reduces the number of times that slabs
2155  * must be moved on and off the partial lists and is therefore a factor in
2156  * locking overhead.
2157  */
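
/*
 * Worked example (illustrative numbers): an object size of 24 bytes with
 * 8 byte alignment places objects at offsets 0, 24, 48, ... of the slab
 * page. No per-object headers or external metadata are needed; an
 * order-0 page of 4096 bytes then holds 170 such objects with 16 bytes
 * left over.
 */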
2158 
2159 /*
2160  * Minimum / Maximum order of slab pages. This influences locking overhead
2161  * and slab fragmentation. A higher order reduces the number of partial slabs
2162  * and increases the number of allocations possible without having to
2163  * take the list_lock.
2164  */
2165 static int slub_min_order;
2166 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2167 static int slub_min_objects;
2168 
2169 /*
2170  * Merge control. If this is set then no merging of slab caches will occur.
2171  * (Could be removed. This was introduced to pacify the merge skeptics.)
2172  */
2173 static int slub_nomerge;
2174 
2175 /*
2176  * Calculate the order of allocation given a slab object size.
2177  *
2178  * The order of allocation has significant impact on performance and other
2179  * system components. Generally order 0 allocations should be preferred since
2180  * order 0 does not cause fragmentation in the page allocator. Larger objects
2181  * can be problematic to put into order 0 slabs because there may be too much
2182  * unused space left. We go to a higher order if more than 1/16th of the slab
2183  * would be wasted.
2184  *
2185  * In order to reach satisfactory performance we must ensure that a minimum
2186  * number of objects is in one slab. Otherwise we may generate too much
2187  * activity on the partial lists which requires taking the list_lock. This is
2188  * less of a concern for large slabs though, which are rarely used.
2189  *
2190  * slub_max_order specifies the order at which we stop considering the
2191  * number of objects in a slab as critical. If we reach slub_max_order then
2192  * we try to keep the page order as low as possible. So we accept more waste
2193  * of space in favor of a small page order.
2194  *
2195  * Higher order allocations also allow the placement of more objects in a
2196  * slab and thereby reduce object handling overhead. If the user has
2197  * requested a higher minimum order then we start with that one instead of
2198  * the smallest order which will fit the object.
2199  */
2200 static inline int slab_order(int size, int min_objects,
2201 				int max_order, int fract_leftover, int reserved)
2202 {
2203 	int order;
2204 	int rem;
2205 	int min_order = slub_min_order;
2206 
2207 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
2208 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
2209 
2210 	for (order = max(min_order,
2211 				fls(min_objects * size - 1) - PAGE_SHIFT);
2212 			order <= max_order; order++) {
2213 
2214 		unsigned long slab_size = PAGE_SIZE << order;
2215 
2216 		if (slab_size < min_objects * size + reserved)
2217 			continue;
2218 
2219 		rem = (slab_size - reserved) % size;
2220 
2221 		if (rem <= slab_size / fract_leftover)
2222 			break;
2223 
2224 	}
2225 
2226 	return order;
2227 }
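
/*
 * Worked example for slab_order() (illustrative, assuming PAGE_SIZE of
 * 4096 and reserved == 0): for size = 192, min_objects = 16 and
 * fract_leftover = 16 the loop starts at order 0, since 16 * 192 = 3072
 * fits in one page. The remainder 4096 % 192 = 64 is below
 * 4096 / 16 = 256, so order 0 is accepted. Had the remainder exceeded
 * one sixteenth of the slab, the next order would have been tried.
 */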
2228 
2229 static inline int calculate_order(int size, int reserved)
2230 {
2231 	int order;
2232 	int min_objects;
2233 	int fraction;
2234 	int max_objects;
2235 
2236 	/*
2237 	 * Attempt to find best configuration for a slab. This
2238 	 * works by first attempting to generate a layout with
2239 	 * the best configuration and backing off gradually.
2240 	 *
2241 	 * First we reduce the acceptable waste in a slab. Then
2242 	 * we reduce the minimum objects required in a slab.
2243 	 */
2244 	min_objects = slub_min_objects;
2245 	if (!min_objects)
2246 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
2247 	max_objects = order_objects(slub_max_order, size, reserved);
2248 	min_objects = min(min_objects, max_objects);
2249 
2250 	while (min_objects > 1) {
2251 		fraction = 16;
2252 		while (fraction >= 4) {
2253 			order = slab_order(size, min_objects,
2254 					slub_max_order, fraction, reserved);
2255 			if (order <= slub_max_order)
2256 				return order;
2257 			fraction /= 2;
2258 		}
2259 		min_objects--;
2260 	}
2261 
2262 	/*
2263 	 * We were unable to place multiple objects in a slab. Now
2264 	 * let's see if we can place a single object there.
2265 	 */
2266 	order = slab_order(size, 1, slub_max_order, 1, reserved);
2267 	if (order <= slub_max_order)
2268 		return order;
2269 
2270 	/*
2271 	 * Doh, this slab cannot be placed using slub_max_order.
2272 	 */
2273 	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
2274 	if (order < MAX_ORDER)
2275 		return order;
2276 	return -ENOSYS;
2277 }
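
/*
 * Worked example for the back-off (illustrative, assuming PAGE_SIZE of
 * 4096 and slub_max_order == 3): for size = 5000, six objects fit into
 * an order-3 slab but waste 32768 % 5000 = 2768 bytes, more than
 * 32768 / 16 = 2048, so the first pass with fraction = 16 fails.
 * Relaxing the acceptable waste to 1/8 accepts order 3 on the second
 * pass. Only when even a single object does not fit below
 * slub_max_order do we resort to MAX_ORDER or finally -ENOSYS.
 */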
2278 
2279 /*
2280  * Figure out what the alignment of the objects will be.
2281  */
2282 static unsigned long calculate_alignment(unsigned long flags,
2283 		unsigned long align, unsigned long size)
2284 {
2285 	/*
2286 	 * If the user wants hardware cache aligned objects then follow that
2287 	 * suggestion if the object is sufficiently large.
2288 	 *
2289 	 * The hardware cache alignment cannot override the specified
2290 	 * alignment though. If the specified alignment is greater then use it.
2291 	 */
2292 	if (flags & SLAB_HWCACHE_ALIGN) {
2293 		unsigned long ralign = cache_line_size();
2294 		while (size <= ralign / 2)
2295 			ralign /= 2;
2296 		align = max(align, ralign);
2297 	}
2298 
2299 	if (align < ARCH_SLAB_MINALIGN)
2300 		align = ARCH_SLAB_MINALIGN;
2301 
2302 	return ALIGN(align, sizeof(void *));
2303 }
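
/*
 * Worked example (illustrative, assuming a 64 byte cache line): a
 * 20 byte object created with SLAB_HWCACHE_ALIGN does not warrant full
 * cache line alignment; ralign is halved while size <= ralign / 2,
 * i.e. 64 -> 32, yielding 32 byte alignment. A 100 byte object keeps
 * the full 64 byte alignment.
 */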
2304 
2305 static void
2306 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2307 {
2308 	n->nr_partial = 0;
2309 	spin_lock_init(&n->list_lock);
2310 	INIT_LIST_HEAD(&n->partial);
2311 #ifdef CONFIG_SLUB_DEBUG
2312 	atomic_long_set(&n->nr_slabs, 0);
2313 	atomic_long_set(&n->total_objects, 0);
2314 	INIT_LIST_HEAD(&n->full);
2315 #endif
2316 }
2317 
2318 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2319 {
2320 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2321 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2322 
2323 	/*
2324 	 * Must align to double word boundary for the double cmpxchg
2325 	 * instructions to work; see __pcpu_double_call_return_bool().
2326 	 */
2327 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2328 				     2 * sizeof(void *));
2329 
2330 	if (!s->cpu_slab)
2331 		return 0;
2332 
2333 	init_kmem_cache_cpus(s);
2334 
2335 	return 1;
2336 }
2337 
2338 static struct kmem_cache *kmem_cache_node;
2339 
2340 /*
2341  * No kmalloc_node yet so do it by hand. We know that this is the first
2342  * slab on the node for this slabcache. There are no concurrent accesses
2343  * possible.
2344  *
2345  * Note that this function only works on the kmalloc_node_cache
2346  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2347  * memory on a fresh node that has no slab structures yet.
2348  */
2349 static void early_kmem_cache_node_alloc(int node)
2350 {
2351 	struct page *page;
2352 	struct kmem_cache_node *n;
2353 	unsigned long flags;
2354 
2355 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
2356 
2357 	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
2358 
2359 	BUG_ON(!page);
2360 	if (page_to_nid(page) != node) {
2361 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2362 				"node %d\n", node);
2363 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2364 				"in order to be able to continue\n");
2365 	}
2366 
2367 	n = page->freelist;
2368 	BUG_ON(!n);
2369 	page->freelist = get_freepointer(kmem_cache_node, n);
2370 	page->inuse++;
2371 	kmem_cache_node->node[node] = n;
2372 #ifdef CONFIG_SLUB_DEBUG
2373 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2374 	init_tracking(kmem_cache_node, n);
2375 #endif
2376 	init_kmem_cache_node(n, kmem_cache_node);
2377 	inc_slabs_node(kmem_cache_node, node, page->objects);
2378 
2379 	/*
2380 	 * lockdep requires consistent irq usage for each lock
2381 	 * so even though there cannot be a race this early in
2382 	 * the boot sequence, we still disable irqs.
2383 	 */
2384 	local_irq_save(flags);
2385 	add_partial(n, page, 0);
2386 	local_irq_restore(flags);
2387 }
2388 
2389 static void free_kmem_cache_nodes(struct kmem_cache *s)
2390 {
2391 	int node;
2392 
2393 	for_each_node_state(node, N_NORMAL_MEMORY) {
2394 		struct kmem_cache_node *n = s->node[node];
2395 
2396 		if (n)
2397 			kmem_cache_free(kmem_cache_node, n);
2398 
2399 		s->node[node] = NULL;
2400 	}
2401 }
2402 
2403 static int init_kmem_cache_nodes(struct kmem_cache *s)
2404 {
2405 	int node;
2406 
2407 	for_each_node_state(node, N_NORMAL_MEMORY) {
2408 		struct kmem_cache_node *n;
2409 
2410 		if (slab_state == DOWN) {
2411 			early_kmem_cache_node_alloc(node);
2412 			continue;
2413 		}
2414 		n = kmem_cache_alloc_node(kmem_cache_node,
2415 						GFP_KERNEL, node);
2416 
2417 		if (!n) {
2418 			free_kmem_cache_nodes(s);
2419 			return 0;
2420 		}
2421 
2422 		s->node[node] = n;
2423 		init_kmem_cache_node(n, s);
2424 	}
2425 	return 1;
2426 }
2427 
2428 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2429 {
2430 	if (min < MIN_PARTIAL)
2431 		min = MIN_PARTIAL;
2432 	else if (min > MAX_PARTIAL)
2433 		min = MAX_PARTIAL;
2434 	s->min_partial = min;
2435 }
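
/*
 * Illustrative example: kmem_cache_open() below passes ilog2(s->size),
 * so a cache of 4096 byte objects asks for ilog2(4096) = 12 partial
 * slabs per node, which is clamped to MAX_PARTIAL when that is smaller;
 * a tiny cache whose ilog2() falls below MIN_PARTIAL is raised to that
 * floor instead.
 */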
2436 
2437 /*
2438  * calculate_sizes() determines the order and the distribution of data within
2439  * a slab object.
2440  */
2441 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2442 {
2443 	unsigned long flags = s->flags;
2444 	unsigned long size = s->objsize;
2445 	unsigned long align = s->align;
2446 	int order;
2447 
2448 	/*
2449 	 * Round up object size to the next word boundary. We can only
2450 	 * place the free pointer at word boundaries and this determines
2451 	 * the possible location of the free pointer.
2452 	 */
2453 	size = ALIGN(size, sizeof(void *));
2454 
2455 #ifdef CONFIG_SLUB_DEBUG
2456 	/*
2457 	 * Determine if we can poison the object itself. If the user of
2458 	 * the slab may touch the object after free or before allocation
2459 	 * then we should never poison the object itself.
2460 	 */
2461 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2462 			!s->ctor)
2463 		s->flags |= __OBJECT_POISON;
2464 	else
2465 		s->flags &= ~__OBJECT_POISON;
2466 
2467 
2468 	/*
2469 	 * If we are Redzoning then check if there is some space between the
2470 	 * end of the object and the free pointer. If not then add an
2471 	 * additional word to have some bytes to store Redzone information.
2472 	 */
2473 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2474 		size += sizeof(void *);
2475 #endif
2476 
2477 	/*
2478 	 * With that we have determined the number of bytes in actual use
2479 	 * by the object. This is the potential offset to the free pointer.
2480 	 */
2481 	s->inuse = size;
2482 
2483 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2484 		s->ctor)) {
2485 		/*
2486 		 * Relocate free pointer after the object if it is not
2487 		 * permitted to overwrite the first word of the object on
2488 		 * kmem_cache_free.
2489 		 *
2490 		 * This is the case if we do RCU, have a constructor or
2491 		 * destructor or are poisoning the objects.
2492 		 */
2493 		s->offset = size;
2494 		size += sizeof(void *);
2495 	}
2496 
2497 #ifdef CONFIG_SLUB_DEBUG
2498 	if (flags & SLAB_STORE_USER)
2499 		/*
2500 		 * Need to store information about allocs and frees after
2501 		 * the object.
2502 		 */
2503 		size += 2 * sizeof(struct track);
2504 
2505 	if (flags & SLAB_RED_ZONE)
2506 		/*
2507 		 * Add some empty padding so that we can catch
2508 		 * overwrites from earlier objects rather than let
2509 		 * tracking information or the free pointer be
2510 		 * corrupted if a user writes before the start
2511 		 * of the object.
2512 		 */
2513 		size += sizeof(void *);
2514 #endif
2515 
2516 	/*
2517 	 * Determine the alignment based on various parameters that the
2518 	 * user specified and the dynamic determination of cache line size
2519 	 * on bootup.
2520 	 */
2521 	align = calculate_alignment(flags, align, s->objsize);
2522 	s->align = align;
2523 
2524 	/*
2525 	 * SLUB stores one object immediately after another beginning from
2526 	 * offset 0. In order to align the objects we have to simply size
2527 	 * each object to conform to the alignment.
2528 	 */
2529 	size = ALIGN(size, align);
2530 	s->size = size;
2531 	if (forced_order >= 0)
2532 		order = forced_order;
2533 	else
2534 		order = calculate_order(size, s->reserved);
2535 
2536 	if (order < 0)
2537 		return 0;
2538 
2539 	s->allocflags = 0;
2540 	if (order)
2541 		s->allocflags |= __GFP_COMP;
2542 
2543 	if (s->flags & SLAB_CACHE_DMA)
2544 		s->allocflags |= SLUB_DMA;
2545 
2546 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2547 		s->allocflags |= __GFP_RECLAIMABLE;
2548 
2549 	/*
2550 	 * Determine the number of objects per slab
2551 	 */
2552 	s->oo = oo_make(order, size, s->reserved);
2553 	s->min = oo_make(get_order(size), size, s->reserved);
2554 	if (oo_objects(s->oo) > oo_objects(s->max))
2555 		s->max = s->oo;
2556 
2557 	return !!oo_objects(s->oo);
2558 
2559 }
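
/*
 * Resulting layout of one object (illustrative) for a debug cache with
 * poisoning, red zoning and SLAB_STORE_USER enabled:
 *
 *	[ object | red zone word | free pointer (s->offset) |
 *	  alloc track | free track | red zone padding ]
 *
 * rounded up to s->align. A production cache without debug flags, ctor
 * or RCU keeps s->offset == 0 and overlays the free pointer on the
 * first word of the object instead.
 */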
2560 
2561 static int kmem_cache_open(struct kmem_cache *s,
2562 		const char *name, size_t size,
2563 		size_t align, unsigned long flags,
2564 		void (*ctor)(void *))
2565 {
2566 	memset(s, 0, kmem_size);
2567 	s->name = name;
2568 	s->ctor = ctor;
2569 	s->objsize = size;
2570 	s->align = align;
2571 	s->flags = kmem_cache_flags(size, flags, name, ctor);
2572 	s->reserved = 0;
2573 
2574 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
2575 		s->reserved = sizeof(struct rcu_head);
2576 
2577 	if (!calculate_sizes(s, -1))
2578 		goto error;
2579 	if (disable_higher_order_debug) {
2580 		/*
2581 		 * Disable debugging flags that store metadata if the min slab
2582 		 * order increased.
2583 		 */
2584 		if (get_order(s->size) > get_order(s->objsize)) {
2585 			s->flags &= ~DEBUG_METADATA_FLAGS;
2586 			s->offset = 0;
2587 			if (!calculate_sizes(s, -1))
2588 				goto error;
2589 		}
2590 	}
2591 
2592 	/*
2593 	 * The larger the object size is, the more pages we want on the partial
2594 	 * list to avoid pounding the page allocator excessively.
2595 	 */
2596 	set_min_partial(s, ilog2(s->size));
2597 	s->refcount = 1;
2598 #ifdef CONFIG_NUMA
2599 	s->remote_node_defrag_ratio = 1000;
2600 #endif
2601 	if (!init_kmem_cache_nodes(s))
2602 		goto error;
2603 
2604 	if (alloc_kmem_cache_cpus(s))
2605 		return 1;
2606 
2607 	free_kmem_cache_nodes(s);
2608 error:
2609 	if (flags & SLAB_PANIC)
2610 		panic("Cannot create slab %s size=%lu realsize=%u "
2611 			"order=%u offset=%u flags=%lx\n",
2612 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
2613 			s->offset, flags);
2614 	return 0;
2615 }
2616 
2617 /*
2618  * Determine the size of a slab object
2619  */
2620 unsigned int kmem_cache_size(struct kmem_cache *s)
2621 {
2622 	return s->objsize;
2623 }
2624 EXPORT_SYMBOL(kmem_cache_size);
2625 
2626 static void list_slab_objects(struct kmem_cache *s, struct page *page,
2627 							const char *text)
2628 {
2629 #ifdef CONFIG_SLUB_DEBUG
2630 	void *addr = page_address(page);
2631 	void *p;
2632 	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
2633 				     sizeof(long), GFP_ATOMIC);
2634 	if (!map)
2635 		return;
2636 	slab_err(s, page, "%s", text);
2637 	slab_lock(page);
2638 
2639 	get_map(s, page, map);
2640 	for_each_object(p, s, addr, page->objects) {
2641 
2642 		if (!test_bit(slab_index(p, s, addr), map)) {
2643 			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2644 							p, p - addr);
2645 			print_tracking(s, p);
2646 		}
2647 	}
2648 	slab_unlock(page);
2649 	kfree(map);
2650 #endif
2651 }
2652 
2653 /*
2654  * Attempt to free all partial slabs on a node.
2655  */
2656 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
2657 {
2658 	unsigned long flags;
2659 	struct page *page, *h;
2660 
2661 	spin_lock_irqsave(&n->list_lock, flags);
2662 	list_for_each_entry_safe(page, h, &n->partial, lru) {
2663 		if (!page->inuse) {
2664 			__remove_partial(n, page);
2665 			discard_slab(s, page);
2666 		} else {
2667 			list_slab_objects(s, page,
2668 				"Objects remaining on kmem_cache_close()");
2669 		}
2670 	}
2671 	spin_unlock_irqrestore(&n->list_lock, flags);
2672 }
2673 
2674 /*
2675  * Release all resources used by a slab cache.
2676  */
2677 static inline int kmem_cache_close(struct kmem_cache *s)
2678 {
2679 	int node;
2680 
2681 	flush_all(s);
2682 	free_percpu(s->cpu_slab);
2683 	/* Attempt to free all objects */
2684 	for_each_node_state(node, N_NORMAL_MEMORY) {
2685 		struct kmem_cache_node *n = get_node(s, node);
2686 
2687 		free_partial(s, n);
2688 		if (n->nr_partial || slabs_node(s, node))
2689 			return 1;
2690 	}
2691 	free_kmem_cache_nodes(s);
2692 	return 0;
2693 }
2694 
2695 /*
2696  * Close a cache and release the kmem_cache structure
2697  * (must be used for caches created using kmem_cache_create)
2698  */
2699 void kmem_cache_destroy(struct kmem_cache *s)
2700 {
2701 	down_write(&slub_lock);
2702 	s->refcount--;
2703 	if (!s->refcount) {
2704 		list_del(&s->list);
2705 		if (kmem_cache_close(s)) {
2706 			printk(KERN_ERR "SLUB %s: %s called for cache that "
2707 				"still has objects.\n", s->name, __func__);
2708 			dump_stack();
2709 		}
2710 		if (s->flags & SLAB_DESTROY_BY_RCU)
2711 			rcu_barrier();
2712 		sysfs_slab_remove(s);
2713 	}
2714 	up_write(&slub_lock);
2715 }
2716 EXPORT_SYMBOL(kmem_cache_destroy);
2717 
2718 /********************************************************************
2719  *		Kmalloc subsystem
2720  *******************************************************************/
2721 
2722 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
2723 EXPORT_SYMBOL(kmalloc_caches);
2724 
2725 static struct kmem_cache *kmem_cache;
2726 
2727 #ifdef CONFIG_ZONE_DMA
2728 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
2729 #endif
2730 
2731 static int __init setup_slub_min_order(char *str)
2732 {
2733 	get_option(&str, &slub_min_order);
2734 
2735 	return 1;
2736 }
2737 
2738 __setup("slub_min_order=", setup_slub_min_order);
2739 
2740 static int __init setup_slub_max_order(char *str)
2741 {
2742 	get_option(&str, &slub_max_order);
2743 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
2744 
2745 	return 1;
2746 }
2747 
2748 __setup("slub_max_order=", setup_slub_max_order);
2749 
2750 static int __init setup_slub_min_objects(char *str)
2751 {
2752 	get_option(&str, &slub_min_objects);
2753 
2754 	return 1;
2755 }
2756 
2757 __setup("slub_min_objects=", setup_slub_min_objects);
2758 
2759 static int __init setup_slub_nomerge(char *str)
2760 {
2761 	slub_nomerge = 1;
2762 	return 1;
2763 }
2764 
2765 __setup("slub_nomerge", setup_slub_nomerge);
2766 
2767 static struct kmem_cache *__init create_kmalloc_cache(const char *name,
2768 						int size, unsigned int flags)
2769 {
2770 	struct kmem_cache *s;
2771 
2772 	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
2773 
2774 	/*
2775 	 * This function is called with IRQs disabled during early-boot on
2776 	 * a single CPU so there's no need to take slub_lock here.
2777 	 */
2778 	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
2779 								flags, NULL))
2780 		goto panic;
2781 
2782 	list_add(&s->list, &slab_caches);
2783 	return s;
2784 
2785 panic:
2786 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2787 	return NULL;
2788 }
2789 
2790 /*
2791  * Conversion table for small slab sizes / 8 to the index in the
2792  * kmalloc array. This is necessary for slabs < 192 since we have
2793  * non-power-of-two cache sizes there. The size of larger slabs can be
2794  * determined using fls.
2795  */
2796 static s8 size_index[24] = {
2797 	3,	/* 8 */
2798 	4,	/* 16 */
2799 	5,	/* 24 */
2800 	5,	/* 32 */
2801 	6,	/* 40 */
2802 	6,	/* 48 */
2803 	6,	/* 56 */
2804 	6,	/* 64 */
2805 	1,	/* 72 */
2806 	1,	/* 80 */
2807 	1,	/* 88 */
2808 	1,	/* 96 */
2809 	7,	/* 104 */
2810 	7,	/* 112 */
2811 	7,	/* 120 */
2812 	7,	/* 128 */
2813 	2,	/* 136 */
2814 	2,	/* 144 */
2815 	2,	/* 152 */
2816 	2,	/* 160 */
2817 	2,	/* 168 */
2818 	2,	/* 176 */
2819 	2,	/* 184 */
2820 	2	/* 192 */
2821 };
2822 
2823 static inline int size_index_elem(size_t bytes)
2824 {
2825 	return (bytes - 1) / 8;
2826 }
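
/*
 * Worked example (illustrative): kmalloc(20) computes
 * size_index_elem(20) = (20 - 1) / 8 = 2 and size_index[2] = 5, so the
 * request is served from kmalloc_caches[5], the 32 byte cache. A 300
 * byte request takes the fls() path instead: fls(299) = 9 selects the
 * 512 byte cache.
 */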
2827 
2828 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2829 {
2830 	int index;
2831 
2832 	if (size <= 192) {
2833 		if (!size)
2834 			return ZERO_SIZE_PTR;
2835 
2836 		index = size_index[size_index_elem(size)];
2837 	} else
2838 		index = fls(size - 1);
2839 
2840 #ifdef CONFIG_ZONE_DMA
2841 	if (unlikely((flags & SLUB_DMA)))
2842 		return kmalloc_dma_caches[index];
2843 
2844 #endif
2845 	return kmalloc_caches[index];
2846 }
2847 
2848 void *__kmalloc(size_t size, gfp_t flags)
2849 {
2850 	struct kmem_cache *s;
2851 	void *ret;
2852 
2853 	if (unlikely(size > SLUB_MAX_SIZE))
2854 		return kmalloc_large(size, flags);
2855 
2856 	s = get_slab(size, flags);
2857 
2858 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2859 		return s;
2860 
2861 	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
2862 
2863 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
2864 
2865 	return ret;
2866 }
2867 EXPORT_SYMBOL(__kmalloc);
2868 
2869 #ifdef CONFIG_NUMA
2870 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2871 {
2872 	struct page *page;
2873 	void *ptr = NULL;
2874 
2875 	flags |= __GFP_COMP | __GFP_NOTRACK;
2876 	page = alloc_pages_node(node, flags, get_order(size));
2877 	if (page)
2878 		ptr = page_address(page);
2879 
2880 	kmemleak_alloc(ptr, size, 1, flags);
2881 	return ptr;
2882 }
2883 
2884 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2885 {
2886 	struct kmem_cache *s;
2887 	void *ret;
2888 
2889 	if (unlikely(size > SLUB_MAX_SIZE)) {
2890 		ret = kmalloc_large_node(size, flags, node);
2891 
2892 		trace_kmalloc_node(_RET_IP_, ret,
2893 				   size, PAGE_SIZE << get_order(size),
2894 				   flags, node);
2895 
2896 		return ret;
2897 	}
2898 
2899 	s = get_slab(size, flags);
2900 
2901 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2902 		return s;
2903 
2904 	ret = slab_alloc(s, flags, node, _RET_IP_);
2905 
2906 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
2907 
2908 	return ret;
2909 }
2910 EXPORT_SYMBOL(__kmalloc_node);
2911 #endif
2912 
2913 size_t ksize(const void *object)
2914 {
2915 	struct page *page;
2916 
2917 	if (unlikely(object == ZERO_SIZE_PTR))
2918 		return 0;
2919 
2920 	page = virt_to_head_page(object);
2921 
2922 	if (unlikely(!PageSlab(page))) {
2923 		WARN_ON(!PageCompound(page));
2924 		return PAGE_SIZE << compound_order(page);
2925 	}
2926 
2927 	return slab_ksize(page->slab);
2928 }
2929 EXPORT_SYMBOL(ksize);
2930 
2931 void kfree(const void *x)
2932 {
2933 	struct page *page;
2934 	void *object = (void *)x;
2935 
2936 	trace_kfree(_RET_IP_, x);
2937 
2938 	if (unlikely(ZERO_OR_NULL_PTR(x)))
2939 		return;
2940 
2941 	page = virt_to_head_page(x);
2942 	if (unlikely(!PageSlab(page))) {
2943 		BUG_ON(!PageCompound(page));
2944 		kmemleak_free(x);
2945 		put_page(page);
2946 		return;
2947 	}
2948 	slab_free(page->slab, page, object, _RET_IP_);
2949 }
2950 EXPORT_SYMBOL(kfree);
2951 
2952 /*
2953  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2954  * the remaining slabs by the number of items in use. The slabs with the
2955  * most items in use come first. New allocations will then fill those up
2956  * and thus they can be removed from the partial lists.
2957  *
2958  * The slabs with the least items are placed last. This results in them
2959  * being allocated from last, increasing the chance that the last objects
2960  * are freed in them.
2961  */
2962 int kmem_cache_shrink(struct kmem_cache *s)
2963 {
2964 	int node;
2965 	int i;
2966 	struct kmem_cache_node *n;
2967 	struct page *page;
2968 	struct page *t;
2969 	int objects = oo_objects(s->max);
2970 	struct list_head *slabs_by_inuse =
2971 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2972 	unsigned long flags;
2973 
2974 	if (!slabs_by_inuse)
2975 		return -ENOMEM;
2976 
2977 	flush_all(s);
2978 	for_each_node_state(node, N_NORMAL_MEMORY) {
2979 		n = get_node(s, node);
2980 
2981 		if (!n->nr_partial)
2982 			continue;
2983 
2984 		for (i = 0; i < objects; i++)
2985 			INIT_LIST_HEAD(slabs_by_inuse + i);
2986 
2987 		spin_lock_irqsave(&n->list_lock, flags);
2988 
2989 		/*
2990 		 * Build lists indexed by the items in use in each slab.
2991 		 *
2992 		 * Note that concurrent frees may occur while we hold the
2993 		 * list_lock. page->inuse here is the upper limit.
2994 		 */
2995 		list_for_each_entry_safe(page, t, &n->partial, lru) {
2996 			if (!page->inuse && slab_trylock(page)) {
2997 				/*
2998 				 * Must hold slab lock here because slab_free
2999 				 * may have freed the last object and be
3000 				 * waiting to release the slab.
3001 				 */
3002 				__remove_partial(n, page);
3003 				slab_unlock(page);
3004 				discard_slab(s, page);
3005 			} else {
3006 				list_move(&page->lru,
3007 				slabs_by_inuse + page->inuse);
3008 			}
3009 		}
3010 
3011 		/*
3012 		 * Rebuild the partial list with the slabs filled up most
3013 		 * first and the least used slabs at the end.
3014 		 */
3015 		for (i = objects - 1; i >= 0; i--)
3016 			list_splice(slabs_by_inuse + i, n->partial.prev);
3017 
3018 		spin_unlock_irqrestore(&n->list_lock, flags);
3019 	}
3020 
3021 	kfree(slabs_by_inuse);
3022 	return 0;
3023 }
3024 EXPORT_SYMBOL(kmem_cache_shrink);
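
/*
 * Illustrative caller (hypothetical my_cache): a subsystem that has just
 * released many objects can hand the resulting empty slabs back to the
 * page allocator right away:
 *
 *	if (kmem_cache_shrink(my_cache))
 *		... fails only with -ENOMEM for the temporary array ...
 *
 * The memory hotplug going-offline callback below shrinks every cache
 * this way.
 */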
3025 
3026 #if defined(CONFIG_MEMORY_HOTPLUG)
3027 static int slab_mem_going_offline_callback(void *arg)
3028 {
3029 	struct kmem_cache *s;
3030 
3031 	down_read(&slub_lock);
3032 	list_for_each_entry(s, &slab_caches, list)
3033 		kmem_cache_shrink(s);
3034 	up_read(&slub_lock);
3035 
3036 	return 0;
3037 }
3038 
3039 static void slab_mem_offline_callback(void *arg)
3040 {
3041 	struct kmem_cache_node *n;
3042 	struct kmem_cache *s;
3043 	struct memory_notify *marg = arg;
3044 	int offline_node;
3045 
3046 	offline_node = marg->status_change_nid;
3047 
3048 	/*
3049 	 * If the node still has available memory, we still need its
3050 	 * kmem_cache_node structure.
3051 	 */
3052 	if (offline_node < 0)
3053 		return;
3054 
3055 	down_read(&slub_lock);
3056 	list_for_each_entry(s, &slab_caches, list) {
3057 		n = get_node(s, offline_node);
3058 		if (n) {
3059 			/*
3060 			 * if n->nr_slabs > 0, slabs still exist on the node
3061 			 * that is going down. We were unable to free them,
3062 			 * and the offline_pages() function shouldn't call this
3063 			 * callback. So, we must fail.
3064 			 */
3065 			BUG_ON(slabs_node(s, offline_node));
3066 
3067 			s->node[offline_node] = NULL;
3068 			kmem_cache_free(kmem_cache_node, n);
3069 		}
3070 	}
3071 	up_read(&slub_lock);
3072 }
3073 
3074 static int slab_mem_going_online_callback(void *arg)
3075 {
3076 	struct kmem_cache_node *n;
3077 	struct kmem_cache *s;
3078 	struct memory_notify *marg = arg;
3079 	int nid = marg->status_change_nid;
3080 	int ret = 0;
3081 
3082 	/*
3083 	 * If the node's memory is already available, then kmem_cache_node is
3084 	 * already created. Nothing to do.
3085 	 */
3086 	if (nid < 0)
3087 		return 0;
3088 
3089 	/*
3090 	 * We are bringing a node online. No memory is available yet. We must
3091 	 * allocate a kmem_cache_node structure in order to bring the node
3092 	 * online.
3093 	 */
3094 	down_read(&slub_lock);
3095 	list_for_each_entry(s, &slab_caches, list) {
3096 		/*
3097 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
3098 		 *      since memory is not yet available from the node that
3099 		 *      is brought up.
3100 		 */
3101 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
3102 		if (!n) {
3103 			ret = -ENOMEM;
3104 			goto out;
3105 		}
3106 		init_kmem_cache_node(n, s);
3107 		s->node[nid] = n;
3108 	}
3109 out:
3110 	up_read(&slub_lock);
3111 	return ret;
3112 }
3113 
3114 static int slab_memory_callback(struct notifier_block *self,
3115 				unsigned long action, void *arg)
3116 {
3117 	int ret = 0;
3118 
3119 	switch (action) {
3120 	case MEM_GOING_ONLINE:
3121 		ret = slab_mem_going_online_callback(arg);
3122 		break;
3123 	case MEM_GOING_OFFLINE:
3124 		ret = slab_mem_going_offline_callback(arg);
3125 		break;
3126 	case MEM_OFFLINE:
3127 	case MEM_CANCEL_ONLINE:
3128 		slab_mem_offline_callback(arg);
3129 		break;
3130 	case MEM_ONLINE:
3131 	case MEM_CANCEL_OFFLINE:
3132 		break;
3133 	}
3134 	if (ret)
3135 		ret = notifier_from_errno(ret);
3136 	else
3137 		ret = NOTIFY_OK;
3138 	return ret;
3139 }
3140 
3141 #endif /* CONFIG_MEMORY_HOTPLUG */
3142 
3143 /********************************************************************
3144  *			Basic setup of slabs
3145  *******************************************************************/
3146 
3147 /*
3148  * Used for early kmem_cache structures that were allocated using
3149  * the page allocator
3150  */
3151 
3152 static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3153 {
3154 	int node;
3155 
3156 	list_add(&s->list, &slab_caches);
3157 	s->refcount = -1;
3158 
3159 	for_each_node_state(node, N_NORMAL_MEMORY) {
3160 		struct kmem_cache_node *n = get_node(s, node);
3161 		struct page *p;
3162 
3163 		if (n) {
3164 			list_for_each_entry(p, &n->partial, lru)
3165 				p->slab = s;
3166 
3167 #ifdef CONFIG_SLUB_DEBUG
3168 			list_for_each_entry(p, &n->full, lru)
3169 				p->slab = s;
3170 #endif
3171 		}
3172 	}
3173 }
3174 
3175 void __init kmem_cache_init(void)
3176 {
3177 	int i;
3178 	int caches = 0;
3179 	struct kmem_cache *temp_kmem_cache;
3180 	int order;
3181 	struct kmem_cache *temp_kmem_cache_node;
3182 	unsigned long kmalloc_size;
3183 
3184 	kmem_size = offsetof(struct kmem_cache, node) +
3185 				nr_node_ids * sizeof(struct kmem_cache_node *);
3186 
3187 	/* Allocate two kmem_caches from the page allocator */
3188 	kmalloc_size = ALIGN(kmem_size, cache_line_size());
3189 	order = get_order(2 * kmalloc_size);
3190 	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3191 
3192 	/*
3193 	 * Must first have the slab cache available for the allocations of the
3194 	 * struct kmem_cache_node's. There is special bootstrap code in
3195 	 * kmem_cache_open for slab_state == DOWN.
3196 	 */
3197 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3198 
3199 	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3200 		sizeof(struct kmem_cache_node),
3201 		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3202 
3203 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3204 
3205 	/* Able to allocate the per node structures */
3206 	slab_state = PARTIAL;
3207 
3208 	temp_kmem_cache = kmem_cache;
3209 	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3210 		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3211 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3212 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
3213 
3214 	/*
3215 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
3216 	 * kmem_cache_node is separately allocated so no need to
3217 	 * update any list pointers.
3218 	 */
3219 	temp_kmem_cache_node = kmem_cache_node;
3220 
3221 	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3222 	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3223 
3224 	kmem_cache_bootstrap_fixup(kmem_cache_node);
3225 
3226 	caches++;
3227 	kmem_cache_bootstrap_fixup(kmem_cache);
3228 	caches++;
3229 	/* Free temporary boot structure */
3230 	free_pages((unsigned long)temp_kmem_cache, order);
3231 
3232 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
3233 
3234 	/*
3235 	 * Patch up the size_index table if we have strange large alignment
3236 	 * requirements for the kmalloc array. This is only the case for
3237 	 * MIPS it seems. The standard arches will not generate any code here.
3238 	 *
3239 	 * Largest permitted alignment is 256 bytes due to the way we
3240 	 * handle the index determination for the smaller caches.
3241 	 *
3242 	 * Make sure that nothing crazy happens if someone starts tinkering
3243 	 * around with ARCH_KMALLOC_MINALIGN
3244 	 */
3245 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3246 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3247 
3248 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3249 		int elem = size_index_elem(i);
3250 		if (elem >= ARRAY_SIZE(size_index))
3251 			break;
3252 		size_index[elem] = KMALLOC_SHIFT_LOW;
3253 	}
3254 
3255 	if (KMALLOC_MIN_SIZE == 64) {
3256 		/*
3257 		 * The 96 byte size cache is not used if the alignment
3258 		 * is 64 bytes.
3259 		 */
3260 		for (i = 64 + 8; i <= 96; i += 8)
3261 			size_index[size_index_elem(i)] = 7;
3262 	} else if (KMALLOC_MIN_SIZE == 128) {
3263 		/*
3264 		 * The 192 byte sized cache is not used if the alignment
3265 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3266 		 * instead.
3267 		 */
3268 		for (i = 128 + 8; i <= 192; i += 8)
3269 			size_index[size_index_elem(i)] = 8;
3270 	}
3271 
3272 	/* Caches that are not of the two-to-the-power-of size */
3273 	if (KMALLOC_MIN_SIZE <= 32) {
3274 		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3275 		caches++;
3276 	}
3277 
3278 	if (KMALLOC_MIN_SIZE <= 64) {
3279 		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3280 		caches++;
3281 	}
3282 
3283 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3284 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3285 		caches++;
3286 	}
3287 
3288 	slab_state = UP;
3289 
3290 	/* Provide the correct kmalloc names now that the caches are up */
3291 	if (KMALLOC_MIN_SIZE <= 32) {
3292 		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3293 		BUG_ON(!kmalloc_caches[1]->name);
3294 	}
3295 
3296 	if (KMALLOC_MIN_SIZE <= 64) {
3297 		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3298 		BUG_ON(!kmalloc_caches[2]->name);
3299 	}
3300 
3301 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3302 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3303 
3304 		BUG_ON(!s);
3305 		kmalloc_caches[i]->name = s;
3306 	}
3307 
3308 #ifdef CONFIG_SMP
3309 	register_cpu_notifier(&slab_notifier);
3310 #endif
3311 
3312 #ifdef CONFIG_ZONE_DMA
3313 	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3314 		struct kmem_cache *s = kmalloc_caches[i];
3315 
3316 		if (s && s->size) {
3317 			char *name = kasprintf(GFP_NOWAIT,
3318 				 "dma-kmalloc-%d", s->objsize);
3319 
3320 			BUG_ON(!name);
3321 			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
3322 				s->objsize, SLAB_CACHE_DMA);
3323 		}
3324 	}
3325 #endif
3326 	printk(KERN_INFO
3327 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3328 		" CPUs=%d, Nodes=%d\n",
3329 		caches, cache_line_size(),
3330 		slub_min_order, slub_max_order, slub_min_objects,
3331 		nr_cpu_ids, nr_node_ids);
3332 }
3333 
3334 void __init kmem_cache_init_late(void)
3335 {
3336 }
3337 
3338 /*
3339  * Find a mergeable slab cache
3340  */
3341 static int slab_unmergeable(struct kmem_cache *s)
3342 {
3343 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3344 		return 1;
3345 
3346 	if (s->ctor)
3347 		return 1;
3348 
3349 	/*
3350 	 * We may have set a slab to be unmergeable during bootstrap.
3351 	 */
3352 	if (s->refcount < 0)
3353 		return 1;
3354 
3355 	return 0;
3356 }
3357 
3358 static struct kmem_cache *find_mergeable(size_t size,
3359 		size_t align, unsigned long flags, const char *name,
3360 		void (*ctor)(void *))
3361 {
3362 	struct kmem_cache *s;
3363 
3364 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3365 		return NULL;
3366 
3367 	if (ctor)
3368 		return NULL;
3369 
3370 	size = ALIGN(size, sizeof(void *));
3371 	align = calculate_alignment(flags, align, size);
3372 	size = ALIGN(size, align);
3373 	flags = kmem_cache_flags(size, flags, name, NULL);
3374 
3375 	list_for_each_entry(s, &slab_caches, list) {
3376 		if (slab_unmergeable(s))
3377 			continue;
3378 
3379 		if (size > s->size)
3380 			continue;
3381 
3382 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3383 				continue;
3384 		/*
3385 		 * Check if alignment is compatible.
3386 		 * Courtesy of Adrian Drzewiecki
3387 		 */
3388 		if ((s->size & ~(align - 1)) != s->size)
3389 			continue;
3390 
3391 		if (s->size - size >= sizeof(void *))
3392 			continue;
3393 
3394 		return s;
3395 	}
3396 	return NULL;
3397 }
3398 
3399 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3400 		size_t align, unsigned long flags, void (*ctor)(void *))
3401 {
3402 	struct kmem_cache *s;
3403 	char *n;
3404 
3405 	if (WARN_ON(!name))
3406 		return NULL;
3407 
3408 	down_write(&slub_lock);
3409 	s = find_mergeable(size, align, flags, name, ctor);
3410 	if (s) {
3411 		s->refcount++;
3412 		/*
3413 		 * Adjust the object sizes so that we clear
3414 		 * the complete object on kzalloc.
3415 		 */
3416 		s->objsize = max(s->objsize, (int)size);
3417 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3418 
3419 		if (sysfs_slab_alias(s, name)) {
3420 			s->refcount--;
3421 			goto err;
3422 		}
3423 		up_write(&slub_lock);
3424 		return s;
3425 	}
3426 
3427 	n = kstrdup(name, GFP_KERNEL);
3428 	if (!n)
3429 		goto err;
3430 
3431 	s = kmalloc(kmem_size, GFP_KERNEL);
3432 	if (s) {
3433 		if (kmem_cache_open(s, n,
3434 				size, align, flags, ctor)) {
3435 			list_add(&s->list, &slab_caches);
3436 			if (sysfs_slab_add(s)) {
3437 				list_del(&s->list);
3438 				kfree(n);
3439 				kfree(s);
3440 				goto err;
3441 			}
3442 			up_write(&slub_lock);
3443 			return s;
3444 		}
3445 		kfree(n);
3446 		kfree(s);
3447 	}
3448 err:
3449 	up_write(&slub_lock);
3450 
3451 	if (flags & SLAB_PANIC)
3452 		panic("Cannot create slabcache %s\n", name);
3453 	else
3454 		s = NULL;
3455 	return s;
3456 }
3457 EXPORT_SYMBOL(kmem_cache_create);
3458 
3459 #ifdef CONFIG_SMP
3460 /*
3461  * Use the cpu notifier to ensure that the cpu slabs are flushed when
3462  * necessary.
3463  */
3464 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3465 		unsigned long action, void *hcpu)
3466 {
3467 	long cpu = (long)hcpu;
3468 	struct kmem_cache *s;
3469 	unsigned long flags;
3470 
3471 	switch (action) {
3472 	case CPU_UP_CANCELED:
3473 	case CPU_UP_CANCELED_FROZEN:
3474 	case CPU_DEAD:
3475 	case CPU_DEAD_FROZEN:
3476 		down_read(&slub_lock);
3477 		list_for_each_entry(s, &slab_caches, list) {
3478 			local_irq_save(flags);
3479 			__flush_cpu_slab(s, cpu);
3480 			local_irq_restore(flags);
3481 		}
3482 		up_read(&slub_lock);
3483 		break;
3484 	default:
3485 		break;
3486 	}
3487 	return NOTIFY_OK;
3488 }
3489 
3490 static struct notifier_block __cpuinitdata slab_notifier = {
3491 	.notifier_call = slab_cpuup_callback
3492 };
3493 
3494 #endif
3495 
3496 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3497 {
3498 	struct kmem_cache *s;
3499 	void *ret;
3500 
3501 	if (unlikely(size > SLUB_MAX_SIZE))
3502 		return kmalloc_large(size, gfpflags);
3503 
3504 	s = get_slab(size, gfpflags);
3505 
3506 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3507 		return s;
3508 
3509 	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
3510 
3511 	/* Honor the call site pointer we received. */
3512 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
3513 
3514 	return ret;
3515 }
3516 
3517 #ifdef CONFIG_NUMA
3518 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3519 					int node, unsigned long caller)
3520 {
3521 	struct kmem_cache *s;
3522 	void *ret;
3523 
3524 	if (unlikely(size > SLUB_MAX_SIZE)) {
3525 		ret = kmalloc_large_node(size, gfpflags, node);
3526 
3527 		trace_kmalloc_node(caller, ret,
3528 				   size, PAGE_SIZE << get_order(size),
3529 				   gfpflags, node);
3530 
3531 		return ret;
3532 	}
3533 
3534 	s = get_slab(size, gfpflags);
3535 
3536 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3537 		return s;
3538 
3539 	ret = slab_alloc(s, gfpflags, node, caller);
3540 
3541 	/* Honor the call site pointer we received. */
3542 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
3543 
3544 	return ret;
3545 }
3546 #endif
3547 
3548 #ifdef CONFIG_SYSFS
3549 static int count_inuse(struct page *page)
3550 {
3551 	return page->inuse;
3552 }
3553 
3554 static int count_total(struct page *page)
3555 {
3556 	return page->objects;
3557 }
3558 #endif
3559 
3560 #ifdef CONFIG_SLUB_DEBUG
3561 static int validate_slab(struct kmem_cache *s, struct page *page,
3562 						unsigned long *map)
3563 {
3564 	void *p;
3565 	void *addr = page_address(page);
3566 
3567 	if (!check_slab(s, page) ||
3568 			!on_freelist(s, page, NULL))
3569 		return 0;
3570 
3571 	/* Now we know that a valid freelist exists */
3572 	bitmap_zero(map, page->objects);
3573 
3574 	get_map(s, page, map);
3575 	for_each_object(p, s, addr, page->objects) {
3576 		if (test_bit(slab_index(p, s, addr), map))
3577 			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
3578 				return 0;
3579 	}
3580 
3581 	for_each_object(p, s, addr, page->objects)
3582 		if (!test_bit(slab_index(p, s, addr), map))
3583 			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
3584 				return 0;
3585 	return 1;
3586 }
3587 
3588 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3589 						unsigned long *map)
3590 {
3591 	if (slab_trylock(page)) {
3592 		validate_slab(s, page, map);
3593 		slab_unlock(page);
3594 	} else
3595 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3596 			s->name, page);
3597 }
3598 
3599 static int validate_slab_node(struct kmem_cache *s,
3600 		struct kmem_cache_node *n, unsigned long *map)
3601 {
3602 	unsigned long count = 0;
3603 	struct page *page;
3604 	unsigned long flags;
3605 
3606 	spin_lock_irqsave(&n->list_lock, flags);
3607 
3608 	list_for_each_entry(page, &n->partial, lru) {
3609 		validate_slab_slab(s, page, map);
3610 		count++;
3611 	}
3612 	if (count != n->nr_partial)
3613 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3614 			"counter=%ld\n", s->name, count, n->nr_partial);
3615 
3616 	if (!(s->flags & SLAB_STORE_USER))
3617 		goto out;
3618 
3619 	list_for_each_entry(page, &n->full, lru) {
3620 		validate_slab_slab(s, page, map);
3621 		count++;
3622 	}
3623 	if (count != atomic_long_read(&n->nr_slabs))
3624 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3625 			"counter=%ld\n", s->name, count,
3626 			atomic_long_read(&n->nr_slabs));
3627 
3628 out:
3629 	spin_unlock_irqrestore(&n->list_lock, flags);
3630 	return count;
3631 }
3632 
3633 static long validate_slab_cache(struct kmem_cache *s)
3634 {
3635 	int node;
3636 	unsigned long count = 0;
3637 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3638 				sizeof(unsigned long), GFP_KERNEL);
3639 
3640 	if (!map)
3641 		return -ENOMEM;
3642 
3643 	flush_all(s);
3644 	for_each_node_state(node, N_NORMAL_MEMORY) {
3645 		struct kmem_cache_node *n = get_node(s, node);
3646 
3647 		count += validate_slab_node(s, n, map);
3648 	}
3649 	kfree(map);
3650 	return count;
3651 }
3652 /*
3653  * Generate lists of code addresses where slabcache objects are allocated
3654  * and freed.
3655  */
3656 
3657 struct location {
3658 	unsigned long count;
3659 	unsigned long addr;
3660 	long long sum_time;
3661 	long min_time;
3662 	long max_time;
3663 	long min_pid;
3664 	long max_pid;
3665 	DECLARE_BITMAP(cpus, NR_CPUS);
3666 	nodemask_t nodes;
3667 };
3668 
3669 struct loc_track {
3670 	unsigned long max;
3671 	unsigned long count;
3672 	struct location *loc;
3673 };
3674 
3675 static void free_loc_track(struct loc_track *t)
3676 {
3677 	if (t->max)
3678 		free_pages((unsigned long)t->loc,
3679 			get_order(sizeof(struct location) * t->max));
3680 }
3681 
3682 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3683 {
3684 	struct location *l;
3685 	int order;
3686 
3687 	order = get_order(sizeof(struct location) * max);
3688 
3689 	l = (void *)__get_free_pages(flags, order);
3690 	if (!l)
3691 		return 0;
3692 
3693 	if (t->count) {
3694 		memcpy(l, t->loc, sizeof(struct location) * t->count);
3695 		free_loc_track(t);
3696 	}
3697 	t->max = max;
3698 	t->loc = l;
3699 	return 1;
3700 }
3701 
3702 static int add_location(struct loc_track *t, struct kmem_cache *s,
3703 				const struct track *track)
3704 {
3705 	long start, end, pos;
3706 	struct location *l;
3707 	unsigned long caddr;
3708 	unsigned long age = jiffies - track->when;
3709 
3710 	start = -1;
3711 	end = t->count;
3712 
3713 	for ( ; ; ) {
3714 		pos = start + (end - start + 1) / 2;
3715 
3716 		/*
3717 		 * There is nothing at "end". If we end up there
3718 		 * we need to add something before end.
3719 		 */
3720 		if (pos == end)
3721 			break;
3722 
3723 		caddr = t->loc[pos].addr;
3724 		if (track->addr == caddr) {
3725 
3726 			l = &t->loc[pos];
3727 			l->count++;
3728 			if (track->when) {
3729 				l->sum_time += age;
3730 				if (age < l->min_time)
3731 					l->min_time = age;
3732 				if (age > l->max_time)
3733 					l->max_time = age;
3734 
3735 				if (track->pid < l->min_pid)
3736 					l->min_pid = track->pid;
3737 				if (track->pid > l->max_pid)
3738 					l->max_pid = track->pid;
3739 
3740 				cpumask_set_cpu(track->cpu,
3741 						to_cpumask(l->cpus));
3742 			}
3743 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3744 			return 1;
3745 		}
3746 
3747 		if (track->addr < caddr)
3748 			end = pos;
3749 		else
3750 			start = pos;
3751 	}
3752 
3753 	/*
3754 	 * Not found. Insert new tracking element.
3755 	 */
3756 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3757 		return 0;
3758 
3759 	l = t->loc + pos;
3760 	if (pos < t->count)
3761 		memmove(l + 1, l,
3762 			(t->count - pos) * sizeof(struct location));
3763 	t->count++;
3764 	l->count = 1;
3765 	l->addr = track->addr;
3766 	l->sum_time = age;
3767 	l->min_time = age;
3768 	l->max_time = age;
3769 	l->min_pid = track->pid;
3770 	l->max_pid = track->pid;
3771 	cpumask_clear(to_cpumask(l->cpus));
3772 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
3773 	nodes_clear(l->nodes);
3774 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3775 	return 1;
3776 }
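
/*
 * Illustrative walk-through (hypothetical addresses): with t->loc
 * holding addrs {0x100, 0x200, 0x300} and track->addr == 0x200 the
 * bisection above converges on pos 1 and merely bumps that entry's
 * counters. For track->addr == 0x250 it terminates with pos == end and
 * a new element is memmove()d into slot 2, keeping the array sorted by
 * address.
 */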
3777 
3778 static void process_slab(struct loc_track *t, struct kmem_cache *s,
3779 		struct page *page, enum track_item alloc,
3780 		unsigned long *map)
3781 {
3782 	void *addr = page_address(page);
3783 	void *p;
3784 
3785 	bitmap_zero(map, page->objects);
3786 	get_map(s, page, map);
3787 
3788 	for_each_object(p, s, addr, page->objects)
3789 		if (!test_bit(slab_index(p, s, addr), map))
3790 			add_location(t, s, get_track(s, p, alloc));
3791 }
3792 
3793 static int list_locations(struct kmem_cache *s, char *buf,
3794 					enum track_item alloc)
3795 {
3796 	int len = 0;
3797 	unsigned long i;
3798 	struct loc_track t = { 0, 0, NULL };
3799 	int node;
3800 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3801 				     sizeof(unsigned long), GFP_KERNEL);
3802 
3803 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3804 				     GFP_TEMPORARY)) {
3805 		kfree(map);
3806 		return sprintf(buf, "Out of memory\n");
3807 	}
3808 	/* Push back cpu slabs */
3809 	flush_all(s);
3810 
3811 	for_each_node_state(node, N_NORMAL_MEMORY) {
3812 		struct kmem_cache_node *n = get_node(s, node);
3813 		unsigned long flags;
3814 		struct page *page;
3815 
3816 		if (!atomic_long_read(&n->nr_slabs))
3817 			continue;
3818 
3819 		spin_lock_irqsave(&n->list_lock, flags);
3820 		list_for_each_entry(page, &n->partial, lru)
3821 			process_slab(&t, s, page, alloc, map);
3822 		list_for_each_entry(page, &n->full, lru)
3823 			process_slab(&t, s, page, alloc, map);
3824 		spin_unlock_irqrestore(&n->list_lock, flags);
3825 	}
3826 
3827 	for (i = 0; i < t.count; i++) {
3828 		struct location *l = &t.loc[i];
3829 
3830 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
3831 			break;
3832 		len += sprintf(buf + len, "%7ld ", l->count);
3833 
3834 		if (l->addr)
3835 			len += sprintf(buf + len, "%pS", (void *)l->addr);
3836 		else
3837 			len += sprintf(buf + len, "<not-available>");
3838 
3839 		if (l->sum_time != l->min_time) {
3840 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3841 				l->min_time,
3842 				(long)div_u64(l->sum_time, l->count),
3843 				l->max_time);
3844 		} else
3845 			len += sprintf(buf + len, " age=%ld",
3846 				l->min_time);
3847 
3848 		if (l->min_pid != l->max_pid)
3849 			len += sprintf(buf + len, " pid=%ld-%ld",
3850 				l->min_pid, l->max_pid);
3851 		else
3852 			len += sprintf(buf + len, " pid=%ld",
3853 				l->min_pid);
3854 
3855 		if (num_online_cpus() > 1 &&
3856 				!cpumask_empty(to_cpumask(l->cpus)) &&
3857 				len < PAGE_SIZE - 60) {
3858 			len += sprintf(buf + len, " cpus=");
3859 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3860 						 to_cpumask(l->cpus));
3861 		}
3862 
3863 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
3864 				len < PAGE_SIZE - 60) {
3865 			len += sprintf(buf + len, " nodes=");
3866 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3867 					l->nodes);
3868 		}
3869 
3870 		len += sprintf(buf + len, "\n");
3871 	}
3872 
3873 	free_loc_track(&t);
3874 	kfree(map);
3875 	if (!t.count)
3876 		len += sprintf(buf, "No data\n");
3877 	return len;
3878 }
3879 #endif
3880 
3881 #ifdef SLUB_RESILIENCY_TEST
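/*
 * Deliberately corrupt kmalloc objects before and after free and then run
 * validate_slab_cache() on the affected caches to check that the damage
 * is reported. kmalloc_caches[4] through kmalloc_caches[9] are the
 * kmalloc-16 through kmalloc-512 caches. Only useful with SLUB debugging
 * enabled.
 */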
3882 static void resiliency_test(void)
3883 {
3884 	u8 *p;
3885 
3886 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
3887 
3888 	printk(KERN_ERR "SLUB resiliency testing\n");
3889 	printk(KERN_ERR "-----------------------\n");
3890 	printk(KERN_ERR "A. Corruption after allocation\n");
3891 
3892 	p = kzalloc(16, GFP_KERNEL);
3893 	p[16] = 0x12;
3894 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3895 			" 0x12->0x%p\n\n", p + 16);
3896 
3897 	validate_slab_cache(kmalloc_caches[4]);
3898 
3899 	/* Hmmm... The next two are dangerous */
3900 	p = kzalloc(32, GFP_KERNEL);
3901 	p[32 + sizeof(void *)] = 0x34;
3902 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3903 			" 0x34->0x%p\n", p);
3904 	printk(KERN_ERR
3905 		"If the allocated object is overwritten then it is not detectable\n\n");
3906 
3907 	validate_slab_cache(kmalloc_caches[5]);
3908 	p = kzalloc(64, GFP_KERNEL);
3909 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3910 	*p = 0x56;
3911 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3912 									p);
3913 	printk(KERN_ERR
3914 		"If the allocated object is overwritten then it is not detectable\n\n");
3915 	validate_slab_cache(kmalloc_caches[6]);
3916 
3917 	printk(KERN_ERR "\nB. Corruption after free\n");
3918 	p = kzalloc(128, GFP_KERNEL);
3919 	kfree(p);
3920 	*p = 0x78;
3921 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3922 	validate_slab_cache(kmalloc_caches[7]);
3923 
3924 	p = kzalloc(256, GFP_KERNEL);
3925 	kfree(p);
3926 	p[50] = 0x9a;
3927 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3928 			p);
3929 	validate_slab_cache(kmalloc_caches[8]);
3930 
3931 	p = kzalloc(512, GFP_KERNEL);
3932 	kfree(p);
3933 	p[512] = 0xab;
3934 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3935 	validate_slab_cache(kmalloc_caches[9]);
3936 }
3937 #else
3938 #ifdef CONFIG_SYSFS
3939 static void resiliency_test(void) {}
3940 #endif
3941 #endif
3942 
3943 #ifdef CONFIG_SYSFS
3944 enum slab_stat_type {
3945 	SL_ALL,			/* All slabs */
3946 	SL_PARTIAL,		/* Only partially allocated slabs */
3947 	SL_CPU,			/* Only slabs used for cpu caches */
3948 	SL_OBJECTS,		/* Determine allocated objects not slabs */
3949 	SL_TOTAL		/* Determine object capacity not slabs */
3950 };
3951 
3952 #define SO_ALL		(1 << SL_ALL)
3953 #define SO_PARTIAL	(1 << SL_PARTIAL)
3954 #define SO_CPU		(1 << SL_CPU)
3955 #define SO_OBJECTS	(1 << SL_OBJECTS)
3956 #define SO_TOTAL	(1 << SL_TOTAL)
3957 
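/*
 * SO_ALL, SO_PARTIAL and SO_CPU select which slabs to scan while
 * SO_OBJECTS and SO_TOTAL select what to count: allocated objects,
 * object capacity or, if neither is set, the slabs themselves. E.g.
 * SO_ALL|SO_OBJECTS yields the number of allocated objects in all
 * slabs of the cache.
 */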
3958 static ssize_t show_slab_objects(struct kmem_cache *s,
3959 			    char *buf, unsigned long flags)
3960 {
3961 	unsigned long total = 0;
3962 	int node;
3963 	int x;
3964 	unsigned long *nodes;
3965 	unsigned long *per_cpu;
3966 
3967 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3968 	if (!nodes)
3969 		return -ENOMEM;
3970 	per_cpu = nodes + nr_node_ids;
3971 
3972 	if (flags & SO_CPU) {
3973 		int cpu;
3974 
3975 		for_each_possible_cpu(cpu) {
3976 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3977 
3978 			if (!c || c->node < 0)
3979 				continue;
3980 
3981 			if (c->page) {
3982 				if (flags & SO_TOTAL)
3983 					x = c->page->objects;
3984 				else if (flags & SO_OBJECTS)
3985 					x = c->page->inuse;
3986 				else
3987 					x = 1;
3988 
3989 				total += x;
3990 				nodes[c->node] += x;
3991 			}
3992 			per_cpu[c->node]++;
3993 		}
3994 	}
3995 
3996 	lock_memory_hotplug();
3997 #ifdef CONFIG_SLUB_DEBUG
3998 	if (flags & SO_ALL) {
3999 		for_each_node_state(node, N_NORMAL_MEMORY) {
4000 			struct kmem_cache_node *n = get_node(s, node);
4001 
4002 		if (flags & SO_TOTAL)
4003 			if (flags & SO_TOTAL)
4004 				x = atomic_long_read(&n->total_objects);
4005 			else if (flags & SO_OBJECTS)
4006 				x = atomic_long_read(&n->total_objects) -
4007 					count_partial(n, count_free);
4008 			else
4009 				x = atomic_long_read(&n->nr_slabs);
4011 			nodes[node] += x;
4012 		}
4013 
4014 	} else
4015 #endif
4016 	if (flags & SO_PARTIAL) {
4017 		for_each_node_state(node, N_NORMAL_MEMORY) {
4018 			struct kmem_cache_node *n = get_node(s, node);
4019 
4020 			if (flags & SO_TOTAL)
4021 				x = count_partial(n, count_total);
4022 			else if (flags & SO_OBJECTS)
4023 				x = count_partial(n, count_inuse);
4024 			else
4025 				x = n->nr_partial;
4026 			total += x;
4027 			nodes[node] += x;
4028 		}
4029 	}
4030 	x = sprintf(buf, "%lu", total);
4031 #ifdef CONFIG_NUMA
4032 	for_each_node_state(node, N_NORMAL_MEMORY)
4033 		if (nodes[node])
4034 			x += sprintf(buf + x, " N%d=%lu",
4035 					node, nodes[node]);
4036 #endif
4037 	unlock_memory_hotplug();
4038 	kfree(nodes);
4039 	return x + sprintf(buf + x, "\n");
4040 }
4041 
4042 #ifdef CONFIG_SLUB_DEBUG
4043 static int any_slab_objects(struct kmem_cache *s)
4044 {
4045 	int node;
4046 
4047 	for_each_online_node(node) {
4048 		struct kmem_cache_node *n = get_node(s, node);
4049 
4050 		if (!n)
4051 			continue;
4052 
4053 		if (atomic_long_read(&n->total_objects))
4054 			return 1;
4055 	}
4056 	return 0;
4057 }
4058 #endif
4059 
4060 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4061 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4062 
4063 struct slab_attribute {
4064 	struct attribute attr;
4065 	ssize_t (*show)(struct kmem_cache *s, char *buf);
4066 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4067 };
4068 
4069 #define SLAB_ATTR_RO(_name) \
4070 	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
4071 
4072 #define SLAB_ATTR(_name) \
4073 	static struct slab_attribute _name##_attr =  \
4074 	__ATTR(_name, 0644, _name##_show, _name##_store)
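
/*
 * Each attribute declared with these macros becomes a file of the same
 * name under /sys/kernel/slab/<cache>/. SLAB_ATTR_RO produces a
 * read-only (0444) file, SLAB_ATTR a read-write (0644) one.
 */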
4075 
4076 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4077 {
4078 	return sprintf(buf, "%d\n", s->size);
4079 }
4080 SLAB_ATTR_RO(slab_size);
4081 
4082 static ssize_t align_show(struct kmem_cache *s, char *buf)
4083 {
4084 	return sprintf(buf, "%d\n", s->align);
4085 }
4086 SLAB_ATTR_RO(align);
4087 
4088 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4089 {
4090 	return sprintf(buf, "%d\n", s->objsize);
4091 }
4092 SLAB_ATTR_RO(object_size);
4093 
4094 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4095 {
4096 	return sprintf(buf, "%d\n", oo_objects(s->oo));
4097 }
4098 SLAB_ATTR_RO(objs_per_slab);
4099 
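/*
 * Force a specific slab page order. For example (cache name illustrative)
 *
 *	echo 2 > /sys/kernel/slab/kmalloc-64/order
 *
 * makes the cache use slabs of 2^2 pages. The value must lie within
 * [slub_min_order, slub_max_order].
 */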
4100 static ssize_t order_store(struct kmem_cache *s,
4101 				const char *buf, size_t length)
4102 {
4103 	unsigned long order;
4104 	int err;
4105 
4106 	err = strict_strtoul(buf, 10, &order);
4107 	if (err)
4108 		return err;
4109 
4110 	if (order > slub_max_order || order < slub_min_order)
4111 		return -EINVAL;
4112 
4113 	calculate_sizes(s, order);
4114 	return length;
4115 }
4116 
4117 static ssize_t order_show(struct kmem_cache *s, char *buf)
4118 {
4119 	return sprintf(buf, "%d\n", oo_order(s->oo));
4120 }
4121 SLAB_ATTR(order);
4122 
4123 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4124 {
4125 	return sprintf(buf, "%lu\n", s->min_partial);
4126 }
4127 
4128 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4129 				 size_t length)
4130 {
4131 	unsigned long min;
4132 	int err;
4133 
4134 	err = strict_strtoul(buf, 10, &min);
4135 	if (err)
4136 		return err;
4137 
4138 	set_min_partial(s, min);
4139 	return length;
4140 }
4141 SLAB_ATTR(min_partial);
4142 
4143 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4144 {
4145 	if (!s->ctor)
4146 		return 0;
4147 	return sprintf(buf, "%pS\n", s->ctor);
4148 }
4149 SLAB_ATTR_RO(ctor);
4150 
4151 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4152 {
4153 	return sprintf(buf, "%d\n", s->refcount - 1);
4154 }
4155 SLAB_ATTR_RO(aliases);
4156 
4157 static ssize_t partial_show(struct kmem_cache *s, char *buf)
4158 {
4159 	return show_slab_objects(s, buf, SO_PARTIAL);
4160 }
4161 SLAB_ATTR_RO(partial);
4162 
4163 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4164 {
4165 	return show_slab_objects(s, buf, SO_CPU);
4166 }
4167 SLAB_ATTR_RO(cpu_slabs);
4168 
4169 static ssize_t objects_show(struct kmem_cache *s, char *buf)
4170 {
4171 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4172 }
4173 SLAB_ATTR_RO(objects);
4174 
4175 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4176 {
4177 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4178 }
4179 SLAB_ATTR_RO(objects_partial);
4180 
4181 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4182 {
4183 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4184 }
4185 
4186 static ssize_t reclaim_account_store(struct kmem_cache *s,
4187 				const char *buf, size_t length)
4188 {
4189 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4190 	if (buf[0] == '1')
4191 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4192 	return length;
4193 }
4194 SLAB_ATTR(reclaim_account);
4195 
4196 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4197 {
4198 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4199 }
4200 SLAB_ATTR_RO(hwcache_align);
4201 
4202 #ifdef CONFIG_ZONE_DMA
4203 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4204 {
4205 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4206 }
4207 SLAB_ATTR_RO(cache_dma);
4208 #endif
4209 
4210 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4211 {
4212 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4213 }
4214 SLAB_ATTR_RO(destroy_by_rcu);
4215 
4216 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4217 {
4218 	return sprintf(buf, "%d\n", s->reserved);
4219 }
4220 SLAB_ATTR_RO(reserved);
4221 
4222 #ifdef CONFIG_SLUB_DEBUG
4223 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4224 {
4225 	return show_slab_objects(s, buf, SO_ALL);
4226 }
4227 SLAB_ATTR_RO(slabs);
4228 
4229 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4230 {
4231 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4232 }
4233 SLAB_ATTR_RO(total_objects);
4234 
4235 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4236 {
4237 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4238 }
4239 
4240 static ssize_t sanity_checks_store(struct kmem_cache *s,
4241 				const char *buf, size_t length)
4242 {
4243 	s->flags &= ~SLAB_DEBUG_FREE;
4244 	if (buf[0] == '1')
4245 		s->flags |= SLAB_DEBUG_FREE;
4246 	return length;
4247 }
4248 SLAB_ATTR(sanity_checks);
4249 
4250 static ssize_t trace_show(struct kmem_cache *s, char *buf)
4251 {
4252 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4253 }
4254 
4255 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4256 							size_t length)
4257 {
4258 	s->flags &= ~SLAB_TRACE;
4259 	if (buf[0] == '1')
4260 		s->flags |= SLAB_TRACE;
4261 	return length;
4262 }
4263 SLAB_ATTR(trace);
4264 
4265 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4266 {
4267 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4268 }
4269 
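/*
 * Debug options that change the object layout (red zoning, poisoning,
 * user tracking) may only be toggled while the cache holds no objects;
 * the stores below return -EBUSY otherwise and recompute the slab
 * layout via calculate_sizes().
 */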
4270 static ssize_t red_zone_store(struct kmem_cache *s,
4271 				const char *buf, size_t length)
4272 {
4273 	if (any_slab_objects(s))
4274 		return -EBUSY;
4275 
4276 	s->flags &= ~SLAB_RED_ZONE;
4277 	if (buf[0] == '1')
4278 		s->flags |= SLAB_RED_ZONE;
4279 	calculate_sizes(s, -1);
4280 	return length;
4281 }
4282 SLAB_ATTR(red_zone);
4283 
4284 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4285 {
4286 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4287 }
4288 
4289 static ssize_t poison_store(struct kmem_cache *s,
4290 				const char *buf, size_t length)
4291 {
4292 	if (any_slab_objects(s))
4293 		return -EBUSY;
4294 
4295 	s->flags &= ~SLAB_POISON;
4296 	if (buf[0] == '1')
4297 		s->flags |= SLAB_POISON;
4298 	calculate_sizes(s, -1);
4299 	return length;
4300 }
4301 SLAB_ATTR(poison);
4302 
4303 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4304 {
4305 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4306 }
4307 
4308 static ssize_t store_user_store(struct kmem_cache *s,
4309 				const char *buf, size_t length)
4310 {
4311 	if (any_slab_objects(s))
4312 		return -EBUSY;
4313 
4314 	s->flags &= ~SLAB_STORE_USER;
4315 	if (buf[0] == '1')
4316 		s->flags |= SLAB_STORE_USER;
4317 	calculate_sizes(s, -1);
4318 	return length;
4319 }
4320 SLAB_ATTR(store_user);
4321 
4322 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4323 {
4324 	return 0;
4325 }
4326 
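/*
 * Writing "1" triggers a consistency scan of every slab of the cache
 * via validate_slab_cache().
 */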
4327 static ssize_t validate_store(struct kmem_cache *s,
4328 			const char *buf, size_t length)
4329 {
4330 	int ret = -EINVAL;
4331 
4332 	if (buf[0] == '1') {
4333 		ret = validate_slab_cache(s);
4334 		if (ret >= 0)
4335 			ret = length;
4336 	}
4337 	return ret;
4338 }
4339 SLAB_ATTR(validate);
4340 
4341 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4342 {
4343 	if (!(s->flags & SLAB_STORE_USER))
4344 		return -ENOSYS;
4345 	return list_locations(s, buf, TRACK_ALLOC);
4346 }
4347 SLAB_ATTR_RO(alloc_calls);
4348 
4349 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4350 {
4351 	if (!(s->flags & SLAB_STORE_USER))
4352 		return -ENOSYS;
4353 	return list_locations(s, buf, TRACK_FREE);
4354 }
4355 SLAB_ATTR_RO(free_calls);
4356 #endif /* CONFIG_SLUB_DEBUG */
4357 
4358 #ifdef CONFIG_FAILSLAB
4359 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4360 {
4361 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4362 }
4363 
4364 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4365 							size_t length)
4366 {
4367 	s->flags &= ~SLAB_FAILSLAB;
4368 	if (buf[0] == '1')
4369 		s->flags |= SLAB_FAILSLAB;
4370 	return length;
4371 }
4372 SLAB_ATTR(failslab);
4373 #endif
4374 
4375 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4376 {
4377 	return 0;
4378 }
4379 
4380 static ssize_t shrink_store(struct kmem_cache *s,
4381 			const char *buf, size_t length)
4382 {
4383 	if (buf[0] == '1') {
4384 		int rc = kmem_cache_shrink(s);
4385 
4386 		if (rc)
4387 			return rc;
4388 	} else
4389 		return -EINVAL;
4390 	return length;
4391 }
4392 SLAB_ATTR(shrink);
4393 
4394 #ifdef CONFIG_NUMA
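/*
 * The defrag ratio is presented to user space as a percentage (0-100)
 * but stored internally scaled by 10.
 */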
4395 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4396 {
4397 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4398 }
4399 
4400 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4401 				const char *buf, size_t length)
4402 {
4403 	unsigned long ratio;
4404 	int err;
4405 
4406 	err = strict_strtoul(buf, 10, &ratio);
4407 	if (err)
4408 		return err;
4409 
4410 	if (ratio <= 100)
4411 		s->remote_node_defrag_ratio = ratio * 10;
4412 
4413 	return length;
4414 }
4415 SLAB_ATTR(remote_node_defrag_ratio);
4416 #endif
4417 
4418 #ifdef CONFIG_SLUB_STATS
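/*
 * Show the sum of a per cpu statistics counter followed, on SMP, by the
 * nonzero per cpu counts, e.g. (values illustrative):
 *
 *	4058 C0=2021 C1=2037
 */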
4419 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4420 {
4421 	unsigned long sum  = 0;
4422 	int cpu;
4423 	int len;
4424 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4425 
4426 	if (!data)
4427 		return -ENOMEM;
4428 
4429 	for_each_online_cpu(cpu) {
4430 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
4431 
4432 		data[cpu] = x;
4433 		sum += x;
4434 	}
4435 
4436 	len = sprintf(buf, "%lu", sum);
4437 
4438 #ifdef CONFIG_SMP
4439 	for_each_online_cpu(cpu) {
4440 		if (data[cpu] && len < PAGE_SIZE - 20)
4441 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4442 	}
4443 #endif
4444 	kfree(data);
4445 	return len + sprintf(buf + len, "\n");
4446 }
4447 
4448 static void clear_stat(struct kmem_cache *s, enum stat_item si)
4449 {
4450 	int cpu;
4451 
4452 	for_each_online_cpu(cpu)
4453 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
4454 }
4455 
4456 #define STAT_ATTR(si, text) 					\
4457 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4458 {								\
4459 	return show_stat(s, buf, si);				\
4460 }								\
4461 static ssize_t text##_store(struct kmem_cache *s,		\
4462 				const char *buf, size_t length)	\
4463 {								\
4464 	if (buf[0] != '0')					\
4465 		return -EINVAL;					\
4466 	clear_stat(s, si);					\
4467 	return length;						\
4468 }								\
4469 SLAB_ATTR(text);
4470 
4471 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4472 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4473 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4474 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4475 STAT_ATTR(FREE_FROZEN, free_frozen);
4476 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4477 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4478 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4479 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4480 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4481 STAT_ATTR(FREE_SLAB, free_slab);
4482 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4483 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4484 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4485 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4486 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4487 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4488 STAT_ATTR(ORDER_FALLBACK, order_fallback);
4489 #endif
4490 
4491 static struct attribute *slab_attrs[] = {
4492 	&slab_size_attr.attr,
4493 	&object_size_attr.attr,
4494 	&objs_per_slab_attr.attr,
4495 	&order_attr.attr,
4496 	&min_partial_attr.attr,
4497 	&objects_attr.attr,
4498 	&objects_partial_attr.attr,
4499 	&partial_attr.attr,
4500 	&cpu_slabs_attr.attr,
4501 	&ctor_attr.attr,
4502 	&aliases_attr.attr,
4503 	&align_attr.attr,
4504 	&hwcache_align_attr.attr,
4505 	&reclaim_account_attr.attr,
4506 	&destroy_by_rcu_attr.attr,
4507 	&shrink_attr.attr,
4508 	&reserved_attr.attr,
4509 #ifdef CONFIG_SLUB_DEBUG
4510 	&total_objects_attr.attr,
4511 	&slabs_attr.attr,
4512 	&sanity_checks_attr.attr,
4513 	&trace_attr.attr,
4514 	&red_zone_attr.attr,
4515 	&poison_attr.attr,
4516 	&store_user_attr.attr,
4517 	&validate_attr.attr,
4518 	&alloc_calls_attr.attr,
4519 	&free_calls_attr.attr,
4520 #endif
4521 #ifdef CONFIG_ZONE_DMA
4522 	&cache_dma_attr.attr,
4523 #endif
4524 #ifdef CONFIG_NUMA
4525 	&remote_node_defrag_ratio_attr.attr,
4526 #endif
4527 #ifdef CONFIG_SLUB_STATS
4528 	&alloc_fastpath_attr.attr,
4529 	&alloc_slowpath_attr.attr,
4530 	&free_fastpath_attr.attr,
4531 	&free_slowpath_attr.attr,
4532 	&free_frozen_attr.attr,
4533 	&free_add_partial_attr.attr,
4534 	&free_remove_partial_attr.attr,
4535 	&alloc_from_partial_attr.attr,
4536 	&alloc_slab_attr.attr,
4537 	&alloc_refill_attr.attr,
4538 	&free_slab_attr.attr,
4539 	&cpuslab_flush_attr.attr,
4540 	&deactivate_full_attr.attr,
4541 	&deactivate_empty_attr.attr,
4542 	&deactivate_to_head_attr.attr,
4543 	&deactivate_to_tail_attr.attr,
4544 	&deactivate_remote_frees_attr.attr,
4545 	&order_fallback_attr.attr,
4546 #endif
4547 #ifdef CONFIG_FAILSLAB
4548 	&failslab_attr.attr,
4549 #endif
4550 
4551 	NULL
4552 };
4553 
4554 static struct attribute_group slab_attr_group = {
4555 	.attrs = slab_attrs,
4556 };
4557 
4558 static ssize_t slab_attr_show(struct kobject *kobj,
4559 				struct attribute *attr,
4560 				char *buf)
4561 {
4562 	struct slab_attribute *attribute;
4563 	struct kmem_cache *s;
4564 	int err;
4565 
4566 	attribute = to_slab_attr(attr);
4567 	s = to_slab(kobj);
4568 
4569 	if (!attribute->show)
4570 		return -EIO;
4571 
4572 	err = attribute->show(s, buf);
4573 
4574 	return err;
4575 }
4576 
4577 static ssize_t slab_attr_store(struct kobject *kobj,
4578 				struct attribute *attr,
4579 				const char *buf, size_t len)
4580 {
4581 	struct slab_attribute *attribute;
4582 	struct kmem_cache *s;
4583 	int err;
4584 
4585 	attribute = to_slab_attr(attr);
4586 	s = to_slab(kobj);
4587 
4588 	if (!attribute->store)
4589 		return -EIO;
4590 
4591 	err = attribute->store(s, buf, len);
4592 
4593 	return err;
4594 }
4595 
4596 static void kmem_cache_release(struct kobject *kobj)
4597 {
4598 	struct kmem_cache *s = to_slab(kobj);
4599 
4600 	kfree(s->name);
4601 	kfree(s);
4602 }
4603 
4604 static const struct sysfs_ops slab_sysfs_ops = {
4605 	.show = slab_attr_show,
4606 	.store = slab_attr_store,
4607 };
4608 
4609 static struct kobj_type slab_ktype = {
4610 	.sysfs_ops = &slab_sysfs_ops,
4611 	.release = kmem_cache_release
4612 };
4613 
4614 static int uevent_filter(struct kset *kset, struct kobject *kobj)
4615 {
4616 	struct kobj_type *ktype = get_ktype(kobj);
4617 
4618 	if (ktype == &slab_ktype)
4619 		return 1;
4620 	return 0;
4621 }
4622 
4623 static const struct kset_uevent_ops slab_uevent_ops = {
4624 	.filter = uevent_filter,
4625 };
4626 
4627 static struct kset *slab_kset;
4628 
4629 #define ID_STR_LENGTH 64
4630 
4631 /* Create a unique string id for a slab cache:
4632  *
4633  * Format	:[flags-]size
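 *
 * e.g. a 4096 byte DMA cache that is not marked SLAB_NOTRACK gets the
 * id ":dt-0004096".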
4634  */
4635 static char *create_unique_id(struct kmem_cache *s)
4636 {
4637 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4638 	char *p = name;
4639 
4640 	BUG_ON(!name);
4641 
4642 	*p++ = ':';
4643 	/*
4644 	 * First flags affecting slabcache operations. We will only
4645 	 * get here for aliasable slabs so we do not need to support
4646 	 * too many flags. The flags here must cover all flags that
4647 	 * are matched during merging to guarantee that the id is
4648 	 * unique.
4649 	 */
4650 	if (s->flags & SLAB_CACHE_DMA)
4651 		*p++ = 'd';
4652 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4653 		*p++ = 'a';
4654 	if (s->flags & SLAB_DEBUG_FREE)
4655 		*p++ = 'F';
4656 	if (!(s->flags & SLAB_NOTRACK))
4657 		*p++ = 't';
4658 	if (p != name + 1)
4659 		*p++ = '-';
4660 	p += sprintf(p, "%07d", s->size);
4661 	BUG_ON(p > name + ID_STR_LENGTH - 1);
4662 	return name;
4663 }
4664 
4665 static int sysfs_slab_add(struct kmem_cache *s)
4666 {
4667 	int err;
4668 	const char *name;
4669 	int unmergeable;
4670 
4671 	if (slab_state < SYSFS)
4672 		/* Defer until later */
4673 		return 0;
4674 
4675 	unmergeable = slab_unmergeable(s);
4676 	if (unmergeable) {
4677 		/*
4678 		 * Slabcache can never be merged so we can use the name proper.
4679 		 * This is typically the case for debug situations. In that
4680 		 * case we can catch duplicate names easily.
4681 		 */
4682 		sysfs_remove_link(&slab_kset->kobj, s->name);
4683 		name = s->name;
4684 	} else {
4685 		/*
4686 		 * Create a unique name for the slab as a target
4687 		 * for the symlinks.
4688 		 */
4689 		name = create_unique_id(s);
4690 	}
4691 
4692 	s->kobj.kset = slab_kset;
4693 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4694 	if (err) {
4695 		kobject_put(&s->kobj);
4696 		return err;
4697 	}
4698 
4699 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4700 	if (err) {
4701 		kobject_del(&s->kobj);
4702 		kobject_put(&s->kobj);
4703 		return err;
4704 	}
4705 	kobject_uevent(&s->kobj, KOBJ_ADD);
4706 	if (!unmergeable) {
4707 		/* Setup first alias */
4708 		sysfs_slab_alias(s, s->name);
4709 		kfree(name);
4710 	}
4711 	return 0;
4712 }
4713 
4714 static void sysfs_slab_remove(struct kmem_cache *s)
4715 {
4716 	if (slab_state < SYSFS)
4717 		/*
4718 		 * Sysfs has not been set up yet so no need to remove the
4719 		 * cache from sysfs.
4720 		 */
4721 		return;
4722 
4723 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4724 	kobject_del(&s->kobj);
4725 	kobject_put(&s->kobj);
4726 }
4727 
4728 /*
4729  * Need to buffer aliases during bootup until sysfs becomes
4730  * available lest we lose that information.
4731  */
4732 struct saved_alias {
4733 	struct kmem_cache *s;
4734 	const char *name;
4735 	struct saved_alias *next;
4736 };
4737 
4738 static struct saved_alias *alias_list;
4739 
4740 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4741 {
4742 	struct saved_alias *al;
4743 
4744 	if (slab_state == SYSFS) {
4745 		/*
4746 		 * If we have a leftover link then remove it.
4747 		 */
4748 		sysfs_remove_link(&slab_kset->kobj, name);
4749 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4750 	}
4751 
4752 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4753 	if (!al)
4754 		return -ENOMEM;
4755 
4756 	al->s = s;
4757 	al->name = name;
4758 	al->next = alias_list;
4759 	alias_list = al;
4760 	return 0;
4761 }
4762 
4763 static int __init slab_sysfs_init(void)
4764 {
4765 	struct kmem_cache *s;
4766 	int err;
4767 
4768 	down_write(&slub_lock);
4769 
4770 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4771 	if (!slab_kset) {
4772 		up_write(&slub_lock);
4773 		printk(KERN_ERR "Cannot register slab subsystem.\n");
4774 		return -ENOSYS;
4775 	}
4776 
4777 	slab_state = SYSFS;
4778 
4779 	list_for_each_entry(s, &slab_caches, list) {
4780 		err = sysfs_slab_add(s);
4781 		if (err)
4782 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4783 						" to sysfs\n", s->name);
4784 	}
4785 
4786 	while (alias_list) {
4787 		struct saved_alias *al = alias_list;
4788 
4789 		alias_list = alias_list->next;
4790 		err = sysfs_slab_alias(al->s, al->name);
4791 		if (err)
4792 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4793 					" %s to sysfs\n", al->name);
4794 		kfree(al);
4795 	}
4796 
4797 	up_write(&slub_lock);
4798 	resiliency_test();
4799 	return 0;
4800 }
4801 
4802 __initcall(slab_sysfs_init);
4803 #endif /* CONFIG_SYSFS */
4804 
4805 /*
4806  * The /proc/slabinfo ABI
4807  */
4808 #ifdef CONFIG_SLABINFO
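/*
 * SLUB emulates the SLAB statistics format (version 2.1). The tunables
 * and sharedavail fields do not apply to SLUB and are always printed
 * as zero.
 */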
4809 static void print_slabinfo_header(struct seq_file *m)
4810 {
4811 	seq_puts(m, "slabinfo - version: 2.1\n");
4812 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4813 		 "<objperslab> <pagesperslab>");
4814 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4815 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4816 	seq_putc(m, '\n');
4817 }
4818 
4819 static void *s_start(struct seq_file *m, loff_t *pos)
4820 {
4821 	loff_t n = *pos;
4822 
4823 	down_read(&slub_lock);
4824 	if (!n)
4825 		print_slabinfo_header(m);
4826 
4827 	return seq_list_start(&slab_caches, *pos);
4828 }
4829 
4830 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4831 {
4832 	return seq_list_next(p, &slab_caches, pos);
4833 }
4834 
4835 static void s_stop(struct seq_file *m, void *p)
4836 {
4837 	up_read(&slub_lock);
4838 }
4839 
4840 static int s_show(struct seq_file *m, void *p)
4841 {
4842 	unsigned long nr_partials = 0;
4843 	unsigned long nr_slabs = 0;
4844 	unsigned long nr_inuse = 0;
4845 	unsigned long nr_objs = 0;
4846 	unsigned long nr_free = 0;
4847 	struct kmem_cache *s;
4848 	int node;
4849 
4850 	s = list_entry(p, struct kmem_cache, list);
4851 
4852 	for_each_online_node(node) {
4853 		struct kmem_cache_node *n = get_node(s, node);
4854 
4855 		if (!n)
4856 			continue;
4857 
4858 		nr_partials += n->nr_partial;
4859 		nr_slabs += atomic_long_read(&n->nr_slabs);
4860 		nr_objs += atomic_long_read(&n->total_objects);
4861 		nr_free += count_partial(n, count_free);
4862 	}
4863 
4864 	nr_inuse = nr_objs - nr_free;
4865 
4866 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4867 		   nr_objs, s->size, oo_objects(s->oo),
4868 		   (1 << oo_order(s->oo)));
4869 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4870 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4871 		   0UL);
4872 	seq_putc(m, '\n');
4873 	return 0;
4874 }
4875 
4876 static const struct seq_operations slabinfo_op = {
4877 	.start = s_start,
4878 	.next = s_next,
4879 	.stop = s_stop,
4880 	.show = s_show,
4881 };
4882 
4883 static int slabinfo_open(struct inode *inode, struct file *file)
4884 {
4885 	return seq_open(file, &slabinfo_op);
4886 }
4887 
4888 static const struct file_operations proc_slabinfo_operations = {
4889 	.open		= slabinfo_open,
4890 	.read		= seq_read,
4891 	.llseek		= seq_lseek,
4892 	.release	= seq_release,
4893 };
4894 
4895 static int __init slab_proc_init(void)
4896 {
4897 	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
4898 	return 0;
4899 }
4900 module_init(slab_proc_init);
4901 #endif /* CONFIG_SLABINFO */
4902