xref: /openbmc/linux/mm/slub.c (revision 1ab142d4)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks or atomic operatios
6  * and only uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  * (C) 2011 Linux Foundation, Christoph Lameter
10  */
11 
12 #include <linux/mm.h>
13 #include <linux/swap.h> /* struct reclaim_state */
14 #include <linux/module.h>
15 #include <linux/bit_spinlock.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/kmemcheck.h>
22 #include <linux/cpu.h>
23 #include <linux/cpuset.h>
24 #include <linux/mempolicy.h>
25 #include <linux/ctype.h>
26 #include <linux/debugobjects.h>
27 #include <linux/kallsyms.h>
28 #include <linux/memory.h>
29 #include <linux/math64.h>
30 #include <linux/fault-inject.h>
31 #include <linux/stacktrace.h>
32 
33 #include <trace/events/kmem.h>
34 
35 /*
36  * Lock order:
37  *   1. slub_lock (Global Semaphore)
38  *   2. node->list_lock
39  *   3. slab_lock(page) (Only on some arches and for debugging)
40  *
41  *   slub_lock
42  *
43  *   The role of the slub_lock is to protect the list of all the slabs
44  *   and to synchronize major metadata changes to slab cache structures.
45  *
46  *   The slab_lock is only used for debugging and on arches that do not
47  *   have the ability to do a cmpxchg_double. It only protects the second
48  *   double word in the page struct. Meaning
49  *	A. page->freelist	-> List of object free in a page
50  *	B. page->counters	-> Counters of objects
51  *	C. page->frozen		-> frozen state
52  *
53  *   If a slab is frozen then it is exempt from list management. It is not
54  *   on any list. The processor that froze the slab is the one who can
55  *   perform list operations on the page. Other processors may put objects
56  *   onto the freelist but the processor that froze the slab is the only
57  *   one that can retrieve the objects from the page's freelist.
58  *
59  *   The list_lock protects the partial and full list on each node and
60  *   the partial slab counter. If taken then no new slabs may be added or
61  *   removed from the lists nor make the number of partial slabs be modified.
62  *   (Note that the total number of slabs is an atomic value that may be
63  *   modified without taking the list lock).
64  *
65  *   The list_lock is a centralized lock and thus we avoid taking it as
66  *   much as possible. As long as SLUB does not have to handle partial
67  *   slabs, operations can continue without any centralized lock. F.e.
68  *   allocating a long series of objects that fill up slabs does not require
69  *   the list lock.
70  *   Interrupts are disabled during allocation and deallocation in order to
71  *   make the slab allocator safe to use in the context of an irq. In addition
72  *   interrupts are disabled to ensure that the processor does not change
73  *   while handling per_cpu slabs, due to kernel preemption.
74  *
75  * SLUB assigns one slab for allocation to each processor.
76  * Allocations only occur from these slabs called cpu slabs.
77  *
78  * Slabs with free elements are kept on a partial list and during regular
79  * operations no list for full slabs is used. If an object in a full slab is
80  * freed then the slab will show up again on the partial lists.
81  * We track full slabs for debugging purposes though because otherwise we
82  * cannot scan all objects.
83  *
84  * Slabs are freed when they become empty. Teardown and setup is
85  * minimal so we rely on the page allocators per cpu caches for
86  * fast frees and allocs.
87  *
88  * Overloading of page flags that are otherwise used for LRU management.
89  *
90  * PageActive 		The slab is frozen and exempt from list processing.
91  * 			This means that the slab is dedicated to a purpose
92  * 			such as satisfying allocations for a specific
93  * 			processor. Objects may be freed in the slab while
94  * 			it is frozen but slab_free will then skip the usual
95  * 			list operations. It is up to the processor holding
96  * 			the slab to integrate the slab into the slab lists
97  * 			when the slab is no longer needed.
98  *
99  * 			One use of this flag is to mark slabs that are
100  * 			used for allocations. Then such a slab becomes a cpu
101  * 			slab. The cpu slab may be equipped with an additional
102  * 			freelist that allows lockless access to
103  * 			free objects in addition to the regular freelist
104  * 			that requires the slab lock.
105  *
106  * PageError		Slab requires special handling due to debug
107  * 			options set. This moves	slab handling out of
108  * 			the fast path and disables lockless freelists.
109  */
110 
111 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
112 		SLAB_TRACE | SLAB_DEBUG_FREE)
113 
114 static inline int kmem_cache_debug(struct kmem_cache *s)
115 {
116 #ifdef CONFIG_SLUB_DEBUG
117 	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
118 #else
119 	return 0;
120 #endif
121 }
122 
123 /*
124  * Issues still to be resolved:
125  *
126  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
127  *
128  * - Variable sizing of the per node arrays
129  */
130 
131 /* Enable to test recovery from slab corruption on boot */
132 #undef SLUB_RESILIENCY_TEST
133 
134 /* Enable to log cmpxchg failures */
135 #undef SLUB_DEBUG_CMPXCHG
136 
137 /*
138  * Mininum number of partial slabs. These will be left on the partial
139  * lists even if they are empty. kmem_cache_shrink may reclaim them.
140  */
141 #define MIN_PARTIAL 5
142 
143 /*
144  * Maximum number of desirable partial slabs.
145  * The existence of more partial slabs makes kmem_cache_shrink
146  * sort the partial list by the number of objects in the.
147  */
148 #define MAX_PARTIAL 10
149 
150 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
151 				SLAB_POISON | SLAB_STORE_USER)
152 
153 /*
154  * Debugging flags that require metadata to be stored in the slab.  These get
155  * disabled when slub_debug=O is used and a cache's min order increases with
156  * metadata.
157  */
158 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
159 
160 /*
161  * Set of flags that will prevent slab merging
162  */
163 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
164 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
165 		SLAB_FAILSLAB)
166 
167 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
168 		SLAB_CACHE_DMA | SLAB_NOTRACK)
169 
170 #define OO_SHIFT	16
171 #define OO_MASK		((1 << OO_SHIFT) - 1)
172 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
173 
174 /* Internal SLUB flags */
175 #define __OBJECT_POISON		0x80000000UL /* Poison object */
176 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
177 
178 static int kmem_size = sizeof(struct kmem_cache);
179 
180 #ifdef CONFIG_SMP
181 static struct notifier_block slab_notifier;
182 #endif
183 
184 static enum {
185 	DOWN,		/* No slab functionality available */
186 	PARTIAL,	/* Kmem_cache_node works */
187 	UP,		/* Everything works but does not show up in sysfs */
188 	SYSFS		/* Sysfs up */
189 } slab_state = DOWN;
190 
191 /* A list of all slab caches on the system */
192 static DECLARE_RWSEM(slub_lock);
193 static LIST_HEAD(slab_caches);
194 
195 /*
196  * Tracking user of a slab.
197  */
198 #define TRACK_ADDRS_COUNT 16
199 struct track {
200 	unsigned long addr;	/* Called from address */
201 #ifdef CONFIG_STACKTRACE
202 	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
203 #endif
204 	int cpu;		/* Was running on cpu */
205 	int pid;		/* Pid context */
206 	unsigned long when;	/* When did the operation occur */
207 };
208 
209 enum track_item { TRACK_ALLOC, TRACK_FREE };
210 
211 #ifdef CONFIG_SYSFS
212 static int sysfs_slab_add(struct kmem_cache *);
213 static int sysfs_slab_alias(struct kmem_cache *, const char *);
214 static void sysfs_slab_remove(struct kmem_cache *);
215 
216 #else
217 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
218 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
219 							{ return 0; }
220 static inline void sysfs_slab_remove(struct kmem_cache *s)
221 {
222 	kfree(s->name);
223 	kfree(s);
224 }
225 
226 #endif
227 
228 static inline void stat(const struct kmem_cache *s, enum stat_item si)
229 {
230 #ifdef CONFIG_SLUB_STATS
231 	__this_cpu_inc(s->cpu_slab->stat[si]);
232 #endif
233 }
234 
235 /********************************************************************
236  * 			Core slab cache functions
237  *******************************************************************/
238 
239 int slab_is_available(void)
240 {
241 	return slab_state >= UP;
242 }
243 
244 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
245 {
246 	return s->node[node];
247 }
248 
249 /* Verify that a pointer has an address that is valid within a slab page */
250 static inline int check_valid_pointer(struct kmem_cache *s,
251 				struct page *page, const void *object)
252 {
253 	void *base;
254 
255 	if (!object)
256 		return 1;
257 
258 	base = page_address(page);
259 	if (object < base || object >= base + page->objects * s->size ||
260 		(object - base) % s->size) {
261 		return 0;
262 	}
263 
264 	return 1;
265 }
266 
267 static inline void *get_freepointer(struct kmem_cache *s, void *object)
268 {
269 	return *(void **)(object + s->offset);
270 }
271 
272 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
273 {
274 	void *p;
275 
276 #ifdef CONFIG_DEBUG_PAGEALLOC
277 	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
278 #else
279 	p = get_freepointer(s, object);
280 #endif
281 	return p;
282 }
283 
284 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
285 {
286 	*(void **)(object + s->offset) = fp;
287 }
288 
289 /* Loop over all objects in a slab */
290 #define for_each_object(__p, __s, __addr, __objects) \
291 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
292 			__p += (__s)->size)
293 
294 /* Determine object index from a given position */
295 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
296 {
297 	return (p - addr) / s->size;
298 }
299 
300 static inline size_t slab_ksize(const struct kmem_cache *s)
301 {
302 #ifdef CONFIG_SLUB_DEBUG
303 	/*
304 	 * Debugging requires use of the padding between object
305 	 * and whatever may come after it.
306 	 */
307 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
308 		return s->objsize;
309 
310 #endif
311 	/*
312 	 * If we have the need to store the freelist pointer
313 	 * back there or track user information then we can
314 	 * only use the space before that information.
315 	 */
316 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
317 		return s->inuse;
318 	/*
319 	 * Else we can use all the padding etc for the allocation
320 	 */
321 	return s->size;
322 }
323 
324 static inline int order_objects(int order, unsigned long size, int reserved)
325 {
326 	return ((PAGE_SIZE << order) - reserved) / size;
327 }
328 
329 static inline struct kmem_cache_order_objects oo_make(int order,
330 		unsigned long size, int reserved)
331 {
332 	struct kmem_cache_order_objects x = {
333 		(order << OO_SHIFT) + order_objects(order, size, reserved)
334 	};
335 
336 	return x;
337 }
338 
339 static inline int oo_order(struct kmem_cache_order_objects x)
340 {
341 	return x.x >> OO_SHIFT;
342 }
343 
344 static inline int oo_objects(struct kmem_cache_order_objects x)
345 {
346 	return x.x & OO_MASK;
347 }
348 
349 /*
350  * Per slab locking using the pagelock
351  */
352 static __always_inline void slab_lock(struct page *page)
353 {
354 	bit_spin_lock(PG_locked, &page->flags);
355 }
356 
357 static __always_inline void slab_unlock(struct page *page)
358 {
359 	__bit_spin_unlock(PG_locked, &page->flags);
360 }
361 
362 /* Interrupts must be disabled (for the fallback code to work right) */
363 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
364 		void *freelist_old, unsigned long counters_old,
365 		void *freelist_new, unsigned long counters_new,
366 		const char *n)
367 {
368 	VM_BUG_ON(!irqs_disabled());
369 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
370     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
371 	if (s->flags & __CMPXCHG_DOUBLE) {
372 		if (cmpxchg_double(&page->freelist, &page->counters,
373 			freelist_old, counters_old,
374 			freelist_new, counters_new))
375 		return 1;
376 	} else
377 #endif
378 	{
379 		slab_lock(page);
380 		if (page->freelist == freelist_old && page->counters == counters_old) {
381 			page->freelist = freelist_new;
382 			page->counters = counters_new;
383 			slab_unlock(page);
384 			return 1;
385 		}
386 		slab_unlock(page);
387 	}
388 
389 	cpu_relax();
390 	stat(s, CMPXCHG_DOUBLE_FAIL);
391 
392 #ifdef SLUB_DEBUG_CMPXCHG
393 	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
394 #endif
395 
396 	return 0;
397 }
398 
399 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
400 		void *freelist_old, unsigned long counters_old,
401 		void *freelist_new, unsigned long counters_new,
402 		const char *n)
403 {
404 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
405     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
406 	if (s->flags & __CMPXCHG_DOUBLE) {
407 		if (cmpxchg_double(&page->freelist, &page->counters,
408 			freelist_old, counters_old,
409 			freelist_new, counters_new))
410 		return 1;
411 	} else
412 #endif
413 	{
414 		unsigned long flags;
415 
416 		local_irq_save(flags);
417 		slab_lock(page);
418 		if (page->freelist == freelist_old && page->counters == counters_old) {
419 			page->freelist = freelist_new;
420 			page->counters = counters_new;
421 			slab_unlock(page);
422 			local_irq_restore(flags);
423 			return 1;
424 		}
425 		slab_unlock(page);
426 		local_irq_restore(flags);
427 	}
428 
429 	cpu_relax();
430 	stat(s, CMPXCHG_DOUBLE_FAIL);
431 
432 #ifdef SLUB_DEBUG_CMPXCHG
433 	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
434 #endif
435 
436 	return 0;
437 }
438 
439 #ifdef CONFIG_SLUB_DEBUG
440 /*
441  * Determine a map of object in use on a page.
442  *
443  * Node listlock must be held to guarantee that the page does
444  * not vanish from under us.
445  */
446 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
447 {
448 	void *p;
449 	void *addr = page_address(page);
450 
451 	for (p = page->freelist; p; p = get_freepointer(s, p))
452 		set_bit(slab_index(p, s, addr), map);
453 }
454 
455 /*
456  * Debug settings:
457  */
458 #ifdef CONFIG_SLUB_DEBUG_ON
459 static int slub_debug = DEBUG_DEFAULT_FLAGS;
460 #else
461 static int slub_debug;
462 #endif
463 
464 static char *slub_debug_slabs;
465 static int disable_higher_order_debug;
466 
467 /*
468  * Object debugging
469  */
470 static void print_section(char *text, u8 *addr, unsigned int length)
471 {
472 	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
473 			length, 1);
474 }
475 
476 static struct track *get_track(struct kmem_cache *s, void *object,
477 	enum track_item alloc)
478 {
479 	struct track *p;
480 
481 	if (s->offset)
482 		p = object + s->offset + sizeof(void *);
483 	else
484 		p = object + s->inuse;
485 
486 	return p + alloc;
487 }
488 
489 static void set_track(struct kmem_cache *s, void *object,
490 			enum track_item alloc, unsigned long addr)
491 {
492 	struct track *p = get_track(s, object, alloc);
493 
494 	if (addr) {
495 #ifdef CONFIG_STACKTRACE
496 		struct stack_trace trace;
497 		int i;
498 
499 		trace.nr_entries = 0;
500 		trace.max_entries = TRACK_ADDRS_COUNT;
501 		trace.entries = p->addrs;
502 		trace.skip = 3;
503 		save_stack_trace(&trace);
504 
505 		/* See rant in lockdep.c */
506 		if (trace.nr_entries != 0 &&
507 		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
508 			trace.nr_entries--;
509 
510 		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
511 			p->addrs[i] = 0;
512 #endif
513 		p->addr = addr;
514 		p->cpu = smp_processor_id();
515 		p->pid = current->pid;
516 		p->when = jiffies;
517 	} else
518 		memset(p, 0, sizeof(struct track));
519 }
520 
521 static void init_tracking(struct kmem_cache *s, void *object)
522 {
523 	if (!(s->flags & SLAB_STORE_USER))
524 		return;
525 
526 	set_track(s, object, TRACK_FREE, 0UL);
527 	set_track(s, object, TRACK_ALLOC, 0UL);
528 }
529 
530 static void print_track(const char *s, struct track *t)
531 {
532 	if (!t->addr)
533 		return;
534 
535 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
536 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
537 #ifdef CONFIG_STACKTRACE
538 	{
539 		int i;
540 		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
541 			if (t->addrs[i])
542 				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
543 			else
544 				break;
545 	}
546 #endif
547 }
548 
549 static void print_tracking(struct kmem_cache *s, void *object)
550 {
551 	if (!(s->flags & SLAB_STORE_USER))
552 		return;
553 
554 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
555 	print_track("Freed", get_track(s, object, TRACK_FREE));
556 }
557 
558 static void print_page_info(struct page *page)
559 {
560 	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
561 		page, page->objects, page->inuse, page->freelist, page->flags);
562 
563 }
564 
565 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
566 {
567 	va_list args;
568 	char buf[100];
569 
570 	va_start(args, fmt);
571 	vsnprintf(buf, sizeof(buf), fmt, args);
572 	va_end(args);
573 	printk(KERN_ERR "========================================"
574 			"=====================================\n");
575 	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
576 	printk(KERN_ERR "----------------------------------------"
577 			"-------------------------------------\n\n");
578 }
579 
580 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
581 {
582 	va_list args;
583 	char buf[100];
584 
585 	va_start(args, fmt);
586 	vsnprintf(buf, sizeof(buf), fmt, args);
587 	va_end(args);
588 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
589 }
590 
591 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
592 {
593 	unsigned int off;	/* Offset of last byte */
594 	u8 *addr = page_address(page);
595 
596 	print_tracking(s, p);
597 
598 	print_page_info(page);
599 
600 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
601 			p, p - addr, get_freepointer(s, p));
602 
603 	if (p > addr + 16)
604 		print_section("Bytes b4 ", p - 16, 16);
605 
606 	print_section("Object ", p, min_t(unsigned long, s->objsize,
607 				PAGE_SIZE));
608 	if (s->flags & SLAB_RED_ZONE)
609 		print_section("Redzone ", p + s->objsize,
610 			s->inuse - s->objsize);
611 
612 	if (s->offset)
613 		off = s->offset + sizeof(void *);
614 	else
615 		off = s->inuse;
616 
617 	if (s->flags & SLAB_STORE_USER)
618 		off += 2 * sizeof(struct track);
619 
620 	if (off != s->size)
621 		/* Beginning of the filler is the free pointer */
622 		print_section("Padding ", p + off, s->size - off);
623 
624 	dump_stack();
625 }
626 
627 static void object_err(struct kmem_cache *s, struct page *page,
628 			u8 *object, char *reason)
629 {
630 	slab_bug(s, "%s", reason);
631 	print_trailer(s, page, object);
632 }
633 
634 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
635 {
636 	va_list args;
637 	char buf[100];
638 
639 	va_start(args, fmt);
640 	vsnprintf(buf, sizeof(buf), fmt, args);
641 	va_end(args);
642 	slab_bug(s, "%s", buf);
643 	print_page_info(page);
644 	dump_stack();
645 }
646 
647 static void init_object(struct kmem_cache *s, void *object, u8 val)
648 {
649 	u8 *p = object;
650 
651 	if (s->flags & __OBJECT_POISON) {
652 		memset(p, POISON_FREE, s->objsize - 1);
653 		p[s->objsize - 1] = POISON_END;
654 	}
655 
656 	if (s->flags & SLAB_RED_ZONE)
657 		memset(p + s->objsize, val, s->inuse - s->objsize);
658 }
659 
660 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
661 						void *from, void *to)
662 {
663 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
664 	memset(from, data, to - from);
665 }
666 
667 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
668 			u8 *object, char *what,
669 			u8 *start, unsigned int value, unsigned int bytes)
670 {
671 	u8 *fault;
672 	u8 *end;
673 
674 	fault = memchr_inv(start, value, bytes);
675 	if (!fault)
676 		return 1;
677 
678 	end = start + bytes;
679 	while (end > fault && end[-1] == value)
680 		end--;
681 
682 	slab_bug(s, "%s overwritten", what);
683 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
684 					fault, end - 1, fault[0], value);
685 	print_trailer(s, page, object);
686 
687 	restore_bytes(s, what, value, fault, end);
688 	return 0;
689 }
690 
691 /*
692  * Object layout:
693  *
694  * object address
695  * 	Bytes of the object to be managed.
696  * 	If the freepointer may overlay the object then the free
697  * 	pointer is the first word of the object.
698  *
699  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
700  * 	0xa5 (POISON_END)
701  *
702  * object + s->objsize
703  * 	Padding to reach word boundary. This is also used for Redzoning.
704  * 	Padding is extended by another word if Redzoning is enabled and
705  * 	objsize == inuse.
706  *
707  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
708  * 	0xcc (RED_ACTIVE) for objects in use.
709  *
710  * object + s->inuse
711  * 	Meta data starts here.
712  *
713  * 	A. Free pointer (if we cannot overwrite object on free)
714  * 	B. Tracking data for SLAB_STORE_USER
715  * 	C. Padding to reach required alignment boundary or at mininum
716  * 		one word if debugging is on to be able to detect writes
717  * 		before the word boundary.
718  *
719  *	Padding is done using 0x5a (POISON_INUSE)
720  *
721  * object + s->size
722  * 	Nothing is used beyond s->size.
723  *
724  * If slabcaches are merged then the objsize and inuse boundaries are mostly
725  * ignored. And therefore no slab options that rely on these boundaries
726  * may be used with merged slabcaches.
727  */
728 
729 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
730 {
731 	unsigned long off = s->inuse;	/* The end of info */
732 
733 	if (s->offset)
734 		/* Freepointer is placed after the object. */
735 		off += sizeof(void *);
736 
737 	if (s->flags & SLAB_STORE_USER)
738 		/* We also have user information there */
739 		off += 2 * sizeof(struct track);
740 
741 	if (s->size == off)
742 		return 1;
743 
744 	return check_bytes_and_report(s, page, p, "Object padding",
745 				p + off, POISON_INUSE, s->size - off);
746 }
747 
748 /* Check the pad bytes at the end of a slab page */
749 static int slab_pad_check(struct kmem_cache *s, struct page *page)
750 {
751 	u8 *start;
752 	u8 *fault;
753 	u8 *end;
754 	int length;
755 	int remainder;
756 
757 	if (!(s->flags & SLAB_POISON))
758 		return 1;
759 
760 	start = page_address(page);
761 	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
762 	end = start + length;
763 	remainder = length % s->size;
764 	if (!remainder)
765 		return 1;
766 
767 	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
768 	if (!fault)
769 		return 1;
770 	while (end > fault && end[-1] == POISON_INUSE)
771 		end--;
772 
773 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
774 	print_section("Padding ", end - remainder, remainder);
775 
776 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
777 	return 0;
778 }
779 
780 static int check_object(struct kmem_cache *s, struct page *page,
781 					void *object, u8 val)
782 {
783 	u8 *p = object;
784 	u8 *endobject = object + s->objsize;
785 
786 	if (s->flags & SLAB_RED_ZONE) {
787 		if (!check_bytes_and_report(s, page, object, "Redzone",
788 			endobject, val, s->inuse - s->objsize))
789 			return 0;
790 	} else {
791 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
792 			check_bytes_and_report(s, page, p, "Alignment padding",
793 				endobject, POISON_INUSE, s->inuse - s->objsize);
794 		}
795 	}
796 
797 	if (s->flags & SLAB_POISON) {
798 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
799 			(!check_bytes_and_report(s, page, p, "Poison", p,
800 					POISON_FREE, s->objsize - 1) ||
801 			 !check_bytes_and_report(s, page, p, "Poison",
802 				p + s->objsize - 1, POISON_END, 1)))
803 			return 0;
804 		/*
805 		 * check_pad_bytes cleans up on its own.
806 		 */
807 		check_pad_bytes(s, page, p);
808 	}
809 
810 	if (!s->offset && val == SLUB_RED_ACTIVE)
811 		/*
812 		 * Object and freepointer overlap. Cannot check
813 		 * freepointer while object is allocated.
814 		 */
815 		return 1;
816 
817 	/* Check free pointer validity */
818 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
819 		object_err(s, page, p, "Freepointer corrupt");
820 		/*
821 		 * No choice but to zap it and thus lose the remainder
822 		 * of the free objects in this slab. May cause
823 		 * another error because the object count is now wrong.
824 		 */
825 		set_freepointer(s, p, NULL);
826 		return 0;
827 	}
828 	return 1;
829 }
830 
831 static int check_slab(struct kmem_cache *s, struct page *page)
832 {
833 	int maxobj;
834 
835 	VM_BUG_ON(!irqs_disabled());
836 
837 	if (!PageSlab(page)) {
838 		slab_err(s, page, "Not a valid slab page");
839 		return 0;
840 	}
841 
842 	maxobj = order_objects(compound_order(page), s->size, s->reserved);
843 	if (page->objects > maxobj) {
844 		slab_err(s, page, "objects %u > max %u",
845 			s->name, page->objects, maxobj);
846 		return 0;
847 	}
848 	if (page->inuse > page->objects) {
849 		slab_err(s, page, "inuse %u > max %u",
850 			s->name, page->inuse, page->objects);
851 		return 0;
852 	}
853 	/* Slab_pad_check fixes things up after itself */
854 	slab_pad_check(s, page);
855 	return 1;
856 }
857 
858 /*
859  * Determine if a certain object on a page is on the freelist. Must hold the
860  * slab lock to guarantee that the chains are in a consistent state.
861  */
862 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
863 {
864 	int nr = 0;
865 	void *fp;
866 	void *object = NULL;
867 	unsigned long max_objects;
868 
869 	fp = page->freelist;
870 	while (fp && nr <= page->objects) {
871 		if (fp == search)
872 			return 1;
873 		if (!check_valid_pointer(s, page, fp)) {
874 			if (object) {
875 				object_err(s, page, object,
876 					"Freechain corrupt");
877 				set_freepointer(s, object, NULL);
878 				break;
879 			} else {
880 				slab_err(s, page, "Freepointer corrupt");
881 				page->freelist = NULL;
882 				page->inuse = page->objects;
883 				slab_fix(s, "Freelist cleared");
884 				return 0;
885 			}
886 			break;
887 		}
888 		object = fp;
889 		fp = get_freepointer(s, object);
890 		nr++;
891 	}
892 
893 	max_objects = order_objects(compound_order(page), s->size, s->reserved);
894 	if (max_objects > MAX_OBJS_PER_PAGE)
895 		max_objects = MAX_OBJS_PER_PAGE;
896 
897 	if (page->objects != max_objects) {
898 		slab_err(s, page, "Wrong number of objects. Found %d but "
899 			"should be %d", page->objects, max_objects);
900 		page->objects = max_objects;
901 		slab_fix(s, "Number of objects adjusted.");
902 	}
903 	if (page->inuse != page->objects - nr) {
904 		slab_err(s, page, "Wrong object count. Counter is %d but "
905 			"counted were %d", page->inuse, page->objects - nr);
906 		page->inuse = page->objects - nr;
907 		slab_fix(s, "Object count adjusted.");
908 	}
909 	return search == NULL;
910 }
911 
912 static void trace(struct kmem_cache *s, struct page *page, void *object,
913 								int alloc)
914 {
915 	if (s->flags & SLAB_TRACE) {
916 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
917 			s->name,
918 			alloc ? "alloc" : "free",
919 			object, page->inuse,
920 			page->freelist);
921 
922 		if (!alloc)
923 			print_section("Object ", (void *)object, s->objsize);
924 
925 		dump_stack();
926 	}
927 }
928 
929 /*
930  * Hooks for other subsystems that check memory allocations. In a typical
931  * production configuration these hooks all should produce no code at all.
932  */
933 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
934 {
935 	flags &= gfp_allowed_mask;
936 	lockdep_trace_alloc(flags);
937 	might_sleep_if(flags & __GFP_WAIT);
938 
939 	return should_failslab(s->objsize, flags, s->flags);
940 }
941 
942 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
943 {
944 	flags &= gfp_allowed_mask;
945 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
946 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
947 }
948 
949 static inline void slab_free_hook(struct kmem_cache *s, void *x)
950 {
951 	kmemleak_free_recursive(x, s->flags);
952 
953 	/*
954 	 * Trouble is that we may no longer disable interupts in the fast path
955 	 * So in order to make the debug calls that expect irqs to be
956 	 * disabled we need to disable interrupts temporarily.
957 	 */
958 #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
959 	{
960 		unsigned long flags;
961 
962 		local_irq_save(flags);
963 		kmemcheck_slab_free(s, x, s->objsize);
964 		debug_check_no_locks_freed(x, s->objsize);
965 		local_irq_restore(flags);
966 	}
967 #endif
968 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
969 		debug_check_no_obj_freed(x, s->objsize);
970 }
971 
972 /*
973  * Tracking of fully allocated slabs for debugging purposes.
974  *
975  * list_lock must be held.
976  */
977 static void add_full(struct kmem_cache *s,
978 	struct kmem_cache_node *n, struct page *page)
979 {
980 	if (!(s->flags & SLAB_STORE_USER))
981 		return;
982 
983 	list_add(&page->lru, &n->full);
984 }
985 
986 /*
987  * list_lock must be held.
988  */
989 static void remove_full(struct kmem_cache *s, struct page *page)
990 {
991 	if (!(s->flags & SLAB_STORE_USER))
992 		return;
993 
994 	list_del(&page->lru);
995 }
996 
997 /* Tracking of the number of slabs for debugging purposes */
998 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
999 {
1000 	struct kmem_cache_node *n = get_node(s, node);
1001 
1002 	return atomic_long_read(&n->nr_slabs);
1003 }
1004 
1005 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1006 {
1007 	return atomic_long_read(&n->nr_slabs);
1008 }
1009 
1010 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1011 {
1012 	struct kmem_cache_node *n = get_node(s, node);
1013 
1014 	/*
1015 	 * May be called early in order to allocate a slab for the
1016 	 * kmem_cache_node structure. Solve the chicken-egg
1017 	 * dilemma by deferring the increment of the count during
1018 	 * bootstrap (see early_kmem_cache_node_alloc).
1019 	 */
1020 	if (n) {
1021 		atomic_long_inc(&n->nr_slabs);
1022 		atomic_long_add(objects, &n->total_objects);
1023 	}
1024 }
1025 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1026 {
1027 	struct kmem_cache_node *n = get_node(s, node);
1028 
1029 	atomic_long_dec(&n->nr_slabs);
1030 	atomic_long_sub(objects, &n->total_objects);
1031 }
1032 
1033 /* Object debug checks for alloc/free paths */
1034 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1035 								void *object)
1036 {
1037 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1038 		return;
1039 
1040 	init_object(s, object, SLUB_RED_INACTIVE);
1041 	init_tracking(s, object);
1042 }
1043 
1044 static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
1045 					void *object, unsigned long addr)
1046 {
1047 	if (!check_slab(s, page))
1048 		goto bad;
1049 
1050 	if (!check_valid_pointer(s, page, object)) {
1051 		object_err(s, page, object, "Freelist Pointer check fails");
1052 		goto bad;
1053 	}
1054 
1055 	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1056 		goto bad;
1057 
1058 	/* Success perform special debug activities for allocs */
1059 	if (s->flags & SLAB_STORE_USER)
1060 		set_track(s, object, TRACK_ALLOC, addr);
1061 	trace(s, page, object, 1);
1062 	init_object(s, object, SLUB_RED_ACTIVE);
1063 	return 1;
1064 
1065 bad:
1066 	if (PageSlab(page)) {
1067 		/*
1068 		 * If this is a slab page then lets do the best we can
1069 		 * to avoid issues in the future. Marking all objects
1070 		 * as used avoids touching the remaining objects.
1071 		 */
1072 		slab_fix(s, "Marking all objects used");
1073 		page->inuse = page->objects;
1074 		page->freelist = NULL;
1075 	}
1076 	return 0;
1077 }
1078 
1079 static noinline int free_debug_processing(struct kmem_cache *s,
1080 		 struct page *page, void *object, unsigned long addr)
1081 {
1082 	unsigned long flags;
1083 	int rc = 0;
1084 
1085 	local_irq_save(flags);
1086 	slab_lock(page);
1087 
1088 	if (!check_slab(s, page))
1089 		goto fail;
1090 
1091 	if (!check_valid_pointer(s, page, object)) {
1092 		slab_err(s, page, "Invalid object pointer 0x%p", object);
1093 		goto fail;
1094 	}
1095 
1096 	if (on_freelist(s, page, object)) {
1097 		object_err(s, page, object, "Object already free");
1098 		goto fail;
1099 	}
1100 
1101 	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1102 		goto out;
1103 
1104 	if (unlikely(s != page->slab)) {
1105 		if (!PageSlab(page)) {
1106 			slab_err(s, page, "Attempt to free object(0x%p) "
1107 				"outside of slab", object);
1108 		} else if (!page->slab) {
1109 			printk(KERN_ERR
1110 				"SLUB <none>: no slab for object 0x%p.\n",
1111 						object);
1112 			dump_stack();
1113 		} else
1114 			object_err(s, page, object,
1115 					"page slab pointer corrupt.");
1116 		goto fail;
1117 	}
1118 
1119 	if (s->flags & SLAB_STORE_USER)
1120 		set_track(s, object, TRACK_FREE, addr);
1121 	trace(s, page, object, 0);
1122 	init_object(s, object, SLUB_RED_INACTIVE);
1123 	rc = 1;
1124 out:
1125 	slab_unlock(page);
1126 	local_irq_restore(flags);
1127 	return rc;
1128 
1129 fail:
1130 	slab_fix(s, "Object at 0x%p not freed", object);
1131 	goto out;
1132 }
1133 
1134 static int __init setup_slub_debug(char *str)
1135 {
1136 	slub_debug = DEBUG_DEFAULT_FLAGS;
1137 	if (*str++ != '=' || !*str)
1138 		/*
1139 		 * No options specified. Switch on full debugging.
1140 		 */
1141 		goto out;
1142 
1143 	if (*str == ',')
1144 		/*
1145 		 * No options but restriction on slabs. This means full
1146 		 * debugging for slabs matching a pattern.
1147 		 */
1148 		goto check_slabs;
1149 
1150 	if (tolower(*str) == 'o') {
1151 		/*
1152 		 * Avoid enabling debugging on caches if its minimum order
1153 		 * would increase as a result.
1154 		 */
1155 		disable_higher_order_debug = 1;
1156 		goto out;
1157 	}
1158 
1159 	slub_debug = 0;
1160 	if (*str == '-')
1161 		/*
1162 		 * Switch off all debugging measures.
1163 		 */
1164 		goto out;
1165 
1166 	/*
1167 	 * Determine which debug features should be switched on
1168 	 */
1169 	for (; *str && *str != ','; str++) {
1170 		switch (tolower(*str)) {
1171 		case 'f':
1172 			slub_debug |= SLAB_DEBUG_FREE;
1173 			break;
1174 		case 'z':
1175 			slub_debug |= SLAB_RED_ZONE;
1176 			break;
1177 		case 'p':
1178 			slub_debug |= SLAB_POISON;
1179 			break;
1180 		case 'u':
1181 			slub_debug |= SLAB_STORE_USER;
1182 			break;
1183 		case 't':
1184 			slub_debug |= SLAB_TRACE;
1185 			break;
1186 		case 'a':
1187 			slub_debug |= SLAB_FAILSLAB;
1188 			break;
1189 		default:
1190 			printk(KERN_ERR "slub_debug option '%c' "
1191 				"unknown. skipped\n", *str);
1192 		}
1193 	}
1194 
1195 check_slabs:
1196 	if (*str == ',')
1197 		slub_debug_slabs = str + 1;
1198 out:
1199 	return 1;
1200 }
1201 
1202 __setup("slub_debug", setup_slub_debug);
1203 
1204 static unsigned long kmem_cache_flags(unsigned long objsize,
1205 	unsigned long flags, const char *name,
1206 	void (*ctor)(void *))
1207 {
1208 	/*
1209 	 * Enable debugging if selected on the kernel commandline.
1210 	 */
1211 	if (slub_debug && (!slub_debug_slabs ||
1212 		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1213 		flags |= slub_debug;
1214 
1215 	return flags;
1216 }
1217 #else
1218 static inline void setup_object_debug(struct kmem_cache *s,
1219 			struct page *page, void *object) {}
1220 
1221 static inline int alloc_debug_processing(struct kmem_cache *s,
1222 	struct page *page, void *object, unsigned long addr) { return 0; }
1223 
1224 static inline int free_debug_processing(struct kmem_cache *s,
1225 	struct page *page, void *object, unsigned long addr) { return 0; }
1226 
1227 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1228 			{ return 1; }
1229 static inline int check_object(struct kmem_cache *s, struct page *page,
1230 			void *object, u8 val) { return 1; }
1231 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1232 					struct page *page) {}
1233 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
1234 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1235 	unsigned long flags, const char *name,
1236 	void (*ctor)(void *))
1237 {
1238 	return flags;
1239 }
1240 #define slub_debug 0
1241 
1242 #define disable_higher_order_debug 0
1243 
1244 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1245 							{ return 0; }
1246 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1247 							{ return 0; }
1248 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1249 							int objects) {}
1250 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1251 							int objects) {}
1252 
1253 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1254 							{ return 0; }
1255 
1256 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1257 		void *object) {}
1258 
1259 static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1260 
1261 #endif /* CONFIG_SLUB_DEBUG */
1262 
1263 /*
1264  * Slab allocation and freeing
1265  */
1266 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1267 					struct kmem_cache_order_objects oo)
1268 {
1269 	int order = oo_order(oo);
1270 
1271 	flags |= __GFP_NOTRACK;
1272 
1273 	if (node == NUMA_NO_NODE)
1274 		return alloc_pages(flags, order);
1275 	else
1276 		return alloc_pages_exact_node(node, flags, order);
1277 }
1278 
1279 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1280 {
1281 	struct page *page;
1282 	struct kmem_cache_order_objects oo = s->oo;
1283 	gfp_t alloc_gfp;
1284 
1285 	flags &= gfp_allowed_mask;
1286 
1287 	if (flags & __GFP_WAIT)
1288 		local_irq_enable();
1289 
1290 	flags |= s->allocflags;
1291 
1292 	/*
1293 	 * Let the initial higher-order allocation fail under memory pressure
1294 	 * so we fall-back to the minimum order allocation.
1295 	 */
1296 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1297 
1298 	page = alloc_slab_page(alloc_gfp, node, oo);
1299 	if (unlikely(!page)) {
1300 		oo = s->min;
1301 		/*
1302 		 * Allocation may have failed due to fragmentation.
1303 		 * Try a lower order alloc if possible
1304 		 */
1305 		page = alloc_slab_page(flags, node, oo);
1306 
1307 		if (page)
1308 			stat(s, ORDER_FALLBACK);
1309 	}
1310 
1311 	if (flags & __GFP_WAIT)
1312 		local_irq_disable();
1313 
1314 	if (!page)
1315 		return NULL;
1316 
1317 	if (kmemcheck_enabled
1318 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1319 		int pages = 1 << oo_order(oo);
1320 
1321 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1322 
1323 		/*
1324 		 * Objects from caches that have a constructor don't get
1325 		 * cleared when they're allocated, so we need to do it here.
1326 		 */
1327 		if (s->ctor)
1328 			kmemcheck_mark_uninitialized_pages(page, pages);
1329 		else
1330 			kmemcheck_mark_unallocated_pages(page, pages);
1331 	}
1332 
1333 	page->objects = oo_objects(oo);
1334 	mod_zone_page_state(page_zone(page),
1335 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1336 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1337 		1 << oo_order(oo));
1338 
1339 	return page;
1340 }
1341 
1342 static void setup_object(struct kmem_cache *s, struct page *page,
1343 				void *object)
1344 {
1345 	setup_object_debug(s, page, object);
1346 	if (unlikely(s->ctor))
1347 		s->ctor(object);
1348 }
1349 
1350 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1351 {
1352 	struct page *page;
1353 	void *start;
1354 	void *last;
1355 	void *p;
1356 
1357 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1358 
1359 	page = allocate_slab(s,
1360 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1361 	if (!page)
1362 		goto out;
1363 
1364 	inc_slabs_node(s, page_to_nid(page), page->objects);
1365 	page->slab = s;
1366 	page->flags |= 1 << PG_slab;
1367 
1368 	start = page_address(page);
1369 
1370 	if (unlikely(s->flags & SLAB_POISON))
1371 		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1372 
1373 	last = start;
1374 	for_each_object(p, s, start, page->objects) {
1375 		setup_object(s, page, last);
1376 		set_freepointer(s, last, p);
1377 		last = p;
1378 	}
1379 	setup_object(s, page, last);
1380 	set_freepointer(s, last, NULL);
1381 
1382 	page->freelist = start;
1383 	page->inuse = page->objects;
1384 	page->frozen = 1;
1385 out:
1386 	return page;
1387 }
1388 
1389 static void __free_slab(struct kmem_cache *s, struct page *page)
1390 {
1391 	int order = compound_order(page);
1392 	int pages = 1 << order;
1393 
1394 	if (kmem_cache_debug(s)) {
1395 		void *p;
1396 
1397 		slab_pad_check(s, page);
1398 		for_each_object(p, s, page_address(page),
1399 						page->objects)
1400 			check_object(s, page, p, SLUB_RED_INACTIVE);
1401 	}
1402 
1403 	kmemcheck_free_shadow(page, compound_order(page));
1404 
1405 	mod_zone_page_state(page_zone(page),
1406 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1407 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1408 		-pages);
1409 
1410 	__ClearPageSlab(page);
1411 	reset_page_mapcount(page);
1412 	if (current->reclaim_state)
1413 		current->reclaim_state->reclaimed_slab += pages;
1414 	__free_pages(page, order);
1415 }
1416 
1417 #define need_reserve_slab_rcu						\
1418 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1419 
1420 static void rcu_free_slab(struct rcu_head *h)
1421 {
1422 	struct page *page;
1423 
1424 	if (need_reserve_slab_rcu)
1425 		page = virt_to_head_page(h);
1426 	else
1427 		page = container_of((struct list_head *)h, struct page, lru);
1428 
1429 	__free_slab(page->slab, page);
1430 }
1431 
1432 static void free_slab(struct kmem_cache *s, struct page *page)
1433 {
1434 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1435 		struct rcu_head *head;
1436 
1437 		if (need_reserve_slab_rcu) {
1438 			int order = compound_order(page);
1439 			int offset = (PAGE_SIZE << order) - s->reserved;
1440 
1441 			VM_BUG_ON(s->reserved != sizeof(*head));
1442 			head = page_address(page) + offset;
1443 		} else {
1444 			/*
1445 			 * RCU free overloads the RCU head over the LRU
1446 			 */
1447 			head = (void *)&page->lru;
1448 		}
1449 
1450 		call_rcu(head, rcu_free_slab);
1451 	} else
1452 		__free_slab(s, page);
1453 }
1454 
1455 static void discard_slab(struct kmem_cache *s, struct page *page)
1456 {
1457 	dec_slabs_node(s, page_to_nid(page), page->objects);
1458 	free_slab(s, page);
1459 }
1460 
1461 /*
1462  * Management of partially allocated slabs.
1463  *
1464  * list_lock must be held.
1465  */
1466 static inline void add_partial(struct kmem_cache_node *n,
1467 				struct page *page, int tail)
1468 {
1469 	n->nr_partial++;
1470 	if (tail == DEACTIVATE_TO_TAIL)
1471 		list_add_tail(&page->lru, &n->partial);
1472 	else
1473 		list_add(&page->lru, &n->partial);
1474 }
1475 
1476 /*
1477  * list_lock must be held.
1478  */
1479 static inline void remove_partial(struct kmem_cache_node *n,
1480 					struct page *page)
1481 {
1482 	list_del(&page->lru);
1483 	n->nr_partial--;
1484 }
1485 
1486 /*
1487  * Lock slab, remove from the partial list and put the object into the
1488  * per cpu freelist.
1489  *
1490  * Returns a list of objects or NULL if it fails.
1491  *
1492  * Must hold list_lock.
1493  */
1494 static inline void *acquire_slab(struct kmem_cache *s,
1495 		struct kmem_cache_node *n, struct page *page,
1496 		int mode)
1497 {
1498 	void *freelist;
1499 	unsigned long counters;
1500 	struct page new;
1501 
1502 	/*
1503 	 * Zap the freelist and set the frozen bit.
1504 	 * The old freelist is the list of objects for the
1505 	 * per cpu allocation list.
1506 	 */
1507 	do {
1508 		freelist = page->freelist;
1509 		counters = page->counters;
1510 		new.counters = counters;
1511 		if (mode)
1512 			new.inuse = page->objects;
1513 
1514 		VM_BUG_ON(new.frozen);
1515 		new.frozen = 1;
1516 
1517 	} while (!__cmpxchg_double_slab(s, page,
1518 			freelist, counters,
1519 			NULL, new.counters,
1520 			"lock and freeze"));
1521 
1522 	remove_partial(n, page);
1523 	return freelist;
1524 }
1525 
1526 static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1527 
1528 /*
1529  * Try to allocate a partial slab from a specific node.
1530  */
1531 static void *get_partial_node(struct kmem_cache *s,
1532 		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
1533 {
1534 	struct page *page, *page2;
1535 	void *object = NULL;
1536 
1537 	/*
1538 	 * Racy check. If we mistakenly see no partial slabs then we
1539 	 * just allocate an empty slab. If we mistakenly try to get a
1540 	 * partial slab and there is none available then get_partials()
1541 	 * will return NULL.
1542 	 */
1543 	if (!n || !n->nr_partial)
1544 		return NULL;
1545 
1546 	spin_lock(&n->list_lock);
1547 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
1548 		void *t = acquire_slab(s, n, page, object == NULL);
1549 		int available;
1550 
1551 		if (!t)
1552 			break;
1553 
1554 		if (!object) {
1555 			c->page = page;
1556 			c->node = page_to_nid(page);
1557 			stat(s, ALLOC_FROM_PARTIAL);
1558 			object = t;
1559 			available =  page->objects - page->inuse;
1560 		} else {
1561 			page->freelist = t;
1562 			available = put_cpu_partial(s, page, 0);
1563 		}
1564 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
1565 			break;
1566 
1567 	}
1568 	spin_unlock(&n->list_lock);
1569 	return object;
1570 }
1571 
1572 /*
1573  * Get a page from somewhere. Search in increasing NUMA distances.
1574  */
1575 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
1576 		struct kmem_cache_cpu *c)
1577 {
1578 #ifdef CONFIG_NUMA
1579 	struct zonelist *zonelist;
1580 	struct zoneref *z;
1581 	struct zone *zone;
1582 	enum zone_type high_zoneidx = gfp_zone(flags);
1583 	void *object;
1584 	unsigned int cpuset_mems_cookie;
1585 
1586 	/*
1587 	 * The defrag ratio allows a configuration of the tradeoffs between
1588 	 * inter node defragmentation and node local allocations. A lower
1589 	 * defrag_ratio increases the tendency to do local allocations
1590 	 * instead of attempting to obtain partial slabs from other nodes.
1591 	 *
1592 	 * If the defrag_ratio is set to 0 then kmalloc() always
1593 	 * returns node local objects. If the ratio is higher then kmalloc()
1594 	 * may return off node objects because partial slabs are obtained
1595 	 * from other nodes and filled up.
1596 	 *
1597 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1598 	 * defrag_ratio = 1000) then every (well almost) allocation will
1599 	 * first attempt to defrag slab caches on other nodes. This means
1600 	 * scanning over all nodes to look for partial slabs which may be
1601 	 * expensive if we do it every time we are trying to find a slab
1602 	 * with available objects.
1603 	 */
1604 	if (!s->remote_node_defrag_ratio ||
1605 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1606 		return NULL;
1607 
1608 	do {
1609 		cpuset_mems_cookie = get_mems_allowed();
1610 		zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1611 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1612 			struct kmem_cache_node *n;
1613 
1614 			n = get_node(s, zone_to_nid(zone));
1615 
1616 			if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1617 					n->nr_partial > s->min_partial) {
1618 				object = get_partial_node(s, n, c);
1619 				if (object) {
1620 					/*
1621 					 * Return the object even if
1622 					 * put_mems_allowed indicated that
1623 					 * the cpuset mems_allowed was
1624 					 * updated in parallel. It's a
1625 					 * harmless race between the alloc
1626 					 * and the cpuset update.
1627 					 */
1628 					put_mems_allowed(cpuset_mems_cookie);
1629 					return object;
1630 				}
1631 			}
1632 		}
1633 	} while (!put_mems_allowed(cpuset_mems_cookie));
1634 #endif
1635 	return NULL;
1636 }
1637 
1638 /*
1639  * Get a partial page, lock it and return it.
1640  */
1641 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1642 		struct kmem_cache_cpu *c)
1643 {
1644 	void *object;
1645 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
1646 
1647 	object = get_partial_node(s, get_node(s, searchnode), c);
1648 	if (object || node != NUMA_NO_NODE)
1649 		return object;
1650 
1651 	return get_any_partial(s, flags, c);
1652 }
1653 
1654 #ifdef CONFIG_PREEMPT
1655 /*
1656  * Calculate the next globally unique transaction for disambiguiation
1657  * during cmpxchg. The transactions start with the cpu number and are then
1658  * incremented by CONFIG_NR_CPUS.
1659  */
1660 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1661 #else
1662 /*
1663  * No preemption supported therefore also no need to check for
1664  * different cpus.
1665  */
1666 #define TID_STEP 1
1667 #endif
1668 
1669 static inline unsigned long next_tid(unsigned long tid)
1670 {
1671 	return tid + TID_STEP;
1672 }
1673 
1674 static inline unsigned int tid_to_cpu(unsigned long tid)
1675 {
1676 	return tid % TID_STEP;
1677 }
1678 
1679 static inline unsigned long tid_to_event(unsigned long tid)
1680 {
1681 	return tid / TID_STEP;
1682 }
1683 
1684 static inline unsigned int init_tid(int cpu)
1685 {
1686 	return cpu;
1687 }
1688 
1689 static inline void note_cmpxchg_failure(const char *n,
1690 		const struct kmem_cache *s, unsigned long tid)
1691 {
1692 #ifdef SLUB_DEBUG_CMPXCHG
1693 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1694 
1695 	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1696 
1697 #ifdef CONFIG_PREEMPT
1698 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1699 		printk("due to cpu change %d -> %d\n",
1700 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
1701 	else
1702 #endif
1703 	if (tid_to_event(tid) != tid_to_event(actual_tid))
1704 		printk("due to cpu running other code. Event %ld->%ld\n",
1705 			tid_to_event(tid), tid_to_event(actual_tid));
1706 	else
1707 		printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1708 			actual_tid, tid, next_tid(tid));
1709 #endif
1710 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1711 }
1712 
1713 void init_kmem_cache_cpus(struct kmem_cache *s)
1714 {
1715 	int cpu;
1716 
1717 	for_each_possible_cpu(cpu)
1718 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1719 }
1720 
1721 /*
1722  * Remove the cpu slab
1723  */
1724 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1725 {
1726 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
1727 	struct page *page = c->page;
1728 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1729 	int lock = 0;
1730 	enum slab_modes l = M_NONE, m = M_NONE;
1731 	void *freelist;
1732 	void *nextfree;
1733 	int tail = DEACTIVATE_TO_HEAD;
1734 	struct page new;
1735 	struct page old;
1736 
1737 	if (page->freelist) {
1738 		stat(s, DEACTIVATE_REMOTE_FREES);
1739 		tail = DEACTIVATE_TO_TAIL;
1740 	}
1741 
1742 	c->tid = next_tid(c->tid);
1743 	c->page = NULL;
1744 	freelist = c->freelist;
1745 	c->freelist = NULL;
1746 
1747 	/*
1748 	 * Stage one: Free all available per cpu objects back
1749 	 * to the page freelist while it is still frozen. Leave the
1750 	 * last one.
1751 	 *
1752 	 * There is no need to take the list->lock because the page
1753 	 * is still frozen.
1754 	 */
1755 	while (freelist && (nextfree = get_freepointer(s, freelist))) {
1756 		void *prior;
1757 		unsigned long counters;
1758 
1759 		do {
1760 			prior = page->freelist;
1761 			counters = page->counters;
1762 			set_freepointer(s, freelist, prior);
1763 			new.counters = counters;
1764 			new.inuse--;
1765 			VM_BUG_ON(!new.frozen);
1766 
1767 		} while (!__cmpxchg_double_slab(s, page,
1768 			prior, counters,
1769 			freelist, new.counters,
1770 			"drain percpu freelist"));
1771 
1772 		freelist = nextfree;
1773 	}
1774 
1775 	/*
1776 	 * Stage two: Ensure that the page is unfrozen while the
1777 	 * list presence reflects the actual number of objects
1778 	 * during unfreeze.
1779 	 *
1780 	 * We setup the list membership and then perform a cmpxchg
1781 	 * with the count. If there is a mismatch then the page
1782 	 * is not unfrozen but the page is on the wrong list.
1783 	 *
1784 	 * Then we restart the process which may have to remove
1785 	 * the page from the list that we just put it on again
1786 	 * because the number of objects in the slab may have
1787 	 * changed.
1788 	 */
1789 redo:
1790 
1791 	old.freelist = page->freelist;
1792 	old.counters = page->counters;
1793 	VM_BUG_ON(!old.frozen);
1794 
1795 	/* Determine target state of the slab */
1796 	new.counters = old.counters;
1797 	if (freelist) {
1798 		new.inuse--;
1799 		set_freepointer(s, freelist, old.freelist);
1800 		new.freelist = freelist;
1801 	} else
1802 		new.freelist = old.freelist;
1803 
1804 	new.frozen = 0;
1805 
1806 	if (!new.inuse && n->nr_partial > s->min_partial)
1807 		m = M_FREE;
1808 	else if (new.freelist) {
1809 		m = M_PARTIAL;
1810 		if (!lock) {
1811 			lock = 1;
1812 			/*
1813 			 * Taking the spinlock removes the possiblity
1814 			 * that acquire_slab() will see a slab page that
1815 			 * is frozen
1816 			 */
1817 			spin_lock(&n->list_lock);
1818 		}
1819 	} else {
1820 		m = M_FULL;
1821 		if (kmem_cache_debug(s) && !lock) {
1822 			lock = 1;
1823 			/*
1824 			 * This also ensures that the scanning of full
1825 			 * slabs from diagnostic functions will not see
1826 			 * any frozen slabs.
1827 			 */
1828 			spin_lock(&n->list_lock);
1829 		}
1830 	}
1831 
1832 	if (l != m) {
1833 
1834 		if (l == M_PARTIAL)
1835 
1836 			remove_partial(n, page);
1837 
1838 		else if (l == M_FULL)
1839 
1840 			remove_full(s, page);
1841 
1842 		if (m == M_PARTIAL) {
1843 
1844 			add_partial(n, page, tail);
1845 			stat(s, tail);
1846 
1847 		} else if (m == M_FULL) {
1848 
1849 			stat(s, DEACTIVATE_FULL);
1850 			add_full(s, n, page);
1851 
1852 		}
1853 	}
1854 
1855 	l = m;
1856 	if (!__cmpxchg_double_slab(s, page,
1857 				old.freelist, old.counters,
1858 				new.freelist, new.counters,
1859 				"unfreezing slab"))
1860 		goto redo;
1861 
1862 	if (lock)
1863 		spin_unlock(&n->list_lock);
1864 
1865 	if (m == M_FREE) {
1866 		stat(s, DEACTIVATE_EMPTY);
1867 		discard_slab(s, page);
1868 		stat(s, FREE_SLAB);
1869 	}
1870 }
1871 
1872 /* Unfreeze all the cpu partial slabs */
1873 static void unfreeze_partials(struct kmem_cache *s)
1874 {
1875 	struct kmem_cache_node *n = NULL;
1876 	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
1877 	struct page *page, *discard_page = NULL;
1878 
1879 	while ((page = c->partial)) {
1880 		enum slab_modes { M_PARTIAL, M_FREE };
1881 		enum slab_modes l, m;
1882 		struct page new;
1883 		struct page old;
1884 
1885 		c->partial = page->next;
1886 		l = M_FREE;
1887 
1888 		do {
1889 
1890 			old.freelist = page->freelist;
1891 			old.counters = page->counters;
1892 			VM_BUG_ON(!old.frozen);
1893 
1894 			new.counters = old.counters;
1895 			new.freelist = old.freelist;
1896 
1897 			new.frozen = 0;
1898 
1899 			if (!new.inuse && (!n || n->nr_partial > s->min_partial))
1900 				m = M_FREE;
1901 			else {
1902 				struct kmem_cache_node *n2 = get_node(s,
1903 							page_to_nid(page));
1904 
1905 				m = M_PARTIAL;
1906 				if (n != n2) {
1907 					if (n)
1908 						spin_unlock(&n->list_lock);
1909 
1910 					n = n2;
1911 					spin_lock(&n->list_lock);
1912 				}
1913 			}
1914 
1915 			if (l != m) {
1916 				if (l == M_PARTIAL) {
1917 					remove_partial(n, page);
1918 					stat(s, FREE_REMOVE_PARTIAL);
1919 				} else {
1920 					add_partial(n, page,
1921 						DEACTIVATE_TO_TAIL);
1922 					stat(s, FREE_ADD_PARTIAL);
1923 				}
1924 
1925 				l = m;
1926 			}
1927 
1928 		} while (!cmpxchg_double_slab(s, page,
1929 				old.freelist, old.counters,
1930 				new.freelist, new.counters,
1931 				"unfreezing slab"));
1932 
1933 		if (m == M_FREE) {
1934 			page->next = discard_page;
1935 			discard_page = page;
1936 		}
1937 	}
1938 
1939 	if (n)
1940 		spin_unlock(&n->list_lock);
1941 
1942 	while (discard_page) {
1943 		page = discard_page;
1944 		discard_page = discard_page->next;
1945 
1946 		stat(s, DEACTIVATE_EMPTY);
1947 		discard_slab(s, page);
1948 		stat(s, FREE_SLAB);
1949 	}
1950 }
1951 
1952 /*
1953  * Put a page that was just frozen (in __slab_free) into a partial page
1954  * slot if available. This is done without interrupts disabled and without
1955  * preemption disabled. The cmpxchg is racy and may put the partial page
1956  * onto a random cpus partial slot.
1957  *
1958  * If we did not find a slot then simply move all the partials to the
1959  * per node partial list.
1960  */
1961 int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1962 {
1963 	struct page *oldpage;
1964 	int pages;
1965 	int pobjects;
1966 
1967 	do {
1968 		pages = 0;
1969 		pobjects = 0;
1970 		oldpage = this_cpu_read(s->cpu_slab->partial);
1971 
1972 		if (oldpage) {
1973 			pobjects = oldpage->pobjects;
1974 			pages = oldpage->pages;
1975 			if (drain && pobjects > s->cpu_partial) {
1976 				unsigned long flags;
1977 				/*
1978 				 * partial array is full. Move the existing
1979 				 * set to the per node partial list.
1980 				 */
1981 				local_irq_save(flags);
1982 				unfreeze_partials(s);
1983 				local_irq_restore(flags);
1984 				pobjects = 0;
1985 				pages = 0;
1986 			}
1987 		}
1988 
1989 		pages++;
1990 		pobjects += page->objects - page->inuse;
1991 
1992 		page->pages = pages;
1993 		page->pobjects = pobjects;
1994 		page->next = oldpage;
1995 
1996 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
1997 	stat(s, CPU_PARTIAL_FREE);
1998 	return pobjects;
1999 }
2000 
2001 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2002 {
2003 	stat(s, CPUSLAB_FLUSH);
2004 	deactivate_slab(s, c);
2005 }
2006 
2007 /*
2008  * Flush cpu slab.
2009  *
2010  * Called from IPI handler with interrupts disabled.
2011  */
2012 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2013 {
2014 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2015 
2016 	if (likely(c)) {
2017 		if (c->page)
2018 			flush_slab(s, c);
2019 
2020 		unfreeze_partials(s);
2021 	}
2022 }
2023 
2024 static void flush_cpu_slab(void *d)
2025 {
2026 	struct kmem_cache *s = d;
2027 
2028 	__flush_cpu_slab(s, smp_processor_id());
2029 }
2030 
2031 static void flush_all(struct kmem_cache *s)
2032 {
2033 	on_each_cpu(flush_cpu_slab, s, 1);
2034 }
2035 
2036 /*
2037  * Check if the objects in a per cpu structure fit numa
2038  * locality expectations.
2039  */
2040 static inline int node_match(struct kmem_cache_cpu *c, int node)
2041 {
2042 #ifdef CONFIG_NUMA
2043 	if (node != NUMA_NO_NODE && c->node != node)
2044 		return 0;
2045 #endif
2046 	return 1;
2047 }
2048 
2049 static int count_free(struct page *page)
2050 {
2051 	return page->objects - page->inuse;
2052 }
2053 
2054 static unsigned long count_partial(struct kmem_cache_node *n,
2055 					int (*get_count)(struct page *))
2056 {
2057 	unsigned long flags;
2058 	unsigned long x = 0;
2059 	struct page *page;
2060 
2061 	spin_lock_irqsave(&n->list_lock, flags);
2062 	list_for_each_entry(page, &n->partial, lru)
2063 		x += get_count(page);
2064 	spin_unlock_irqrestore(&n->list_lock, flags);
2065 	return x;
2066 }
2067 
2068 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2069 {
2070 #ifdef CONFIG_SLUB_DEBUG
2071 	return atomic_long_read(&n->total_objects);
2072 #else
2073 	return 0;
2074 #endif
2075 }
2076 
2077 static noinline void
2078 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2079 {
2080 	int node;
2081 
2082 	printk(KERN_WARNING
2083 		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2084 		nid, gfpflags);
2085 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
2086 		"default order: %d, min order: %d\n", s->name, s->objsize,
2087 		s->size, oo_order(s->oo), oo_order(s->min));
2088 
2089 	if (oo_order(s->min) > get_order(s->objsize))
2090 		printk(KERN_WARNING "  %s debugging increased min order, use "
2091 		       "slub_debug=O to disable.\n", s->name);
2092 
2093 	for_each_online_node(node) {
2094 		struct kmem_cache_node *n = get_node(s, node);
2095 		unsigned long nr_slabs;
2096 		unsigned long nr_objs;
2097 		unsigned long nr_free;
2098 
2099 		if (!n)
2100 			continue;
2101 
2102 		nr_free  = count_partial(n, count_free);
2103 		nr_slabs = node_nr_slabs(n);
2104 		nr_objs  = node_nr_objs(n);
2105 
2106 		printk(KERN_WARNING
2107 			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2108 			node, nr_slabs, nr_objs, nr_free);
2109 	}
2110 }
2111 
2112 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2113 			int node, struct kmem_cache_cpu **pc)
2114 {
2115 	void *object;
2116 	struct kmem_cache_cpu *c;
2117 	struct page *page = new_slab(s, flags, node);
2118 
2119 	if (page) {
2120 		c = __this_cpu_ptr(s->cpu_slab);
2121 		if (c->page)
2122 			flush_slab(s, c);
2123 
2124 		/*
2125 		 * No other reference to the page yet so we can
2126 		 * muck around with it freely without cmpxchg
2127 		 */
2128 		object = page->freelist;
2129 		page->freelist = NULL;
2130 
2131 		stat(s, ALLOC_SLAB);
2132 		c->node = page_to_nid(page);
2133 		c->page = page;
2134 		*pc = c;
2135 	} else
2136 		object = NULL;
2137 
2138 	return object;
2139 }
2140 
2141 /*
2142  * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
2143  * or deactivate the page.
2144  *
2145  * The page is still frozen if the return value is not NULL.
2146  *
2147  * If this function returns NULL then the page has been unfrozen.
2148  */
2149 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2150 {
2151 	struct page new;
2152 	unsigned long counters;
2153 	void *freelist;
2154 
2155 	do {
2156 		freelist = page->freelist;
2157 		counters = page->counters;
2158 		new.counters = counters;
2159 		VM_BUG_ON(!new.frozen);
2160 
2161 		new.inuse = page->objects;
2162 		new.frozen = freelist != NULL;
2163 
2164 	} while (!cmpxchg_double_slab(s, page,
2165 		freelist, counters,
2166 		NULL, new.counters,
2167 		"get_freelist"));
2168 
2169 	return freelist;
2170 }
2171 
2172 /*
2173  * Slow path. The lockless freelist is empty or we need to perform
2174  * debugging duties.
2175  *
2176  * Processing is still very fast if new objects have been freed to the
2177  * regular freelist. In that case we simply take over the regular freelist
2178  * as the lockless freelist and zap the regular freelist.
2179  *
2180  * If that is not working then we fall back to the partial lists. We take the
2181  * first element of the freelist as the object to allocate now and move the
2182  * rest of the freelist to the lockless freelist.
2183  *
2184  * And if we were unable to get a new slab from the partial slab lists then
2185  * we need to allocate a new slab. This is the slowest path since it involves
2186  * a call to the page allocator and the setup of a new slab.
2187  */
2188 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2189 			  unsigned long addr, struct kmem_cache_cpu *c)
2190 {
2191 	void **object;
2192 	unsigned long flags;
2193 
2194 	local_irq_save(flags);
2195 #ifdef CONFIG_PREEMPT
2196 	/*
2197 	 * We may have been preempted and rescheduled on a different
2198 	 * cpu before disabling interrupts. Need to reload cpu area
2199 	 * pointer.
2200 	 */
2201 	c = this_cpu_ptr(s->cpu_slab);
2202 #endif
2203 
2204 	if (!c->page)
2205 		goto new_slab;
2206 redo:
2207 	if (unlikely(!node_match(c, node))) {
2208 		stat(s, ALLOC_NODE_MISMATCH);
2209 		deactivate_slab(s, c);
2210 		goto new_slab;
2211 	}
2212 
2213 	/* must check again c->freelist in case of cpu migration or IRQ */
2214 	object = c->freelist;
2215 	if (object)
2216 		goto load_freelist;
2217 
2218 	stat(s, ALLOC_SLOWPATH);
2219 
2220 	object = get_freelist(s, c->page);
2221 
2222 	if (!object) {
2223 		c->page = NULL;
2224 		stat(s, DEACTIVATE_BYPASS);
2225 		goto new_slab;
2226 	}
2227 
2228 	stat(s, ALLOC_REFILL);
2229 
2230 load_freelist:
2231 	c->freelist = get_freepointer(s, object);
2232 	c->tid = next_tid(c->tid);
2233 	local_irq_restore(flags);
2234 	return object;
2235 
2236 new_slab:
2237 
2238 	if (c->partial) {
2239 		c->page = c->partial;
2240 		c->partial = c->page->next;
2241 		c->node = page_to_nid(c->page);
2242 		stat(s, CPU_PARTIAL_ALLOC);
2243 		c->freelist = NULL;
2244 		goto redo;
2245 	}
2246 
2247 	/* Then do expensive stuff like retrieving pages from the partial lists */
2248 	object = get_partial(s, gfpflags, node, c);
2249 
2250 	if (unlikely(!object)) {
2251 
2252 		object = new_slab_objects(s, gfpflags, node, &c);
2253 
2254 		if (unlikely(!object)) {
2255 			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2256 				slab_out_of_memory(s, gfpflags, node);
2257 
2258 			local_irq_restore(flags);
2259 			return NULL;
2260 		}
2261 	}
2262 
2263 	if (likely(!kmem_cache_debug(s)))
2264 		goto load_freelist;
2265 
2266 	/* Only entered in the debug case */
2267 	if (!alloc_debug_processing(s, c->page, object, addr))
2268 		goto new_slab;	/* Slab failed checks. Next slab needed */
2269 
2270 	c->freelist = get_freepointer(s, object);
2271 	deactivate_slab(s, c);
2272 	c->node = NUMA_NO_NODE;
2273 	local_irq_restore(flags);
2274 	return object;
2275 }
2276 
2277 /*
2278  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2279  * have the fastpath folded into their functions. So no function call
2280  * overhead for requests that can be satisfied on the fastpath.
2281  *
2282  * The fastpath works by first checking if the lockless freelist can be used.
2283  * If not then __slab_alloc is called for slow processing.
2284  *
2285  * Otherwise we can simply pick the next object from the lockless free list.
2286  */
2287 static __always_inline void *slab_alloc(struct kmem_cache *s,
2288 		gfp_t gfpflags, int node, unsigned long addr)
2289 {
2290 	void **object;
2291 	struct kmem_cache_cpu *c;
2292 	unsigned long tid;
2293 
2294 	if (slab_pre_alloc_hook(s, gfpflags))
2295 		return NULL;
2296 
2297 redo:
2298 
2299 	/*
2300 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2301 	 * enabled. We may switch back and forth between cpus while
2302 	 * reading from one cpu area. That does not matter as long
2303 	 * as we end up on the original cpu again when doing the cmpxchg.
2304 	 */
2305 	c = __this_cpu_ptr(s->cpu_slab);
2306 
2307 	/*
2308 	 * The transaction ids are globally unique per cpu and per operation on
2309 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2310 	 * occurs on the right processor and that there was no operation on the
2311 	 * linked list in between.
2312 	 */
2313 	tid = c->tid;
2314 	barrier();
2315 
2316 	object = c->freelist;
2317 	if (unlikely(!object || !node_match(c, node)))
2318 
2319 		object = __slab_alloc(s, gfpflags, node, addr, c);
2320 
2321 	else {
2322 		/*
2323 		 * The cmpxchg will only match if there was no additional
2324 		 * operation and if we are on the right processor.
2325 		 *
2326 		 * The cmpxchg does the following atomically (without lock semantics!)
2327 		 * 1. Relocate first pointer to the current per cpu area.
2328 		 * 2. Verify that tid and freelist have not been changed
2329 		 * 3. If they were not changed replace tid and freelist
2330 		 *
2331 		 * Since this is without lock semantics the protection is only against
2332 		 * code executing on this cpu *not* from access by other cpus.
2333 		 */
2334 		if (unlikely(!this_cpu_cmpxchg_double(
2335 				s->cpu_slab->freelist, s->cpu_slab->tid,
2336 				object, tid,
2337 				get_freepointer_safe(s, object), next_tid(tid)))) {
2338 
2339 			note_cmpxchg_failure("slab_alloc", s, tid);
2340 			goto redo;
2341 		}
2342 		stat(s, ALLOC_FASTPATH);
2343 	}
2344 
2345 	if (unlikely(gfpflags & __GFP_ZERO) && object)
2346 		memset(object, 0, s->objsize);
2347 
2348 	slab_post_alloc_hook(s, gfpflags, object);
2349 
2350 	return object;
2351 }
2352 
2353 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2354 {
2355 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2356 
2357 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
2358 
2359 	return ret;
2360 }
2361 EXPORT_SYMBOL(kmem_cache_alloc);
2362 
2363 #ifdef CONFIG_TRACING
2364 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2365 {
2366 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2367 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2368 	return ret;
2369 }
2370 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2371 
2372 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
2373 {
2374 	void *ret = kmalloc_order(size, flags, order);
2375 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2376 	return ret;
2377 }
2378 EXPORT_SYMBOL(kmalloc_order_trace);
2379 #endif
2380 
2381 #ifdef CONFIG_NUMA
2382 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2383 {
2384 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2385 
2386 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2387 				    s->objsize, s->size, gfpflags, node);
2388 
2389 	return ret;
2390 }
2391 EXPORT_SYMBOL(kmem_cache_alloc_node);
2392 
2393 #ifdef CONFIG_TRACING
2394 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2395 				    gfp_t gfpflags,
2396 				    int node, size_t size)
2397 {
2398 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2399 
2400 	trace_kmalloc_node(_RET_IP_, ret,
2401 			   size, s->size, gfpflags, node);
2402 	return ret;
2403 }
2404 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2405 #endif
2406 #endif
2407 
2408 /*
2409  * Slow patch handling. This may still be called frequently since objects
2410  * have a longer lifetime than the cpu slabs in most processing loads.
2411  *
2412  * So we still attempt to reduce cache line usage. Just take the slab
2413  * lock and free the item. If there is no additional partial page
2414  * handling required then we can return immediately.
2415  */
2416 static void __slab_free(struct kmem_cache *s, struct page *page,
2417 			void *x, unsigned long addr)
2418 {
2419 	void *prior;
2420 	void **object = (void *)x;
2421 	int was_frozen;
2422 	int inuse;
2423 	struct page new;
2424 	unsigned long counters;
2425 	struct kmem_cache_node *n = NULL;
2426 	unsigned long uninitialized_var(flags);
2427 
2428 	stat(s, FREE_SLOWPATH);
2429 
2430 	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
2431 		return;
2432 
2433 	do {
2434 		prior = page->freelist;
2435 		counters = page->counters;
2436 		set_freepointer(s, object, prior);
2437 		new.counters = counters;
2438 		was_frozen = new.frozen;
2439 		new.inuse--;
2440 		if ((!new.inuse || !prior) && !was_frozen && !n) {
2441 
2442 			if (!kmem_cache_debug(s) && !prior)
2443 
2444 				/*
2445 				 * Slab was on no list before and will be partially empty
2446 				 * We can defer the list move and instead freeze it.
2447 				 */
2448 				new.frozen = 1;
2449 
2450 			else { /* Needs to be taken off a list */
2451 
2452 	                        n = get_node(s, page_to_nid(page));
2453 				/*
2454 				 * Speculatively acquire the list_lock.
2455 				 * If the cmpxchg does not succeed then we may
2456 				 * drop the list_lock without any processing.
2457 				 *
2458 				 * Otherwise the list_lock will synchronize with
2459 				 * other processors updating the list of slabs.
2460 				 */
2461 				spin_lock_irqsave(&n->list_lock, flags);
2462 
2463 			}
2464 		}
2465 		inuse = new.inuse;
2466 
2467 	} while (!cmpxchg_double_slab(s, page,
2468 		prior, counters,
2469 		object, new.counters,
2470 		"__slab_free"));
2471 
2472 	if (likely(!n)) {
2473 
2474 		/*
2475 		 * If we just froze the page then put it onto the
2476 		 * per cpu partial list.
2477 		 */
2478 		if (new.frozen && !was_frozen)
2479 			put_cpu_partial(s, page, 1);
2480 
2481 		/*
2482 		 * The list lock was not taken therefore no list
2483 		 * activity can be necessary.
2484 		 */
2485                 if (was_frozen)
2486                         stat(s, FREE_FROZEN);
2487                 return;
2488         }
2489 
2490 	/*
2491 	 * was_frozen may have been set after we acquired the list_lock in
2492 	 * an earlier loop. So we need to check it here again.
2493 	 */
2494 	if (was_frozen)
2495 		stat(s, FREE_FROZEN);
2496 	else {
2497 		if (unlikely(!inuse && n->nr_partial > s->min_partial))
2498                         goto slab_empty;
2499 
2500 		/*
2501 		 * Objects left in the slab. If it was not on the partial list before
2502 		 * then add it.
2503 		 */
2504 		if (unlikely(!prior)) {
2505 			remove_full(s, page);
2506 			add_partial(n, page, DEACTIVATE_TO_TAIL);
2507 			stat(s, FREE_ADD_PARTIAL);
2508 		}
2509 	}
2510 	spin_unlock_irqrestore(&n->list_lock, flags);
2511 	return;
2512 
2513 slab_empty:
2514 	if (prior) {
2515 		/*
2516 		 * Slab on the partial list.
2517 		 */
2518 		remove_partial(n, page);
2519 		stat(s, FREE_REMOVE_PARTIAL);
2520 	} else
2521 		/* Slab must be on the full list */
2522 		remove_full(s, page);
2523 
2524 	spin_unlock_irqrestore(&n->list_lock, flags);
2525 	stat(s, FREE_SLAB);
2526 	discard_slab(s, page);
2527 }
2528 
2529 /*
2530  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2531  * can perform fastpath freeing without additional function calls.
2532  *
2533  * The fastpath is only possible if we are freeing to the current cpu slab
2534  * of this processor. This typically the case if we have just allocated
2535  * the item before.
2536  *
2537  * If fastpath is not possible then fall back to __slab_free where we deal
2538  * with all sorts of special processing.
2539  */
2540 static __always_inline void slab_free(struct kmem_cache *s,
2541 			struct page *page, void *x, unsigned long addr)
2542 {
2543 	void **object = (void *)x;
2544 	struct kmem_cache_cpu *c;
2545 	unsigned long tid;
2546 
2547 	slab_free_hook(s, x);
2548 
2549 redo:
2550 	/*
2551 	 * Determine the currently cpus per cpu slab.
2552 	 * The cpu may change afterward. However that does not matter since
2553 	 * data is retrieved via this pointer. If we are on the same cpu
2554 	 * during the cmpxchg then the free will succedd.
2555 	 */
2556 	c = __this_cpu_ptr(s->cpu_slab);
2557 
2558 	tid = c->tid;
2559 	barrier();
2560 
2561 	if (likely(page == c->page)) {
2562 		set_freepointer(s, object, c->freelist);
2563 
2564 		if (unlikely(!this_cpu_cmpxchg_double(
2565 				s->cpu_slab->freelist, s->cpu_slab->tid,
2566 				c->freelist, tid,
2567 				object, next_tid(tid)))) {
2568 
2569 			note_cmpxchg_failure("slab_free", s, tid);
2570 			goto redo;
2571 		}
2572 		stat(s, FREE_FASTPATH);
2573 	} else
2574 		__slab_free(s, page, x, addr);
2575 
2576 }
2577 
2578 void kmem_cache_free(struct kmem_cache *s, void *x)
2579 {
2580 	struct page *page;
2581 
2582 	page = virt_to_head_page(x);
2583 
2584 	slab_free(s, page, x, _RET_IP_);
2585 
2586 	trace_kmem_cache_free(_RET_IP_, x);
2587 }
2588 EXPORT_SYMBOL(kmem_cache_free);
2589 
2590 /*
2591  * Object placement in a slab is made very easy because we always start at
2592  * offset 0. If we tune the size of the object to the alignment then we can
2593  * get the required alignment by putting one properly sized object after
2594  * another.
2595  *
2596  * Notice that the allocation order determines the sizes of the per cpu
2597  * caches. Each processor has always one slab available for allocations.
2598  * Increasing the allocation order reduces the number of times that slabs
2599  * must be moved on and off the partial lists and is therefore a factor in
2600  * locking overhead.
2601  */
2602 
2603 /*
2604  * Mininum / Maximum order of slab pages. This influences locking overhead
2605  * and slab fragmentation. A higher order reduces the number of partial slabs
2606  * and increases the number of allocations possible without having to
2607  * take the list_lock.
2608  */
2609 static int slub_min_order;
2610 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2611 static int slub_min_objects;
2612 
2613 /*
2614  * Merge control. If this is set then no merging of slab caches will occur.
2615  * (Could be removed. This was introduced to pacify the merge skeptics.)
2616  */
2617 static int slub_nomerge;
2618 
2619 /*
2620  * Calculate the order of allocation given an slab object size.
2621  *
2622  * The order of allocation has significant impact on performance and other
2623  * system components. Generally order 0 allocations should be preferred since
2624  * order 0 does not cause fragmentation in the page allocator. Larger objects
2625  * be problematic to put into order 0 slabs because there may be too much
2626  * unused space left. We go to a higher order if more than 1/16th of the slab
2627  * would be wasted.
2628  *
2629  * In order to reach satisfactory performance we must ensure that a minimum
2630  * number of objects is in one slab. Otherwise we may generate too much
2631  * activity on the partial lists which requires taking the list_lock. This is
2632  * less a concern for large slabs though which are rarely used.
2633  *
2634  * slub_max_order specifies the order where we begin to stop considering the
2635  * number of objects in a slab as critical. If we reach slub_max_order then
2636  * we try to keep the page order as low as possible. So we accept more waste
2637  * of space in favor of a small page order.
2638  *
2639  * Higher order allocations also allow the placement of more objects in a
2640  * slab and thereby reduce object handling overhead. If the user has
2641  * requested a higher mininum order then we start with that one instead of
2642  * the smallest order which will fit the object.
2643  */
2644 static inline int slab_order(int size, int min_objects,
2645 				int max_order, int fract_leftover, int reserved)
2646 {
2647 	int order;
2648 	int rem;
2649 	int min_order = slub_min_order;
2650 
2651 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
2652 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
2653 
2654 	for (order = max(min_order,
2655 				fls(min_objects * size - 1) - PAGE_SHIFT);
2656 			order <= max_order; order++) {
2657 
2658 		unsigned long slab_size = PAGE_SIZE << order;
2659 
2660 		if (slab_size < min_objects * size + reserved)
2661 			continue;
2662 
2663 		rem = (slab_size - reserved) % size;
2664 
2665 		if (rem <= slab_size / fract_leftover)
2666 			break;
2667 
2668 	}
2669 
2670 	return order;
2671 }
2672 
2673 static inline int calculate_order(int size, int reserved)
2674 {
2675 	int order;
2676 	int min_objects;
2677 	int fraction;
2678 	int max_objects;
2679 
2680 	/*
2681 	 * Attempt to find best configuration for a slab. This
2682 	 * works by first attempting to generate a layout with
2683 	 * the best configuration and backing off gradually.
2684 	 *
2685 	 * First we reduce the acceptable waste in a slab. Then
2686 	 * we reduce the minimum objects required in a slab.
2687 	 */
2688 	min_objects = slub_min_objects;
2689 	if (!min_objects)
2690 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
2691 	max_objects = order_objects(slub_max_order, size, reserved);
2692 	min_objects = min(min_objects, max_objects);
2693 
2694 	while (min_objects > 1) {
2695 		fraction = 16;
2696 		while (fraction >= 4) {
2697 			order = slab_order(size, min_objects,
2698 					slub_max_order, fraction, reserved);
2699 			if (order <= slub_max_order)
2700 				return order;
2701 			fraction /= 2;
2702 		}
2703 		min_objects--;
2704 	}
2705 
2706 	/*
2707 	 * We were unable to place multiple objects in a slab. Now
2708 	 * lets see if we can place a single object there.
2709 	 */
2710 	order = slab_order(size, 1, slub_max_order, 1, reserved);
2711 	if (order <= slub_max_order)
2712 		return order;
2713 
2714 	/*
2715 	 * Doh this slab cannot be placed using slub_max_order.
2716 	 */
2717 	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
2718 	if (order < MAX_ORDER)
2719 		return order;
2720 	return -ENOSYS;
2721 }
2722 
2723 /*
2724  * Figure out what the alignment of the objects will be.
2725  */
2726 static unsigned long calculate_alignment(unsigned long flags,
2727 		unsigned long align, unsigned long size)
2728 {
2729 	/*
2730 	 * If the user wants hardware cache aligned objects then follow that
2731 	 * suggestion if the object is sufficiently large.
2732 	 *
2733 	 * The hardware cache alignment cannot override the specified
2734 	 * alignment though. If that is greater then use it.
2735 	 */
2736 	if (flags & SLAB_HWCACHE_ALIGN) {
2737 		unsigned long ralign = cache_line_size();
2738 		while (size <= ralign / 2)
2739 			ralign /= 2;
2740 		align = max(align, ralign);
2741 	}
2742 
2743 	if (align < ARCH_SLAB_MINALIGN)
2744 		align = ARCH_SLAB_MINALIGN;
2745 
2746 	return ALIGN(align, sizeof(void *));
2747 }
2748 
2749 static void
2750 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2751 {
2752 	n->nr_partial = 0;
2753 	spin_lock_init(&n->list_lock);
2754 	INIT_LIST_HEAD(&n->partial);
2755 #ifdef CONFIG_SLUB_DEBUG
2756 	atomic_long_set(&n->nr_slabs, 0);
2757 	atomic_long_set(&n->total_objects, 0);
2758 	INIT_LIST_HEAD(&n->full);
2759 #endif
2760 }
2761 
2762 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2763 {
2764 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2765 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2766 
2767 	/*
2768 	 * Must align to double word boundary for the double cmpxchg
2769 	 * instructions to work; see __pcpu_double_call_return_bool().
2770 	 */
2771 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2772 				     2 * sizeof(void *));
2773 
2774 	if (!s->cpu_slab)
2775 		return 0;
2776 
2777 	init_kmem_cache_cpus(s);
2778 
2779 	return 1;
2780 }
2781 
2782 static struct kmem_cache *kmem_cache_node;
2783 
2784 /*
2785  * No kmalloc_node yet so do it by hand. We know that this is the first
2786  * slab on the node for this slabcache. There are no concurrent accesses
2787  * possible.
2788  *
2789  * Note that this function only works on the kmalloc_node_cache
2790  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2791  * memory on a fresh node that has no slab structures yet.
2792  */
2793 static void early_kmem_cache_node_alloc(int node)
2794 {
2795 	struct page *page;
2796 	struct kmem_cache_node *n;
2797 
2798 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
2799 
2800 	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
2801 
2802 	BUG_ON(!page);
2803 	if (page_to_nid(page) != node) {
2804 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2805 				"node %d\n", node);
2806 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2807 				"in order to be able to continue\n");
2808 	}
2809 
2810 	n = page->freelist;
2811 	BUG_ON(!n);
2812 	page->freelist = get_freepointer(kmem_cache_node, n);
2813 	page->inuse = 1;
2814 	page->frozen = 0;
2815 	kmem_cache_node->node[node] = n;
2816 #ifdef CONFIG_SLUB_DEBUG
2817 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2818 	init_tracking(kmem_cache_node, n);
2819 #endif
2820 	init_kmem_cache_node(n, kmem_cache_node);
2821 	inc_slabs_node(kmem_cache_node, node, page->objects);
2822 
2823 	add_partial(n, page, DEACTIVATE_TO_HEAD);
2824 }
2825 
2826 static void free_kmem_cache_nodes(struct kmem_cache *s)
2827 {
2828 	int node;
2829 
2830 	for_each_node_state(node, N_NORMAL_MEMORY) {
2831 		struct kmem_cache_node *n = s->node[node];
2832 
2833 		if (n)
2834 			kmem_cache_free(kmem_cache_node, n);
2835 
2836 		s->node[node] = NULL;
2837 	}
2838 }
2839 
2840 static int init_kmem_cache_nodes(struct kmem_cache *s)
2841 {
2842 	int node;
2843 
2844 	for_each_node_state(node, N_NORMAL_MEMORY) {
2845 		struct kmem_cache_node *n;
2846 
2847 		if (slab_state == DOWN) {
2848 			early_kmem_cache_node_alloc(node);
2849 			continue;
2850 		}
2851 		n = kmem_cache_alloc_node(kmem_cache_node,
2852 						GFP_KERNEL, node);
2853 
2854 		if (!n) {
2855 			free_kmem_cache_nodes(s);
2856 			return 0;
2857 		}
2858 
2859 		s->node[node] = n;
2860 		init_kmem_cache_node(n, s);
2861 	}
2862 	return 1;
2863 }
2864 
2865 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2866 {
2867 	if (min < MIN_PARTIAL)
2868 		min = MIN_PARTIAL;
2869 	else if (min > MAX_PARTIAL)
2870 		min = MAX_PARTIAL;
2871 	s->min_partial = min;
2872 }
2873 
2874 /*
2875  * calculate_sizes() determines the order and the distribution of data within
2876  * a slab object.
2877  */
2878 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2879 {
2880 	unsigned long flags = s->flags;
2881 	unsigned long size = s->objsize;
2882 	unsigned long align = s->align;
2883 	int order;
2884 
2885 	/*
2886 	 * Round up object size to the next word boundary. We can only
2887 	 * place the free pointer at word boundaries and this determines
2888 	 * the possible location of the free pointer.
2889 	 */
2890 	size = ALIGN(size, sizeof(void *));
2891 
2892 #ifdef CONFIG_SLUB_DEBUG
2893 	/*
2894 	 * Determine if we can poison the object itself. If the user of
2895 	 * the slab may touch the object after free or before allocation
2896 	 * then we should never poison the object itself.
2897 	 */
2898 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2899 			!s->ctor)
2900 		s->flags |= __OBJECT_POISON;
2901 	else
2902 		s->flags &= ~__OBJECT_POISON;
2903 
2904 
2905 	/*
2906 	 * If we are Redzoning then check if there is some space between the
2907 	 * end of the object and the free pointer. If not then add an
2908 	 * additional word to have some bytes to store Redzone information.
2909 	 */
2910 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2911 		size += sizeof(void *);
2912 #endif
2913 
2914 	/*
2915 	 * With that we have determined the number of bytes in actual use
2916 	 * by the object. This is the potential offset to the free pointer.
2917 	 */
2918 	s->inuse = size;
2919 
2920 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2921 		s->ctor)) {
2922 		/*
2923 		 * Relocate free pointer after the object if it is not
2924 		 * permitted to overwrite the first word of the object on
2925 		 * kmem_cache_free.
2926 		 *
2927 		 * This is the case if we do RCU, have a constructor or
2928 		 * destructor or are poisoning the objects.
2929 		 */
2930 		s->offset = size;
2931 		size += sizeof(void *);
2932 	}
2933 
2934 #ifdef CONFIG_SLUB_DEBUG
2935 	if (flags & SLAB_STORE_USER)
2936 		/*
2937 		 * Need to store information about allocs and frees after
2938 		 * the object.
2939 		 */
2940 		size += 2 * sizeof(struct track);
2941 
2942 	if (flags & SLAB_RED_ZONE)
2943 		/*
2944 		 * Add some empty padding so that we can catch
2945 		 * overwrites from earlier objects rather than let
2946 		 * tracking information or the free pointer be
2947 		 * corrupted if a user writes before the start
2948 		 * of the object.
2949 		 */
2950 		size += sizeof(void *);
2951 #endif
2952 
2953 	/*
2954 	 * Determine the alignment based on various parameters that the
2955 	 * user specified and the dynamic determination of cache line size
2956 	 * on bootup.
2957 	 */
2958 	align = calculate_alignment(flags, align, s->objsize);
2959 	s->align = align;
2960 
2961 	/*
2962 	 * SLUB stores one object immediately after another beginning from
2963 	 * offset 0. In order to align the objects we have to simply size
2964 	 * each object to conform to the alignment.
2965 	 */
2966 	size = ALIGN(size, align);
2967 	s->size = size;
2968 	if (forced_order >= 0)
2969 		order = forced_order;
2970 	else
2971 		order = calculate_order(size, s->reserved);
2972 
2973 	if (order < 0)
2974 		return 0;
2975 
2976 	s->allocflags = 0;
2977 	if (order)
2978 		s->allocflags |= __GFP_COMP;
2979 
2980 	if (s->flags & SLAB_CACHE_DMA)
2981 		s->allocflags |= SLUB_DMA;
2982 
2983 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2984 		s->allocflags |= __GFP_RECLAIMABLE;
2985 
2986 	/*
2987 	 * Determine the number of objects per slab
2988 	 */
2989 	s->oo = oo_make(order, size, s->reserved);
2990 	s->min = oo_make(get_order(size), size, s->reserved);
2991 	if (oo_objects(s->oo) > oo_objects(s->max))
2992 		s->max = s->oo;
2993 
2994 	return !!oo_objects(s->oo);
2995 
2996 }
2997 
2998 static int kmem_cache_open(struct kmem_cache *s,
2999 		const char *name, size_t size,
3000 		size_t align, unsigned long flags,
3001 		void (*ctor)(void *))
3002 {
3003 	memset(s, 0, kmem_size);
3004 	s->name = name;
3005 	s->ctor = ctor;
3006 	s->objsize = size;
3007 	s->align = align;
3008 	s->flags = kmem_cache_flags(size, flags, name, ctor);
3009 	s->reserved = 0;
3010 
3011 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3012 		s->reserved = sizeof(struct rcu_head);
3013 
3014 	if (!calculate_sizes(s, -1))
3015 		goto error;
3016 	if (disable_higher_order_debug) {
3017 		/*
3018 		 * Disable debugging flags that store metadata if the min slab
3019 		 * order increased.
3020 		 */
3021 		if (get_order(s->size) > get_order(s->objsize)) {
3022 			s->flags &= ~DEBUG_METADATA_FLAGS;
3023 			s->offset = 0;
3024 			if (!calculate_sizes(s, -1))
3025 				goto error;
3026 		}
3027 	}
3028 
3029 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3030     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3031 	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3032 		/* Enable fast mode */
3033 		s->flags |= __CMPXCHG_DOUBLE;
3034 #endif
3035 
3036 	/*
3037 	 * The larger the object size is, the more pages we want on the partial
3038 	 * list to avoid pounding the page allocator excessively.
3039 	 */
3040 	set_min_partial(s, ilog2(s->size) / 2);
3041 
3042 	/*
3043 	 * cpu_partial determined the maximum number of objects kept in the
3044 	 * per cpu partial lists of a processor.
3045 	 *
3046 	 * Per cpu partial lists mainly contain slabs that just have one
3047 	 * object freed. If they are used for allocation then they can be
3048 	 * filled up again with minimal effort. The slab will never hit the
3049 	 * per node partial lists and therefore no locking will be required.
3050 	 *
3051 	 * This setting also determines
3052 	 *
3053 	 * A) The number of objects from per cpu partial slabs dumped to the
3054 	 *    per node list when we reach the limit.
3055 	 * B) The number of objects in cpu partial slabs to extract from the
3056 	 *    per node list when we run out of per cpu objects. We only fetch 50%
3057 	 *    to keep some capacity around for frees.
3058 	 */
3059 	if (kmem_cache_debug(s))
3060 		s->cpu_partial = 0;
3061 	else if (s->size >= PAGE_SIZE)
3062 		s->cpu_partial = 2;
3063 	else if (s->size >= 1024)
3064 		s->cpu_partial = 6;
3065 	else if (s->size >= 256)
3066 		s->cpu_partial = 13;
3067 	else
3068 		s->cpu_partial = 30;
3069 
3070 	s->refcount = 1;
3071 #ifdef CONFIG_NUMA
3072 	s->remote_node_defrag_ratio = 1000;
3073 #endif
3074 	if (!init_kmem_cache_nodes(s))
3075 		goto error;
3076 
3077 	if (alloc_kmem_cache_cpus(s))
3078 		return 1;
3079 
3080 	free_kmem_cache_nodes(s);
3081 error:
3082 	if (flags & SLAB_PANIC)
3083 		panic("Cannot create slab %s size=%lu realsize=%u "
3084 			"order=%u offset=%u flags=%lx\n",
3085 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
3086 			s->offset, flags);
3087 	return 0;
3088 }
3089 
3090 /*
3091  * Determine the size of a slab object
3092  */
3093 unsigned int kmem_cache_size(struct kmem_cache *s)
3094 {
3095 	return s->objsize;
3096 }
3097 EXPORT_SYMBOL(kmem_cache_size);
3098 
3099 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3100 							const char *text)
3101 {
3102 #ifdef CONFIG_SLUB_DEBUG
3103 	void *addr = page_address(page);
3104 	void *p;
3105 	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3106 				     sizeof(long), GFP_ATOMIC);
3107 	if (!map)
3108 		return;
3109 	slab_err(s, page, "%s", text);
3110 	slab_lock(page);
3111 
3112 	get_map(s, page, map);
3113 	for_each_object(p, s, addr, page->objects) {
3114 
3115 		if (!test_bit(slab_index(p, s, addr), map)) {
3116 			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3117 							p, p - addr);
3118 			print_tracking(s, p);
3119 		}
3120 	}
3121 	slab_unlock(page);
3122 	kfree(map);
3123 #endif
3124 }
3125 
3126 /*
3127  * Attempt to free all partial slabs on a node.
3128  * This is called from kmem_cache_close(). We must be the last thread
3129  * using the cache and therefore we do not need to lock anymore.
3130  */
3131 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3132 {
3133 	struct page *page, *h;
3134 
3135 	list_for_each_entry_safe(page, h, &n->partial, lru) {
3136 		if (!page->inuse) {
3137 			remove_partial(n, page);
3138 			discard_slab(s, page);
3139 		} else {
3140 			list_slab_objects(s, page,
3141 				"Objects remaining on kmem_cache_close()");
3142 		}
3143 	}
3144 }
3145 
3146 /*
3147  * Release all resources used by a slab cache.
3148  */
3149 static inline int kmem_cache_close(struct kmem_cache *s)
3150 {
3151 	int node;
3152 
3153 	flush_all(s);
3154 	free_percpu(s->cpu_slab);
3155 	/* Attempt to free all objects */
3156 	for_each_node_state(node, N_NORMAL_MEMORY) {
3157 		struct kmem_cache_node *n = get_node(s, node);
3158 
3159 		free_partial(s, n);
3160 		if (n->nr_partial || slabs_node(s, node))
3161 			return 1;
3162 	}
3163 	free_kmem_cache_nodes(s);
3164 	return 0;
3165 }
3166 
3167 /*
3168  * Close a cache and release the kmem_cache structure
3169  * (must be used for caches created using kmem_cache_create)
3170  */
3171 void kmem_cache_destroy(struct kmem_cache *s)
3172 {
3173 	down_write(&slub_lock);
3174 	s->refcount--;
3175 	if (!s->refcount) {
3176 		list_del(&s->list);
3177 		up_write(&slub_lock);
3178 		if (kmem_cache_close(s)) {
3179 			printk(KERN_ERR "SLUB %s: %s called for cache that "
3180 				"still has objects.\n", s->name, __func__);
3181 			dump_stack();
3182 		}
3183 		if (s->flags & SLAB_DESTROY_BY_RCU)
3184 			rcu_barrier();
3185 		sysfs_slab_remove(s);
3186 	} else
3187 		up_write(&slub_lock);
3188 }
3189 EXPORT_SYMBOL(kmem_cache_destroy);
3190 
3191 /********************************************************************
3192  *		Kmalloc subsystem
3193  *******************************************************************/
3194 
3195 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
3196 EXPORT_SYMBOL(kmalloc_caches);
3197 
3198 static struct kmem_cache *kmem_cache;
3199 
3200 #ifdef CONFIG_ZONE_DMA
3201 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
3202 #endif
3203 
3204 static int __init setup_slub_min_order(char *str)
3205 {
3206 	get_option(&str, &slub_min_order);
3207 
3208 	return 1;
3209 }
3210 
3211 __setup("slub_min_order=", setup_slub_min_order);
3212 
3213 static int __init setup_slub_max_order(char *str)
3214 {
3215 	get_option(&str, &slub_max_order);
3216 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3217 
3218 	return 1;
3219 }
3220 
3221 __setup("slub_max_order=", setup_slub_max_order);
3222 
3223 static int __init setup_slub_min_objects(char *str)
3224 {
3225 	get_option(&str, &slub_min_objects);
3226 
3227 	return 1;
3228 }
3229 
3230 __setup("slub_min_objects=", setup_slub_min_objects);
3231 
3232 static int __init setup_slub_nomerge(char *str)
3233 {
3234 	slub_nomerge = 1;
3235 	return 1;
3236 }
3237 
3238 __setup("slub_nomerge", setup_slub_nomerge);
3239 
3240 static struct kmem_cache *__init create_kmalloc_cache(const char *name,
3241 						int size, unsigned int flags)
3242 {
3243 	struct kmem_cache *s;
3244 
3245 	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3246 
3247 	/*
3248 	 * This function is called with IRQs disabled during early-boot on
3249 	 * single CPU so there's no need to take slub_lock here.
3250 	 */
3251 	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
3252 								flags, NULL))
3253 		goto panic;
3254 
3255 	list_add(&s->list, &slab_caches);
3256 	return s;
3257 
3258 panic:
3259 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
3260 	return NULL;
3261 }
3262 
3263 /*
3264  * Conversion table for small slabs sizes / 8 to the index in the
3265  * kmalloc array. This is necessary for slabs < 192 since we have non power
3266  * of two cache sizes there. The size of larger slabs can be determined using
3267  * fls.
3268  */
3269 static s8 size_index[24] = {
3270 	3,	/* 8 */
3271 	4,	/* 16 */
3272 	5,	/* 24 */
3273 	5,	/* 32 */
3274 	6,	/* 40 */
3275 	6,	/* 48 */
3276 	6,	/* 56 */
3277 	6,	/* 64 */
3278 	1,	/* 72 */
3279 	1,	/* 80 */
3280 	1,	/* 88 */
3281 	1,	/* 96 */
3282 	7,	/* 104 */
3283 	7,	/* 112 */
3284 	7,	/* 120 */
3285 	7,	/* 128 */
3286 	2,	/* 136 */
3287 	2,	/* 144 */
3288 	2,	/* 152 */
3289 	2,	/* 160 */
3290 	2,	/* 168 */
3291 	2,	/* 176 */
3292 	2,	/* 184 */
3293 	2	/* 192 */
3294 };
3295 
3296 static inline int size_index_elem(size_t bytes)
3297 {
3298 	return (bytes - 1) / 8;
3299 }
3300 
3301 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
3302 {
3303 	int index;
3304 
3305 	if (size <= 192) {
3306 		if (!size)
3307 			return ZERO_SIZE_PTR;
3308 
3309 		index = size_index[size_index_elem(size)];
3310 	} else
3311 		index = fls(size - 1);
3312 
3313 #ifdef CONFIG_ZONE_DMA
3314 	if (unlikely((flags & SLUB_DMA)))
3315 		return kmalloc_dma_caches[index];
3316 
3317 #endif
3318 	return kmalloc_caches[index];
3319 }
3320 
3321 void *__kmalloc(size_t size, gfp_t flags)
3322 {
3323 	struct kmem_cache *s;
3324 	void *ret;
3325 
3326 	if (unlikely(size > SLUB_MAX_SIZE))
3327 		return kmalloc_large(size, flags);
3328 
3329 	s = get_slab(size, flags);
3330 
3331 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3332 		return s;
3333 
3334 	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
3335 
3336 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3337 
3338 	return ret;
3339 }
3340 EXPORT_SYMBOL(__kmalloc);
3341 
3342 #ifdef CONFIG_NUMA
3343 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3344 {
3345 	struct page *page;
3346 	void *ptr = NULL;
3347 
3348 	flags |= __GFP_COMP | __GFP_NOTRACK;
3349 	page = alloc_pages_node(node, flags, get_order(size));
3350 	if (page)
3351 		ptr = page_address(page);
3352 
3353 	kmemleak_alloc(ptr, size, 1, flags);
3354 	return ptr;
3355 }
3356 
3357 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3358 {
3359 	struct kmem_cache *s;
3360 	void *ret;
3361 
3362 	if (unlikely(size > SLUB_MAX_SIZE)) {
3363 		ret = kmalloc_large_node(size, flags, node);
3364 
3365 		trace_kmalloc_node(_RET_IP_, ret,
3366 				   size, PAGE_SIZE << get_order(size),
3367 				   flags, node);
3368 
3369 		return ret;
3370 	}
3371 
3372 	s = get_slab(size, flags);
3373 
3374 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3375 		return s;
3376 
3377 	ret = slab_alloc(s, flags, node, _RET_IP_);
3378 
3379 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3380 
3381 	return ret;
3382 }
3383 EXPORT_SYMBOL(__kmalloc_node);
3384 #endif
3385 
3386 size_t ksize(const void *object)
3387 {
3388 	struct page *page;
3389 
3390 	if (unlikely(object == ZERO_SIZE_PTR))
3391 		return 0;
3392 
3393 	page = virt_to_head_page(object);
3394 
3395 	if (unlikely(!PageSlab(page))) {
3396 		WARN_ON(!PageCompound(page));
3397 		return PAGE_SIZE << compound_order(page);
3398 	}
3399 
3400 	return slab_ksize(page->slab);
3401 }
3402 EXPORT_SYMBOL(ksize);
3403 
3404 #ifdef CONFIG_SLUB_DEBUG
3405 bool verify_mem_not_deleted(const void *x)
3406 {
3407 	struct page *page;
3408 	void *object = (void *)x;
3409 	unsigned long flags;
3410 	bool rv;
3411 
3412 	if (unlikely(ZERO_OR_NULL_PTR(x)))
3413 		return false;
3414 
3415 	local_irq_save(flags);
3416 
3417 	page = virt_to_head_page(x);
3418 	if (unlikely(!PageSlab(page))) {
3419 		/* maybe it was from stack? */
3420 		rv = true;
3421 		goto out_unlock;
3422 	}
3423 
3424 	slab_lock(page);
3425 	if (on_freelist(page->slab, page, object)) {
3426 		object_err(page->slab, page, object, "Object is on free-list");
3427 		rv = false;
3428 	} else {
3429 		rv = true;
3430 	}
3431 	slab_unlock(page);
3432 
3433 out_unlock:
3434 	local_irq_restore(flags);
3435 	return rv;
3436 }
3437 EXPORT_SYMBOL(verify_mem_not_deleted);
3438 #endif
3439 
3440 void kfree(const void *x)
3441 {
3442 	struct page *page;
3443 	void *object = (void *)x;
3444 
3445 	trace_kfree(_RET_IP_, x);
3446 
3447 	if (unlikely(ZERO_OR_NULL_PTR(x)))
3448 		return;
3449 
3450 	page = virt_to_head_page(x);
3451 	if (unlikely(!PageSlab(page))) {
3452 		BUG_ON(!PageCompound(page));
3453 		kmemleak_free(x);
3454 		put_page(page);
3455 		return;
3456 	}
3457 	slab_free(page->slab, page, object, _RET_IP_);
3458 }
3459 EXPORT_SYMBOL(kfree);
3460 
3461 /*
3462  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3463  * the remaining slabs by the number of items in use. The slabs with the
3464  * most items in use come first. New allocations will then fill those up
3465  * and thus they can be removed from the partial lists.
3466  *
3467  * The slabs with the least items are placed last. This results in them
3468  * being allocated from last increasing the chance that the last objects
3469  * are freed in them.
3470  */
3471 int kmem_cache_shrink(struct kmem_cache *s)
3472 {
3473 	int node;
3474 	int i;
3475 	struct kmem_cache_node *n;
3476 	struct page *page;
3477 	struct page *t;
3478 	int objects = oo_objects(s->max);
3479 	struct list_head *slabs_by_inuse =
3480 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
3481 	unsigned long flags;
3482 
3483 	if (!slabs_by_inuse)
3484 		return -ENOMEM;
3485 
3486 	flush_all(s);
3487 	for_each_node_state(node, N_NORMAL_MEMORY) {
3488 		n = get_node(s, node);
3489 
3490 		if (!n->nr_partial)
3491 			continue;
3492 
3493 		for (i = 0; i < objects; i++)
3494 			INIT_LIST_HEAD(slabs_by_inuse + i);
3495 
3496 		spin_lock_irqsave(&n->list_lock, flags);
3497 
3498 		/*
3499 		 * Build lists indexed by the items in use in each slab.
3500 		 *
3501 		 * Note that concurrent frees may occur while we hold the
3502 		 * list_lock. page->inuse here is the upper limit.
3503 		 */
3504 		list_for_each_entry_safe(page, t, &n->partial, lru) {
3505 			list_move(&page->lru, slabs_by_inuse + page->inuse);
3506 			if (!page->inuse)
3507 				n->nr_partial--;
3508 		}
3509 
3510 		/*
3511 		 * Rebuild the partial list with the slabs filled up most
3512 		 * first and the least used slabs at the end.
3513 		 */
3514 		for (i = objects - 1; i > 0; i--)
3515 			list_splice(slabs_by_inuse + i, n->partial.prev);
3516 
3517 		spin_unlock_irqrestore(&n->list_lock, flags);
3518 
3519 		/* Release empty slabs */
3520 		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3521 			discard_slab(s, page);
3522 	}
3523 
3524 	kfree(slabs_by_inuse);
3525 	return 0;
3526 }
3527 EXPORT_SYMBOL(kmem_cache_shrink);
3528 
3529 #if defined(CONFIG_MEMORY_HOTPLUG)
3530 static int slab_mem_going_offline_callback(void *arg)
3531 {
3532 	struct kmem_cache *s;
3533 
3534 	down_read(&slub_lock);
3535 	list_for_each_entry(s, &slab_caches, list)
3536 		kmem_cache_shrink(s);
3537 	up_read(&slub_lock);
3538 
3539 	return 0;
3540 }
3541 
3542 static void slab_mem_offline_callback(void *arg)
3543 {
3544 	struct kmem_cache_node *n;
3545 	struct kmem_cache *s;
3546 	struct memory_notify *marg = arg;
3547 	int offline_node;
3548 
3549 	offline_node = marg->status_change_nid;
3550 
3551 	/*
3552 	 * If the node still has available memory. we need kmem_cache_node
3553 	 * for it yet.
3554 	 */
3555 	if (offline_node < 0)
3556 		return;
3557 
3558 	down_read(&slub_lock);
3559 	list_for_each_entry(s, &slab_caches, list) {
3560 		n = get_node(s, offline_node);
3561 		if (n) {
3562 			/*
3563 			 * if n->nr_slabs > 0, slabs still exist on the node
3564 			 * that is going down. We were unable to free them,
3565 			 * and offline_pages() function shouldn't call this
3566 			 * callback. So, we must fail.
3567 			 */
3568 			BUG_ON(slabs_node(s, offline_node));
3569 
3570 			s->node[offline_node] = NULL;
3571 			kmem_cache_free(kmem_cache_node, n);
3572 		}
3573 	}
3574 	up_read(&slub_lock);
3575 }
3576 
3577 static int slab_mem_going_online_callback(void *arg)
3578 {
3579 	struct kmem_cache_node *n;
3580 	struct kmem_cache *s;
3581 	struct memory_notify *marg = arg;
3582 	int nid = marg->status_change_nid;
3583 	int ret = 0;
3584 
3585 	/*
3586 	 * If the node's memory is already available, then kmem_cache_node is
3587 	 * already created. Nothing to do.
3588 	 */
3589 	if (nid < 0)
3590 		return 0;
3591 
3592 	/*
3593 	 * We are bringing a node online. No memory is available yet. We must
3594 	 * allocate a kmem_cache_node structure in order to bring the node
3595 	 * online.
3596 	 */
3597 	down_read(&slub_lock);
3598 	list_for_each_entry(s, &slab_caches, list) {
3599 		/*
3600 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
3601 		 *      since memory is not yet available from the node that
3602 		 *      is brought up.
3603 		 */
3604 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
3605 		if (!n) {
3606 			ret = -ENOMEM;
3607 			goto out;
3608 		}
3609 		init_kmem_cache_node(n, s);
3610 		s->node[nid] = n;
3611 	}
3612 out:
3613 	up_read(&slub_lock);
3614 	return ret;
3615 }
3616 
3617 static int slab_memory_callback(struct notifier_block *self,
3618 				unsigned long action, void *arg)
3619 {
3620 	int ret = 0;
3621 
3622 	switch (action) {
3623 	case MEM_GOING_ONLINE:
3624 		ret = slab_mem_going_online_callback(arg);
3625 		break;
3626 	case MEM_GOING_OFFLINE:
3627 		ret = slab_mem_going_offline_callback(arg);
3628 		break;
3629 	case MEM_OFFLINE:
3630 	case MEM_CANCEL_ONLINE:
3631 		slab_mem_offline_callback(arg);
3632 		break;
3633 	case MEM_ONLINE:
3634 	case MEM_CANCEL_OFFLINE:
3635 		break;
3636 	}
3637 	if (ret)
3638 		ret = notifier_from_errno(ret);
3639 	else
3640 		ret = NOTIFY_OK;
3641 	return ret;
3642 }
3643 
3644 #endif /* CONFIG_MEMORY_HOTPLUG */
3645 
3646 /********************************************************************
3647  *			Basic setup of slabs
3648  *******************************************************************/
3649 
3650 /*
3651  * Used for early kmem_cache structures that were allocated using
3652  * the page allocator
3653  */
3654 
3655 static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3656 {
3657 	int node;
3658 
3659 	list_add(&s->list, &slab_caches);
3660 	s->refcount = -1;
3661 
3662 	for_each_node_state(node, N_NORMAL_MEMORY) {
3663 		struct kmem_cache_node *n = get_node(s, node);
3664 		struct page *p;
3665 
3666 		if (n) {
3667 			list_for_each_entry(p, &n->partial, lru)
3668 				p->slab = s;
3669 
3670 #ifdef CONFIG_SLUB_DEBUG
3671 			list_for_each_entry(p, &n->full, lru)
3672 				p->slab = s;
3673 #endif
3674 		}
3675 	}
3676 }
3677 
3678 void __init kmem_cache_init(void)
3679 {
3680 	int i;
3681 	int caches = 0;
3682 	struct kmem_cache *temp_kmem_cache;
3683 	int order;
3684 	struct kmem_cache *temp_kmem_cache_node;
3685 	unsigned long kmalloc_size;
3686 
3687 	if (debug_guardpage_minorder())
3688 		slub_max_order = 0;
3689 
3690 	kmem_size = offsetof(struct kmem_cache, node) +
3691 				nr_node_ids * sizeof(struct kmem_cache_node *);
3692 
3693 	/* Allocate two kmem_caches from the page allocator */
3694 	kmalloc_size = ALIGN(kmem_size, cache_line_size());
3695 	order = get_order(2 * kmalloc_size);
3696 	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3697 
3698 	/*
3699 	 * Must first have the slab cache available for the allocations of the
3700 	 * struct kmem_cache_node's. There is special bootstrap code in
3701 	 * kmem_cache_open for slab_state == DOWN.
3702 	 */
3703 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3704 
3705 	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3706 		sizeof(struct kmem_cache_node),
3707 		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3708 
3709 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3710 
3711 	/* Able to allocate the per node structures */
3712 	slab_state = PARTIAL;
3713 
3714 	temp_kmem_cache = kmem_cache;
3715 	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3716 		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3717 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3718 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
3719 
3720 	/*
3721 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
3722 	 * kmem_cache_node is separately allocated so no need to
3723 	 * update any list pointers.
3724 	 */
3725 	temp_kmem_cache_node = kmem_cache_node;
3726 
3727 	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3728 	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3729 
3730 	kmem_cache_bootstrap_fixup(kmem_cache_node);
3731 
3732 	caches++;
3733 	kmem_cache_bootstrap_fixup(kmem_cache);
3734 	caches++;
3735 	/* Free temporary boot structure */
3736 	free_pages((unsigned long)temp_kmem_cache, order);
3737 
3738 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
3739 
3740 	/*
3741 	 * Patch up the size_index table if we have strange large alignment
3742 	 * requirements for the kmalloc array. This is only the case for
3743 	 * MIPS it seems. The standard arches will not generate any code here.
3744 	 *
3745 	 * Largest permitted alignment is 256 bytes due to the way we
3746 	 * handle the index determination for the smaller caches.
3747 	 *
3748 	 * Make sure that nothing crazy happens if someone starts tinkering
3749 	 * around with ARCH_KMALLOC_MINALIGN
3750 	 */
3751 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3752 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3753 
3754 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3755 		int elem = size_index_elem(i);
3756 		if (elem >= ARRAY_SIZE(size_index))
3757 			break;
3758 		size_index[elem] = KMALLOC_SHIFT_LOW;
3759 	}
3760 
3761 	if (KMALLOC_MIN_SIZE == 64) {
3762 		/*
3763 		 * The 96 byte size cache is not used if the alignment
3764 		 * is 64 byte.
3765 		 */
3766 		for (i = 64 + 8; i <= 96; i += 8)
3767 			size_index[size_index_elem(i)] = 7;
3768 	} else if (KMALLOC_MIN_SIZE == 128) {
3769 		/*
3770 		 * The 192 byte sized cache is not used if the alignment
3771 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3772 		 * instead.
3773 		 */
3774 		for (i = 128 + 8; i <= 192; i += 8)
3775 			size_index[size_index_elem(i)] = 8;
3776 	}
3777 
3778 	/* Caches that are not of the two-to-the-power-of size */
3779 	if (KMALLOC_MIN_SIZE <= 32) {
3780 		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3781 		caches++;
3782 	}
3783 
3784 	if (KMALLOC_MIN_SIZE <= 64) {
3785 		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3786 		caches++;
3787 	}
3788 
3789 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3790 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3791 		caches++;
3792 	}
3793 
3794 	slab_state = UP;
3795 
3796 	/* Provide the correct kmalloc names now that the caches are up */
3797 	if (KMALLOC_MIN_SIZE <= 32) {
3798 		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3799 		BUG_ON(!kmalloc_caches[1]->name);
3800 	}
3801 
3802 	if (KMALLOC_MIN_SIZE <= 64) {
3803 		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3804 		BUG_ON(!kmalloc_caches[2]->name);
3805 	}
3806 
3807 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3808 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3809 
3810 		BUG_ON(!s);
3811 		kmalloc_caches[i]->name = s;
3812 	}
3813 
3814 #ifdef CONFIG_SMP
3815 	register_cpu_notifier(&slab_notifier);
3816 #endif
3817 
3818 #ifdef CONFIG_ZONE_DMA
3819 	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3820 		struct kmem_cache *s = kmalloc_caches[i];
3821 
3822 		if (s && s->size) {
3823 			char *name = kasprintf(GFP_NOWAIT,
3824 				 "dma-kmalloc-%d", s->objsize);
3825 
3826 			BUG_ON(!name);
3827 			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
3828 				s->objsize, SLAB_CACHE_DMA);
3829 		}
3830 	}
3831 #endif
3832 	printk(KERN_INFO
3833 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3834 		" CPUs=%d, Nodes=%d\n",
3835 		caches, cache_line_size(),
3836 		slub_min_order, slub_max_order, slub_min_objects,
3837 		nr_cpu_ids, nr_node_ids);
3838 }
3839 
3840 void __init kmem_cache_init_late(void)
3841 {
3842 }
3843 
3844 /*
3845  * Find a mergeable slab cache
3846  */
3847 static int slab_unmergeable(struct kmem_cache *s)
3848 {
3849 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3850 		return 1;
3851 
3852 	if (s->ctor)
3853 		return 1;
3854 
3855 	/*
3856 	 * We may have set a slab to be unmergeable during bootstrap.
3857 	 */
3858 	if (s->refcount < 0)
3859 		return 1;
3860 
3861 	return 0;
3862 }
3863 
3864 static struct kmem_cache *find_mergeable(size_t size,
3865 		size_t align, unsigned long flags, const char *name,
3866 		void (*ctor)(void *))
3867 {
3868 	struct kmem_cache *s;
3869 
3870 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3871 		return NULL;
3872 
3873 	if (ctor)
3874 		return NULL;
3875 
3876 	size = ALIGN(size, sizeof(void *));
3877 	align = calculate_alignment(flags, align, size);
3878 	size = ALIGN(size, align);
3879 	flags = kmem_cache_flags(size, flags, name, NULL);
3880 
3881 	list_for_each_entry(s, &slab_caches, list) {
3882 		if (slab_unmergeable(s))
3883 			continue;
3884 
3885 		if (size > s->size)
3886 			continue;
3887 
3888 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3889 				continue;
3890 		/*
3891 		 * Check if alignment is compatible.
3892 		 * Courtesy of Adrian Drzewiecki
3893 		 */
3894 		if ((s->size & ~(align - 1)) != s->size)
3895 			continue;
3896 
3897 		if (s->size - size >= sizeof(void *))
3898 			continue;
3899 
3900 		return s;
3901 	}
3902 	return NULL;
3903 }
3904 
3905 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3906 		size_t align, unsigned long flags, void (*ctor)(void *))
3907 {
3908 	struct kmem_cache *s;
3909 	char *n;
3910 
3911 	if (WARN_ON(!name))
3912 		return NULL;
3913 
3914 	down_write(&slub_lock);
3915 	s = find_mergeable(size, align, flags, name, ctor);
3916 	if (s) {
3917 		s->refcount++;
3918 		/*
3919 		 * Adjust the object sizes so that we clear
3920 		 * the complete object on kzalloc.
3921 		 */
3922 		s->objsize = max(s->objsize, (int)size);
3923 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3924 
3925 		if (sysfs_slab_alias(s, name)) {
3926 			s->refcount--;
3927 			goto err;
3928 		}
3929 		up_write(&slub_lock);
3930 		return s;
3931 	}
3932 
3933 	n = kstrdup(name, GFP_KERNEL);
3934 	if (!n)
3935 		goto err;
3936 
3937 	s = kmalloc(kmem_size, GFP_KERNEL);
3938 	if (s) {
3939 		if (kmem_cache_open(s, n,
3940 				size, align, flags, ctor)) {
3941 			list_add(&s->list, &slab_caches);
3942 			if (sysfs_slab_add(s)) {
3943 				list_del(&s->list);
3944 				kfree(n);
3945 				kfree(s);
3946 				goto err;
3947 			}
3948 			up_write(&slub_lock);
3949 			return s;
3950 		}
3951 		kfree(n);
3952 		kfree(s);
3953 	}
3954 err:
3955 	up_write(&slub_lock);
3956 
3957 	if (flags & SLAB_PANIC)
3958 		panic("Cannot create slabcache %s\n", name);
3959 	else
3960 		s = NULL;
3961 	return s;
3962 }
3963 EXPORT_SYMBOL(kmem_cache_create);
3964 
3965 #ifdef CONFIG_SMP
3966 /*
3967  * Use the cpu notifier to insure that the cpu slabs are flushed when
3968  * necessary.
3969  */
3970 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3971 		unsigned long action, void *hcpu)
3972 {
3973 	long cpu = (long)hcpu;
3974 	struct kmem_cache *s;
3975 	unsigned long flags;
3976 
3977 	switch (action) {
3978 	case CPU_UP_CANCELED:
3979 	case CPU_UP_CANCELED_FROZEN:
3980 	case CPU_DEAD:
3981 	case CPU_DEAD_FROZEN:
3982 		down_read(&slub_lock);
3983 		list_for_each_entry(s, &slab_caches, list) {
3984 			local_irq_save(flags);
3985 			__flush_cpu_slab(s, cpu);
3986 			local_irq_restore(flags);
3987 		}
3988 		up_read(&slub_lock);
3989 		break;
3990 	default:
3991 		break;
3992 	}
3993 	return NOTIFY_OK;
3994 }
3995 
3996 static struct notifier_block __cpuinitdata slab_notifier = {
3997 	.notifier_call = slab_cpuup_callback
3998 };
3999 
4000 #endif
4001 
4002 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4003 {
4004 	struct kmem_cache *s;
4005 	void *ret;
4006 
4007 	if (unlikely(size > SLUB_MAX_SIZE))
4008 		return kmalloc_large(size, gfpflags);
4009 
4010 	s = get_slab(size, gfpflags);
4011 
4012 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4013 		return s;
4014 
4015 	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
4016 
4017 	/* Honor the call site pointer we received. */
4018 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4019 
4020 	return ret;
4021 }
4022 
4023 #ifdef CONFIG_NUMA
4024 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4025 					int node, unsigned long caller)
4026 {
4027 	struct kmem_cache *s;
4028 	void *ret;
4029 
4030 	if (unlikely(size > SLUB_MAX_SIZE)) {
4031 		ret = kmalloc_large_node(size, gfpflags, node);
4032 
4033 		trace_kmalloc_node(caller, ret,
4034 				   size, PAGE_SIZE << get_order(size),
4035 				   gfpflags, node);
4036 
4037 		return ret;
4038 	}
4039 
4040 	s = get_slab(size, gfpflags);
4041 
4042 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4043 		return s;
4044 
4045 	ret = slab_alloc(s, gfpflags, node, caller);
4046 
4047 	/* Honor the call site pointer we received. */
4048 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4049 
4050 	return ret;
4051 }
4052 #endif
4053 
4054 #ifdef CONFIG_SYSFS
4055 static int count_inuse(struct page *page)
4056 {
4057 	return page->inuse;
4058 }
4059 
4060 static int count_total(struct page *page)
4061 {
4062 	return page->objects;
4063 }
4064 #endif
4065 
4066 #ifdef CONFIG_SLUB_DEBUG
4067 static int validate_slab(struct kmem_cache *s, struct page *page,
4068 						unsigned long *map)
4069 {
4070 	void *p;
4071 	void *addr = page_address(page);
4072 
4073 	if (!check_slab(s, page) ||
4074 			!on_freelist(s, page, NULL))
4075 		return 0;
4076 
4077 	/* Now we know that a valid freelist exists */
4078 	bitmap_zero(map, page->objects);
4079 
4080 	get_map(s, page, map);
4081 	for_each_object(p, s, addr, page->objects) {
4082 		if (test_bit(slab_index(p, s, addr), map))
4083 			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4084 				return 0;
4085 	}
4086 
4087 	for_each_object(p, s, addr, page->objects)
4088 		if (!test_bit(slab_index(p, s, addr), map))
4089 			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4090 				return 0;
4091 	return 1;
4092 }
4093 
4094 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4095 						unsigned long *map)
4096 {
4097 	slab_lock(page);
4098 	validate_slab(s, page, map);
4099 	slab_unlock(page);
4100 }
4101 
4102 static int validate_slab_node(struct kmem_cache *s,
4103 		struct kmem_cache_node *n, unsigned long *map)
4104 {
4105 	unsigned long count = 0;
4106 	struct page *page;
4107 	unsigned long flags;
4108 
4109 	spin_lock_irqsave(&n->list_lock, flags);
4110 
4111 	list_for_each_entry(page, &n->partial, lru) {
4112 		validate_slab_slab(s, page, map);
4113 		count++;
4114 	}
4115 	if (count != n->nr_partial)
4116 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
4117 			"counter=%ld\n", s->name, count, n->nr_partial);
4118 
4119 	if (!(s->flags & SLAB_STORE_USER))
4120 		goto out;
4121 
4122 	list_for_each_entry(page, &n->full, lru) {
4123 		validate_slab_slab(s, page, map);
4124 		count++;
4125 	}
4126 	if (count != atomic_long_read(&n->nr_slabs))
4127 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
4128 			"counter=%ld\n", s->name, count,
4129 			atomic_long_read(&n->nr_slabs));
4130 
4131 out:
4132 	spin_unlock_irqrestore(&n->list_lock, flags);
4133 	return count;
4134 }
4135 
4136 static long validate_slab_cache(struct kmem_cache *s)
4137 {
4138 	int node;
4139 	unsigned long count = 0;
4140 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4141 				sizeof(unsigned long), GFP_KERNEL);
4142 
4143 	if (!map)
4144 		return -ENOMEM;
4145 
4146 	flush_all(s);
4147 	for_each_node_state(node, N_NORMAL_MEMORY) {
4148 		struct kmem_cache_node *n = get_node(s, node);
4149 
4150 		count += validate_slab_node(s, n, map);
4151 	}
4152 	kfree(map);
4153 	return count;
4154 }
4155 /*
4156  * Generate lists of code addresses where slabcache objects are allocated
4157  * and freed.
4158  */
4159 
4160 struct location {
4161 	unsigned long count;
4162 	unsigned long addr;
4163 	long long sum_time;
4164 	long min_time;
4165 	long max_time;
4166 	long min_pid;
4167 	long max_pid;
4168 	DECLARE_BITMAP(cpus, NR_CPUS);
4169 	nodemask_t nodes;
4170 };
4171 
4172 struct loc_track {
4173 	unsigned long max;
4174 	unsigned long count;
4175 	struct location *loc;
4176 };
4177 
4178 static void free_loc_track(struct loc_track *t)
4179 {
4180 	if (t->max)
4181 		free_pages((unsigned long)t->loc,
4182 			get_order(sizeof(struct location) * t->max));
4183 }
4184 
4185 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4186 {
4187 	struct location *l;
4188 	int order;
4189 
4190 	order = get_order(sizeof(struct location) * max);
4191 
4192 	l = (void *)__get_free_pages(flags, order);
4193 	if (!l)
4194 		return 0;
4195 
4196 	if (t->count) {
4197 		memcpy(l, t->loc, sizeof(struct location) * t->count);
4198 		free_loc_track(t);
4199 	}
4200 	t->max = max;
4201 	t->loc = l;
4202 	return 1;
4203 }
4204 
4205 static int add_location(struct loc_track *t, struct kmem_cache *s,
4206 				const struct track *track)
4207 {
4208 	long start, end, pos;
4209 	struct location *l;
4210 	unsigned long caddr;
4211 	unsigned long age = jiffies - track->when;
4212 
4213 	start = -1;
4214 	end = t->count;
4215 
4216 	for ( ; ; ) {
4217 		pos = start + (end - start + 1) / 2;
4218 
4219 		/*
4220 		 * There is nothing at "end". If we end up there
4221 		 * we need to add something to before end.
4222 		 */
4223 		if (pos == end)
4224 			break;
4225 
4226 		caddr = t->loc[pos].addr;
4227 		if (track->addr == caddr) {
4228 
4229 			l = &t->loc[pos];
4230 			l->count++;
4231 			if (track->when) {
4232 				l->sum_time += age;
4233 				if (age < l->min_time)
4234 					l->min_time = age;
4235 				if (age > l->max_time)
4236 					l->max_time = age;
4237 
4238 				if (track->pid < l->min_pid)
4239 					l->min_pid = track->pid;
4240 				if (track->pid > l->max_pid)
4241 					l->max_pid = track->pid;
4242 
4243 				cpumask_set_cpu(track->cpu,
4244 						to_cpumask(l->cpus));
4245 			}
4246 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4247 			return 1;
4248 		}
4249 
4250 		if (track->addr < caddr)
4251 			end = pos;
4252 		else
4253 			start = pos;
4254 	}
4255 
4256 	/*
4257 	 * Not found. Insert new tracking element.
4258 	 */
4259 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4260 		return 0;
4261 
4262 	l = t->loc + pos;
4263 	if (pos < t->count)
4264 		memmove(l + 1, l,
4265 			(t->count - pos) * sizeof(struct location));
4266 	t->count++;
4267 	l->count = 1;
4268 	l->addr = track->addr;
4269 	l->sum_time = age;
4270 	l->min_time = age;
4271 	l->max_time = age;
4272 	l->min_pid = track->pid;
4273 	l->max_pid = track->pid;
4274 	cpumask_clear(to_cpumask(l->cpus));
4275 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4276 	nodes_clear(l->nodes);
4277 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4278 	return 1;
4279 }
4280 
4281 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4282 		struct page *page, enum track_item alloc,
4283 		unsigned long *map)
4284 {
4285 	void *addr = page_address(page);
4286 	void *p;
4287 
4288 	bitmap_zero(map, page->objects);
4289 	get_map(s, page, map);
4290 
4291 	for_each_object(p, s, addr, page->objects)
4292 		if (!test_bit(slab_index(p, s, addr), map))
4293 			add_location(t, s, get_track(s, p, alloc));
4294 }
4295 
4296 static int list_locations(struct kmem_cache *s, char *buf,
4297 					enum track_item alloc)
4298 {
4299 	int len = 0;
4300 	unsigned long i;
4301 	struct loc_track t = { 0, 0, NULL };
4302 	int node;
4303 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4304 				     sizeof(unsigned long), GFP_KERNEL);
4305 
4306 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4307 				     GFP_TEMPORARY)) {
4308 		kfree(map);
4309 		return sprintf(buf, "Out of memory\n");
4310 	}
4311 	/* Push back cpu slabs */
4312 	flush_all(s);
4313 
4314 	for_each_node_state(node, N_NORMAL_MEMORY) {
4315 		struct kmem_cache_node *n = get_node(s, node);
4316 		unsigned long flags;
4317 		struct page *page;
4318 
4319 		if (!atomic_long_read(&n->nr_slabs))
4320 			continue;
4321 
4322 		spin_lock_irqsave(&n->list_lock, flags);
4323 		list_for_each_entry(page, &n->partial, lru)
4324 			process_slab(&t, s, page, alloc, map);
4325 		list_for_each_entry(page, &n->full, lru)
4326 			process_slab(&t, s, page, alloc, map);
4327 		spin_unlock_irqrestore(&n->list_lock, flags);
4328 	}
4329 
4330 	for (i = 0; i < t.count; i++) {
4331 		struct location *l = &t.loc[i];
4332 
4333 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4334 			break;
4335 		len += sprintf(buf + len, "%7ld ", l->count);
4336 
4337 		if (l->addr)
4338 			len += sprintf(buf + len, "%pS", (void *)l->addr);
4339 		else
4340 			len += sprintf(buf + len, "<not-available>");
4341 
4342 		if (l->sum_time != l->min_time) {
4343 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4344 				l->min_time,
4345 				(long)div_u64(l->sum_time, l->count),
4346 				l->max_time);
4347 		} else
4348 			len += sprintf(buf + len, " age=%ld",
4349 				l->min_time);
4350 
4351 		if (l->min_pid != l->max_pid)
4352 			len += sprintf(buf + len, " pid=%ld-%ld",
4353 				l->min_pid, l->max_pid);
4354 		else
4355 			len += sprintf(buf + len, " pid=%ld",
4356 				l->min_pid);
4357 
4358 		if (num_online_cpus() > 1 &&
4359 				!cpumask_empty(to_cpumask(l->cpus)) &&
4360 				len < PAGE_SIZE - 60) {
4361 			len += sprintf(buf + len, " cpus=");
4362 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4363 						 to_cpumask(l->cpus));
4364 		}
4365 
4366 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4367 				len < PAGE_SIZE - 60) {
4368 			len += sprintf(buf + len, " nodes=");
4369 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4370 					l->nodes);
4371 		}
4372 
4373 		len += sprintf(buf + len, "\n");
4374 	}
4375 
4376 	free_loc_track(&t);
4377 	kfree(map);
4378 	if (!t.count)
4379 		len += sprintf(buf, "No data\n");
4380 	return len;
4381 }
4382 #endif
4383 
4384 #ifdef SLUB_RESILIENCY_TEST
4385 static void resiliency_test(void)
4386 {
4387 	u8 *p;
4388 
4389 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
4390 
4391 	printk(KERN_ERR "SLUB resiliency testing\n");
4392 	printk(KERN_ERR "-----------------------\n");
4393 	printk(KERN_ERR "A. Corruption after allocation\n");
4394 
4395 	p = kzalloc(16, GFP_KERNEL);
4396 	p[16] = 0x12;
4397 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4398 			" 0x12->0x%p\n\n", p + 16);
4399 
4400 	validate_slab_cache(kmalloc_caches[4]);
4401 
4402 	/* Hmmm... The next two are dangerous */
4403 	p = kzalloc(32, GFP_KERNEL);
4404 	p[32 + sizeof(void *)] = 0x34;
4405 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4406 			" 0x34 -> -0x%p\n", p);
4407 	printk(KERN_ERR
4408 		"If allocated object is overwritten then not detectable\n\n");
4409 
4410 	validate_slab_cache(kmalloc_caches[5]);
4411 	p = kzalloc(64, GFP_KERNEL);
4412 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4413 	*p = 0x56;
4414 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4415 									p);
4416 	printk(KERN_ERR
4417 		"If allocated object is overwritten then not detectable\n\n");
4418 	validate_slab_cache(kmalloc_caches[6]);
4419 
4420 	printk(KERN_ERR "\nB. Corruption after free\n");
4421 	p = kzalloc(128, GFP_KERNEL);
4422 	kfree(p);
4423 	*p = 0x78;
4424 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4425 	validate_slab_cache(kmalloc_caches[7]);
4426 
4427 	p = kzalloc(256, GFP_KERNEL);
4428 	kfree(p);
4429 	p[50] = 0x9a;
4430 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4431 			p);
4432 	validate_slab_cache(kmalloc_caches[8]);
4433 
4434 	p = kzalloc(512, GFP_KERNEL);
4435 	kfree(p);
4436 	p[512] = 0xab;
4437 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4438 	validate_slab_cache(kmalloc_caches[9]);
4439 }
4440 #else
4441 #ifdef CONFIG_SYSFS
4442 static void resiliency_test(void) {};
4443 #endif
4444 #endif
4445 
4446 #ifdef CONFIG_SYSFS
4447 enum slab_stat_type {
4448 	SL_ALL,			/* All slabs */
4449 	SL_PARTIAL,		/* Only partially allocated slabs */
4450 	SL_CPU,			/* Only slabs used for cpu caches */
4451 	SL_OBJECTS,		/* Determine allocated objects not slabs */
4452 	SL_TOTAL		/* Determine object capacity not slabs */
4453 };
4454 
4455 #define SO_ALL		(1 << SL_ALL)
4456 #define SO_PARTIAL	(1 << SL_PARTIAL)
4457 #define SO_CPU		(1 << SL_CPU)
4458 #define SO_OBJECTS	(1 << SL_OBJECTS)
4459 #define SO_TOTAL	(1 << SL_TOTAL)
4460 
4461 static ssize_t show_slab_objects(struct kmem_cache *s,
4462 			    char *buf, unsigned long flags)
4463 {
4464 	unsigned long total = 0;
4465 	int node;
4466 	int x;
4467 	unsigned long *nodes;
4468 	unsigned long *per_cpu;
4469 
4470 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4471 	if (!nodes)
4472 		return -ENOMEM;
4473 	per_cpu = nodes + nr_node_ids;
4474 
4475 	if (flags & SO_CPU) {
4476 		int cpu;
4477 
4478 		for_each_possible_cpu(cpu) {
4479 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
4480 			int node = ACCESS_ONCE(c->node);
4481 			struct page *page;
4482 
4483 			if (node < 0)
4484 				continue;
4485 			page = ACCESS_ONCE(c->page);
4486 			if (page) {
4487 				if (flags & SO_TOTAL)
4488 					x = page->objects;
4489 				else if (flags & SO_OBJECTS)
4490 					x = page->inuse;
4491 				else
4492 					x = 1;
4493 
4494 				total += x;
4495 				nodes[node] += x;
4496 			}
4497 			page = c->partial;
4498 
4499 			if (page) {
4500 				x = page->pobjects;
4501 				total += x;
4502 				nodes[node] += x;
4503 			}
4504 			per_cpu[node]++;
4505 		}
4506 	}
4507 
4508 	lock_memory_hotplug();
4509 #ifdef CONFIG_SLUB_DEBUG
4510 	if (flags & SO_ALL) {
4511 		for_each_node_state(node, N_NORMAL_MEMORY) {
4512 			struct kmem_cache_node *n = get_node(s, node);
4513 
4514 		if (flags & SO_TOTAL)
4515 			x = atomic_long_read(&n->total_objects);
4516 		else if (flags & SO_OBJECTS)
4517 			x = atomic_long_read(&n->total_objects) -
4518 				count_partial(n, count_free);
4519 
4520 			else
4521 				x = atomic_long_read(&n->nr_slabs);
4522 			total += x;
4523 			nodes[node] += x;
4524 		}
4525 
4526 	} else
4527 #endif
4528 	if (flags & SO_PARTIAL) {
4529 		for_each_node_state(node, N_NORMAL_MEMORY) {
4530 			struct kmem_cache_node *n = get_node(s, node);
4531 
4532 			if (flags & SO_TOTAL)
4533 				x = count_partial(n, count_total);
4534 			else if (flags & SO_OBJECTS)
4535 				x = count_partial(n, count_inuse);
4536 			else
4537 				x = n->nr_partial;
4538 			total += x;
4539 			nodes[node] += x;
4540 		}
4541 	}
4542 	x = sprintf(buf, "%lu", total);
4543 #ifdef CONFIG_NUMA
4544 	for_each_node_state(node, N_NORMAL_MEMORY)
4545 		if (nodes[node])
4546 			x += sprintf(buf + x, " N%d=%lu",
4547 					node, nodes[node]);
4548 #endif
4549 	unlock_memory_hotplug();
4550 	kfree(nodes);
4551 	return x + sprintf(buf + x, "\n");
4552 }
4553 
4554 #ifdef CONFIG_SLUB_DEBUG
4555 static int any_slab_objects(struct kmem_cache *s)
4556 {
4557 	int node;
4558 
4559 	for_each_online_node(node) {
4560 		struct kmem_cache_node *n = get_node(s, node);
4561 
4562 		if (!n)
4563 			continue;
4564 
4565 		if (atomic_long_read(&n->total_objects))
4566 			return 1;
4567 	}
4568 	return 0;
4569 }
4570 #endif
4571 
4572 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4573 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4574 
4575 struct slab_attribute {
4576 	struct attribute attr;
4577 	ssize_t (*show)(struct kmem_cache *s, char *buf);
4578 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4579 };
4580 
4581 #define SLAB_ATTR_RO(_name) \
4582 	static struct slab_attribute _name##_attr = \
4583 	__ATTR(_name, 0400, _name##_show, NULL)
4584 
4585 #define SLAB_ATTR(_name) \
4586 	static struct slab_attribute _name##_attr =  \
4587 	__ATTR(_name, 0600, _name##_show, _name##_store)
4588 
4589 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4590 {
4591 	return sprintf(buf, "%d\n", s->size);
4592 }
4593 SLAB_ATTR_RO(slab_size);
4594 
4595 static ssize_t align_show(struct kmem_cache *s, char *buf)
4596 {
4597 	return sprintf(buf, "%d\n", s->align);
4598 }
4599 SLAB_ATTR_RO(align);
4600 
4601 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4602 {
4603 	return sprintf(buf, "%d\n", s->objsize);
4604 }
4605 SLAB_ATTR_RO(object_size);
4606 
4607 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4608 {
4609 	return sprintf(buf, "%d\n", oo_objects(s->oo));
4610 }
4611 SLAB_ATTR_RO(objs_per_slab);
4612 
4613 static ssize_t order_store(struct kmem_cache *s,
4614 				const char *buf, size_t length)
4615 {
4616 	unsigned long order;
4617 	int err;
4618 
4619 	err = strict_strtoul(buf, 10, &order);
4620 	if (err)
4621 		return err;
4622 
4623 	if (order > slub_max_order || order < slub_min_order)
4624 		return -EINVAL;
4625 
4626 	calculate_sizes(s, order);
4627 	return length;
4628 }
4629 
4630 static ssize_t order_show(struct kmem_cache *s, char *buf)
4631 {
4632 	return sprintf(buf, "%d\n", oo_order(s->oo));
4633 }
4634 SLAB_ATTR(order);
4635 
4636 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4637 {
4638 	return sprintf(buf, "%lu\n", s->min_partial);
4639 }
4640 
4641 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4642 				 size_t length)
4643 {
4644 	unsigned long min;
4645 	int err;
4646 
4647 	err = strict_strtoul(buf, 10, &min);
4648 	if (err)
4649 		return err;
4650 
4651 	set_min_partial(s, min);
4652 	return length;
4653 }
4654 SLAB_ATTR(min_partial);
4655 
4656 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4657 {
4658 	return sprintf(buf, "%u\n", s->cpu_partial);
4659 }
4660 
4661 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4662 				 size_t length)
4663 {
4664 	unsigned long objects;
4665 	int err;
4666 
4667 	err = strict_strtoul(buf, 10, &objects);
4668 	if (err)
4669 		return err;
4670 	if (objects && kmem_cache_debug(s))
4671 		return -EINVAL;
4672 
4673 	s->cpu_partial = objects;
4674 	flush_all(s);
4675 	return length;
4676 }
4677 SLAB_ATTR(cpu_partial);
4678 
4679 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4680 {
4681 	if (!s->ctor)
4682 		return 0;
4683 	return sprintf(buf, "%pS\n", s->ctor);
4684 }
4685 SLAB_ATTR_RO(ctor);
4686 
4687 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4688 {
4689 	return sprintf(buf, "%d\n", s->refcount - 1);
4690 }
4691 SLAB_ATTR_RO(aliases);
4692 
4693 static ssize_t partial_show(struct kmem_cache *s, char *buf)
4694 {
4695 	return show_slab_objects(s, buf, SO_PARTIAL);
4696 }
4697 SLAB_ATTR_RO(partial);
4698 
4699 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4700 {
4701 	return show_slab_objects(s, buf, SO_CPU);
4702 }
4703 SLAB_ATTR_RO(cpu_slabs);
4704 
4705 static ssize_t objects_show(struct kmem_cache *s, char *buf)
4706 {
4707 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4708 }
4709 SLAB_ATTR_RO(objects);
4710 
4711 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4712 {
4713 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4714 }
4715 SLAB_ATTR_RO(objects_partial);
4716 
4717 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4718 {
4719 	int objects = 0;
4720 	int pages = 0;
4721 	int cpu;
4722 	int len;
4723 
4724 	for_each_online_cpu(cpu) {
4725 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4726 
4727 		if (page) {
4728 			pages += page->pages;
4729 			objects += page->pobjects;
4730 		}
4731 	}
4732 
4733 	len = sprintf(buf, "%d(%d)", objects, pages);
4734 
4735 #ifdef CONFIG_SMP
4736 	for_each_online_cpu(cpu) {
4737 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
4738 
4739 		if (page && len < PAGE_SIZE - 20)
4740 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4741 				page->pobjects, page->pages);
4742 	}
4743 #endif
4744 	return len + sprintf(buf + len, "\n");
4745 }
4746 SLAB_ATTR_RO(slabs_cpu_partial);
4747 
4748 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4749 {
4750 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4751 }
4752 
4753 static ssize_t reclaim_account_store(struct kmem_cache *s,
4754 				const char *buf, size_t length)
4755 {
4756 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4757 	if (buf[0] == '1')
4758 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4759 	return length;
4760 }
4761 SLAB_ATTR(reclaim_account);
4762 
4763 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4764 {
4765 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4766 }
4767 SLAB_ATTR_RO(hwcache_align);
4768 
4769 #ifdef CONFIG_ZONE_DMA
4770 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4771 {
4772 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4773 }
4774 SLAB_ATTR_RO(cache_dma);
4775 #endif
4776 
4777 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4778 {
4779 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4780 }
4781 SLAB_ATTR_RO(destroy_by_rcu);
4782 
4783 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4784 {
4785 	return sprintf(buf, "%d\n", s->reserved);
4786 }
4787 SLAB_ATTR_RO(reserved);
4788 
4789 #ifdef CONFIG_SLUB_DEBUG
4790 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4791 {
4792 	return show_slab_objects(s, buf, SO_ALL);
4793 }
4794 SLAB_ATTR_RO(slabs);
4795 
4796 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4797 {
4798 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4799 }
4800 SLAB_ATTR_RO(total_objects);
4801 
4802 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4803 {
4804 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4805 }
4806 
4807 static ssize_t sanity_checks_store(struct kmem_cache *s,
4808 				const char *buf, size_t length)
4809 {
4810 	s->flags &= ~SLAB_DEBUG_FREE;
4811 	if (buf[0] == '1') {
4812 		s->flags &= ~__CMPXCHG_DOUBLE;
4813 		s->flags |= SLAB_DEBUG_FREE;
4814 	}
4815 	return length;
4816 }
4817 SLAB_ATTR(sanity_checks);
4818 
4819 static ssize_t trace_show(struct kmem_cache *s, char *buf)
4820 {
4821 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4822 }
4823 
4824 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4825 							size_t length)
4826 {
4827 	s->flags &= ~SLAB_TRACE;
4828 	if (buf[0] == '1') {
4829 		s->flags &= ~__CMPXCHG_DOUBLE;
4830 		s->flags |= SLAB_TRACE;
4831 	}
4832 	return length;
4833 }
4834 SLAB_ATTR(trace);
4835 
4836 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4837 {
4838 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4839 }
4840 
4841 static ssize_t red_zone_store(struct kmem_cache *s,
4842 				const char *buf, size_t length)
4843 {
4844 	if (any_slab_objects(s))
4845 		return -EBUSY;
4846 
4847 	s->flags &= ~SLAB_RED_ZONE;
4848 	if (buf[0] == '1') {
4849 		s->flags &= ~__CMPXCHG_DOUBLE;
4850 		s->flags |= SLAB_RED_ZONE;
4851 	}
4852 	calculate_sizes(s, -1);
4853 	return length;
4854 }
4855 SLAB_ATTR(red_zone);
4856 
4857 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4858 {
4859 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4860 }
4861 
4862 static ssize_t poison_store(struct kmem_cache *s,
4863 				const char *buf, size_t length)
4864 {
4865 	if (any_slab_objects(s))
4866 		return -EBUSY;
4867 
4868 	s->flags &= ~SLAB_POISON;
4869 	if (buf[0] == '1') {
4870 		s->flags &= ~__CMPXCHG_DOUBLE;
4871 		s->flags |= SLAB_POISON;
4872 	}
4873 	calculate_sizes(s, -1);
4874 	return length;
4875 }
4876 SLAB_ATTR(poison);
4877 
4878 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4879 {
4880 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4881 }
4882 
4883 static ssize_t store_user_store(struct kmem_cache *s,
4884 				const char *buf, size_t length)
4885 {
4886 	if (any_slab_objects(s))
4887 		return -EBUSY;
4888 
4889 	s->flags &= ~SLAB_STORE_USER;
4890 	if (buf[0] == '1') {
4891 		s->flags &= ~__CMPXCHG_DOUBLE;
4892 		s->flags |= SLAB_STORE_USER;
4893 	}
4894 	calculate_sizes(s, -1);
4895 	return length;
4896 }
4897 SLAB_ATTR(store_user);
4898 
4899 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4900 {
4901 	return 0;
4902 }
4903 
4904 static ssize_t validate_store(struct kmem_cache *s,
4905 			const char *buf, size_t length)
4906 {
4907 	int ret = -EINVAL;
4908 
4909 	if (buf[0] == '1') {
4910 		ret = validate_slab_cache(s);
4911 		if (ret >= 0)
4912 			ret = length;
4913 	}
4914 	return ret;
4915 }
4916 SLAB_ATTR(validate);
4917 
4918 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4919 {
4920 	if (!(s->flags & SLAB_STORE_USER))
4921 		return -ENOSYS;
4922 	return list_locations(s, buf, TRACK_ALLOC);
4923 }
4924 SLAB_ATTR_RO(alloc_calls);
4925 
4926 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4927 {
4928 	if (!(s->flags & SLAB_STORE_USER))
4929 		return -ENOSYS;
4930 	return list_locations(s, buf, TRACK_FREE);
4931 }
4932 SLAB_ATTR_RO(free_calls);
4933 #endif /* CONFIG_SLUB_DEBUG */
4934 
4935 #ifdef CONFIG_FAILSLAB
4936 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4937 {
4938 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4939 }
4940 
4941 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4942 							size_t length)
4943 {
4944 	s->flags &= ~SLAB_FAILSLAB;
4945 	if (buf[0] == '1')
4946 		s->flags |= SLAB_FAILSLAB;
4947 	return length;
4948 }
4949 SLAB_ATTR(failslab);
4950 #endif
4951 
4952 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4953 {
4954 	return 0;
4955 }
4956 
4957 static ssize_t shrink_store(struct kmem_cache *s,
4958 			const char *buf, size_t length)
4959 {
4960 	if (buf[0] == '1') {
4961 		int rc = kmem_cache_shrink(s);
4962 
4963 		if (rc)
4964 			return rc;
4965 	} else
4966 		return -EINVAL;
4967 	return length;
4968 }
4969 SLAB_ATTR(shrink);
4970 
4971 #ifdef CONFIG_NUMA
4972 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4973 {
4974 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4975 }
4976 
4977 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4978 				const char *buf, size_t length)
4979 {
4980 	unsigned long ratio;
4981 	int err;
4982 
4983 	err = strict_strtoul(buf, 10, &ratio);
4984 	if (err)
4985 		return err;
4986 
4987 	if (ratio <= 100)
4988 		s->remote_node_defrag_ratio = ratio * 10;
4989 
4990 	return length;
4991 }
4992 SLAB_ATTR(remote_node_defrag_ratio);
4993 #endif
4994 
4995 #ifdef CONFIG_SLUB_STATS
4996 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4997 {
4998 	unsigned long sum  = 0;
4999 	int cpu;
5000 	int len;
5001 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5002 
5003 	if (!data)
5004 		return -ENOMEM;
5005 
5006 	for_each_online_cpu(cpu) {
5007 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5008 
5009 		data[cpu] = x;
5010 		sum += x;
5011 	}
5012 
5013 	len = sprintf(buf, "%lu", sum);
5014 
5015 #ifdef CONFIG_SMP
5016 	for_each_online_cpu(cpu) {
5017 		if (data[cpu] && len < PAGE_SIZE - 20)
5018 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5019 	}
5020 #endif
5021 	kfree(data);
5022 	return len + sprintf(buf + len, "\n");
5023 }
5024 
5025 static void clear_stat(struct kmem_cache *s, enum stat_item si)
5026 {
5027 	int cpu;
5028 
5029 	for_each_online_cpu(cpu)
5030 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5031 }
5032 
5033 #define STAT_ATTR(si, text) 					\
5034 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5035 {								\
5036 	return show_stat(s, buf, si);				\
5037 }								\
5038 static ssize_t text##_store(struct kmem_cache *s,		\
5039 				const char *buf, size_t length)	\
5040 {								\
5041 	if (buf[0] != '0')					\
5042 		return -EINVAL;					\
5043 	clear_stat(s, si);					\
5044 	return length;						\
5045 }								\
5046 SLAB_ATTR(text);						\
5047 
5048 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5049 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5050 STAT_ATTR(FREE_FASTPATH, free_fastpath);
5051 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5052 STAT_ATTR(FREE_FROZEN, free_frozen);
5053 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5054 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5055 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5056 STAT_ATTR(ALLOC_SLAB, alloc_slab);
5057 STAT_ATTR(ALLOC_REFILL, alloc_refill);
5058 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5059 STAT_ATTR(FREE_SLAB, free_slab);
5060 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5061 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5062 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5063 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5064 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5065 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5066 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5067 STAT_ATTR(ORDER_FALLBACK, order_fallback);
5068 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5069 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5070 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5071 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5072 #endif
5073 
5074 static struct attribute *slab_attrs[] = {
5075 	&slab_size_attr.attr,
5076 	&object_size_attr.attr,
5077 	&objs_per_slab_attr.attr,
5078 	&order_attr.attr,
5079 	&min_partial_attr.attr,
5080 	&cpu_partial_attr.attr,
5081 	&objects_attr.attr,
5082 	&objects_partial_attr.attr,
5083 	&partial_attr.attr,
5084 	&cpu_slabs_attr.attr,
5085 	&ctor_attr.attr,
5086 	&aliases_attr.attr,
5087 	&align_attr.attr,
5088 	&hwcache_align_attr.attr,
5089 	&reclaim_account_attr.attr,
5090 	&destroy_by_rcu_attr.attr,
5091 	&shrink_attr.attr,
5092 	&reserved_attr.attr,
5093 	&slabs_cpu_partial_attr.attr,
5094 #ifdef CONFIG_SLUB_DEBUG
5095 	&total_objects_attr.attr,
5096 	&slabs_attr.attr,
5097 	&sanity_checks_attr.attr,
5098 	&trace_attr.attr,
5099 	&red_zone_attr.attr,
5100 	&poison_attr.attr,
5101 	&store_user_attr.attr,
5102 	&validate_attr.attr,
5103 	&alloc_calls_attr.attr,
5104 	&free_calls_attr.attr,
5105 #endif
5106 #ifdef CONFIG_ZONE_DMA
5107 	&cache_dma_attr.attr,
5108 #endif
5109 #ifdef CONFIG_NUMA
5110 	&remote_node_defrag_ratio_attr.attr,
5111 #endif
5112 #ifdef CONFIG_SLUB_STATS
5113 	&alloc_fastpath_attr.attr,
5114 	&alloc_slowpath_attr.attr,
5115 	&free_fastpath_attr.attr,
5116 	&free_slowpath_attr.attr,
5117 	&free_frozen_attr.attr,
5118 	&free_add_partial_attr.attr,
5119 	&free_remove_partial_attr.attr,
5120 	&alloc_from_partial_attr.attr,
5121 	&alloc_slab_attr.attr,
5122 	&alloc_refill_attr.attr,
5123 	&alloc_node_mismatch_attr.attr,
5124 	&free_slab_attr.attr,
5125 	&cpuslab_flush_attr.attr,
5126 	&deactivate_full_attr.attr,
5127 	&deactivate_empty_attr.attr,
5128 	&deactivate_to_head_attr.attr,
5129 	&deactivate_to_tail_attr.attr,
5130 	&deactivate_remote_frees_attr.attr,
5131 	&deactivate_bypass_attr.attr,
5132 	&order_fallback_attr.attr,
5133 	&cmpxchg_double_fail_attr.attr,
5134 	&cmpxchg_double_cpu_fail_attr.attr,
5135 	&cpu_partial_alloc_attr.attr,
5136 	&cpu_partial_free_attr.attr,
5137 #endif
5138 #ifdef CONFIG_FAILSLAB
5139 	&failslab_attr.attr,
5140 #endif
5141 
5142 	NULL
5143 };
5144 
5145 static struct attribute_group slab_attr_group = {
5146 	.attrs = slab_attrs,
5147 };
5148 
5149 static ssize_t slab_attr_show(struct kobject *kobj,
5150 				struct attribute *attr,
5151 				char *buf)
5152 {
5153 	struct slab_attribute *attribute;
5154 	struct kmem_cache *s;
5155 	int err;
5156 
5157 	attribute = to_slab_attr(attr);
5158 	s = to_slab(kobj);
5159 
5160 	if (!attribute->show)
5161 		return -EIO;
5162 
5163 	err = attribute->show(s, buf);
5164 
5165 	return err;
5166 }
5167 
5168 static ssize_t slab_attr_store(struct kobject *kobj,
5169 				struct attribute *attr,
5170 				const char *buf, size_t len)
5171 {
5172 	struct slab_attribute *attribute;
5173 	struct kmem_cache *s;
5174 	int err;
5175 
5176 	attribute = to_slab_attr(attr);
5177 	s = to_slab(kobj);
5178 
5179 	if (!attribute->store)
5180 		return -EIO;
5181 
5182 	err = attribute->store(s, buf, len);
5183 
5184 	return err;
5185 }
5186 
5187 static void kmem_cache_release(struct kobject *kobj)
5188 {
5189 	struct kmem_cache *s = to_slab(kobj);
5190 
5191 	kfree(s->name);
5192 	kfree(s);
5193 }
5194 
5195 static const struct sysfs_ops slab_sysfs_ops = {
5196 	.show = slab_attr_show,
5197 	.store = slab_attr_store,
5198 };
5199 
5200 static struct kobj_type slab_ktype = {
5201 	.sysfs_ops = &slab_sysfs_ops,
5202 	.release = kmem_cache_release
5203 };
5204 
5205 static int uevent_filter(struct kset *kset, struct kobject *kobj)
5206 {
5207 	struct kobj_type *ktype = get_ktype(kobj);
5208 
5209 	if (ktype == &slab_ktype)
5210 		return 1;
5211 	return 0;
5212 }
5213 
5214 static const struct kset_uevent_ops slab_uevent_ops = {
5215 	.filter = uevent_filter,
5216 };
5217 
5218 static struct kset *slab_kset;
5219 
5220 #define ID_STR_LENGTH 64
5221 
5222 /* Create a unique string id for a slab cache:
5223  *
5224  * Format	:[flags-]size
5225  */
5226 static char *create_unique_id(struct kmem_cache *s)
5227 {
5228 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5229 	char *p = name;
5230 
5231 	BUG_ON(!name);
5232 
5233 	*p++ = ':';
5234 	/*
5235 	 * First flags affecting slabcache operations. We will only
5236 	 * get here for aliasable slabs so we do not need to support
5237 	 * too many flags. The flags here must cover all flags that
5238 	 * are matched during merging to guarantee that the id is
5239 	 * unique.
5240 	 */
5241 	if (s->flags & SLAB_CACHE_DMA)
5242 		*p++ = 'd';
5243 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5244 		*p++ = 'a';
5245 	if (s->flags & SLAB_DEBUG_FREE)
5246 		*p++ = 'F';
5247 	if (!(s->flags & SLAB_NOTRACK))
5248 		*p++ = 't';
5249 	if (p != name + 1)
5250 		*p++ = '-';
5251 	p += sprintf(p, "%07d", s->size);
5252 	BUG_ON(p > name + ID_STR_LENGTH - 1);
5253 	return name;
5254 }
5255 
5256 static int sysfs_slab_add(struct kmem_cache *s)
5257 {
5258 	int err;
5259 	const char *name;
5260 	int unmergeable;
5261 
5262 	if (slab_state < SYSFS)
5263 		/* Defer until later */
5264 		return 0;
5265 
5266 	unmergeable = slab_unmergeable(s);
5267 	if (unmergeable) {
5268 		/*
5269 		 * Slabcache can never be merged so we can use the name proper.
5270 		 * This is typically the case for debug situations. In that
5271 		 * case we can catch duplicate names easily.
5272 		 */
5273 		sysfs_remove_link(&slab_kset->kobj, s->name);
5274 		name = s->name;
5275 	} else {
5276 		/*
5277 		 * Create a unique name for the slab as a target
5278 		 * for the symlinks.
5279 		 */
5280 		name = create_unique_id(s);
5281 	}
5282 
5283 	s->kobj.kset = slab_kset;
5284 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
5285 	if (err) {
5286 		kobject_put(&s->kobj);
5287 		return err;
5288 	}
5289 
5290 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5291 	if (err) {
5292 		kobject_del(&s->kobj);
5293 		kobject_put(&s->kobj);
5294 		return err;
5295 	}
5296 	kobject_uevent(&s->kobj, KOBJ_ADD);
5297 	if (!unmergeable) {
5298 		/* Setup first alias */
5299 		sysfs_slab_alias(s, s->name);
5300 		kfree(name);
5301 	}
5302 	return 0;
5303 }
5304 
5305 static void sysfs_slab_remove(struct kmem_cache *s)
5306 {
5307 	if (slab_state < SYSFS)
5308 		/*
5309 		 * Sysfs has not been setup yet so no need to remove the
5310 		 * cache from sysfs.
5311 		 */
5312 		return;
5313 
5314 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
5315 	kobject_del(&s->kobj);
5316 	kobject_put(&s->kobj);
5317 }
5318 
5319 /*
5320  * Need to buffer aliases during bootup until sysfs becomes
5321  * available lest we lose that information.
5322  */
5323 struct saved_alias {
5324 	struct kmem_cache *s;
5325 	const char *name;
5326 	struct saved_alias *next;
5327 };
5328 
5329 static struct saved_alias *alias_list;
5330 
5331 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5332 {
5333 	struct saved_alias *al;
5334 
5335 	if (slab_state == SYSFS) {
5336 		/*
5337 		 * If we have a leftover link then remove it.
5338 		 */
5339 		sysfs_remove_link(&slab_kset->kobj, name);
5340 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5341 	}
5342 
5343 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5344 	if (!al)
5345 		return -ENOMEM;
5346 
5347 	al->s = s;
5348 	al->name = name;
5349 	al->next = alias_list;
5350 	alias_list = al;
5351 	return 0;
5352 }
5353 
5354 static int __init slab_sysfs_init(void)
5355 {
5356 	struct kmem_cache *s;
5357 	int err;
5358 
5359 	down_write(&slub_lock);
5360 
5361 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5362 	if (!slab_kset) {
5363 		up_write(&slub_lock);
5364 		printk(KERN_ERR "Cannot register slab subsystem.\n");
5365 		return -ENOSYS;
5366 	}
5367 
5368 	slab_state = SYSFS;
5369 
5370 	list_for_each_entry(s, &slab_caches, list) {
5371 		err = sysfs_slab_add(s);
5372 		if (err)
5373 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5374 						" to sysfs\n", s->name);
5375 	}
5376 
5377 	while (alias_list) {
5378 		struct saved_alias *al = alias_list;
5379 
5380 		alias_list = alias_list->next;
5381 		err = sysfs_slab_alias(al->s, al->name);
5382 		if (err)
5383 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
5384 					" %s to sysfs\n", s->name);
5385 		kfree(al);
5386 	}
5387 
5388 	up_write(&slub_lock);
5389 	resiliency_test();
5390 	return 0;
5391 }
5392 
5393 __initcall(slab_sysfs_init);
5394 #endif /* CONFIG_SYSFS */
5395 
5396 /*
5397  * The /proc/slabinfo ABI
5398  */
5399 #ifdef CONFIG_SLABINFO
5400 static void print_slabinfo_header(struct seq_file *m)
5401 {
5402 	seq_puts(m, "slabinfo - version: 2.1\n");
5403 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
5404 		 "<objperslab> <pagesperslab>");
5405 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
5406 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
5407 	seq_putc(m, '\n');
5408 }
5409 
5410 static void *s_start(struct seq_file *m, loff_t *pos)
5411 {
5412 	loff_t n = *pos;
5413 
5414 	down_read(&slub_lock);
5415 	if (!n)
5416 		print_slabinfo_header(m);
5417 
5418 	return seq_list_start(&slab_caches, *pos);
5419 }
5420 
5421 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
5422 {
5423 	return seq_list_next(p, &slab_caches, pos);
5424 }
5425 
5426 static void s_stop(struct seq_file *m, void *p)
5427 {
5428 	up_read(&slub_lock);
5429 }
5430 
5431 static int s_show(struct seq_file *m, void *p)
5432 {
5433 	unsigned long nr_partials = 0;
5434 	unsigned long nr_slabs = 0;
5435 	unsigned long nr_inuse = 0;
5436 	unsigned long nr_objs = 0;
5437 	unsigned long nr_free = 0;
5438 	struct kmem_cache *s;
5439 	int node;
5440 
5441 	s = list_entry(p, struct kmem_cache, list);
5442 
5443 	for_each_online_node(node) {
5444 		struct kmem_cache_node *n = get_node(s, node);
5445 
5446 		if (!n)
5447 			continue;
5448 
5449 		nr_partials += n->nr_partial;
5450 		nr_slabs += atomic_long_read(&n->nr_slabs);
5451 		nr_objs += atomic_long_read(&n->total_objects);
5452 		nr_free += count_partial(n, count_free);
5453 	}
5454 
5455 	nr_inuse = nr_objs - nr_free;
5456 
5457 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
5458 		   nr_objs, s->size, oo_objects(s->oo),
5459 		   (1 << oo_order(s->oo)));
5460 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
5461 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
5462 		   0UL);
5463 	seq_putc(m, '\n');
5464 	return 0;
5465 }
5466 
5467 static const struct seq_operations slabinfo_op = {
5468 	.start = s_start,
5469 	.next = s_next,
5470 	.stop = s_stop,
5471 	.show = s_show,
5472 };
5473 
5474 static int slabinfo_open(struct inode *inode, struct file *file)
5475 {
5476 	return seq_open(file, &slabinfo_op);
5477 }
5478 
5479 static const struct file_operations proc_slabinfo_operations = {
5480 	.open		= slabinfo_open,
5481 	.read		= seq_read,
5482 	.llseek		= seq_lseek,
5483 	.release	= seq_release,
5484 };
5485 
5486 static int __init slab_proc_init(void)
5487 {
5488 	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
5489 	return 0;
5490 }
5491 module_init(slab_proc_init);
5492 #endif /* CONFIG_SLABINFO */
5493