xref: /openbmc/linux/mm/slub.c (revision b34e08d5)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks or atomic operations
6  * and only uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  * (C) 2011 Linux Foundation, Christoph Lameter
10  */
11 
12 #include <linux/mm.h>
13 #include <linux/swap.h> /* struct reclaim_state */
14 #include <linux/module.h>
15 #include <linux/bit_spinlock.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include "slab.h"
20 #include <linux/proc_fs.h>
21 #include <linux/notifier.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmemcheck.h>
24 #include <linux/cpu.h>
25 #include <linux/cpuset.h>
26 #include <linux/mempolicy.h>
27 #include <linux/ctype.h>
28 #include <linux/debugobjects.h>
29 #include <linux/kallsyms.h>
30 #include <linux/memory.h>
31 #include <linux/math64.h>
32 #include <linux/fault-inject.h>
33 #include <linux/stacktrace.h>
34 #include <linux/prefetch.h>
35 #include <linux/memcontrol.h>
36 
37 #include <trace/events/kmem.h>
38 
39 #include "internal.h"
40 
41 /*
42  * Lock order:
43  *   1. slab_mutex (Global Mutex)
44  *   2. node->list_lock
45  *   3. slab_lock(page) (Only on some arches and for debugging)
46  *
47  *   slab_mutex
48  *
49  *   The role of the slab_mutex is to protect the list of all the slabs
50  *   and to synchronize major metadata changes to slab cache structures.
51  *
52  *   The slab_lock is only used for debugging and on arches that do not
53  *   have the ability to do a cmpxchg_double. It only protects the second
54  *   double word in the page struct. Meaning
55  *	A. page->freelist	-> List of free objects in a page
56  *	B. page->counters	-> Counters of objects
57  *	C. page->frozen		-> frozen state
58  *
59  *   If a slab is frozen then it is exempt from list management. It is not
60  *   on any list. The processor that froze the slab is the one who can
61  *   perform list operations on the page. Other processors may put objects
62  *   onto the freelist but the processor that froze the slab is the only
63  *   one that can retrieve the objects from the page's freelist.
64  *
65  *   The list_lock protects the partial and full lists on each node as well
66  *   as the partial slab counter. While it is held, no slabs may be added to
67  *   or removed from the lists, and the number of partial slabs cannot change.
68  *   (Note that the total number of slabs is an atomic value that may be
69  *   modified without taking the list lock).
70  *
71  *   The list_lock is a centralized lock and thus we avoid taking it as
72  *   much as possible. As long as SLUB does not have to handle partial
73  *   slabs, operations can continue without any centralized lock. F.e.
74  *   allocating a long series of objects that fill up slabs does not require
75  *   the list lock.
76  *   Interrupts are disabled during allocation and deallocation in order to
77  *   make the slab allocator safe to use in the context of an irq. In addition
78  *   interrupts are disabled to ensure that the processor does not change
79  *   while handling per_cpu slabs, due to kernel preemption.
80  *
81  * SLUB assigns one slab for allocation to each processor.
82  * Allocations only occur from these slabs called cpu slabs.
83  *
84  * Slabs with free elements are kept on a partial list and during regular
85  * operations no list for full slabs is used. If an object in a full slab is
86  * freed then the slab will show up again on the partial lists.
87  * We track full slabs for debugging purposes though because otherwise we
88  * cannot scan all objects.
89  *
90  * Slabs are freed when they become empty. Teardown and setup is
91  * minimal so we rely on the page allocators per cpu caches for
92  * fast frees and allocs.
93  *
94  * Overloading of page flags that are otherwise used for LRU management.
95  *
96  * PageActive 		The slab is frozen and exempt from list processing.
97  * 			This means that the slab is dedicated to a purpose
98  * 			such as satisfying allocations for a specific
99  * 			processor. Objects may be freed in the slab while
100  * 			it is frozen but slab_free will then skip the usual
101  * 			list operations. It is up to the processor holding
102  * 			the slab to integrate the slab into the slab lists
103  * 			when the slab is no longer needed.
104  *
105  * 			One use of this flag is to mark slabs that are
106  * 			used for allocations. Then such a slab becomes a cpu
107  * 			slab. The cpu slab may be equipped with an additional
108  * 			freelist that allows lockless access to
109  * 			free objects in addition to the regular freelist
110  * 			that requires the slab lock.
111  *
112  * PageError		Slab requires special handling due to debug
113  * 			options set. This moves	slab handling out of
114  * 			the fast path and disables lockless freelists.
115  */
116 
117 static inline int kmem_cache_debug(struct kmem_cache *s)
118 {
119 #ifdef CONFIG_SLUB_DEBUG
120 	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
121 #else
122 	return 0;
123 #endif
124 }
125 
126 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
127 {
128 #ifdef CONFIG_SLUB_CPU_PARTIAL
129 	return !kmem_cache_debug(s);
130 #else
131 	return false;
132 #endif
133 }
134 
135 /*
136  * Issues still to be resolved:
137  *
138  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
139  *
140  * - Variable sizing of the per node arrays
141  */
142 
143 /* Enable to test recovery from slab corruption on boot */
144 #undef SLUB_RESILIENCY_TEST
145 
146 /* Enable to log cmpxchg failures */
147 #undef SLUB_DEBUG_CMPXCHG
148 
149 /*
150  * Minimum number of partial slabs. These will be left on the partial
151  * lists even if they are empty. kmem_cache_shrink may reclaim them.
152  */
153 #define MIN_PARTIAL 5
154 
155 /*
156  * Maximum number of desirable partial slabs.
157  * The existence of more partial slabs makes kmem_cache_shrink
158  * sort the partial list by the number of objects in use.
159  */
160 #define MAX_PARTIAL 10
161 
162 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
163 				SLAB_POISON | SLAB_STORE_USER)
164 
165 /*
166  * Debugging flags that require metadata to be stored in the slab.  These get
167  * disabled when slub_debug=O is used and a cache's min order increases with
168  * metadata.
169  */
170 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
171 
172 /*
173  * Set of flags that will prevent slab merging
174  */
175 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
176 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
177 		SLAB_FAILSLAB)
178 
179 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
180 		SLAB_CACHE_DMA | SLAB_NOTRACK)
181 
182 #define OO_SHIFT	16
183 #define OO_MASK		((1 << OO_SHIFT) - 1)
184 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
185 
186 /* Internal SLUB flags */
187 #define __OBJECT_POISON		0x80000000UL /* Poison object */
188 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
189 
190 #ifdef CONFIG_SMP
191 static struct notifier_block slab_notifier;
192 #endif
193 
194 /*
195  * Tracking user of a slab.
196  */
197 #define TRACK_ADDRS_COUNT 16
198 struct track {
199 	unsigned long addr;	/* Called from address */
200 #ifdef CONFIG_STACKTRACE
201 	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
202 #endif
203 	int cpu;		/* Was running on cpu */
204 	int pid;		/* Pid context */
205 	unsigned long when;	/* When did the operation occur */
206 };
207 
208 enum track_item { TRACK_ALLOC, TRACK_FREE };
209 
210 #ifdef CONFIG_SYSFS
211 static int sysfs_slab_add(struct kmem_cache *);
212 static int sysfs_slab_alias(struct kmem_cache *, const char *);
213 static void sysfs_slab_remove(struct kmem_cache *);
214 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
215 #else
216 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
217 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
218 							{ return 0; }
219 static inline void sysfs_slab_remove(struct kmem_cache *s) { }
220 
221 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
222 #endif
223 
224 static inline void stat(const struct kmem_cache *s, enum stat_item si)
225 {
226 #ifdef CONFIG_SLUB_STATS
227 	/*
228 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
229 	 * avoid this_cpu_add()'s irq-disable overhead.
230 	 */
231 	raw_cpu_inc(s->cpu_slab->stat[si]);
232 #endif
233 }
234 
235 /********************************************************************
236  * 			Core slab cache functions
237  *******************************************************************/
238 
239 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
240 {
241 	return s->node[node];
242 }
243 
244 /* Verify that a pointer has an address that is valid within a slab page */
245 static inline int check_valid_pointer(struct kmem_cache *s,
246 				struct page *page, const void *object)
247 {
248 	void *base;
249 
250 	if (!object)
251 		return 1;
252 
253 	base = page_address(page);
254 	if (object < base || object >= base + page->objects * s->size ||
255 		(object - base) % s->size) {
256 		return 0;
257 	}
258 
259 	return 1;
260 }
261 
262 static inline void *get_freepointer(struct kmem_cache *s, void *object)
263 {
264 	return *(void **)(object + s->offset);
265 }
266 
267 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
268 {
269 	prefetch(object + s->offset);
270 }
271 
272 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
273 {
274 	void *p;
275 
276 #ifdef CONFIG_DEBUG_PAGEALLOC
277 	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
278 #else
279 	p = get_freepointer(s, object);
280 #endif
281 	return p;
282 }
283 
284 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
285 {
286 	*(void **)(object + s->offset) = fp;
287 }
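
/*
 * Illustrative sketch (not part of SLUB): the freelist is threaded through
 * the free objects themselves.  Each free object stores the address of the
 * next free object at offset s->offset, which is what get_freepointer() and
 * set_freepointer() above read and write.  A minimal, stand-alone user-space
 * model of the same technique (hypothetical sizes and names) might look like:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	#define OBJ_SIZE	64	// hypothetical object size
 *	#define FP_OFFSET	0	// free pointer overlays the object
 *	#define NR_OBJS		4
 *
 *	static void *get_fp(void *obj)
 *	{
 *		return *(void **)((char *)obj + FP_OFFSET);
 *	}
 *
 *	static void set_fp(void *obj, void *fp)
 *	{
 *		*(void **)((char *)obj + FP_OFFSET) = fp;
 *	}
 *
 *	int main(void)
 *	{
 *		char *slab = malloc(NR_OBJS * OBJ_SIZE);
 *		void *freelist = NULL;
 *		int i;
 *
 *		// Build the freelist by threading it through the free objects.
 *		for (i = NR_OBJS - 1; i >= 0; i--) {
 *			set_fp(slab + i * OBJ_SIZE, freelist);
 *			freelist = slab + i * OBJ_SIZE;
 *		}
 *
 *		// "Allocate" by popping the head and following the stored link.
 *		while (freelist) {
 *			printf("alloc %p\n", freelist);
 *			freelist = get_fp(freelist);
 *		}
 *		free(slab);
 *		return 0;
 *	}
 */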
288 
289 /* Loop over all objects in a slab */
290 #define for_each_object(__p, __s, __addr, __objects) \
291 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
292 			__p += (__s)->size)
293 
294 /* Determine object index from a given position */
295 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
296 {
297 	return (p - addr) / s->size;
298 }
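
/*
 * Example (illustrative): with s->size == 256, the object starting at
 * addr + 1024 has slab_index() == 4, and for_each_object() above walks the
 * objects in address order, visiting indices 0 .. page->objects - 1.
 */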
299 
300 static inline size_t slab_ksize(const struct kmem_cache *s)
301 {
302 #ifdef CONFIG_SLUB_DEBUG
303 	/*
304 	 * Debugging requires use of the padding between object
305 	 * and whatever may come after it.
306 	 */
307 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
308 		return s->object_size;
309 
310 #endif
311 	/*
312 	 * If we have the need to store the freelist pointer
313 	 * back there or track user information then we can
314 	 * only use the space before that information.
315 	 */
316 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
317 		return s->inuse;
318 	/*
319 	 * Else we can use all the padding etc for the allocation
320 	 */
321 	return s->size;
322 }
323 
324 static inline int order_objects(int order, unsigned long size, int reserved)
325 {
326 	return ((PAGE_SIZE << order) - reserved) / size;
327 }
328 
329 static inline struct kmem_cache_order_objects oo_make(int order,
330 		unsigned long size, int reserved)
331 {
332 	struct kmem_cache_order_objects x = {
333 		(order << OO_SHIFT) + order_objects(order, size, reserved)
334 	};
335 
336 	return x;
337 }
338 
339 static inline int oo_order(struct kmem_cache_order_objects x)
340 {
341 	return x.x >> OO_SHIFT;
342 }
343 
344 static inline int oo_objects(struct kmem_cache_order_objects x)
345 {
346 	return x.x & OO_MASK;
347 }
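
/*
 * Worked example (illustrative only): with OO_SHIFT == 16, an order-1 slab
 * (two 4KiB pages), 256-byte objects and no reserved bytes gives
 * order_objects(1, 256, 0) == 8192 / 256 == 32, so oo_make() packs the pair
 * as (1 << 16) + 32 == 0x10020.  oo_order() then recovers 1 and oo_objects()
 * recovers 32.  The same arithmetic as a stand-alone sketch:
 *
 *	#include <stdio.h>
 *
 *	#define PAGE_SZ		4096UL
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	int main(void)
 *	{
 *		unsigned long order = 1, size = 256, reserved = 0;
 *		unsigned long objects = ((PAGE_SZ << order) - reserved) / size;
 *		unsigned long oo = (order << OO_SHIFT) + objects;
 *
 *		// Prints: oo=0x10020 order=1 objects=32
 *		printf("oo=0x%lx order=%lu objects=%lu\n",
 *		       oo, oo >> OO_SHIFT, oo & OO_MASK);
 *		return 0;
 *	}
 */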
348 
349 /*
350  * Per slab locking using the pagelock
351  */
352 static __always_inline void slab_lock(struct page *page)
353 {
354 	bit_spin_lock(PG_locked, &page->flags);
355 }
356 
357 static __always_inline void slab_unlock(struct page *page)
358 {
359 	__bit_spin_unlock(PG_locked, &page->flags);
360 }
361 
362 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
363 {
364 	struct page tmp;
365 	tmp.counters = counters_new;
366 	/*
367 	 * page->counters can cover frozen/inuse/objects as well
368 	 * as page->_count.  If we assign to ->counters directly
369 	 * we run the risk of losing updates to page->_count, so
370 	 * be careful and only assign to the fields we need.
371 	 */
372 	page->frozen  = tmp.frozen;
373 	page->inuse   = tmp.inuse;
374 	page->objects = tmp.objects;
375 }
376 
377 /* Interrupts must be disabled (for the fallback code to work right) */
378 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
379 		void *freelist_old, unsigned long counters_old,
380 		void *freelist_new, unsigned long counters_new,
381 		const char *n)
382 {
383 	VM_BUG_ON(!irqs_disabled());
384 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
385     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
386 	if (s->flags & __CMPXCHG_DOUBLE) {
387 		if (cmpxchg_double(&page->freelist, &page->counters,
388 			freelist_old, counters_old,
389 			freelist_new, counters_new))
390 		return 1;
391 	} else
392 #endif
393 	{
394 		slab_lock(page);
395 		if (page->freelist == freelist_old &&
396 					page->counters == counters_old) {
397 			page->freelist = freelist_new;
398 			set_page_slub_counters(page, counters_new);
399 			slab_unlock(page);
400 			return 1;
401 		}
402 		slab_unlock(page);
403 	}
404 
405 	cpu_relax();
406 	stat(s, CMPXCHG_DOUBLE_FAIL);
407 
408 #ifdef SLUB_DEBUG_CMPXCHG
409 	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
410 #endif
411 
412 	return 0;
413 }
414 
415 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
416 		void *freelist_old, unsigned long counters_old,
417 		void *freelist_new, unsigned long counters_new,
418 		const char *n)
419 {
420 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
421     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
422 	if (s->flags & __CMPXCHG_DOUBLE) {
423 		if (cmpxchg_double(&page->freelist, &page->counters,
424 			freelist_old, counters_old,
425 			freelist_new, counters_new))
426 		return 1;
427 	} else
428 #endif
429 	{
430 		unsigned long flags;
431 
432 		local_irq_save(flags);
433 		slab_lock(page);
434 		if (page->freelist == freelist_old &&
435 					page->counters == counters_old) {
436 			page->freelist = freelist_new;
437 			set_page_slub_counters(page, counters_new);
438 			slab_unlock(page);
439 			local_irq_restore(flags);
440 			return 1;
441 		}
442 		slab_unlock(page);
443 		local_irq_restore(flags);
444 	}
445 
446 	cpu_relax();
447 	stat(s, CMPXCHG_DOUBLE_FAIL);
448 
449 #ifdef SLUB_DEBUG_CMPXCHG
450 	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
451 #endif
452 
453 	return 0;
454 }
455 
456 #ifdef CONFIG_SLUB_DEBUG
457 /*
458  * Determine a map of objects in use on a page.
459  *
460  * The node's list_lock must be held to guarantee that the page does
461  * not vanish from under us.
462  */
463 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
464 {
465 	void *p;
466 	void *addr = page_address(page);
467 
468 	for (p = page->freelist; p; p = get_freepointer(s, p))
469 		set_bit(slab_index(p, s, addr), map);
470 }
471 
472 /*
473  * Debug settings:
474  */
475 #ifdef CONFIG_SLUB_DEBUG_ON
476 static int slub_debug = DEBUG_DEFAULT_FLAGS;
477 #else
478 static int slub_debug;
479 #endif
480 
481 static char *slub_debug_slabs;
482 static int disable_higher_order_debug;
483 
484 /*
485  * Object debugging
486  */
487 static void print_section(char *text, u8 *addr, unsigned int length)
488 {
489 	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
490 			length, 1);
491 }
492 
493 static struct track *get_track(struct kmem_cache *s, void *object,
494 	enum track_item alloc)
495 {
496 	struct track *p;
497 
498 	if (s->offset)
499 		p = object + s->offset + sizeof(void *);
500 	else
501 		p = object + s->inuse;
502 
503 	return p + alloc;
504 }
505 
506 static void set_track(struct kmem_cache *s, void *object,
507 			enum track_item alloc, unsigned long addr)
508 {
509 	struct track *p = get_track(s, object, alloc);
510 
511 	if (addr) {
512 #ifdef CONFIG_STACKTRACE
513 		struct stack_trace trace;
514 		int i;
515 
516 		trace.nr_entries = 0;
517 		trace.max_entries = TRACK_ADDRS_COUNT;
518 		trace.entries = p->addrs;
519 		trace.skip = 3;
520 		save_stack_trace(&trace);
521 
522 		/* See rant in lockdep.c */
523 		if (trace.nr_entries != 0 &&
524 		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
525 			trace.nr_entries--;
526 
527 		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
528 			p->addrs[i] = 0;
529 #endif
530 		p->addr = addr;
531 		p->cpu = smp_processor_id();
532 		p->pid = current->pid;
533 		p->when = jiffies;
534 	} else
535 		memset(p, 0, sizeof(struct track));
536 }
537 
538 static void init_tracking(struct kmem_cache *s, void *object)
539 {
540 	if (!(s->flags & SLAB_STORE_USER))
541 		return;
542 
543 	set_track(s, object, TRACK_FREE, 0UL);
544 	set_track(s, object, TRACK_ALLOC, 0UL);
545 }
546 
547 static void print_track(const char *s, struct track *t)
548 {
549 	if (!t->addr)
550 		return;
551 
552 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
553 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
554 #ifdef CONFIG_STACKTRACE
555 	{
556 		int i;
557 		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
558 			if (t->addrs[i])
559 				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
560 			else
561 				break;
562 	}
563 #endif
564 }
565 
566 static void print_tracking(struct kmem_cache *s, void *object)
567 {
568 	if (!(s->flags & SLAB_STORE_USER))
569 		return;
570 
571 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
572 	print_track("Freed", get_track(s, object, TRACK_FREE));
573 }
574 
575 static void print_page_info(struct page *page)
576 {
577 	printk(KERN_ERR
578 	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
579 	       page, page->objects, page->inuse, page->freelist, page->flags);
580 
581 }
582 
583 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
584 {
585 	va_list args;
586 	char buf[100];
587 
588 	va_start(args, fmt);
589 	vsnprintf(buf, sizeof(buf), fmt, args);
590 	va_end(args);
591 	printk(KERN_ERR "========================================"
592 			"=====================================\n");
593 	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
594 	printk(KERN_ERR "----------------------------------------"
595 			"-------------------------------------\n\n");
596 
597 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
598 }
599 
600 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
601 {
602 	va_list args;
603 	char buf[100];
604 
605 	va_start(args, fmt);
606 	vsnprintf(buf, sizeof(buf), fmt, args);
607 	va_end(args);
608 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
609 }
610 
611 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
612 {
613 	unsigned int off;	/* Offset of last byte */
614 	u8 *addr = page_address(page);
615 
616 	print_tracking(s, p);
617 
618 	print_page_info(page);
619 
620 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
621 			p, p - addr, get_freepointer(s, p));
622 
623 	if (p > addr + 16)
624 		print_section("Bytes b4 ", p - 16, 16);
625 
626 	print_section("Object ", p, min_t(unsigned long, s->object_size,
627 				PAGE_SIZE));
628 	if (s->flags & SLAB_RED_ZONE)
629 		print_section("Redzone ", p + s->object_size,
630 			s->inuse - s->object_size);
631 
632 	if (s->offset)
633 		off = s->offset + sizeof(void *);
634 	else
635 		off = s->inuse;
636 
637 	if (s->flags & SLAB_STORE_USER)
638 		off += 2 * sizeof(struct track);
639 
640 	if (off != s->size)
641 		/* Beginning of the filler is the free pointer */
642 		print_section("Padding ", p + off, s->size - off);
643 
644 	dump_stack();
645 }
646 
647 static void object_err(struct kmem_cache *s, struct page *page,
648 			u8 *object, char *reason)
649 {
650 	slab_bug(s, "%s", reason);
651 	print_trailer(s, page, object);
652 }
653 
654 static void slab_err(struct kmem_cache *s, struct page *page,
655 			const char *fmt, ...)
656 {
657 	va_list args;
658 	char buf[100];
659 
660 	va_start(args, fmt);
661 	vsnprintf(buf, sizeof(buf), fmt, args);
662 	va_end(args);
663 	slab_bug(s, "%s", buf);
664 	print_page_info(page);
665 	dump_stack();
666 }
667 
668 static void init_object(struct kmem_cache *s, void *object, u8 val)
669 {
670 	u8 *p = object;
671 
672 	if (s->flags & __OBJECT_POISON) {
673 		memset(p, POISON_FREE, s->object_size - 1);
674 		p[s->object_size - 1] = POISON_END;
675 	}
676 
677 	if (s->flags & SLAB_RED_ZONE)
678 		memset(p + s->object_size, val, s->inuse - s->object_size);
679 }
680 
681 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
682 						void *from, void *to)
683 {
684 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
685 	memset(from, data, to - from);
686 }
687 
688 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
689 			u8 *object, char *what,
690 			u8 *start, unsigned int value, unsigned int bytes)
691 {
692 	u8 *fault;
693 	u8 *end;
694 
695 	fault = memchr_inv(start, value, bytes);
696 	if (!fault)
697 		return 1;
698 
699 	end = start + bytes;
700 	while (end > fault && end[-1] == value)
701 		end--;
702 
703 	slab_bug(s, "%s overwritten", what);
704 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
705 					fault, end - 1, fault[0], value);
706 	print_trailer(s, page, object);
707 
708 	restore_bytes(s, what, value, fault, end);
709 	return 0;
710 }
711 
712 /*
713  * Object layout:
714  *
715  * object address
716  * 	Bytes of the object to be managed.
717  * 	If the freepointer may overlay the object then the free
718  * 	pointer is the first word of the object.
719  *
720  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
721  * 	0xa5 (POISON_END)
722  *
723  * object + s->object_size
724  * 	Padding to reach word boundary. This is also used for Redzoning.
725  * 	Padding is extended by another word if Redzoning is enabled and
726  * 	object_size == inuse.
727  *
728  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
729  * 	0xcc (RED_ACTIVE) for objects in use.
730  *
731  * object + s->inuse
732  * 	Meta data starts here.
733  *
734  * 	A. Free pointer (if we cannot overwrite object on free)
735  * 	B. Tracking data for SLAB_STORE_USER
736  * 	C. Padding to reach the required alignment boundary or at minimum
737  * 		one word if debugging is on to be able to detect writes
738  * 		before the word boundary.
739  *
740  *	Padding is done using 0x5a (POISON_INUSE)
741  *
742  * object + s->size
743  * 	Nothing is used beyond s->size.
744  *
745  * If slab caches are merged then the object_size and inuse boundaries are mostly
746  * ignored. Therefore no slab options that rely on these boundaries
747  * may be used with merged slab caches.
748  */
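
/*
 * Worked example of the layout above (illustrative only; the exact numbers
 * depend on the architecture, the config and the flags): a 64-bit cache with
 * object_size = 24 created with SLAB_POISON | SLAB_STORE_USER and no
 * constructor ends up roughly as
 *
 *	bytes  0..23	the object, filled with 0x6b and ending in 0xa5
 *			while free
 *	bytes 24..31	the free pointer (s->offset == s->inuse == 24, since
 *			a poisoned object cannot double as the free pointer)
 *	bytes 32..	two struct track records (TRACK_ALLOC, TRACK_FREE)
 *	up to s->size	POISON_INUSE (0x5a) padding
 *
 * calculate_sizes() later in this file performs the authoritative layout
 * computation.
 */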
749 
750 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
751 {
752 	unsigned long off = s->inuse;	/* The end of info */
753 
754 	if (s->offset)
755 		/* Freepointer is placed after the object. */
756 		off += sizeof(void *);
757 
758 	if (s->flags & SLAB_STORE_USER)
759 		/* We also have user information there */
760 		off += 2 * sizeof(struct track);
761 
762 	if (s->size == off)
763 		return 1;
764 
765 	return check_bytes_and_report(s, page, p, "Object padding",
766 				p + off, POISON_INUSE, s->size - off);
767 }
768 
769 /* Check the pad bytes at the end of a slab page */
770 static int slab_pad_check(struct kmem_cache *s, struct page *page)
771 {
772 	u8 *start;
773 	u8 *fault;
774 	u8 *end;
775 	int length;
776 	int remainder;
777 
778 	if (!(s->flags & SLAB_POISON))
779 		return 1;
780 
781 	start = page_address(page);
782 	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
783 	end = start + length;
784 	remainder = length % s->size;
785 	if (!remainder)
786 		return 1;
787 
788 	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
789 	if (!fault)
790 		return 1;
791 	while (end > fault && end[-1] == POISON_INUSE)
792 		end--;
793 
794 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
795 	print_section("Padding ", end - remainder, remainder);
796 
797 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
798 	return 0;
799 }
800 
801 static int check_object(struct kmem_cache *s, struct page *page,
802 					void *object, u8 val)
803 {
804 	u8 *p = object;
805 	u8 *endobject = object + s->object_size;
806 
807 	if (s->flags & SLAB_RED_ZONE) {
808 		if (!check_bytes_and_report(s, page, object, "Redzone",
809 			endobject, val, s->inuse - s->object_size))
810 			return 0;
811 	} else {
812 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
813 			check_bytes_and_report(s, page, p, "Alignment padding",
814 				endobject, POISON_INUSE,
815 				s->inuse - s->object_size);
816 		}
817 	}
818 
819 	if (s->flags & SLAB_POISON) {
820 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
821 			(!check_bytes_and_report(s, page, p, "Poison", p,
822 					POISON_FREE, s->object_size - 1) ||
823 			 !check_bytes_and_report(s, page, p, "Poison",
824 				p + s->object_size - 1, POISON_END, 1)))
825 			return 0;
826 		/*
827 		 * check_pad_bytes cleans up on its own.
828 		 */
829 		check_pad_bytes(s, page, p);
830 	}
831 
832 	if (!s->offset && val == SLUB_RED_ACTIVE)
833 		/*
834 		 * Object and freepointer overlap. Cannot check
835 		 * freepointer while object is allocated.
836 		 */
837 		return 1;
838 
839 	/* Check free pointer validity */
840 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
841 		object_err(s, page, p, "Freepointer corrupt");
842 		/*
843 		 * No choice but to zap it and thus lose the remainder
844 		 * of the free objects in this slab. May cause
845 		 * another error because the object count is now wrong.
846 		 */
847 		set_freepointer(s, p, NULL);
848 		return 0;
849 	}
850 	return 1;
851 }
852 
853 static int check_slab(struct kmem_cache *s, struct page *page)
854 {
855 	int maxobj;
856 
857 	VM_BUG_ON(!irqs_disabled());
858 
859 	if (!PageSlab(page)) {
860 		slab_err(s, page, "Not a valid slab page");
861 		return 0;
862 	}
863 
864 	maxobj = order_objects(compound_order(page), s->size, s->reserved);
865 	if (page->objects > maxobj) {
866 		slab_err(s, page, "objects %u > max %u",
867 			page->objects, maxobj);
868 		return 0;
869 	}
870 	if (page->inuse > page->objects) {
871 		slab_err(s, page, "inuse %u > max %u",
872 			page->inuse, page->objects);
873 		return 0;
874 	}
875 	/* slab_pad_check() fixes things up after itself */
876 	slab_pad_check(s, page);
877 	return 1;
878 }
879 
880 /*
881  * Determine if a certain object on a page is on the freelist. Must hold the
882  * slab lock to guarantee that the chains are in a consistent state.
883  */
884 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
885 {
886 	int nr = 0;
887 	void *fp;
888 	void *object = NULL;
889 	unsigned long max_objects;
890 
891 	fp = page->freelist;
892 	while (fp && nr <= page->objects) {
893 		if (fp == search)
894 			return 1;
895 		if (!check_valid_pointer(s, page, fp)) {
896 			if (object) {
897 				object_err(s, page, object,
898 					"Freechain corrupt");
899 				set_freepointer(s, object, NULL);
900 			} else {
901 				slab_err(s, page, "Freepointer corrupt");
902 				page->freelist = NULL;
903 				page->inuse = page->objects;
904 				slab_fix(s, "Freelist cleared");
905 				return 0;
906 			}
907 			break;
908 		}
909 		object = fp;
910 		fp = get_freepointer(s, object);
911 		nr++;
912 	}
913 
914 	max_objects = order_objects(compound_order(page), s->size, s->reserved);
915 	if (max_objects > MAX_OBJS_PER_PAGE)
916 		max_objects = MAX_OBJS_PER_PAGE;
917 
918 	if (page->objects != max_objects) {
919 		slab_err(s, page, "Wrong number of objects. Found %d but "
920 			"should be %d", page->objects, max_objects);
921 		page->objects = max_objects;
922 		slab_fix(s, "Number of objects adjusted.");
923 	}
924 	if (page->inuse != page->objects - nr) {
925 		slab_err(s, page, "Wrong object count. Counter is %d but "
926 			"counted were %d", page->inuse, page->objects - nr);
927 		page->inuse = page->objects - nr;
928 		slab_fix(s, "Object count adjusted.");
929 	}
930 	return search == NULL;
931 }
932 
933 static void trace(struct kmem_cache *s, struct page *page, void *object,
934 								int alloc)
935 {
936 	if (s->flags & SLAB_TRACE) {
937 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
938 			s->name,
939 			alloc ? "alloc" : "free",
940 			object, page->inuse,
941 			page->freelist);
942 
943 		if (!alloc)
944 			print_section("Object ", (void *)object,
945 					s->object_size);
946 
947 		dump_stack();
948 	}
949 }
950 
951 /*
952  * Hooks for other subsystems that check memory allocations. In a typical
953  * production configuration these hooks all should produce no code at all.
954  */
955 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
956 {
957 	kmemleak_alloc(ptr, size, 1, flags);
958 }
959 
960 static inline void kfree_hook(const void *x)
961 {
962 	kmemleak_free(x);
963 }
964 
965 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
966 {
967 	flags &= gfp_allowed_mask;
968 	lockdep_trace_alloc(flags);
969 	might_sleep_if(flags & __GFP_WAIT);
970 
971 	return should_failslab(s->object_size, flags, s->flags);
972 }
973 
974 static inline void slab_post_alloc_hook(struct kmem_cache *s,
975 					gfp_t flags, void *object)
976 {
977 	flags &= gfp_allowed_mask;
978 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
979 	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
980 }
981 
982 static inline void slab_free_hook(struct kmem_cache *s, void *x)
983 {
984 	kmemleak_free_recursive(x, s->flags);
985 
986 	/*
987 	 * Trouble is that we may no longer disable interrupts in the fast path
988 	 * So in order to make the debug calls that expect irqs to be
989 	 * disabled we need to disable interrupts temporarily.
990 	 */
991 #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
992 	{
993 		unsigned long flags;
994 
995 		local_irq_save(flags);
996 		kmemcheck_slab_free(s, x, s->object_size);
997 		debug_check_no_locks_freed(x, s->object_size);
998 		local_irq_restore(flags);
999 	}
1000 #endif
1001 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1002 		debug_check_no_obj_freed(x, s->object_size);
1003 }
1004 
1005 /*
1006  * Tracking of fully allocated slabs for debugging purposes.
1007  */
1008 static void add_full(struct kmem_cache *s,
1009 	struct kmem_cache_node *n, struct page *page)
1010 {
1011 	if (!(s->flags & SLAB_STORE_USER))
1012 		return;
1013 
1014 	lockdep_assert_held(&n->list_lock);
1015 	list_add(&page->lru, &n->full);
1016 }
1017 
1018 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1019 {
1020 	if (!(s->flags & SLAB_STORE_USER))
1021 		return;
1022 
1023 	lockdep_assert_held(&n->list_lock);
1024 	list_del(&page->lru);
1025 }
1026 
1027 /* Tracking of the number of slabs for debugging purposes */
1028 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1029 {
1030 	struct kmem_cache_node *n = get_node(s, node);
1031 
1032 	return atomic_long_read(&n->nr_slabs);
1033 }
1034 
1035 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1036 {
1037 	return atomic_long_read(&n->nr_slabs);
1038 }
1039 
1040 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1041 {
1042 	struct kmem_cache_node *n = get_node(s, node);
1043 
1044 	/*
1045 	 * May be called early in order to allocate a slab for the
1046 	 * kmem_cache_node structure. Solve the chicken-egg
1047 	 * dilemma by deferring the increment of the count during
1048 	 * bootstrap (see early_kmem_cache_node_alloc).
1049 	 */
1050 	if (likely(n)) {
1051 		atomic_long_inc(&n->nr_slabs);
1052 		atomic_long_add(objects, &n->total_objects);
1053 	}
1054 }
1055 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1056 {
1057 	struct kmem_cache_node *n = get_node(s, node);
1058 
1059 	atomic_long_dec(&n->nr_slabs);
1060 	atomic_long_sub(objects, &n->total_objects);
1061 }
1062 
1063 /* Object debug checks for alloc/free paths */
1064 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1065 								void *object)
1066 {
1067 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1068 		return;
1069 
1070 	init_object(s, object, SLUB_RED_INACTIVE);
1071 	init_tracking(s, object);
1072 }
1073 
1074 static noinline int alloc_debug_processing(struct kmem_cache *s,
1075 					struct page *page,
1076 					void *object, unsigned long addr)
1077 {
1078 	if (!check_slab(s, page))
1079 		goto bad;
1080 
1081 	if (!check_valid_pointer(s, page, object)) {
1082 		object_err(s, page, object, "Freelist Pointer check fails");
1083 		goto bad;
1084 	}
1085 
1086 	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1087 		goto bad;
1088 
1089 	/* Success. Perform special debug activities for allocs */
1090 	if (s->flags & SLAB_STORE_USER)
1091 		set_track(s, object, TRACK_ALLOC, addr);
1092 	trace(s, page, object, 1);
1093 	init_object(s, object, SLUB_RED_ACTIVE);
1094 	return 1;
1095 
1096 bad:
1097 	if (PageSlab(page)) {
1098 		/*
1099 		 * If this is a slab page then let's do the best we can
1100 		 * to avoid issues in the future. Marking all objects
1101 		 * as used avoids touching the remaining objects.
1102 		 */
1103 		slab_fix(s, "Marking all objects used");
1104 		page->inuse = page->objects;
1105 		page->freelist = NULL;
1106 	}
1107 	return 0;
1108 }
1109 
1110 static noinline struct kmem_cache_node *free_debug_processing(
1111 	struct kmem_cache *s, struct page *page, void *object,
1112 	unsigned long addr, unsigned long *flags)
1113 {
1114 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1115 
1116 	spin_lock_irqsave(&n->list_lock, *flags);
1117 	slab_lock(page);
1118 
1119 	if (!check_slab(s, page))
1120 		goto fail;
1121 
1122 	if (!check_valid_pointer(s, page, object)) {
1123 		slab_err(s, page, "Invalid object pointer 0x%p", object);
1124 		goto fail;
1125 	}
1126 
1127 	if (on_freelist(s, page, object)) {
1128 		object_err(s, page, object, "Object already free");
1129 		goto fail;
1130 	}
1131 
1132 	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1133 		goto out;
1134 
1135 	if (unlikely(s != page->slab_cache)) {
1136 		if (!PageSlab(page)) {
1137 			slab_err(s, page, "Attempt to free object(0x%p) "
1138 				"outside of slab", object);
1139 		} else if (!page->slab_cache) {
1140 			printk(KERN_ERR
1141 				"SLUB <none>: no slab for object 0x%p.\n",
1142 						object);
1143 			dump_stack();
1144 		} else
1145 			object_err(s, page, object,
1146 					"page slab pointer corrupt.");
1147 		goto fail;
1148 	}
1149 
1150 	if (s->flags & SLAB_STORE_USER)
1151 		set_track(s, object, TRACK_FREE, addr);
1152 	trace(s, page, object, 0);
1153 	init_object(s, object, SLUB_RED_INACTIVE);
1154 out:
1155 	slab_unlock(page);
1156 	/*
1157 	 * Keep the node's list_lock to preserve integrity
1158 	 * until the object is actually freed
1159 	 */
1160 	return n;
1161 
1162 fail:
1163 	slab_unlock(page);
1164 	spin_unlock_irqrestore(&n->list_lock, *flags);
1165 	slab_fix(s, "Object at 0x%p not freed", object);
1166 	return NULL;
1167 }
1168 
1169 static int __init setup_slub_debug(char *str)
1170 {
1171 	slub_debug = DEBUG_DEFAULT_FLAGS;
1172 	if (*str++ != '=' || !*str)
1173 		/*
1174 		 * No options specified. Switch on full debugging.
1175 		 */
1176 		goto out;
1177 
1178 	if (*str == ',')
1179 		/*
1180 		 * No options but restriction on slabs. This means full
1181 		 * debugging for slabs matching a pattern.
1182 		 */
1183 		goto check_slabs;
1184 
1185 	if (tolower(*str) == 'o') {
1186 		/*
1187 		 * Avoid enabling debugging on caches if their minimum order
1188 		 * would increase as a result.
1189 		 */
1190 		disable_higher_order_debug = 1;
1191 		goto out;
1192 	}
1193 
1194 	slub_debug = 0;
1195 	if (*str == '-')
1196 		/*
1197 		 * Switch off all debugging measures.
1198 		 */
1199 		goto out;
1200 
1201 	/*
1202 	 * Determine which debug features should be switched on
1203 	 */
1204 	for (; *str && *str != ','; str++) {
1205 		switch (tolower(*str)) {
1206 		case 'f':
1207 			slub_debug |= SLAB_DEBUG_FREE;
1208 			break;
1209 		case 'z':
1210 			slub_debug |= SLAB_RED_ZONE;
1211 			break;
1212 		case 'p':
1213 			slub_debug |= SLAB_POISON;
1214 			break;
1215 		case 'u':
1216 			slub_debug |= SLAB_STORE_USER;
1217 			break;
1218 		case 't':
1219 			slub_debug |= SLAB_TRACE;
1220 			break;
1221 		case 'a':
1222 			slub_debug |= SLAB_FAILSLAB;
1223 			break;
1224 		default:
1225 			printk(KERN_ERR "slub_debug option '%c' "
1226 				"unknown. skipped\n", *str);
1227 		}
1228 	}
1229 
1230 check_slabs:
1231 	if (*str == ',')
1232 		slub_debug_slabs = str + 1;
1233 out:
1234 	return 1;
1235 }
1236 
1237 __setup("slub_debug", setup_slub_debug);
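
/*
 * Example usage on the kernel command line, matching the parsing above
 * (illustrative; see Documentation/vm/slub.txt for the full list):
 *
 *	slub_debug		enable all debug options for every cache
 *	slub_debug=FZ		sanity checks (F) plus red zoning (Z) everywhere
 *	slub_debug=,dentry	full debugging, but only for caches whose name
 *				starts with "dentry"
 *	slub_debug=O		full debugging, except on caches whose minimum
 *				order would grow because of the debug metadata
 *	slub_debug=-		switch all debugging off
 */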
1238 
1239 static unsigned long kmem_cache_flags(unsigned long object_size,
1240 	unsigned long flags, const char *name,
1241 	void (*ctor)(void *))
1242 {
1243 	/*
1244 	 * Enable debugging if selected on the kernel commandline.
1245 	 */
1246 	if (slub_debug && (!slub_debug_slabs || (name &&
1247 		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1248 		flags |= slub_debug;
1249 
1250 	return flags;
1251 }
1252 #else
1253 static inline void setup_object_debug(struct kmem_cache *s,
1254 			struct page *page, void *object) {}
1255 
1256 static inline int alloc_debug_processing(struct kmem_cache *s,
1257 	struct page *page, void *object, unsigned long addr) { return 0; }
1258 
1259 static inline struct kmem_cache_node *free_debug_processing(
1260 	struct kmem_cache *s, struct page *page, void *object,
1261 	unsigned long addr, unsigned long *flags) { return NULL; }
1262 
1263 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1264 			{ return 1; }
1265 static inline int check_object(struct kmem_cache *s, struct page *page,
1266 			void *object, u8 val) { return 1; }
1267 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1268 					struct page *page) {}
1269 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1270 					struct page *page) {}
1271 static inline unsigned long kmem_cache_flags(unsigned long object_size,
1272 	unsigned long flags, const char *name,
1273 	void (*ctor)(void *))
1274 {
1275 	return flags;
1276 }
1277 #define slub_debug 0
1278 
1279 #define disable_higher_order_debug 0
1280 
1281 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1282 							{ return 0; }
1283 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1284 							{ return 0; }
1285 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1286 							int objects) {}
1287 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1288 							int objects) {}
1289 
1290 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1291 {
1292 	kmemleak_alloc(ptr, size, 1, flags);
1293 }
1294 
1295 static inline void kfree_hook(const void *x)
1296 {
1297 	kmemleak_free(x);
1298 }
1299 
1300 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1301 							{ return 0; }
1302 
1303 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1304 		void *object)
1305 {
1306 	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
1307 		flags & gfp_allowed_mask);
1308 }
1309 
1310 static inline void slab_free_hook(struct kmem_cache *s, void *x)
1311 {
1312 	kmemleak_free_recursive(x, s->flags);
1313 }
1314 
1315 #endif /* CONFIG_SLUB_DEBUG */
1316 
1317 /*
1318  * Slab allocation and freeing
1319  */
1320 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1321 					struct kmem_cache_order_objects oo)
1322 {
1323 	int order = oo_order(oo);
1324 
1325 	flags |= __GFP_NOTRACK;
1326 
1327 	if (node == NUMA_NO_NODE)
1328 		return alloc_pages(flags, order);
1329 	else
1330 		return alloc_pages_exact_node(node, flags, order);
1331 }
1332 
1333 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1334 {
1335 	struct page *page;
1336 	struct kmem_cache_order_objects oo = s->oo;
1337 	gfp_t alloc_gfp;
1338 
1339 	flags &= gfp_allowed_mask;
1340 
1341 	if (flags & __GFP_WAIT)
1342 		local_irq_enable();
1343 
1344 	flags |= s->allocflags;
1345 
1346 	/*
1347 	 * Let the initial higher-order allocation fail under memory pressure
1348 	 * so we fall-back to the minimum order allocation.
1349 	 */
1350 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1351 
1352 	page = alloc_slab_page(alloc_gfp, node, oo);
1353 	if (unlikely(!page)) {
1354 		oo = s->min;
1355 		alloc_gfp = flags;
1356 		/*
1357 		 * Allocation may have failed due to fragmentation.
1358 		 * Try a lower order alloc if possible
1359 		 */
1360 		page = alloc_slab_page(alloc_gfp, node, oo);
1361 
1362 		if (page)
1363 			stat(s, ORDER_FALLBACK);
1364 	}
1365 
1366 	if (kmemcheck_enabled && page
1367 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1368 		int pages = 1 << oo_order(oo);
1369 
1370 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
1371 
1372 		/*
1373 		 * Objects from caches that have a constructor don't get
1374 		 * cleared when they're allocated, so we need to do it here.
1375 		 */
1376 		if (s->ctor)
1377 			kmemcheck_mark_uninitialized_pages(page, pages);
1378 		else
1379 			kmemcheck_mark_unallocated_pages(page, pages);
1380 	}
1381 
1382 	if (flags & __GFP_WAIT)
1383 		local_irq_disable();
1384 	if (!page)
1385 		return NULL;
1386 
1387 	page->objects = oo_objects(oo);
1388 	mod_zone_page_state(page_zone(page),
1389 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1390 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1391 		1 << oo_order(oo));
1392 
1393 	return page;
1394 }
1395 
1396 static void setup_object(struct kmem_cache *s, struct page *page,
1397 				void *object)
1398 {
1399 	setup_object_debug(s, page, object);
1400 	if (unlikely(s->ctor))
1401 		s->ctor(object);
1402 }
1403 
1404 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1405 {
1406 	struct page *page;
1407 	void *start;
1408 	void *last;
1409 	void *p;
1410 	int order;
1411 
1412 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1413 
1414 	page = allocate_slab(s,
1415 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1416 	if (!page)
1417 		goto out;
1418 
1419 	order = compound_order(page);
1420 	inc_slabs_node(s, page_to_nid(page), page->objects);
1421 	memcg_bind_pages(s, order);
1422 	page->slab_cache = s;
1423 	__SetPageSlab(page);
1424 	if (page->pfmemalloc)
1425 		SetPageSlabPfmemalloc(page);
1426 
1427 	start = page_address(page);
1428 
1429 	if (unlikely(s->flags & SLAB_POISON))
1430 		memset(start, POISON_INUSE, PAGE_SIZE << order);
1431 
1432 	last = start;
1433 	for_each_object(p, s, start, page->objects) {
1434 		setup_object(s, page, last);
1435 		set_freepointer(s, last, p);
1436 		last = p;
1437 	}
1438 	setup_object(s, page, last);
1439 	set_freepointer(s, last, NULL);
1440 
1441 	page->freelist = start;
1442 	page->inuse = page->objects;
1443 	page->frozen = 1;
1444 out:
1445 	return page;
1446 }
1447 
1448 static void __free_slab(struct kmem_cache *s, struct page *page)
1449 {
1450 	int order = compound_order(page);
1451 	int pages = 1 << order;
1452 
1453 	if (kmem_cache_debug(s)) {
1454 		void *p;
1455 
1456 		slab_pad_check(s, page);
1457 		for_each_object(p, s, page_address(page),
1458 						page->objects)
1459 			check_object(s, page, p, SLUB_RED_INACTIVE);
1460 	}
1461 
1462 	kmemcheck_free_shadow(page, compound_order(page));
1463 
1464 	mod_zone_page_state(page_zone(page),
1465 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1466 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1467 		-pages);
1468 
1469 	__ClearPageSlabPfmemalloc(page);
1470 	__ClearPageSlab(page);
1471 
1472 	memcg_release_pages(s, order);
1473 	page_mapcount_reset(page);
1474 	if (current->reclaim_state)
1475 		current->reclaim_state->reclaimed_slab += pages;
1476 	__free_memcg_kmem_pages(page, order);
1477 }
1478 
1479 #define need_reserve_slab_rcu						\
1480 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1481 
1482 static void rcu_free_slab(struct rcu_head *h)
1483 {
1484 	struct page *page;
1485 
1486 	if (need_reserve_slab_rcu)
1487 		page = virt_to_head_page(h);
1488 	else
1489 		page = container_of((struct list_head *)h, struct page, lru);
1490 
1491 	__free_slab(page->slab_cache, page);
1492 }
1493 
1494 static void free_slab(struct kmem_cache *s, struct page *page)
1495 {
1496 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1497 		struct rcu_head *head;
1498 
1499 		if (need_reserve_slab_rcu) {
1500 			int order = compound_order(page);
1501 			int offset = (PAGE_SIZE << order) - s->reserved;
1502 
1503 			VM_BUG_ON(s->reserved != sizeof(*head));
1504 			head = page_address(page) + offset;
1505 		} else {
1506 			/*
1507 			 * RCU free overloads the RCU head over the LRU
1508 			 */
1509 			head = (void *)&page->lru;
1510 		}
1511 
1512 		call_rcu(head, rcu_free_slab);
1513 	} else
1514 		__free_slab(s, page);
1515 }
1516 
1517 static void discard_slab(struct kmem_cache *s, struct page *page)
1518 {
1519 	dec_slabs_node(s, page_to_nid(page), page->objects);
1520 	free_slab(s, page);
1521 }
1522 
1523 /*
1524  * Management of partially allocated slabs.
1525  */
1526 static inline void
1527 __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1528 {
1529 	n->nr_partial++;
1530 	if (tail == DEACTIVATE_TO_TAIL)
1531 		list_add_tail(&page->lru, &n->partial);
1532 	else
1533 		list_add(&page->lru, &n->partial);
1534 }
1535 
1536 static inline void add_partial(struct kmem_cache_node *n,
1537 				struct page *page, int tail)
1538 {
1539 	lockdep_assert_held(&n->list_lock);
1540 	__add_partial(n, page, tail);
1541 }
1542 
1543 static inline void
1544 __remove_partial(struct kmem_cache_node *n, struct page *page)
1545 {
1546 	list_del(&page->lru);
1547 	n->nr_partial--;
1548 }
1549 
1550 static inline void remove_partial(struct kmem_cache_node *n,
1551 					struct page *page)
1552 {
1553 	lockdep_assert_held(&n->list_lock);
1554 	__remove_partial(n, page);
1555 }
1556 
1557 /*
1558  * Remove slab from the partial list, freeze it and
1559  * return the pointer to the freelist.
1560  *
1561  * Returns a list of objects or NULL if it fails.
1562  */
1563 static inline void *acquire_slab(struct kmem_cache *s,
1564 		struct kmem_cache_node *n, struct page *page,
1565 		int mode, int *objects)
1566 {
1567 	void *freelist;
1568 	unsigned long counters;
1569 	struct page new;
1570 
1571 	lockdep_assert_held(&n->list_lock);
1572 
1573 	/*
1574 	 * Zap the freelist and set the frozen bit.
1575 	 * The old freelist is the list of objects for the
1576 	 * per cpu allocation list.
1577 	 */
1578 	freelist = page->freelist;
1579 	counters = page->counters;
1580 	new.counters = counters;
1581 	*objects = new.objects - new.inuse;
1582 	if (mode) {
1583 		new.inuse = page->objects;
1584 		new.freelist = NULL;
1585 	} else {
1586 		new.freelist = freelist;
1587 	}
1588 
1589 	VM_BUG_ON(new.frozen);
1590 	new.frozen = 1;
1591 
1592 	if (!__cmpxchg_double_slab(s, page,
1593 			freelist, counters,
1594 			new.freelist, new.counters,
1595 			"acquire_slab"))
1596 		return NULL;
1597 
1598 	remove_partial(n, page);
1599 	WARN_ON(!freelist);
1600 	return freelist;
1601 }
1602 
1603 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1604 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1605 
1606 /*
1607  * Try to allocate a partial slab from a specific node.
1608  */
1609 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1610 				struct kmem_cache_cpu *c, gfp_t flags)
1611 {
1612 	struct page *page, *page2;
1613 	void *object = NULL;
1614 	int available = 0;
1615 	int objects;
1616 
1617 	/*
1618 	 * Racy check. If we mistakenly see no partial slabs then we
1619 	 * just allocate an empty slab. If we mistakenly try to get a
1620 	 * partial slab and there is none available then get_partials()
1621 	 * will return NULL.
1622 	 */
1623 	if (!n || !n->nr_partial)
1624 		return NULL;
1625 
1626 	spin_lock(&n->list_lock);
1627 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
1628 		void *t;
1629 
1630 		if (!pfmemalloc_match(page, flags))
1631 			continue;
1632 
1633 		t = acquire_slab(s, n, page, object == NULL, &objects);
1634 		if (!t)
1635 			break;
1636 
1637 		available += objects;
1638 		if (!object) {
1639 			c->page = page;
1640 			stat(s, ALLOC_FROM_PARTIAL);
1641 			object = t;
1642 		} else {
1643 			put_cpu_partial(s, page, 0);
1644 			stat(s, CPU_PARTIAL_NODE);
1645 		}
1646 		if (!kmem_cache_has_cpu_partial(s)
1647 			|| available > s->cpu_partial / 2)
1648 			break;
1649 
1650 	}
1651 	spin_unlock(&n->list_lock);
1652 	return object;
1653 }
1654 
1655 /*
1656  * Get a page from somewhere. Search in increasing NUMA distances.
1657  */
1658 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1659 		struct kmem_cache_cpu *c)
1660 {
1661 #ifdef CONFIG_NUMA
1662 	struct zonelist *zonelist;
1663 	struct zoneref *z;
1664 	struct zone *zone;
1665 	enum zone_type high_zoneidx = gfp_zone(flags);
1666 	void *object;
1667 	unsigned int cpuset_mems_cookie;
1668 
1669 	/*
1670 	 * The defrag ratio allows a configuration of the tradeoffs between
1671 	 * inter node defragmentation and node local allocations. A lower
1672 	 * defrag_ratio increases the tendency to do local allocations
1673 	 * instead of attempting to obtain partial slabs from other nodes.
1674 	 *
1675 	 * If the defrag_ratio is set to 0 then kmalloc() always
1676 	 * returns node local objects. If the ratio is higher then kmalloc()
1677 	 * may return off node objects because partial slabs are obtained
1678 	 * from other nodes and filled up.
1679 	 *
1680 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1681 	 * defrag_ratio = 1000) then every (well almost) allocation will
1682 	 * first attempt to defrag slab caches on other nodes. This means
1683 	 * scanning over all nodes to look for partial slabs which may be
1684 	 * expensive if we do it every time we are trying to find a slab
1685 	 * with available objects.
1686 	 */
1687 	if (!s->remote_node_defrag_ratio ||
1688 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1689 		return NULL;
1690 
1691 	do {
1692 		cpuset_mems_cookie = read_mems_allowed_begin();
1693 		zonelist = node_zonelist(mempolicy_slab_node(), flags);
1694 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1695 			struct kmem_cache_node *n;
1696 
1697 			n = get_node(s, zone_to_nid(zone));
1698 
1699 			if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1700 					n->nr_partial > s->min_partial) {
1701 				object = get_partial_node(s, n, c, flags);
1702 				if (object) {
1703 					/*
1704 					 * Don't check read_mems_allowed_retry()
1705 					 * here - if mems_allowed was updated in
1706 					 * parallel, that was a harmless race
1707 					 * between allocation and the cpuset
1708 					 * update
1709 					 */
1710 					return object;
1711 				}
1712 			}
1713 		}
1714 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1715 #endif
1716 	return NULL;
1717 }
1718 
1719 /*
1720  * Get a partial page, lock it and return it.
1721  */
1722 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1723 		struct kmem_cache_cpu *c)
1724 {
1725 	void *object;
1726 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
1727 
1728 	object = get_partial_node(s, get_node(s, searchnode), c, flags);
1729 	if (object || node != NUMA_NO_NODE)
1730 		return object;
1731 
1732 	return get_any_partial(s, flags, c);
1733 }
1734 
1735 #ifdef CONFIG_PREEMPT
1736 /*
1737  * Calculate the next globally unique transaction for disambiguation
1738  * during cmpxchg. The transactions start with the cpu number and are then
1739  * incremented by CONFIG_NR_CPUS.
1740  */
1741 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1742 #else
1743 /*
1744  * No preemption is supported, therefore there is also no need to check for
1745  * different cpus.
1746  */
1747 #define TID_STEP 1
1748 #endif
1749 
1750 static inline unsigned long next_tid(unsigned long tid)
1751 {
1752 	return tid + TID_STEP;
1753 }
1754 
1755 static inline unsigned int tid_to_cpu(unsigned long tid)
1756 {
1757 	return tid % TID_STEP;
1758 }
1759 
1760 static inline unsigned long tid_to_event(unsigned long tid)
1761 {
1762 	return tid / TID_STEP;
1763 }
1764 
1765 static inline unsigned int init_tid(int cpu)
1766 {
1767 	return cpu;
1768 }
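
/*
 * Worked example (illustrative): with CONFIG_NR_CPUS == 64 and preemption
 * enabled, TID_STEP is 64, so cpu 3 starts at tid 3 and advances through
 * 67, 131, 195, ...  tid_to_cpu(131) == 131 % 64 == 3 and
 * tid_to_event(131) == 131 / 64 == 2, which is how note_cmpxchg_failure()
 * below can tell a cpu migration (cpu part changed) from other work having
 * run on the same cpu (event part changed).
 */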
1769 
1770 static inline void note_cmpxchg_failure(const char *n,
1771 		const struct kmem_cache *s, unsigned long tid)
1772 {
1773 #ifdef SLUB_DEBUG_CMPXCHG
1774 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1775 
1776 	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1777 
1778 #ifdef CONFIG_PREEMPT
1779 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1780 		printk("due to cpu change %d -> %d\n",
1781 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
1782 	else
1783 #endif
1784 	if (tid_to_event(tid) != tid_to_event(actual_tid))
1785 		printk("due to cpu running other code. Event %ld->%ld\n",
1786 			tid_to_event(tid), tid_to_event(actual_tid));
1787 	else
1788 		printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1789 			actual_tid, tid, next_tid(tid));
1790 #endif
1791 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1792 }
1793 
1794 static void init_kmem_cache_cpus(struct kmem_cache *s)
1795 {
1796 	int cpu;
1797 
1798 	for_each_possible_cpu(cpu)
1799 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1800 }
1801 
1802 /*
1803  * Remove the cpu slab
1804  */
1805 static void deactivate_slab(struct kmem_cache *s, struct page *page,
1806 				void *freelist)
1807 {
1808 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
1809 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1810 	int lock = 0;
1811 	enum slab_modes l = M_NONE, m = M_NONE;
1812 	void *nextfree;
1813 	int tail = DEACTIVATE_TO_HEAD;
1814 	struct page new;
1815 	struct page old;
1816 
1817 	if (page->freelist) {
1818 		stat(s, DEACTIVATE_REMOTE_FREES);
1819 		tail = DEACTIVATE_TO_TAIL;
1820 	}
1821 
1822 	/*
1823 	 * Stage one: Free all available per cpu objects back
1824 	 * to the page freelist while it is still frozen. Leave the
1825 	 * last one.
1826 	 *
1827 	 * There is no need to take the list->lock because the page
1828 	 * is still frozen.
1829 	 */
1830 	while (freelist && (nextfree = get_freepointer(s, freelist))) {
1831 		void *prior;
1832 		unsigned long counters;
1833 
1834 		do {
1835 			prior = page->freelist;
1836 			counters = page->counters;
1837 			set_freepointer(s, freelist, prior);
1838 			new.counters = counters;
1839 			new.inuse--;
1840 			VM_BUG_ON(!new.frozen);
1841 
1842 		} while (!__cmpxchg_double_slab(s, page,
1843 			prior, counters,
1844 			freelist, new.counters,
1845 			"drain percpu freelist"));
1846 
1847 		freelist = nextfree;
1848 	}
1849 
1850 	/*
1851 	 * Stage two: Ensure that the page is unfrozen while the
1852 	 * list presence reflects the actual number of objects
1853 	 * during unfreeze.
1854 	 *
1855  * We set up the list membership and then perform a cmpxchg
1856 	 * with the count. If there is a mismatch then the page
1857 	 * is not unfrozen but the page is on the wrong list.
1858 	 *
1859 	 * Then we restart the process which may have to remove
1860 	 * the page from the list that we just put it on again
1861 	 * because the number of objects in the slab may have
1862 	 * changed.
1863 	 */
1864 redo:
1865 
1866 	old.freelist = page->freelist;
1867 	old.counters = page->counters;
1868 	VM_BUG_ON(!old.frozen);
1869 
1870 	/* Determine target state of the slab */
1871 	new.counters = old.counters;
1872 	if (freelist) {
1873 		new.inuse--;
1874 		set_freepointer(s, freelist, old.freelist);
1875 		new.freelist = freelist;
1876 	} else
1877 		new.freelist = old.freelist;
1878 
1879 	new.frozen = 0;
1880 
1881 	if (!new.inuse && n->nr_partial > s->min_partial)
1882 		m = M_FREE;
1883 	else if (new.freelist) {
1884 		m = M_PARTIAL;
1885 		if (!lock) {
1886 			lock = 1;
1887 			/*
1888 			 * Taking the spinlock removes the possibility
1889 			 * that acquire_slab() will see a slab page that
1890 			 * is frozen
1891 			 */
1892 			spin_lock(&n->list_lock);
1893 		}
1894 	} else {
1895 		m = M_FULL;
1896 		if (kmem_cache_debug(s) && !lock) {
1897 			lock = 1;
1898 			/*
1899 			 * This also ensures that the scanning of full
1900 			 * slabs from diagnostic functions will not see
1901 			 * any frozen slabs.
1902 			 */
1903 			spin_lock(&n->list_lock);
1904 		}
1905 	}
1906 
1907 	if (l != m) {
1908 
1909 		if (l == M_PARTIAL)
1910 
1911 			remove_partial(n, page);
1912 
1913 		else if (l == M_FULL)
1914 
1915 			remove_full(s, n, page);
1916 
1917 		if (m == M_PARTIAL) {
1918 
1919 			add_partial(n, page, tail);
1920 			stat(s, tail);
1921 
1922 		} else if (m == M_FULL) {
1923 
1924 			stat(s, DEACTIVATE_FULL);
1925 			add_full(s, n, page);
1926 
1927 		}
1928 	}
1929 
1930 	l = m;
1931 	if (!__cmpxchg_double_slab(s, page,
1932 				old.freelist, old.counters,
1933 				new.freelist, new.counters,
1934 				"unfreezing slab"))
1935 		goto redo;
1936 
1937 	if (lock)
1938 		spin_unlock(&n->list_lock);
1939 
1940 	if (m == M_FREE) {
1941 		stat(s, DEACTIVATE_EMPTY);
1942 		discard_slab(s, page);
1943 		stat(s, FREE_SLAB);
1944 	}
1945 }
1946 
1947 /*
1948  * Unfreeze all the cpu partial slabs.
1949  *
1950  * This function must be called with interrupts disabled
1951  * for the cpu using c (or some other mechanism must guarantee
1952  * that there are no concurrent accesses).
1953  */
1954 static void unfreeze_partials(struct kmem_cache *s,
1955 		struct kmem_cache_cpu *c)
1956 {
1957 #ifdef CONFIG_SLUB_CPU_PARTIAL
1958 	struct kmem_cache_node *n = NULL, *n2 = NULL;
1959 	struct page *page, *discard_page = NULL;
1960 
1961 	while ((page = c->partial)) {
1962 		struct page new;
1963 		struct page old;
1964 
1965 		c->partial = page->next;
1966 
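		/*
		 * Batch the list_lock per node: keep holding the current
		 * node's lock and only switch locks when the next page
		 * belongs to a different node.
		 */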
1967 		n2 = get_node(s, page_to_nid(page));
1968 		if (n != n2) {
1969 			if (n)
1970 				spin_unlock(&n->list_lock);
1971 
1972 			n = n2;
1973 			spin_lock(&n->list_lock);
1974 		}
1975 
1976 		do {
1977 
1978 			old.freelist = page->freelist;
1979 			old.counters = page->counters;
1980 			VM_BUG_ON(!old.frozen);
1981 
1982 			new.counters = old.counters;
1983 			new.freelist = old.freelist;
1984 
1985 			new.frozen = 0;
1986 
1987 		} while (!__cmpxchg_double_slab(s, page,
1988 				old.freelist, old.counters,
1989 				new.freelist, new.counters,
1990 				"unfreezing slab"));
1991 
1992 		if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
1993 			page->next = discard_page;
1994 			discard_page = page;
1995 		} else {
1996 			add_partial(n, page, DEACTIVATE_TO_TAIL);
1997 			stat(s, FREE_ADD_PARTIAL);
1998 		}
1999 	}
2000 
2001 	if (n)
2002 		spin_unlock(&n->list_lock);
2003 
2004 	while (discard_page) {
2005 		page = discard_page;
2006 		discard_page = discard_page->next;
2007 
2008 		stat(s, DEACTIVATE_EMPTY);
2009 		discard_slab(s, page);
2010 		stat(s, FREE_SLAB);
2011 	}
2012 #endif
2013 }
2014 
2015 /*
2016  * Put a page that was just frozen (in __slab_free) into a partial page
2017  * slot if available. This is done without disabling interrupts or
2018  * preemption. The cmpxchg is racy and may put the partial page
2019  * onto a random cpu's partial slot.
2020  *
2021  * If the per cpu partial list is already full then all of the existing
2022  * partial pages are first moved to the per node partial list.
2023  */
2024 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2025 {
2026 #ifdef CONFIG_SLUB_CPU_PARTIAL
2027 	struct page *oldpage;
2028 	int pages;
2029 	int pobjects;
2030 
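	/*
	 * pages/pobjects shadow the counts cached in the head page of the
	 * per cpu partial list: the number of chained pages and an estimate
	 * of the free objects they held when they were linked in.
	 */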
2031 	do {
2032 		pages = 0;
2033 		pobjects = 0;
2034 		oldpage = this_cpu_read(s->cpu_slab->partial);
2035 
2036 		if (oldpage) {
2037 			pobjects = oldpage->pobjects;
2038 			pages = oldpage->pages;
2039 			if (drain && pobjects > s->cpu_partial) {
2040 				unsigned long flags;
2041 				/*
2042 				 * partial array is full. Move the existing
2043 				 * set to the per node partial list.
2044 				 */
2045 				local_irq_save(flags);
2046 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2047 				local_irq_restore(flags);
2048 				oldpage = NULL;
2049 				pobjects = 0;
2050 				pages = 0;
2051 				stat(s, CPU_PARTIAL_DRAIN);
2052 			}
2053 		}
2054 
2055 		pages++;
2056 		pobjects += page->objects - page->inuse;
2057 
2058 		page->pages = pages;
2059 		page->pobjects = pobjects;
2060 		page->next = oldpage;
2061 
2062 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2063 								!= oldpage);
2064 #endif
2065 }
2066 
2067 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2068 {
2069 	stat(s, CPUSLAB_FLUSH);
2070 	deactivate_slab(s, c->page, c->freelist);
2071 
2072 	c->tid = next_tid(c->tid);
2073 	c->page = NULL;
2074 	c->freelist = NULL;
2075 }
2076 
2077 /*
2078  * Flush cpu slab.
2079  *
2080  * Called from IPI handler with interrupts disabled.
2081  */
2082 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2083 {
2084 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2085 
2086 	if (likely(c)) {
2087 		if (c->page)
2088 			flush_slab(s, c);
2089 
2090 		unfreeze_partials(s, c);
2091 	}
2092 }
2093 
2094 static void flush_cpu_slab(void *d)
2095 {
2096 	struct kmem_cache *s = d;
2097 
2098 	__flush_cpu_slab(s, smp_processor_id());
2099 }
2100 
2101 static bool has_cpu_slab(int cpu, void *info)
2102 {
2103 	struct kmem_cache *s = info;
2104 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2105 
2106 	return c->page || c->partial;
2107 }
2108 
2109 static void flush_all(struct kmem_cache *s)
2110 {
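	/* Only interrupt cpus that actually hold a cpu slab or cpu partial pages. */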
2111 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2112 }
2113 
2114 /*
2115  * Check if the objects in a per cpu structure fit numa
2116  * locality expectations.
2117  */
2118 static inline int node_match(struct page *page, int node)
2119 {
2120 #ifdef CONFIG_NUMA
2121 	if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2122 		return 0;
2123 #endif
2124 	return 1;
2125 }
2126 
2127 static int count_free(struct page *page)
2128 {
2129 	return page->objects - page->inuse;
2130 }
2131 
2132 static unsigned long count_partial(struct kmem_cache_node *n,
2133 					int (*get_count)(struct page *))
2134 {
2135 	unsigned long flags;
2136 	unsigned long x = 0;
2137 	struct page *page;
2138 
2139 	spin_lock_irqsave(&n->list_lock, flags);
2140 	list_for_each_entry(page, &n->partial, lru)
2141 		x += get_count(page);
2142 	spin_unlock_irqrestore(&n->list_lock, flags);
2143 	return x;
2144 }
2145 
2146 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2147 {
2148 #ifdef CONFIG_SLUB_DEBUG
2149 	return atomic_long_read(&n->total_objects);
2150 #else
2151 	return 0;
2152 #endif
2153 }
2154 
2155 static noinline void
2156 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2157 {
2158 	int node;
2159 
2160 	printk(KERN_WARNING
2161 		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2162 		nid, gfpflags);
2163 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
2164 		"default order: %d, min order: %d\n", s->name, s->object_size,
2165 		s->size, oo_order(s->oo), oo_order(s->min));
2166 
2167 	if (oo_order(s->min) > get_order(s->object_size))
2168 		printk(KERN_WARNING "  %s debugging increased min order, use "
2169 		       "slub_debug=O to disable.\n", s->name);
2170 
2171 	for_each_online_node(node) {
2172 		struct kmem_cache_node *n = get_node(s, node);
2173 		unsigned long nr_slabs;
2174 		unsigned long nr_objs;
2175 		unsigned long nr_free;
2176 
2177 		if (!n)
2178 			continue;
2179 
2180 		nr_free  = count_partial(n, count_free);
2181 		nr_slabs = node_nr_slabs(n);
2182 		nr_objs  = node_nr_objs(n);
2183 
2184 		printk(KERN_WARNING
2185 			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2186 			node, nr_slabs, nr_objs, nr_free);
2187 	}
2188 }
2189 
2190 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2191 			int node, struct kmem_cache_cpu **pc)
2192 {
2193 	void *freelist;
2194 	struct kmem_cache_cpu *c = *pc;
2195 	struct page *page;
2196 
2197 	freelist = get_partial(s, flags, node, c);
2198 
2199 	if (freelist)
2200 		return freelist;
2201 
2202 	page = new_slab(s, flags, node);
2203 	if (page) {
2204 		c = __this_cpu_ptr(s->cpu_slab);
2205 		if (c->page)
2206 			flush_slab(s, c);
2207 
2208 		/*
2209 		 * No other reference to the page yet so we can
2210 		 * muck around with it freely without cmpxchg
2211 		 */
2212 		freelist = page->freelist;
2213 		page->freelist = NULL;
2214 
2215 		stat(s, ALLOC_SLAB);
2216 		c->page = page;
2217 		*pc = c;
2218 	} else
2219 		freelist = NULL;
2220 
2221 	return freelist;
2222 }
2223 
2224 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2225 {
2226 	if (unlikely(PageSlabPfmemalloc(page)))
2227 		return gfp_pfmemalloc_allowed(gfpflags);
2228 
2229 	return true;
2230 }
2231 
2232 /*
2233  * Check the page->freelist of a page and either transfer the freelist to the
2234  * per cpu freelist or deactivate the page.
2235  *
2236  * The page is still frozen if the return value is not NULL.
2237  *
2238  * If this function returns NULL then the page has been unfrozen.
2239  *
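 * (The loop below sets new.frozen = (freelist != NULL): a page whose
 * freelist has been exhausted is unfrozen so that the freeing path
 * takes over list management for it.)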
2240  * This function must be called with interrupt disabled.
2241  * This function must be called with interrupts disabled.
2242 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2243 {
2244 	struct page new;
2245 	unsigned long counters;
2246 	void *freelist;
2247 
2248 	do {
2249 		freelist = page->freelist;
2250 		counters = page->counters;
2251 
2252 		new.counters = counters;
2253 		VM_BUG_ON(!new.frozen);
2254 
2255 		new.inuse = page->objects;
2256 		new.frozen = freelist != NULL;
2257 
2258 	} while (!__cmpxchg_double_slab(s, page,
2259 		freelist, counters,
2260 		NULL, new.counters,
2261 		"get_freelist"));
2262 
2263 	return freelist;
2264 }
2265 
2266 /*
2267  * Slow path. The lockless freelist is empty or we need to perform
2268  * debugging duties.
2269  *
2270  * Processing is still very fast if new objects have been freed to the
2271  * regular freelist. In that case we simply take over the regular freelist
2272  * as the lockless freelist and zap the regular freelist.
2273  *
2274  * If that is not working then we fall back to the partial lists. We take the
2275  * first element of the freelist as the object to allocate now and move the
2276  * rest of the freelist to the lockless freelist.
2277  *
2278  * And if we were unable to get a new slab from the partial slab lists then
2279  * we need to allocate a new slab. This is the slowest path since it involves
2280  * a call to the page allocator and the setup of a new slab.
2281  */
2282 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2283 			  unsigned long addr, struct kmem_cache_cpu *c)
2284 {
2285 	void *freelist;
2286 	struct page *page;
2287 	unsigned long flags;
2288 
2289 	local_irq_save(flags);
2290 #ifdef CONFIG_PREEMPT
2291 	/*
2292 	 * We may have been preempted and rescheduled on a different
2293 	 * cpu before disabling interrupts. Need to reload cpu area
2294 	 * pointer.
2295 	 */
2296 	c = this_cpu_ptr(s->cpu_slab);
2297 #endif
2298 
2299 	page = c->page;
2300 	if (!page)
2301 		goto new_slab;
2302 redo:
2303 
2304 	if (unlikely(!node_match(page, node))) {
2305 		stat(s, ALLOC_NODE_MISMATCH);
2306 		deactivate_slab(s, page, c->freelist);
2307 		c->page = NULL;
2308 		c->freelist = NULL;
2309 		goto new_slab;
2310 	}
2311 
2312 	/*
2313 	 * By rights, we should be searching for a slab page that was
2314 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2315 	 * information when the page leaves the per-cpu allocator
2316 	 */
2317 	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2318 		deactivate_slab(s, page, c->freelist);
2319 		c->page = NULL;
2320 		c->freelist = NULL;
2321 		goto new_slab;
2322 	}
2323 
2324 	/* must check again c->freelist in case of cpu migration or IRQ */
2325 	freelist = c->freelist;
2326 	if (freelist)
2327 		goto load_freelist;
2328 
2329 	stat(s, ALLOC_SLOWPATH);
2330 
2331 	freelist = get_freelist(s, page);
2332 
2333 	if (!freelist) {
2334 		c->page = NULL;
2335 		stat(s, DEACTIVATE_BYPASS);
2336 		goto new_slab;
2337 	}
2338 
2339 	stat(s, ALLOC_REFILL);
2340 
2341 load_freelist:
2342 	/*
2343 	 * freelist is pointing to the list of objects to be used.
2344 	 * page is pointing to the page from which the objects are obtained.
2345 	 * That page must be frozen for per cpu allocations to work.
2346 	 */
2347 	VM_BUG_ON(!c->page->frozen);
2348 	c->freelist = get_freepointer(s, freelist);
2349 	c->tid = next_tid(c->tid);
2350 	local_irq_restore(flags);
2351 	return freelist;
2352 
2353 new_slab:
2354 
2355 	if (c->partial) {
2356 		page = c->page = c->partial;
2357 		c->partial = page->next;
2358 		stat(s, CPU_PARTIAL_ALLOC);
2359 		c->freelist = NULL;
2360 		goto redo;
2361 	}
2362 
2363 	freelist = new_slab_objects(s, gfpflags, node, &c);
2364 
2365 	if (unlikely(!freelist)) {
2366 		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2367 			slab_out_of_memory(s, gfpflags, node);
2368 
2369 		local_irq_restore(flags);
2370 		return NULL;
2371 	}
2372 
2373 	page = c->page;
2374 	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2375 		goto load_freelist;
2376 
2377 	/* Only entered in the debug case */
2378 	if (kmem_cache_debug(s) &&
2379 			!alloc_debug_processing(s, page, freelist, addr))
2380 		goto new_slab;	/* Slab failed checks. Next slab needed */
2381 
2382 	deactivate_slab(s, page, get_freepointer(s, freelist));
2383 	c->page = NULL;
2384 	c->freelist = NULL;
2385 	local_irq_restore(flags);
2386 	return freelist;
2387 }
2388 
2389 /*
2390  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2391  * have the fastpath folded into their functions. So no function call
2392  * overhead for requests that can be satisfied on the fastpath.
2393  *
2394  * The fastpath works by first checking if the lockless freelist can be used.
2395  * If not then __slab_alloc is called for slow processing.
2396  *
2397  * Otherwise we can simply pick the next object from the lockless free list.
2398  */
2399 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2400 		gfp_t gfpflags, int node, unsigned long addr)
2401 {
2402 	void **object;
2403 	struct kmem_cache_cpu *c;
2404 	struct page *page;
2405 	unsigned long tid;
2406 
2407 	if (slab_pre_alloc_hook(s, gfpflags))
2408 		return NULL;
2409 
2410 	s = memcg_kmem_get_cache(s, gfpflags);
2411 redo:
2412 	/*
2413 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2414 	 * enabled. We may switch back and forth between cpus while
2415 	 * reading from one cpu area. That does not matter as long
2416 	 * as we end up on the original cpu again when doing the cmpxchg.
2417 	 *
2418 	 * Preemption is disabled for the retrieval of the tid because that
2419 	 * must occur from the current processor. We cannot allow rescheduling
2420 	 * on a different processor between the determination of the pointer
2421 	 * and the retrieval of the tid.
2422 	 */
2423 	preempt_disable();
2424 	c = __this_cpu_ptr(s->cpu_slab);
2425 
2426 	/*
2427 	 * The transaction ids are globally unique per cpu and per operation on
2428 	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2429 	 * occurs on the right processor and that there was no operation on the
2430 	 * linked list in between.
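	 *
	 * The tid is advanced on every allocation and free (next_tid()),
	 * and on preemptible kernels it also encodes the owning cpu
	 * (compare tid_to_cpu()/tid_to_event() in note_cmpxchg_failure()),
	 * so a stale tid makes the cmpxchg below fail and we retry.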
2431 	 */
2432 	tid = c->tid;
2433 	preempt_enable();
2434 
2435 	object = c->freelist;
2436 	page = c->page;
2437 	if (unlikely(!object || !node_match(page, node)))
2438 		object = __slab_alloc(s, gfpflags, node, addr, c);
2439 
2440 	else {
2441 		void *next_object = get_freepointer_safe(s, object);
2442 
2443 		/*
2444 		 * The cmpxchg will only match if there was no additional
2445 		 * operation and if we are on the right processor.
2446 		 *
2447 		 * The cmpxchg does the following atomically (without lock
2448 		 * semantics!)
2449 		 * 1. Relocate first pointer to the current per cpu area.
2450 		 * 2. Verify that tid and freelist have not been changed
2451 		 * 3. If they were not changed replace tid and freelist
2452 		 *
2453 		 * Since this is without lock semantics the protection is only
2454 		 * against code executing on this cpu *not* from access by
2455 		 * other cpus.
2456 		 */
2457 		if (unlikely(!this_cpu_cmpxchg_double(
2458 				s->cpu_slab->freelist, s->cpu_slab->tid,
2459 				object, tid,
2460 				next_object, next_tid(tid)))) {
2461 
2462 			note_cmpxchg_failure("slab_alloc", s, tid);
2463 			goto redo;
2464 		}
2465 		prefetch_freepointer(s, next_object);
2466 		stat(s, ALLOC_FASTPATH);
2467 	}
2468 
2469 	if (unlikely(gfpflags & __GFP_ZERO) && object)
2470 		memset(object, 0, s->object_size);
2471 
2472 	slab_post_alloc_hook(s, gfpflags, object);
2473 
2474 	return object;
2475 }
2476 
2477 static __always_inline void *slab_alloc(struct kmem_cache *s,
2478 		gfp_t gfpflags, unsigned long addr)
2479 {
2480 	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2481 }
2482 
2483 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2484 {
2485 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2486 
2487 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2488 				s->size, gfpflags);
2489 
2490 	return ret;
2491 }
2492 EXPORT_SYMBOL(kmem_cache_alloc);
2493 
2494 #ifdef CONFIG_TRACING
2495 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2496 {
2497 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2498 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2499 	return ret;
2500 }
2501 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2502 #endif
2503 
2504 #ifdef CONFIG_NUMA
2505 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2506 {
2507 	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2508 
2509 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2510 				    s->object_size, s->size, gfpflags, node);
2511 
2512 	return ret;
2513 }
2514 EXPORT_SYMBOL(kmem_cache_alloc_node);
2515 
2516 #ifdef CONFIG_TRACING
2517 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2518 				    gfp_t gfpflags,
2519 				    int node, size_t size)
2520 {
2521 	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2522 
2523 	trace_kmalloc_node(_RET_IP_, ret,
2524 			   size, s->size, gfpflags, node);
2525 	return ret;
2526 }
2527 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2528 #endif
2529 #endif
2530 
2531 /*
2532  * Slow path handling. This may still be called frequently since objects
2533  * have a longer lifetime than the cpu slabs in most processing loads.
2534  *
2535  * So we still attempt to reduce cache line usage. Just take the slab
2536  * lock and free the item. If there is no additional partial page
2537  * handling required then we can return immediately.
2538  */
2539 static void __slab_free(struct kmem_cache *s, struct page *page,
2540 			void *x, unsigned long addr)
2541 {
2542 	void *prior;
2543 	void **object = (void *)x;
2544 	int was_frozen;
2545 	struct page new;
2546 	unsigned long counters;
2547 	struct kmem_cache_node *n = NULL;
2548 	unsigned long uninitialized_var(flags);
2549 
2550 	stat(s, FREE_SLOWPATH);
2551 
2552 	if (kmem_cache_debug(s) &&
2553 		!(n = free_debug_processing(s, page, x, addr, &flags)))
2554 		return;
2555 
2556 	do {
2557 		if (unlikely(n)) {
2558 			spin_unlock_irqrestore(&n->list_lock, flags);
2559 			n = NULL;
2560 		}
2561 		prior = page->freelist;
2562 		counters = page->counters;
2563 		set_freepointer(s, object, prior);
2564 		new.counters = counters;
2565 		was_frozen = new.frozen;
2566 		new.inuse--;
2567 		if ((!new.inuse || !prior) && !was_frozen) {
2568 
2569 			if (kmem_cache_has_cpu_partial(s) && !prior) {
2570 
2571 				/*
2572 				 * Slab was not on any list before and will
2573 				 * be partially empty.
2574 				 * We can defer the list move and instead
2575 				 * freeze it.
2576 				 */
2577 				new.frozen = 1;
2578 
2579 			} else { /* Needs to be taken off a list */
2580 
2581 				n = get_node(s, page_to_nid(page));
2582 				/*
2583 				 * Speculatively acquire the list_lock.
2584 				 * If the cmpxchg does not succeed then we may
2585 				 * drop the list_lock without any processing.
2586 				 *
2587 				 * Otherwise the list_lock will synchronize with
2588 				 * other processors updating the list of slabs.
2589 				 */
2590 				spin_lock_irqsave(&n->list_lock, flags);
2591 
2592 			}
2593 		}
2594 
2595 	} while (!cmpxchg_double_slab(s, page,
2596 		prior, counters,
2597 		object, new.counters,
2598 		"__slab_free"));
2599 
2600 	if (likely(!n)) {
2601 
2602 		/*
2603 		 * If we just froze the page then put it onto the
2604 		 * per cpu partial list.
2605 		 */
2606 		if (new.frozen && !was_frozen) {
2607 			put_cpu_partial(s, page, 1);
2608 			stat(s, CPU_PARTIAL_FREE);
2609 		}
2610 		/*
2611 		 * The list lock was not taken, therefore no list
2612 		 * activity is necessary.
2613 		 */
2614 		if (was_frozen)
2615 			stat(s, FREE_FROZEN);
2616 		return;
2617 	}
2618 
2619 	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
2620 		goto slab_empty;
2621 
2622 	/*
2623 	 * Objects left in the slab. If it was not on the partial list before
2624 	 * then add it.
2625 	 */
2626 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2627 		if (kmem_cache_debug(s))
2628 			remove_full(s, n, page);
2629 		add_partial(n, page, DEACTIVATE_TO_TAIL);
2630 		stat(s, FREE_ADD_PARTIAL);
2631 	}
2632 	spin_unlock_irqrestore(&n->list_lock, flags);
2633 	return;
2634 
2635 slab_empty:
2636 	if (prior) {
2637 		/*
2638 		 * Slab on the partial list.
2639 		 */
2640 		remove_partial(n, page);
2641 		stat(s, FREE_REMOVE_PARTIAL);
2642 	} else {
2643 		/* Slab must be on the full list */
2644 		remove_full(s, n, page);
2645 	}
2646 
2647 	spin_unlock_irqrestore(&n->list_lock, flags);
2648 	stat(s, FREE_SLAB);
2649 	discard_slab(s, page);
2650 }
2651 
2652 /*
2653  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2654  * can perform fastpath freeing without additional function calls.
2655  *
2656  * The fastpath is only possible if we are freeing to the current cpu slab
2657  * of this processor. This is typically the case if we have just allocated
2658  * the item before.
2659  *
2660  * If fastpath is not possible then fall back to __slab_free where we deal
2661  * with all sorts of special processing.
2662  */
2663 static __always_inline void slab_free(struct kmem_cache *s,
2664 			struct page *page, void *x, unsigned long addr)
2665 {
2666 	void **object = (void *)x;
2667 	struct kmem_cache_cpu *c;
2668 	unsigned long tid;
2669 
2670 	slab_free_hook(s, x);
2671 
2672 redo:
2673 	/*
2674 	 * Determine the current cpu's per cpu slab.
2675 	 * The cpu may change afterward. However that does not matter since
2676 	 * data is retrieved via this pointer. If we are on the same cpu
2677 	 * during the cmpxchg then the free will succeed.
2678 	 */
2679 	preempt_disable();
2680 	c = __this_cpu_ptr(s->cpu_slab);
2681 
2682 	tid = c->tid;
2683 	preempt_enable();
2684 
2685 	if (likely(page == c->page)) {
2686 		set_freepointer(s, object, c->freelist);
2687 
2688 		if (unlikely(!this_cpu_cmpxchg_double(
2689 				s->cpu_slab->freelist, s->cpu_slab->tid,
2690 				c->freelist, tid,
2691 				object, next_tid(tid)))) {
2692 
2693 			note_cmpxchg_failure("slab_free", s, tid);
2694 			goto redo;
2695 		}
2696 		stat(s, FREE_FASTPATH);
2697 	} else
2698 		__slab_free(s, page, x, addr);
2699 
2700 }
2701 
2702 void kmem_cache_free(struct kmem_cache *s, void *x)
2703 {
2704 	s = cache_from_obj(s, x);
2705 	if (!s)
2706 		return;
2707 	slab_free(s, virt_to_head_page(x), x, _RET_IP_);
2708 	trace_kmem_cache_free(_RET_IP_, x);
2709 }
2710 EXPORT_SYMBOL(kmem_cache_free);
2711 
2712 /*
2713  * Object placement in a slab is made very easy because we always start at
2714  * offset 0. If we tune the size of the object to the alignment then we can
2715  * get the required alignment by putting one properly sized object after
2716  * another.
2717  *
2718  * Notice that the allocation order determines the sizes of the per cpu
2719  * caches. Each processor has always one slab available for allocations.
2720  * caches. Each processor always has one slab available for allocations.
2721  * must be moved on and off the partial lists and is therefore a factor in
2722  * locking overhead.
2723  */
2724 
2725 /*
2726  * Minimum / Maximum order of slab pages. This influences locking overhead
2727  * and slab fragmentation. A higher order reduces the number of partial slabs
2728  * and increases the number of allocations possible without having to
2729  * take the list_lock.
2730  */
2731 static int slub_min_order;
2732 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2733 static int slub_min_objects;
2734 
2735 /*
2736  * Merge control. If this is set then no merging of slab caches will occur.
2737  * (Could be removed. This was introduced to pacify the merge skeptics.)
2738  */
2739 static int slub_nomerge;
2740 
2741 /*
2742  * Calculate the order of allocation given a slab object size.
2743  *
2744  * The order of allocation has significant impact on performance and other
2745  * system components. Generally order 0 allocations should be preferred since
2746  * order 0 does not cause fragmentation in the page allocator. Larger objects
2747  * can be problematic to put into order 0 slabs because there may be too much
2748  * unused space left. We go to a higher order if more than 1/16th of the slab
2749  * would be wasted.
2750  *
2751  * In order to reach satisfactory performance we must ensure that a minimum
2752  * number of objects is in one slab. Otherwise we may generate too much
2753  * activity on the partial lists which requires taking the list_lock. This is
2754  * less of a concern for large slabs though, which are rarely used.
2755  *
2756  * slub_max_order specifies the order where we begin to stop considering the
2757  * number of objects in a slab as critical. If we reach slub_max_order then
2758  * we try to keep the page order as low as possible. So we accept more waste
2759  * of space in favor of a small page order.
2760  *
2761  * Higher order allocations also allow the placement of more objects in a
2762  * slab and thereby reduce object handling overhead. If the user has
2763  * requested a higher minimum order then we start with that one instead of
2764  * the smallest order which will fit the object.
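 *
 * As a worked example (assuming 4 KiB pages, reserved == 0 and
 * slub_min_order == 0): for a 700 byte object with min_objects == 8,
 * an order 0 slab holds only 5 objects, so the search starts at
 * order 1. An order 1 slab holds 11 objects and leaves
 * 8192 - 11 * 700 = 492 bytes unused, which is within 1/16th
 * (512 bytes) of the slab, so order 1 is returned.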
2765  */
2766 static inline int slab_order(int size, int min_objects,
2767 				int max_order, int fract_leftover, int reserved)
2768 {
2769 	int order;
2770 	int rem;
2771 	int min_order = slub_min_order;
2772 
2773 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
2774 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
2775 
2776 	for (order = max(min_order,
2777 				fls(min_objects * size - 1) - PAGE_SHIFT);
2778 			order <= max_order; order++) {
2779 
2780 		unsigned long slab_size = PAGE_SIZE << order;
2781 
2782 		if (slab_size < min_objects * size + reserved)
2783 			continue;
2784 
2785 		rem = (slab_size - reserved) % size;
2786 
2787 		if (rem <= slab_size / fract_leftover)
2788 			break;
2789 
2790 	}
2791 
2792 	return order;
2793 }
2794 
2795 static inline int calculate_order(int size, int reserved)
2796 {
2797 	int order;
2798 	int min_objects;
2799 	int fraction;
2800 	int max_objects;
2801 
2802 	/*
2803 	 * Attempt to find the best configuration for a slab. This
2804 	 * works by first attempting to generate a layout with
2805 	 * the best configuration and backing off gradually.
2806 	 *
2807 	 * First we reduce the acceptable waste in a slab. Then
2808 	 * we reduce the minimum objects required in a slab.
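	 *
	 * When slub_min_objects is not set on the command line the
	 * default scales with the machine size: e.g. with
	 * nr_cpu_ids == 16, fls(16) == 5, so min_objects starts at
	 * 4 * (5 + 1) = 24, capped at what slub_max_order can hold.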
2809 	 */
2810 	min_objects = slub_min_objects;
2811 	if (!min_objects)
2812 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
2813 	max_objects = order_objects(slub_max_order, size, reserved);
2814 	min_objects = min(min_objects, max_objects);
2815 
2816 	while (min_objects > 1) {
2817 		fraction = 16;
2818 		while (fraction >= 4) {
2819 			order = slab_order(size, min_objects,
2820 					slub_max_order, fraction, reserved);
2821 			if (order <= slub_max_order)
2822 				return order;
2823 			fraction /= 2;
2824 		}
2825 		min_objects--;
2826 	}
2827 
2828 	/*
2829 	 * We were unable to place multiple objects in a slab. Now
2830 	 * let's see if we can place a single object there.
2831 	 */
2832 	order = slab_order(size, 1, slub_max_order, 1, reserved);
2833 	if (order <= slub_max_order)
2834 		return order;
2835 
2836 	/*
2837 	 * Doh this slab cannot be placed using slub_max_order.
2838 	 */
2839 	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
2840 	if (order < MAX_ORDER)
2841 		return order;
2842 	return -ENOSYS;
2843 }
2844 
2845 static void
2846 init_kmem_cache_node(struct kmem_cache_node *n)
2847 {
2848 	n->nr_partial = 0;
2849 	spin_lock_init(&n->list_lock);
2850 	INIT_LIST_HEAD(&n->partial);
2851 #ifdef CONFIG_SLUB_DEBUG
2852 	atomic_long_set(&n->nr_slabs, 0);
2853 	atomic_long_set(&n->total_objects, 0);
2854 	INIT_LIST_HEAD(&n->full);
2855 #endif
2856 }
2857 
2858 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2859 {
2860 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2861 			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
2862 
2863 	/*
2864 	 * Must align to double word boundary for the double cmpxchg
2865 	 * instructions to work; see __pcpu_double_call_return_bool().
2866 	 */
2867 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2868 				     2 * sizeof(void *));
2869 
2870 	if (!s->cpu_slab)
2871 		return 0;
2872 
2873 	init_kmem_cache_cpus(s);
2874 
2875 	return 1;
2876 }
2877 
2878 static struct kmem_cache *kmem_cache_node;
2879 
2880 /*
2881  * No kmalloc_node yet so do it by hand. We know that this is the first
2882  * slab on the node for this slabcache. There are no concurrent accesses
2883  * possible.
2884  *
2885  * Note that this function only works on the kmem_cache_node
2886  * when allocating for the kmem_cache_node. This is used for bootstrapping
2887  * memory on a fresh node that has no slab structures yet.
2888  */
2889 static void early_kmem_cache_node_alloc(int node)
2890 {
2891 	struct page *page;
2892 	struct kmem_cache_node *n;
2893 
2894 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
2895 
2896 	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
2897 
2898 	BUG_ON(!page);
2899 	if (page_to_nid(page) != node) {
2900 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2901 				"node %d\n", node);
2902 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2903 				"in order to be able to continue\n");
2904 	}
2905 
2906 	n = page->freelist;
2907 	BUG_ON(!n);
2908 	page->freelist = get_freepointer(kmem_cache_node, n);
2909 	page->inuse = 1;
2910 	page->frozen = 0;
2911 	kmem_cache_node->node[node] = n;
2912 #ifdef CONFIG_SLUB_DEBUG
2913 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2914 	init_tracking(kmem_cache_node, n);
2915 #endif
2916 	init_kmem_cache_node(n);
2917 	inc_slabs_node(kmem_cache_node, node, page->objects);
2918 
2919 	/*
2920 	 * No locks need to be taken here as it has just been
2921 	 * initialized and there is no concurrent access.
2922 	 */
2923 	__add_partial(n, page, DEACTIVATE_TO_HEAD);
2924 }
2925 
2926 static void free_kmem_cache_nodes(struct kmem_cache *s)
2927 {
2928 	int node;
2929 
2930 	for_each_node_state(node, N_NORMAL_MEMORY) {
2931 		struct kmem_cache_node *n = s->node[node];
2932 
2933 		if (n)
2934 			kmem_cache_free(kmem_cache_node, n);
2935 
2936 		s->node[node] = NULL;
2937 	}
2938 }
2939 
2940 static int init_kmem_cache_nodes(struct kmem_cache *s)
2941 {
2942 	int node;
2943 
2944 	for_each_node_state(node, N_NORMAL_MEMORY) {
2945 		struct kmem_cache_node *n;
2946 
2947 		if (slab_state == DOWN) {
2948 			early_kmem_cache_node_alloc(node);
2949 			continue;
2950 		}
2951 		n = kmem_cache_alloc_node(kmem_cache_node,
2952 						GFP_KERNEL, node);
2953 
2954 		if (!n) {
2955 			free_kmem_cache_nodes(s);
2956 			return 0;
2957 		}
2958 
2959 		s->node[node] = n;
2960 		init_kmem_cache_node(n);
2961 	}
2962 	return 1;
2963 }
2964 
2965 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2966 {
2967 	if (min < MIN_PARTIAL)
2968 		min = MIN_PARTIAL;
2969 	else if (min > MAX_PARTIAL)
2970 		min = MAX_PARTIAL;
2971 	s->min_partial = min;
2972 }
2973 
2974 /*
2975  * calculate_sizes() determines the order and the distribution of data within
2976  * a slab object.
2977  */
2978 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2979 {
2980 	unsigned long flags = s->flags;
2981 	unsigned long size = s->object_size;
2982 	int order;
2983 
2984 	/*
2985 	 * Round up object size to the next word boundary. We can only
2986 	 * place the free pointer at word boundaries and this determines
2987 	 * the possible location of the free pointer.
2988 	 */
2989 	size = ALIGN(size, sizeof(void *));
2990 
2991 #ifdef CONFIG_SLUB_DEBUG
2992 	/*
2993 	 * Determine if we can poison the object itself. If the user of
2994 	 * the slab may touch the object after free or before allocation
2995 	 * then we should never poison the object itself.
2996 	 */
2997 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2998 			!s->ctor)
2999 		s->flags |= __OBJECT_POISON;
3000 	else
3001 		s->flags &= ~__OBJECT_POISON;
3002 
3003 
3004 	/*
3005 	 * If we are Redzoning then check if there is some space between the
3006 	 * end of the object and the free pointer. If not then add an
3007 	 * additional word to have some bytes to store Redzone information.
3008 	 */
3009 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3010 		size += sizeof(void *);
3011 #endif
3012 
3013 	/*
3014 	 * With that we have determined the number of bytes in actual use
3015 	 * by the object. This is the potential offset to the free pointer.
3016 	 */
3017 	s->inuse = size;
3018 
3019 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
3020 		s->ctor)) {
3021 		/*
3022 		 * Relocate free pointer after the object if it is not
3023 		 * permitted to overwrite the first word of the object on
3024 		 * kmem_cache_free.
3025 		 *
3026 		 * This is the case if we do RCU, have a constructor or
3027 		 * destructor or are poisoning the objects.
3028 		 */
3029 		s->offset = size;
3030 		size += sizeof(void *);
3031 	}
3032 
3033 #ifdef CONFIG_SLUB_DEBUG
3034 	if (flags & SLAB_STORE_USER)
3035 		/*
3036 		 * Need to store information about allocs and frees after
3037 		 * the object.
3038 		 */
3039 		size += 2 * sizeof(struct track);
3040 
3041 	if (flags & SLAB_RED_ZONE)
3042 		/*
3043 		 * Add some empty padding so that we can catch
3044 		 * overwrites from earlier objects rather than let
3045 		 * tracking information or the free pointer be
3046 		 * corrupted if a user writes before the start
3047 		 * of the object.
3048 		 */
3049 		size += sizeof(void *);
3050 #endif
3051 
3052 	/*
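	/*
	 * Putting the above together, one allocation slot now looks like:
	 *   object | (red zone) | (free pointer) | (alloc/free tracks) | (pad)
	 * where each parenthesized piece is present only when the
	 * corresponding flag handled above requires it.
	 */
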
3053 	 * SLUB stores one object immediately after another beginning from
3054 	 * offset 0. In order to align the objects we have to simply size
3055 	 * each object to conform to the alignment.
3056 	 */
3057 	size = ALIGN(size, s->align);
3058 	s->size = size;
3059 	if (forced_order >= 0)
3060 		order = forced_order;
3061 	else
3062 		order = calculate_order(size, s->reserved);
3063 
3064 	if (order < 0)
3065 		return 0;
3066 
3067 	s->allocflags = 0;
3068 	if (order)
3069 		s->allocflags |= __GFP_COMP;
3070 
3071 	if (s->flags & SLAB_CACHE_DMA)
3072 		s->allocflags |= GFP_DMA;
3073 
3074 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3075 		s->allocflags |= __GFP_RECLAIMABLE;
3076 
3077 	/*
3078 	 * Determine the number of objects per slab
3079 	 */
3080 	s->oo = oo_make(order, size, s->reserved);
3081 	s->min = oo_make(get_order(size), size, s->reserved);
3082 	if (oo_objects(s->oo) > oo_objects(s->max))
3083 		s->max = s->oo;
3084 
3085 	return !!oo_objects(s->oo);
3086 }
3087 
3088 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
3089 {
3090 	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3091 	s->reserved = 0;
3092 
3093 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3094 		s->reserved = sizeof(struct rcu_head);
3095 
3096 	if (!calculate_sizes(s, -1))
3097 		goto error;
3098 	if (disable_higher_order_debug) {
3099 		/*
3100 		 * Disable debugging flags that store metadata if the min slab
3101 		 * order increased.
3102 		 */
3103 		if (get_order(s->size) > get_order(s->object_size)) {
3104 			s->flags &= ~DEBUG_METADATA_FLAGS;
3105 			s->offset = 0;
3106 			if (!calculate_sizes(s, -1))
3107 				goto error;
3108 		}
3109 	}
3110 
3111 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3112     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3113 	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3114 		/* Enable fast mode */
3115 		s->flags |= __CMPXCHG_DOUBLE;
3116 #endif
3117 
3118 	/*
3119 	 * The larger the object size is, the more pages we want on the partial
3120 	 * list to avoid pounding the page allocator excessively.
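	 * For example, with s->size == 256 this yields ilog2(256) / 2 = 4,
	 * which set_min_partial() clamps to the [MIN_PARTIAL, MAX_PARTIAL]
	 * range.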
3121 	 */
3122 	set_min_partial(s, ilog2(s->size) / 2);
3123 
3124 	/*
3125 	 * cpu_partial determines the maximum number of objects kept in the
3126 	 * per cpu partial lists of a processor.
3127 	 *
3128 	 * Per cpu partial lists mainly contain slabs that just have one
3129 	 * object freed. If they are used for allocation then they can be
3130 	 * filled up again with minimal effort. The slab will never hit the
3131 	 * per node partial lists and therefore no locking will be required.
3132 	 *
3133 	 * This setting also determines
3134 	 *
3135 	 * A) The number of objects from per cpu partial slabs dumped to the
3136 	 *    per node list when we reach the limit.
3137 	 * B) The number of objects in cpu partial slabs to extract from the
3138 	 *    per node list when we run out of per cpu objects. We only fetch
3139 	 *    50% to keep some capacity around for frees.
3140 	 */
3141 	if (!kmem_cache_has_cpu_partial(s))
3142 		s->cpu_partial = 0;
3143 	else if (s->size >= PAGE_SIZE)
3144 		s->cpu_partial = 2;
3145 	else if (s->size >= 1024)
3146 		s->cpu_partial = 6;
3147 	else if (s->size >= 256)
3148 		s->cpu_partial = 13;
3149 	else
3150 		s->cpu_partial = 30;
3151 
3152 #ifdef CONFIG_NUMA
3153 	s->remote_node_defrag_ratio = 1000;
3154 #endif
3155 	if (!init_kmem_cache_nodes(s))
3156 		goto error;
3157 
3158 	if (alloc_kmem_cache_cpus(s))
3159 		return 0;
3160 
3161 	free_kmem_cache_nodes(s);
3162 error:
3163 	if (flags & SLAB_PANIC)
3164 		panic("Cannot create slab %s size=%lu realsize=%u "
3165 			"order=%u offset=%u flags=%lx\n",
3166 			s->name, (unsigned long)s->size, s->size,
3167 			oo_order(s->oo), s->offset, flags);
3168 	return -EINVAL;
3169 }
3170 
3171 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3172 							const char *text)
3173 {
3174 #ifdef CONFIG_SLUB_DEBUG
3175 	void *addr = page_address(page);
3176 	void *p;
3177 	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3178 				     sizeof(long), GFP_ATOMIC);
3179 	if (!map)
3180 		return;
3181 	slab_err(s, page, text, s->name);
3182 	slab_lock(page);
3183 
3184 	get_map(s, page, map);
3185 	for_each_object(p, s, addr, page->objects) {
3186 
3187 		if (!test_bit(slab_index(p, s, addr), map)) {
3188 			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3189 							p, p - addr);
3190 			print_tracking(s, p);
3191 		}
3192 	}
3193 	slab_unlock(page);
3194 	kfree(map);
3195 #endif
3196 }
3197 
3198 /*
3199  * Attempt to free all partial slabs on a node.
3200  * This is called from kmem_cache_close(). We must be the last thread
3201  * using the cache and therefore we do not need to lock anymore.
3202  */
3203 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3204 {
3205 	struct page *page, *h;
3206 
3207 	list_for_each_entry_safe(page, h, &n->partial, lru) {
3208 		if (!page->inuse) {
3209 			__remove_partial(n, page);
3210 			discard_slab(s, page);
3211 		} else {
3212 			list_slab_objects(s, page,
3213 			"Objects remaining in %s on kmem_cache_close()");
3214 		}
3215 	}
3216 }
3217 
3218 /*
3219  * Release all resources used by a slab cache.
3220  */
3221 static inline int kmem_cache_close(struct kmem_cache *s)
3222 {
3223 	int node;
3224 
3225 	flush_all(s);
3226 	/* Attempt to free all objects */
3227 	for_each_node_state(node, N_NORMAL_MEMORY) {
3228 		struct kmem_cache_node *n = get_node(s, node);
3229 
3230 		free_partial(s, n);
3231 		if (n->nr_partial || slabs_node(s, node))
3232 			return 1;
3233 	}
3234 	free_percpu(s->cpu_slab);
3235 	free_kmem_cache_nodes(s);
3236 	return 0;
3237 }
3238 
3239 int __kmem_cache_shutdown(struct kmem_cache *s)
3240 {
3241 	int rc = kmem_cache_close(s);
3242 
3243 	if (!rc) {
3244 		/*
3245 		 * Since slab_attr_store may take the slab_mutex, we should
3246 		 * release the lock while removing the sysfs entry in order to
3247 		 * avoid a deadlock. Because this is pretty much the last
3248 		 * operation we do and the lock will be released shortly after
3249 		 * that in slab_common.c, we could just move sysfs_slab_remove
3250 		 * to a later point in common code. We should do that when we
3251 		 * have a common sysfs framework for all allocators.
3252 		 */
3253 		mutex_unlock(&slab_mutex);
3254 		sysfs_slab_remove(s);
3255 		mutex_lock(&slab_mutex);
3256 	}
3257 
3258 	return rc;
3259 }
3260 
3261 /********************************************************************
3262  *		Kmalloc subsystem
3263  *******************************************************************/
3264 
3265 static int __init setup_slub_min_order(char *str)
3266 {
3267 	get_option(&str, &slub_min_order);
3268 
3269 	return 1;
3270 }
3271 
3272 __setup("slub_min_order=", setup_slub_min_order);
3273 
3274 static int __init setup_slub_max_order(char *str)
3275 {
3276 	get_option(&str, &slub_max_order);
3277 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3278 
3279 	return 1;
3280 }
3281 
3282 __setup("slub_max_order=", setup_slub_max_order);
3283 
3284 static int __init setup_slub_min_objects(char *str)
3285 {
3286 	get_option(&str, &slub_min_objects);
3287 
3288 	return 1;
3289 }
3290 
3291 __setup("slub_min_objects=", setup_slub_min_objects);
3292 
3293 static int __init setup_slub_nomerge(char *str)
3294 {
3295 	slub_nomerge = 1;
3296 	return 1;
3297 }
3298 
3299 __setup("slub_nomerge", setup_slub_nomerge);
3300 
3301 void *__kmalloc(size_t size, gfp_t flags)
3302 {
3303 	struct kmem_cache *s;
3304 	void *ret;
3305 
3306 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3307 		return kmalloc_large(size, flags);
3308 
3309 	s = kmalloc_slab(size, flags);
3310 
3311 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3312 		return s;
3313 
3314 	ret = slab_alloc(s, flags, _RET_IP_);
3315 
3316 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3317 
3318 	return ret;
3319 }
3320 EXPORT_SYMBOL(__kmalloc);
3321 
3322 #ifdef CONFIG_NUMA
3323 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3324 {
3325 	struct page *page;
3326 	void *ptr = NULL;
3327 
3328 	flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
3329 	page = alloc_pages_node(node, flags, get_order(size));
3330 	if (page)
3331 		ptr = page_address(page);
3332 
3333 	kmalloc_large_node_hook(ptr, size, flags);
3334 	return ptr;
3335 }
3336 
3337 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3338 {
3339 	struct kmem_cache *s;
3340 	void *ret;
3341 
3342 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3343 		ret = kmalloc_large_node(size, flags, node);
3344 
3345 		trace_kmalloc_node(_RET_IP_, ret,
3346 				   size, PAGE_SIZE << get_order(size),
3347 				   flags, node);
3348 
3349 		return ret;
3350 	}
3351 
3352 	s = kmalloc_slab(size, flags);
3353 
3354 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3355 		return s;
3356 
3357 	ret = slab_alloc_node(s, flags, node, _RET_IP_);
3358 
3359 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3360 
3361 	return ret;
3362 }
3363 EXPORT_SYMBOL(__kmalloc_node);
3364 #endif
3365 
3366 size_t ksize(const void *object)
3367 {
3368 	struct page *page;
3369 
3370 	if (unlikely(object == ZERO_SIZE_PTR))
3371 		return 0;
3372 
3373 	page = virt_to_head_page(object);
3374 
3375 	if (unlikely(!PageSlab(page))) {
3376 		WARN_ON(!PageCompound(page));
3377 		return PAGE_SIZE << compound_order(page);
3378 	}
3379 
3380 	return slab_ksize(page->slab_cache);
3381 }
3382 EXPORT_SYMBOL(ksize);
3383 
3384 void kfree(const void *x)
3385 {
3386 	struct page *page;
3387 	void *object = (void *)x;
3388 
3389 	trace_kfree(_RET_IP_, x);
3390 
3391 	if (unlikely(ZERO_OR_NULL_PTR(x)))
3392 		return;
3393 
3394 	page = virt_to_head_page(x);
3395 	if (unlikely(!PageSlab(page))) {
3396 		BUG_ON(!PageCompound(page));
3397 		kfree_hook(x);
3398 		__free_memcg_kmem_pages(page, compound_order(page));
3399 		return;
3400 	}
3401 	slab_free(page->slab_cache, page, object, _RET_IP_);
3402 }
3403 EXPORT_SYMBOL(kfree);
3404 
3405 /*
3406  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3407  * the remaining slabs by the number of items in use. The slabs with the
3408  * most items in use come first. New allocations will then fill those up
3409  * and thus they can be removed from the partial lists.
3410  *
3411  * The slabs with the least items are placed last. This results in them
3412  * being allocated from last, increasing the chance that their remaining
3413  * objects are freed and the slabs can eventually be discarded.
3414  */
3415 int kmem_cache_shrink(struct kmem_cache *s)
3416 {
3417 	int node;
3418 	int i;
3419 	struct kmem_cache_node *n;
3420 	struct page *page;
3421 	struct page *t;
3422 	int objects = oo_objects(s->max);
3423 	struct list_head *slabs_by_inuse =
3424 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
3425 	unsigned long flags;
3426 
3427 	if (!slabs_by_inuse)
3428 		return -ENOMEM;
3429 
3430 	flush_all(s);
3431 	for_each_node_state(node, N_NORMAL_MEMORY) {
3432 		n = get_node(s, node);
3433 
3434 		if (!n->nr_partial)
3435 			continue;
3436 
3437 		for (i = 0; i < objects; i++)
3438 			INIT_LIST_HEAD(slabs_by_inuse + i);
3439 
3440 		spin_lock_irqsave(&n->list_lock, flags);
3441 
3442 		/*
3443 		 * Build lists indexed by the items in use in each slab.
3444 		 *
3445 		 * Note that concurrent frees may occur while we hold the
3446 		 * list_lock. page->inuse here is the upper limit.
3447 		 */
3448 		list_for_each_entry_safe(page, t, &n->partial, lru) {
3449 			list_move(&page->lru, slabs_by_inuse + page->inuse);
3450 			if (!page->inuse)
3451 				n->nr_partial--;
3452 		}
3453 
3454 		/*
3455 		 * Rebuild the partial list with the slabs filled up most
3456 		 * first and the least used slabs at the end.
3457 		 */
3458 		for (i = objects - 1; i > 0; i--)
3459 			list_splice(slabs_by_inuse + i, n->partial.prev);
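		/*
		 * Index 0 (completely free slabs) is deliberately left out of
		 * the splice above; those pages are discarded below.
		 */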
3460 
3461 		spin_unlock_irqrestore(&n->list_lock, flags);
3462 
3463 		/* Release empty slabs */
3464 		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3465 			discard_slab(s, page);
3466 	}
3467 
3468 	kfree(slabs_by_inuse);
3469 	return 0;
3470 }
3471 EXPORT_SYMBOL(kmem_cache_shrink);
3472 
3473 static int slab_mem_going_offline_callback(void *arg)
3474 {
3475 	struct kmem_cache *s;
3476 
3477 	mutex_lock(&slab_mutex);
3478 	list_for_each_entry(s, &slab_caches, list)
3479 		kmem_cache_shrink(s);
3480 	mutex_unlock(&slab_mutex);
3481 
3482 	return 0;
3483 }
3484 
3485 static void slab_mem_offline_callback(void *arg)
3486 {
3487 	struct kmem_cache_node *n;
3488 	struct kmem_cache *s;
3489 	struct memory_notify *marg = arg;
3490 	int offline_node;
3491 
3492 	offline_node = marg->status_change_nid_normal;
3493 
3494 	/*
3495 	 * If the node still has available memory, we still need the
3496 	 * kmem_cache_node for it, so there is nothing to do.
3497 	 */
3498 	if (offline_node < 0)
3499 		return;
3500 
3501 	mutex_lock(&slab_mutex);
3502 	list_for_each_entry(s, &slab_caches, list) {
3503 		n = get_node(s, offline_node);
3504 		if (n) {
3505 			/*
3506 			 * if n->nr_slabs > 0, slabs still exist on the node
3507 			 * that is going down. We were unable to free them,
3508 			 * and the offline_pages() function shouldn't call this
3509 			 * callback. So, we must fail.
3510 			 */
3511 			BUG_ON(slabs_node(s, offline_node));
3512 
3513 			s->node[offline_node] = NULL;
3514 			kmem_cache_free(kmem_cache_node, n);
3515 		}
3516 	}
3517 	mutex_unlock(&slab_mutex);
3518 }
3519 
3520 static int slab_mem_going_online_callback(void *arg)
3521 {
3522 	struct kmem_cache_node *n;
3523 	struct kmem_cache *s;
3524 	struct memory_notify *marg = arg;
3525 	int nid = marg->status_change_nid_normal;
3526 	int ret = 0;
3527 
3528 	/*
3529 	 * If the node's memory is already available, then kmem_cache_node is
3530 	 * already created. Nothing to do.
3531 	 */
3532 	if (nid < 0)
3533 		return 0;
3534 
3535 	/*
3536 	 * We are bringing a node online. No memory is available yet. We must
3537 	 * allocate a kmem_cache_node structure in order to bring the node
3538 	 * online.
3539 	 */
3540 	mutex_lock(&slab_mutex);
3541 	list_for_each_entry(s, &slab_caches, list) {
3542 		/*
3543 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
3544 		 *      since memory is not yet available from the node that
3545 		 *      is brought up.
3546 		 */
3547 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
3548 		if (!n) {
3549 			ret = -ENOMEM;
3550 			goto out;
3551 		}
3552 		init_kmem_cache_node(n);
3553 		s->node[nid] = n;
3554 	}
3555 out:
3556 	mutex_unlock(&slab_mutex);
3557 	return ret;
3558 }
3559 
3560 static int slab_memory_callback(struct notifier_block *self,
3561 				unsigned long action, void *arg)
3562 {
3563 	int ret = 0;
3564 
3565 	switch (action) {
3566 	case MEM_GOING_ONLINE:
3567 		ret = slab_mem_going_online_callback(arg);
3568 		break;
3569 	case MEM_GOING_OFFLINE:
3570 		ret = slab_mem_going_offline_callback(arg);
3571 		break;
3572 	case MEM_OFFLINE:
3573 	case MEM_CANCEL_ONLINE:
3574 		slab_mem_offline_callback(arg);
3575 		break;
3576 	case MEM_ONLINE:
3577 	case MEM_CANCEL_OFFLINE:
3578 		break;
3579 	}
3580 	if (ret)
3581 		ret = notifier_from_errno(ret);
3582 	else
3583 		ret = NOTIFY_OK;
3584 	return ret;
3585 }
3586 
3587 static struct notifier_block slab_memory_callback_nb = {
3588 	.notifier_call = slab_memory_callback,
3589 	.priority = SLAB_CALLBACK_PRI,
3590 };
3591 
3592 /********************************************************************
3593  *			Basic setup of slabs
3594  *******************************************************************/
3595 
3596 /*
3597  * Used for early kmem_cache structures that were allocated using
3598  * the page allocator. Allocate them properly then fix up the pointers
3599  * that may be pointing to the wrong kmem_cache structure.
3600  */
3601 
3602 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
3603 {
3604 	int node;
3605 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
3606 
3607 	memcpy(s, static_cache, kmem_cache->object_size);
3608 
3609 	/*
3610 	 * This runs very early, and only the boot processor is supposed to be
3611 	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
3612 	 * IPIs around.
3613 	 */
3614 	__flush_cpu_slab(s, smp_processor_id());
3615 	for_each_node_state(node, N_NORMAL_MEMORY) {
3616 		struct kmem_cache_node *n = get_node(s, node);
3617 		struct page *p;
3618 
3619 		if (n) {
3620 			list_for_each_entry(p, &n->partial, lru)
3621 				p->slab_cache = s;
3622 
3623 #ifdef CONFIG_SLUB_DEBUG
3624 			list_for_each_entry(p, &n->full, lru)
3625 				p->slab_cache = s;
3626 #endif
3627 		}
3628 	}
3629 	list_add(&s->list, &slab_caches);
3630 	return s;
3631 }
3632 
3633 void __init kmem_cache_init(void)
3634 {
3635 	static __initdata struct kmem_cache boot_kmem_cache,
3636 		boot_kmem_cache_node;
3637 
3638 	if (debug_guardpage_minorder())
3639 		slub_max_order = 0;
3640 
3641 	kmem_cache_node = &boot_kmem_cache_node;
3642 	kmem_cache = &boot_kmem_cache;
3643 
3644 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
3645 		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
3646 
3647 	register_hotmemory_notifier(&slab_memory_callback_nb);
3648 
3649 	/* Able to allocate the per node structures */
3650 	slab_state = PARTIAL;
3651 
3652 	create_boot_cache(kmem_cache, "kmem_cache",
3653 			offsetof(struct kmem_cache, node) +
3654 				nr_node_ids * sizeof(struct kmem_cache_node *),
3655 		       SLAB_HWCACHE_ALIGN);
3656 
3657 	kmem_cache = bootstrap(&boot_kmem_cache);
3658 
3659 	/*
3660 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
3661 	 * kmem_cache_node is separately allocated so no need to
3662 	 * update any list pointers.
3663 	 */
3664 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
3665 
3666 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
3667 	create_kmalloc_caches(0);
3668 
3669 #ifdef CONFIG_SMP
3670 	register_cpu_notifier(&slab_notifier);
3671 #endif
3672 
3673 	printk(KERN_INFO
3674 		"SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
3675 		" CPUs=%d, Nodes=%d\n",
3676 		cache_line_size(),
3677 		slub_min_order, slub_max_order, slub_min_objects,
3678 		nr_cpu_ids, nr_node_ids);
3679 }
3680 
3681 void __init kmem_cache_init_late(void)
3682 {
3683 }
3684 
3685 /*
3686  * Find a mergeable slab cache
3687  */
3688 static int slab_unmergeable(struct kmem_cache *s)
3689 {
3690 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3691 		return 1;
3692 
3693 	if (!is_root_cache(s))
3694 		return 1;
3695 
3696 	if (s->ctor)
3697 		return 1;
3698 
3699 	/*
3700 	 * We may have set a slab to be unmergeable during bootstrap.
3701 	 */
3702 	if (s->refcount < 0)
3703 		return 1;
3704 
3705 	return 0;
3706 }
3707 
3708 static struct kmem_cache *find_mergeable(size_t size, size_t align,
3709 		unsigned long flags, const char *name, void (*ctor)(void *))
3710 {
3711 	struct kmem_cache *s;
3712 
3713 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3714 		return NULL;
3715 
3716 	if (ctor)
3717 		return NULL;
3718 
3719 	size = ALIGN(size, sizeof(void *));
3720 	align = calculate_alignment(flags, align, size);
3721 	size = ALIGN(size, align);
3722 	flags = kmem_cache_flags(size, flags, name, NULL);
3723 
3724 	list_for_each_entry(s, &slab_caches, list) {
3725 		if (slab_unmergeable(s))
3726 			continue;
3727 
3728 		if (size > s->size)
3729 			continue;
3730 
3731 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3732 			continue;
3733 		/*
3734 		 * Check if alignment is compatible.
3735 		 * Courtesy of Adrian Drzewiecki
3736 		 */
3737 		if ((s->size & ~(align - 1)) != s->size)
3738 			continue;
3739 
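		/* Do not merge if the existing cache would waste a word or more. */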
3740 		if (s->size - size >= sizeof(void *))
3741 			continue;
3742 
3743 		return s;
3744 	}
3745 	return NULL;
3746 }
3747 
3748 struct kmem_cache *
3749 __kmem_cache_alias(const char *name, size_t size, size_t align,
3750 		   unsigned long flags, void (*ctor)(void *))
3751 {
3752 	struct kmem_cache *s;
3753 
3754 	s = find_mergeable(size, align, flags, name, ctor);
3755 	if (s) {
3756 		int i;
3757 		struct kmem_cache *c;
3758 
3759 		s->refcount++;
3760 
3761 		/*
3762 		 * Adjust the object sizes so that we clear
3763 		 * the complete object on kzalloc.
3764 		 */
3765 		s->object_size = max(s->object_size, (int)size);
3766 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3767 
3768 		for_each_memcg_cache_index(i) {
3769 			c = cache_from_memcg_idx(s, i);
3770 			if (!c)
3771 				continue;
3772 			c->object_size = s->object_size;
3773 			c->inuse = max_t(int, c->inuse,
3774 					 ALIGN(size, sizeof(void *)));
3775 		}
3776 
3777 		if (sysfs_slab_alias(s, name)) {
3778 			s->refcount--;
3779 			s = NULL;
3780 		}
3781 	}
3782 
3783 	return s;
3784 }
3785 
3786 int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3787 {
3788 	int err;
3789 
3790 	err = kmem_cache_open(s, flags);
3791 	if (err)
3792 		return err;
3793 
3794 	/* Mutex is not taken during early boot */
3795 	if (slab_state <= UP)
3796 		return 0;
3797 
3798 	memcg_propagate_slab_attrs(s);
3799 	err = sysfs_slab_add(s);
3800 	if (err)
3801 		kmem_cache_close(s);
3802 
3803 	return err;
3804 }
3805 
3806 #ifdef CONFIG_SMP
3807 /*
3808  * Use the cpu notifier to ensure that the cpu slabs are flushed when
3809  * necessary.
3810  */
3811 static int slab_cpuup_callback(struct notifier_block *nfb,
3812 		unsigned long action, void *hcpu)
3813 {
3814 	long cpu = (long)hcpu;
3815 	struct kmem_cache *s;
3816 	unsigned long flags;
3817 
3818 	switch (action) {
3819 	case CPU_UP_CANCELED:
3820 	case CPU_UP_CANCELED_FROZEN:
3821 	case CPU_DEAD:
3822 	case CPU_DEAD_FROZEN:
3823 		mutex_lock(&slab_mutex);
3824 		list_for_each_entry(s, &slab_caches, list) {
3825 			local_irq_save(flags);
3826 			__flush_cpu_slab(s, cpu);
3827 			local_irq_restore(flags);
3828 		}
3829 		mutex_unlock(&slab_mutex);
3830 		break;
3831 	default:
3832 		break;
3833 	}
3834 	return NOTIFY_OK;
3835 }
3836 
3837 static struct notifier_block slab_notifier = {
3838 	.notifier_call = slab_cpuup_callback
3839 };
3840 
3841 #endif
3842 
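/*
 * Like kmalloc() but attributes the allocation to the passed-in caller
 * address, so tracing points at the original call site rather than at
 * this helper.
 */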
3843 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3844 {
3845 	struct kmem_cache *s;
3846 	void *ret;
3847 
3848 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3849 		return kmalloc_large(size, gfpflags);
3850 
3851 	s = kmalloc_slab(size, gfpflags);
3852 
3853 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3854 		return s;
3855 
3856 	ret = slab_alloc(s, gfpflags, caller);
3857 
3858 	/* Honor the call site pointer we received. */
3859 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
3860 
3861 	return ret;
3862 }
3863 
3864 #ifdef CONFIG_NUMA
3865 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3866 					int node, unsigned long caller)
3867 {
3868 	struct kmem_cache *s;
3869 	void *ret;
3870 
3871 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3872 		ret = kmalloc_large_node(size, gfpflags, node);
3873 
3874 		trace_kmalloc_node(caller, ret,
3875 				   size, PAGE_SIZE << get_order(size),
3876 				   gfpflags, node);
3877 
3878 		return ret;
3879 	}
3880 
3881 	s = kmalloc_slab(size, gfpflags);
3882 
3883 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3884 		return s;
3885 
3886 	ret = slab_alloc_node(s, gfpflags, node, caller);
3887 
3888 	/* Honor the call site pointer we received. */
3889 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
3890 
3891 	return ret;
3892 }
3893 #endif
3894 
3895 #ifdef CONFIG_SYSFS
3896 static int count_inuse(struct page *page)
3897 {
3898 	return page->inuse;
3899 }
3900 
3901 static int count_total(struct page *page)
3902 {
3903 	return page->objects;
3904 }
3905 #endif
3906 
3907 #ifdef CONFIG_SLUB_DEBUG
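/*
 * Verify every object in one slab page: objects still on the freelist
 * must check out as SLUB_RED_INACTIVE, allocated objects as
 * SLUB_RED_ACTIVE. Returns 1 if the slab is consistent.
 */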
3908 static int validate_slab(struct kmem_cache *s, struct page *page,
3909 						unsigned long *map)
3910 {
3911 	void *p;
3912 	void *addr = page_address(page);
3913 
3914 	if (!check_slab(s, page) ||
3915 			!on_freelist(s, page, NULL))
3916 		return 0;
3917 
3918 	/* Now we know that a valid freelist exists */
3919 	bitmap_zero(map, page->objects);
3920 
3921 	get_map(s, page, map);
3922 	for_each_object(p, s, addr, page->objects) {
3923 		if (test_bit(slab_index(p, s, addr), map))
3924 			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
3925 				return 0;
3926 	}
3927 
3928 	for_each_object(p, s, addr, page->objects)
3929 		if (!test_bit(slab_index(p, s, addr), map))
3930 			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
3931 				return 0;
3932 	return 1;
3933 }
3934 
3935 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3936 						unsigned long *map)
3937 {
3938 	slab_lock(page);
3939 	validate_slab(s, page, map);
3940 	slab_unlock(page);
3941 }
3942 
3943 static int validate_slab_node(struct kmem_cache *s,
3944 		struct kmem_cache_node *n, unsigned long *map)
3945 {
3946 	unsigned long count = 0;
3947 	struct page *page;
3948 	unsigned long flags;
3949 
3950 	spin_lock_irqsave(&n->list_lock, flags);
3951 
3952 	list_for_each_entry(page, &n->partial, lru) {
3953 		validate_slab_slab(s, page, map);
3954 		count++;
3955 	}
3956 	if (count != n->nr_partial)
3957 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3958 			"counter=%ld\n", s->name, count, n->nr_partial);
3959 
3960 	if (!(s->flags & SLAB_STORE_USER))
3961 		goto out;
3962 
3963 	list_for_each_entry(page, &n->full, lru) {
3964 		validate_slab_slab(s, page, map);
3965 		count++;
3966 	}
3967 	if (count != atomic_long_read(&n->nr_slabs))
3968 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3969 			"counter=%ld\n", s->name, count,
3970 			atomic_long_read(&n->nr_slabs));
3971 
3972 out:
3973 	spin_unlock_irqrestore(&n->list_lock, flags);
3974 	return count;
3975 }
3976 
3977 static long validate_slab_cache(struct kmem_cache *s)
3978 {
3979 	int node;
3980 	unsigned long count = 0;
3981 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3982 				sizeof(unsigned long), GFP_KERNEL);
3983 
3984 	if (!map)
3985 		return -ENOMEM;
3986 
3987 	flush_all(s);
3988 	for_each_node_state(node, N_NORMAL_MEMORY) {
3989 		struct kmem_cache_node *n = get_node(s, node);
3990 
3991 		count += validate_slab_node(s, n, map);
3992 	}
3993 	kfree(map);
3994 	return count;
3995 }
3996 /*
3997  * Generate lists of code addresses where slabcache objects are allocated
3998  * and freed.
3999  */
4000 
4001 struct location {
4002 	unsigned long count;
4003 	unsigned long addr;
4004 	long long sum_time;
4005 	long min_time;
4006 	long max_time;
4007 	long min_pid;
4008 	long max_pid;
4009 	DECLARE_BITMAP(cpus, NR_CPUS);
4010 	nodemask_t nodes;
4011 };
4012 
4013 struct loc_track {
4014 	unsigned long max;
4015 	unsigned long count;
4016 	struct location *loc;
4017 };
4018 
4019 static void free_loc_track(struct loc_track *t)
4020 {
4021 	if (t->max)
4022 		free_pages((unsigned long)t->loc,
4023 			get_order(sizeof(struct location) * t->max));
4024 }
4025 
4026 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4027 {
4028 	struct location *l;
4029 	int order;
4030 
4031 	order = get_order(sizeof(struct location) * max);
4032 
4033 	l = (void *)__get_free_pages(flags, order);
4034 	if (!l)
4035 		return 0;
4036 
4037 	if (t->count) {
4038 		memcpy(l, t->loc, sizeof(struct location) * t->count);
4039 		free_loc_track(t);
4040 	}
4041 	t->max = max;
4042 	t->loc = l;
4043 	return 1;
4044 }
4045 
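/*
 * Record one alloc/free event in the location table. The table is
 * kept sorted by call site address, so the binary search below either
 * finds an existing entry to update (count, age, pid range and
 * cpu/node masks) or yields the position for inserting a new entry.
 */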
4046 static int add_location(struct loc_track *t, struct kmem_cache *s,
4047 				const struct track *track)
4048 {
4049 	long start, end, pos;
4050 	struct location *l;
4051 	unsigned long caddr;
4052 	unsigned long age = jiffies - track->when;
4053 
4054 	start = -1;
4055 	end = t->count;
4056 
4057 	for ( ; ; ) {
4058 		pos = start + (end - start + 1) / 2;
4059 
4060 		/*
4061 		 * There is nothing at "end". If we end up there
4062 		 * we need to insert the new element before "end".
4063 		 */
4064 		if (pos == end)
4065 			break;
4066 
4067 		caddr = t->loc[pos].addr;
4068 		if (track->addr == caddr) {
4069 
4070 			l = &t->loc[pos];
4071 			l->count++;
4072 			if (track->when) {
4073 				l->sum_time += age;
4074 				if (age < l->min_time)
4075 					l->min_time = age;
4076 				if (age > l->max_time)
4077 					l->max_time = age;
4078 
4079 				if (track->pid < l->min_pid)
4080 					l->min_pid = track->pid;
4081 				if (track->pid > l->max_pid)
4082 					l->max_pid = track->pid;
4083 
4084 				cpumask_set_cpu(track->cpu,
4085 						to_cpumask(l->cpus));
4086 			}
4087 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4088 			return 1;
4089 		}
4090 
4091 		if (track->addr < caddr)
4092 			end = pos;
4093 		else
4094 			start = pos;
4095 	}
4096 
4097 	/*
4098 	 * Not found. Insert new tracking element.
4099 	 */
4100 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4101 		return 0;
4102 
4103 	l = t->loc + pos;
4104 	if (pos < t->count)
4105 		memmove(l + 1, l,
4106 			(t->count - pos) * sizeof(struct location));
4107 	t->count++;
4108 	l->count = 1;
4109 	l->addr = track->addr;
4110 	l->sum_time = age;
4111 	l->min_time = age;
4112 	l->max_time = age;
4113 	l->min_pid = track->pid;
4114 	l->max_pid = track->pid;
4115 	cpumask_clear(to_cpumask(l->cpus));
4116 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4117 	nodes_clear(l->nodes);
4118 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4119 	return 1;
4120 }
4121 
4122 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4123 		struct page *page, enum track_item alloc,
4124 		unsigned long *map)
4125 {
4126 	void *addr = page_address(page);
4127 	void *p;
4128 
4129 	bitmap_zero(map, page->objects);
4130 	get_map(s, page, map);
4131 
4132 	for_each_object(p, s, addr, page->objects)
4133 		if (!test_bit(slab_index(p, s, addr), map))
4134 			add_location(t, s, get_track(s, p, alloc));
4135 }
4136 
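/*
 * Produce the text for the alloc_calls/free_calls sysfs files: scan
 * all partial and full slabs, collect the recorded tracking data per
 * call site and format one line per location into buf.
 */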
4137 static int list_locations(struct kmem_cache *s, char *buf,
4138 					enum track_item alloc)
4139 {
4140 	int len = 0;
4141 	unsigned long i;
4142 	struct loc_track t = { 0, 0, NULL };
4143 	int node;
4144 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4145 				     sizeof(unsigned long), GFP_KERNEL);
4146 
4147 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4148 				     GFP_TEMPORARY)) {
4149 		kfree(map);
4150 		return sprintf(buf, "Out of memory\n");
4151 	}
4152 	/* Push back cpu slabs */
4153 	flush_all(s);
4154 
4155 	for_each_node_state(node, N_NORMAL_MEMORY) {
4156 		struct kmem_cache_node *n = get_node(s, node);
4157 		unsigned long flags;
4158 		struct page *page;
4159 
4160 		if (!atomic_long_read(&n->nr_slabs))
4161 			continue;
4162 
4163 		spin_lock_irqsave(&n->list_lock, flags);
4164 		list_for_each_entry(page, &n->partial, lru)
4165 			process_slab(&t, s, page, alloc, map);
4166 		list_for_each_entry(page, &n->full, lru)
4167 			process_slab(&t, s, page, alloc, map);
4168 		spin_unlock_irqrestore(&n->list_lock, flags);
4169 	}
4170 
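	/*
	 * One line per call site:
	 * "<count> <symbol> age=<min>/<avg>/<max> pid=... cpus=... nodes=..."
	 */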
4171 	for (i = 0; i < t.count; i++) {
4172 		struct location *l = &t.loc[i];
4173 
4174 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4175 			break;
4176 		len += sprintf(buf + len, "%7ld ", l->count);
4177 
4178 		if (l->addr)
4179 			len += sprintf(buf + len, "%pS", (void *)l->addr);
4180 		else
4181 			len += sprintf(buf + len, "<not-available>");
4182 
4183 		if (l->sum_time != l->min_time) {
4184 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4185 				l->min_time,
4186 				(long)div_u64(l->sum_time, l->count),
4187 				l->max_time);
4188 		} else
4189 			len += sprintf(buf + len, " age=%ld",
4190 				l->min_time);
4191 
4192 		if (l->min_pid != l->max_pid)
4193 			len += sprintf(buf + len, " pid=%ld-%ld",
4194 				l->min_pid, l->max_pid);
4195 		else
4196 			len += sprintf(buf + len, " pid=%ld",
4197 				l->min_pid);
4198 
4199 		if (num_online_cpus() > 1 &&
4200 				!cpumask_empty(to_cpumask(l->cpus)) &&
4201 				len < PAGE_SIZE - 60) {
4202 			len += sprintf(buf + len, " cpus=");
4203 			len += cpulist_scnprintf(buf + len,
4204 						 PAGE_SIZE - len - 50,
4205 						 to_cpumask(l->cpus));
4206 		}
4207 
4208 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4209 				len < PAGE_SIZE - 60) {
4210 			len += sprintf(buf + len, " nodes=");
4211 			len += nodelist_scnprintf(buf + len,
4212 						  PAGE_SIZE - len - 50,
4213 						  l->nodes);
4214 		}
4215 
4216 		len += sprintf(buf + len, "\n");
4217 	}
4218 
4219 	free_loc_track(&t);
4220 	kfree(map);
4221 	if (!t.count)
4222 		len += sprintf(buf, "No data\n");
4223 	return len;
4224 }
4225 #endif
4226 
4227 #ifdef SLUB_RESILIENCY_TEST
4228 static void resiliency_test(void)
4229 {
4230 	u8 *p;
4231 
4232 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4233 
4234 	printk(KERN_ERR "SLUB resiliency testing\n");
4235 	printk(KERN_ERR "-----------------------\n");
4236 	printk(KERN_ERR "A. Corruption after allocation\n");
4237 
4238 	p = kzalloc(16, GFP_KERNEL);
4239 	p[16] = 0x12;
4240 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4241 			" 0x12->0x%p\n\n", p + 16);
4242 
4243 	validate_slab_cache(kmalloc_caches[4]);
4244 
4245 	/* Hmmm... The next two are dangerous */
4246 	p = kzalloc(32, GFP_KERNEL);
4247 	p[32 + sizeof(void *)] = 0x34;
4248 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4249 			" 0x34 -> 0x%p\n", p);
4250 	printk(KERN_ERR
4251 		"If allocated object is overwritten then not detectable\n\n");
4252 
4253 	validate_slab_cache(kmalloc_caches[5]);
4254 	p = kzalloc(64, GFP_KERNEL);
4255 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4256 	*p = 0x56;
4257 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4258 									p);
4259 	printk(KERN_ERR
4260 		"If allocated object is overwritten then not detectable\n\n");
4261 	validate_slab_cache(kmalloc_caches[6]);
4262 
4263 	printk(KERN_ERR "\nB. Corruption after free\n");
4264 	p = kzalloc(128, GFP_KERNEL);
4265 	kfree(p);
4266 	*p = 0x78;
4267 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4268 	validate_slab_cache(kmalloc_caches[7]);
4269 
4270 	p = kzalloc(256, GFP_KERNEL);
4271 	kfree(p);
4272 	p[50] = 0x9a;
4273 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4274 			p);
4275 	validate_slab_cache(kmalloc_caches[8]);
4276 
4277 	p = kzalloc(512, GFP_KERNEL);
4278 	kfree(p);
4279 	p[512] = 0xab;
4280 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4281 	validate_slab_cache(kmalloc_caches[9]);
4282 }
4283 #else
4284 #ifdef CONFIG_SYSFS
4285 static void resiliency_test(void) {}
4286 #endif
4287 #endif
4288 
4289 #ifdef CONFIG_SYSFS
4290 enum slab_stat_type {
4291 	SL_ALL,			/* All slabs */
4292 	SL_PARTIAL,		/* Only partially allocated slabs */
4293 	SL_CPU,			/* Only slabs used for cpu caches */
4294 	SL_OBJECTS,		/* Determine allocated objects not slabs */
4295 	SL_TOTAL		/* Determine object capacity not slabs */
4296 };
4297 
4298 #define SO_ALL		(1 << SL_ALL)
4299 #define SO_PARTIAL	(1 << SL_PARTIAL)
4300 #define SO_CPU		(1 << SL_CPU)
4301 #define SO_OBJECTS	(1 << SL_OBJECTS)
4302 #define SO_TOTAL	(1 << SL_TOTAL)
4303 
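/*
 * Common backend for the object/slab counting sysfs files. The SO_*
 * flags select which slabs are scanned (cpu, partial or all) and
 * whether slabs, allocated objects or total object capacity are
 * counted. The result is a total plus, on NUMA, a per-node breakdown.
 */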
4304 static ssize_t show_slab_objects(struct kmem_cache *s,
4305 			    char *buf, unsigned long flags)
4306 {
4307 	unsigned long total = 0;
4308 	int node;
4309 	int x;
4310 	unsigned long *nodes;
4311 
4312 	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4313 	if (!nodes)
4314 		return -ENOMEM;
4315 
4316 	if (flags & SO_CPU) {
4317 		int cpu;
4318 
4319 		for_each_possible_cpu(cpu) {
4320 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4321 							       cpu);
4322 			int node;
4323 			struct page *page;
4324 
4325 			page = ACCESS_ONCE(c->page);
4326 			if (!page)
4327 				continue;
4328 
4329 			node = page_to_nid(page);
4330 			if (flags & SO_TOTAL)
4331 				x = page->objects;
4332 			else if (flags & SO_OBJECTS)
4333 				x = page->inuse;
4334 			else
4335 				x = 1;
4336 
4337 			total += x;
4338 			nodes[node] += x;
4339 
4340 			page = ACCESS_ONCE(c->partial);
4341 			if (page) {
4342 				node = page_to_nid(page);
4343 				if (flags & SO_TOTAL)
4344 					WARN_ON_ONCE(1);
4345 				else if (flags & SO_OBJECTS)
4346 					WARN_ON_ONCE(1);
4347 				else
4348 					x = page->pages;
4349 				total += x;
4350 				nodes[node] += x;
4351 			}
4352 		}
4353 	}
4354 
4355 	lock_memory_hotplug();
4356 #ifdef CONFIG_SLUB_DEBUG
4357 	if (flags & SO_ALL) {
4358 		for_each_node_state(node, N_NORMAL_MEMORY) {
4359 			struct kmem_cache_node *n = get_node(s, node);
4360 
4361 			if (flags & SO_TOTAL)
4362 				x = atomic_long_read(&n->total_objects);
4363 			else if (flags & SO_OBJECTS)
4364 				x = atomic_long_read(&n->total_objects) -
4365 					count_partial(n, count_free);
4366 			else
4367 				x = atomic_long_read(&n->nr_slabs);
4368 			total += x;
4369 			nodes[node] += x;
4370 		}
4371 
4372 	} else
4373 #endif
4374 	if (flags & SO_PARTIAL) {
4375 		for_each_node_state(node, N_NORMAL_MEMORY) {
4376 			struct kmem_cache_node *n = get_node(s, node);
4377 
4378 			if (flags & SO_TOTAL)
4379 				x = count_partial(n, count_total);
4380 			else if (flags & SO_OBJECTS)
4381 				x = count_partial(n, count_inuse);
4382 			else
4383 				x = n->nr_partial;
4384 			total += x;
4385 			nodes[node] += x;
4386 		}
4387 	}
4388 	x = sprintf(buf, "%lu", total);
4389 #ifdef CONFIG_NUMA
4390 	for_each_node_state(node, N_NORMAL_MEMORY)
4391 		if (nodes[node])
4392 			x += sprintf(buf + x, " N%d=%lu",
4393 					node, nodes[node]);
4394 #endif
4395 	unlock_memory_hotplug();
4396 	kfree(nodes);
4397 	return x + sprintf(buf + x, "\n");
4398 }
4399 
4400 #ifdef CONFIG_SLUB_DEBUG
4401 static int any_slab_objects(struct kmem_cache *s)
4402 {
4403 	int node;
4404 
4405 	for_each_online_node(node) {
4406 		struct kmem_cache_node *n = get_node(s, node);
4407 
4408 		if (!n)
4409 			continue;
4410 
4411 		if (atomic_long_read(&n->total_objects))
4412 			return 1;
4413 	}
4414 	return 0;
4415 }
4416 #endif
4417 
4418 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4419 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4420 
4421 struct slab_attribute {
4422 	struct attribute attr;
4423 	ssize_t (*show)(struct kmem_cache *s, char *buf);
4424 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4425 };
4426 
4427 #define SLAB_ATTR_RO(_name) \
4428 	static struct slab_attribute _name##_attr = \
4429 	__ATTR(_name, 0400, _name##_show, NULL)
4430 
4431 #define SLAB_ATTR(_name) \
4432 	static struct slab_attribute _name##_attr =  \
4433 	__ATTR(_name, 0600, _name##_show, _name##_store)
4434 
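/*
 * SLAB_ATTR_RO(foo) defines foo_attr backed by foo_show() with mode
 * 0400; SLAB_ATTR(foo) additionally hooks up foo_store() and uses
 * mode 0600 so the attribute is writable through sysfs.
 */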
4435 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4436 {
4437 	return sprintf(buf, "%d\n", s->size);
4438 }
4439 SLAB_ATTR_RO(slab_size);
4440 
4441 static ssize_t align_show(struct kmem_cache *s, char *buf)
4442 {
4443 	return sprintf(buf, "%d\n", s->align);
4444 }
4445 SLAB_ATTR_RO(align);
4446 
4447 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4448 {
4449 	return sprintf(buf, "%d\n", s->object_size);
4450 }
4451 SLAB_ATTR_RO(object_size);
4452 
4453 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4454 {
4455 	return sprintf(buf, "%d\n", oo_objects(s->oo));
4456 }
4457 SLAB_ATTR_RO(objs_per_slab);
4458 
4459 static ssize_t order_store(struct kmem_cache *s,
4460 				const char *buf, size_t length)
4461 {
4462 	unsigned long order;
4463 	int err;
4464 
4465 	err = kstrtoul(buf, 10, &order);
4466 	if (err)
4467 		return err;
4468 
4469 	if (order > slub_max_order || order < slub_min_order)
4470 		return -EINVAL;
4471 
4472 	calculate_sizes(s, order);
4473 	return length;
4474 }
4475 
4476 static ssize_t order_show(struct kmem_cache *s, char *buf)
4477 {
4478 	return sprintf(buf, "%d\n", oo_order(s->oo));
4479 }
4480 SLAB_ATTR(order);
4481 
4482 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4483 {
4484 	return sprintf(buf, "%lu\n", s->min_partial);
4485 }
4486 
4487 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4488 				 size_t length)
4489 {
4490 	unsigned long min;
4491 	int err;
4492 
4493 	err = kstrtoul(buf, 10, &min);
4494 	if (err)
4495 		return err;
4496 
4497 	set_min_partial(s, min);
4498 	return length;
4499 }
4500 SLAB_ATTR(min_partial);
4501 
4502 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4503 {
4504 	return sprintf(buf, "%u\n", s->cpu_partial);
4505 }
4506 
4507 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4508 				 size_t length)
4509 {
4510 	unsigned long objects;
4511 	int err;
4512 
4513 	err = kstrtoul(buf, 10, &objects);
4514 	if (err)
4515 		return err;
4516 	if (objects && !kmem_cache_has_cpu_partial(s))
4517 		return -EINVAL;
4518 
4519 	s->cpu_partial = objects;
4520 	flush_all(s);
4521 	return length;
4522 }
4523 SLAB_ATTR(cpu_partial);
4524 
4525 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4526 {
4527 	if (!s->ctor)
4528 		return 0;
4529 	return sprintf(buf, "%pS\n", s->ctor);
4530 }
4531 SLAB_ATTR_RO(ctor);
4532 
4533 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4534 {
4535 	return sprintf(buf, "%d\n", s->refcount - 1);
4536 }
4537 SLAB_ATTR_RO(aliases);
4538 
4539 static ssize_t partial_show(struct kmem_cache *s, char *buf)
4540 {
4541 	return show_slab_objects(s, buf, SO_PARTIAL);
4542 }
4543 SLAB_ATTR_RO(partial);
4544 
4545 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4546 {
4547 	return show_slab_objects(s, buf, SO_CPU);
4548 }
4549 SLAB_ATTR_RO(cpu_slabs);
4550 
4551 static ssize_t objects_show(struct kmem_cache *s, char *buf)
4552 {
4553 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4554 }
4555 SLAB_ATTR_RO(objects);
4556 
4557 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4558 {
4559 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4560 }
4561 SLAB_ATTR_RO(objects_partial);
4562 
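/*
 * Report the per cpu partial lists as "objects(pages)" overall,
 * followed by a " C<cpu>=objects(pages)" entry for every cpu that
 * currently holds partial slabs.
 */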
4563 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4564 {
4565 	int objects = 0;
4566 	int pages = 0;
4567 	int cpu;
4568 	int len;
4569 
4570 	for_each_online_cpu(cpu) {
4571 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4572 
4573 		if (page) {
4574 			pages += page->pages;
4575 			objects += page->pobjects;
4576 		}
4577 	}
4578 
4579 	len = sprintf(buf, "%d(%d)", objects, pages);
4580 
4581 #ifdef CONFIG_SMP
4582 	for_each_online_cpu(cpu) {
4583 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4584 
4585 		if (page && len < PAGE_SIZE - 20)
4586 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4587 				page->pobjects, page->pages);
4588 	}
4589 #endif
4590 	return len + sprintf(buf + len, "\n");
4591 }
4592 SLAB_ATTR_RO(slabs_cpu_partial);
4593 
4594 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4595 {
4596 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4597 }
4598 
4599 static ssize_t reclaim_account_store(struct kmem_cache *s,
4600 				const char *buf, size_t length)
4601 {
4602 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4603 	if (buf[0] == '1')
4604 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4605 	return length;
4606 }
4607 SLAB_ATTR(reclaim_account);
4608 
4609 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4610 {
4611 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4612 }
4613 SLAB_ATTR_RO(hwcache_align);
4614 
4615 #ifdef CONFIG_ZONE_DMA
4616 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4617 {
4618 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4619 }
4620 SLAB_ATTR_RO(cache_dma);
4621 #endif
4622 
4623 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4624 {
4625 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4626 }
4627 SLAB_ATTR_RO(destroy_by_rcu);
4628 
4629 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4630 {
4631 	return sprintf(buf, "%d\n", s->reserved);
4632 }
4633 SLAB_ATTR_RO(reserved);
4634 
4635 #ifdef CONFIG_SLUB_DEBUG
4636 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4637 {
4638 	return show_slab_objects(s, buf, SO_ALL);
4639 }
4640 SLAB_ATTR_RO(slabs);
4641 
4642 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4643 {
4644 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4645 }
4646 SLAB_ATTR_RO(total_objects);
4647 
4648 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4649 {
4650 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4651 }
4652 
4653 static ssize_t sanity_checks_store(struct kmem_cache *s,
4654 				const char *buf, size_t length)
4655 {
4656 	s->flags &= ~SLAB_DEBUG_FREE;
4657 	if (buf[0] == '1') {
4658 		s->flags &= ~__CMPXCHG_DOUBLE;
4659 		s->flags |= SLAB_DEBUG_FREE;
4660 	}
4661 	return length;
4662 }
4663 SLAB_ATTR(sanity_checks);
4664 
4665 static ssize_t trace_show(struct kmem_cache *s, char *buf)
4666 {
4667 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4668 }
4669 
4670 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4671 							size_t length)
4672 {
4673 	s->flags &= ~SLAB_TRACE;
4674 	if (buf[0] == '1') {
4675 		s->flags &= ~__CMPXCHG_DOUBLE;
4676 		s->flags |= SLAB_TRACE;
4677 	}
4678 	return length;
4679 }
4680 SLAB_ATTR(trace);
4681 
4682 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4683 {
4684 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4685 }
4686 
4687 static ssize_t red_zone_store(struct kmem_cache *s,
4688 				const char *buf, size_t length)
4689 {
4690 	if (any_slab_objects(s))
4691 		return -EBUSY;
4692 
4693 	s->flags &= ~SLAB_RED_ZONE;
4694 	if (buf[0] == '1') {
4695 		s->flags &= ~__CMPXCHG_DOUBLE;
4696 		s->flags |= SLAB_RED_ZONE;
4697 	}
4698 	calculate_sizes(s, -1);
4699 	return length;
4700 }
4701 SLAB_ATTR(red_zone);
4702 
4703 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4704 {
4705 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4706 }
4707 
4708 static ssize_t poison_store(struct kmem_cache *s,
4709 				const char *buf, size_t length)
4710 {
4711 	if (any_slab_objects(s))
4712 		return -EBUSY;
4713 
4714 	s->flags &= ~SLAB_POISON;
4715 	if (buf[0] == '1') {
4716 		s->flags &= ~__CMPXCHG_DOUBLE;
4717 		s->flags |= SLAB_POISON;
4718 	}
4719 	calculate_sizes(s, -1);
4720 	return length;
4721 }
4722 SLAB_ATTR(poison);
4723 
4724 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4725 {
4726 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4727 }
4728 
4729 static ssize_t store_user_store(struct kmem_cache *s,
4730 				const char *buf, size_t length)
4731 {
4732 	if (any_slab_objects(s))
4733 		return -EBUSY;
4734 
4735 	s->flags &= ~SLAB_STORE_USER;
4736 	if (buf[0] == '1') {
4737 		s->flags &= ~__CMPXCHG_DOUBLE;
4738 		s->flags |= SLAB_STORE_USER;
4739 	}
4740 	calculate_sizes(s, -1);
4741 	return length;
4742 }
4743 SLAB_ATTR(store_user);
4744 
4745 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4746 {
4747 	return 0;
4748 }
4749 
4750 static ssize_t validate_store(struct kmem_cache *s,
4751 			const char *buf, size_t length)
4752 {
4753 	int ret = -EINVAL;
4754 
4755 	if (buf[0] == '1') {
4756 		ret = validate_slab_cache(s);
4757 		if (ret >= 0)
4758 			ret = length;
4759 	}
4760 	return ret;
4761 }
4762 SLAB_ATTR(validate);
4763 
4764 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4765 {
4766 	if (!(s->flags & SLAB_STORE_USER))
4767 		return -ENOSYS;
4768 	return list_locations(s, buf, TRACK_ALLOC);
4769 }
4770 SLAB_ATTR_RO(alloc_calls);
4771 
4772 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4773 {
4774 	if (!(s->flags & SLAB_STORE_USER))
4775 		return -ENOSYS;
4776 	return list_locations(s, buf, TRACK_FREE);
4777 }
4778 SLAB_ATTR_RO(free_calls);
4779 #endif /* CONFIG_SLUB_DEBUG */
4780 
4781 #ifdef CONFIG_FAILSLAB
4782 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4783 {
4784 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4785 }
4786 
4787 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4788 							size_t length)
4789 {
4790 	s->flags &= ~SLAB_FAILSLAB;
4791 	if (buf[0] == '1')
4792 		s->flags |= SLAB_FAILSLAB;
4793 	return length;
4794 }
4795 SLAB_ATTR(failslab);
4796 #endif
4797 
4798 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4799 {
4800 	return 0;
4801 }
4802 
4803 static ssize_t shrink_store(struct kmem_cache *s,
4804 			const char *buf, size_t length)
4805 {
4806 	if (buf[0] == '1') {
4807 		int rc = kmem_cache_shrink(s);
4808 
4809 		if (rc)
4810 			return rc;
4811 	} else
4812 		return -EINVAL;
4813 	return length;
4814 }
4815 SLAB_ATTR(shrink);
4816 
4817 #ifdef CONFIG_NUMA
4818 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4819 {
4820 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4821 }
4822 
4823 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4824 				const char *buf, size_t length)
4825 {
4826 	unsigned long ratio;
4827 	int err;
4828 
4829 	err = kstrtoul(buf, 10, &ratio);
4830 	if (err)
4831 		return err;
4832 
4833 	if (ratio <= 100)
4834 		s->remote_node_defrag_ratio = ratio * 10;
4835 
4836 	return length;
4837 }
4838 SLAB_ATTR(remote_node_defrag_ratio);
4839 #endif
4840 
4841 #ifdef CONFIG_SLUB_STATS
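/*
 * Sum a statistics counter over all online cpus and print the total,
 * followed by " C<cpu>=<value>" for each cpu with a non-zero count.
 */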
4842 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4843 {
4844 	unsigned long sum  = 0;
4845 	int cpu;
4846 	int len;
4847 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4848 
4849 	if (!data)
4850 		return -ENOMEM;
4851 
4852 	for_each_online_cpu(cpu) {
4853 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
4854 
4855 		data[cpu] = x;
4856 		sum += x;
4857 	}
4858 
4859 	len = sprintf(buf, "%lu", sum);
4860 
4861 #ifdef CONFIG_SMP
4862 	for_each_online_cpu(cpu) {
4863 		if (data[cpu] && len < PAGE_SIZE - 20)
4864 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4865 	}
4866 #endif
4867 	kfree(data);
4868 	return len + sprintf(buf + len, "\n");
4869 }
4870 
4871 static void clear_stat(struct kmem_cache *s, enum stat_item si)
4872 {
4873 	int cpu;
4874 
4875 	for_each_online_cpu(cpu)
4876 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
4877 }
4878 
4879 #define STAT_ATTR(si, text) 					\
4880 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4881 {								\
4882 	return show_stat(s, buf, si);				\
4883 }								\
4884 static ssize_t text##_store(struct kmem_cache *s,		\
4885 				const char *buf, size_t length)	\
4886 {								\
4887 	if (buf[0] != '0')					\
4888 		return -EINVAL;					\
4889 	clear_stat(s, si);					\
4890 	return length;						\
4891 }								\
4892 SLAB_ATTR(text);						\
4893 
4894 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4895 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4896 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4897 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4898 STAT_ATTR(FREE_FROZEN, free_frozen);
4899 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4900 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4901 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4902 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4903 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4904 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
4905 STAT_ATTR(FREE_SLAB, free_slab);
4906 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4907 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4908 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4909 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4910 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4911 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4912 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
4913 STAT_ATTR(ORDER_FALLBACK, order_fallback);
4914 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
4915 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
4916 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
4917 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
4918 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
4919 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
4920 #endif
4921 
4922 static struct attribute *slab_attrs[] = {
4923 	&slab_size_attr.attr,
4924 	&object_size_attr.attr,
4925 	&objs_per_slab_attr.attr,
4926 	&order_attr.attr,
4927 	&min_partial_attr.attr,
4928 	&cpu_partial_attr.attr,
4929 	&objects_attr.attr,
4930 	&objects_partial_attr.attr,
4931 	&partial_attr.attr,
4932 	&cpu_slabs_attr.attr,
4933 	&ctor_attr.attr,
4934 	&aliases_attr.attr,
4935 	&align_attr.attr,
4936 	&hwcache_align_attr.attr,
4937 	&reclaim_account_attr.attr,
4938 	&destroy_by_rcu_attr.attr,
4939 	&shrink_attr.attr,
4940 	&reserved_attr.attr,
4941 	&slabs_cpu_partial_attr.attr,
4942 #ifdef CONFIG_SLUB_DEBUG
4943 	&total_objects_attr.attr,
4944 	&slabs_attr.attr,
4945 	&sanity_checks_attr.attr,
4946 	&trace_attr.attr,
4947 	&red_zone_attr.attr,
4948 	&poison_attr.attr,
4949 	&store_user_attr.attr,
4950 	&validate_attr.attr,
4951 	&alloc_calls_attr.attr,
4952 	&free_calls_attr.attr,
4953 #endif
4954 #ifdef CONFIG_ZONE_DMA
4955 	&cache_dma_attr.attr,
4956 #endif
4957 #ifdef CONFIG_NUMA
4958 	&remote_node_defrag_ratio_attr.attr,
4959 #endif
4960 #ifdef CONFIG_SLUB_STATS
4961 	&alloc_fastpath_attr.attr,
4962 	&alloc_slowpath_attr.attr,
4963 	&free_fastpath_attr.attr,
4964 	&free_slowpath_attr.attr,
4965 	&free_frozen_attr.attr,
4966 	&free_add_partial_attr.attr,
4967 	&free_remove_partial_attr.attr,
4968 	&alloc_from_partial_attr.attr,
4969 	&alloc_slab_attr.attr,
4970 	&alloc_refill_attr.attr,
4971 	&alloc_node_mismatch_attr.attr,
4972 	&free_slab_attr.attr,
4973 	&cpuslab_flush_attr.attr,
4974 	&deactivate_full_attr.attr,
4975 	&deactivate_empty_attr.attr,
4976 	&deactivate_to_head_attr.attr,
4977 	&deactivate_to_tail_attr.attr,
4978 	&deactivate_remote_frees_attr.attr,
4979 	&deactivate_bypass_attr.attr,
4980 	&order_fallback_attr.attr,
4981 	&cmpxchg_double_fail_attr.attr,
4982 	&cmpxchg_double_cpu_fail_attr.attr,
4983 	&cpu_partial_alloc_attr.attr,
4984 	&cpu_partial_free_attr.attr,
4985 	&cpu_partial_node_attr.attr,
4986 	&cpu_partial_drain_attr.attr,
4987 #endif
4988 #ifdef CONFIG_FAILSLAB
4989 	&failslab_attr.attr,
4990 #endif
4991 
4992 	NULL
4993 };
4994 
4995 static struct attribute_group slab_attr_group = {
4996 	.attrs = slab_attrs,
4997 };
4998 
4999 static ssize_t slab_attr_show(struct kobject *kobj,
5000 				struct attribute *attr,
5001 				char *buf)
5002 {
5003 	struct slab_attribute *attribute;
5004 	struct kmem_cache *s;
5005 	int err;
5006 
5007 	attribute = to_slab_attr(attr);
5008 	s = to_slab(kobj);
5009 
5010 	if (!attribute->show)
5011 		return -EIO;
5012 
5013 	err = attribute->show(s, buf);
5014 
5015 	return err;
5016 }
5017 
5018 static ssize_t slab_attr_store(struct kobject *kobj,
5019 				struct attribute *attr,
5020 				const char *buf, size_t len)
5021 {
5022 	struct slab_attribute *attribute;
5023 	struct kmem_cache *s;
5024 	int err;
5025 
5026 	attribute = to_slab_attr(attr);
5027 	s = to_slab(kobj);
5028 
5029 	if (!attribute->store)
5030 		return -EIO;
5031 
5032 	err = attribute->store(s, buf, len);
5033 #ifdef CONFIG_MEMCG_KMEM
5034 	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5035 		int i;
5036 
5037 		mutex_lock(&slab_mutex);
5038 		if (s->max_attr_size < len)
5039 			s->max_attr_size = len;
5040 
5041 		/*
5042 		 * This is a best-effort propagation, so this function's return
5043 		 * value will be determined by the parent cache only. This is
5044 		 * basically because not all attributes will have well-defined
5045 		 * semantics for rollbacks - most of the actions will have
5046 		 * permanent effects.
5047 		 *
5048 		 * Returning the error value of any of the children that fail
5049 		 * is not 100% defined, in the sense that users seeing the
5050 		 * error code won't be able to know anything about the state of
5051 		 * the cache.
5052 		 *
5053 		 * Only returning the error code for the parent cache at least
5054 		 * has well-defined semantics. The cache being written to
5055 		 * directly either failed or succeeded; on success we then loop
5056 		 * through the descendants with best-effort propagation.
5057 		 */
5058 		for_each_memcg_cache_index(i) {
5059 			struct kmem_cache *c = cache_from_memcg_idx(s, i);
5060 			if (c)
5061 				attribute->store(c, buf, len);
5062 		}
5063 		mutex_unlock(&slab_mutex);
5064 	}
5065 #endif
5066 	return err;
5067 }
5068 
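/*
 * Intended to copy attribute values that were previously written via
 * sysfs over to a newly created cache (see the best-effort propagation
 * in slab_attr_store() above), so that tunables are not lost across
 * cache creation.
 */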
5069 static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5070 {
5071 #ifdef CONFIG_MEMCG_KMEM
5072 	int i;
5073 	char *buffer = NULL;
5074 
5075 	if (!is_root_cache(s))
5076 		return;
5077 
5078 	/*
5079 	 * This means this cache had no attribute written. Therefore, there
5080 	 * is no point in copying default values around.
5081 	 */
5082 	if (!s->max_attr_size)
5083 		return;
5084 
5085 	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5086 		char mbuf[64];
5087 		char *buf;
5088 		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5089 
5090 		if (!attr || !attr->store || !attr->show)
5091 			continue;
5092 
5093 		/*
5094 		 * It is really bad that we have to allocate here, so we will
5095 		 * do it only as a fallback. If we actually allocate, though,
5096 		 * we can just use the allocated buffer until the end.
5097 		 *
5098 		 * Most of the slub attributes will tend to be very small in
5099 		 * size, but sysfs allows buffers up to a page, so page-sized
5100 		 * values can theoretically happen.
5101 		 */
5102 		if (buffer)
5103 			buf = buffer;
5104 		else if (s->max_attr_size < ARRAY_SIZE(mbuf))
5105 			buf = mbuf;
5106 		else {
5107 			buffer = (char *) get_zeroed_page(GFP_KERNEL);
5108 			if (WARN_ON(!buffer))
5109 				continue;
5110 			buf = buffer;
5111 		}
5112 
5113 		attr->show(s->memcg_params->root_cache, buf);
5114 		attr->store(s, buf, strlen(buf));
5115 	}
5116 
5117 	if (buffer)
5118 		free_page((unsigned long)buffer);
5119 #endif
5120 }
5121 
5122 static const struct sysfs_ops slab_sysfs_ops = {
5123 	.show = slab_attr_show,
5124 	.store = slab_attr_store,
5125 };
5126 
5127 static struct kobj_type slab_ktype = {
5128 	.sysfs_ops = &slab_sysfs_ops,
5129 };
5130 
5131 static int uevent_filter(struct kset *kset, struct kobject *kobj)
5132 {
5133 	struct kobj_type *ktype = get_ktype(kobj);
5134 
5135 	if (ktype == &slab_ktype)
5136 		return 1;
5137 	return 0;
5138 }
5139 
5140 static const struct kset_uevent_ops slab_uevent_ops = {
5141 	.filter = uevent_filter,
5142 };
5143 
5144 static struct kset *slab_kset;
5145 
5146 static inline struct kset *cache_kset(struct kmem_cache *s)
5147 {
5148 #ifdef CONFIG_MEMCG_KMEM
5149 	if (!is_root_cache(s))
5150 		return s->memcg_params->root_cache->memcg_kset;
5151 #endif
5152 	return slab_kset;
5153 }
5154 
5155 #define ID_STR_LENGTH 64
5156 
5157 /*
5158  * Create a unique string id for a slab cache.
5159  * Format:	:[flags-]size
5160  */
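/*
 * For example, a 192-byte cache with object tracking enabled (no
 * SLAB_NOTRACK) and no other special flags gets the id ":t-0000192".
 */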
5161 static char *create_unique_id(struct kmem_cache *s)
5162 {
5163 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5164 	char *p = name;
5165 
5166 	BUG_ON(!name);
5167 
5168 	*p++ = ':';
5169 	/*
5170 	 * First flags affecting slabcache operations. We will only
5171 	 * get here for aliasable slabs so we do not need to support
5172 	 * too many flags. The flags here must cover all flags that
5173 	 * are matched during merging to guarantee that the id is
5174 	 * unique.
5175 	 */
5176 	if (s->flags & SLAB_CACHE_DMA)
5177 		*p++ = 'd';
5178 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5179 		*p++ = 'a';
5180 	if (s->flags & SLAB_DEBUG_FREE)
5181 		*p++ = 'F';
5182 	if (!(s->flags & SLAB_NOTRACK))
5183 		*p++ = 't';
5184 	if (p != name + 1)
5185 		*p++ = '-';
5186 	p += sprintf(p, "%07d", s->size);
5187 
5188 #ifdef CONFIG_MEMCG_KMEM
5189 	if (!is_root_cache(s))
5190 		p += sprintf(p, "-%08d",
5191 				memcg_cache_id(s->memcg_params->memcg));
5192 #endif
5193 
5194 	BUG_ON(p > name + ID_STR_LENGTH - 1);
5195 	return name;
5196 }
5197 
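/*
 * Create the /sys/kernel/slab/<name> directory for a cache. Mergeable
 * caches use a unique id as the directory name and get the cache name
 * added as a symlink alias; unmergeable caches use their name directly.
 */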
5198 static int sysfs_slab_add(struct kmem_cache *s)
5199 {
5200 	int err;
5201 	const char *name;
5202 	int unmergeable = slab_unmergeable(s);
5203 
5204 	if (unmergeable) {
5205 		/*
5206 		 * This slabcache can never be merged so we can use its name
5207 		 * directly. This is typically the case for debug situations.
5208 		 * In that case we can catch duplicate names easily.
5209 		 */
5210 		sysfs_remove_link(&slab_kset->kobj, s->name);
5211 		name = s->name;
5212 	} else {
5213 		/*
5214 		 * Create a unique name for the slab as a target
5215 		 * for the symlinks.
5216 		 */
5217 		name = create_unique_id(s);
5218 	}
5219 
5220 	s->kobj.kset = cache_kset(s);
5221 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5222 	if (err)
5223 		goto out_put_kobj;
5224 
5225 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5226 	if (err)
5227 		goto out_del_kobj;
5228 
5229 #ifdef CONFIG_MEMCG_KMEM
5230 	if (is_root_cache(s)) {
5231 		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5232 		if (!s->memcg_kset) {
5233 			err = -ENOMEM;
5234 			goto out_del_kobj;
5235 		}
5236 	}
5237 #endif
5238 
5239 	kobject_uevent(&s->kobj, KOBJ_ADD);
5240 	if (!unmergeable) {
5241 		/* Setup first alias */
5242 		sysfs_slab_alias(s, s->name);
5243 	}
5244 out:
5245 	if (!unmergeable)
5246 		kfree(name);
5247 	return err;
5248 out_del_kobj:
5249 	kobject_del(&s->kobj);
5250 out_put_kobj:
5251 	kobject_put(&s->kobj);
5252 	goto out;
5253 }
5254 
5255 static void sysfs_slab_remove(struct kmem_cache *s)
5256 {
5257 	if (slab_state < FULL)
5258 		/*
5259 		 * Sysfs has not been set up yet so no need to remove the
5260 		 * cache from sysfs.
5261 		 */
5262 		return;
5263 
5264 #ifdef CONFIG_MEMCG_KMEM
5265 	kset_unregister(s->memcg_kset);
5266 #endif
5267 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
5268 	kobject_del(&s->kobj);
5269 	kobject_put(&s->kobj);
5270 }
5271 
5272 /*
5273  * Need to buffer aliases during bootup until sysfs becomes
5274  * available lest we lose that information.
5275  */
5276 struct saved_alias {
5277 	struct kmem_cache *s;
5278 	const char *name;
5279 	struct saved_alias *next;
5280 };
5281 
5282 static struct saved_alias *alias_list;
5283 
5284 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5285 {
5286 	struct saved_alias *al;
5287 
5288 	if (slab_state == FULL) {
5289 		/*
5290 		 * If we have a leftover link then remove it.
5291 		 */
5292 		sysfs_remove_link(&slab_kset->kobj, name);
5293 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5294 	}
5295 
5296 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5297 	if (!al)
5298 		return -ENOMEM;
5299 
5300 	al->s = s;
5301 	al->name = name;
5302 	al->next = alias_list;
5303 	alias_list = al;
5304 	return 0;
5305 }
5306 
5307 static int __init slab_sysfs_init(void)
5308 {
5309 	struct kmem_cache *s;
5310 	int err;
5311 
5312 	mutex_lock(&slab_mutex);
5313 
5314 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5315 	if (!slab_kset) {
5316 		mutex_unlock(&slab_mutex);
5317 		printk(KERN_ERR "Cannot register slab subsystem.\n");
5318 		return -ENOSYS;
5319 	}
5320 
5321 	slab_state = FULL;
5322 
5323 	list_for_each_entry(s, &slab_caches, list) {
5324 		err = sysfs_slab_add(s);
5325 		if (err)
5326 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5327 						" to sysfs\n", s->name);
5328 	}
5329 
5330 	while (alias_list) {
5331 		struct saved_alias *al = alias_list;
5332 
5333 		alias_list = alias_list->next;
5334 		err = sysfs_slab_alias(al->s, al->name);
5335 		if (err)
5336 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
5337 					" %s to sysfs\n", al->name);
5338 		kfree(al);
5339 	}
5340 
5341 	mutex_unlock(&slab_mutex);
5342 	resiliency_test();
5343 	return 0;
5344 }
5345 
5346 __initcall(slab_sysfs_init);
5347 #endif /* CONFIG_SYSFS */
5348 
5349 /*
5350  * The /proc/slabinfo ABI
5351  */
5352 #ifdef CONFIG_SLABINFO
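/*
 * Fill in the generic counters for /proc/slabinfo by summing slab and
 * object counts over all nodes; free objects are counted from the
 * partial slabs on each node.
 */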
5353 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5354 {
5355 	unsigned long nr_slabs = 0;
5356 	unsigned long nr_objs = 0;
5357 	unsigned long nr_free = 0;
5358 	int node;
5359 
5360 	for_each_online_node(node) {
5361 		struct kmem_cache_node *n = get_node(s, node);
5362 
5363 		if (!n)
5364 			continue;
5365 
5366 		nr_slabs += node_nr_slabs(n);
5367 		nr_objs += node_nr_objs(n);
5368 		nr_free += count_partial(n, count_free);
5369 	}
5370 
5371 	sinfo->active_objs = nr_objs - nr_free;
5372 	sinfo->num_objs = nr_objs;
5373 	sinfo->active_slabs = nr_slabs;
5374 	sinfo->num_slabs = nr_slabs;
5375 	sinfo->objects_per_slab = oo_objects(s->oo);
5376 	sinfo->cache_order = oo_order(s->oo);
5377 }
5378 
5379 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5380 {
5381 }
5382 
5383 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5384 		       size_t count, loff_t *ppos)
5385 {
5386 	return -EIO;
5387 }
5388 #endif /* CONFIG_SLABINFO */
5389