xref: /openbmc/linux/mm/slub.c (revision 41e4b7dc)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks or atomic operations
6  * and only uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  * (C) 2011 Linux Foundation, Christoph Lameter
10  */
11 
12 #include <linux/mm.h>
13 #include <linux/swap.h> /* struct reclaim_state */
14 #include <linux/module.h>
15 #include <linux/bit_spinlock.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include "slab.h"
20 #include <linux/proc_fs.h>
21 #include <linux/notifier.h>
22 #include <linux/seq_file.h>
23 #include <linux/kasan.h>
24 #include <linux/kmemcheck.h>
25 #include <linux/cpu.h>
26 #include <linux/cpuset.h>
27 #include <linux/mempolicy.h>
28 #include <linux/ctype.h>
29 #include <linux/debugobjects.h>
30 #include <linux/kallsyms.h>
31 #include <linux/memory.h>
32 #include <linux/math64.h>
33 #include <linux/fault-inject.h>
34 #include <linux/stacktrace.h>
35 #include <linux/prefetch.h>
36 #include <linux/memcontrol.h>
37 #include <linux/random.h>
38 
39 #include <trace/events/kmem.h>
40 
41 #include "internal.h"
42 
43 /*
44  * Lock order:
45  *   1. slab_mutex (Global Mutex)
46  *   2. node->list_lock
47  *   3. slab_lock(page) (Only on some arches and for debugging)
48  *
49  *   slab_mutex
50  *
51  *   The role of the slab_mutex is to protect the list of all the slabs
52  *   and to synchronize major metadata changes to slab cache structures.
53  *
54  *   The slab_lock is only used for debugging and on arches that do not
55  *   have the ability to do a cmpxchg_double. It only protects the second
56  *	double word in the page struct, meaning:
57  *	A. page->freelist	-> List of free objects in a page
58  *	B. page->counters	-> Counters of objects
59  *	C. page->frozen		-> frozen state
60  *
61  *   If a slab is frozen then it is exempt from list management. It is not
62  *   on any list. The processor that froze the slab is the one who can
63  *   perform list operations on the page. Other processors may put objects
64  *   onto the freelist but the processor that froze the slab is the only
65  *   one that can retrieve the objects from the page's freelist.
66  *
67  *   The list_lock protects the partial and full list on each node and
68  *   the partial slab counter. If taken then no new slabs may be added to or
69  *   removed from the lists, nor may the number of partial slabs be modified.
70  *   (Note that the total number of slabs is an atomic value that may be
71  *   modified without taking the list lock).
72  *
73  *   The list_lock is a centralized lock and thus we avoid taking it as
74  *   much as possible. As long as SLUB does not have to handle partial
75  *   slabs, operations can continue without any centralized lock. F.e.
76  *   allocating a long series of objects that fill up slabs does not require
77  *   the list lock.
78  *   Interrupts are disabled during allocation and deallocation in order to
79  *   make the slab allocator safe to use in the context of an irq. In addition
80  *   interrupts are disabled to ensure that the processor does not change
81  *   while handling per_cpu slabs, due to kernel preemption.
82  *
83  * SLUB assigns one slab for allocation to each processor.
84  * Allocations only occur from these slabs called cpu slabs.
85  *
86  * Slabs with free elements are kept on a partial list and during regular
87  * operations no list for full slabs is used. If an object in a full slab is
88  * freed then the slab will show up again on the partial lists.
89  * We track full slabs for debugging purposes though because otherwise we
90  * cannot scan all objects.
91  *
92  * Slabs are freed when they become empty. Teardown and setup is
93  * minimal so we rely on the page allocators per cpu caches for
94  * fast frees and allocs.
95  *
96  * Overloading of page flags that are otherwise used for LRU management.
97  *
98  * PageActive 		The slab is frozen and exempt from list processing.
99  * 			This means that the slab is dedicated to a purpose
100  * 			such as satisfying allocations for a specific
101  * 			processor. Objects may be freed in the slab while
102  * 			it is frozen but slab_free will then skip the usual
103  * 			list operations. It is up to the processor holding
104  * 			the slab to integrate the slab into the slab lists
105  * 			when the slab is no longer needed.
106  *
107  * 			One use of this flag is to mark slabs that are
108  * 			used for allocations. Then such a slab becomes a cpu
109  * 			slab. The cpu slab may be equipped with an additional
110  * 			freelist that allows lockless access to
111  * 			free objects in addition to the regular freelist
112  * 			that requires the slab lock.
113  *
114  * PageError		Slab requires special handling due to debug
115  * 			options set. This moves slab handling out of
116  * 			the fast path and disables lockless freelists.
117  */
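
/*
 * Rough life cycle of a slab page in this scheme (a sketch; the functions
 * referenced live further down in this file): allocate_slab() returns a
 * frozen page that becomes a cpu slab; when the cpu moves on, the page is
 * unfrozen and placed on a node partial list (or the full list when
 * debugging); once empty it is handed back to the page allocator unless the
 * node holds fewer than min_partial slabs.
 */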
118 
119 static inline int kmem_cache_debug(struct kmem_cache *s)
120 {
121 #ifdef CONFIG_SLUB_DEBUG
122 	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
123 #else
124 	return 0;
125 #endif
126 }
127 
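/*
 * With SLAB_RED_ZONE debugging, a left redzone of s->red_left_pad bytes sits
 * in front of every object, so pointers derived from the raw slot address
 * must be shifted past it; fixup_red_left() performs that shift.
 */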
128 void *fixup_red_left(struct kmem_cache *s, void *p)
129 {
130 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
131 		p += s->red_left_pad;
132 
133 	return p;
134 }
135 
136 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
137 {
138 #ifdef CONFIG_SLUB_CPU_PARTIAL
139 	return !kmem_cache_debug(s);
140 #else
141 	return false;
142 #endif
143 }
144 
145 /*
146  * Issues still to be resolved:
147  *
148  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
149  *
150  * - Variable sizing of the per node arrays
151  */
152 
153 /* Enable to test recovery from slab corruption on boot */
154 #undef SLUB_RESILIENCY_TEST
155 
156 /* Enable to log cmpxchg failures */
157 #undef SLUB_DEBUG_CMPXCHG
158 
159 /*
160  * Minimum number of partial slabs. These will be left on the partial
161  * lists even if they are empty. kmem_cache_shrink may reclaim them.
162  */
163 #define MIN_PARTIAL 5
164 
165 /*
166  * Maximum number of desirable partial slabs.
167  * The existence of more partial slabs makes kmem_cache_shrink
168  * sort the partial list by the number of objects in use.
169  */
170 #define MAX_PARTIAL 10
171 
172 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
173 				SLAB_POISON | SLAB_STORE_USER)
174 
175 /*
176  * These debug flags cannot use CMPXCHG because there might be consistency
177  * issues when checking or reading debug information
178  */
179 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
180 				SLAB_TRACE)
181 
182 
183 /*
184  * Debugging flags that require metadata to be stored in the slab.  These get
185  * disabled when slub_debug=O is used and a cache's min order increases with
186  * metadata.
187  */
188 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
189 
190 #define OO_SHIFT	16
191 #define OO_MASK		((1 << OO_SHIFT) - 1)
192 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
193 
194 /* Internal SLUB flags */
195 #define __OBJECT_POISON		0x80000000UL /* Poison object */
196 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
197 
198 /*
199  * Tracking user of a slab.
200  */
201 #define TRACK_ADDRS_COUNT 16
202 struct track {
203 	unsigned long addr;	/* Called from address */
204 #ifdef CONFIG_STACKTRACE
205 	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Call stack addresses */
206 #endif
207 	int cpu;		/* Was running on cpu */
208 	int pid;		/* Pid context */
209 	unsigned long when;	/* When did the operation occur */
210 };
211 
212 enum track_item { TRACK_ALLOC, TRACK_FREE };
213 
214 #ifdef CONFIG_SYSFS
215 static int sysfs_slab_add(struct kmem_cache *);
216 static int sysfs_slab_alias(struct kmem_cache *, const char *);
217 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
218 static void sysfs_slab_remove(struct kmem_cache *s);
219 #else
220 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
221 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
222 							{ return 0; }
223 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
224 static inline void sysfs_slab_remove(struct kmem_cache *s) { }
225 #endif
226 
227 static inline void stat(const struct kmem_cache *s, enum stat_item si)
228 {
229 #ifdef CONFIG_SLUB_STATS
230 	/*
231 	 * The rmw is racy on a preemptible kernel but this is acceptable, so
232 	 * avoid this_cpu_add()'s irq-disable overhead.
233 	 */
234 	raw_cpu_inc(s->cpu_slab->stat[si]);
235 #endif
236 }
237 
238 /********************************************************************
239  * 			Core slab cache functions
240  *******************************************************************/
241 
242 /*
243  * Returns freelist pointer (ptr). With hardening, this is obfuscated
244  * with an XOR of the address where the pointer is held and a per-cache
245  * random number.
246  */
247 static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
248 				 unsigned long ptr_addr)
249 {
250 #ifdef CONFIG_SLAB_FREELIST_HARDENED
251 	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
252 #else
253 	return ptr;
254 #endif
255 }
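
/*
 * Example of the hardened round trip: set_freepointer() stores
 * fp ^ s->random ^ addr at addr, and freelist_ptr() applied to that stored
 * value XORs with the same s->random and addr again, recovering fp. Tying
 * the obfuscation to the storage address means a value copied from one slot
 * cannot simply be replayed in another.
 */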
256 
257 /* Returns the freelist pointer recorded at location ptr_addr. */
258 static inline void *freelist_dereference(const struct kmem_cache *s,
259 					 void *ptr_addr)
260 {
261 	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
262 			    (unsigned long)ptr_addr);
263 }
264 
265 static inline void *get_freepointer(struct kmem_cache *s, void *object)
266 {
267 	return freelist_dereference(s, object + s->offset);
268 }
269 
270 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
271 {
272 	if (object)
273 		prefetch(freelist_dereference(s, object + s->offset));
274 }
275 
276 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
277 {
278 	unsigned long freepointer_addr;
279 	void *p;
280 
281 	if (!debug_pagealloc_enabled())
282 		return get_freepointer(s, object);
283 
284 	freepointer_addr = (unsigned long)object + s->offset;
285 	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
286 	return freelist_ptr(s, p, freepointer_addr);
287 }
288 
289 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
290 {
291 	unsigned long freeptr_addr = (unsigned long)object + s->offset;
292 
293 #ifdef CONFIG_SLAB_FREELIST_HARDENED
294 	BUG_ON(object == fp); /* naive detection of double free or corruption */
295 #endif
296 
297 	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
298 }
299 
300 /* Loop over all objects in a slab */
301 #define for_each_object(__p, __s, __addr, __objects) \
302 	for (__p = fixup_red_left(__s, __addr); \
303 		__p < (__addr) + (__objects) * (__s)->size; \
304 		__p += (__s)->size)
305 
306 #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
307 	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
308 		__idx <= __objects; \
309 		__p += (__s)->size, __idx++)
310 
311 /* Determine object index from a given position */
312 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
313 {
314 	return (p - addr) / s->size;
315 }
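
/*
 * slab_index() is the inverse of the "addr + idx * s->size" stepping used by
 * the iteration macros above: e.g. with s->size == 64 the object starting at
 * addr + 128 has index 2.
 */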
316 
317 static inline int order_objects(int order, unsigned long size, int reserved)
318 {
319 	return ((PAGE_SIZE << order) - reserved) / size;
320 }
321 
322 static inline struct kmem_cache_order_objects oo_make(int order,
323 		unsigned long size, int reserved)
324 {
325 	struct kmem_cache_order_objects x = {
326 		(order << OO_SHIFT) + order_objects(order, size, reserved)
327 	};
328 
329 	return x;
330 }
331 
332 static inline int oo_order(struct kmem_cache_order_objects x)
333 {
334 	return x.x >> OO_SHIFT;
335 }
336 
337 static inline int oo_objects(struct kmem_cache_order_objects x)
338 {
339 	return x.x & OO_MASK;
340 }
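
/*
 * kmem_cache_order_objects packs both values into one word: the page order
 * in the bits above OO_SHIFT and the object count in the low 16 bits, so an
 * order-1 slab holding 85 objects is stored as (1 << 16) + 85.
 */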
341 
342 /*
343  * Per slab locking using the pagelock
344  */
345 static __always_inline void slab_lock(struct page *page)
346 {
347 	VM_BUG_ON_PAGE(PageTail(page), page);
348 	bit_spin_lock(PG_locked, &page->flags);
349 }
350 
351 static __always_inline void slab_unlock(struct page *page)
352 {
353 	VM_BUG_ON_PAGE(PageTail(page), page);
354 	__bit_spin_unlock(PG_locked, &page->flags);
355 }
356 
357 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
358 {
359 	struct page tmp;
360 	tmp.counters = counters_new;
361 	/*
362 	 * page->counters can cover frozen/inuse/objects as well
363 	 * as page->_refcount.  If we assign to ->counters directly
364 	 * we run the risk of losing updates to page->_refcount, so
365 	 * be careful and only assign to the fields we need.
366 	 */
367 	page->frozen  = tmp.frozen;
368 	page->inuse   = tmp.inuse;
369 	page->objects = tmp.objects;
370 }
371 
372 /* Interrupts must be disabled (for the fallback code to work right) */
373 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
374 		void *freelist_old, unsigned long counters_old,
375 		void *freelist_new, unsigned long counters_new,
376 		const char *n)
377 {
378 	VM_BUG_ON(!irqs_disabled());
379 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
380     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
381 	if (s->flags & __CMPXCHG_DOUBLE) {
382 		if (cmpxchg_double(&page->freelist, &page->counters,
383 				   freelist_old, counters_old,
384 				   freelist_new, counters_new))
385 			return true;
386 	} else
387 #endif
388 	{
389 		slab_lock(page);
390 		if (page->freelist == freelist_old &&
391 					page->counters == counters_old) {
392 			page->freelist = freelist_new;
393 			set_page_slub_counters(page, counters_new);
394 			slab_unlock(page);
395 			return true;
396 		}
397 		slab_unlock(page);
398 	}
399 
400 	cpu_relax();
401 	stat(s, CMPXCHG_DOUBLE_FAIL);
402 
403 #ifdef SLUB_DEBUG_CMPXCHG
404 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
405 #endif
406 
407 	return false;
408 }
409 
410 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
411 		void *freelist_old, unsigned long counters_old,
412 		void *freelist_new, unsigned long counters_new,
413 		const char *n)
414 {
415 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
416     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
417 	if (s->flags & __CMPXCHG_DOUBLE) {
418 		if (cmpxchg_double(&page->freelist, &page->counters,
419 				   freelist_old, counters_old,
420 				   freelist_new, counters_new))
421 			return true;
422 	} else
423 #endif
424 	{
425 		unsigned long flags;
426 
427 		local_irq_save(flags);
428 		slab_lock(page);
429 		if (page->freelist == freelist_old &&
430 					page->counters == counters_old) {
431 			page->freelist = freelist_new;
432 			set_page_slub_counters(page, counters_new);
433 			slab_unlock(page);
434 			local_irq_restore(flags);
435 			return true;
436 		}
437 		slab_unlock(page);
438 		local_irq_restore(flags);
439 	}
440 
441 	cpu_relax();
442 	stat(s, CMPXCHG_DOUBLE_FAIL);
443 
444 #ifdef SLUB_DEBUG_CMPXCHG
445 	pr_info("%s %s: cmpxchg double redo ", n, s->name);
446 #endif
447 
448 	return false;
449 }
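
/*
 * The two helpers above differ only in interrupt handling:
 * __cmpxchg_double_slab() requires the caller to have interrupts disabled
 * already, while cmpxchg_double_slab() saves and restores them itself around
 * the slab_lock() fallback path.
 */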
450 
451 #ifdef CONFIG_SLUB_DEBUG
452 /*
453  * Determine a map of the free objects on a page (a bit is set for each
454  * object currently on the page's freelist).
454  *
455  * Node listlock must be held to guarantee that the page does
456  * not vanish from under us.
457  */
458 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
459 {
460 	void *p;
461 	void *addr = page_address(page);
462 
463 	for (p = page->freelist; p; p = get_freepointer(s, p))
464 		set_bit(slab_index(p, s, addr), map);
465 }
466 
467 static inline int size_from_object(struct kmem_cache *s)
468 {
469 	if (s->flags & SLAB_RED_ZONE)
470 		return s->size - s->red_left_pad;
471 
472 	return s->size;
473 }
474 
475 static inline void *restore_red_left(struct kmem_cache *s, void *p)
476 {
477 	if (s->flags & SLAB_RED_ZONE)
478 		p -= s->red_left_pad;
479 
480 	return p;
481 }
482 
483 /*
484  * Debug settings:
485  */
486 #if defined(CONFIG_SLUB_DEBUG_ON)
487 static int slub_debug = DEBUG_DEFAULT_FLAGS;
488 #else
489 static int slub_debug;
490 #endif
491 
492 static char *slub_debug_slabs;
493 static int disable_higher_order_debug;
494 
495 /*
496  * slub is about to manipulate internal object metadata.  This memory lies
497  * outside the range of the allocated object, so accessing it would normally
498  * be reported by kasan as a bounds error.  metadata_access_enable() is used
499  * to tell kasan that these accesses are OK.
500  */
501 static inline void metadata_access_enable(void)
502 {
503 	kasan_disable_current();
504 }
505 
506 static inline void metadata_access_disable(void)
507 {
508 	kasan_enable_current();
509 }
510 
511 /*
512  * Object debugging
513  */
514 
515 /* Verify that a pointer has an address that is valid within a slab page */
516 static inline int check_valid_pointer(struct kmem_cache *s,
517 				struct page *page, void *object)
518 {
519 	void *base;
520 
521 	if (!object)
522 		return 1;
523 
524 	base = page_address(page);
525 	object = restore_red_left(s, object);
526 	if (object < base || object >= base + page->objects * s->size ||
527 		(object - base) % s->size) {
528 		return 0;
529 	}
530 
531 	return 1;
532 }
533 
534 static void print_section(char *level, char *text, u8 *addr,
535 			  unsigned int length)
536 {
537 	metadata_access_enable();
538 	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
539 			length, 1);
540 	metadata_access_disable();
541 }
542 
543 static struct track *get_track(struct kmem_cache *s, void *object,
544 	enum track_item alloc)
545 {
546 	struct track *p;
547 
548 	if (s->offset)
549 		p = object + s->offset + sizeof(void *);
550 	else
551 		p = object + s->inuse;
552 
553 	return p + alloc;
554 }
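
/*
 * The two struct track records (TRACK_ALLOC followed by TRACK_FREE) live in
 * the per-object metadata: after the out-of-line free pointer when s->offset
 * is set, otherwise right after the object at s->inuse.
 */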
555 
556 static void set_track(struct kmem_cache *s, void *object,
557 			enum track_item alloc, unsigned long addr)
558 {
559 	struct track *p = get_track(s, object, alloc);
560 
561 	if (addr) {
562 #ifdef CONFIG_STACKTRACE
563 		struct stack_trace trace;
564 		int i;
565 
566 		trace.nr_entries = 0;
567 		trace.max_entries = TRACK_ADDRS_COUNT;
568 		trace.entries = p->addrs;
569 		trace.skip = 3;
570 		metadata_access_enable();
571 		save_stack_trace(&trace);
572 		metadata_access_disable();
573 
574 		/* See rant in lockdep.c */
575 		if (trace.nr_entries != 0 &&
576 		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
577 			trace.nr_entries--;
578 
579 		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
580 			p->addrs[i] = 0;
581 #endif
582 		p->addr = addr;
583 		p->cpu = smp_processor_id();
584 		p->pid = current->pid;
585 		p->when = jiffies;
586 	} else
587 		memset(p, 0, sizeof(struct track));
588 }
589 
590 static void init_tracking(struct kmem_cache *s, void *object)
591 {
592 	if (!(s->flags & SLAB_STORE_USER))
593 		return;
594 
595 	set_track(s, object, TRACK_FREE, 0UL);
596 	set_track(s, object, TRACK_ALLOC, 0UL);
597 }
598 
599 static void print_track(const char *s, struct track *t)
600 {
601 	if (!t->addr)
602 		return;
603 
604 	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
605 	       s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
606 #ifdef CONFIG_STACKTRACE
607 	{
608 		int i;
609 		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
610 			if (t->addrs[i])
611 				pr_err("\t%pS\n", (void *)t->addrs[i]);
612 			else
613 				break;
614 	}
615 #endif
616 }
617 
618 static void print_tracking(struct kmem_cache *s, void *object)
619 {
620 	if (!(s->flags & SLAB_STORE_USER))
621 		return;
622 
623 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
624 	print_track("Freed", get_track(s, object, TRACK_FREE));
625 }
626 
627 static void print_page_info(struct page *page)
628 {
629 	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
630 	       page, page->objects, page->inuse, page->freelist, page->flags);
631 
632 }
633 
634 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
635 {
636 	struct va_format vaf;
637 	va_list args;
638 
639 	va_start(args, fmt);
640 	vaf.fmt = fmt;
641 	vaf.va = &args;
642 	pr_err("=============================================================================\n");
643 	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
644 	pr_err("-----------------------------------------------------------------------------\n\n");
645 
646 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
647 	va_end(args);
648 }
649 
650 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
651 {
652 	struct va_format vaf;
653 	va_list args;
654 
655 	va_start(args, fmt);
656 	vaf.fmt = fmt;
657 	vaf.va = &args;
658 	pr_err("FIX %s: %pV\n", s->name, &vaf);
659 	va_end(args);
660 }
661 
662 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
663 {
664 	unsigned int off;	/* Offset of last byte */
665 	u8 *addr = page_address(page);
666 
667 	print_tracking(s, p);
668 
669 	print_page_info(page);
670 
671 	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
672 	       p, p - addr, get_freepointer(s, p));
673 
674 	if (s->flags & SLAB_RED_ZONE)
675 		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
676 			      s->red_left_pad);
677 	else if (p > addr + 16)
678 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
679 
680 	print_section(KERN_ERR, "Object ", p,
681 		      min_t(unsigned long, s->object_size, PAGE_SIZE));
682 	if (s->flags & SLAB_RED_ZONE)
683 		print_section(KERN_ERR, "Redzone ", p + s->object_size,
684 			s->inuse - s->object_size);
685 
686 	if (s->offset)
687 		off = s->offset + sizeof(void *);
688 	else
689 		off = s->inuse;
690 
691 	if (s->flags & SLAB_STORE_USER)
692 		off += 2 * sizeof(struct track);
693 
694 	off += kasan_metadata_size(s);
695 
696 	if (off != size_from_object(s))
697 		/* Beginning of the filler is the free pointer */
698 		print_section(KERN_ERR, "Padding ", p + off,
699 			      size_from_object(s) - off);
700 
701 	dump_stack();
702 }
703 
704 void object_err(struct kmem_cache *s, struct page *page,
705 			u8 *object, char *reason)
706 {
707 	slab_bug(s, "%s", reason);
708 	print_trailer(s, page, object);
709 }
710 
711 static void slab_err(struct kmem_cache *s, struct page *page,
712 			const char *fmt, ...)
713 {
714 	va_list args;
715 	char buf[100];
716 
717 	va_start(args, fmt);
718 	vsnprintf(buf, sizeof(buf), fmt, args);
719 	va_end(args);
720 	slab_bug(s, "%s", buf);
721 	print_page_info(page);
722 	dump_stack();
723 }
724 
725 static void init_object(struct kmem_cache *s, void *object, u8 val)
726 {
727 	u8 *p = object;
728 
729 	if (s->flags & SLAB_RED_ZONE)
730 		memset(p - s->red_left_pad, val, s->red_left_pad);
731 
732 	if (s->flags & __OBJECT_POISON) {
733 		memset(p, POISON_FREE, s->object_size - 1);
734 		p[s->object_size - 1] = POISON_END;
735 	}
736 
737 	if (s->flags & SLAB_RED_ZONE)
738 		memset(p + s->object_size, val, s->inuse - s->object_size);
739 }
740 
741 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
742 						void *from, void *to)
743 {
744 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
745 	memset(from, data, to - from);
746 }
747 
748 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
749 			u8 *object, char *what,
750 			u8 *start, unsigned int value, unsigned int bytes)
751 {
752 	u8 *fault;
753 	u8 *end;
754 
755 	metadata_access_enable();
756 	fault = memchr_inv(start, value, bytes);
757 	metadata_access_disable();
758 	if (!fault)
759 		return 1;
760 
761 	end = start + bytes;
762 	while (end > fault && end[-1] == value)
763 		end--;
764 
765 	slab_bug(s, "%s overwritten", what);
766 	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
767 					fault, end - 1, fault[0], value);
768 	print_trailer(s, page, object);
769 
770 	restore_bytes(s, what, value, fault, end);
771 	return 0;
772 }
773 
774 /*
775  * Object layout:
776  *
777  * object address
778  * 	Bytes of the object to be managed.
779  * 	If the freepointer may overlay the object then the free
780  * 	pointer is the first word of the object.
781  *
782  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
783  * 	0xa5 (POISON_END)
784  *
785  * object + s->object_size
786  * 	Padding to reach word boundary. This is also used for Redzoning.
787  * 	Padding is extended by another word if Redzoning is enabled and
788  * 	object_size == inuse.
789  *
790  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
791  * 	0xcc (RED_ACTIVE) for objects in use.
792  *
793  * object + s->inuse
794  * 	Meta data starts here.
795  *
796  * 	A. Free pointer (if we cannot overwrite object on free)
797  * 	B. Tracking data for SLAB_STORE_USER
798  * 	C. Padding to reach required alignment boundary or at minimum
799  * 		one word if debugging is on to be able to detect writes
800  * 		before the word boundary.
801  *
802  *	Padding is done using 0x5a (POISON_INUSE)
803  *
804  * object + s->size
805  * 	Nothing is used beyond s->size.
806  *
807  * If slabcaches are merged then the object_size and inuse boundaries are mostly
808  * ignored, and therefore no slab options that rely on these boundaries
809  * may be used with merged slabcaches.
810  */
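
/*
 * As a rough example, with the default debug flags (red zoning, poisoning
 * and SLAB_STORE_USER) each slot is laid out as: left redzone
 * (s->red_left_pad), the poisoned object bytes, the right redzone up to
 * s->inuse, the out-of-line free pointer, two struct track records, any
 * KASAN metadata, and POISON_INUSE padding up to s->size.
 */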
811 
812 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
813 {
814 	unsigned long off = s->inuse;	/* The end of info */
815 
816 	if (s->offset)
817 		/* Freepointer is placed after the object. */
818 		off += sizeof(void *);
819 
820 	if (s->flags & SLAB_STORE_USER)
821 		/* We also have user information there */
822 		off += 2 * sizeof(struct track);
823 
824 	off += kasan_metadata_size(s);
825 
826 	if (size_from_object(s) == off)
827 		return 1;
828 
829 	return check_bytes_and_report(s, page, p, "Object padding",
830 			p + off, POISON_INUSE, size_from_object(s) - off);
831 }
832 
833 /* Check the pad bytes at the end of a slab page */
834 static int slab_pad_check(struct kmem_cache *s, struct page *page)
835 {
836 	u8 *start;
837 	u8 *fault;
838 	u8 *end;
839 	int length;
840 	int remainder;
841 
842 	if (!(s->flags & SLAB_POISON))
843 		return 1;
844 
845 	start = page_address(page);
846 	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
847 	end = start + length;
848 	remainder = length % s->size;
849 	if (!remainder)
850 		return 1;
851 
852 	metadata_access_enable();
853 	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
854 	metadata_access_disable();
855 	if (!fault)
856 		return 1;
857 	while (end > fault && end[-1] == POISON_INUSE)
858 		end--;
859 
860 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
861 	print_section(KERN_ERR, "Padding ", end - remainder, remainder);
862 
863 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
864 	return 0;
865 }
866 
867 static int check_object(struct kmem_cache *s, struct page *page,
868 					void *object, u8 val)
869 {
870 	u8 *p = object;
871 	u8 *endobject = object + s->object_size;
872 
873 	if (s->flags & SLAB_RED_ZONE) {
874 		if (!check_bytes_and_report(s, page, object, "Redzone",
875 			object - s->red_left_pad, val, s->red_left_pad))
876 			return 0;
877 
878 		if (!check_bytes_and_report(s, page, object, "Redzone",
879 			endobject, val, s->inuse - s->object_size))
880 			return 0;
881 	} else {
882 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
883 			check_bytes_and_report(s, page, p, "Alignment padding",
884 				endobject, POISON_INUSE,
885 				s->inuse - s->object_size);
886 		}
887 	}
888 
889 	if (s->flags & SLAB_POISON) {
890 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
891 			(!check_bytes_and_report(s, page, p, "Poison", p,
892 					POISON_FREE, s->object_size - 1) ||
893 			 !check_bytes_and_report(s, page, p, "Poison",
894 				p + s->object_size - 1, POISON_END, 1)))
895 			return 0;
896 		/*
897 		 * check_pad_bytes cleans up on its own.
898 		 */
899 		check_pad_bytes(s, page, p);
900 	}
901 
902 	if (!s->offset && val == SLUB_RED_ACTIVE)
903 		/*
904 		 * Object and freepointer overlap. Cannot check
905 		 * freepointer while object is allocated.
906 		 */
907 		return 1;
908 
909 	/* Check free pointer validity */
910 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
911 		object_err(s, page, p, "Freepointer corrupt");
912 		/*
913 		 * No choice but to zap it and thus lose the remainder
914 		 * of the free objects in this slab. May cause
915 		 * another error because the object count is now wrong.
916 		 */
917 		set_freepointer(s, p, NULL);
918 		return 0;
919 	}
920 	return 1;
921 }
922 
923 static int check_slab(struct kmem_cache *s, struct page *page)
924 {
925 	int maxobj;
926 
927 	VM_BUG_ON(!irqs_disabled());
928 
929 	if (!PageSlab(page)) {
930 		slab_err(s, page, "Not a valid slab page");
931 		return 0;
932 	}
933 
934 	maxobj = order_objects(compound_order(page), s->size, s->reserved);
935 	if (page->objects > maxobj) {
936 		slab_err(s, page, "objects %u > max %u",
937 			page->objects, maxobj);
938 		return 0;
939 	}
940 	if (page->inuse > page->objects) {
941 		slab_err(s, page, "inuse %u > max %u",
942 			page->inuse, page->objects);
943 		return 0;
944 	}
945 	/* slab_pad_check() fixes things up after itself */
946 	slab_pad_check(s, page);
947 	return 1;
948 }
949 
950 /*
951  * Determine if a certain object on a page is on the freelist. Must hold the
952  * slab lock to guarantee that the chains are in a consistent state.
953  */
954 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
955 {
956 	int nr = 0;
957 	void *fp;
958 	void *object = NULL;
959 	int max_objects;
960 
961 	fp = page->freelist;
962 	while (fp && nr <= page->objects) {
963 		if (fp == search)
964 			return 1;
965 		if (!check_valid_pointer(s, page, fp)) {
966 			if (object) {
967 				object_err(s, page, object,
968 					"Freechain corrupt");
969 				set_freepointer(s, object, NULL);
970 			} else {
971 				slab_err(s, page, "Freepointer corrupt");
972 				page->freelist = NULL;
973 				page->inuse = page->objects;
974 				slab_fix(s, "Freelist cleared");
975 				return 0;
976 			}
977 			break;
978 		}
979 		object = fp;
980 		fp = get_freepointer(s, object);
981 		nr++;
982 	}
983 
984 	max_objects = order_objects(compound_order(page), s->size, s->reserved);
985 	if (max_objects > MAX_OBJS_PER_PAGE)
986 		max_objects = MAX_OBJS_PER_PAGE;
987 
988 	if (page->objects != max_objects) {
989 		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
990 			 page->objects, max_objects);
991 		page->objects = max_objects;
992 		slab_fix(s, "Number of objects adjusted.");
993 	}
994 	if (page->inuse != page->objects - nr) {
995 		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
996 			 page->inuse, page->objects - nr);
997 		page->inuse = page->objects - nr;
998 		slab_fix(s, "Object count adjusted.");
999 	}
1000 	return search == NULL;
1001 }
1002 
1003 static void trace(struct kmem_cache *s, struct page *page, void *object,
1004 								int alloc)
1005 {
1006 	if (s->flags & SLAB_TRACE) {
1007 		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1008 			s->name,
1009 			alloc ? "alloc" : "free",
1010 			object, page->inuse,
1011 			page->freelist);
1012 
1013 		if (!alloc)
1014 			print_section(KERN_INFO, "Object ", (void *)object,
1015 					s->object_size);
1016 
1017 		dump_stack();
1018 	}
1019 }
1020 
1021 /*
1022  * Tracking of fully allocated slabs for debugging purposes.
1023  */
1024 static void add_full(struct kmem_cache *s,
1025 	struct kmem_cache_node *n, struct page *page)
1026 {
1027 	if (!(s->flags & SLAB_STORE_USER))
1028 		return;
1029 
1030 	lockdep_assert_held(&n->list_lock);
1031 	list_add(&page->lru, &n->full);
1032 }
1033 
1034 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1035 {
1036 	if (!(s->flags & SLAB_STORE_USER))
1037 		return;
1038 
1039 	lockdep_assert_held(&n->list_lock);
1040 	list_del(&page->lru);
1041 }
1042 
1043 /* Tracking of the number of slabs for debugging purposes */
1044 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1045 {
1046 	struct kmem_cache_node *n = get_node(s, node);
1047 
1048 	return atomic_long_read(&n->nr_slabs);
1049 }
1050 
1051 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1052 {
1053 	return atomic_long_read(&n->nr_slabs);
1054 }
1055 
1056 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1057 {
1058 	struct kmem_cache_node *n = get_node(s, node);
1059 
1060 	/*
1061 	 * May be called early in order to allocate a slab for the
1062 	 * kmem_cache_node structure. Solve the chicken-egg
1063 	 * dilemma by deferring the increment of the count during
1064 	 * bootstrap (see early_kmem_cache_node_alloc).
1065 	 */
1066 	if (likely(n)) {
1067 		atomic_long_inc(&n->nr_slabs);
1068 		atomic_long_add(objects, &n->total_objects);
1069 	}
1070 }
1071 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1072 {
1073 	struct kmem_cache_node *n = get_node(s, node);
1074 
1075 	atomic_long_dec(&n->nr_slabs);
1076 	atomic_long_sub(objects, &n->total_objects);
1077 }
1078 
1079 /* Object debug checks for alloc/free paths */
1080 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1081 								void *object)
1082 {
1083 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1084 		return;
1085 
1086 	init_object(s, object, SLUB_RED_INACTIVE);
1087 	init_tracking(s, object);
1088 }
1089 
1090 static inline int alloc_consistency_checks(struct kmem_cache *s,
1091 					struct page *page,
1092 					void *object, unsigned long addr)
1093 {
1094 	if (!check_slab(s, page))
1095 		return 0;
1096 
1097 	if (!check_valid_pointer(s, page, object)) {
1098 		object_err(s, page, object, "Freelist Pointer check fails");
1099 		return 0;
1100 	}
1101 
1102 	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1103 		return 0;
1104 
1105 	return 1;
1106 }
1107 
1108 static noinline int alloc_debug_processing(struct kmem_cache *s,
1109 					struct page *page,
1110 					void *object, unsigned long addr)
1111 {
1112 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1113 		if (!alloc_consistency_checks(s, page, object, addr))
1114 			goto bad;
1115 	}
1116 
1117 	/* Success. Perform special debug activities for allocs */
1118 	if (s->flags & SLAB_STORE_USER)
1119 		set_track(s, object, TRACK_ALLOC, addr);
1120 	trace(s, page, object, 1);
1121 	init_object(s, object, SLUB_RED_ACTIVE);
1122 	return 1;
1123 
1124 bad:
1125 	if (PageSlab(page)) {
1126 		/*
1127 		 * If this is a slab page then let's do the best we can
1128 		 * to avoid issues in the future. Marking all objects
1129 		 * as used avoids touching the remaining objects.
1130 		 */
1131 		slab_fix(s, "Marking all objects used");
1132 		page->inuse = page->objects;
1133 		page->freelist = NULL;
1134 	}
1135 	return 0;
1136 }
1137 
1138 static inline int free_consistency_checks(struct kmem_cache *s,
1139 		struct page *page, void *object, unsigned long addr)
1140 {
1141 	if (!check_valid_pointer(s, page, object)) {
1142 		slab_err(s, page, "Invalid object pointer 0x%p", object);
1143 		return 0;
1144 	}
1145 
1146 	if (on_freelist(s, page, object)) {
1147 		object_err(s, page, object, "Object already free");
1148 		return 0;
1149 	}
1150 
1151 	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1152 		return 0;
1153 
1154 	if (unlikely(s != page->slab_cache)) {
1155 		if (!PageSlab(page)) {
1156 			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1157 				 object);
1158 		} else if (!page->slab_cache) {
1159 			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1160 			       object);
1161 			dump_stack();
1162 		} else
1163 			object_err(s, page, object,
1164 					"page slab pointer corrupt.");
1165 		return 0;
1166 	}
1167 	return 1;
1168 }
1169 
1170 /* Supports checking bulk free of a constructed freelist */
1171 static noinline int free_debug_processing(
1172 	struct kmem_cache *s, struct page *page,
1173 	void *head, void *tail, int bulk_cnt,
1174 	unsigned long addr)
1175 {
1176 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1177 	void *object = head;
1178 	int cnt = 0;
1179 	unsigned long uninitialized_var(flags);
1180 	int ret = 0;
1181 
1182 	spin_lock_irqsave(&n->list_lock, flags);
1183 	slab_lock(page);
1184 
1185 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1186 		if (!check_slab(s, page))
1187 			goto out;
1188 	}
1189 
1190 next_object:
1191 	cnt++;
1192 
1193 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1194 		if (!free_consistency_checks(s, page, object, addr))
1195 			goto out;
1196 	}
1197 
1198 	if (s->flags & SLAB_STORE_USER)
1199 		set_track(s, object, TRACK_FREE, addr);
1200 	trace(s, page, object, 0);
1201 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1202 	init_object(s, object, SLUB_RED_INACTIVE);
1203 
1204 	/* Reached end of constructed freelist yet? */
1205 	if (object != tail) {
1206 		object = get_freepointer(s, object);
1207 		goto next_object;
1208 	}
1209 	ret = 1;
1210 
1211 out:
1212 	if (cnt != bulk_cnt)
1213 		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1214 			 bulk_cnt, cnt);
1215 
1216 	slab_unlock(page);
1217 	spin_unlock_irqrestore(&n->list_lock, flags);
1218 	if (!ret)
1219 		slab_fix(s, "Object at 0x%p not freed", object);
1220 	return ret;
1221 }
1222 
1223 static int __init setup_slub_debug(char *str)
1224 {
1225 	slub_debug = DEBUG_DEFAULT_FLAGS;
1226 	if (*str++ != '=' || !*str)
1227 		/*
1228 		 * No options specified. Switch on full debugging.
1229 		 */
1230 		goto out;
1231 
1232 	if (*str == ',')
1233 		/*
1234 		 * No options but restriction on slabs. This means full
1235 		 * debugging for slabs matching a pattern.
1236 		 */
1237 		goto check_slabs;
1238 
1239 	slub_debug = 0;
1240 	if (*str == '-')
1241 		/*
1242 		 * Switch off all debugging measures.
1243 		 */
1244 		goto out;
1245 
1246 	/*
1247 	 * Determine which debug features should be switched on
1248 	 */
1249 	for (; *str && *str != ','; str++) {
1250 		switch (tolower(*str)) {
1251 		case 'f':
1252 			slub_debug |= SLAB_CONSISTENCY_CHECKS;
1253 			break;
1254 		case 'z':
1255 			slub_debug |= SLAB_RED_ZONE;
1256 			break;
1257 		case 'p':
1258 			slub_debug |= SLAB_POISON;
1259 			break;
1260 		case 'u':
1261 			slub_debug |= SLAB_STORE_USER;
1262 			break;
1263 		case 't':
1264 			slub_debug |= SLAB_TRACE;
1265 			break;
1266 		case 'a':
1267 			slub_debug |= SLAB_FAILSLAB;
1268 			break;
1269 		case 'o':
1270 			/*
1271 			 * Avoid enabling debugging on caches if their minimum
1272 			 * order would increase as a result.
1273 			 */
1274 			disable_higher_order_debug = 1;
1275 			break;
1276 		default:
1277 			pr_err("slub_debug option '%c' unknown. skipped\n",
1278 			       *str);
1279 		}
1280 	}
1281 
1282 check_slabs:
1283 	if (*str == ',')
1284 		slub_debug_slabs = str + 1;
1285 out:
1286 	return 1;
1287 }
1288 
1289 __setup("slub_debug", setup_slub_debug);
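
/*
 * Example boot parameters handled by setup_slub_debug() above:
 *   slub_debug            switch on DEBUG_DEFAULT_FLAGS for all caches
 *   slub_debug=ZP,dentry  red zoning and poisoning only for caches whose
 *                         name starts with "dentry"
 *   slub_debug=-          switch all debugging off
 */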
1290 
1291 unsigned long kmem_cache_flags(unsigned long object_size,
1292 	unsigned long flags, const char *name,
1293 	void (*ctor)(void *))
1294 {
1295 	/*
1296 	 * Enable debugging if selected on the kernel commandline.
1297 	 */
1298 	if (slub_debug && (!slub_debug_slabs || (name &&
1299 		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1300 		flags |= slub_debug;
1301 
1302 	return flags;
1303 }
1304 #else /* !CONFIG_SLUB_DEBUG */
1305 static inline void setup_object_debug(struct kmem_cache *s,
1306 			struct page *page, void *object) {}
1307 
1308 static inline int alloc_debug_processing(struct kmem_cache *s,
1309 	struct page *page, void *object, unsigned long addr) { return 0; }
1310 
1311 static inline int free_debug_processing(
1312 	struct kmem_cache *s, struct page *page,
1313 	void *head, void *tail, int bulk_cnt,
1314 	unsigned long addr) { return 0; }
1315 
1316 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1317 			{ return 1; }
1318 static inline int check_object(struct kmem_cache *s, struct page *page,
1319 			void *object, u8 val) { return 1; }
1320 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1321 					struct page *page) {}
1322 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1323 					struct page *page) {}
1324 unsigned long kmem_cache_flags(unsigned long object_size,
1325 	unsigned long flags, const char *name,
1326 	void (*ctor)(void *))
1327 {
1328 	return flags;
1329 }
1330 #define slub_debug 0
1331 
1332 #define disable_higher_order_debug 0
1333 
1334 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1335 							{ return 0; }
1336 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1337 							{ return 0; }
1338 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1339 							int objects) {}
1340 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1341 							int objects) {}
1342 
1343 #endif /* CONFIG_SLUB_DEBUG */
1344 
1345 /*
1346  * Hooks for other subsystems that check memory allocations. In a typical
1347  * production configuration these hooks all should produce no code at all.
1348  */
1349 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1350 {
1351 	kmemleak_alloc(ptr, size, 1, flags);
1352 	kasan_kmalloc_large(ptr, size, flags);
1353 }
1354 
1355 static inline void kfree_hook(const void *x)
1356 {
1357 	kmemleak_free(x);
1358 	kasan_kfree_large(x);
1359 }
1360 
1361 static inline void *slab_free_hook(struct kmem_cache *s, void *x)
1362 {
1363 	void *freeptr;
1364 
1365 	kmemleak_free_recursive(x, s->flags);
1366 
1367 	/*
1368 	 * Trouble is that we may no longer disable interrupts in the fast path,
1369 	 * so in order to make the debug calls that expect irqs to be
1370 	 * disabled we need to disable interrupts temporarily.
1371 	 */
1372 #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
1373 	{
1374 		unsigned long flags;
1375 
1376 		local_irq_save(flags);
1377 		kmemcheck_slab_free(s, x, s->object_size);
1378 		debug_check_no_locks_freed(x, s->object_size);
1379 		local_irq_restore(flags);
1380 	}
1381 #endif
1382 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1383 		debug_check_no_obj_freed(x, s->object_size);
1384 
1385 	freeptr = get_freepointer(s, x);
1386 	/*
1387 	 * kasan_slab_free() may put x into memory quarantine, delaying its
1388 	 * reuse. In this case the object's freelist pointer is changed.
1389 	 */
1390 	kasan_slab_free(s, x);
1391 	return freeptr;
1392 }
1393 
1394 static inline void slab_free_freelist_hook(struct kmem_cache *s,
1395 					   void *head, void *tail)
1396 {
1397 /*
1398  * Compiler cannot detect this function can be removed if slab_free_hook()
1399  * evaluates to nothing.  Thus, catch all relevant config debug options here.
1400  */
1401 #if defined(CONFIG_KMEMCHECK) ||		\
1402 	defined(CONFIG_LOCKDEP)	||		\
1403 	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
1404 	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
1405 	defined(CONFIG_KASAN)
1406 
1407 	void *object = head;
1408 	void *tail_obj = tail ? : head;
1409 	void *freeptr;
1410 
1411 	do {
1412 		freeptr = slab_free_hook(s, object);
1413 	} while ((object != tail_obj) && (object = freeptr));
1414 #endif
1415 }
1416 
1417 static void setup_object(struct kmem_cache *s, struct page *page,
1418 				void *object)
1419 {
1420 	setup_object_debug(s, page, object);
1421 	kasan_init_slab_obj(s, object);
1422 	if (unlikely(s->ctor)) {
1423 		kasan_unpoison_object_data(s, object);
1424 		s->ctor(object);
1425 		kasan_poison_object_data(s, object);
1426 	}
1427 }
1428 
1429 /*
1430  * Slab allocation and freeing
1431  */
1432 static inline struct page *alloc_slab_page(struct kmem_cache *s,
1433 		gfp_t flags, int node, struct kmem_cache_order_objects oo)
1434 {
1435 	struct page *page;
1436 	int order = oo_order(oo);
1437 
1438 	flags |= __GFP_NOTRACK;
1439 
1440 	if (node == NUMA_NO_NODE)
1441 		page = alloc_pages(flags, order);
1442 	else
1443 		page = __alloc_pages_node(node, flags, order);
1444 
1445 	if (page && memcg_charge_slab(page, flags, order, s)) {
1446 		__free_pages(page, order);
1447 		page = NULL;
1448 	}
1449 
1450 	return page;
1451 }
1452 
1453 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1454 /* Pre-initialize the random sequence cache */
1455 static int init_cache_random_seq(struct kmem_cache *s)
1456 {
1457 	int err;
1458 	unsigned long i, count = oo_objects(s->oo);
1459 
1460 	/* Bailout if already initialised */
1461 	if (s->random_seq)
1462 		return 0;
1463 
1464 	err = cache_random_seq_create(s, count, GFP_KERNEL);
1465 	if (err) {
1466 		pr_err("SLUB: Unable to initialize free list for %s\n",
1467 			s->name);
1468 		return err;
1469 	}
1470 
1471 	/* Transform to an offset on the set of pages */
1472 	if (s->random_seq) {
1473 		for (i = 0; i < count; i++)
1474 			s->random_seq[i] *= s->size;
1475 	}
1476 	return 0;
1477 }
1478 
1479 /* Initialize each random sequence freelist per cache */
1480 static void __init init_freelist_randomization(void)
1481 {
1482 	struct kmem_cache *s;
1483 
1484 	mutex_lock(&slab_mutex);
1485 
1486 	list_for_each_entry(s, &slab_caches, list)
1487 		init_cache_random_seq(s);
1488 
1489 	mutex_unlock(&slab_mutex);
1490 }
1491 
1492 /* Get the next entry on the pre-computed freelist randomized */
1493 static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1494 				unsigned long *pos, void *start,
1495 				unsigned long page_limit,
1496 				unsigned long freelist_count)
1497 {
1498 	unsigned int idx;
1499 
1500 	/*
1501 	 * If the target page allocation failed, the number of objects on the
1502 	 * page might be smaller than the usual size defined by the cache.
1503 	 */
1504 	do {
1505 		idx = s->random_seq[*pos];
1506 		*pos += 1;
1507 		if (*pos >= freelist_count)
1508 			*pos = 0;
1509 	} while (unlikely(idx >= page_limit));
1510 
1511 	return (char *)start + idx;
1512 }
1513 
1514 /* Shuffle the singly linked freelist based on a random pre-computed sequence */
1515 static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1516 {
1517 	void *start;
1518 	void *cur;
1519 	void *next;
1520 	unsigned long idx, pos, page_limit, freelist_count;
1521 
1522 	if (page->objects < 2 || !s->random_seq)
1523 		return false;
1524 
1525 	freelist_count = oo_objects(s->oo);
1526 	pos = get_random_int() % freelist_count;
1527 
1528 	page_limit = page->objects * s->size;
1529 	start = fixup_red_left(s, page_address(page));
1530 
1531 	/* First entry is used as the base of the freelist */
1532 	cur = next_freelist_entry(s, page, &pos, start, page_limit,
1533 				freelist_count);
1534 	page->freelist = cur;
1535 
1536 	for (idx = 1; idx < page->objects; idx++) {
1537 		setup_object(s, page, cur);
1538 		next = next_freelist_entry(s, page, &pos, start, page_limit,
1539 			freelist_count);
1540 		set_freepointer(s, cur, next);
1541 		cur = next;
1542 	}
1543 	setup_object(s, page, cur);
1544 	set_freepointer(s, cur, NULL);
1545 
1546 	return true;
1547 }
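
/*
 * The random sequence itself is precomputed once per cache (and scaled by
 * s->size in init_cache_random_seq()); shuffle_freelist() only draws a
 * random starting position, so building a new slab's freelist costs a single
 * get_random_int() call plus one pass over the objects.
 */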
1548 #else
1549 static inline int init_cache_random_seq(struct kmem_cache *s)
1550 {
1551 	return 0;
1552 }
1553 static inline void init_freelist_randomization(void) { }
1554 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1555 {
1556 	return false;
1557 }
1558 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1559 
1560 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1561 {
1562 	struct page *page;
1563 	struct kmem_cache_order_objects oo = s->oo;
1564 	gfp_t alloc_gfp;
1565 	void *start, *p;
1566 	int idx, order;
1567 	bool shuffle;
1568 
1569 	flags &= gfp_allowed_mask;
1570 
1571 	if (gfpflags_allow_blocking(flags))
1572 		local_irq_enable();
1573 
1574 	flags |= s->allocflags;
1575 
1576 	/*
1577 	 * Let the initial higher-order allocation fail under memory pressure
1578 	 * so we fall back to the minimum order allocation.
1579 	 */
1580 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1581 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1582 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1583 
1584 	page = alloc_slab_page(s, alloc_gfp, node, oo);
1585 	if (unlikely(!page)) {
1586 		oo = s->min;
1587 		alloc_gfp = flags;
1588 		/*
1589 		 * Allocation may have failed due to fragmentation.
1590 		 * Try a lower order alloc if possible
1591 		 */
1592 		page = alloc_slab_page(s, alloc_gfp, node, oo);
1593 		if (unlikely(!page))
1594 			goto out;
1595 		stat(s, ORDER_FALLBACK);
1596 	}
1597 
1598 	if (kmemcheck_enabled &&
1599 	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1600 		int pages = 1 << oo_order(oo);
1601 
1602 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
1603 
1604 		/*
1605 		 * Objects from caches that have a constructor don't get
1606 		 * cleared when they're allocated, so we need to do it here.
1607 		 */
1608 		if (s->ctor)
1609 			kmemcheck_mark_uninitialized_pages(page, pages);
1610 		else
1611 			kmemcheck_mark_unallocated_pages(page, pages);
1612 	}
1613 
1614 	page->objects = oo_objects(oo);
1615 
1616 	order = compound_order(page);
1617 	page->slab_cache = s;
1618 	__SetPageSlab(page);
1619 	if (page_is_pfmemalloc(page))
1620 		SetPageSlabPfmemalloc(page);
1621 
1622 	start = page_address(page);
1623 
1624 	if (unlikely(s->flags & SLAB_POISON))
1625 		memset(start, POISON_INUSE, PAGE_SIZE << order);
1626 
1627 	kasan_poison_slab(page);
1628 
1629 	shuffle = shuffle_freelist(s, page);
1630 
1631 	if (!shuffle) {
1632 		for_each_object_idx(p, idx, s, start, page->objects) {
1633 			setup_object(s, page, p);
1634 			if (likely(idx < page->objects))
1635 				set_freepointer(s, p, p + s->size);
1636 			else
1637 				set_freepointer(s, p, NULL);
1638 		}
1639 		page->freelist = fixup_red_left(s, start);
1640 	}
1641 
1642 	page->inuse = page->objects;
1643 	page->frozen = 1;
1644 
1645 out:
1646 	if (gfpflags_allow_blocking(flags))
1647 		local_irq_disable();
1648 	if (!page)
1649 		return NULL;
1650 
1651 	mod_lruvec_page_state(page,
1652 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1653 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1654 		1 << oo_order(oo));
1655 
1656 	inc_slabs_node(s, page_to_nid(page), page->objects);
1657 
1658 	return page;
1659 }
1660 
1661 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1662 {
1663 	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1664 		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1665 		flags &= ~GFP_SLAB_BUG_MASK;
1666 		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1667 				invalid_mask, &invalid_mask, flags, &flags);
1668 		dump_stack();
1669 	}
1670 
1671 	return allocate_slab(s,
1672 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1673 }
1674 
1675 static void __free_slab(struct kmem_cache *s, struct page *page)
1676 {
1677 	int order = compound_order(page);
1678 	int pages = 1 << order;
1679 
1680 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1681 		void *p;
1682 
1683 		slab_pad_check(s, page);
1684 		for_each_object(p, s, page_address(page),
1685 						page->objects)
1686 			check_object(s, page, p, SLUB_RED_INACTIVE);
1687 	}
1688 
1689 	kmemcheck_free_shadow(page, compound_order(page));
1690 
1691 	mod_lruvec_page_state(page,
1692 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1693 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1694 		-pages);
1695 
1696 	__ClearPageSlabPfmemalloc(page);
1697 	__ClearPageSlab(page);
1698 
1699 	page_mapcount_reset(page);
1700 	if (current->reclaim_state)
1701 		current->reclaim_state->reclaimed_slab += pages;
1702 	memcg_uncharge_slab(page, order, s);
1703 	__free_pages(page, order);
1704 }
1705 
1706 #define need_reserve_slab_rcu						\
1707 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
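
/*
 * If struct rcu_head does not fit into page->lru, SLAB_TYPESAFE_BY_RCU caches
 * reserve sizeof(struct rcu_head) bytes at the end of every slab
 * (s->reserved) and free_slab() places the rcu_head there instead.
 */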
1708 
1709 static void rcu_free_slab(struct rcu_head *h)
1710 {
1711 	struct page *page;
1712 
1713 	if (need_reserve_slab_rcu)
1714 		page = virt_to_head_page(h);
1715 	else
1716 		page = container_of((struct list_head *)h, struct page, lru);
1717 
1718 	__free_slab(page->slab_cache, page);
1719 }
1720 
1721 static void free_slab(struct kmem_cache *s, struct page *page)
1722 {
1723 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1724 		struct rcu_head *head;
1725 
1726 		if (need_reserve_slab_rcu) {
1727 			int order = compound_order(page);
1728 			int offset = (PAGE_SIZE << order) - s->reserved;
1729 
1730 			VM_BUG_ON(s->reserved != sizeof(*head));
1731 			head = page_address(page) + offset;
1732 		} else {
1733 			head = &page->rcu_head;
1734 		}
1735 
1736 		call_rcu(head, rcu_free_slab);
1737 	} else
1738 		__free_slab(s, page);
1739 }
1740 
1741 static void discard_slab(struct kmem_cache *s, struct page *page)
1742 {
1743 	dec_slabs_node(s, page_to_nid(page), page->objects);
1744 	free_slab(s, page);
1745 }
1746 
1747 /*
1748  * Management of partially allocated slabs.
1749  */
1750 static inline void
1751 __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1752 {
1753 	n->nr_partial++;
1754 	if (tail == DEACTIVATE_TO_TAIL)
1755 		list_add_tail(&page->lru, &n->partial);
1756 	else
1757 		list_add(&page->lru, &n->partial);
1758 }
1759 
1760 static inline void add_partial(struct kmem_cache_node *n,
1761 				struct page *page, int tail)
1762 {
1763 	lockdep_assert_held(&n->list_lock);
1764 	__add_partial(n, page, tail);
1765 }
1766 
1767 static inline void remove_partial(struct kmem_cache_node *n,
1768 					struct page *page)
1769 {
1770 	lockdep_assert_held(&n->list_lock);
1771 	list_del(&page->lru);
1772 	n->nr_partial--;
1773 }
1774 
1775 /*
1776  * Remove slab from the partial list, freeze it and
1777  * return the pointer to the freelist.
1778  *
1779  * Returns a list of objects or NULL if it fails.
1780  */
1781 static inline void *acquire_slab(struct kmem_cache *s,
1782 		struct kmem_cache_node *n, struct page *page,
1783 		int mode, int *objects)
1784 {
1785 	void *freelist;
1786 	unsigned long counters;
1787 	struct page new;
1788 
1789 	lockdep_assert_held(&n->list_lock);
1790 
1791 	/*
1792 	 * Zap the freelist and set the frozen bit.
1793 	 * The old freelist is the list of objects for the
1794 	 * per cpu allocation list.
1795 	 */
1796 	freelist = page->freelist;
1797 	counters = page->counters;
1798 	new.counters = counters;
1799 	*objects = new.objects - new.inuse;
1800 	if (mode) {
1801 		new.inuse = page->objects;
1802 		new.freelist = NULL;
1803 	} else {
1804 		new.freelist = freelist;
1805 	}
1806 
1807 	VM_BUG_ON(new.frozen);
1808 	new.frozen = 1;
1809 
1810 	if (!__cmpxchg_double_slab(s, page,
1811 			freelist, counters,
1812 			new.freelist, new.counters,
1813 			"acquire_slab"))
1814 		return NULL;
1815 
1816 	remove_partial(n, page);
1817 	WARN_ON(!freelist);
1818 	return freelist;
1819 }
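
/*
 * The "mode" argument selects how much of the slab is taken: when set (the
 * first slab acquired for the requesting cpu) the whole freelist is taken
 * over and the page is marked fully in use; otherwise the freelist stays on
 * the page so get_partial_node() can park it on the per-cpu partial list.
 */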
1820 
1821 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1822 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1823 
1824 /*
1825  * Try to allocate a partial slab from a specific node.
1826  */
1827 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1828 				struct kmem_cache_cpu *c, gfp_t flags)
1829 {
1830 	struct page *page, *page2;
1831 	void *object = NULL;
1832 	int available = 0;
1833 	int objects;
1834 
1835 	/*
1836 	 * Racy check. If we mistakenly see no partial slabs then we
1837 	 * just allocate an empty slab. If we mistakenly try to get a
1838 	 * partial slab and there is none available then get_partial_node()
1839 	 * will return NULL.
1840 	 */
1841 	if (!n || !n->nr_partial)
1842 		return NULL;
1843 
1844 	spin_lock(&n->list_lock);
1845 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
1846 		void *t;
1847 
1848 		if (!pfmemalloc_match(page, flags))
1849 			continue;
1850 
1851 		t = acquire_slab(s, n, page, object == NULL, &objects);
1852 		if (!t)
1853 			break;
1854 
1855 		available += objects;
1856 		if (!object) {
1857 			c->page = page;
1858 			stat(s, ALLOC_FROM_PARTIAL);
1859 			object = t;
1860 		} else {
1861 			put_cpu_partial(s, page, 0);
1862 			stat(s, CPU_PARTIAL_NODE);
1863 		}
1864 		if (!kmem_cache_has_cpu_partial(s)
1865 			|| available > slub_cpu_partial(s) / 2)
1866 			break;
1867 
1868 	}
1869 	spin_unlock(&n->list_lock);
1870 	return object;
1871 }
1872 
1873 /*
1874  * Get a page from somewhere. Search in increasing NUMA distances.
1875  */
1876 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1877 		struct kmem_cache_cpu *c)
1878 {
1879 #ifdef CONFIG_NUMA
1880 	struct zonelist *zonelist;
1881 	struct zoneref *z;
1882 	struct zone *zone;
1883 	enum zone_type high_zoneidx = gfp_zone(flags);
1884 	void *object;
1885 	unsigned int cpuset_mems_cookie;
1886 
1887 	/*
1888 	 * The defrag ratio allows a configuration of the tradeoffs between
1889 	 * inter node defragmentation and node local allocations. A lower
1890 	 * defrag_ratio increases the tendency to do local allocations
1891 	 * instead of attempting to obtain partial slabs from other nodes.
1892 	 *
1893 	 * If the defrag_ratio is set to 0 then kmalloc() always
1894 	 * returns node local objects. If the ratio is higher then kmalloc()
1895 	 * may return off node objects because partial slabs are obtained
1896 	 * from other nodes and filled up.
1897 	 *
1898 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1899 	 * (which makes defrag_ratio = 1000) then every (well almost)
1900 	 * allocation will first attempt to defrag slab caches on other nodes.
1901 	 * This means scanning over all nodes to look for partial slabs which
1902 	 * may be expensive if we do it every time we are trying to find a slab
1903 	 * with available objects.
1904 	 */
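	/*
	 * Illustrative sketch (not from the original source): the sysfs value
	 * is stored multiplied by 10, so remote_node_defrag_ratio ranges from
	 * 0 to 1000 and is compared against get_cycles() % 1024 below.
	 * Assuming a hypothetical sysfs setting of 20 (stored as 200):
	 *
	 *	get_cycles() % 1024 == 731  -> 731 > 200, return NULL (stay local)
	 *	get_cycles() % 1024 == 112  -> 112 <= 200, scan remote nodes
	 *
	 * i.e. roughly 200/1024 ~ 20% of the slow path invocations go on to
	 * search other nodes for partial slabs.
	 */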
1905 	if (!s->remote_node_defrag_ratio ||
1906 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1907 		return NULL;
1908 
1909 	do {
1910 		cpuset_mems_cookie = read_mems_allowed_begin();
1911 		zonelist = node_zonelist(mempolicy_slab_node(), flags);
1912 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1913 			struct kmem_cache_node *n;
1914 
1915 			n = get_node(s, zone_to_nid(zone));
1916 
1917 			if (n && cpuset_zone_allowed(zone, flags) &&
1918 					n->nr_partial > s->min_partial) {
1919 				object = get_partial_node(s, n, c, flags);
1920 				if (object) {
1921 					/*
1922 					 * Don't check read_mems_allowed_retry()
1923 					 * here - if mems_allowed was updated in
1924 					 * parallel, that was a harmless race
1925 					 * between allocation and the cpuset
1926 					 * update
1927 					 */
1928 					return object;
1929 				}
1930 			}
1931 		}
1932 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1933 #endif
1934 	return NULL;
1935 }
1936 
1937 /*
1938  * Get a partial page, freeze it and return the freelist.
1939  */
1940 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1941 		struct kmem_cache_cpu *c)
1942 {
1943 	void *object;
1944 	int searchnode = node;
1945 
1946 	if (node == NUMA_NO_NODE)
1947 		searchnode = numa_mem_id();
1948 	else if (!node_present_pages(node))
1949 		searchnode = node_to_mem_node(node);
1950 
1951 	object = get_partial_node(s, get_node(s, searchnode), c, flags);
1952 	if (object || node != NUMA_NO_NODE)
1953 		return object;
1954 
1955 	return get_any_partial(s, flags, c);
1956 }
1957 
1958 #ifdef CONFIG_PREEMPT
1959 /*
1960  * Calculate the next globally unique transaction id for disambiguation
1961  * during cmpxchg. The transaction ids start with the cpu number and are then
1962  * incremented by CONFIG_NR_CPUS rounded up to the next power of two (TID_STEP).
1963  */
1964 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1965 #else
1966 /*
1967  * No preemption is supported, therefore there is also no need to check for
1968  * different cpus.
1969  */
1970 #define TID_STEP 1
1971 #endif
1972 
1973 static inline unsigned long next_tid(unsigned long tid)
1974 {
1975 	return tid + TID_STEP;
1976 }
1977 
1978 static inline unsigned int tid_to_cpu(unsigned long tid)
1979 {
1980 	return tid % TID_STEP;
1981 }
1982 
1983 static inline unsigned long tid_to_event(unsigned long tid)
1984 {
1985 	return tid / TID_STEP;
1986 }
1987 
1988 static inline unsigned int init_tid(int cpu)
1989 {
1990 	return cpu;
1991 }
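
/*
 * Illustrative sketch (not part of the allocator): on a hypothetical
 * CONFIG_PREEMPT kernel with CONFIG_NR_CPUS=6, TID_STEP becomes
 * roundup_pow_of_two(6) == 8, so the low bits of a tid identify the cpu
 * and the upper bits count operations on that cpu:
 *
 *	init_tid(2)		== 2
 *	next_tid(2)		== 10
 *	next_tid(10)		== 18
 *	tid_to_cpu(18)		== 18 % 8 == 2
 *	tid_to_event(18)	== 18 / 8 == 2
 *
 * Two cpus can therefore never generate the same tid, which is what the
 * cmpxchg retry logic below relies on to detect migration.
 */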
1992 
1993 static inline void note_cmpxchg_failure(const char *n,
1994 		const struct kmem_cache *s, unsigned long tid)
1995 {
1996 #ifdef SLUB_DEBUG_CMPXCHG
1997 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1998 
1999 	pr_info("%s %s: cmpxchg redo ", n, s->name);
2000 
2001 #ifdef CONFIG_PREEMPT
2002 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2003 		pr_warn("due to cpu change %d -> %d\n",
2004 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2005 	else
2006 #endif
2007 	if (tid_to_event(tid) != tid_to_event(actual_tid))
2008 		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2009 			tid_to_event(tid), tid_to_event(actual_tid));
2010 	else
2011 		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2012 			actual_tid, tid, next_tid(tid));
2013 #endif
2014 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2015 }
2016 
2017 static void init_kmem_cache_cpus(struct kmem_cache *s)
2018 {
2019 	int cpu;
2020 
2021 	for_each_possible_cpu(cpu)
2022 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2023 }
2024 
2025 /*
2026  * Remove the cpu slab
2027  */
2028 static void deactivate_slab(struct kmem_cache *s, struct page *page,
2029 				void *freelist, struct kmem_cache_cpu *c)
2030 {
2031 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2032 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2033 	int lock = 0;
2034 	enum slab_modes l = M_NONE, m = M_NONE;
2035 	void *nextfree;
2036 	int tail = DEACTIVATE_TO_HEAD;
2037 	struct page new;
2038 	struct page old;
2039 
2040 	if (page->freelist) {
2041 		stat(s, DEACTIVATE_REMOTE_FREES);
2042 		tail = DEACTIVATE_TO_TAIL;
2043 	}
2044 
2045 	/*
2046 	 * Stage one: Free all available per cpu objects back
2047 	 * to the page freelist while it is still frozen. Leave the
2048 	 * last one.
2049 	 *
2050 	 * There is no need to take the list->lock because the page
2051 	 * is still frozen.
2052 	 */
2053 	while (freelist && (nextfree = get_freepointer(s, freelist))) {
2054 		void *prior;
2055 		unsigned long counters;
2056 
2057 		do {
2058 			prior = page->freelist;
2059 			counters = page->counters;
2060 			set_freepointer(s, freelist, prior);
2061 			new.counters = counters;
2062 			new.inuse--;
2063 			VM_BUG_ON(!new.frozen);
2064 
2065 		} while (!__cmpxchg_double_slab(s, page,
2066 			prior, counters,
2067 			freelist, new.counters,
2068 			"drain percpu freelist"));
2069 
2070 		freelist = nextfree;
2071 	}
2072 
2073 	/*
2074 	 * Stage two: Ensure that the page is unfrozen while the
2075 	 * list presence reflects the actual number of objects
2076 	 * during unfreeze.
2077 	 *
2078 	 * We setup the list membership and then perform a cmpxchg
2079 	 * with the count. If there is a mismatch then the page
2080 	 * is not unfrozen but the page is on the wrong list.
2081 	 *
2082 	 * Then we restart the process which may have to remove
2083 	 * the page from the list that we just put it on again
2084 	 * because the number of objects in the slab may have
2085 	 * changed.
2086 	 */
2087 redo:
2088 
2089 	old.freelist = page->freelist;
2090 	old.counters = page->counters;
2091 	VM_BUG_ON(!old.frozen);
2092 
2093 	/* Determine target state of the slab */
2094 	new.counters = old.counters;
2095 	if (freelist) {
2096 		new.inuse--;
2097 		set_freepointer(s, freelist, old.freelist);
2098 		new.freelist = freelist;
2099 	} else
2100 		new.freelist = old.freelist;
2101 
2102 	new.frozen = 0;
2103 
2104 	if (!new.inuse && n->nr_partial >= s->min_partial)
2105 		m = M_FREE;
2106 	else if (new.freelist) {
2107 		m = M_PARTIAL;
2108 		if (!lock) {
2109 			lock = 1;
2110 			/*
2111 			 * Taking the spinlock removes the possibility
2112 			 * that acquire_slab() will see a slab page that
2113 			 * is frozen
2114 			 */
2115 			spin_lock(&n->list_lock);
2116 		}
2117 	} else {
2118 		m = M_FULL;
2119 		if (kmem_cache_debug(s) && !lock) {
2120 			lock = 1;
2121 			/*
2122 			 * This also ensures that the scanning of full
2123 			 * slabs from diagnostic functions will not see
2124 			 * any frozen slabs.
2125 			 */
2126 			spin_lock(&n->list_lock);
2127 		}
2128 	}
2129 
2130 	if (l != m) {
2131 
2132 		if (l == M_PARTIAL)
2133 
2134 			remove_partial(n, page);
2135 
2136 		else if (l == M_FULL)
2137 
2138 			remove_full(s, n, page);
2139 
2140 		if (m == M_PARTIAL) {
2141 
2142 			add_partial(n, page, tail);
2143 			stat(s, tail);
2144 
2145 		} else if (m == M_FULL) {
2146 
2147 			stat(s, DEACTIVATE_FULL);
2148 			add_full(s, n, page);
2149 
2150 		}
2151 	}
2152 
2153 	l = m;
2154 	if (!__cmpxchg_double_slab(s, page,
2155 				old.freelist, old.counters,
2156 				new.freelist, new.counters,
2157 				"unfreezing slab"))
2158 		goto redo;
2159 
2160 	if (lock)
2161 		spin_unlock(&n->list_lock);
2162 
2163 	if (m == M_FREE) {
2164 		stat(s, DEACTIVATE_EMPTY);
2165 		discard_slab(s, page);
2166 		stat(s, FREE_SLAB);
2167 	}
2168 
2169 	c->page = NULL;
2170 	c->freelist = NULL;
2171 }
2172 
2173 /*
2174  * Unfreeze all the cpu partial slabs.
2175  *
2176  * This function must be called with interrupts disabled
2177  * for the cpu using c (or some other guarantee must be in place
2178  * to prevent concurrent accesses).
2179  */
2180 static void unfreeze_partials(struct kmem_cache *s,
2181 		struct kmem_cache_cpu *c)
2182 {
2183 #ifdef CONFIG_SLUB_CPU_PARTIAL
2184 	struct kmem_cache_node *n = NULL, *n2 = NULL;
2185 	struct page *page, *discard_page = NULL;
2186 
2187 	while ((page = c->partial)) {
2188 		struct page new;
2189 		struct page old;
2190 
2191 		c->partial = page->next;
2192 
2193 		n2 = get_node(s, page_to_nid(page));
2194 		if (n != n2) {
2195 			if (n)
2196 				spin_unlock(&n->list_lock);
2197 
2198 			n = n2;
2199 			spin_lock(&n->list_lock);
2200 		}
2201 
2202 		do {
2203 
2204 			old.freelist = page->freelist;
2205 			old.counters = page->counters;
2206 			VM_BUG_ON(!old.frozen);
2207 
2208 			new.counters = old.counters;
2209 			new.freelist = old.freelist;
2210 
2211 			new.frozen = 0;
2212 
2213 		} while (!__cmpxchg_double_slab(s, page,
2214 				old.freelist, old.counters,
2215 				new.freelist, new.counters,
2216 				"unfreezing slab"));
2217 
2218 		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2219 			page->next = discard_page;
2220 			discard_page = page;
2221 		} else {
2222 			add_partial(n, page, DEACTIVATE_TO_TAIL);
2223 			stat(s, FREE_ADD_PARTIAL);
2224 		}
2225 	}
2226 
2227 	if (n)
2228 		spin_unlock(&n->list_lock);
2229 
2230 	while (discard_page) {
2231 		page = discard_page;
2232 		discard_page = discard_page->next;
2233 
2234 		stat(s, DEACTIVATE_EMPTY);
2235 		discard_slab(s, page);
2236 		stat(s, FREE_SLAB);
2237 	}
2238 #endif
2239 }
2240 
2241 /*
2242  * Put a page that was just frozen (in __slab_free) into a partial page
2243  * slot if available. This is done without interrupts disabled: the cmpxchg
2244  * is racy and may therefore put the partial page onto a random cpu's
2245  * partial slot.
2246  *
2247  * If we did not find a slot then simply move all the partials to the
2248  * per node partial list.
2249  */
2250 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2251 {
2252 #ifdef CONFIG_SLUB_CPU_PARTIAL
2253 	struct page *oldpage;
2254 	int pages;
2255 	int pobjects;
2256 
2257 	preempt_disable();
2258 	do {
2259 		pages = 0;
2260 		pobjects = 0;
2261 		oldpage = this_cpu_read(s->cpu_slab->partial);
2262 
2263 		if (oldpage) {
2264 			pobjects = oldpage->pobjects;
2265 			pages = oldpage->pages;
2266 			if (drain && pobjects > s->cpu_partial) {
2267 				unsigned long flags;
2268 				/*
2269 				 * partial array is full. Move the existing
2270 				 * set to the per node partial list.
2271 				 */
2272 				local_irq_save(flags);
2273 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2274 				local_irq_restore(flags);
2275 				oldpage = NULL;
2276 				pobjects = 0;
2277 				pages = 0;
2278 				stat(s, CPU_PARTIAL_DRAIN);
2279 			}
2280 		}
2281 
2282 		pages++;
2283 		pobjects += page->objects - page->inuse;
2284 
2285 		page->pages = pages;
2286 		page->pobjects = pobjects;
2287 		page->next = oldpage;
2288 
2289 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2290 								!= oldpage);
2291 	if (unlikely(!s->cpu_partial)) {
2292 		unsigned long flags;
2293 
2294 		local_irq_save(flags);
2295 		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2296 		local_irq_restore(flags);
2297 	}
2298 	preempt_enable();
2299 #endif
2300 }
2301 
2302 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2303 {
2304 	stat(s, CPUSLAB_FLUSH);
2305 	deactivate_slab(s, c->page, c->freelist, c);
2306 
2307 	c->tid = next_tid(c->tid);
2308 }
2309 
2310 /*
2311  * Flush cpu slab.
2312  *
2313  * Called from IPI handler with interrupts disabled.
2314  */
2315 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2316 {
2317 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2318 
2319 	if (likely(c)) {
2320 		if (c->page)
2321 			flush_slab(s, c);
2322 
2323 		unfreeze_partials(s, c);
2324 	}
2325 }
2326 
2327 static void flush_cpu_slab(void *d)
2328 {
2329 	struct kmem_cache *s = d;
2330 
2331 	__flush_cpu_slab(s, smp_processor_id());
2332 }
2333 
2334 static bool has_cpu_slab(int cpu, void *info)
2335 {
2336 	struct kmem_cache *s = info;
2337 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2338 
2339 	return c->page || slub_percpu_partial(c);
2340 }
2341 
2342 static void flush_all(struct kmem_cache *s)
2343 {
2344 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2345 }
2346 
2347 /*
2348  * Use the cpu hotplug callback to ensure that the cpu slabs are flushed when
2349  * necessary.
2350  */
2351 static int slub_cpu_dead(unsigned int cpu)
2352 {
2353 	struct kmem_cache *s;
2354 	unsigned long flags;
2355 
2356 	mutex_lock(&slab_mutex);
2357 	list_for_each_entry(s, &slab_caches, list) {
2358 		local_irq_save(flags);
2359 		__flush_cpu_slab(s, cpu);
2360 		local_irq_restore(flags);
2361 	}
2362 	mutex_unlock(&slab_mutex);
2363 	return 0;
2364 }
2365 
2366 /*
2367  * Check if the objects in a per cpu structure fit numa
2368  * locality expectations.
2369  */
2370 static inline int node_match(struct page *page, int node)
2371 {
2372 #ifdef CONFIG_NUMA
2373 	if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2374 		return 0;
2375 #endif
2376 	return 1;
2377 }
2378 
2379 #ifdef CONFIG_SLUB_DEBUG
2380 static int count_free(struct page *page)
2381 {
2382 	return page->objects - page->inuse;
2383 }
2384 
2385 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2386 {
2387 	return atomic_long_read(&n->total_objects);
2388 }
2389 #endif /* CONFIG_SLUB_DEBUG */
2390 
2391 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2392 static unsigned long count_partial(struct kmem_cache_node *n,
2393 					int (*get_count)(struct page *))
2394 {
2395 	unsigned long flags;
2396 	unsigned long x = 0;
2397 	struct page *page;
2398 
2399 	spin_lock_irqsave(&n->list_lock, flags);
2400 	list_for_each_entry(page, &n->partial, lru)
2401 		x += get_count(page);
2402 	spin_unlock_irqrestore(&n->list_lock, flags);
2403 	return x;
2404 }
2405 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2406 
2407 static noinline void
2408 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2409 {
2410 #ifdef CONFIG_SLUB_DEBUG
2411 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2412 				      DEFAULT_RATELIMIT_BURST);
2413 	int node;
2414 	struct kmem_cache_node *n;
2415 
2416 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2417 		return;
2418 
2419 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2420 		nid, gfpflags, &gfpflags);
2421 	pr_warn("  cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
2422 		s->name, s->object_size, s->size, oo_order(s->oo),
2423 		oo_order(s->min));
2424 
2425 	if (oo_order(s->min) > get_order(s->object_size))
2426 		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2427 			s->name);
2428 
2429 	for_each_kmem_cache_node(s, node, n) {
2430 		unsigned long nr_slabs;
2431 		unsigned long nr_objs;
2432 		unsigned long nr_free;
2433 
2434 		nr_free  = count_partial(n, count_free);
2435 		nr_slabs = node_nr_slabs(n);
2436 		nr_objs  = node_nr_objs(n);
2437 
2438 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2439 			node, nr_slabs, nr_objs, nr_free);
2440 	}
2441 #endif
2442 }
2443 
2444 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2445 			int node, struct kmem_cache_cpu **pc)
2446 {
2447 	void *freelist;
2448 	struct kmem_cache_cpu *c = *pc;
2449 	struct page *page;
2450 
2451 	freelist = get_partial(s, flags, node, c);
2452 
2453 	if (freelist)
2454 		return freelist;
2455 
2456 	page = new_slab(s, flags, node);
2457 	if (page) {
2458 		c = raw_cpu_ptr(s->cpu_slab);
2459 		if (c->page)
2460 			flush_slab(s, c);
2461 
2462 		/*
2463 		 * No other reference to the page yet so we can
2464 		 * muck around with it freely without cmpxchg
2465 		 */
2466 		freelist = page->freelist;
2467 		page->freelist = NULL;
2468 
2469 		stat(s, ALLOC_SLAB);
2470 		c->page = page;
2471 		*pc = c;
2472 	} else
2473 		freelist = NULL;
2474 
2475 	return freelist;
2476 }
2477 
2478 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2479 {
2480 	if (unlikely(PageSlabPfmemalloc(page)))
2481 		return gfp_pfmemalloc_allowed(gfpflags);
2482 
2483 	return true;
2484 }
2485 
2486 /*
2487  * Check the page->freelist of a page and either transfer the freelist to the
2488  * per cpu freelist or deactivate the page.
2489  *
2490  * The page is still frozen if the return value is not NULL.
2491  *
2492  * If this function returns NULL then the page has been unfrozen.
2493  *
2494  * This function must be called with interrupts disabled.
2495  */
2496 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2497 {
2498 	struct page new;
2499 	unsigned long counters;
2500 	void *freelist;
2501 
2502 	do {
2503 		freelist = page->freelist;
2504 		counters = page->counters;
2505 
2506 		new.counters = counters;
2507 		VM_BUG_ON(!new.frozen);
2508 
2509 		new.inuse = page->objects;
2510 		new.frozen = freelist != NULL;
2511 
2512 	} while (!__cmpxchg_double_slab(s, page,
2513 		freelist, counters,
2514 		NULL, new.counters,
2515 		"get_freelist"));
2516 
2517 	return freelist;
2518 }
2519 
2520 /*
2521  * Slow path. The lockless freelist is empty or we need to perform
2522  * debugging duties.
2523  *
2524  * Processing is still very fast if new objects have been freed to the
2525  * regular freelist. In that case we simply take over the regular freelist
2526  * as the lockless freelist and zap the regular freelist.
2527  *
2528  * If that is not working then we fall back to the partial lists. We take the
2529  * first element of the freelist as the object to allocate now and move the
2530  * rest of the freelist to the lockless freelist.
2531  *
2532  * And if we were unable to get a new slab from the partial slab lists then
2533  * we need to allocate a new slab. This is the slowest path since it involves
2534  * a call to the page allocator and the setup of a new slab.
2535  *
2536  * Version of __slab_alloc to use when we know that interrupts are
2537  * already disabled (which is the case for bulk allocation).
2538  */
2539 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2540 			  unsigned long addr, struct kmem_cache_cpu *c)
2541 {
2542 	void *freelist;
2543 	struct page *page;
2544 
2545 	page = c->page;
2546 	if (!page)
2547 		goto new_slab;
2548 redo:
2549 
2550 	if (unlikely(!node_match(page, node))) {
2551 		int searchnode = node;
2552 
2553 		if (node != NUMA_NO_NODE && !node_present_pages(node))
2554 			searchnode = node_to_mem_node(node);
2555 
2556 		if (unlikely(!node_match(page, searchnode))) {
2557 			stat(s, ALLOC_NODE_MISMATCH);
2558 			deactivate_slab(s, page, c->freelist, c);
2559 			goto new_slab;
2560 		}
2561 	}
2562 
2563 	/*
2564 	 * By rights, we should be searching for a slab page that was
2565 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2566 	 * information when the page leaves the per-cpu allocator
2567 	 */
2568 	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2569 		deactivate_slab(s, page, c->freelist, c);
2570 		goto new_slab;
2571 	}
2572 
2573 	/* must check again c->freelist in case of cpu migration or IRQ */
2574 	freelist = c->freelist;
2575 	if (freelist)
2576 		goto load_freelist;
2577 
2578 	freelist = get_freelist(s, page);
2579 
2580 	if (!freelist) {
2581 		c->page = NULL;
2582 		stat(s, DEACTIVATE_BYPASS);
2583 		goto new_slab;
2584 	}
2585 
2586 	stat(s, ALLOC_REFILL);
2587 
2588 load_freelist:
2589 	/*
2590 	 * freelist is pointing to the list of objects to be used.
2591 	 * page is pointing to the page from which the objects are obtained.
2592 	 * That page must be frozen for per cpu allocations to work.
2593 	 */
2594 	VM_BUG_ON(!c->page->frozen);
2595 	c->freelist = get_freepointer(s, freelist);
2596 	c->tid = next_tid(c->tid);
2597 	return freelist;
2598 
2599 new_slab:
2600 
2601 	if (slub_percpu_partial(c)) {
2602 		page = c->page = slub_percpu_partial(c);
2603 		slub_set_percpu_partial(c, page);
2604 		stat(s, CPU_PARTIAL_ALLOC);
2605 		goto redo;
2606 	}
2607 
2608 	freelist = new_slab_objects(s, gfpflags, node, &c);
2609 
2610 	if (unlikely(!freelist)) {
2611 		slab_out_of_memory(s, gfpflags, node);
2612 		return NULL;
2613 	}
2614 
2615 	page = c->page;
2616 	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2617 		goto load_freelist;
2618 
2619 	/* Only entered in the debug case */
2620 	if (kmem_cache_debug(s) &&
2621 			!alloc_debug_processing(s, page, freelist, addr))
2622 		goto new_slab;	/* Slab failed checks. Next slab needed */
2623 
2624 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
2625 	return freelist;
2626 }
2627 
2628 /*
2629  * Another one that disables interrupts and compensates for possible
2630  * cpu changes by refetching the per cpu area pointer.
2631  */
2632 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2633 			  unsigned long addr, struct kmem_cache_cpu *c)
2634 {
2635 	void *p;
2636 	unsigned long flags;
2637 
2638 	local_irq_save(flags);
2639 #ifdef CONFIG_PREEMPT
2640 	/*
2641 	 * We may have been preempted and rescheduled on a different
2642 	 * cpu before disabling interrupts. Need to reload cpu area
2643 	 * pointer.
2644 	 */
2645 	c = this_cpu_ptr(s->cpu_slab);
2646 #endif
2647 
2648 	p = ___slab_alloc(s, gfpflags, node, addr, c);
2649 	local_irq_restore(flags);
2650 	return p;
2651 }
2652 
2653 /*
2654  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2655  * have the fastpath folded into their functions. So no function call
2656  * overhead for requests that can be satisfied on the fastpath.
2657  *
2658  * The fastpath works by first checking if the lockless freelist can be used.
2659  * If not then __slab_alloc is called for slow processing.
2660  *
2661  * Otherwise we can simply pick the next object from the lockless free list.
2662  */
2663 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2664 		gfp_t gfpflags, int node, unsigned long addr)
2665 {
2666 	void *object;
2667 	struct kmem_cache_cpu *c;
2668 	struct page *page;
2669 	unsigned long tid;
2670 
2671 	s = slab_pre_alloc_hook(s, gfpflags);
2672 	if (!s)
2673 		return NULL;
2674 redo:
2675 	/*
2676 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2677 	 * enabled. We may switch back and forth between cpus while
2678 	 * reading from one cpu area. That does not matter as long
2679 	 * as we end up on the original cpu again when doing the cmpxchg.
2680 	 *
2681 	 * We should guarantee that tid and kmem_cache are retrieved on
2682 	 * the same cpu. They could differ if CONFIG_PREEMPT is set, so we
2683 	 * need to check that they match.
2684 	 */
2685 	do {
2686 		tid = this_cpu_read(s->cpu_slab->tid);
2687 		c = raw_cpu_ptr(s->cpu_slab);
2688 	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2689 		 unlikely(tid != READ_ONCE(c->tid)));
2690 
2691 	/*
2692 	 * Irqless object alloc/free algorithm used here depends on sequence
2693 	 * of fetching cpu_slab's data. tid should be fetched before anything
2694 	 * on c to guarantee that object and page associated with previous tid
2695 	 * won't be used with current tid. If we fetch tid first, object and
2696 	 * page could be ones associated with the next tid and our alloc/free
2697 	 * request will fail. In this case, we will retry. So, no problem.
2698 	 */
2699 	barrier();
2700 
2701 	/*
2702 	 * The transaction ids are globally unique per cpu and per operation on
2703 	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2704 	 * occurs on the right processor and that there was no operation on the
2705 	 * linked list in between.
2706 	 */
2707 
2708 	object = c->freelist;
2709 	page = c->page;
2710 	if (unlikely(!object || !node_match(page, node))) {
2711 		object = __slab_alloc(s, gfpflags, node, addr, c);
2712 		stat(s, ALLOC_SLOWPATH);
2713 	} else {
2714 		void *next_object = get_freepointer_safe(s, object);
2715 
2716 		/*
2717 		 * The cmpxchg will only match if there was no additional
2718 		 * operation and if we are on the right processor.
2719 		 *
2720 		 * The cmpxchg does the following atomically (without lock
2721 		 * semantics!)
2722 		 * 1. Relocate first pointer to the current per cpu area.
2723 		 * 2. Verify that tid and freelist have not been changed
2724 		 * 3. If they were not changed replace tid and freelist
2725 		 *
2726 		 * Since this is without lock semantics the protection is only
2727 		 * against code executing on this cpu *not* from access by
2728 		 * other cpus.
2729 		 */
2730 		if (unlikely(!this_cpu_cmpxchg_double(
2731 				s->cpu_slab->freelist, s->cpu_slab->tid,
2732 				object, tid,
2733 				next_object, next_tid(tid)))) {
2734 
2735 			note_cmpxchg_failure("slab_alloc", s, tid);
2736 			goto redo;
2737 		}
2738 		prefetch_freepointer(s, next_object);
2739 		stat(s, ALLOC_FASTPATH);
2740 	}
2741 
2742 	if (unlikely(gfpflags & __GFP_ZERO) && object)
2743 		memset(object, 0, s->object_size);
2744 
2745 	slab_post_alloc_hook(s, gfpflags, 1, &object);
2746 
2747 	return object;
2748 }
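
/*
 * Illustrative sketch (not part of the allocator): the fastpath above
 * treats the per cpu (freelist, tid) pair as one unit. Conceptually the
 * this_cpu_cmpxchg_double() behaves like the following, executed
 * atomically with respect to other code on the same cpu:
 *
 *	if (c->freelist == object && c->tid == tid) {
 *		c->freelist = next_object;
 *		c->tid = next_tid(tid);
 *		-> success, the object belongs to the caller
 *	} else {
 *		-> goto redo and re-read freelist/tid
 *	}
 *
 * which is why neither interrupts nor preemption have to be disabled on
 * this path.
 */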
2749 
2750 static __always_inline void *slab_alloc(struct kmem_cache *s,
2751 		gfp_t gfpflags, unsigned long addr)
2752 {
2753 	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2754 }
2755 
2756 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2757 {
2758 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2759 
2760 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2761 				s->size, gfpflags);
2762 
2763 	return ret;
2764 }
2765 EXPORT_SYMBOL(kmem_cache_alloc);
2766 
2767 #ifdef CONFIG_TRACING
2768 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2769 {
2770 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2771 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2772 	kasan_kmalloc(s, ret, size, gfpflags);
2773 	return ret;
2774 }
2775 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2776 #endif
2777 
2778 #ifdef CONFIG_NUMA
2779 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2780 {
2781 	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2782 
2783 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2784 				    s->object_size, s->size, gfpflags, node);
2785 
2786 	return ret;
2787 }
2788 EXPORT_SYMBOL(kmem_cache_alloc_node);
2789 
2790 #ifdef CONFIG_TRACING
2791 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2792 				    gfp_t gfpflags,
2793 				    int node, size_t size)
2794 {
2795 	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2796 
2797 	trace_kmalloc_node(_RET_IP_, ret,
2798 			   size, s->size, gfpflags, node);
2799 
2800 	kasan_kmalloc(s, ret, size, gfpflags);
2801 	return ret;
2802 }
2803 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2804 #endif
2805 #endif
2806 
2807 /*
2808  * Slow path handling. This may still be called frequently since objects
2809  * have a longer lifetime than the cpu slabs in most processing loads.
2810  *
2811  * So we still attempt to reduce cache line usage. Just take the slab
2812  * lock and free the item. If there is no additional partial page
2813  * handling required then we can return immediately.
2814  */
2815 static void __slab_free(struct kmem_cache *s, struct page *page,
2816 			void *head, void *tail, int cnt,
2817 			unsigned long addr)
2818 
2819 {
2820 	void *prior;
2821 	int was_frozen;
2822 	struct page new;
2823 	unsigned long counters;
2824 	struct kmem_cache_node *n = NULL;
2825 	unsigned long uninitialized_var(flags);
2826 
2827 	stat(s, FREE_SLOWPATH);
2828 
2829 	if (kmem_cache_debug(s) &&
2830 	    !free_debug_processing(s, page, head, tail, cnt, addr))
2831 		return;
2832 
2833 	do {
2834 		if (unlikely(n)) {
2835 			spin_unlock_irqrestore(&n->list_lock, flags);
2836 			n = NULL;
2837 		}
2838 		prior = page->freelist;
2839 		counters = page->counters;
2840 		set_freepointer(s, tail, prior);
2841 		new.counters = counters;
2842 		was_frozen = new.frozen;
2843 		new.inuse -= cnt;
2844 		if ((!new.inuse || !prior) && !was_frozen) {
2845 
2846 			if (kmem_cache_has_cpu_partial(s) && !prior) {
2847 
2848 				/*
2849 				 * Slab was on no list before and will be
2850 				 * partially empty.
2851 				 * We can defer the list move and instead
2852 				 * freeze it.
2853 				 */
2854 				new.frozen = 1;
2855 
2856 			} else { /* Needs to be taken off a list */
2857 
2858 				n = get_node(s, page_to_nid(page));
2859 				/*
2860 				 * Speculatively acquire the list_lock.
2861 				 * If the cmpxchg does not succeed then we may
2862 				 * drop the list_lock without any processing.
2863 				 *
2864 				 * Otherwise the list_lock will synchronize with
2865 				 * other processors updating the list of slabs.
2866 				 */
2867 				spin_lock_irqsave(&n->list_lock, flags);
2868 
2869 			}
2870 		}
2871 
2872 	} while (!cmpxchg_double_slab(s, page,
2873 		prior, counters,
2874 		head, new.counters,
2875 		"__slab_free"));
2876 
2877 	if (likely(!n)) {
2878 
2879 		/*
2880 		 * If we just froze the page then put it onto the
2881 		 * per cpu partial list.
2882 		 */
2883 		if (new.frozen && !was_frozen) {
2884 			put_cpu_partial(s, page, 1);
2885 			stat(s, CPU_PARTIAL_FREE);
2886 		}
2887 		/*
2888 		 * The list lock was not taken therefore no list
2889 		 * activity is necessary.
2890 		 */
2891 		if (was_frozen)
2892 			stat(s, FREE_FROZEN);
2893 		return;
2894 	}
2895 
2896 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2897 		goto slab_empty;
2898 
2899 	/*
2900 	 * Objects left in the slab. If it was not on the partial list before
2901 	 * then add it.
2902 	 */
2903 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2904 		if (kmem_cache_debug(s))
2905 			remove_full(s, n, page);
2906 		add_partial(n, page, DEACTIVATE_TO_TAIL);
2907 		stat(s, FREE_ADD_PARTIAL);
2908 	}
2909 	spin_unlock_irqrestore(&n->list_lock, flags);
2910 	return;
2911 
2912 slab_empty:
2913 	if (prior) {
2914 		/*
2915 		 * Slab on the partial list.
2916 		 */
2917 		remove_partial(n, page);
2918 		stat(s, FREE_REMOVE_PARTIAL);
2919 	} else {
2920 		/* Slab must be on the full list */
2921 		remove_full(s, n, page);
2922 	}
2923 
2924 	spin_unlock_irqrestore(&n->list_lock, flags);
2925 	stat(s, FREE_SLAB);
2926 	discard_slab(s, page);
2927 }
2928 
2929 /*
2930  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2931  * can perform fastpath freeing without additional function calls.
2932  *
2933  * The fastpath is only possible if we are freeing to the current cpu slab
2934  * of this processor. This is typically the case if we have just allocated
2935  * the item before.
2936  *
2937  * If fastpath is not possible then fall back to __slab_free where we deal
2938  * with all sorts of special processing.
2939  *
2940  * Bulk free of a freelist with several objects (all pointing to the
2941  * same page) possible by specifying head and tail ptr, plus objects
2942  * count (cnt). Bulk free indicated by tail pointer being set.
2943  */
2944 static __always_inline void do_slab_free(struct kmem_cache *s,
2945 				struct page *page, void *head, void *tail,
2946 				int cnt, unsigned long addr)
2947 {
2948 	void *tail_obj = tail ? : head;
2949 	struct kmem_cache_cpu *c;
2950 	unsigned long tid;
2951 redo:
2952 	/*
2953 	 * Determine the current cpu's per cpu slab.
2954 	 * The cpu may change afterward. However that does not matter since
2955 	 * data is retrieved via this pointer. If we are on the same cpu
2956 	 * during the cmpxchg then the free will succeed.
2957 	 */
2958 	do {
2959 		tid = this_cpu_read(s->cpu_slab->tid);
2960 		c = raw_cpu_ptr(s->cpu_slab);
2961 	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2962 		 unlikely(tid != READ_ONCE(c->tid)));
2963 
2964 	/* Same with comment on barrier() in slab_alloc_node() */
2965 	barrier();
2966 
2967 	if (likely(page == c->page)) {
2968 		set_freepointer(s, tail_obj, c->freelist);
2969 
2970 		if (unlikely(!this_cpu_cmpxchg_double(
2971 				s->cpu_slab->freelist, s->cpu_slab->tid,
2972 				c->freelist, tid,
2973 				head, next_tid(tid)))) {
2974 
2975 			note_cmpxchg_failure("slab_free", s, tid);
2976 			goto redo;
2977 		}
2978 		stat(s, FREE_FASTPATH);
2979 	} else
2980 		__slab_free(s, page, head, tail_obj, cnt, addr);
2981 
2982 }
2983 
2984 static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2985 				      void *head, void *tail, int cnt,
2986 				      unsigned long addr)
2987 {
2988 	slab_free_freelist_hook(s, head, tail);
2989 	/*
2990 	 * slab_free_freelist_hook() could have put the items into quarantine.
2991 	 * If so, no need to free them.
2992 	 */
2993 	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
2994 		return;
2995 	do_slab_free(s, page, head, tail, cnt, addr);
2996 }
2997 
2998 #ifdef CONFIG_KASAN
2999 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3000 {
3001 	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3002 }
3003 #endif
3004 
3005 void kmem_cache_free(struct kmem_cache *s, void *x)
3006 {
3007 	s = cache_from_obj(s, x);
3008 	if (!s)
3009 		return;
3010 	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3011 	trace_kmem_cache_free(_RET_IP_, x);
3012 }
3013 EXPORT_SYMBOL(kmem_cache_free);
3014 
3015 struct detached_freelist {
3016 	struct page *page;
3017 	void *tail;
3018 	void *freelist;
3019 	int cnt;
3020 	struct kmem_cache *s;
3021 };
3022 
3023 /*
3024  * This function progressively scans the array with free objects (with
3025  * a limited look ahead) and extracts objects belonging to the same
3026  * page.  It builds a detached freelist directly within the given
3027  * page/objects.  This can happen without any need for
3028  * synchronization, because the objects are owned by the running process.
3029  * The freelist is built up as a single linked list in the objects.
3030  * The idea is that this detached freelist can then be bulk
3031  * transferred to the real freelist(s), but only requiring a single
3032  * synchronization primitive.  Look ahead in the array is limited due
3033  * to performance reasons.
3034  */
3035 static inline
3036 int build_detached_freelist(struct kmem_cache *s, size_t size,
3037 			    void **p, struct detached_freelist *df)
3038 {
3039 	size_t first_skipped_index = 0;
3040 	int lookahead = 3;
3041 	void *object;
3042 	struct page *page;
3043 
3044 	/* Always re-init detached_freelist */
3045 	df->page = NULL;
3046 
3047 	do {
3048 		object = p[--size];
3049 		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3050 	} while (!object && size);
3051 
3052 	if (!object)
3053 		return 0;
3054 
3055 	page = virt_to_head_page(object);
3056 	if (!s) {
3057 		/* Handle kmalloc'ed objects */
3058 		if (unlikely(!PageSlab(page))) {
3059 			BUG_ON(!PageCompound(page));
3060 			kfree_hook(object);
3061 			__free_pages(page, compound_order(page));
3062 			p[size] = NULL; /* mark object processed */
3063 			return size;
3064 		}
3065 		/* Derive kmem_cache from object */
3066 		df->s = page->slab_cache;
3067 	} else {
3068 		df->s = cache_from_obj(s, object); /* Support for memcg */
3069 	}
3070 
3071 	/* Start new detached freelist */
3072 	df->page = page;
3073 	set_freepointer(df->s, object, NULL);
3074 	df->tail = object;
3075 	df->freelist = object;
3076 	p[size] = NULL; /* mark object processed */
3077 	df->cnt = 1;
3078 
3079 	while (size) {
3080 		object = p[--size];
3081 		if (!object)
3082 			continue; /* Skip processed objects */
3083 
3084 		/* df->page is always set at this point */
3085 		if (df->page == virt_to_head_page(object)) {
3086 			/* Opportunistically build freelist */
3087 			set_freepointer(df->s, object, df->freelist);
3088 			df->freelist = object;
3089 			df->cnt++;
3090 			p[size] = NULL; /* mark object processed */
3091 
3092 			continue;
3093 		}
3094 
3095 		/* Limit look ahead search */
3096 		if (!--lookahead)
3097 			break;
3098 
3099 		if (!first_skipped_index)
3100 			first_skipped_index = size + 1;
3101 	}
3102 
3103 	return first_skipped_index;
3104 }
3105 
3106 /* Note that interrupts must be enabled when calling this function. */
3107 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3108 {
3109 	if (WARN_ON(!size))
3110 		return;
3111 
3112 	do {
3113 		struct detached_freelist df;
3114 
3115 		size = build_detached_freelist(s, size, p, &df);
3116 		if (!df.page)
3117 			continue;
3118 
3119 		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3120 	} while (likely(size));
3121 }
3122 EXPORT_SYMBOL(kmem_cache_free_bulk);
3123 
3124 /* Note that interrupts must be enabled when calling this function. */
3125 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3126 			  void **p)
3127 {
3128 	struct kmem_cache_cpu *c;
3129 	int i;
3130 
3131 	/* memcg and kmem_cache debug support */
3132 	s = slab_pre_alloc_hook(s, flags);
3133 	if (unlikely(!s))
3134 		return false;
3135 	/*
3136 	 * Drain objects in the per cpu slab, while disabling local
3137 	 * IRQs, which protects against PREEMPT and interrupts
3138 	 * handlers invoking normal fastpath.
3139 	 */
3140 	local_irq_disable();
3141 	c = this_cpu_ptr(s->cpu_slab);
3142 
3143 	for (i = 0; i < size; i++) {
3144 		void *object = c->freelist;
3145 
3146 		if (unlikely(!object)) {
3147 			/*
3148 			 * Invoking the slow path likely has the side-effect
3149 			 * of re-populating the per CPU c->freelist
3150 			 */
3151 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3152 					    _RET_IP_, c);
3153 			if (unlikely(!p[i]))
3154 				goto error;
3155 
3156 			c = this_cpu_ptr(s->cpu_slab);
3157 			continue; /* goto for-loop */
3158 		}
3159 		c->freelist = get_freepointer(s, object);
3160 		p[i] = object;
3161 	}
3162 	c->tid = next_tid(c->tid);
3163 	local_irq_enable();
3164 
3165 	/* Clear memory outside IRQ disabled fastpath loop */
3166 	if (unlikely(flags & __GFP_ZERO)) {
3167 		int j;
3168 
3169 		for (j = 0; j < i; j++)
3170 			memset(p[j], 0, s->object_size);
3171 	}
3172 
3173 	/* memcg and kmem_cache debug support */
3174 	slab_post_alloc_hook(s, flags, size, p);
3175 	return i;
3176 error:
3177 	local_irq_enable();
3178 	slab_post_alloc_hook(s, flags, i, p);
3179 	__kmem_cache_free_bulk(s, i, p);
3180 	return 0;
3181 }
3182 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
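
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * allocates and later frees a batch of objects in one go. The cache
 * "my_cache" and the batch size of 16 are hypothetical.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, 16, objs) != 16)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cache, 16, objs);
 *
 * kmem_cache_alloc_bulk() returns the number of objects allocated (0 on
 * failure, in which case nothing needs to be freed) and both functions
 * must be called with interrupts enabled.
 */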
3183 
3184 
3185 /*
3186  * Object placement in a slab is made very easy because we always start at
3187  * offset 0. If we tune the size of the object to the alignment then we can
3188  * get the required alignment by putting one properly sized object after
3189  * another.
3190  *
3191  * Notice that the allocation order determines the sizes of the per cpu
3192  * caches. Each processor has always one slab available for allocations.
3193  * Increasing the allocation order reduces the number of times that slabs
3194  * must be moved on and off the partial lists and is therefore a factor in
3195  * locking overhead.
3196  */
3197 
3198 /*
3199  * Minimum / Maximum order of slab pages. This influences locking overhead
3200  * and slab fragmentation. A higher order reduces the number of partial slabs
3201  * and increases the number of allocations possible without having to
3202  * take the list_lock.
3203  */
3204 static int slub_min_order;
3205 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3206 static int slub_min_objects;
3207 
3208 /*
3209  * Calculate the order of allocation given a slab object size.
3210  *
3211  * The order of allocation has significant impact on performance and other
3212  * system components. Generally order 0 allocations should be preferred since
3213  * order 0 does not cause fragmentation in the page allocator. Larger objects
3214  * can be problematic to put into order 0 slabs because there may be too much
3215  * unused space left. We go to a higher order if more than 1/16th of the slab
3216  * would be wasted.
3217  *
3218  * In order to reach satisfactory performance we must ensure that a minimum
3219  * number of objects is in one slab. Otherwise we may generate too much
3220  * activity on the partial lists which requires taking the list_lock. This is
3221  * less a concern for large slabs though which are rarely used.
3222  *
3223  * slub_max_order specifies the order where we begin to stop considering the
3224  * number of objects in a slab as critical. If we reach slub_max_order then
3225  * we try to keep the page order as low as possible. So we accept more waste
3226  * of space in favor of a small page order.
3227  *
3228  * Higher order allocations also allow the placement of more objects in a
3229  * slab and thereby reduce object handling overhead. If the user has
3230  * requested a higher minimum order then we start with that one instead of
3231  * the smallest order which will fit the object.
3232  */
3233 static inline int slab_order(int size, int min_objects,
3234 				int max_order, int fract_leftover, int reserved)
3235 {
3236 	int order;
3237 	int rem;
3238 	int min_order = slub_min_order;
3239 
3240 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
3241 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3242 
3243 	for (order = max(min_order, get_order(min_objects * size + reserved));
3244 			order <= max_order; order++) {
3245 
3246 		unsigned long slab_size = PAGE_SIZE << order;
3247 
3248 		rem = (slab_size - reserved) % size;
3249 
3250 		if (rem <= slab_size / fract_leftover)
3251 			break;
3252 	}
3253 
3254 	return order;
3255 }
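
/*
 * Worked example (hypothetical numbers, not from the source): for a
 * 700 byte object with reserved == 0 on a 4K page system, ignoring the
 * min_objects lower bound on the starting order, slab_order() evaluates:
 *
 *	order 0: 4096 % 700 == 596 wasted, 596 > 4096/16 (256)  -> reject
 *	order 1: 8192 % 700 == 492 wasted, 492 <= 8192/16 (512) -> accept
 *
 * so the cache would use order-1 slabs holding 11 objects each, provided
 * that order also satisfies min_objects.
 */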
3256 
3257 static inline int calculate_order(int size, int reserved)
3258 {
3259 	int order;
3260 	int min_objects;
3261 	int fraction;
3262 	int max_objects;
3263 
3264 	/*
3265 	 * Attempt to find best configuration for a slab. This
3266 	 * works by first attempting to generate a layout with
3267 	 * the best configuration and backing off gradually.
3268 	 *
3269 	 * First we increase the acceptable waste in a slab. Then
3270 	 * we reduce the minimum objects required in a slab.
3271 	 */
3272 	min_objects = slub_min_objects;
3273 	if (!min_objects)
3274 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
3275 	max_objects = order_objects(slub_max_order, size, reserved);
3276 	min_objects = min(min_objects, max_objects);
3277 
3278 	while (min_objects > 1) {
3279 		fraction = 16;
3280 		while (fraction >= 4) {
3281 			order = slab_order(size, min_objects,
3282 					slub_max_order, fraction, reserved);
3283 			if (order <= slub_max_order)
3284 				return order;
3285 			fraction /= 2;
3286 		}
3287 		min_objects--;
3288 	}
3289 
3290 	/*
3291 	 * We were unable to place multiple objects in a slab. Now
3292 	 * lets see if we can place a single object there.
3293 	 */
3294 	order = slab_order(size, 1, slub_max_order, 1, reserved);
3295 	if (order <= slub_max_order)
3296 		return order;
3297 
3298 	/*
3299 	 * Doh this slab cannot be placed using slub_max_order.
3300 	 */
3301 	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
3302 	if (order < MAX_ORDER)
3303 		return order;
3304 	return -ENOSYS;
3305 }
3306 
3307 static void
3308 init_kmem_cache_node(struct kmem_cache_node *n)
3309 {
3310 	n->nr_partial = 0;
3311 	spin_lock_init(&n->list_lock);
3312 	INIT_LIST_HEAD(&n->partial);
3313 #ifdef CONFIG_SLUB_DEBUG
3314 	atomic_long_set(&n->nr_slabs, 0);
3315 	atomic_long_set(&n->total_objects, 0);
3316 	INIT_LIST_HEAD(&n->full);
3317 #endif
3318 }
3319 
3320 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3321 {
3322 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3323 			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3324 
3325 	/*
3326 	 * Must align to double word boundary for the double cmpxchg
3327 	 * instructions to work; see __pcpu_double_call_return_bool().
3328 	 */
3329 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3330 				     2 * sizeof(void *));
3331 
3332 	if (!s->cpu_slab)
3333 		return 0;
3334 
3335 	init_kmem_cache_cpus(s);
3336 
3337 	return 1;
3338 }
3339 
3340 static struct kmem_cache *kmem_cache_node;
3341 
3342 /*
3343  * No kmalloc_node yet so do it by hand. We know that this is the first
3344  * slab on the node for this slabcache. There are no concurrent accesses
3345  * possible.
3346  *
3347  * Note that this function only works on the kmem_cache_node
3348  * when allocating for the kmem_cache_node. This is used for bootstrapping
3349  * memory on a fresh node that has no slab structures yet.
3350  */
3351 static void early_kmem_cache_node_alloc(int node)
3352 {
3353 	struct page *page;
3354 	struct kmem_cache_node *n;
3355 
3356 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3357 
3358 	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3359 
3360 	BUG_ON(!page);
3361 	if (page_to_nid(page) != node) {
3362 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3363 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3364 	}
3365 
3366 	n = page->freelist;
3367 	BUG_ON(!n);
3368 	page->freelist = get_freepointer(kmem_cache_node, n);
3369 	page->inuse = 1;
3370 	page->frozen = 0;
3371 	kmem_cache_node->node[node] = n;
3372 #ifdef CONFIG_SLUB_DEBUG
3373 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3374 	init_tracking(kmem_cache_node, n);
3375 #endif
3376 	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3377 		      GFP_KERNEL);
3378 	init_kmem_cache_node(n);
3379 	inc_slabs_node(kmem_cache_node, node, page->objects);
3380 
3381 	/*
3382 	 * No locks need to be taken here as it has just been
3383 	 * initialized and there is no concurrent access.
3384 	 */
3385 	__add_partial(n, page, DEACTIVATE_TO_HEAD);
3386 }
3387 
3388 static void free_kmem_cache_nodes(struct kmem_cache *s)
3389 {
3390 	int node;
3391 	struct kmem_cache_node *n;
3392 
3393 	for_each_kmem_cache_node(s, node, n) {
3394 		s->node[node] = NULL;
3395 		kmem_cache_free(kmem_cache_node, n);
3396 	}
3397 }
3398 
3399 void __kmem_cache_release(struct kmem_cache *s)
3400 {
3401 	cache_random_seq_destroy(s);
3402 	free_percpu(s->cpu_slab);
3403 	free_kmem_cache_nodes(s);
3404 }
3405 
3406 static int init_kmem_cache_nodes(struct kmem_cache *s)
3407 {
3408 	int node;
3409 
3410 	for_each_node_state(node, N_NORMAL_MEMORY) {
3411 		struct kmem_cache_node *n;
3412 
3413 		if (slab_state == DOWN) {
3414 			early_kmem_cache_node_alloc(node);
3415 			continue;
3416 		}
3417 		n = kmem_cache_alloc_node(kmem_cache_node,
3418 						GFP_KERNEL, node);
3419 
3420 		if (!n) {
3421 			free_kmem_cache_nodes(s);
3422 			return 0;
3423 		}
3424 
3425 		init_kmem_cache_node(n);
3426 		s->node[node] = n;
3427 	}
3428 	return 1;
3429 }
3430 
3431 static void set_min_partial(struct kmem_cache *s, unsigned long min)
3432 {
3433 	if (min < MIN_PARTIAL)
3434 		min = MIN_PARTIAL;
3435 	else if (min > MAX_PARTIAL)
3436 		min = MAX_PARTIAL;
3437 	s->min_partial = min;
3438 }
3439 
3440 static void set_cpu_partial(struct kmem_cache *s)
3441 {
3442 #ifdef CONFIG_SLUB_CPU_PARTIAL
3443 	/*
3444 	 * cpu_partial determines the maximum number of objects kept in the
3445 	 * per cpu partial lists of a processor.
3446 	 *
3447 	 * Per cpu partial lists mainly contain slabs that just have one
3448 	 * object freed. If they are used for allocation then they can be
3449 	 * filled up again with minimal effort. The slab will never hit the
3450 	 * per node partial lists and therefore no locking will be required.
3451 	 *
3452 	 * This setting also determines
3453 	 *
3454 	 * A) The number of objects from per cpu partial slabs dumped to the
3455 	 *    per node list when we reach the limit.
3456 	 * B) The number of objects in cpu partial slabs to extract from the
3457 	 *    per node list when we run out of per cpu objects. We only fetch
3458 	 *    50% to keep some capacity around for frees.
3459 	 */
3460 	if (!kmem_cache_has_cpu_partial(s))
3461 		s->cpu_partial = 0;
3462 	else if (s->size >= PAGE_SIZE)
3463 		s->cpu_partial = 2;
3464 	else if (s->size >= 1024)
3465 		s->cpu_partial = 6;
3466 	else if (s->size >= 256)
3467 		s->cpu_partial = 13;
3468 	else
3469 		s->cpu_partial = 30;
3470 #endif
3471 }
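
/*
 * Illustrative sketch (hypothetical cache, not from the source): with
 * s->size == 512 the table above yields s->cpu_partial == 13, which
 * bounds both directions of traffic described in A) and B):
 *
 *	put_cpu_partial():  once more than 13 free objects sit on the cpu
 *			    partial list the whole set is moved to the per
 *			    node list (CPU_PARTIAL_DRAIN).
 *	get_partial_node(): refilling stops once more than 13 / 2 == 6
 *			    objects have been acquired, leaving headroom
 *			    for subsequent frees.
 */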
3472 
3473 /*
3474  * calculate_sizes() determines the order and the distribution of data within
3475  * a slab object.
3476  */
3477 static int calculate_sizes(struct kmem_cache *s, int forced_order)
3478 {
3479 	unsigned long flags = s->flags;
3480 	size_t size = s->object_size;
3481 	int order;
3482 
3483 	/*
3484 	 * Round up object size to the next word boundary. We can only
3485 	 * place the free pointer at word boundaries and this determines
3486 	 * the possible location of the free pointer.
3487 	 */
3488 	size = ALIGN(size, sizeof(void *));
3489 
3490 #ifdef CONFIG_SLUB_DEBUG
3491 	/*
3492 	 * Determine if we can poison the object itself. If the user of
3493 	 * the slab may touch the object after free or before allocation
3494 	 * then we should never poison the object itself.
3495 	 */
3496 	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3497 			!s->ctor)
3498 		s->flags |= __OBJECT_POISON;
3499 	else
3500 		s->flags &= ~__OBJECT_POISON;
3501 
3502 
3503 	/*
3504 	 * If we are Redzoning then check if there is some space between the
3505 	 * end of the object and the free pointer. If not then add an
3506 	 * additional word to have some bytes to store Redzone information.
3507 	 */
3508 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3509 		size += sizeof(void *);
3510 #endif
3511 
3512 	/*
3513 	 * With that we have determined the number of bytes in actual use
3514 	 * by the object. This is the potential offset to the free pointer.
3515 	 */
3516 	s->inuse = size;
3517 
3518 	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3519 		s->ctor)) {
3520 		/*
3521 		 * Relocate free pointer after the object if it is not
3522 		 * permitted to overwrite the first word of the object on
3523 		 * kmem_cache_free.
3524 		 *
3525 		 * This is the case if we do RCU, have a constructor or
3526 		 * destructor or are poisoning the objects.
3527 		 */
3528 		s->offset = size;
3529 		size += sizeof(void *);
3530 	}
3531 
3532 #ifdef CONFIG_SLUB_DEBUG
3533 	if (flags & SLAB_STORE_USER)
3534 		/*
3535 		 * Need to store information about allocs and frees after
3536 		 * the object.
3537 		 */
3538 		size += 2 * sizeof(struct track);
3539 #endif
3540 
3541 	kasan_cache_create(s, &size, &s->flags);
3542 #ifdef CONFIG_SLUB_DEBUG
3543 	if (flags & SLAB_RED_ZONE) {
3544 		/*
3545 		 * Add some empty padding so that we can catch
3546 		 * overwrites from earlier objects rather than let
3547 		 * tracking information or the free pointer be
3548 		 * corrupted if a user writes before the start
3549 		 * of the object.
3550 		 */
3551 		size += sizeof(void *);
3552 
3553 		s->red_left_pad = sizeof(void *);
3554 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3555 		size += s->red_left_pad;
3556 	}
3557 #endif
3558 
3559 	/*
3560 	 * SLUB stores one object immediately after another beginning from
3561 	 * offset 0. In order to align the objects we have to simply size
3562 	 * each object to conform to the alignment.
3563 	 */
3564 	size = ALIGN(size, s->align);
3565 	s->size = size;
3566 	if (forced_order >= 0)
3567 		order = forced_order;
3568 	else
3569 		order = calculate_order(size, s->reserved);
3570 
3571 	if (order < 0)
3572 		return 0;
3573 
3574 	s->allocflags = 0;
3575 	if (order)
3576 		s->allocflags |= __GFP_COMP;
3577 
3578 	if (s->flags & SLAB_CACHE_DMA)
3579 		s->allocflags |= GFP_DMA;
3580 
3581 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3582 		s->allocflags |= __GFP_RECLAIMABLE;
3583 
3584 	/*
3585 	 * Determine the number of objects per slab
3586 	 */
3587 	s->oo = oo_make(order, size, s->reserved);
3588 	s->min = oo_make(get_order(size), size, s->reserved);
3589 	if (oo_objects(s->oo) > oo_objects(s->max))
3590 		s->max = s->oo;
3591 
3592 	return !!oo_objects(s->oo);
3593 }
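
/*
 * Worked layout example (hypothetical cache, not from the source): a
 * 24 byte object with SLAB_POISON | SLAB_RED_ZONE | SLAB_STORE_USER,
 * no constructor, on 64 bit, assuming s->align == 8, struct track is
 * 24 bytes (no CONFIG_STACKTRACE) and KASAN does not grow the object:
 *
 *	  0 ..  23	object payload (s->object_size == 24)
 *	 24 ..  31	right redzone word (added because size == object_size)
 *	 32 ..  39	free pointer relocated past the object (s->offset == 32)
 *			because poisoning forbids reusing the object itself
 *	 40 ..  87	two struct track records (alloc + free)
 *	 88 ..  95	extra padding word added for SLAB_RED_ZONE
 *	 96 .. 103	left redzone (s->red_left_pad == 8)
 *
 * giving s->size == 104. The exact numbers depend on s->align and the
 * layout of struct track; the point is only the order in which
 * calculate_sizes() stacks the metadata.
 */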
3594 
3595 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
3596 {
3597 	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3598 	s->reserved = 0;
3599 #ifdef CONFIG_SLAB_FREELIST_HARDENED
3600 	s->random = get_random_long();
3601 #endif
3602 
3603 	if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
3604 		s->reserved = sizeof(struct rcu_head);
3605 
3606 	if (!calculate_sizes(s, -1))
3607 		goto error;
3608 	if (disable_higher_order_debug) {
3609 		/*
3610 		 * Disable debugging flags that store metadata if the min slab
3611 		 * order increased.
3612 		 */
3613 		if (get_order(s->size) > get_order(s->object_size)) {
3614 			s->flags &= ~DEBUG_METADATA_FLAGS;
3615 			s->offset = 0;
3616 			if (!calculate_sizes(s, -1))
3617 				goto error;
3618 		}
3619 	}
3620 
3621 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3622     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3623 	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3624 		/* Enable fast mode */
3625 		s->flags |= __CMPXCHG_DOUBLE;
3626 #endif
3627 
3628 	/*
3629 	 * The larger the object size is, the more pages we want on the partial
3630 	 * list to avoid pounding the page allocator excessively.
3631 	 */
3632 	set_min_partial(s, ilog2(s->size) / 2);
3633 
3634 	set_cpu_partial(s);
3635 
3636 #ifdef CONFIG_NUMA
3637 	s->remote_node_defrag_ratio = 1000;
3638 #endif
3639 
3640 	/* Initialize the pre-computed randomized freelist if slab is up */
3641 	if (slab_state >= UP) {
3642 		if (init_cache_random_seq(s))
3643 			goto error;
3644 	}
3645 
3646 	if (!init_kmem_cache_nodes(s))
3647 		goto error;
3648 
3649 	if (alloc_kmem_cache_cpus(s))
3650 		return 0;
3651 
3652 	free_kmem_cache_nodes(s);
3653 error:
3654 	if (flags & SLAB_PANIC)
3655 		panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
3656 		      s->name, (unsigned long)s->size, s->size,
3657 		      oo_order(s->oo), s->offset, flags);
3658 	return -EINVAL;
3659 }
3660 
3661 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3662 							const char *text)
3663 {
3664 #ifdef CONFIG_SLUB_DEBUG
3665 	void *addr = page_address(page);
3666 	void *p;
3667 	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3668 				     sizeof(long), GFP_ATOMIC);
3669 	if (!map)
3670 		return;
3671 	slab_err(s, page, text, s->name);
3672 	slab_lock(page);
3673 
3674 	get_map(s, page, map);
3675 	for_each_object(p, s, addr, page->objects) {
3676 
3677 		if (!test_bit(slab_index(p, s, addr), map)) {
3678 			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3679 			print_tracking(s, p);
3680 		}
3681 	}
3682 	slab_unlock(page);
3683 	kfree(map);
3684 #endif
3685 }
3686 
3687 /*
3688  * Attempt to free all partial slabs on a node.
3689  * This is called from __kmem_cache_shutdown(). We must take list_lock
3690  * because a sysfs file might still access the partial list after shutdown.
3691  */
3692 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3693 {
3694 	LIST_HEAD(discard);
3695 	struct page *page, *h;
3696 
3697 	BUG_ON(irqs_disabled());
3698 	spin_lock_irq(&n->list_lock);
3699 	list_for_each_entry_safe(page, h, &n->partial, lru) {
3700 		if (!page->inuse) {
3701 			remove_partial(n, page);
3702 			list_add(&page->lru, &discard);
3703 		} else {
3704 			list_slab_objects(s, page,
3705 			"Objects remaining in %s on __kmem_cache_shutdown()");
3706 		}
3707 	}
3708 	spin_unlock_irq(&n->list_lock);
3709 
3710 	list_for_each_entry_safe(page, h, &discard, lru)
3711 		discard_slab(s, page);
3712 }
3713 
3714 /*
3715  * Release all resources used by a slab cache.
3716  */
3717 int __kmem_cache_shutdown(struct kmem_cache *s)
3718 {
3719 	int node;
3720 	struct kmem_cache_node *n;
3721 
3722 	flush_all(s);
3723 	/* Attempt to free all objects */
3724 	for_each_kmem_cache_node(s, node, n) {
3725 		free_partial(s, n);
3726 		if (n->nr_partial || slabs_node(s, node))
3727 			return 1;
3728 	}
3729 	sysfs_slab_remove(s);
3730 	return 0;
3731 }
3732 
3733 /********************************************************************
3734  *		Kmalloc subsystem
3735  *******************************************************************/
3736 
3737 static int __init setup_slub_min_order(char *str)
3738 {
3739 	get_option(&str, &slub_min_order);
3740 
3741 	return 1;
3742 }
3743 
3744 __setup("slub_min_order=", setup_slub_min_order);
3745 
3746 static int __init setup_slub_max_order(char *str)
3747 {
3748 	get_option(&str, &slub_max_order);
3749 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3750 
3751 	return 1;
3752 }
3753 
3754 __setup("slub_max_order=", setup_slub_max_order);
3755 
3756 static int __init setup_slub_min_objects(char *str)
3757 {
3758 	get_option(&str, &slub_min_objects);
3759 
3760 	return 1;
3761 }
3762 
3763 __setup("slub_min_objects=", setup_slub_min_objects);
3764 
3765 void *__kmalloc(size_t size, gfp_t flags)
3766 {
3767 	struct kmem_cache *s;
3768 	void *ret;
3769 
3770 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3771 		return kmalloc_large(size, flags);
3772 
3773 	s = kmalloc_slab(size, flags);
3774 
3775 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3776 		return s;
3777 
3778 	ret = slab_alloc(s, flags, _RET_IP_);
3779 
3780 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3781 
3782 	kasan_kmalloc(s, ret, size, flags);
3783 
3784 	return ret;
3785 }
3786 EXPORT_SYMBOL(__kmalloc);
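
/*
 * Illustrative sketch (the helper below is hypothetical and not part of
 * this file's interface): ordinary callers reach __kmalloc() through the
 * kmalloc() wrapper for sizes served by the kmalloc caches, and release
 * the memory with kfree().
 */
static __maybe_unused void *example_kmalloc_user(void)
{
	void *buf = kmalloc(256, GFP_KERNEL);	/* backed by a kmalloc cache */

	if (!buf)
		return NULL;
	/* ... use buf, then release it with kfree(buf) ... */
	return buf;
}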
3787 
3788 #ifdef CONFIG_NUMA
3789 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3790 {
3791 	struct page *page;
3792 	void *ptr = NULL;
3793 
3794 	flags |= __GFP_COMP | __GFP_NOTRACK;
3795 	page = alloc_pages_node(node, flags, get_order(size));
3796 	if (page)
3797 		ptr = page_address(page);
3798 
3799 	kmalloc_large_node_hook(ptr, size, flags);
3800 	return ptr;
3801 }
3802 
3803 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3804 {
3805 	struct kmem_cache *s;
3806 	void *ret;
3807 
3808 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3809 		ret = kmalloc_large_node(size, flags, node);
3810 
3811 		trace_kmalloc_node(_RET_IP_, ret,
3812 				   size, PAGE_SIZE << get_order(size),
3813 				   flags, node);
3814 
3815 		return ret;
3816 	}
3817 
3818 	s = kmalloc_slab(size, flags);
3819 
3820 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3821 		return s;
3822 
3823 	ret = slab_alloc_node(s, flags, node, _RET_IP_);
3824 
3825 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3826 
3827 	kasan_kmalloc(s, ret, size, flags);
3828 
3829 	return ret;
3830 }
3831 EXPORT_SYMBOL(__kmalloc_node);
3832 #endif
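
/*
 * Illustrative sketch (hypothetical helper): NUMA aware callers pass an
 * explicit node so that the allocation above goes through
 * slab_alloc_node() rather than defaulting to the local node.
 */
static __maybe_unused void *example_kmalloc_on_node(int node)
{
	/* Falls back to other nodes if @node has no free memory. */
	return kmalloc_node(512, GFP_KERNEL, node);
}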
3833 
3834 #ifdef CONFIG_HARDENED_USERCOPY
3835 /*
3836  * Rejects objects that are incorrectly sized.
3837  *
3838  * Returns NULL if the check passes, otherwise a const char * pointing
3839  * to the name of the cache to indicate an error.
3840  */
3841 const char *__check_heap_object(const void *ptr, unsigned long n,
3842 				struct page *page)
3843 {
3844 	struct kmem_cache *s;
3845 	unsigned long offset;
3846 	size_t object_size;
3847 
3848 	/* Find object and usable object size. */
3849 	s = page->slab_cache;
3850 	object_size = slab_ksize(s);
3851 
3852 	/* Reject impossible pointers. */
3853 	if (ptr < page_address(page))
3854 		return s->name;
3855 
3856 	/* Find offset within object. */
3857 	offset = (ptr - page_address(page)) % s->size;
3858 
3859 	/* Adjust for redzone and reject if within the redzone. */
3860 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3861 		if (offset < s->red_left_pad)
3862 			return s->name;
3863 		offset -= s->red_left_pad;
3864 	}
3865 
3866 	/* Allow address range falling entirely within object size. */
3867 	if (offset <= object_size && n <= object_size - offset)
3868 		return NULL;
3869 
3870 	return s->name;
3871 }
3872 #endif /* CONFIG_HARDENED_USERCOPY */
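
/*
 * Worked example for the check above (numbers invented): with s->size ==
 * 128, a usable object_size of 100 and no red zone, a pointer 40 bytes
 * into an object yields offset == 40.  A copy of n == 60 bytes passes
 * (40 + 60 <= 100), while n == 80 fails the "n <= object_size - offset"
 * test and the cache name is returned to flag the overflow.
 */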
3873 
3874 static size_t __ksize(const void *object)
3875 {
3876 	struct page *page;
3877 
3878 	if (unlikely(object == ZERO_SIZE_PTR))
3879 		return 0;
3880 
3881 	page = virt_to_head_page(object);
3882 
3883 	if (unlikely(!PageSlab(page))) {
3884 		WARN_ON(!PageCompound(page));
3885 		return PAGE_SIZE << compound_order(page);
3886 	}
3887 
3888 	return slab_ksize(page->slab_cache);
3889 }
3890 
3891 size_t ksize(const void *object)
3892 {
3893 	size_t size = __ksize(object);
3894 	/* We assume that ksize callers could use the whole allocated area,
3895 	 * so we need to unpoison it.
3896 	 */
3897 	kasan_unpoison_shadow(object, size);
3898 	return size;
3899 }
3900 EXPORT_SYMBOL(ksize);
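
/*
 * Illustrative sketch (hypothetical helper, sizes are examples only):
 * ksize() reports the usable size of the backing cache, which may exceed
 * the size that was originally requested.
 */
static __maybe_unused void example_ksize(void)
{
	void *p = kmalloc(100, GFP_KERNEL);

	if (!p)
		return;
	/* Typically prints 128 here, the size of the kmalloc-128 cache. */
	pr_debug("usable size: %zu\n", ksize(p));
	kfree(p);
}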
3901 
3902 void kfree(const void *x)
3903 {
3904 	struct page *page;
3905 	void *object = (void *)x;
3906 
3907 	trace_kfree(_RET_IP_, x);
3908 
3909 	if (unlikely(ZERO_OR_NULL_PTR(x)))
3910 		return;
3911 
3912 	page = virt_to_head_page(x);
3913 	if (unlikely(!PageSlab(page))) {
3914 		BUG_ON(!PageCompound(page));
3915 		kfree_hook(x);
3916 		__free_pages(page, compound_order(page));
3917 		return;
3918 	}
3919 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3920 }
3921 EXPORT_SYMBOL(kfree);
3922 
3923 #define SHRINK_PROMOTE_MAX 32
3924 
3925 /*
3926  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3927  * up most to the head of the partial lists. New allocations will then
3928  * fill those up and thus they can be removed from the partial lists.
3929  *
3930  * The slabs with the least items are placed last. As a result they are
3931  * allocated from last, which increases the chance that the few objects
3932  * still in them are freed and the slabs can become empty.
3933  */
3934 int __kmem_cache_shrink(struct kmem_cache *s)
3935 {
3936 	int node;
3937 	int i;
3938 	struct kmem_cache_node *n;
3939 	struct page *page;
3940 	struct page *t;
3941 	struct list_head discard;
3942 	struct list_head promote[SHRINK_PROMOTE_MAX];
3943 	unsigned long flags;
3944 	int ret = 0;
3945 
3946 	flush_all(s);
3947 	for_each_kmem_cache_node(s, node, n) {
3948 		INIT_LIST_HEAD(&discard);
3949 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3950 			INIT_LIST_HEAD(promote + i);
3951 
3952 		spin_lock_irqsave(&n->list_lock, flags);
3953 
3954 		/*
3955 		 * Build lists of slabs to discard or promote.
3956 		 *
3957 		 * Note that concurrent frees may occur while we hold the
3958 		 * list_lock. page->inuse here is the upper limit.
3959 		 */
3960 		list_for_each_entry_safe(page, t, &n->partial, lru) {
3961 			int free = page->objects - page->inuse;
3962 
3963 			/* Do not reread page->inuse */
3964 			barrier();
3965 
3966 			/* We do not keep full slabs on the list */
3967 			BUG_ON(free <= 0);
3968 
3969 			if (free == page->objects) {
3970 				list_move(&page->lru, &discard);
3971 				n->nr_partial--;
3972 			} else if (free <= SHRINK_PROMOTE_MAX)
3973 				list_move(&page->lru, promote + free - 1);
3974 		}
3975 
3976 		/*
3977 		 * Promote the slabs filled up most to the head of the
3978 		 * partial list.
3979 		 */
3980 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
3981 			list_splice(promote + i, &n->partial);
3982 
3983 		spin_unlock_irqrestore(&n->list_lock, flags);
3984 
3985 		/* Release empty slabs */
3986 		list_for_each_entry_safe(page, t, &discard, lru)
3987 			discard_slab(s, page);
3988 
3989 		if (slabs_node(s, node))
3990 			ret = 1;
3991 	}
3992 
3993 	return ret;
3994 }
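
/*
 * Illustrative sketch (the cache pointer is hypothetical): cache owners
 * can trigger the compaction above through kmem_cache_shrink(), e.g.
 * after a large burst of frees or from a shrinker callback.
 */
static __maybe_unused void example_shrink_cache(struct kmem_cache *cachep)
{
	kmem_cache_shrink(cachep);
}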
3995 
3996 #ifdef CONFIG_MEMCG
3997 static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
3998 {
3999 	/*
4000 	 * Called with all the locks held after a sched RCU grace period.
4001 	 * Even if @s becomes empty after shrinking, we can't know that @s
4002 	 * doesn't have allocations already in-flight and thus can't
4003 	 * destroy @s until the associated memcg is released.
4004 	 *
4005 	 * However, let's remove the sysfs files for empty caches here.
4006 	 * Each cache has a lot of interface files which aren't
4007 	 * particularly useful for caches that are empty and draining; if we
4008 	 * kept them we could easily end up with millions of unnecessary
4009 	 * sysfs files on systems with a lot of memory and transient cgroups.
4010 	 */
4011 	if (!__kmem_cache_shrink(s))
4012 		sysfs_slab_remove(s);
4013 }
4014 
4015 void __kmemcg_cache_deactivate(struct kmem_cache *s)
4016 {
4017 	/*
4018 	 * Disable empty slabs caching. Used to avoid pinning offline
4019 	 * memory cgroups by kmem pages that can be freed.
4020 	 */
4021 	slub_set_cpu_partial(s, 0);
4022 	s->min_partial = 0;
4023 
4024 	/*
4025 	 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
4026 	 * we have to make sure the change is visible before shrinking.
4027 	 */
4028 	slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
4029 }
4030 #endif
4031 
4032 static int slab_mem_going_offline_callback(void *arg)
4033 {
4034 	struct kmem_cache *s;
4035 
4036 	mutex_lock(&slab_mutex);
4037 	list_for_each_entry(s, &slab_caches, list)
4038 		__kmem_cache_shrink(s);
4039 	mutex_unlock(&slab_mutex);
4040 
4041 	return 0;
4042 }
4043 
4044 static void slab_mem_offline_callback(void *arg)
4045 {
4046 	struct kmem_cache_node *n;
4047 	struct kmem_cache *s;
4048 	struct memory_notify *marg = arg;
4049 	int offline_node;
4050 
4051 	offline_node = marg->status_change_nid_normal;
4052 
4053 	/*
4054 	 * If the node still has available memory, we still need the
4055 	 * kmem_cache_node for it and must not tear it down.
4056 	 */
4057 	if (offline_node < 0)
4058 		return;
4059 
4060 	mutex_lock(&slab_mutex);
4061 	list_for_each_entry(s, &slab_caches, list) {
4062 		n = get_node(s, offline_node);
4063 		if (n) {
4064 			/*
4065 			 * if n->nr_slabs > 0, slabs still exist on the node
4066 			 * that is going down. We were unable to free them,
4067 			 * and the offline_pages() function should not have
4068 			 * called this callback. So, we must fail.
4069 			 */
4070 			BUG_ON(slabs_node(s, offline_node));
4071 
4072 			s->node[offline_node] = NULL;
4073 			kmem_cache_free(kmem_cache_node, n);
4074 		}
4075 	}
4076 	mutex_unlock(&slab_mutex);
4077 }
4078 
4079 static int slab_mem_going_online_callback(void *arg)
4080 {
4081 	struct kmem_cache_node *n;
4082 	struct kmem_cache *s;
4083 	struct memory_notify *marg = arg;
4084 	int nid = marg->status_change_nid_normal;
4085 	int ret = 0;
4086 
4087 	/*
4088 	 * If the node's memory is already available, then kmem_cache_node is
4089 	 * already created. Nothing to do.
4090 	 */
4091 	if (nid < 0)
4092 		return 0;
4093 
4094 	/*
4095 	 * We are bringing a node online. No memory is available yet. We must
4096 	 * allocate a kmem_cache_node structure in order to bring the node
4097 	 * online.
4098 	 */
4099 	mutex_lock(&slab_mutex);
4100 	list_for_each_entry(s, &slab_caches, list) {
4101 		/*
4102 		 * XXX: kmem_cache_alloc_node() will fall back to other nodes
4103 		 *      since memory is not yet available from the node that
4104 		 *      is brought up.
4105 		 */
4106 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4107 		if (!n) {
4108 			ret = -ENOMEM;
4109 			goto out;
4110 		}
4111 		init_kmem_cache_node(n);
4112 		s->node[nid] = n;
4113 	}
4114 out:
4115 	mutex_unlock(&slab_mutex);
4116 	return ret;
4117 }
4118 
4119 static int slab_memory_callback(struct notifier_block *self,
4120 				unsigned long action, void *arg)
4121 {
4122 	int ret = 0;
4123 
4124 	switch (action) {
4125 	case MEM_GOING_ONLINE:
4126 		ret = slab_mem_going_online_callback(arg);
4127 		break;
4128 	case MEM_GOING_OFFLINE:
4129 		ret = slab_mem_going_offline_callback(arg);
4130 		break;
4131 	case MEM_OFFLINE:
4132 	case MEM_CANCEL_ONLINE:
4133 		slab_mem_offline_callback(arg);
4134 		break;
4135 	case MEM_ONLINE:
4136 	case MEM_CANCEL_OFFLINE:
4137 		break;
4138 	}
4139 	if (ret)
4140 		ret = notifier_from_errno(ret);
4141 	else
4142 		ret = NOTIFY_OK;
4143 	return ret;
4144 }
4145 
4146 static struct notifier_block slab_memory_callback_nb = {
4147 	.notifier_call = slab_memory_callback,
4148 	.priority = SLAB_CALLBACK_PRI,
4149 };
4150 
4151 /********************************************************************
4152  *			Basic setup of slabs
4153  *******************************************************************/
4154 
4155 /*
4156  * Used for early kmem_cache structures that were allocated using
4157  * the page allocator. Allocate them properly then fix up the pointers
4158  * that may be pointing to the wrong kmem_cache structure.
4159  */
4160 
4161 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4162 {
4163 	int node;
4164 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4165 	struct kmem_cache_node *n;
4166 
4167 	memcpy(s, static_cache, kmem_cache->object_size);
4168 
4169 	/*
4170 	 * This runs very early, and only the boot processor is supposed to be
4171 	 * up.  Even if that weren't true, IRQs are not yet enabled so we
4172 	 * could not send IPIs around anyway.
4173 	 */
4174 	__flush_cpu_slab(s, smp_processor_id());
4175 	for_each_kmem_cache_node(s, node, n) {
4176 		struct page *p;
4177 
4178 		list_for_each_entry(p, &n->partial, lru)
4179 			p->slab_cache = s;
4180 
4181 #ifdef CONFIG_SLUB_DEBUG
4182 		list_for_each_entry(p, &n->full, lru)
4183 			p->slab_cache = s;
4184 #endif
4185 	}
4186 	slab_init_memcg_params(s);
4187 	list_add(&s->list, &slab_caches);
4188 	memcg_link_cache(s);
4189 	return s;
4190 }
4191 
4192 void __init kmem_cache_init(void)
4193 {
4194 	static __initdata struct kmem_cache boot_kmem_cache,
4195 		boot_kmem_cache_node;
4196 
4197 	if (debug_guardpage_minorder())
4198 		slub_max_order = 0;
4199 
4200 	kmem_cache_node = &boot_kmem_cache_node;
4201 	kmem_cache = &boot_kmem_cache;
4202 
4203 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
4204 		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
4205 
4206 	register_hotmemory_notifier(&slab_memory_callback_nb);
4207 
4208 	/* Able to allocate the per node structures */
4209 	slab_state = PARTIAL;
4210 
4211 	create_boot_cache(kmem_cache, "kmem_cache",
4212 			offsetof(struct kmem_cache, node) +
4213 				nr_node_ids * sizeof(struct kmem_cache_node *),
4214 		       SLAB_HWCACHE_ALIGN);
4215 
4216 	kmem_cache = bootstrap(&boot_kmem_cache);
4217 
4218 	/*
4219 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
4220 	 * kmem_cache_node is separately allocated so no need to
4221 	 * update any list pointers.
4222 	 */
4223 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4224 
4225 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
4226 	setup_kmalloc_cache_index_table();
4227 	create_kmalloc_caches(0);
4228 
4229 	/* Setup random freelists for each cache */
4230 	init_freelist_randomization();
4231 
4232 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4233 				  slub_cpu_dead);
4234 
4235 	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
4236 		cache_line_size(),
4237 		slub_min_order, slub_max_order, slub_min_objects,
4238 		nr_cpu_ids, nr_node_ids);
4239 }
4240 
4241 void __init kmem_cache_init_late(void)
4242 {
4243 }
4244 
4245 struct kmem_cache *
4246 __kmem_cache_alias(const char *name, size_t size, size_t align,
4247 		   unsigned long flags, void (*ctor)(void *))
4248 {
4249 	struct kmem_cache *s, *c;
4250 
4251 	s = find_mergeable(size, align, flags, name, ctor);
4252 	if (s) {
4253 		s->refcount++;
4254 
4255 		/*
4256 		 * Adjust the object sizes so that we clear
4257 		 * the complete object on kzalloc.
4258 		 */
4259 		s->object_size = max(s->object_size, (int)size);
4260 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
4261 
4262 		for_each_memcg_cache(c, s) {
4263 			c->object_size = s->object_size;
4264 			c->inuse = max_t(int, c->inuse,
4265 					 ALIGN(size, sizeof(void *)));
4266 		}
4267 
4268 		if (sysfs_slab_alias(s, name)) {
4269 			s->refcount--;
4270 			s = NULL;
4271 		}
4272 	}
4273 
4274 	return s;
4275 }
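
/*
 * Illustrative sketch (names and sizes invented): two caches created with
 * compatible size, alignment and flags may be merged, in which case the
 * second kmem_cache_create() only takes a reference on the first cache
 * and registers a sysfs alias via the function above.
 */
static __maybe_unused void example_cache_merging(void)
{
	struct kmem_cache *a = kmem_cache_create("example_a", 96, 0, 0, NULL);
	struct kmem_cache *b = kmem_cache_create("example_b", 96, 0, 0, NULL);

	/* With slab merging enabled, a and b may refer to the same cache. */
	kmem_cache_destroy(b);
	kmem_cache_destroy(a);
}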
4276 
4277 int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
4278 {
4279 	int err;
4280 
4281 	err = kmem_cache_open(s, flags);
4282 	if (err)
4283 		return err;
4284 
4285 	/* Mutex is not taken during early boot */
4286 	if (slab_state <= UP)
4287 		return 0;
4288 
4289 	memcg_propagate_slab_attrs(s);
4290 	err = sysfs_slab_add(s);
4291 	if (err)
4292 		__kmem_cache_release(s);
4293 
4294 	return err;
4295 }
4296 
4297 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4298 {
4299 	struct kmem_cache *s;
4300 	void *ret;
4301 
4302 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4303 		return kmalloc_large(size, gfpflags);
4304 
4305 	s = kmalloc_slab(size, gfpflags);
4306 
4307 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4308 		return s;
4309 
4310 	ret = slab_alloc(s, gfpflags, caller);
4311 
4312 	/* Honor the call site pointer we received. */
4313 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4314 
4315 	return ret;
4316 }
4317 
4318 #ifdef CONFIG_NUMA
4319 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4320 					int node, unsigned long caller)
4321 {
4322 	struct kmem_cache *s;
4323 	void *ret;
4324 
4325 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4326 		ret = kmalloc_large_node(size, gfpflags, node);
4327 
4328 		trace_kmalloc_node(caller, ret,
4329 				   size, PAGE_SIZE << get_order(size),
4330 				   gfpflags, node);
4331 
4332 		return ret;
4333 	}
4334 
4335 	s = kmalloc_slab(size, gfpflags);
4336 
4337 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4338 		return s;
4339 
4340 	ret = slab_alloc_node(s, gfpflags, node, caller);
4341 
4342 	/* Honor the call site pointer we received. */
4343 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4344 
4345 	return ret;
4346 }
4347 #endif
4348 
4349 #ifdef CONFIG_SYSFS
4350 static int count_inuse(struct page *page)
4351 {
4352 	return page->inuse;
4353 }
4354 
4355 static int count_total(struct page *page)
4356 {
4357 	return page->objects;
4358 }
4359 #endif
4360 
4361 #ifdef CONFIG_SLUB_DEBUG
4362 static int validate_slab(struct kmem_cache *s, struct page *page,
4363 						unsigned long *map)
4364 {
4365 	void *p;
4366 	void *addr = page_address(page);
4367 
4368 	if (!check_slab(s, page) ||
4369 			!on_freelist(s, page, NULL))
4370 		return 0;
4371 
4372 	/* Now we know that a valid freelist exists */
4373 	bitmap_zero(map, page->objects);
4374 
4375 	get_map(s, page, map);
4376 	for_each_object(p, s, addr, page->objects) {
4377 		if (test_bit(slab_index(p, s, addr), map))
4378 			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4379 				return 0;
4380 	}
4381 
4382 	for_each_object(p, s, addr, page->objects)
4383 		if (!test_bit(slab_index(p, s, addr), map))
4384 			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4385 				return 0;
4386 	return 1;
4387 }
4388 
4389 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4390 						unsigned long *map)
4391 {
4392 	slab_lock(page);
4393 	validate_slab(s, page, map);
4394 	slab_unlock(page);
4395 }
4396 
4397 static int validate_slab_node(struct kmem_cache *s,
4398 		struct kmem_cache_node *n, unsigned long *map)
4399 {
4400 	unsigned long count = 0;
4401 	struct page *page;
4402 	unsigned long flags;
4403 
4404 	spin_lock_irqsave(&n->list_lock, flags);
4405 
4406 	list_for_each_entry(page, &n->partial, lru) {
4407 		validate_slab_slab(s, page, map);
4408 		count++;
4409 	}
4410 	if (count != n->nr_partial)
4411 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4412 		       s->name, count, n->nr_partial);
4413 
4414 	if (!(s->flags & SLAB_STORE_USER))
4415 		goto out;
4416 
4417 	list_for_each_entry(page, &n->full, lru) {
4418 		validate_slab_slab(s, page, map);
4419 		count++;
4420 	}
4421 	if (count != atomic_long_read(&n->nr_slabs))
4422 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4423 		       s->name, count, atomic_long_read(&n->nr_slabs));
4424 
4425 out:
4426 	spin_unlock_irqrestore(&n->list_lock, flags);
4427 	return count;
4428 }
4429 
4430 static long validate_slab_cache(struct kmem_cache *s)
4431 {
4432 	int node;
4433 	unsigned long count = 0;
4434 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4435 				sizeof(unsigned long), GFP_KERNEL);
4436 	struct kmem_cache_node *n;
4437 
4438 	if (!map)
4439 		return -ENOMEM;
4440 
4441 	flush_all(s);
4442 	for_each_kmem_cache_node(s, node, n)
4443 		count += validate_slab_node(s, n, map);
4444 	kfree(map);
4445 	return count;
4446 }
4447 /*
4448  * Generate lists of code addresses where slabcache objects are allocated
4449  * and freed.
4450  */
4451 
4452 struct location {
4453 	unsigned long count;
4454 	unsigned long addr;
4455 	long long sum_time;
4456 	long min_time;
4457 	long max_time;
4458 	long min_pid;
4459 	long max_pid;
4460 	DECLARE_BITMAP(cpus, NR_CPUS);
4461 	nodemask_t nodes;
4462 };
4463 
4464 struct loc_track {
4465 	unsigned long max;
4466 	unsigned long count;
4467 	struct location *loc;
4468 };
4469 
4470 static void free_loc_track(struct loc_track *t)
4471 {
4472 	if (t->max)
4473 		free_pages((unsigned long)t->loc,
4474 			get_order(sizeof(struct location) * t->max));
4475 }
4476 
4477 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4478 {
4479 	struct location *l;
4480 	int order;
4481 
4482 	order = get_order(sizeof(struct location) * max);
4483 
4484 	l = (void *)__get_free_pages(flags, order);
4485 	if (!l)
4486 		return 0;
4487 
4488 	if (t->count) {
4489 		memcpy(l, t->loc, sizeof(struct location) * t->count);
4490 		free_loc_track(t);
4491 	}
4492 	t->max = max;
4493 	t->loc = l;
4494 	return 1;
4495 }
4496 
4497 static int add_location(struct loc_track *t, struct kmem_cache *s,
4498 				const struct track *track)
4499 {
4500 	long start, end, pos;
4501 	struct location *l;
4502 	unsigned long caddr;
4503 	unsigned long age = jiffies - track->when;
4504 
4505 	start = -1;
4506 	end = t->count;
4507 
4508 	for ( ; ; ) {
4509 		pos = start + (end - start + 1) / 2;
4510 
4511 		/*
4512 		 * There is nothing at "end". If we end up there
4513 		 * we need to add something before it.
4514 		 */
4515 		if (pos == end)
4516 			break;
4517 
4518 		caddr = t->loc[pos].addr;
4519 		if (track->addr == caddr) {
4520 
4521 			l = &t->loc[pos];
4522 			l->count++;
4523 			if (track->when) {
4524 				l->sum_time += age;
4525 				if (age < l->min_time)
4526 					l->min_time = age;
4527 				if (age > l->max_time)
4528 					l->max_time = age;
4529 
4530 				if (track->pid < l->min_pid)
4531 					l->min_pid = track->pid;
4532 				if (track->pid > l->max_pid)
4533 					l->max_pid = track->pid;
4534 
4535 				cpumask_set_cpu(track->cpu,
4536 						to_cpumask(l->cpus));
4537 			}
4538 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4539 			return 1;
4540 		}
4541 
4542 		if (track->addr < caddr)
4543 			end = pos;
4544 		else
4545 			start = pos;
4546 	}
4547 
4548 	/*
4549 	 * Not found. Insert new tracking element.
4550 	 */
4551 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4552 		return 0;
4553 
4554 	l = t->loc + pos;
4555 	if (pos < t->count)
4556 		memmove(l + 1, l,
4557 			(t->count - pos) * sizeof(struct location));
4558 	t->count++;
4559 	l->count = 1;
4560 	l->addr = track->addr;
4561 	l->sum_time = age;
4562 	l->min_time = age;
4563 	l->max_time = age;
4564 	l->min_pid = track->pid;
4565 	l->max_pid = track->pid;
4566 	cpumask_clear(to_cpumask(l->cpus));
4567 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4568 	nodes_clear(l->nodes);
4569 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4570 	return 1;
4571 }
4572 
4573 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4574 		struct page *page, enum track_item alloc,
4575 		unsigned long *map)
4576 {
4577 	void *addr = page_address(page);
4578 	void *p;
4579 
4580 	bitmap_zero(map, page->objects);
4581 	get_map(s, page, map);
4582 
4583 	for_each_object(p, s, addr, page->objects)
4584 		if (!test_bit(slab_index(p, s, addr), map))
4585 			add_location(t, s, get_track(s, p, alloc));
4586 }
4587 
4588 static int list_locations(struct kmem_cache *s, char *buf,
4589 					enum track_item alloc)
4590 {
4591 	int len = 0;
4592 	unsigned long i;
4593 	struct loc_track t = { 0, 0, NULL };
4594 	int node;
4595 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4596 				     sizeof(unsigned long), GFP_KERNEL);
4597 	struct kmem_cache_node *n;
4598 
4599 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4600 				     GFP_KERNEL)) {
4601 		kfree(map);
4602 		return sprintf(buf, "Out of memory\n");
4603 	}
4604 	/* Push back cpu slabs */
4605 	flush_all(s);
4606 
4607 	for_each_kmem_cache_node(s, node, n) {
4608 		unsigned long flags;
4609 		struct page *page;
4610 
4611 		if (!atomic_long_read(&n->nr_slabs))
4612 			continue;
4613 
4614 		spin_lock_irqsave(&n->list_lock, flags);
4615 		list_for_each_entry(page, &n->partial, lru)
4616 			process_slab(&t, s, page, alloc, map);
4617 		list_for_each_entry(page, &n->full, lru)
4618 			process_slab(&t, s, page, alloc, map);
4619 		spin_unlock_irqrestore(&n->list_lock, flags);
4620 	}
4621 
4622 	for (i = 0; i < t.count; i++) {
4623 		struct location *l = &t.loc[i];
4624 
4625 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4626 			break;
4627 		len += sprintf(buf + len, "%7ld ", l->count);
4628 
4629 		if (l->addr)
4630 			len += sprintf(buf + len, "%pS", (void *)l->addr);
4631 		else
4632 			len += sprintf(buf + len, "<not-available>");
4633 
4634 		if (l->sum_time != l->min_time) {
4635 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4636 				l->min_time,
4637 				(long)div_u64(l->sum_time, l->count),
4638 				l->max_time);
4639 		} else
4640 			len += sprintf(buf + len, " age=%ld",
4641 				l->min_time);
4642 
4643 		if (l->min_pid != l->max_pid)
4644 			len += sprintf(buf + len, " pid=%ld-%ld",
4645 				l->min_pid, l->max_pid);
4646 		else
4647 			len += sprintf(buf + len, " pid=%ld",
4648 				l->min_pid);
4649 
4650 		if (num_online_cpus() > 1 &&
4651 				!cpumask_empty(to_cpumask(l->cpus)) &&
4652 				len < PAGE_SIZE - 60)
4653 			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4654 					 " cpus=%*pbl",
4655 					 cpumask_pr_args(to_cpumask(l->cpus)));
4656 
4657 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4658 				len < PAGE_SIZE - 60)
4659 			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4660 					 " nodes=%*pbl",
4661 					 nodemask_pr_args(&l->nodes));
4662 
4663 		len += sprintf(buf + len, "\n");
4664 	}
4665 
4666 	free_loc_track(&t);
4667 	kfree(map);
4668 	if (!t.count)
4669 		len += sprintf(buf, "No data\n");
4670 	return len;
4671 }
4672 #endif
4673 
4674 #ifdef SLUB_RESILIENCY_TEST
4675 static void __init resiliency_test(void)
4676 {
4677 	u8 *p;
4678 
4679 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4680 
4681 	pr_err("SLUB resiliency testing\n");
4682 	pr_err("-----------------------\n");
4683 	pr_err("A. Corruption after allocation\n");
4684 
4685 	p = kzalloc(16, GFP_KERNEL);
4686 	p[16] = 0x12;
4687 	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4688 	       p + 16);
4689 
4690 	validate_slab_cache(kmalloc_caches[4]);
4691 
4692 	/* Hmmm... The next two are dangerous */
4693 	p = kzalloc(32, GFP_KERNEL);
4694 	p[32 + sizeof(void *)] = 0x34;
4695 	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4696 	       p);
4697 	pr_err("If allocated object is overwritten then not detectable\n\n");
4698 
4699 	validate_slab_cache(kmalloc_caches[5]);
4700 	p = kzalloc(64, GFP_KERNEL);
4701 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4702 	*p = 0x56;
4703 	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4704 	       p);
4705 	pr_err("If allocated object is overwritten then not detectable\n\n");
4706 	validate_slab_cache(kmalloc_caches[6]);
4707 
4708 	pr_err("\nB. Corruption after free\n");
4709 	p = kzalloc(128, GFP_KERNEL);
4710 	kfree(p);
4711 	*p = 0x78;
4712 	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4713 	validate_slab_cache(kmalloc_caches[7]);
4714 
4715 	p = kzalloc(256, GFP_KERNEL);
4716 	kfree(p);
4717 	p[50] = 0x9a;
4718 	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4719 	validate_slab_cache(kmalloc_caches[8]);
4720 
4721 	p = kzalloc(512, GFP_KERNEL);
4722 	kfree(p);
4723 	p[512] = 0xab;
4724 	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4725 	validate_slab_cache(kmalloc_caches[9]);
4726 }
4727 #else
4728 #ifdef CONFIG_SYSFS
4729 static void resiliency_test(void) {};
4730 #endif
4731 #endif
4732 
4733 #ifdef CONFIG_SYSFS
4734 enum slab_stat_type {
4735 	SL_ALL,			/* All slabs */
4736 	SL_PARTIAL,		/* Only partially allocated slabs */
4737 	SL_CPU,			/* Only slabs used for cpu caches */
4738 	SL_OBJECTS,		/* Determine allocated objects not slabs */
4739 	SL_TOTAL		/* Determine object capacity not slabs */
4740 };
4741 
4742 #define SO_ALL		(1 << SL_ALL)
4743 #define SO_PARTIAL	(1 << SL_PARTIAL)
4744 #define SO_CPU		(1 << SL_CPU)
4745 #define SO_OBJECTS	(1 << SL_OBJECTS)
4746 #define SO_TOTAL	(1 << SL_TOTAL)
4747 
4748 #ifdef CONFIG_MEMCG
4749 static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4750 
4751 static int __init setup_slub_memcg_sysfs(char *str)
4752 {
4753 	int v;
4754 
4755 	if (get_option(&str, &v) > 0)
4756 		memcg_sysfs_enabled = v;
4757 
4758 	return 1;
4759 }
4760 
4761 __setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4762 #endif
4763 
4764 static ssize_t show_slab_objects(struct kmem_cache *s,
4765 			    char *buf, unsigned long flags)
4766 {
4767 	unsigned long total = 0;
4768 	int node;
4769 	int x;
4770 	unsigned long *nodes;
4771 
4772 	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4773 	if (!nodes)
4774 		return -ENOMEM;
4775 
4776 	if (flags & SO_CPU) {
4777 		int cpu;
4778 
4779 		for_each_possible_cpu(cpu) {
4780 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4781 							       cpu);
4782 			int node;
4783 			struct page *page;
4784 
4785 			page = READ_ONCE(c->page);
4786 			if (!page)
4787 				continue;
4788 
4789 			node = page_to_nid(page);
4790 			if (flags & SO_TOTAL)
4791 				x = page->objects;
4792 			else if (flags & SO_OBJECTS)
4793 				x = page->inuse;
4794 			else
4795 				x = 1;
4796 
4797 			total += x;
4798 			nodes[node] += x;
4799 
4800 			page = slub_percpu_partial_read_once(c);
4801 			if (page) {
4802 				node = page_to_nid(page);
4803 				if (flags & SO_TOTAL)
4804 					WARN_ON_ONCE(1);
4805 				else if (flags & SO_OBJECTS)
4806 					WARN_ON_ONCE(1);
4807 				else
4808 					x = page->pages;
4809 				total += x;
4810 				nodes[node] += x;
4811 			}
4812 		}
4813 	}
4814 
4815 	get_online_mems();
4816 #ifdef CONFIG_SLUB_DEBUG
4817 	if (flags & SO_ALL) {
4818 		struct kmem_cache_node *n;
4819 
4820 		for_each_kmem_cache_node(s, node, n) {
4821 
4822 			if (flags & SO_TOTAL)
4823 				x = atomic_long_read(&n->total_objects);
4824 			else if (flags & SO_OBJECTS)
4825 				x = atomic_long_read(&n->total_objects) -
4826 					count_partial(n, count_free);
4827 			else
4828 				x = atomic_long_read(&n->nr_slabs);
4829 			total += x;
4830 			nodes[node] += x;
4831 		}
4832 
4833 	} else
4834 #endif
4835 	if (flags & SO_PARTIAL) {
4836 		struct kmem_cache_node *n;
4837 
4838 		for_each_kmem_cache_node(s, node, n) {
4839 			if (flags & SO_TOTAL)
4840 				x = count_partial(n, count_total);
4841 			else if (flags & SO_OBJECTS)
4842 				x = count_partial(n, count_inuse);
4843 			else
4844 				x = n->nr_partial;
4845 			total += x;
4846 			nodes[node] += x;
4847 		}
4848 	}
4849 	x = sprintf(buf, "%lu", total);
4850 #ifdef CONFIG_NUMA
4851 	for (node = 0; node < nr_node_ids; node++)
4852 		if (nodes[node])
4853 			x += sprintf(buf + x, " N%d=%lu",
4854 					node, nodes[node]);
4855 #endif
4856 	put_online_mems();
4857 	kfree(nodes);
4858 	return x + sprintf(buf + x, "\n");
4859 }
4860 
4861 #ifdef CONFIG_SLUB_DEBUG
4862 static int any_slab_objects(struct kmem_cache *s)
4863 {
4864 	int node;
4865 	struct kmem_cache_node *n;
4866 
4867 	for_each_kmem_cache_node(s, node, n)
4868 		if (atomic_long_read(&n->total_objects))
4869 			return 1;
4870 
4871 	return 0;
4872 }
4873 #endif
4874 
4875 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4876 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4877 
4878 struct slab_attribute {
4879 	struct attribute attr;
4880 	ssize_t (*show)(struct kmem_cache *s, char *buf);
4881 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4882 };
4883 
4884 #define SLAB_ATTR_RO(_name) \
4885 	static struct slab_attribute _name##_attr = \
4886 	__ATTR(_name, 0400, _name##_show, NULL)
4887 
4888 #define SLAB_ATTR(_name) \
4889 	static struct slab_attribute _name##_attr =  \
4890 	__ATTR(_name, 0600, _name##_show, _name##_store)
4891 
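/*
 * Illustrative sketch (the attribute below is hypothetical and kept under
 * "#if 0"; it is not part of the real sysfs interface): a new read-only
 * file only needs a _show() helper plus SLAB_ATTR_RO(), and an entry in
 * slab_attrs[] further down to become visible under /sys/kernel/slab/.
 */
#if 0
static ssize_t example_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->object_size);
}
SLAB_ATTR_RO(example);
#endif
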
4892 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4893 {
4894 	return sprintf(buf, "%d\n", s->size);
4895 }
4896 SLAB_ATTR_RO(slab_size);
4897 
4898 static ssize_t align_show(struct kmem_cache *s, char *buf)
4899 {
4900 	return sprintf(buf, "%d\n", s->align);
4901 }
4902 SLAB_ATTR_RO(align);
4903 
4904 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4905 {
4906 	return sprintf(buf, "%d\n", s->object_size);
4907 }
4908 SLAB_ATTR_RO(object_size);
4909 
4910 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4911 {
4912 	return sprintf(buf, "%d\n", oo_objects(s->oo));
4913 }
4914 SLAB_ATTR_RO(objs_per_slab);
4915 
4916 static ssize_t order_store(struct kmem_cache *s,
4917 				const char *buf, size_t length)
4918 {
4919 	unsigned long order;
4920 	int err;
4921 
4922 	err = kstrtoul(buf, 10, &order);
4923 	if (err)
4924 		return err;
4925 
4926 	if (order > slub_max_order || order < slub_min_order)
4927 		return -EINVAL;
4928 
4929 	calculate_sizes(s, order);
4930 	return length;
4931 }
4932 
4933 static ssize_t order_show(struct kmem_cache *s, char *buf)
4934 {
4935 	return sprintf(buf, "%d\n", oo_order(s->oo));
4936 }
4937 SLAB_ATTR(order);
4938 
4939 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4940 {
4941 	return sprintf(buf, "%lu\n", s->min_partial);
4942 }
4943 
4944 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4945 				 size_t length)
4946 {
4947 	unsigned long min;
4948 	int err;
4949 
4950 	err = kstrtoul(buf, 10, &min);
4951 	if (err)
4952 		return err;
4953 
4954 	set_min_partial(s, min);
4955 	return length;
4956 }
4957 SLAB_ATTR(min_partial);
4958 
4959 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4960 {
4961 	return sprintf(buf, "%u\n", slub_cpu_partial(s));
4962 }
4963 
4964 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4965 				 size_t length)
4966 {
4967 	unsigned long objects;
4968 	int err;
4969 
4970 	err = kstrtoul(buf, 10, &objects);
4971 	if (err)
4972 		return err;
4973 	if (objects && !kmem_cache_has_cpu_partial(s))
4974 		return -EINVAL;
4975 
4976 	slub_set_cpu_partial(s, objects);
4977 	flush_all(s);
4978 	return length;
4979 }
4980 SLAB_ATTR(cpu_partial);
4981 
4982 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4983 {
4984 	if (!s->ctor)
4985 		return 0;
4986 	return sprintf(buf, "%pS\n", s->ctor);
4987 }
4988 SLAB_ATTR_RO(ctor);
4989 
4990 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4991 {
4992 	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
4993 }
4994 SLAB_ATTR_RO(aliases);
4995 
4996 static ssize_t partial_show(struct kmem_cache *s, char *buf)
4997 {
4998 	return show_slab_objects(s, buf, SO_PARTIAL);
4999 }
5000 SLAB_ATTR_RO(partial);
5001 
5002 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5003 {
5004 	return show_slab_objects(s, buf, SO_CPU);
5005 }
5006 SLAB_ATTR_RO(cpu_slabs);
5007 
5008 static ssize_t objects_show(struct kmem_cache *s, char *buf)
5009 {
5010 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5011 }
5012 SLAB_ATTR_RO(objects);
5013 
5014 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5015 {
5016 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5017 }
5018 SLAB_ATTR_RO(objects_partial);
5019 
5020 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5021 {
5022 	int objects = 0;
5023 	int pages = 0;
5024 	int cpu;
5025 	int len;
5026 
5027 	for_each_online_cpu(cpu) {
5028 		struct page *page;
5029 
5030 		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5031 
5032 		if (page) {
5033 			pages += page->pages;
5034 			objects += page->pobjects;
5035 		}
5036 	}
5037 
5038 	len = sprintf(buf, "%d(%d)", objects, pages);
5039 
5040 #ifdef CONFIG_SMP
5041 	for_each_online_cpu(cpu) {
5042 		struct page *page;
5043 
5044 		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5045 
5046 		if (page && len < PAGE_SIZE - 20)
5047 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5048 				page->pobjects, page->pages);
5049 	}
5050 #endif
5051 	return len + sprintf(buf + len, "\n");
5052 }
5053 SLAB_ATTR_RO(slabs_cpu_partial);
5054 
5055 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5056 {
5057 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5058 }
5059 
5060 static ssize_t reclaim_account_store(struct kmem_cache *s,
5061 				const char *buf, size_t length)
5062 {
5063 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5064 	if (buf[0] == '1')
5065 		s->flags |= SLAB_RECLAIM_ACCOUNT;
5066 	return length;
5067 }
5068 SLAB_ATTR(reclaim_account);
5069 
5070 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5071 {
5072 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5073 }
5074 SLAB_ATTR_RO(hwcache_align);
5075 
5076 #ifdef CONFIG_ZONE_DMA
5077 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5078 {
5079 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5080 }
5081 SLAB_ATTR_RO(cache_dma);
5082 #endif
5083 
5084 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5085 {
5086 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5087 }
5088 SLAB_ATTR_RO(destroy_by_rcu);
5089 
5090 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
5091 {
5092 	return sprintf(buf, "%d\n", s->reserved);
5093 }
5094 SLAB_ATTR_RO(reserved);
5095 
5096 #ifdef CONFIG_SLUB_DEBUG
5097 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5098 {
5099 	return show_slab_objects(s, buf, SO_ALL);
5100 }
5101 SLAB_ATTR_RO(slabs);
5102 
5103 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5104 {
5105 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5106 }
5107 SLAB_ATTR_RO(total_objects);
5108 
5109 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5110 {
5111 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5112 }
5113 
5114 static ssize_t sanity_checks_store(struct kmem_cache *s,
5115 				const char *buf, size_t length)
5116 {
5117 	s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5118 	if (buf[0] == '1') {
5119 		s->flags &= ~__CMPXCHG_DOUBLE;
5120 		s->flags |= SLAB_CONSISTENCY_CHECKS;
5121 	}
5122 	return length;
5123 }
5124 SLAB_ATTR(sanity_checks);
5125 
5126 static ssize_t trace_show(struct kmem_cache *s, char *buf)
5127 {
5128 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5129 }
5130 
5131 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5132 							size_t length)
5133 {
5134 	/*
5135 	 * Tracing a merged cache is going to give confusing results
5136 	 * as well as cause other issues like converting a mergeable
5137 	 * cache into an unmergeable one.
5138 	 */
5139 	if (s->refcount > 1)
5140 		return -EINVAL;
5141 
5142 	s->flags &= ~SLAB_TRACE;
5143 	if (buf[0] == '1') {
5144 		s->flags &= ~__CMPXCHG_DOUBLE;
5145 		s->flags |= SLAB_TRACE;
5146 	}
5147 	return length;
5148 }
5149 SLAB_ATTR(trace);
5150 
5151 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5152 {
5153 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5154 }
5155 
5156 static ssize_t red_zone_store(struct kmem_cache *s,
5157 				const char *buf, size_t length)
5158 {
5159 	if (any_slab_objects(s))
5160 		return -EBUSY;
5161 
5162 	s->flags &= ~SLAB_RED_ZONE;
5163 	if (buf[0] == '1') {
5164 		s->flags |= SLAB_RED_ZONE;
5165 	}
5166 	calculate_sizes(s, -1);
5167 	return length;
5168 }
5169 SLAB_ATTR(red_zone);
5170 
5171 static ssize_t poison_show(struct kmem_cache *s, char *buf)
5172 {
5173 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5174 }
5175 
5176 static ssize_t poison_store(struct kmem_cache *s,
5177 				const char *buf, size_t length)
5178 {
5179 	if (any_slab_objects(s))
5180 		return -EBUSY;
5181 
5182 	s->flags &= ~SLAB_POISON;
5183 	if (buf[0] == '1') {
5184 		s->flags |= SLAB_POISON;
5185 	}
5186 	calculate_sizes(s, -1);
5187 	return length;
5188 }
5189 SLAB_ATTR(poison);
5190 
5191 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5192 {
5193 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5194 }
5195 
5196 static ssize_t store_user_store(struct kmem_cache *s,
5197 				const char *buf, size_t length)
5198 {
5199 	if (any_slab_objects(s))
5200 		return -EBUSY;
5201 
5202 	s->flags &= ~SLAB_STORE_USER;
5203 	if (buf[0] == '1') {
5204 		s->flags &= ~__CMPXCHG_DOUBLE;
5205 		s->flags |= SLAB_STORE_USER;
5206 	}
5207 	calculate_sizes(s, -1);
5208 	return length;
5209 }
5210 SLAB_ATTR(store_user);
5211 
5212 static ssize_t validate_show(struct kmem_cache *s, char *buf)
5213 {
5214 	return 0;
5215 }
5216 
5217 static ssize_t validate_store(struct kmem_cache *s,
5218 			const char *buf, size_t length)
5219 {
5220 	int ret = -EINVAL;
5221 
5222 	if (buf[0] == '1') {
5223 		ret = validate_slab_cache(s);
5224 		if (ret >= 0)
5225 			ret = length;
5226 	}
5227 	return ret;
5228 }
5229 SLAB_ATTR(validate);
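
/*
 * Illustrative usage (cache name is an example): writing '1' to the
 * validate attribute walks the cache's slabs and checks their freelists
 * and object state, e.g.
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-64/validate
 */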
5230 
5231 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5232 {
5233 	if (!(s->flags & SLAB_STORE_USER))
5234 		return -ENOSYS;
5235 	return list_locations(s, buf, TRACK_ALLOC);
5236 }
5237 SLAB_ATTR_RO(alloc_calls);
5238 
5239 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5240 {
5241 	if (!(s->flags & SLAB_STORE_USER))
5242 		return -ENOSYS;
5243 	return list_locations(s, buf, TRACK_FREE);
5244 }
5245 SLAB_ATTR_RO(free_calls);
5246 #endif /* CONFIG_SLUB_DEBUG */
5247 
5248 #ifdef CONFIG_FAILSLAB
5249 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5250 {
5251 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5252 }
5253 
5254 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5255 							size_t length)
5256 {
5257 	if (s->refcount > 1)
5258 		return -EINVAL;
5259 
5260 	s->flags &= ~SLAB_FAILSLAB;
5261 	if (buf[0] == '1')
5262 		s->flags |= SLAB_FAILSLAB;
5263 	return length;
5264 }
5265 SLAB_ATTR(failslab);
5266 #endif
5267 
5268 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5269 {
5270 	return 0;
5271 }
5272 
5273 static ssize_t shrink_store(struct kmem_cache *s,
5274 			const char *buf, size_t length)
5275 {
5276 	if (buf[0] == '1')
5277 		kmem_cache_shrink(s);
5278 	else
5279 		return -EINVAL;
5280 	return length;
5281 }
5282 SLAB_ATTR(shrink);
5283 
5284 #ifdef CONFIG_NUMA
5285 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5286 {
5287 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
5288 }
5289 
5290 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5291 				const char *buf, size_t length)
5292 {
5293 	unsigned long ratio;
5294 	int err;
5295 
5296 	err = kstrtoul(buf, 10, &ratio);
5297 	if (err)
5298 		return err;
5299 
5300 	if (ratio <= 100)
5301 		s->remote_node_defrag_ratio = ratio * 10;
5302 
5303 	return length;
5304 }
5305 SLAB_ATTR(remote_node_defrag_ratio);
5306 #endif
5307 
5308 #ifdef CONFIG_SLUB_STATS
5309 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5310 {
5311 	unsigned long sum  = 0;
5312 	int cpu;
5313 	int len;
5314 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5315 
5316 	if (!data)
5317 		return -ENOMEM;
5318 
5319 	for_each_online_cpu(cpu) {
5320 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5321 
5322 		data[cpu] = x;
5323 		sum += x;
5324 	}
5325 
5326 	len = sprintf(buf, "%lu", sum);
5327 
5328 #ifdef CONFIG_SMP
5329 	for_each_online_cpu(cpu) {
5330 		if (data[cpu] && len < PAGE_SIZE - 20)
5331 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5332 	}
5333 #endif
5334 	kfree(data);
5335 	return len + sprintf(buf + len, "\n");
5336 }
5337 
5338 static void clear_stat(struct kmem_cache *s, enum stat_item si)
5339 {
5340 	int cpu;
5341 
5342 	for_each_online_cpu(cpu)
5343 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5344 }
5345 
5346 #define STAT_ATTR(si, text) 					\
5347 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5348 {								\
5349 	return show_stat(s, buf, si);				\
5350 }								\
5351 static ssize_t text##_store(struct kmem_cache *s,		\
5352 				const char *buf, size_t length)	\
5353 {								\
5354 	if (buf[0] != '0')					\
5355 		return -EINVAL;					\
5356 	clear_stat(s, si);					\
5357 	return length;						\
5358 }								\
5359 SLAB_ATTR(text);						\
5360 
5361 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5362 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5363 STAT_ATTR(FREE_FASTPATH, free_fastpath);
5364 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5365 STAT_ATTR(FREE_FROZEN, free_frozen);
5366 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5367 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5368 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5369 STAT_ATTR(ALLOC_SLAB, alloc_slab);
5370 STAT_ATTR(ALLOC_REFILL, alloc_refill);
5371 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5372 STAT_ATTR(FREE_SLAB, free_slab);
5373 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5374 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5375 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5376 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5377 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5378 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5379 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5380 STAT_ATTR(ORDER_FALLBACK, order_fallback);
5381 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5382 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5383 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5384 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5385 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5386 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5387 #endif
5388 
5389 static struct attribute *slab_attrs[] = {
5390 	&slab_size_attr.attr,
5391 	&object_size_attr.attr,
5392 	&objs_per_slab_attr.attr,
5393 	&order_attr.attr,
5394 	&min_partial_attr.attr,
5395 	&cpu_partial_attr.attr,
5396 	&objects_attr.attr,
5397 	&objects_partial_attr.attr,
5398 	&partial_attr.attr,
5399 	&cpu_slabs_attr.attr,
5400 	&ctor_attr.attr,
5401 	&aliases_attr.attr,
5402 	&align_attr.attr,
5403 	&hwcache_align_attr.attr,
5404 	&reclaim_account_attr.attr,
5405 	&destroy_by_rcu_attr.attr,
5406 	&shrink_attr.attr,
5407 	&reserved_attr.attr,
5408 	&slabs_cpu_partial_attr.attr,
5409 #ifdef CONFIG_SLUB_DEBUG
5410 	&total_objects_attr.attr,
5411 	&slabs_attr.attr,
5412 	&sanity_checks_attr.attr,
5413 	&trace_attr.attr,
5414 	&red_zone_attr.attr,
5415 	&poison_attr.attr,
5416 	&store_user_attr.attr,
5417 	&validate_attr.attr,
5418 	&alloc_calls_attr.attr,
5419 	&free_calls_attr.attr,
5420 #endif
5421 #ifdef CONFIG_ZONE_DMA
5422 	&cache_dma_attr.attr,
5423 #endif
5424 #ifdef CONFIG_NUMA
5425 	&remote_node_defrag_ratio_attr.attr,
5426 #endif
5427 #ifdef CONFIG_SLUB_STATS
5428 	&alloc_fastpath_attr.attr,
5429 	&alloc_slowpath_attr.attr,
5430 	&free_fastpath_attr.attr,
5431 	&free_slowpath_attr.attr,
5432 	&free_frozen_attr.attr,
5433 	&free_add_partial_attr.attr,
5434 	&free_remove_partial_attr.attr,
5435 	&alloc_from_partial_attr.attr,
5436 	&alloc_slab_attr.attr,
5437 	&alloc_refill_attr.attr,
5438 	&alloc_node_mismatch_attr.attr,
5439 	&free_slab_attr.attr,
5440 	&cpuslab_flush_attr.attr,
5441 	&deactivate_full_attr.attr,
5442 	&deactivate_empty_attr.attr,
5443 	&deactivate_to_head_attr.attr,
5444 	&deactivate_to_tail_attr.attr,
5445 	&deactivate_remote_frees_attr.attr,
5446 	&deactivate_bypass_attr.attr,
5447 	&order_fallback_attr.attr,
5448 	&cmpxchg_double_fail_attr.attr,
5449 	&cmpxchg_double_cpu_fail_attr.attr,
5450 	&cpu_partial_alloc_attr.attr,
5451 	&cpu_partial_free_attr.attr,
5452 	&cpu_partial_node_attr.attr,
5453 	&cpu_partial_drain_attr.attr,
5454 #endif
5455 #ifdef CONFIG_FAILSLAB
5456 	&failslab_attr.attr,
5457 #endif
5458 
5459 	NULL
5460 };
5461 
5462 static const struct attribute_group slab_attr_group = {
5463 	.attrs = slab_attrs,
5464 };
5465 
5466 static ssize_t slab_attr_show(struct kobject *kobj,
5467 				struct attribute *attr,
5468 				char *buf)
5469 {
5470 	struct slab_attribute *attribute;
5471 	struct kmem_cache *s;
5472 	int err;
5473 
5474 	attribute = to_slab_attr(attr);
5475 	s = to_slab(kobj);
5476 
5477 	if (!attribute->show)
5478 		return -EIO;
5479 
5480 	err = attribute->show(s, buf);
5481 
5482 	return err;
5483 }
5484 
5485 static ssize_t slab_attr_store(struct kobject *kobj,
5486 				struct attribute *attr,
5487 				const char *buf, size_t len)
5488 {
5489 	struct slab_attribute *attribute;
5490 	struct kmem_cache *s;
5491 	int err;
5492 
5493 	attribute = to_slab_attr(attr);
5494 	s = to_slab(kobj);
5495 
5496 	if (!attribute->store)
5497 		return -EIO;
5498 
5499 	err = attribute->store(s, buf, len);
5500 #ifdef CONFIG_MEMCG
5501 	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5502 		struct kmem_cache *c;
5503 
5504 		mutex_lock(&slab_mutex);
5505 		if (s->max_attr_size < len)
5506 			s->max_attr_size = len;
5507 
5508 		/*
5509 		 * This is a best effort propagation, so this function's return
5510 		 * value will be determined by the parent cache only. This is
5511 		 * basically because not all attributes will have a well
5512 		 * defined semantics for rollbacks - most of the actions will
5513 		 * have permanent effects.
5514 		 *
5515 		 * Returning the error value of any of the children that fail
5516 		 * is not 100 % defined, in the sense that users seeing the
5517 		 * error code won't be able to know anything about the state of
5518 		 * the cache.
5519 		 *
5520 		 * Only returning the error code for the parent cache at least
5521 		 * has well defined semantics. The cache being written to
5522 		 * directly either failed or succeeded, in which case we loop
5523 		 * through the descendants with best-effort propagation.
5524 		 */
5525 		for_each_memcg_cache(c, s)
5526 			attribute->store(c, buf, len);
5527 		mutex_unlock(&slab_mutex);
5528 	}
5529 #endif
5530 	return err;
5531 }
5532 
5533 static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5534 {
5535 #ifdef CONFIG_MEMCG
5536 	int i;
5537 	char *buffer = NULL;
5538 	struct kmem_cache *root_cache;
5539 
5540 	if (is_root_cache(s))
5541 		return;
5542 
5543 	root_cache = s->memcg_params.root_cache;
5544 
5545 	/*
5546 	 * This means this cache had no attributes written to it. Therefore,
5547 	 * there is no point in copying default values around.
5548 	 */
5549 	if (!root_cache->max_attr_size)
5550 		return;
5551 
5552 	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5553 		char mbuf[64];
5554 		char *buf;
5555 		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5556 		ssize_t len;
5557 
5558 		if (!attr || !attr->store || !attr->show)
5559 			continue;
5560 
5561 		/*
5562 		 * It is really bad that we have to allocate here, so we will
5563 		 * do it only as a fallback. If we actually allocate, though,
5564 		 * we can just use the allocated buffer until the end.
5565 		 *
5566 		 * Most of the slub attributes will tend to be very small in
5567 		 * size, but sysfs allows buffers up to a page, so larger
5568 		 * buffers can theoretically be needed.
5569 		 */
5570 		if (buffer)
5571 			buf = buffer;
5572 		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
5573 			buf = mbuf;
5574 		else {
5575 			buffer = (char *) get_zeroed_page(GFP_KERNEL);
5576 			if (WARN_ON(!buffer))
5577 				continue;
5578 			buf = buffer;
5579 		}
5580 
5581 		len = attr->show(root_cache, buf);
5582 		if (len > 0)
5583 			attr->store(s, buf, len);
5584 	}
5585 
5586 	if (buffer)
5587 		free_page((unsigned long)buffer);
5588 #endif
5589 }
5590 
5591 static void kmem_cache_release(struct kobject *k)
5592 {
5593 	slab_kmem_cache_release(to_slab(k));
5594 }
5595 
5596 static const struct sysfs_ops slab_sysfs_ops = {
5597 	.show = slab_attr_show,
5598 	.store = slab_attr_store,
5599 };
5600 
5601 static struct kobj_type slab_ktype = {
5602 	.sysfs_ops = &slab_sysfs_ops,
5603 	.release = kmem_cache_release,
5604 };
5605 
5606 static int uevent_filter(struct kset *kset, struct kobject *kobj)
5607 {
5608 	struct kobj_type *ktype = get_ktype(kobj);
5609 
5610 	if (ktype == &slab_ktype)
5611 		return 1;
5612 	return 0;
5613 }
5614 
5615 static const struct kset_uevent_ops slab_uevent_ops = {
5616 	.filter = uevent_filter,
5617 };
5618 
5619 static struct kset *slab_kset;
5620 
5621 static inline struct kset *cache_kset(struct kmem_cache *s)
5622 {
5623 #ifdef CONFIG_MEMCG
5624 	if (!is_root_cache(s))
5625 		return s->memcg_params.root_cache->memcg_kset;
5626 #endif
5627 	return slab_kset;
5628 }
5629 
5630 #define ID_STR_LENGTH 64
5631 
5632 /* Create a unique string id for a slab cache:
5633  *
5634  * Format	:[flags-]size
5635  */
5636 static char *create_unique_id(struct kmem_cache *s)
5637 {
5638 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5639 	char *p = name;
5640 
5641 	BUG_ON(!name);
5642 
5643 	*p++ = ':';
5644 	/*
5645 	 * First flags affecting slabcache operations. We will only
5646 	 * get here for aliasable slabs so we do not need to support
5647 	 * too many flags. The flags here must cover all flags that
5648 	 * are matched during merging to guarantee that the id is
5649 	 * unique.
5650 	 */
5651 	if (s->flags & SLAB_CACHE_DMA)
5652 		*p++ = 'd';
5653 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5654 		*p++ = 'a';
5655 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
5656 		*p++ = 'F';
5657 	if (!(s->flags & SLAB_NOTRACK))
5658 		*p++ = 't';
5659 	if (s->flags & SLAB_ACCOUNT)
5660 		*p++ = 'A';
5661 	if (p != name + 1)
5662 		*p++ = '-';
5663 	p += sprintf(p, "%07d", s->size);
5664 
5665 	BUG_ON(p > name + ID_STR_LENGTH - 1);
5666 	return name;
5667 }
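
/*
 * Worked example (flags and size invented): a DMA cache with reclaim
 * accounting that is not marked SLAB_NOTRACK and has s->size == 192 gets
 * the id ":dat-0000192" from the function above, while a cache with none
 * of the listed flag characters collapses to just ":0000192".
 */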
5668 
5669 static void sysfs_slab_remove_workfn(struct work_struct *work)
5670 {
5671 	struct kmem_cache *s =
5672 		container_of(work, struct kmem_cache, kobj_remove_work);
5673 
5674 	if (!s->kobj.state_in_sysfs)
5675 		/*
5676 		 * For a memcg cache, this may be called during
5677 		 * deactivation and again on shutdown.  Remove only once.
5678 		 * A cache is never shut down before deactivation is
5679 		 * complete, so no need to worry about synchronization.
5680 		 */
5681 		goto out;
5682 
5683 #ifdef CONFIG_MEMCG
5684 	kset_unregister(s->memcg_kset);
5685 #endif
5686 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
5687 	kobject_del(&s->kobj);
5688 out:
5689 	kobject_put(&s->kobj);
5690 }
5691 
5692 static int sysfs_slab_add(struct kmem_cache *s)
5693 {
5694 	int err;
5695 	const char *name;
5696 	struct kset *kset = cache_kset(s);
5697 	int unmergeable = slab_unmergeable(s);
5698 
5699 	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5700 
5701 	if (!kset) {
5702 		kobject_init(&s->kobj, &slab_ktype);
5703 		return 0;
5704 	}
5705 
5706 	if (unmergeable) {
5707 		/*
5708 		 * Slabcache can never be merged so we can use the name proper.
5709 		 * This is typically the case for debug situations. In that
5710 		 * case we can catch duplicate names easily.
5711 		 */
5712 		sysfs_remove_link(&slab_kset->kobj, s->name);
5713 		name = s->name;
5714 	} else {
5715 		/*
5716 		 * Create a unique name for the slab as a target
5717 		 * for the symlinks.
5718 		 */
5719 		name = create_unique_id(s);
5720 	}
5721 
5722 	s->kobj.kset = kset;
5723 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5724 	if (err)
5725 		goto out;
5726 
5727 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5728 	if (err)
5729 		goto out_del_kobj;
5730 
5731 #ifdef CONFIG_MEMCG
5732 	if (is_root_cache(s) && memcg_sysfs_enabled) {
5733 		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5734 		if (!s->memcg_kset) {
5735 			err = -ENOMEM;
5736 			goto out_del_kobj;
5737 		}
5738 	}
5739 #endif
5740 
5741 	kobject_uevent(&s->kobj, KOBJ_ADD);
5742 	if (!unmergeable) {
5743 		/* Setup first alias */
5744 		sysfs_slab_alias(s, s->name);
5745 	}
5746 out:
5747 	if (!unmergeable)
5748 		kfree(name);
5749 	return err;
5750 out_del_kobj:
5751 	kobject_del(&s->kobj);
5752 	goto out;
5753 }
5754 
5755 static void sysfs_slab_remove(struct kmem_cache *s)
5756 {
5757 	if (slab_state < FULL)
5758 		/*
5759 		 * Sysfs has not been setup yet so no need to remove the
5760 		 * cache from sysfs.
5761 		 */
5762 		return;
5763 
5764 	kobject_get(&s->kobj);
5765 	schedule_work(&s->kobj_remove_work);
5766 }
5767 
5768 void sysfs_slab_release(struct kmem_cache *s)
5769 {
5770 	if (slab_state >= FULL)
5771 		kobject_put(&s->kobj);
5772 }
5773 
5774 /*
5775  * Need to buffer aliases during bootup until sysfs becomes
5776  * available lest we lose that information.
5777  */
5778 struct saved_alias {
5779 	struct kmem_cache *s;
5780 	const char *name;
5781 	struct saved_alias *next;
5782 };
5783 
5784 static struct saved_alias *alias_list;
5785 
5786 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5787 {
5788 	struct saved_alias *al;
5789 
5790 	if (slab_state == FULL) {
5791 		/*
5792 		 * If we have a leftover link then remove it.
5793 		 */
5794 		sysfs_remove_link(&slab_kset->kobj, name);
5795 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5796 	}
5797 
5798 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5799 	if (!al)
5800 		return -ENOMEM;
5801 
5802 	al->s = s;
5803 	al->name = name;
5804 	al->next = alias_list;
5805 	alias_list = al;
5806 	return 0;
5807 }
5808 
5809 static int __init slab_sysfs_init(void)
5810 {
5811 	struct kmem_cache *s;
5812 	int err;
5813 
5814 	mutex_lock(&slab_mutex);
5815 
5816 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5817 	if (!slab_kset) {
5818 		mutex_unlock(&slab_mutex);
5819 		pr_err("Cannot register slab subsystem.\n");
5820 		return -ENOSYS;
5821 	}
5822 
5823 	slab_state = FULL;
5824 
5825 	list_for_each_entry(s, &slab_caches, list) {
5826 		err = sysfs_slab_add(s);
5827 		if (err)
5828 			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5829 			       s->name);
5830 	}
5831 
5832 	while (alias_list) {
5833 		struct saved_alias *al = alias_list;
5834 
5835 		alias_list = alias_list->next;
5836 		err = sysfs_slab_alias(al->s, al->name);
5837 		if (err)
5838 			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5839 			       al->name);
5840 		kfree(al);
5841 	}
5842 
5843 	mutex_unlock(&slab_mutex);
5844 	resiliency_test();
5845 	return 0;
5846 }
5847 
5848 __initcall(slab_sysfs_init);
5849 #endif /* CONFIG_SYSFS */
5850 
5851 /*
5852  * The /proc/slabinfo ABI
5853  */
5854 #ifdef CONFIG_SLABINFO
5855 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5856 {
5857 	unsigned long nr_slabs = 0;
5858 	unsigned long nr_objs = 0;
5859 	unsigned long nr_free = 0;
5860 	int node;
5861 	struct kmem_cache_node *n;
5862 
5863 	for_each_kmem_cache_node(s, node, n) {
5864 		nr_slabs += node_nr_slabs(n);
5865 		nr_objs += node_nr_objs(n);
5866 		nr_free += count_partial(n, count_free);
5867 	}
5868 
5869 	sinfo->active_objs = nr_objs - nr_free;
5870 	sinfo->num_objs = nr_objs;
5871 	sinfo->active_slabs = nr_slabs;
5872 	sinfo->num_slabs = nr_slabs;
5873 	sinfo->objects_per_slab = oo_objects(s->oo);
5874 	sinfo->cache_order = oo_order(s->oo);
5875 }
5876 
5877 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5878 {
5879 }
5880 
5881 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5882 		       size_t count, loff_t *ppos)
5883 {
5884 	return -EIO;
5885 }
5886 #endif /* CONFIG_SLABINFO */
5887