xref: /openbmc/linux/mm/slub.c (revision cff11abeca78aa782378401ca2800bd2194aa14e)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * SLUB: A slab allocator that limits cache line use instead of queuing
4   * objects in per cpu and per node lists.
5   *
6   * The allocator synchronizes using per slab locks or atomic operations
7   * and only uses a centralized lock to manage a pool of partial slabs.
8   *
9   * (C) 2007 SGI, Christoph Lameter
10   * (C) 2011 Linux Foundation, Christoph Lameter
11   */
12  
13  #include <linux/mm.h>
14  #include <linux/swap.h> /* struct reclaim_state */
15  #include <linux/module.h>
16  #include <linux/bit_spinlock.h>
17  #include <linux/interrupt.h>
18  #include <linux/bitops.h>
19  #include <linux/slab.h>
20  #include "slab.h"
21  #include <linux/proc_fs.h>
22  #include <linux/seq_file.h>
23  #include <linux/kasan.h>
24  #include <linux/cpu.h>
25  #include <linux/cpuset.h>
26  #include <linux/mempolicy.h>
27  #include <linux/ctype.h>
28  #include <linux/debugobjects.h>
29  #include <linux/kallsyms.h>
30  #include <linux/memory.h>
31  #include <linux/math64.h>
32  #include <linux/fault-inject.h>
33  #include <linux/stacktrace.h>
34  #include <linux/prefetch.h>
35  #include <linux/memcontrol.h>
36  #include <linux/random.h>
37  
38  #include <trace/events/kmem.h>
39  
40  #include "internal.h"
41  
42  /*
43   * Lock order:
44   *   1. slab_mutex (Global Mutex)
45   *   2. node->list_lock
46   *   3. slab_lock(page) (Only on some arches and for debugging)
47   *
48   *   slab_mutex
49   *
50   *   The role of the slab_mutex is to protect the list of all the slabs
51   *   and to synchronize major metadata changes to slab cache structures.
52   *
53   *   The slab_lock is only used for debugging and on arches that do not
54   *   have the ability to do a cmpxchg_double. It only protects:
55   *	A. page->freelist	-> List of free objects in a page
56   *	B. page->inuse		-> Number of objects in use
57   *	C. page->objects	-> Number of objects in page
58   *	D. page->frozen		-> frozen state
59   *
60   *   If a slab is frozen then it is exempt from list management. It is not
61   *   on any list except the per cpu partial list. The processor that froze the
62   *   slab is the one who can perform list operations on the page. Other
63   *   processors may put objects onto the freelist but the processor that
64   *   froze the slab is the only one that can retrieve the objects from the
65   *   page's freelist.
66   *
67   *   The list_lock protects the partial and full list on each node and
68   *   the partial slab counter. If taken then no new slabs may be added to or
69   *   removed from the lists, nor may the number of partial slabs be modified.
70   *   (Note that the total number of slabs is an atomic value that may be
71   *   modified without taking the list lock).
72   *
73   *   The list_lock is a centralized lock and thus we avoid taking it as
74   *   much as possible. As long as SLUB does not have to handle partial
75   *   slabs, operations can continue without any centralized lock. F.e.
76   *   allocating a long series of objects that fill up slabs does not require
77   *   the list lock.
78   *   Interrupts are disabled during allocation and deallocation in order to
79   *   make the slab allocator safe to use in the context of an irq. In addition
80   *   interrupts are disabled to ensure that the processor does not change
81   *   while handling per_cpu slabs, due to kernel preemption.
82   *
83   * SLUB assigns one slab for allocation to each processor.
84   * Allocations only occur from these slabs called cpu slabs.
85   *
86   * Slabs with free elements are kept on a partial list and during regular
87   * operations no list for full slabs is used. If an object in a full slab is
88   * freed then the slab will show up again on the partial lists.
89   * We track full slabs for debugging purposes though because otherwise we
90   * cannot scan all objects.
91   *
92   * Slabs are freed when they become empty. Teardown and setup is
93   * minimal so we rely on the page allocators per cpu caches for
94   * fast frees and allocs.
95   *
96   * page->frozen		The slab is frozen and exempt from list processing.
97   * 			This means that the slab is dedicated to a purpose
98   * 			such as satisfying allocations for a specific
99   * 			processor. Objects may be freed in the slab while
100   * 			it is frozen but slab_free will then skip the usual
101   * 			list operations. It is up to the processor holding
102   * 			the slab to integrate the slab into the slab lists
103   * 			when the slab is no longer needed.
104   *
105   * 			One use of this flag is to mark slabs that are
106   * 			used for allocations. Then such a slab becomes a cpu
107   * 			slab. The cpu slab may be equipped with an additional
108   * 			freelist that allows lockless access to
109   * 			free objects in addition to the regular freelist
110   * 			that requires the slab lock.
111   *
112   * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
113   * 			options set. This moves slab handling out of
114   * 			the fast path and disables lockless freelists.
115   */
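
/*
 * Illustrative sketch (not part of the original source): the debug free
 * path below (free_debug_processing()) follows this hierarchy:
 *
 *	spin_lock_irqsave(&n->list_lock, flags);	<- 2. node list lock
 *	slab_lock(page);				<- 3. page lock
 *	... consistency checks on page->freelist / page->inuse ...
 *	slab_unlock(page);
 *	spin_unlock_irqrestore(&n->list_lock, flags);
 *
 * slab_mutex (1.) is only taken for cache-wide operations such as cache
 * creation and destruction, never on the allocation fast paths.
 */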
116  
117  static inline int kmem_cache_debug(struct kmem_cache *s)
118  {
119  #ifdef CONFIG_SLUB_DEBUG
120  	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
121  #else
122  	return 0;
123  #endif
124  }
125  
126  void *fixup_red_left(struct kmem_cache *s, void *p)
127  {
128  	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
129  		p += s->red_left_pad;
130  
131  	return p;
132  }
133  
134  static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
135  {
136  #ifdef CONFIG_SLUB_CPU_PARTIAL
137  	return !kmem_cache_debug(s);
138  #else
139  	return false;
140  #endif
141  }
142  
143  /*
144   * Issues still to be resolved:
145   *
146   * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
147   *
148   * - Variable sizing of the per node arrays
149   */
150  
151  /* Enable to test recovery from slab corruption on boot */
152  #undef SLUB_RESILIENCY_TEST
153  
154  /* Enable to log cmpxchg failures */
155  #undef SLUB_DEBUG_CMPXCHG
156  
157  /*
158   * Minimum number of partial slabs. These will be left on the partial
159   * lists even if they are empty. kmem_cache_shrink may reclaim them.
160   */
161  #define MIN_PARTIAL 5
162  
163  /*
164   * Maximum number of desirable partial slabs.
165   * The existence of more partial slabs makes kmem_cache_shrink
166   * sort the partial list by the number of objects in use.
167   */
168  #define MAX_PARTIAL 10
169  
170  #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
171  				SLAB_POISON | SLAB_STORE_USER)
172  
173  /*
174   * These debug flags cannot use CMPXCHG because there might be consistency
175   * issues when checking or reading debug information
176   */
177  #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
178  				SLAB_TRACE)
179  
180  
181  /*
182   * Debugging flags that require metadata to be stored in the slab.  These get
183   * disabled when slub_debug=O is used and a cache's min order increases with
184   * metadata.
185   */
186  #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
187  
188  #define OO_SHIFT	16
189  #define OO_MASK		((1 << OO_SHIFT) - 1)
190  #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
191  
192  /* Internal SLUB flags */
193  /* Poison object */
194  #define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
195  /* Use cmpxchg_double */
196  #define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
197  
198  /*
199   * Tracking user of a slab.
200   */
201  #define TRACK_ADDRS_COUNT 16
202  struct track {
203  	unsigned long addr;	/* Called from address */
204  #ifdef CONFIG_STACKTRACE
205  	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Caller's stack trace */
206  #endif
207  	int cpu;		/* Was running on cpu */
208  	int pid;		/* Pid context */
209  	unsigned long when;	/* When did the operation occur */
210  };
211  
212  enum track_item { TRACK_ALLOC, TRACK_FREE };
213  
214  #ifdef CONFIG_SYSFS
215  static int sysfs_slab_add(struct kmem_cache *);
216  static int sysfs_slab_alias(struct kmem_cache *, const char *);
217  static void memcg_propagate_slab_attrs(struct kmem_cache *s);
218  static void sysfs_slab_remove(struct kmem_cache *s);
219  #else
220  static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
221  static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
222  							{ return 0; }
223  static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
224  static inline void sysfs_slab_remove(struct kmem_cache *s) { }
225  #endif
226  
227  static inline void stat(const struct kmem_cache *s, enum stat_item si)
228  {
229  #ifdef CONFIG_SLUB_STATS
230  	/*
231  	 * The rmw is racy on a preemptible kernel but this is acceptable, so
232  	 * avoid this_cpu_add()'s irq-disable overhead.
233  	 */
234  	raw_cpu_inc(s->cpu_slab->stat[si]);
235  #endif
236  }
237  
238  /********************************************************************
239   * 			Core slab cache functions
240   *******************************************************************/
241  
242  /*
243   * Returns freelist pointer (ptr). With hardening, this is obfuscated
244   * with an XOR of the address where the pointer is held and a per-cache
245   * random number.
246   */
247  static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
248  				 unsigned long ptr_addr)
249  {
250  #ifdef CONFIG_SLAB_FREELIST_HARDENED
251  	/*
252  	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
253  	 * Normally, this doesn't cause any issues, as both set_freepointer()
254  	 * and get_freepointer() are called with a pointer with the same tag.
255  	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
256  	 * example, when __free_slab() iterates over objects in a cache, it
257  	 * passes untagged pointers to check_object(). check_object() in turn
258  	 * calls get_freepointer() with an untagged pointer, which causes the
259  	 * freepointer to be restored incorrectly.
260  	 */
261  	return (void *)((unsigned long)ptr ^ s->random ^
262  			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
263  #else
264  	return ptr;
265  #endif
266  }
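
/*
 * Worked example (illustrative): with CONFIG_SLAB_FREELIST_HARDENED the
 * value stored at address A for a free pointer fp is
 *
 *	stored = fp ^ s->random ^ swab(A)
 *
 * and decoding with the same A and s->random XORs the mask away again,
 * so freelist_ptr(s, stored, A) == fp. An attacker who overwrites the
 * stored word without knowing s->random cannot aim the decoded pointer
 * at a chosen address; the swab() makes the high bits of the mask
 * depend on the low, less predictable bits of the storage address.
 */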
267  
268  /* Returns the freelist pointer recorded at location ptr_addr. */
269  static inline void *freelist_dereference(const struct kmem_cache *s,
270  					 void *ptr_addr)
271  {
272  	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
273  			    (unsigned long)ptr_addr);
274  }
275  
276  static inline void *get_freepointer(struct kmem_cache *s, void *object)
277  {
278  	return freelist_dereference(s, object + s->offset);
279  }
280  
281  static void prefetch_freepointer(const struct kmem_cache *s, void *object)
282  {
283  	prefetch(object + s->offset);
284  }
285  
286  static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
287  {
288  	unsigned long freepointer_addr;
289  	void *p;
290  
291  	if (!debug_pagealloc_enabled_static())
292  		return get_freepointer(s, object);
293  
294  	freepointer_addr = (unsigned long)object + s->offset;
295  	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
296  	return freelist_ptr(s, p, freepointer_addr);
297  }
298  
299  static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
300  {
301  	unsigned long freeptr_addr = (unsigned long)object + s->offset;
302  
303  #ifdef CONFIG_SLAB_FREELIST_HARDENED
304  	BUG_ON(object == fp); /* naive detection of double free or corruption */
305  #endif
306  
307  	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
308  }
309  
310  /* Loop over all objects in a slab */
311  #define for_each_object(__p, __s, __addr, __objects) \
312  	for (__p = fixup_red_left(__s, __addr); \
313  		__p < (__addr) + (__objects) * (__s)->size; \
314  		__p += (__s)->size)
315  
316  /* Determine object index from a given position */
317  static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
318  {
319  	return (kasan_reset_tag(p) - addr) / s->size;
320  }
321  
322  static inline unsigned int order_objects(unsigned int order, unsigned int size)
323  {
324  	return ((unsigned int)PAGE_SIZE << order) / size;
325  }
326  
327  static inline struct kmem_cache_order_objects oo_make(unsigned int order,
328  		unsigned int size)
329  {
330  	struct kmem_cache_order_objects x = {
331  		(order << OO_SHIFT) + order_objects(order, size)
332  	};
333  
334  	return x;
335  }
336  
337  static inline unsigned int oo_order(struct kmem_cache_order_objects x)
338  {
339  	return x.x >> OO_SHIFT;
340  }
341  
342  static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
343  {
344  	return x.x & OO_MASK;
345  }
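
/*
 * Worked example (illustrative, assuming 4KiB pages): for order = 1 and
 * size = 256, order_objects(1, 256) = (4096 << 1) / 256 = 32 objects,
 * so oo_make(1, 256) packs x = (1 << 16) + 32 = 0x10020. oo_order()
 * then recovers 1 from the bits above OO_SHIFT and oo_objects()
 * recovers 32 from the OO_MASK bits.
 */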
346  
347  /*
348   * Per slab locking using the pagelock
349   */
350  static __always_inline void slab_lock(struct page *page)
351  {
352  	VM_BUG_ON_PAGE(PageTail(page), page);
353  	bit_spin_lock(PG_locked, &page->flags);
354  }
355  
356  static __always_inline void slab_unlock(struct page *page)
357  {
358  	VM_BUG_ON_PAGE(PageTail(page), page);
359  	__bit_spin_unlock(PG_locked, &page->flags);
360  }
361  
362  /* Interrupts must be disabled (for the fallback code to work right) */
363  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
364  		void *freelist_old, unsigned long counters_old,
365  		void *freelist_new, unsigned long counters_new,
366  		const char *n)
367  {
368  	VM_BUG_ON(!irqs_disabled());
369  #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
370      defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
371  	if (s->flags & __CMPXCHG_DOUBLE) {
372  		if (cmpxchg_double(&page->freelist, &page->counters,
373  				   freelist_old, counters_old,
374  				   freelist_new, counters_new))
375  			return true;
376  	} else
377  #endif
378  	{
379  		slab_lock(page);
380  		if (page->freelist == freelist_old &&
381  					page->counters == counters_old) {
382  			page->freelist = freelist_new;
383  			page->counters = counters_new;
384  			slab_unlock(page);
385  			return true;
386  		}
387  		slab_unlock(page);
388  	}
389  
390  	cpu_relax();
391  	stat(s, CMPXCHG_DOUBLE_FAIL);
392  
393  #ifdef SLUB_DEBUG_CMPXCHG
394  	pr_info("%s %s: cmpxchg double redo ", n, s->name);
395  #endif
396  
397  	return false;
398  }
399  
400  static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
401  		void *freelist_old, unsigned long counters_old,
402  		void *freelist_new, unsigned long counters_new,
403  		const char *n)
404  {
405  #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
406      defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
407  	if (s->flags & __CMPXCHG_DOUBLE) {
408  		if (cmpxchg_double(&page->freelist, &page->counters,
409  				   freelist_old, counters_old,
410  				   freelist_new, counters_new))
411  			return true;
412  	} else
413  #endif
414  	{
415  		unsigned long flags;
416  
417  		local_irq_save(flags);
418  		slab_lock(page);
419  		if (page->freelist == freelist_old &&
420  					page->counters == counters_old) {
421  			page->freelist = freelist_new;
422  			page->counters = counters_new;
423  			slab_unlock(page);
424  			local_irq_restore(flags);
425  			return true;
426  		}
427  		slab_unlock(page);
428  		local_irq_restore(flags);
429  	}
430  
431  	cpu_relax();
432  	stat(s, CMPXCHG_DOUBLE_FAIL);
433  
434  #ifdef SLUB_DEBUG_CMPXCHG
435  	pr_info("%s %s: cmpxchg double redo ", n, s->name);
436  #endif
437  
438  	return false;
439  }
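
/*
 * Sketch (not from the original source): callers use these helpers to
 * update freelist and counters as a single atomic transaction. For
 * example, acquire_slab() below freezes a slab roughly like
 *
 *	new.counters = page->counters;
 *	new.frozen = 1;
 *	__cmpxchg_double_slab(s, page, freelist, counters,
 *			      new.freelist, new.counters, "acquire_slab");
 *
 * Either both words change together or neither does, so a concurrent
 * free that touched page->freelist in the meantime fails the
 * transaction and the caller can retry or give up.
 */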
440  
441  #ifdef CONFIG_SLUB_DEBUG
442  static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
443  static DEFINE_SPINLOCK(object_map_lock);
444  
445  /*
446   * Determine a map of free objects on a page: set bits mark objects on the freelist.
447   *
448   * Node listlock must be held to guarantee that the page does
449   * not vanish from under us.
450   */
451  static unsigned long *get_map(struct kmem_cache *s, struct page *page)
452  	__acquires(&object_map_lock)
453  {
454  	void *p;
455  	void *addr = page_address(page);
456  
457  	VM_BUG_ON(!irqs_disabled());
458  
459  	spin_lock(&object_map_lock);
460  
461  	bitmap_zero(object_map, page->objects);
462  
463  	for (p = page->freelist; p; p = get_freepointer(s, p))
464  		set_bit(slab_index(p, s, addr), object_map);
465  
466  	return object_map;
467  }
468  
469  static void put_map(unsigned long *map) __releases(&object_map_lock)
470  {
471  	VM_BUG_ON(map != object_map);
472  	lockdep_assert_held(&object_map_lock);
473  
474  	spin_unlock(&object_map_lock);
475  }
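
/*
 * Typical usage of the pair above (sketch): callers walk a slab and
 * treat clear bits as allocated objects, e.g.
 *
 *	map = get_map(s, page);
 *	for_each_object(p, s, addr, page->objects)
 *		if (!test_bit(slab_index(p, s, addr), map))
 *			... p is currently allocated ...
 *	put_map(map);
 */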
476  
477  static inline unsigned int size_from_object(struct kmem_cache *s)
478  {
479  	if (s->flags & SLAB_RED_ZONE)
480  		return s->size - s->red_left_pad;
481  
482  	return s->size;
483  }
484  
485  static inline void *restore_red_left(struct kmem_cache *s, void *p)
486  {
487  	if (s->flags & SLAB_RED_ZONE)
488  		p -= s->red_left_pad;
489  
490  	return p;
491  }
492  
493  /*
494   * Debug settings:
495   */
496  #if defined(CONFIG_SLUB_DEBUG_ON)
497  static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
498  #else
499  static slab_flags_t slub_debug;
500  #endif
501  
502  static char *slub_debug_slabs;
503  static int disable_higher_order_debug;
504  
505  /*
506   * slub is about to manipulate internal object metadata.  This memory lies
507   * outside the range of the allocated object, so accessing it would normally
508   * be reported by kasan as a bounds error.  metadata_access_enable() is used
509   * to tell kasan that these accesses are OK.
510   */
511  static inline void metadata_access_enable(void)
512  {
513  	kasan_disable_current();
514  }
515  
516  static inline void metadata_access_disable(void)
517  {
518  	kasan_enable_current();
519  }
520  
521  /*
522   * Object debugging
523   */
524  
525  /* Verify that a pointer has an address that is valid within a slab page */
526  static inline int check_valid_pointer(struct kmem_cache *s,
527  				struct page *page, void *object)
528  {
529  	void *base;
530  
531  	if (!object)
532  		return 1;
533  
534  	base = page_address(page);
535  	object = kasan_reset_tag(object);
536  	object = restore_red_left(s, object);
537  	if (object < base || object >= base + page->objects * s->size ||
538  		(object - base) % s->size) {
539  		return 0;
540  	}
541  
542  	return 1;
543  }
544  
545  static void print_section(char *level, char *text, u8 *addr,
546  			  unsigned int length)
547  {
548  	metadata_access_enable();
549  	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
550  			length, 1);
551  	metadata_access_disable();
552  }
553  
554  /*
555   * See comment in calculate_sizes().
556   */
557  static inline bool freeptr_outside_object(struct kmem_cache *s)
558  {
559  	return s->offset >= s->inuse;
560  }
561  
562  /*
563   * Return the offset of the end of the info block: inuse plus the size of
564   * the free pointer when the free pointer lies outside the object.
565   */
566  static inline unsigned int get_info_end(struct kmem_cache *s)
567  {
568  	if (freeptr_outside_object(s))
569  		return s->inuse + sizeof(void *);
570  	else
571  		return s->inuse;
572  }
573  
574  static struct track *get_track(struct kmem_cache *s, void *object,
575  	enum track_item alloc)
576  {
577  	struct track *p;
578  
579  	p = object + get_info_end(s);
580  
581  	return p + alloc;
582  }
583  
584  static void set_track(struct kmem_cache *s, void *object,
585  			enum track_item alloc, unsigned long addr)
586  {
587  	struct track *p = get_track(s, object, alloc);
588  
589  	if (addr) {
590  #ifdef CONFIG_STACKTRACE
591  		unsigned int nr_entries;
592  
593  		metadata_access_enable();
594  		nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
595  		metadata_access_disable();
596  
597  		if (nr_entries < TRACK_ADDRS_COUNT)
598  			p->addrs[nr_entries] = 0;
599  #endif
600  		p->addr = addr;
601  		p->cpu = smp_processor_id();
602  		p->pid = current->pid;
603  		p->when = jiffies;
604  	} else {
605  		memset(p, 0, sizeof(struct track));
606  	}
607  }
608  
609  static void init_tracking(struct kmem_cache *s, void *object)
610  {
611  	if (!(s->flags & SLAB_STORE_USER))
612  		return;
613  
614  	set_track(s, object, TRACK_FREE, 0UL);
615  	set_track(s, object, TRACK_ALLOC, 0UL);
616  }
617  
618  static void print_track(const char *s, struct track *t, unsigned long pr_time)
619  {
620  	if (!t->addr)
621  		return;
622  
623  	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
624  	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
625  #ifdef CONFIG_STACKTRACE
626  	{
627  		int i;
628  		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
629  			if (t->addrs[i])
630  				pr_err("\t%pS\n", (void *)t->addrs[i]);
631  			else
632  				break;
633  	}
634  #endif
635  }
636  
637  static void print_tracking(struct kmem_cache *s, void *object)
638  {
639  	unsigned long pr_time = jiffies;
640  	if (!(s->flags & SLAB_STORE_USER))
641  		return;
642  
643  	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
644  	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
645  }
646  
647  static void print_page_info(struct page *page)
648  {
649  	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
650  	       page, page->objects, page->inuse, page->freelist, page->flags);
651  
652  }
653  
654  static void slab_bug(struct kmem_cache *s, char *fmt, ...)
655  {
656  	struct va_format vaf;
657  	va_list args;
658  
659  	va_start(args, fmt);
660  	vaf.fmt = fmt;
661  	vaf.va = &args;
662  	pr_err("=============================================================================\n");
663  	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
664  	pr_err("-----------------------------------------------------------------------------\n\n");
665  
666  	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
667  	va_end(args);
668  }
669  
670  static void slab_fix(struct kmem_cache *s, char *fmt, ...)
671  {
672  	struct va_format vaf;
673  	va_list args;
674  
675  	va_start(args, fmt);
676  	vaf.fmt = fmt;
677  	vaf.va = &args;
678  	pr_err("FIX %s: %pV\n", s->name, &vaf);
679  	va_end(args);
680  }
681  
682  static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
683  			       void *freelist, void *nextfree)
684  {
685  	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
686  	    !check_valid_pointer(s, page, nextfree)) {
687  		object_err(s, page, freelist, "Freechain corrupt");
688  		freelist = NULL;
689  		slab_fix(s, "Isolate corrupted freechain");
690  		return true;
691  	}
692  
693  	return false;
694  }
695  
696  static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
697  {
698  	unsigned int off;	/* Offset of last byte */
699  	u8 *addr = page_address(page);
700  
701  	print_tracking(s, p);
702  
703  	print_page_info(page);
704  
705  	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
706  	       p, p - addr, get_freepointer(s, p));
707  
708  	if (s->flags & SLAB_RED_ZONE)
709  		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
710  			      s->red_left_pad);
711  	else if (p > addr + 16)
712  		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
713  
714  	print_section(KERN_ERR, "Object ", p,
715  		      min_t(unsigned int, s->object_size, PAGE_SIZE));
716  	if (s->flags & SLAB_RED_ZONE)
717  		print_section(KERN_ERR, "Redzone ", p + s->object_size,
718  			s->inuse - s->object_size);
719  
720  	off = get_info_end(s);
721  
722  	if (s->flags & SLAB_STORE_USER)
723  		off += 2 * sizeof(struct track);
724  
725  	off += kasan_metadata_size(s);
726  
727  	if (off != size_from_object(s))
728  		/* Beginning of the filler is the free pointer */
729  		print_section(KERN_ERR, "Padding ", p + off,
730  			      size_from_object(s) - off);
731  
732  	dump_stack();
733  }
734  
735  void object_err(struct kmem_cache *s, struct page *page,
736  			u8 *object, char *reason)
737  {
738  	slab_bug(s, "%s", reason);
739  	print_trailer(s, page, object);
740  }
741  
742  static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
743  			const char *fmt, ...)
744  {
745  	va_list args;
746  	char buf[100];
747  
748  	va_start(args, fmt);
749  	vsnprintf(buf, sizeof(buf), fmt, args);
750  	va_end(args);
751  	slab_bug(s, "%s", buf);
752  	print_page_info(page);
753  	dump_stack();
754  }
755  
756  static void init_object(struct kmem_cache *s, void *object, u8 val)
757  {
758  	u8 *p = object;
759  
760  	if (s->flags & SLAB_RED_ZONE)
761  		memset(p - s->red_left_pad, val, s->red_left_pad);
762  
763  	if (s->flags & __OBJECT_POISON) {
764  		memset(p, POISON_FREE, s->object_size - 1);
765  		p[s->object_size - 1] = POISON_END;
766  	}
767  
768  	if (s->flags & SLAB_RED_ZONE)
769  		memset(p + s->object_size, val, s->inuse - s->object_size);
770  }
771  
772  static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
773  						void *from, void *to)
774  {
775  	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
776  	memset(from, data, to - from);
777  }
778  
779  static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
780  			u8 *object, char *what,
781  			u8 *start, unsigned int value, unsigned int bytes)
782  {
783  	u8 *fault;
784  	u8 *end;
785  	u8 *addr = page_address(page);
786  
787  	metadata_access_enable();
788  	fault = memchr_inv(start, value, bytes);
789  	metadata_access_disable();
790  	if (!fault)
791  		return 1;
792  
793  	end = start + bytes;
794  	while (end > fault && end[-1] == value)
795  		end--;
796  
797  	slab_bug(s, "%s overwritten", what);
798  	pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
799  					fault, end - 1, fault - addr,
800  					fault[0], value);
801  	print_trailer(s, page, object);
802  
803  	restore_bytes(s, what, value, fault, end);
804  	return 0;
805  }
806  
807  /*
808   * Object layout:
809   *
810   * object address
811   * 	Bytes of the object to be managed.
812   * 	If the freepointer may overlay the object then the free
813   *	pointer is at the middle of the object.
814   *
815   * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
816   * 	0xa5 (POISON_END)
817   *
818   * object + s->object_size
819   * 	Padding to reach word boundary. This is also used for Redzoning.
820   * 	Padding is extended by another word if Redzoning is enabled and
821   * 	object_size == inuse.
822   *
823   * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
824   * 	0xcc (RED_ACTIVE) for objects in use.
825   *
826   * object + s->inuse
827   * 	Meta data starts here.
828   *
829   * 	A. Free pointer (if we cannot overwrite object on free)
830   * 	B. Tracking data for SLAB_STORE_USER
831   * 	C. Padding to reach required alignment boundary or at minimum
832   * 		one word if debugging is on to be able to detect writes
833   * 		before the word boundary.
834   *
835   *	Padding is done using 0x5a (POISON_INUSE)
836   *
837   * object + s->size
838   * 	Nothing is used beyond s->size.
839   *
840   * If slabcaches are merged then the object_size and inuse boundaries are mostly
841   * ignored, and therefore no slab options that rely on these boundaries
842   * may be used with merged slabcaches.
843   */
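
/*
 * Hypothetical layout sketch (illustrative, not derived from a real
 * cache): for object_size = 24 with SLAB_RED_ZONE and SLAB_STORE_USER
 * on a 64-bit kernel, one slot looks roughly like
 *
 *	[left redzone][24 byte object][right redzone up to s->inuse]
 *	[free pointer][2 x struct track][POISON_INUSE padding to s->size]
 *
 * check_object() and check_pad_bytes() below verify exactly these
 * regions.
 */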
844  
845  static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
846  {
847  	unsigned long off = get_info_end(s);	/* The end of info */
848  
849  	if (s->flags & SLAB_STORE_USER)
850  		/* We also have user information there */
851  		off += 2 * sizeof(struct track);
852  
853  	off += kasan_metadata_size(s);
854  
855  	if (size_from_object(s) == off)
856  		return 1;
857  
858  	return check_bytes_and_report(s, page, p, "Object padding",
859  			p + off, POISON_INUSE, size_from_object(s) - off);
860  }
861  
862  /* Check the pad bytes at the end of a slab page */
863  static int slab_pad_check(struct kmem_cache *s, struct page *page)
864  {
865  	u8 *start;
866  	u8 *fault;
867  	u8 *end;
868  	u8 *pad;
869  	int length;
870  	int remainder;
871  
872  	if (!(s->flags & SLAB_POISON))
873  		return 1;
874  
875  	start = page_address(page);
876  	length = page_size(page);
877  	end = start + length;
878  	remainder = length % s->size;
879  	if (!remainder)
880  		return 1;
881  
882  	pad = end - remainder;
883  	metadata_access_enable();
884  	fault = memchr_inv(pad, POISON_INUSE, remainder);
885  	metadata_access_disable();
886  	if (!fault)
887  		return 1;
888  	while (end > fault && end[-1] == POISON_INUSE)
889  		end--;
890  
891  	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
892  			fault, end - 1, fault - start);
893  	print_section(KERN_ERR, "Padding ", pad, remainder);
894  
895  	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
896  	return 0;
897  }
898  
899  static int check_object(struct kmem_cache *s, struct page *page,
900  					void *object, u8 val)
901  {
902  	u8 *p = object;
903  	u8 *endobject = object + s->object_size;
904  
905  	if (s->flags & SLAB_RED_ZONE) {
906  		if (!check_bytes_and_report(s, page, object, "Redzone",
907  			object - s->red_left_pad, val, s->red_left_pad))
908  			return 0;
909  
910  		if (!check_bytes_and_report(s, page, object, "Redzone",
911  			endobject, val, s->inuse - s->object_size))
912  			return 0;
913  	} else {
914  		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
915  			check_bytes_and_report(s, page, p, "Alignment padding",
916  				endobject, POISON_INUSE,
917  				s->inuse - s->object_size);
918  		}
919  	}
920  
921  	if (s->flags & SLAB_POISON) {
922  		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
923  			(!check_bytes_and_report(s, page, p, "Poison", p,
924  					POISON_FREE, s->object_size - 1) ||
925  			 !check_bytes_and_report(s, page, p, "Poison",
926  				p + s->object_size - 1, POISON_END, 1)))
927  			return 0;
928  		/*
929  		 * check_pad_bytes cleans up on its own.
930  		 */
931  		check_pad_bytes(s, page, p);
932  	}
933  
934  	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
935  		/*
936  		 * Object and freepointer overlap. Cannot check
937  		 * freepointer while object is allocated.
938  		 */
939  		return 1;
940  
941  	/* Check free pointer validity */
942  	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
943  		object_err(s, page, p, "Freepointer corrupt");
944  		/*
945  		 * No choice but to zap it and thus lose the remainder
946  		 * of the free objects in this slab. May cause
947  		 * another error because the object count is now wrong.
948  		 */
949  		set_freepointer(s, p, NULL);
950  		return 0;
951  	}
952  	return 1;
953  }
954  
955  static int check_slab(struct kmem_cache *s, struct page *page)
956  {
957  	int maxobj;
958  
959  	VM_BUG_ON(!irqs_disabled());
960  
961  	if (!PageSlab(page)) {
962  		slab_err(s, page, "Not a valid slab page");
963  		return 0;
964  	}
965  
966  	maxobj = order_objects(compound_order(page), s->size);
967  	if (page->objects > maxobj) {
968  		slab_err(s, page, "objects %u > max %u",
969  			page->objects, maxobj);
970  		return 0;
971  	}
972  	if (page->inuse > page->objects) {
973  		slab_err(s, page, "inuse %u > max %u",
974  			page->inuse, page->objects);
975  		return 0;
976  	}
977  	/* Slab_pad_check fixes things up after itself */
978  	slab_pad_check(s, page);
979  	return 1;
980  }
981  
982  /*
983   * Determine if a certain object on a page is on the freelist. Must hold the
984   * slab lock to guarantee that the chains are in a consistent state.
985   */
986  static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
987  {
988  	int nr = 0;
989  	void *fp;
990  	void *object = NULL;
991  	int max_objects;
992  
993  	fp = page->freelist;
994  	while (fp && nr <= page->objects) {
995  		if (fp == search)
996  			return 1;
997  		if (!check_valid_pointer(s, page, fp)) {
998  			if (object) {
999  				object_err(s, page, object,
1000  					"Freechain corrupt");
1001  				set_freepointer(s, object, NULL);
1002  			} else {
1003  				slab_err(s, page, "Freepointer corrupt");
1004  				page->freelist = NULL;
1005  				page->inuse = page->objects;
1006  				slab_fix(s, "Freelist cleared");
1007  				return 0;
1008  			}
1009  			break;
1010  		}
1011  		object = fp;
1012  		fp = get_freepointer(s, object);
1013  		nr++;
1014  	}
1015  
1016  	max_objects = order_objects(compound_order(page), s->size);
1017  	if (max_objects > MAX_OBJS_PER_PAGE)
1018  		max_objects = MAX_OBJS_PER_PAGE;
1019  
1020  	if (page->objects != max_objects) {
1021  		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
1022  			 page->objects, max_objects);
1023  		page->objects = max_objects;
1024  		slab_fix(s, "Number of objects adjusted.");
1025  	}
1026  	if (page->inuse != page->objects - nr) {
1027  		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1028  			 page->inuse, page->objects - nr);
1029  		page->inuse = page->objects - nr;
1030  		slab_fix(s, "Object count adjusted.");
1031  	}
1032  	return search == NULL;
1033  }
1034  
1035  static void trace(struct kmem_cache *s, struct page *page, void *object,
1036  								int alloc)
1037  {
1038  	if (s->flags & SLAB_TRACE) {
1039  		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1040  			s->name,
1041  			alloc ? "alloc" : "free",
1042  			object, page->inuse,
1043  			page->freelist);
1044  
1045  		if (!alloc)
1046  			print_section(KERN_INFO, "Object ", (void *)object,
1047  					s->object_size);
1048  
1049  		dump_stack();
1050  	}
1051  }
1052  
1053  /*
1054   * Tracking of fully allocated slabs for debugging purposes.
1055   */
1056  static void add_full(struct kmem_cache *s,
1057  	struct kmem_cache_node *n, struct page *page)
1058  {
1059  	if (!(s->flags & SLAB_STORE_USER))
1060  		return;
1061  
1062  	lockdep_assert_held(&n->list_lock);
1063  	list_add(&page->slab_list, &n->full);
1064  }
1065  
1066  static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1067  {
1068  	if (!(s->flags & SLAB_STORE_USER))
1069  		return;
1070  
1071  	lockdep_assert_held(&n->list_lock);
1072  	list_del(&page->slab_list);
1073  }
1074  
1075  /* Tracking of the number of slabs for debugging purposes */
1076  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1077  {
1078  	struct kmem_cache_node *n = get_node(s, node);
1079  
1080  	return atomic_long_read(&n->nr_slabs);
1081  }
1082  
1083  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1084  {
1085  	return atomic_long_read(&n->nr_slabs);
1086  }
1087  
1088  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1089  {
1090  	struct kmem_cache_node *n = get_node(s, node);
1091  
1092  	/*
1093  	 * May be called early in order to allocate a slab for the
1094  	 * kmem_cache_node structure. Solve the chicken-egg
1095  	 * dilemma by deferring the increment of the count during
1096  	 * bootstrap (see early_kmem_cache_node_alloc).
1097  	 */
1098  	if (likely(n)) {
1099  		atomic_long_inc(&n->nr_slabs);
1100  		atomic_long_add(objects, &n->total_objects);
1101  	}
1102  }
1103  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1104  {
1105  	struct kmem_cache_node *n = get_node(s, node);
1106  
1107  	atomic_long_dec(&n->nr_slabs);
1108  	atomic_long_sub(objects, &n->total_objects);
1109  }
1110  
1111  /* Object debug checks for alloc/free paths */
1112  static void setup_object_debug(struct kmem_cache *s, struct page *page,
1113  								void *object)
1114  {
1115  	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1116  		return;
1117  
1118  	init_object(s, object, SLUB_RED_INACTIVE);
1119  	init_tracking(s, object);
1120  }
1121  
1122  static
1123  void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
1124  {
1125  	if (!(s->flags & SLAB_POISON))
1126  		return;
1127  
1128  	metadata_access_enable();
1129  	memset(addr, POISON_INUSE, page_size(page));
1130  	metadata_access_disable();
1131  }
1132  
1133  static inline int alloc_consistency_checks(struct kmem_cache *s,
1134  					struct page *page, void *object)
1135  {
1136  	if (!check_slab(s, page))
1137  		return 0;
1138  
1139  	if (!check_valid_pointer(s, page, object)) {
1140  		object_err(s, page, object, "Freelist Pointer check fails");
1141  		return 0;
1142  	}
1143  
1144  	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1145  		return 0;
1146  
1147  	return 1;
1148  }
1149  
1150  static noinline int alloc_debug_processing(struct kmem_cache *s,
1151  					struct page *page,
1152  					void *object, unsigned long addr)
1153  {
1154  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1155  		if (!alloc_consistency_checks(s, page, object))
1156  			goto bad;
1157  	}
1158  
1159  	/* Success: perform special debug activities for allocs */
1160  	if (s->flags & SLAB_STORE_USER)
1161  		set_track(s, object, TRACK_ALLOC, addr);
1162  	trace(s, page, object, 1);
1163  	init_object(s, object, SLUB_RED_ACTIVE);
1164  	return 1;
1165  
1166  bad:
1167  	if (PageSlab(page)) {
1168  		/*
1169  		 * If this is a slab page then let's do the best we can
1170  		 * to avoid issues in the future. Marking all objects
1171  		 * as used avoids touching the remaining objects.
1172  		 */
1173  		slab_fix(s, "Marking all objects used");
1174  		page->inuse = page->objects;
1175  		page->freelist = NULL;
1176  	}
1177  	return 0;
1178  }
1179  
1180  static inline int free_consistency_checks(struct kmem_cache *s,
1181  		struct page *page, void *object, unsigned long addr)
1182  {
1183  	if (!check_valid_pointer(s, page, object)) {
1184  		slab_err(s, page, "Invalid object pointer 0x%p", object);
1185  		return 0;
1186  	}
1187  
1188  	if (on_freelist(s, page, object)) {
1189  		object_err(s, page, object, "Object already free");
1190  		return 0;
1191  	}
1192  
1193  	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1194  		return 0;
1195  
1196  	if (unlikely(s != page->slab_cache)) {
1197  		if (!PageSlab(page)) {
1198  			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1199  				 object);
1200  		} else if (!page->slab_cache) {
1201  			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1202  			       object);
1203  			dump_stack();
1204  		} else
1205  			object_err(s, page, object,
1206  					"page slab pointer corrupt.");
1207  		return 0;
1208  	}
1209  	return 1;
1210  }
1211  
1212  /* Supports checking bulk free of a constructed freelist */
1213  static noinline int free_debug_processing(
1214  	struct kmem_cache *s, struct page *page,
1215  	void *head, void *tail, int bulk_cnt,
1216  	unsigned long addr)
1217  {
1218  	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1219  	void *object = head;
1220  	int cnt = 0;
1221  	unsigned long uninitialized_var(flags);
1222  	int ret = 0;
1223  
1224  	spin_lock_irqsave(&n->list_lock, flags);
1225  	slab_lock(page);
1226  
1227  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1228  		if (!check_slab(s, page))
1229  			goto out;
1230  	}
1231  
1232  next_object:
1233  	cnt++;
1234  
1235  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1236  		if (!free_consistency_checks(s, page, object, addr))
1237  			goto out;
1238  	}
1239  
1240  	if (s->flags & SLAB_STORE_USER)
1241  		set_track(s, object, TRACK_FREE, addr);
1242  	trace(s, page, object, 0);
1243  	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1244  	init_object(s, object, SLUB_RED_INACTIVE);
1245  
1246  	/* Reached end of constructed freelist yet? */
1247  	if (object != tail) {
1248  		object = get_freepointer(s, object);
1249  		goto next_object;
1250  	}
1251  	ret = 1;
1252  
1253  out:
1254  	if (cnt != bulk_cnt)
1255  		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1256  			 bulk_cnt, cnt);
1257  
1258  	slab_unlock(page);
1259  	spin_unlock_irqrestore(&n->list_lock, flags);
1260  	if (!ret)
1261  		slab_fix(s, "Object at 0x%p not freed", object);
1262  	return ret;
1263  }
1264  
1265  static int __init setup_slub_debug(char *str)
1266  {
1267  	slub_debug = DEBUG_DEFAULT_FLAGS;
1268  	if (*str++ != '=' || !*str)
1269  		/*
1270  		 * No options specified. Switch on full debugging.
1271  		 */
1272  		goto out;
1273  
1274  	if (*str == ',')
1275  		/*
1276  		 * No options but restriction on slabs. This means full
1277  		 * debugging for slabs matching a pattern.
1278  		 */
1279  		goto check_slabs;
1280  
1281  	slub_debug = 0;
1282  	if (*str == '-')
1283  		/*
1284  		 * Switch off all debugging measures.
1285  		 */
1286  		goto out;
1287  
1288  	/*
1289  	 * Determine which debug features should be switched on
1290  	 */
1291  	for (; *str && *str != ','; str++) {
1292  		switch (tolower(*str)) {
1293  		case 'f':
1294  			slub_debug |= SLAB_CONSISTENCY_CHECKS;
1295  			break;
1296  		case 'z':
1297  			slub_debug |= SLAB_RED_ZONE;
1298  			break;
1299  		case 'p':
1300  			slub_debug |= SLAB_POISON;
1301  			break;
1302  		case 'u':
1303  			slub_debug |= SLAB_STORE_USER;
1304  			break;
1305  		case 't':
1306  			slub_debug |= SLAB_TRACE;
1307  			break;
1308  		case 'a':
1309  			slub_debug |= SLAB_FAILSLAB;
1310  			break;
1311  		case 'o':
1312  			/*
1313  			 * Avoid enabling debugging on caches if their minimum
1314  			 * order would increase as a result.
1315  			 */
1316  			disable_higher_order_debug = 1;
1317  			break;
1318  		default:
1319  			pr_err("slub_debug option '%c' unknown. skipped\n",
1320  			       *str);
1321  		}
1322  	}
1323  
1324  check_slabs:
1325  	if (*str == ',')
1326  		slub_debug_slabs = str + 1;
1327  out:
1328  	if ((static_branch_unlikely(&init_on_alloc) ||
1329  	     static_branch_unlikely(&init_on_free)) &&
1330  	    (slub_debug & SLAB_POISON))
1331  		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1332  	return 1;
1333  }
1334  
1335  __setup("slub_debug", setup_slub_debug);
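
/*
 * Boot parameter examples for the parser above (illustrative):
 *
 *	slub_debug		full debugging for all caches
 *	slub_debug=,dentry	full debugging, but only for the dentry cache
 *	slub_debug=FZ		consistency checks plus red zoning, all caches
 *	slub_debug=P,kmalloc-*	poisoning for all kmalloc caches
 *	slub_debug=-		switch all debugging off
 */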
1336  
1337  /*
1338   * kmem_cache_flags - apply debugging options to the cache
1339   * @object_size:	the size of an object without meta data
1340   * @flags:		flags to set
1341   * @name:		name of the cache
1342   * @ctor:		constructor function
1343   *
1344   * Debug option(s) are applied to @flags. In addition to the debug
1345   * option(s), if a slab name (or multiple) is specified i.e.
1346   * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1347   * then only the select slabs will receive the debug option(s).
1348   */
1349  slab_flags_t kmem_cache_flags(unsigned int object_size,
1350  	slab_flags_t flags, const char *name,
1351  	void (*ctor)(void *))
1352  {
1353  	char *iter;
1354  	size_t len;
1355  
1356  	/* If slub_debug = 0, it folds into the if conditional. */
1357  	if (!slub_debug_slabs)
1358  		return flags | slub_debug;
1359  
1360  	len = strlen(name);
1361  	iter = slub_debug_slabs;
1362  	while (*iter) {
1363  		char *end, *glob;
1364  		size_t cmplen;
1365  
1366  		end = strchrnul(iter, ',');
1367  
1368  		glob = strnchr(iter, end - iter, '*');
1369  		if (glob)
1370  			cmplen = glob - iter;
1371  		else
1372  			cmplen = max_t(size_t, len, (end - iter));
1373  
1374  		if (!strncmp(name, iter, cmplen)) {
1375  			flags |= slub_debug;
1376  			break;
1377  		}
1378  
1379  		if (!*end)
1380  			break;
1381  		iter = end + 1;
1382  	}
1383  
1384  	return flags;
1385  }
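
/*
 * Matching example (illustrative): booting with
 * slub_debug=F,kmalloc-*,dentry leaves slub_debug_slabs pointing at
 * "kmalloc-*,dentry". The loop above compares cache names up to the '*'
 * for the glob entry (cmplen = 8), so "kmalloc-64" matches and gains
 * SLAB_CONSISTENCY_CHECKS, "dentry" matches exactly, and all other
 * caches keep their flags unchanged.
 */
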
1386  #else /* !CONFIG_SLUB_DEBUG */
1387  static inline void setup_object_debug(struct kmem_cache *s,
1388  			struct page *page, void *object) {}
1389  static inline
1390  void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
1391  
1392  static inline int alloc_debug_processing(struct kmem_cache *s,
1393  	struct page *page, void *object, unsigned long addr) { return 0; }
1394  
1395  static inline int free_debug_processing(
1396  	struct kmem_cache *s, struct page *page,
1397  	void *head, void *tail, int bulk_cnt,
1398  	unsigned long addr) { return 0; }
1399  
1400  static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1401  			{ return 1; }
1402  static inline int check_object(struct kmem_cache *s, struct page *page,
1403  			void *object, u8 val) { return 1; }
1404  static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1405  					struct page *page) {}
1406  static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1407  					struct page *page) {}
1408  slab_flags_t kmem_cache_flags(unsigned int object_size,
1409  	slab_flags_t flags, const char *name,
1410  	void (*ctor)(void *))
1411  {
1412  	return flags;
1413  }
1414  #define slub_debug 0
1415  
1416  #define disable_higher_order_debug 0
1417  
1418  static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1419  							{ return 0; }
1420  static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1421  							{ return 0; }
1422  static inline void inc_slabs_node(struct kmem_cache *s, int node,
1423  							int objects) {}
1424  static inline void dec_slabs_node(struct kmem_cache *s, int node,
1425  							int objects) {}
1426  
1427  static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
1428  			       void *freelist, void *nextfree)
1429  {
1430  	return false;
1431  }
1432  #endif /* CONFIG_SLUB_DEBUG */
1433  
1434  /*
1435   * Hooks for other subsystems that check memory allocations. In a typical
1436   * production configuration these hooks should all produce no code at all.
1437   */
1438  static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1439  {
1440  	ptr = kasan_kmalloc_large(ptr, size, flags);
1441  	/* As ptr might get tagged, call kmemleak hook after KASAN. */
1442  	kmemleak_alloc(ptr, size, 1, flags);
1443  	return ptr;
1444  }
1445  
1446  static __always_inline void kfree_hook(void *x)
1447  {
1448  	kmemleak_free(x);
1449  	kasan_kfree_large(x, _RET_IP_);
1450  }
1451  
1452  static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
1453  {
1454  	kmemleak_free_recursive(x, s->flags);
1455  
1456  	/*
1457  	 * Trouble is that we may no longer disable interrupts in the fast path.
1458  	 * So in order to make the debug calls that expect irqs to be
1459  	 * disabled we need to disable interrupts temporarily.
1460  	 */
1461  #ifdef CONFIG_LOCKDEP
1462  	{
1463  		unsigned long flags;
1464  
1465  		local_irq_save(flags);
1466  		debug_check_no_locks_freed(x, s->object_size);
1467  		local_irq_restore(flags);
1468  	}
1469  #endif
1470  	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1471  		debug_check_no_obj_freed(x, s->object_size);
1472  
1473  	/* KASAN might put x into memory quarantine, delaying its reuse */
1474  	return kasan_slab_free(s, x, _RET_IP_);
1475  }
1476  
1477  static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1478  					   void **head, void **tail)
1479  {
1480  
1481  	void *object;
1482  	void *next = *head;
1483  	void *old_tail = *tail ? *tail : *head;
1484  	int rsize;
1485  
1486  	/* Head and tail of the reconstructed freelist */
1487  	*head = NULL;
1488  	*tail = NULL;
1489  
1490  	do {
1491  		object = next;
1492  		next = get_freepointer(s, object);
1493  
1494  		if (slab_want_init_on_free(s)) {
1495  			/*
1496  			 * Clear the object and the metadata, but don't touch
1497  			 * the redzone.
1498  			 */
1499  			memset(object, 0, s->object_size);
1500  			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
1501  							   : 0;
1502  			memset((char *)object + s->inuse, 0,
1503  			       s->size - s->inuse - rsize);
1504  
1505  		}
1506  		/* If object's reuse doesn't have to be delayed */
1507  		if (!slab_free_hook(s, object)) {
1508  			/* Move object to the new freelist */
1509  			set_freepointer(s, object, *head);
1510  			*head = object;
1511  			if (!*tail)
1512  				*tail = object;
1513  		}
1514  	} while (object != old_tail);
1515  
1516  	if (*head == *tail)
1517  		*tail = NULL;
1518  
1519  	return *head != NULL;
1520  }
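
/*
 * Illustration (hypothetical chain): bulk-freeing A -> B -> C where
 * KASAN quarantines B rebuilds the list as *head = C -> A (objects are
 * pushed onto the new list, so the order reverses) while B stays out of
 * circulation until the quarantine drains. The caller then links only C
 * and A back into the slab.
 */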
1521  
1522  static void *setup_object(struct kmem_cache *s, struct page *page,
1523  				void *object)
1524  {
1525  	setup_object_debug(s, page, object);
1526  	object = kasan_init_slab_obj(s, object);
1527  	if (unlikely(s->ctor)) {
1528  		kasan_unpoison_object_data(s, object);
1529  		s->ctor(object);
1530  		kasan_poison_object_data(s, object);
1531  	}
1532  	return object;
1533  }
1534  
1535  /*
1536   * Slab allocation and freeing
1537   */
1538  static inline struct page *alloc_slab_page(struct kmem_cache *s,
1539  		gfp_t flags, int node, struct kmem_cache_order_objects oo)
1540  {
1541  	struct page *page;
1542  	unsigned int order = oo_order(oo);
1543  
1544  	if (node == NUMA_NO_NODE)
1545  		page = alloc_pages(flags, order);
1546  	else
1547  		page = __alloc_pages_node(node, flags, order);
1548  
1549  	if (page && charge_slab_page(page, flags, order, s)) {
1550  		__free_pages(page, order);
1551  		page = NULL;
1552  	}
1553  
1554  	return page;
1555  }
1556  
1557  #ifdef CONFIG_SLAB_FREELIST_RANDOM
1558  /* Pre-initialize the random sequence cache */
1559  static int init_cache_random_seq(struct kmem_cache *s)
1560  {
1561  	unsigned int count = oo_objects(s->oo);
1562  	int err;
1563  
1564  	/* Bailout if already initialised */
1565  	if (s->random_seq)
1566  		return 0;
1567  
1568  	err = cache_random_seq_create(s, count, GFP_KERNEL);
1569  	if (err) {
1570  		pr_err("SLUB: Unable to initialize free list for %s\n",
1571  			s->name);
1572  		return err;
1573  	}
1574  
1575  	/* Transform to an offset on the set of pages */
1576  	if (s->random_seq) {
1577  		unsigned int i;
1578  
1579  		for (i = 0; i < count; i++)
1580  			s->random_seq[i] *= s->size;
1581  	}
1582  	return 0;
1583  }
1584  
1585  /* Initialize each random sequence freelist per cache */
1586  static void __init init_freelist_randomization(void)
1587  {
1588  	struct kmem_cache *s;
1589  
1590  	mutex_lock(&slab_mutex);
1591  
1592  	list_for_each_entry(s, &slab_caches, list)
1593  		init_cache_random_seq(s);
1594  
1595  	mutex_unlock(&slab_mutex);
1596  }
1597  
1598  /* Get the next entry from the pre-computed randomized freelist */
1599  static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1600  				unsigned long *pos, void *start,
1601  				unsigned long page_limit,
1602  				unsigned long freelist_count)
1603  {
1604  	unsigned int idx;
1605  
1606  	/*
1607  	 * If the target page allocation failed, the number of objects on the
1608  	 * page might be smaller than the usual size defined by the cache.
1609  	 */
1610  	do {
1611  		idx = s->random_seq[*pos];
1612  		*pos += 1;
1613  		if (*pos >= freelist_count)
1614  			*pos = 0;
1615  	} while (unlikely(idx >= page_limit));
1616  
1617  	return (char *)start + idx;
1618  }
1619  
1620  /* Shuffle the singly linked freelist based on a random pre-computed sequence */
1621  static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1622  {
1623  	void *start;
1624  	void *cur;
1625  	void *next;
1626  	unsigned long idx, pos, page_limit, freelist_count;
1627  
1628  	if (page->objects < 2 || !s->random_seq)
1629  		return false;
1630  
1631  	freelist_count = oo_objects(s->oo);
1632  	pos = get_random_int() % freelist_count;
1633  
1634  	page_limit = page->objects * s->size;
1635  	start = fixup_red_left(s, page_address(page));
1636  
1637  	/* First entry is used as the base of the freelist */
1638  	cur = next_freelist_entry(s, page, &pos, start, page_limit,
1639  				freelist_count);
1640  	cur = setup_object(s, page, cur);
1641  	page->freelist = cur;
1642  
1643  	for (idx = 1; idx < page->objects; idx++) {
1644  		next = next_freelist_entry(s, page, &pos, start, page_limit,
1645  			freelist_count);
1646  		next = setup_object(s, page, next);
1647  		set_freepointer(s, cur, next);
1648  		cur = next;
1649  	}
1650  	set_freepointer(s, cur, NULL);
1651  
1652  	return true;
1653  }
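
/*
 * Illustration (hypothetical numbers): with page->objects = 4 and a
 * pre-scaled sequence {2, 0, 3, 1} (entries already multiplied by
 * s->size by init_cache_random_seq()), a random start pos = 1 yields
 * the freelist obj0 -> obj3 -> obj1 -> obj2, so a fresh slab hands out
 * objects in that shuffled order rather than by address.
 */
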
1654  #else
1655  static inline int init_cache_random_seq(struct kmem_cache *s)
1656  {
1657  	return 0;
1658  }
1659  static inline void init_freelist_randomization(void) { }
1660  static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1661  {
1662  	return false;
1663  }
1664  #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1665  
1666  static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1667  {
1668  	struct page *page;
1669  	struct kmem_cache_order_objects oo = s->oo;
1670  	gfp_t alloc_gfp;
1671  	void *start, *p, *next;
1672  	int idx;
1673  	bool shuffle;
1674  
1675  	flags &= gfp_allowed_mask;
1676  
1677  	if (gfpflags_allow_blocking(flags))
1678  		local_irq_enable();
1679  
1680  	flags |= s->allocflags;
1681  
1682  	/*
1683  	 * Let the initial higher-order allocation fail under memory pressure
1684  	 * so we fall back to the minimum order allocation.
1685  	 */
1686  	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1687  	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1688  		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1689  
1690  	page = alloc_slab_page(s, alloc_gfp, node, oo);
1691  	if (unlikely(!page)) {
1692  		oo = s->min;
1693  		alloc_gfp = flags;
1694  		/*
1695  		 * Allocation may have failed due to fragmentation.
1696  		 * Try a lower order alloc if possible
1697  		 */
1698  		page = alloc_slab_page(s, alloc_gfp, node, oo);
1699  		if (unlikely(!page))
1700  			goto out;
1701  		stat(s, ORDER_FALLBACK);
1702  	}
1703  
1704  	page->objects = oo_objects(oo);
1705  
1706  	page->slab_cache = s;
1707  	__SetPageSlab(page);
1708  	if (page_is_pfmemalloc(page))
1709  		SetPageSlabPfmemalloc(page);
1710  
1711  	kasan_poison_slab(page);
1712  
1713  	start = page_address(page);
1714  
1715  	setup_page_debug(s, page, start);
1716  
1717  	shuffle = shuffle_freelist(s, page);
1718  
1719  	if (!shuffle) {
1720  		start = fixup_red_left(s, start);
1721  		start = setup_object(s, page, start);
1722  		page->freelist = start;
1723  		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1724  			next = p + s->size;
1725  			next = setup_object(s, page, next);
1726  			set_freepointer(s, p, next);
1727  			p = next;
1728  		}
1729  		set_freepointer(s, p, NULL);
1730  	}
1731  
1732  	page->inuse = page->objects;
1733  	page->frozen = 1;
1734  
1735  out:
1736  	if (gfpflags_allow_blocking(flags))
1737  		local_irq_disable();
1738  	if (!page)
1739  		return NULL;
1740  
1741  	inc_slabs_node(s, page_to_nid(page), page->objects);
1742  
1743  	return page;
1744  }
1745  
1746  static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1747  {
1748  	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1749  		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1750  		flags &= ~GFP_SLAB_BUG_MASK;
1751  		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1752  				invalid_mask, &invalid_mask, flags, &flags);
1753  		dump_stack();
1754  	}
1755  
1756  	return allocate_slab(s,
1757  		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1758  }
1759  
1760  static void __free_slab(struct kmem_cache *s, struct page *page)
1761  {
1762  	int order = compound_order(page);
1763  	int pages = 1 << order;
1764  
1765  	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1766  		void *p;
1767  
1768  		slab_pad_check(s, page);
1769  		for_each_object(p, s, page_address(page),
1770  						page->objects)
1771  			check_object(s, page, p, SLUB_RED_INACTIVE);
1772  	}
1773  
1774  	__ClearPageSlabPfmemalloc(page);
1775  	__ClearPageSlab(page);
1776  
1777  	page->mapping = NULL;
1778  	if (current->reclaim_state)
1779  		current->reclaim_state->reclaimed_slab += pages;
1780  	uncharge_slab_page(page, order, s);
1781  	__free_pages(page, order);
1782  }
1783  
1784  static void rcu_free_slab(struct rcu_head *h)
1785  {
1786  	struct page *page = container_of(h, struct page, rcu_head);
1787  
1788  	__free_slab(page->slab_cache, page);
1789  }
1790  
1791  static void free_slab(struct kmem_cache *s, struct page *page)
1792  {
1793  	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1794  		call_rcu(&page->rcu_head, rcu_free_slab);
1795  	} else
1796  		__free_slab(s, page);
1797  }
1798  
1799  static void discard_slab(struct kmem_cache *s, struct page *page)
1800  {
1801  	dec_slabs_node(s, page_to_nid(page), page->objects);
1802  	free_slab(s, page);
1803  }
1804  
1805  /*
1806   * Management of partially allocated slabs.
1807   */
1808  static inline void
1809  __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1810  {
1811  	n->nr_partial++;
1812  	if (tail == DEACTIVATE_TO_TAIL)
1813  		list_add_tail(&page->slab_list, &n->partial);
1814  	else
1815  		list_add(&page->slab_list, &n->partial);
1816  }
1817  
1818  static inline void add_partial(struct kmem_cache_node *n,
1819  				struct page *page, int tail)
1820  {
1821  	lockdep_assert_held(&n->list_lock);
1822  	__add_partial(n, page, tail);
1823  }
1824  
1825  static inline void remove_partial(struct kmem_cache_node *n,
1826  					struct page *page)
1827  {
1828  	lockdep_assert_held(&n->list_lock);
1829  	list_del(&page->slab_list);
1830  	n->nr_partial--;
1831  }
1832  
1833  /*
1834   * Remove slab from the partial list, freeze it and
1835   * return the pointer to the freelist.
1836   *
1837   * Returns a list of objects or NULL if it fails.
1838   */
1839  static inline void *acquire_slab(struct kmem_cache *s,
1840  		struct kmem_cache_node *n, struct page *page,
1841  		int mode, int *objects)
1842  {
1843  	void *freelist;
1844  	unsigned long counters;
1845  	struct page new;
1846  
1847  	lockdep_assert_held(&n->list_lock);
1848  
1849  	/*
1850  	 * Zap the freelist and set the frozen bit.
1851  	 * The old freelist is the list of objects for the
1852  	 * per cpu allocation list.
1853  	 */
1854  	freelist = page->freelist;
1855  	counters = page->counters;
1856  	new.counters = counters;
1857  	*objects = new.objects - new.inuse;
1858  	if (mode) {
1859  		new.inuse = page->objects;
1860  		new.freelist = NULL;
1861  	} else {
1862  		new.freelist = freelist;
1863  	}
1864  
1865  	VM_BUG_ON(new.frozen);
1866  	new.frozen = 1;
1867  
1868  	if (!__cmpxchg_double_slab(s, page,
1869  			freelist, counters,
1870  			new.freelist, new.counters,
1871  			"acquire_slab"))
1872  		return NULL;
1873  
1874  	remove_partial(n, page);
1875  	WARN_ON(!freelist);
1876  	return freelist;
1877  }
1878  
1879  static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1880  static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1881  
1882  /*
1883   * Try to allocate a partial slab from a specific node.
1884   */
1885  static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1886  				struct kmem_cache_cpu *c, gfp_t flags)
1887  {
1888  	struct page *page, *page2;
1889  	void *object = NULL;
1890  	unsigned int available = 0;
1891  	int objects;
1892  
1893  	/*
1894  	 * Racy check. If we mistakenly see no partial slabs then we
1895  	 * just allocate an empty slab. If we mistakenly try to get a
1896  	 * partial slab and there is none available then get_partial_node()
1897  	 * will return NULL.
1898  	 */
1899  	if (!n || !n->nr_partial)
1900  		return NULL;
1901  
1902  	spin_lock(&n->list_lock);
1903  	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
1904  		void *t;
1905  
1906  		if (!pfmemalloc_match(page, flags))
1907  			continue;
1908  
1909  		t = acquire_slab(s, n, page, object == NULL, &objects);
1910  		if (!t)
1911  			break;
1912  
1913  		available += objects;
1914  		if (!object) {
1915  			c->page = page;
1916  			stat(s, ALLOC_FROM_PARTIAL);
1917  			object = t;
1918  		} else {
1919  			put_cpu_partial(s, page, 0);
1920  			stat(s, CPU_PARTIAL_NODE);
1921  		}
1922  		if (!kmem_cache_has_cpu_partial(s)
1923  			|| available > slub_cpu_partial(s) / 2)
1924  			break;
1925  
1926  	}
1927  	spin_unlock(&n->list_lock);
1928  	return object;
1929  }
1930  
1931  /*
1932   * Get a page from somewhere. Search in increasing NUMA distances.
1933   */
1934  static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1935  		struct kmem_cache_cpu *c)
1936  {
1937  #ifdef CONFIG_NUMA
1938  	struct zonelist *zonelist;
1939  	struct zoneref *z;
1940  	struct zone *zone;
1941  	enum zone_type highest_zoneidx = gfp_zone(flags);
1942  	void *object;
1943  	unsigned int cpuset_mems_cookie;
1944  
1945  	/*
1946  	 * The defrag ratio allows configuring the tradeoff between
1947  	 * inter node defragmentation and node local allocations. A lower
1948  	 * defrag_ratio increases the tendency to do local allocations
1949  	 * instead of attempting to obtain partial slabs from other nodes.
1950  	 *
1951  	 * If the defrag_ratio is set to 0 then kmalloc() always
1952  	 * returns node local objects. If the ratio is higher then kmalloc()
1953  	 * may return off node objects because partial slabs are obtained
1954  	 * from other nodes and filled up.
1955  	 *
1956  	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1957  	 * (which makes defrag_ratio = 1000) then every (well almost)
1958  	 * allocation will first attempt to defrag slab caches on other nodes.
1959  	 * This means scanning over all nodes to look for partial slabs which
1960  	 * may be expensive if we do it every time we are trying to find a slab
1961  	 * with available objects.
1962  	 */
1963  	if (!s->remote_node_defrag_ratio ||
1964  			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1965  		return NULL;
1966  
1967  	do {
1968  		cpuset_mems_cookie = read_mems_allowed_begin();
1969  		zonelist = node_zonelist(mempolicy_slab_node(), flags);
1970  		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
1971  			struct kmem_cache_node *n;
1972  
1973  			n = get_node(s, zone_to_nid(zone));
1974  
1975  			if (n && cpuset_zone_allowed(zone, flags) &&
1976  					n->nr_partial > s->min_partial) {
1977  				object = get_partial_node(s, n, c, flags);
1978  				if (object) {
1979  					/*
1980  					 * Don't check read_mems_allowed_retry()
1981  					 * here - if mems_allowed was updated in
1982  					 * parallel, that was a harmless race
1983  					 * between allocation and the cpuset
1984  					 * update
1985  					 */
1986  					return object;
1987  				}
1988  			}
1989  		}
1990  	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1991  #endif	/* CONFIG_NUMA */
1992  	return NULL;
1993  }
1994  
1995  /*
1996   * Get a partial page, lock it and return it.
1997   */
1998  static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1999  		struct kmem_cache_cpu *c)
2000  {
2001  	void *object;
2002  	int searchnode = node;
2003  
2004  	if (node == NUMA_NO_NODE)
2005  		searchnode = numa_mem_id();
2006  
2007  	object = get_partial_node(s, get_node(s, searchnode), c, flags);
2008  	if (object || node != NUMA_NO_NODE)
2009  		return object;
2010  
2011  	return get_any_partial(s, flags, c);
2012  }
2013  
2014  #ifdef CONFIG_PREEMPTION
2015  /*
2016   * Calculate the next globally unique transaction for disambiguation
2017   * during cmpxchg. The transactions start with the cpu number and are then
2018   * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
2019   */
2020  #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
2021  #else
2022  /*
2023   * No preemption supported therefore also no need to check for
2024   * different cpus.
2025   */
2026  #define TID_STEP 1
2027  #endif
2028  
2029  static inline unsigned long next_tid(unsigned long tid)
2030  {
2031  	return tid + TID_STEP;
2032  }
2033  
2034  #ifdef SLUB_DEBUG_CMPXCHG
2035  static inline unsigned int tid_to_cpu(unsigned long tid)
2036  {
2037  	return tid % TID_STEP;
2038  }
2039  
2040  static inline unsigned long tid_to_event(unsigned long tid)
2041  {
2042  	return tid / TID_STEP;
2043  }
2044  #endif
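/*
 * Worked example (editorial note, figures assumed): with CONFIG_NR_CPUS = 6
 * on a preemptible kernel, TID_STEP is rounded up to 8, so the tid stream
 * for cpu 3 is 3, 11, 19, 27, ...  For tid 27, tid_to_cpu(27) == 27 % 8 == 3
 * and tid_to_event(27) == 27 / 8 == 3.  A stale tid observed by the cmpxchg
 * therefore reveals both a cpu migration and how many operations were missed.
 */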
2045  
2046  static inline unsigned int init_tid(int cpu)
2047  {
2048  	return cpu;
2049  }
2050  
2051  static inline void note_cmpxchg_failure(const char *n,
2052  		const struct kmem_cache *s, unsigned long tid)
2053  {
2054  #ifdef SLUB_DEBUG_CMPXCHG
2055  	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2056  
2057  	pr_info("%s %s: cmpxchg redo ", n, s->name);
2058  
2059  #ifdef CONFIG_PREEMPTION
2060  	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2061  		pr_warn("due to cpu change %d -> %d\n",
2062  			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2063  	else
2064  #endif
2065  	if (tid_to_event(tid) != tid_to_event(actual_tid))
2066  		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2067  			tid_to_event(tid), tid_to_event(actual_tid));
2068  	else
2069  		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2070  			actual_tid, tid, next_tid(tid));
2071  #endif
2072  	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2073  }
2074  
2075  static void init_kmem_cache_cpus(struct kmem_cache *s)
2076  {
2077  	int cpu;
2078  
2079  	for_each_possible_cpu(cpu)
2080  		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2081  }
2082  
2083  /*
2084   * Remove the cpu slab
2085   */
2086  static void deactivate_slab(struct kmem_cache *s, struct page *page,
2087  				void *freelist, struct kmem_cache_cpu *c)
2088  {
2089  	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2090  	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2091  	int lock = 0;
2092  	enum slab_modes l = M_NONE, m = M_NONE;
2093  	void *nextfree;
2094  	int tail = DEACTIVATE_TO_HEAD;
2095  	struct page new;
2096  	struct page old;
2097  
2098  	if (page->freelist) {
2099  		stat(s, DEACTIVATE_REMOTE_FREES);
2100  		tail = DEACTIVATE_TO_TAIL;
2101  	}
2102  
2103  	/*
2104  	 * Stage one: Free all available per cpu objects back
2105  	 * to the page freelist while it is still frozen. Leave the
2106  	 * last one.
2107  	 *
2108  	 * There is no need to take the list_lock because the page
2109  	 * is still frozen.
2110  	 */
2111  	while (freelist && (nextfree = get_freepointer(s, freelist))) {
2112  		void *prior;
2113  		unsigned long counters;
2114  
2115  		/*
2116  		 * If 'nextfree' is invalid, it is possible that the object at
2117  		 * 'freelist' is already corrupted.  So isolate all objects
2118  		 * starting at 'freelist'.
2119  		 */
2120  		if (freelist_corrupted(s, page, freelist, nextfree))
2121  			break;
2122  
2123  		do {
2124  			prior = page->freelist;
2125  			counters = page->counters;
2126  			set_freepointer(s, freelist, prior);
2127  			new.counters = counters;
2128  			new.inuse--;
2129  			VM_BUG_ON(!new.frozen);
2130  
2131  		} while (!__cmpxchg_double_slab(s, page,
2132  			prior, counters,
2133  			freelist, new.counters,
2134  			"drain percpu freelist"));
2135  
2136  		freelist = nextfree;
2137  	}
2138  
2139  	/*
2140  	 * Stage two: Ensure that the page is unfrozen while the
2141  	 * list presence reflects the actual number of objects
2142  	 * during unfreeze.
2143  	 *
2144  	 * We setup the list membership and then perform a cmpxchg
2145  	 * We set up the list membership and then perform a cmpxchg
2146  	 * with the count. If there is a mismatch then the page
2147  	 * is not unfrozen but may have been put on the wrong list.
2148  	 * Then we restart the process which may have to remove
2149  	 * the page from the list that we just put it on again
2150  	 * because the number of objects in the slab may have
2151  	 * changed.
2152  	 */
2153  redo:
2154  
2155  	old.freelist = page->freelist;
2156  	old.counters = page->counters;
2157  	VM_BUG_ON(!old.frozen);
2158  
2159  	/* Determine target state of the slab */
2160  	new.counters = old.counters;
2161  	if (freelist) {
2162  		new.inuse--;
2163  		set_freepointer(s, freelist, old.freelist);
2164  		new.freelist = freelist;
2165  	} else
2166  		new.freelist = old.freelist;
2167  
2168  	new.frozen = 0;
2169  
2170  	if (!new.inuse && n->nr_partial >= s->min_partial)
2171  		m = M_FREE;
2172  	else if (new.freelist) {
2173  		m = M_PARTIAL;
2174  		if (!lock) {
2175  			lock = 1;
2176  			/*
2177  			 * Taking the spinlock removes the possibility
2178  			 * that acquire_slab() will see a slab page that
2179  			 * is frozen
2180  			 */
2181  			spin_lock(&n->list_lock);
2182  		}
2183  	} else {
2184  		m = M_FULL;
2185  		if (kmem_cache_debug(s) && !lock) {
2186  			lock = 1;
2187  			/*
2188  			 * This also ensures that the scanning of full
2189  			 * slabs from diagnostic functions will not see
2190  			 * any frozen slabs.
2191  			 */
2192  			spin_lock(&n->list_lock);
2193  		}
2194  	}
2195  
2196  	if (l != m) {
2197  		if (l == M_PARTIAL)
2198  			remove_partial(n, page);
2199  		else if (l == M_FULL)
2200  			remove_full(s, n, page);
2201  
2202  		if (m == M_PARTIAL)
2203  			add_partial(n, page, tail);
2204  		else if (m == M_FULL)
2205  			add_full(s, n, page);
2206  	}
2207  
2208  	l = m;
2209  	if (!__cmpxchg_double_slab(s, page,
2210  				old.freelist, old.counters,
2211  				new.freelist, new.counters,
2212  				"unfreezing slab"))
2213  		goto redo;
2214  
2215  	if (lock)
2216  		spin_unlock(&n->list_lock);
2217  
2218  	if (m == M_PARTIAL)
2219  		stat(s, tail);
2220  	else if (m == M_FULL)
2221  		stat(s, DEACTIVATE_FULL);
2222  	else if (m == M_FREE) {
2223  		stat(s, DEACTIVATE_EMPTY);
2224  		discard_slab(s, page);
2225  		stat(s, FREE_SLAB);
2226  	}
2227  
2228  	c->page = NULL;
2229  	c->freelist = NULL;
2230  }
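/*
 * Concrete scenario (editorial note): a cpu slab with 16 objects of which 3
 * are still in use after stage one ends up with new.freelist != NULL, so
 * m == M_PARTIAL and the page is queued on the node partial list, at the
 * tail if remote frees were seen and at the head otherwise.  Had all objects
 * been free and the node already held min_partial partial slabs, m would be
 * M_FREE and the page would be discarded; a completely full page is only
 * tracked on the full list (M_FULL) when debugging is enabled.
 */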
2231  
2232  /*
2233   * Unfreeze all the cpu partial slabs.
2234   *
2235   * This function must be called with interrupts disabled
2236   * for the cpu using c (or some other guarantee must exist
2237   * that prevents concurrent accesses).
2238   */
2239  static void unfreeze_partials(struct kmem_cache *s,
2240  		struct kmem_cache_cpu *c)
2241  {
2242  #ifdef CONFIG_SLUB_CPU_PARTIAL
2243  	struct kmem_cache_node *n = NULL, *n2 = NULL;
2244  	struct page *page, *discard_page = NULL;
2245  
2246  	while ((page = slub_percpu_partial(c))) {
2247  		struct page new;
2248  		struct page old;
2249  
2250  		slub_set_percpu_partial(c, page);
2251  
2252  		n2 = get_node(s, page_to_nid(page));
2253  		if (n != n2) {
2254  			if (n)
2255  				spin_unlock(&n->list_lock);
2256  
2257  			n = n2;
2258  			spin_lock(&n->list_lock);
2259  		}
2260  
2261  		do {
2262  
2263  			old.freelist = page->freelist;
2264  			old.counters = page->counters;
2265  			VM_BUG_ON(!old.frozen);
2266  
2267  			new.counters = old.counters;
2268  			new.freelist = old.freelist;
2269  
2270  			new.frozen = 0;
2271  
2272  		} while (!__cmpxchg_double_slab(s, page,
2273  				old.freelist, old.counters,
2274  				new.freelist, new.counters,
2275  				"unfreezing slab"));
2276  
2277  		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2278  			page->next = discard_page;
2279  			discard_page = page;
2280  		} else {
2281  			add_partial(n, page, DEACTIVATE_TO_TAIL);
2282  			stat(s, FREE_ADD_PARTIAL);
2283  		}
2284  	}
2285  
2286  	if (n)
2287  		spin_unlock(&n->list_lock);
2288  
2289  	while (discard_page) {
2290  		page = discard_page;
2291  		discard_page = discard_page->next;
2292  
2293  		stat(s, DEACTIVATE_EMPTY);
2294  		discard_slab(s, page);
2295  		stat(s, FREE_SLAB);
2296  	}
2297  #endif	/* CONFIG_SLUB_CPU_PARTIAL */
2298  }
2299  
2300  /*
2301   * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2302   * partial page slot if available.
2303   *
2304   * If we did not find a slot then simply move all the partials to the
2305   * per node partial list.
2306   */
2307  static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2308  {
2309  #ifdef CONFIG_SLUB_CPU_PARTIAL
2310  	struct page *oldpage;
2311  	int pages;
2312  	int pobjects;
2313  
2314  	preempt_disable();
2315  	do {
2316  		pages = 0;
2317  		pobjects = 0;
2318  		oldpage = this_cpu_read(s->cpu_slab->partial);
2319  
2320  		if (oldpage) {
2321  			pobjects = oldpage->pobjects;
2322  			pages = oldpage->pages;
2323  			if (drain && pobjects > slub_cpu_partial(s)) {
2324  				unsigned long flags;
2325  				/*
2326  				 * partial array is full. Move the existing
2327  				 * set to the per node partial list.
2328  				 */
2329  				local_irq_save(flags);
2330  				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2331  				local_irq_restore(flags);
2332  				oldpage = NULL;
2333  				pobjects = 0;
2334  				pages = 0;
2335  				stat(s, CPU_PARTIAL_DRAIN);
2336  			}
2337  		}
2338  
2339  		pages++;
2340  		pobjects += page->objects - page->inuse;
2341  
2342  		page->pages = pages;
2343  		page->pobjects = pobjects;
2344  		page->next = oldpage;
2345  
2346  	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2347  								!= oldpage);
2348  	if (unlikely(!slub_cpu_partial(s))) {
2349  		unsigned long flags;
2350  
2351  		local_irq_save(flags);
2352  		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2353  		local_irq_restore(flags);
2354  	}
2355  	preempt_enable();
2356  #endif	/* CONFIG_SLUB_CPU_PARTIAL */
2357  }
2358  
2359  static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2360  {
2361  	stat(s, CPUSLAB_FLUSH);
2362  	deactivate_slab(s, c->page, c->freelist, c);
2363  
2364  	c->tid = next_tid(c->tid);
2365  }
2366  
2367  /*
2368   * Flush cpu slab.
2369   *
2370   * Called from IPI handler with interrupts disabled.
2371   */
2372  static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2373  {
2374  	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2375  
2376  	if (c->page)
2377  		flush_slab(s, c);
2378  
2379  	unfreeze_partials(s, c);
2380  }
2381  
2382  static void flush_cpu_slab(void *d)
2383  {
2384  	struct kmem_cache *s = d;
2385  
2386  	__flush_cpu_slab(s, smp_processor_id());
2387  }
2388  
2389  static bool has_cpu_slab(int cpu, void *info)
2390  {
2391  	struct kmem_cache *s = info;
2392  	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2393  
2394  	return c->page || slub_percpu_partial(c);
2395  }
2396  
2397  static void flush_all(struct kmem_cache *s)
2398  {
2399  	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
2400  }
2401  
2402  /*
2403   * Use the cpu notifier to ensure that the cpu slabs are flushed when
2404   * necessary.
2405   */
2406  static int slub_cpu_dead(unsigned int cpu)
2407  {
2408  	struct kmem_cache *s;
2409  	unsigned long flags;
2410  
2411  	mutex_lock(&slab_mutex);
2412  	list_for_each_entry(s, &slab_caches, list) {
2413  		local_irq_save(flags);
2414  		__flush_cpu_slab(s, cpu);
2415  		local_irq_restore(flags);
2416  	}
2417  	mutex_unlock(&slab_mutex);
2418  	return 0;
2419  }
2420  
2421  /*
2422   * Check if the objects in a per cpu structure fit numa
2423   * locality expectations.
2424   */
2425  static inline int node_match(struct page *page, int node)
2426  {
2427  #ifdef CONFIG_NUMA
2428  	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
2429  		return 0;
2430  #endif
2431  	return 1;
2432  }
2433  
2434  #ifdef CONFIG_SLUB_DEBUG
2435  static int count_free(struct page *page)
2436  {
2437  	return page->objects - page->inuse;
2438  }
2439  
2440  static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2441  {
2442  	return atomic_long_read(&n->total_objects);
2443  }
2444  #endif /* CONFIG_SLUB_DEBUG */
2445  
2446  #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2447  static unsigned long count_partial(struct kmem_cache_node *n,
2448  					int (*get_count)(struct page *))
2449  {
2450  	unsigned long flags;
2451  	unsigned long x = 0;
2452  	struct page *page;
2453  
2454  	spin_lock_irqsave(&n->list_lock, flags);
2455  	list_for_each_entry(page, &n->partial, slab_list)
2456  		x += get_count(page);
2457  	spin_unlock_irqrestore(&n->list_lock, flags);
2458  	return x;
2459  }
2460  #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2461  
2462  static noinline void
2463  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2464  {
2465  #ifdef CONFIG_SLUB_DEBUG
2466  	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2467  				      DEFAULT_RATELIMIT_BURST);
2468  	int node;
2469  	struct kmem_cache_node *n;
2470  
2471  	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2472  		return;
2473  
2474  	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2475  		nid, gfpflags, &gfpflags);
2476  	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2477  		s->name, s->object_size, s->size, oo_order(s->oo),
2478  		oo_order(s->min));
2479  
2480  	if (oo_order(s->min) > get_order(s->object_size))
2481  		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2482  			s->name);
2483  
2484  	for_each_kmem_cache_node(s, node, n) {
2485  		unsigned long nr_slabs;
2486  		unsigned long nr_objs;
2487  		unsigned long nr_free;
2488  
2489  		nr_free  = count_partial(n, count_free);
2490  		nr_slabs = node_nr_slabs(n);
2491  		nr_objs  = node_nr_objs(n);
2492  
2493  		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2494  			node, nr_slabs, nr_objs, nr_free);
2495  	}
2496  #endif
2497  }
2498  
2499  static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2500  			int node, struct kmem_cache_cpu **pc)
2501  {
2502  	void *freelist;
2503  	struct kmem_cache_cpu *c = *pc;
2504  	struct page *page;
2505  
2506  	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2507  
2508  	freelist = get_partial(s, flags, node, c);
2509  
2510  	if (freelist)
2511  		return freelist;
2512  
2513  	page = new_slab(s, flags, node);
2514  	if (page) {
2515  		c = raw_cpu_ptr(s->cpu_slab);
2516  		if (c->page)
2517  			flush_slab(s, c);
2518  
2519  		/*
2520  		 * No other reference to the page yet so we can
2521  		 * muck around with it freely without cmpxchg
2522  		 */
2523  		freelist = page->freelist;
2524  		page->freelist = NULL;
2525  
2526  		stat(s, ALLOC_SLAB);
2527  		c->page = page;
2528  		*pc = c;
2529  	}
2530  
2531  	return freelist;
2532  }
2533  
2534  static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2535  {
2536  	if (unlikely(PageSlabPfmemalloc(page)))
2537  		return gfp_pfmemalloc_allowed(gfpflags);
2538  
2539  	return true;
2540  }
2541  
2542  /*
2543   * Check the page->freelist of a page and either transfer the freelist to the
2544   * per cpu freelist or deactivate the page.
2545   *
2546   * The page is still frozen if the return value is not NULL.
2547   *
2548   * If this function returns NULL then the page has been unfrozen.
2549   *
2550   * This function must be called with interrupts disabled.
2551   */
2552  static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2553  {
2554  	struct page new;
2555  	unsigned long counters;
2556  	void *freelist;
2557  
2558  	do {
2559  		freelist = page->freelist;
2560  		counters = page->counters;
2561  
2562  		new.counters = counters;
2563  		VM_BUG_ON(!new.frozen);
2564  
2565  		new.inuse = page->objects;
2566  		new.frozen = freelist != NULL;
2567  
2568  	} while (!__cmpxchg_double_slab(s, page,
2569  		freelist, counters,
2570  		NULL, new.counters,
2571  		"get_freelist"));
2572  
2573  	return freelist;
2574  }
2575  
2576  /*
2577   * Slow path. The lockless freelist is empty or we need to perform
2578   * debugging duties.
2579   *
2580   * Processing is still very fast if new objects have been freed to the
2581   * regular freelist. In that case we simply take over the regular freelist
2582   * as the lockless freelist and zap the regular freelist.
2583   *
2584   * If that is not working then we fall back to the partial lists. We take the
2585   * first element of the freelist as the object to allocate now and move the
2586   * rest of the freelist to the lockless freelist.
2587   *
2588   * And if we were unable to get a new slab from the partial slab lists then
2589   * we need to allocate a new slab. This is the slowest path since it involves
2590   * a call to the page allocator and the setup of a new slab.
2591   *
2592   * Version of __slab_alloc to use when we know that interrupts are
2593   * already disabled (which is the case for bulk allocation).
2594   */
2595  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2596  			  unsigned long addr, struct kmem_cache_cpu *c)
2597  {
2598  	void *freelist;
2599  	struct page *page;
2600  
2601  	page = c->page;
2602  	if (!page) {
2603  		/*
2604  		 * if the node is not online or has no normal memory, just
2605  		 * ignore the node constraint
2606  		 */
2607  		if (unlikely(node != NUMA_NO_NODE &&
2608  			     !node_state(node, N_NORMAL_MEMORY)))
2609  			node = NUMA_NO_NODE;
2610  		goto new_slab;
2611  	}
2612  redo:
2613  
2614  	if (unlikely(!node_match(page, node))) {
2615  		/*
2616  		 * same as above but node_match() being false already
2617  		 * implies node != NUMA_NO_NODE
2618  		 */
2619  		if (!node_state(node, N_NORMAL_MEMORY)) {
2620  			node = NUMA_NO_NODE;
2621  			goto redo;
2622  		} else {
2623  			stat(s, ALLOC_NODE_MISMATCH);
2624  			deactivate_slab(s, page, c->freelist, c);
2625  			goto new_slab;
2626  		}
2627  	}
2628  
2629  	/*
2630  	 * By rights, we should be searching for a slab page that was
2631  	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2632  	 * information when the page leaves the per-cpu allocator
2633  	 */
2634  	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2635  		deactivate_slab(s, page, c->freelist, c);
2636  		goto new_slab;
2637  	}
2638  
2639  	/* must check again c->freelist in case of cpu migration or IRQ */
2640  	freelist = c->freelist;
2641  	if (freelist)
2642  		goto load_freelist;
2643  
2644  	freelist = get_freelist(s, page);
2645  
2646  	if (!freelist) {
2647  		c->page = NULL;
2648  		stat(s, DEACTIVATE_BYPASS);
2649  		goto new_slab;
2650  	}
2651  
2652  	stat(s, ALLOC_REFILL);
2653  
2654  load_freelist:
2655  	/*
2656  	 * freelist is pointing to the list of objects to be used.
2657  	 * page is pointing to the page from which the objects are obtained.
2658  	 * That page must be frozen for per cpu allocations to work.
2659  	 */
2660  	VM_BUG_ON(!c->page->frozen);
2661  	c->freelist = get_freepointer(s, freelist);
2662  	c->tid = next_tid(c->tid);
2663  	return freelist;
2664  
2665  new_slab:
2666  
2667  	if (slub_percpu_partial(c)) {
2668  		page = c->page = slub_percpu_partial(c);
2669  		slub_set_percpu_partial(c, page);
2670  		stat(s, CPU_PARTIAL_ALLOC);
2671  		goto redo;
2672  	}
2673  
2674  	freelist = new_slab_objects(s, gfpflags, node, &c);
2675  
2676  	if (unlikely(!freelist)) {
2677  		slab_out_of_memory(s, gfpflags, node);
2678  		return NULL;
2679  	}
2680  
2681  	page = c->page;
2682  	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2683  		goto load_freelist;
2684  
2685  	/* Only entered in the debug case */
2686  	if (kmem_cache_debug(s) &&
2687  			!alloc_debug_processing(s, page, freelist, addr))
2688  		goto new_slab;	/* Slab failed checks. Next slab needed */
2689  
2690  	deactivate_slab(s, page, get_freepointer(s, freelist), c);
2691  	return freelist;
2692  }
2693  
2694  /*
2695   * Another variant that disables interrupts and compensates for possible
2696   * cpu changes by refetching the per cpu area pointer.
2697   */
2698  static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2699  			  unsigned long addr, struct kmem_cache_cpu *c)
2700  {
2701  	void *p;
2702  	unsigned long flags;
2703  
2704  	local_irq_save(flags);
2705  #ifdef CONFIG_PREEMPTION
2706  	/*
2707  	 * We may have been preempted and rescheduled on a different
2708  	 * cpu before disabling interrupts. Need to reload cpu area
2709  	 * pointer.
2710  	 */
2711  	c = this_cpu_ptr(s->cpu_slab);
2712  #endif
2713  
2714  	p = ___slab_alloc(s, gfpflags, node, addr, c);
2715  	local_irq_restore(flags);
2716  	return p;
2717  }
2718  
2719  /*
2720   * If the object has been wiped upon free, make sure it's fully initialized by
2721   * zeroing out the freelist pointer.
2722   */
2723  static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
2724  						   void *obj)
2725  {
2726  	if (unlikely(slab_want_init_on_free(s)) && obj)
2727  		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
2728  }
2729  
2730  /*
2731   * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2732   * have the fastpath folded into their functions. So no function call
2733   * overhead for requests that can be satisfied on the fastpath.
2734   *
2735   * The fastpath works by first checking if the lockless freelist can be used.
2736   * If not then __slab_alloc is called for slow processing.
2737   *
2738   * Otherwise we can simply pick the next object from the lockless free list.
2739   */
2740  static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2741  		gfp_t gfpflags, int node, unsigned long addr)
2742  {
2743  	void *object;
2744  	struct kmem_cache_cpu *c;
2745  	struct page *page;
2746  	unsigned long tid;
2747  
2748  	s = slab_pre_alloc_hook(s, gfpflags);
2749  	if (!s)
2750  		return NULL;
2751  redo:
2752  	/*
2753  	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2754  	 * enabled. We may switch back and forth between cpus while
2755  	 * reading from one cpu area. That does not matter as long
2756  	 * as we end up on the original cpu again when doing the cmpxchg.
2757  	 *
2758  	 * We should guarantee that tid and kmem_cache are retrieved on
2759  	 * the same cpu. They could differ if CONFIG_PREEMPTION is enabled, so
2760  	 * we need to check that they match.
2761  	 */
2762  	do {
2763  		tid = this_cpu_read(s->cpu_slab->tid);
2764  		c = raw_cpu_ptr(s->cpu_slab);
2765  	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
2766  		 unlikely(tid != READ_ONCE(c->tid)));
2767  
2768  	/*
2769  	 * Irqless object alloc/free algorithm used here depends on sequence
2770  	 * of fetching cpu_slab's data. tid should be fetched before anything
2771  	 * on c to guarantee that object and page associated with previous tid
2772  	 * won't be used with current tid. If we fetch tid first, the object and
2773  	 * page could be ones associated with the next tid and our alloc/free
2774  	 * request will fail. In that case we simply retry, so no problem.
2775  	 */
2776  	barrier();
2777  
2778  	/*
2779  	 * The transaction ids are globally unique per cpu and per operation on
2780  	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2781  	 * occurs on the right processor and that there was no operation on the
2782  	 * linked list in between.
2783  	 */
2784  
2785  	object = c->freelist;
2786  	page = c->page;
2787  	if (unlikely(!object || !node_match(page, node))) {
2788  		object = __slab_alloc(s, gfpflags, node, addr, c);
2789  		stat(s, ALLOC_SLOWPATH);
2790  	} else {
2791  		void *next_object = get_freepointer_safe(s, object);
2792  
2793  		/*
2794  		 * The cmpxchg will only match if there was no additional
2795  		 * operation and if we are on the right processor.
2796  		 *
2797  		 * The cmpxchg does the following atomically (without lock
2798  		 * semantics!)
2799  		 * 1. Relocate first pointer to the current per cpu area.
2800  		 * 2. Verify that tid and freelist have not been changed
2801  		 * 3. If they were not changed replace tid and freelist
2802  		 *
2803  		 * Since this is without lock semantics the protection is only
2804  		 * against code executing on this cpu *not* from access by
2805  		 * other cpus.
2806  		 */
2807  		if (unlikely(!this_cpu_cmpxchg_double(
2808  				s->cpu_slab->freelist, s->cpu_slab->tid,
2809  				object, tid,
2810  				next_object, next_tid(tid)))) {
2811  
2812  			note_cmpxchg_failure("slab_alloc", s, tid);
2813  			goto redo;
2814  		}
2815  		prefetch_freepointer(s, next_object);
2816  		stat(s, ALLOC_FASTPATH);
2817  	}
2818  
2819  	maybe_wipe_obj_freeptr(s, object);
2820  
2821  	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
2822  		memset(object, 0, s->object_size);
2823  
2824  	slab_post_alloc_hook(s, gfpflags, 1, &object);
2825  
2826  	return object;
2827  }
2828  
2829  static __always_inline void *slab_alloc(struct kmem_cache *s,
2830  		gfp_t gfpflags, unsigned long addr)
2831  {
2832  	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2833  }
2834  
2835  void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2836  {
2837  	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2838  
2839  	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2840  				s->size, gfpflags);
2841  
2842  	return ret;
2843  }
2844  EXPORT_SYMBOL(kmem_cache_alloc);
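/*
 * Illustrative usage sketch (editorial addition, not part of mm/slub.c):
 * how a typical client of the allocator exercises the fastpath above.
 * The cache name, structure and call site are hypothetical.
 */
#if 0	/* example only */
struct foo {
	int a;
	struct list_head list;
};

static struct kmem_cache *foo_cachep;

static int foo_example_init(void)
{
	struct foo *f;

	/* One cache per object type; objects come from per cpu slabs. */
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;

	/* Usually served from c->freelist without taking any lock. */
	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	/* A free to the same cpu slab takes the fastpath in do_slab_free(). */
	kmem_cache_free(foo_cachep, f);
	return 0;
}
#endif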
2845  
2846  #ifdef CONFIG_TRACING
2847  void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2848  {
2849  	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2850  	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2851  	ret = kasan_kmalloc(s, ret, size, gfpflags);
2852  	return ret;
2853  }
2854  EXPORT_SYMBOL(kmem_cache_alloc_trace);
2855  #endif
2856  
2857  #ifdef CONFIG_NUMA
2858  void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2859  {
2860  	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2861  
2862  	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2863  				    s->object_size, s->size, gfpflags, node);
2864  
2865  	return ret;
2866  }
2867  EXPORT_SYMBOL(kmem_cache_alloc_node);
2868  
2869  #ifdef CONFIG_TRACING
2870  void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2871  				    gfp_t gfpflags,
2872  				    int node, size_t size)
2873  {
2874  	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2875  
2876  	trace_kmalloc_node(_RET_IP_, ret,
2877  			   size, s->size, gfpflags, node);
2878  
2879  	ret = kasan_kmalloc(s, ret, size, gfpflags);
2880  	return ret;
2881  }
2882  EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2883  #endif
2884  #endif	/* CONFIG_NUMA */
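/*
 * Illustrative sketch (editorial addition, hypothetical helper): allocating
 * from a specific NUMA node.  Note that get_partial() above does not borrow
 * partial slabs from other nodes when an explicit node is requested; if the
 * node has none, the slow path allocates a fresh slab there instead.
 */
#if 0	/* example only */
static void *foo_alloc_on_node(struct kmem_cache *cachep, int nid)
{
	/* NUMA_NO_NODE would mean "any node", i.e. plain kmem_cache_alloc(). */
	return kmem_cache_alloc_node(cachep, GFP_KERNEL, nid);
}
#endif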
2885  
2886  /*
2887   * Slow path handling. This may still be called frequently since objects
2888   * have a longer lifetime than the cpu slabs in most processing loads.
2889   *
2890   * So we still attempt to reduce cache line usage. Just take the slab
2891   * lock and free the item. If there is no additional partial page
2892   * handling required then we can return immediately.
2893   */
2894  static void __slab_free(struct kmem_cache *s, struct page *page,
2895  			void *head, void *tail, int cnt,
2896  			unsigned long addr)
2897  
2898  {
2899  	void *prior;
2900  	int was_frozen;
2901  	struct page new;
2902  	unsigned long counters;
2903  	struct kmem_cache_node *n = NULL;
2904  	unsigned long uninitialized_var(flags);
2905  
2906  	stat(s, FREE_SLOWPATH);
2907  
2908  	if (kmem_cache_debug(s) &&
2909  	    !free_debug_processing(s, page, head, tail, cnt, addr))
2910  		return;
2911  
2912  	do {
2913  		if (unlikely(n)) {
2914  			spin_unlock_irqrestore(&n->list_lock, flags);
2915  			n = NULL;
2916  		}
2917  		prior = page->freelist;
2918  		counters = page->counters;
2919  		set_freepointer(s, tail, prior);
2920  		new.counters = counters;
2921  		was_frozen = new.frozen;
2922  		new.inuse -= cnt;
2923  		if ((!new.inuse || !prior) && !was_frozen) {
2924  
2925  			if (kmem_cache_has_cpu_partial(s) && !prior) {
2926  
2927  				/*
2928  				 * Slab was on no list before and will be
2929  				 * partially empty.
2930  				 * We can defer the list move and instead
2931  				 * freeze it.
2932  				 */
2933  				new.frozen = 1;
2934  
2935  			} else { /* Needs to be taken off a list */
2936  
2937  				n = get_node(s, page_to_nid(page));
2938  				/*
2939  				 * Speculatively acquire the list_lock.
2940  				 * If the cmpxchg does not succeed then we may
2941  				 * drop the list_lock without any processing.
2942  				 *
2943  				 * Otherwise the list_lock will synchronize with
2944  				 * other processors updating the list of slabs.
2945  				 */
2946  				spin_lock_irqsave(&n->list_lock, flags);
2947  
2948  			}
2949  		}
2950  
2951  	} while (!cmpxchg_double_slab(s, page,
2952  		prior, counters,
2953  		head, new.counters,
2954  		"__slab_free"));
2955  
2956  	if (likely(!n)) {
2957  
2958  		/*
2959  		 * If we just froze the page then put it onto the
2960  		 * per cpu partial list.
2961  		 */
2962  		if (new.frozen && !was_frozen) {
2963  			put_cpu_partial(s, page, 1);
2964  			stat(s, CPU_PARTIAL_FREE);
2965  		}
2966  		/*
2967  		 * The list lock was not taken, therefore no list
2968  		 * activity is necessary.
2969  		 */
2970  		if (was_frozen)
2971  			stat(s, FREE_FROZEN);
2972  		return;
2973  	}
2974  
2975  	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2976  		goto slab_empty;
2977  
2978  	/*
2979  	 * Objects left in the slab. If it was not on the partial list before
2980  	 * then add it.
2981  	 */
2982  	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2983  		remove_full(s, n, page);
2984  		add_partial(n, page, DEACTIVATE_TO_TAIL);
2985  		stat(s, FREE_ADD_PARTIAL);
2986  	}
2987  	spin_unlock_irqrestore(&n->list_lock, flags);
2988  	return;
2989  
2990  slab_empty:
2991  	if (prior) {
2992  		/*
2993  		 * Slab on the partial list.
2994  		 */
2995  		remove_partial(n, page);
2996  		stat(s, FREE_REMOVE_PARTIAL);
2997  	} else {
2998  		/* Slab must be on the full list */
2999  		remove_full(s, n, page);
3000  	}
3001  
3002  	spin_unlock_irqrestore(&n->list_lock, flags);
3003  	stat(s, FREE_SLAB);
3004  	discard_slab(s, page);
3005  }
3006  
3007  /*
3008   * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
3009   * can perform fastpath freeing without additional function calls.
3010   *
3011   * The fastpath is only possible if we are freeing to the current cpu slab
3012   * of this processor. This is typically the case if we have just allocated
3013   * the item before.
3014   *
3015   * If fastpath is not possible then fall back to __slab_free where we deal
3016   * with all sorts of special processing.
3017   *
3018   * Bulk free of a freelist with several objects (all pointing to the
3019   * same page) is possible by specifying a head and tail ptr, plus an
3020   * object count (cnt). Bulk free is indicated by the tail pointer being set.
3021   */
3022  static __always_inline void do_slab_free(struct kmem_cache *s,
3023  				struct page *page, void *head, void *tail,
3024  				int cnt, unsigned long addr)
3025  {
3026  	void *tail_obj = tail ? : head;
3027  	struct kmem_cache_cpu *c;
3028  	unsigned long tid;
3029  redo:
3030  	/*
3031  	 * Determine the current cpu's per cpu slab.
3032  	 * The cpu may change afterward. However that does not matter since
3033  	 * data is retrieved via this pointer. If we are on the same cpu
3034  	 * during the cmpxchg then the free will succeed.
3035  	 */
3036  	do {
3037  		tid = this_cpu_read(s->cpu_slab->tid);
3038  		c = raw_cpu_ptr(s->cpu_slab);
3039  	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
3040  		 unlikely(tid != READ_ONCE(c->tid)));
3041  
3042  	/* Same as the comment on barrier() in slab_alloc_node() */
3043  	barrier();
3044  
3045  	if (likely(page == c->page)) {
3046  		void **freelist = READ_ONCE(c->freelist);
3047  
3048  		set_freepointer(s, tail_obj, freelist);
3049  
3050  		if (unlikely(!this_cpu_cmpxchg_double(
3051  				s->cpu_slab->freelist, s->cpu_slab->tid,
3052  				freelist, tid,
3053  				head, next_tid(tid)))) {
3054  
3055  			note_cmpxchg_failure("slab_free", s, tid);
3056  			goto redo;
3057  		}
3058  		stat(s, FREE_FASTPATH);
3059  	} else
3060  		__slab_free(s, page, head, tail_obj, cnt, addr);
3061  
3062  }
3063  
3064  static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3065  				      void *head, void *tail, int cnt,
3066  				      unsigned long addr)
3067  {
3068  	/*
3069  	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3070  	 * to remove objects whose reuse must be delayed.
3071  	 */
3072  	if (slab_free_freelist_hook(s, &head, &tail))
3073  		do_slab_free(s, page, head, tail, cnt, addr);
3074  }
3075  
3076  #ifdef CONFIG_KASAN_GENERIC
3077  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3078  {
3079  	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3080  }
3081  #endif
3082  
3083  void kmem_cache_free(struct kmem_cache *s, void *x)
3084  {
3085  	s = cache_from_obj(s, x);
3086  	if (!s)
3087  		return;
3088  	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3089  	trace_kmem_cache_free(_RET_IP_, x);
3090  }
3091  EXPORT_SYMBOL(kmem_cache_free);
3092  
3093  struct detached_freelist {
3094  	struct page *page;
3095  	void *tail;
3096  	void *freelist;
3097  	int cnt;
3098  	struct kmem_cache *s;
3099  };
3100  
3101  /*
3102   * This function progressively scans the array with free objects (with
3103   * a limited look ahead) and extracts objects belonging to the same
3104   * page.  It builds a detached freelist directly within the given
3105   * page/objects.  This can happen without any need for
3106   * synchronization, because the objects are owned by the running process.
3107   * The freelist is built up as a single linked list in the objects.
3108   * The idea is that this detached freelist can then be bulk
3109   * transferred to the real freelist(s), requiring only a single
3110   * synchronization primitive.  Look ahead in the array is limited
3111   * for performance reasons.
3112   */
3113  static inline
3114  int build_detached_freelist(struct kmem_cache *s, size_t size,
3115  			    void **p, struct detached_freelist *df)
3116  {
3117  	size_t first_skipped_index = 0;
3118  	int lookahead = 3;
3119  	void *object;
3120  	struct page *page;
3121  
3122  	/* Always re-init detached_freelist */
3123  	df->page = NULL;
3124  
3125  	do {
3126  		object = p[--size];
3127  		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3128  	} while (!object && size);
3129  
3130  	if (!object)
3131  		return 0;
3132  
3133  	page = virt_to_head_page(object);
3134  	if (!s) {
3135  		/* Handle kmalloc'ed objects */
3136  		if (unlikely(!PageSlab(page))) {
3137  			BUG_ON(!PageCompound(page));
3138  			kfree_hook(object);
3139  			__free_pages(page, compound_order(page));
3140  			p[size] = NULL; /* mark object processed */
3141  			return size;
3142  		}
3143  		/* Derive kmem_cache from object */
3144  		df->s = page->slab_cache;
3145  	} else {
3146  		df->s = cache_from_obj(s, object); /* Support for memcg */
3147  	}
3148  
3149  	/* Start new detached freelist */
3150  	df->page = page;
3151  	set_freepointer(df->s, object, NULL);
3152  	df->tail = object;
3153  	df->freelist = object;
3154  	p[size] = NULL; /* mark object processed */
3155  	df->cnt = 1;
3156  
3157  	while (size) {
3158  		object = p[--size];
3159  		if (!object)
3160  			continue; /* Skip processed objects */
3161  
3162  		/* df->page is always set at this point */
3163  		if (df->page == virt_to_head_page(object)) {
3164  			/* Opportunistically build the freelist */
3165  			set_freepointer(df->s, object, df->freelist);
3166  			df->freelist = object;
3167  			df->cnt++;
3168  			p[size] = NULL; /* mark object processed */
3169  
3170  			continue;
3171  		}
3172  
3173  		/* Limit look ahead search */
3174  		if (!--lookahead)
3175  			break;
3176  
3177  		if (!first_skipped_index)
3178  			first_skipped_index = size + 1;
3179  	}
3180  
3181  	return first_skipped_index;
3182  }
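/*
 * Worked example (editorial note): for p[] = { A1, B1, A2, A3, B2 }, where
 * the A objects share one slab page and the B objects another, the scan
 * starts at the end with B2, links B1 into the detached freelist, and skips
 * A3, A2 and A1 while spending the lookahead budget.  kmem_cache_free_bulk()
 * below then frees the two B objects with a single slab_free() call and
 * loops again over the remaining A objects.
 */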
3183  
3184  /* Note that interrupts must be enabled when calling this function. */
3185  void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3186  {
3187  	if (WARN_ON(!size))
3188  		return;
3189  
3190  	do {
3191  		struct detached_freelist df;
3192  
3193  		size = build_detached_freelist(s, size, p, &df);
3194  		if (!df.page)
3195  			continue;
3196  
3197  		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3198  	} while (likely(size));
3199  }
3200  EXPORT_SYMBOL(kmem_cache_free_bulk);
3201  
3202  /* Note that interrupts must be enabled when calling this function. */
3203  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3204  			  void **p)
3205  {
3206  	struct kmem_cache_cpu *c;
3207  	int i;
3208  
3209  	/* memcg and kmem_cache debug support */
3210  	s = slab_pre_alloc_hook(s, flags);
3211  	if (unlikely(!s))
3212  		return false;
3213  	/*
3214  	 * Drain objects in the per cpu slab, while disabling local
3215  	 * IRQs, which protects against PREEMPT and interrupt
3216  	 * handlers invoking the normal fastpath.
3217  	 */
3218  	local_irq_disable();
3219  	c = this_cpu_ptr(s->cpu_slab);
3220  
3221  	for (i = 0; i < size; i++) {
3222  		void *object = c->freelist;
3223  
3224  		if (unlikely(!object)) {
3225  			/*
3226  			 * We may have removed an object from c->freelist using
3227  			 * the fastpath in the previous iteration; in that case,
3228  			 * c->tid has not been bumped yet.
3229  			 * Since ___slab_alloc() may reenable interrupts while
3230  			 * allocating memory, we should bump c->tid now.
3231  			 */
3232  			c->tid = next_tid(c->tid);
3233  
3234  			/*
3235  			 * Invoking the slow path likely has the side-effect
3236  			 * of re-populating the per CPU c->freelist
3237  			 */
3238  			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3239  					    _RET_IP_, c);
3240  			if (unlikely(!p[i]))
3241  				goto error;
3242  
3243  			c = this_cpu_ptr(s->cpu_slab);
3244  			maybe_wipe_obj_freeptr(s, p[i]);
3245  
3246  			continue; /* goto for-loop */
3247  		}
3248  		c->freelist = get_freepointer(s, object);
3249  		p[i] = object;
3250  		maybe_wipe_obj_freeptr(s, p[i]);
3251  	}
3252  	c->tid = next_tid(c->tid);
3253  	local_irq_enable();
3254  
3255  	/* Clear memory outside IRQ disabled fastpath loop */
3256  	if (unlikely(slab_want_init_on_alloc(flags, s))) {
3257  		int j;
3258  
3259  		for (j = 0; j < i; j++)
3260  			memset(p[j], 0, s->object_size);
3261  	}
3262  
3263  	/* memcg and kmem_cache debug support */
3264  	slab_post_alloc_hook(s, flags, size, p);
3265  	return i;
3266  error:
3267  	local_irq_enable();
3268  	slab_post_alloc_hook(s, flags, i, p);
3269  	__kmem_cache_free_bulk(s, i, p);
3270  	return 0;
3271  }
3272  EXPORT_SYMBOL(kmem_cache_alloc_bulk);
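/*
 * Illustrative usage sketch (editorial addition, hypothetical helper): bulk
 * allocation and bulk free pay the IRQ-disable / cmpxchg cost once per batch
 * instead of once per object.  The cache and batch size are assumptions.
 */
#if 0	/* example only */
static int foo_bulk_example(struct kmem_cache *cachep)
{
	void *objs[16];
	int got;

	/* Returns the number of objects allocated, or 0 on failure. */
	got = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!got)
		return -ENOMEM;

	/* ... use objs[0..got-1] ... */

	/* Objects sharing a slab page are returned with a single cmpxchg. */
	kmem_cache_free_bulk(cachep, got, objs);
	return 0;
}
#endif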
3273  
3274  
3275  /*
3276   * Object placement in a slab is made very easy because we always start at
3277   * offset 0. If we tune the size of the object to the alignment then we can
3278   * get the required alignment by putting one properly sized object after
3279   * another.
3280   *
3281   * Notice that the allocation order determines the sizes of the per cpu
3282   * caches. Each processor always has one slab available for allocations.
3283   * Increasing the allocation order reduces the number of times that slabs
3284   * must be moved on and off the partial lists and is therefore a factor in
3285   * locking overhead.
3286   */
3287  
3288  /*
3289   * Minimum / Maximum order of slab pages. This influences locking overhead
3290   * and slab fragmentation. A higher order reduces the number of partial slabs
3291   * and increases the number of allocations possible without having to
3292   * take the list_lock.
3293   */
3294  static unsigned int slub_min_order;
3295  static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3296  static unsigned int slub_min_objects;
3297  
3298  /*
3299   * Calculate the order of allocation given a slab object size.
3300   *
3301   * The order of allocation has significant impact on performance and other
3302   * system components. Generally order 0 allocations should be preferred since
3303   * order 0 does not cause fragmentation in the page allocator. Larger objects
3304   * can be problematic to put into order 0 slabs because there may be too much
3305   * unused space left. We go to a higher order if more than 1/16th of the slab
3306   * would be wasted.
3307   *
3308   * In order to reach satisfactory performance we must ensure that a minimum
3309   * number of objects is in one slab. Otherwise we may generate too much
3310   * activity on the partial lists which requires taking the list_lock. This is
3311   * less of a concern for large slabs, though, as they are rarely used.
3312   *
3313   * slub_max_order specifies the order where we begin to stop considering the
3314   * number of objects in a slab as critical. If we reach slub_max_order then
3315   * we try to keep the page order as low as possible. So we accept more waste
3316   * of space in favor of a small page order.
3317   *
3318   * Higher order allocations also allow the placement of more objects in a
3319   * slab and thereby reduce object handling overhead. If the user has
3320   * requested a higher minimum order then we start with that one instead of
3321   * the smallest order which will fit the object.
3322   */
3323  static inline unsigned int slab_order(unsigned int size,
3324  		unsigned int min_objects, unsigned int max_order,
3325  		unsigned int fract_leftover)
3326  {
3327  	unsigned int min_order = slub_min_order;
3328  	unsigned int order;
3329  
3330  	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3331  		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3332  
3333  	for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3334  			order <= max_order; order++) {
3335  
3336  		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3337  		unsigned int rem;
3338  
3339  		rem = slab_size % size;
3340  
3341  		if (rem <= slab_size / fract_leftover)
3342  			break;
3343  	}
3344  
3345  	return order;
3346  }
3347  
3348  static inline int calculate_order(unsigned int size)
3349  {
3350  	unsigned int order;
3351  	unsigned int min_objects;
3352  	unsigned int max_objects;
3353  
3354  	/*
3355  	 * Attempt to find best configuration for a slab. This
3356  	 * works by first attempting to generate a layout with
3357  	 * the best configuration and backing off gradually.
3358  	 *
3359  	 * First we increase the acceptable waste in a slab. Then
3360  	 * we reduce the minimum objects required in a slab.
3361  	 */
3362  	min_objects = slub_min_objects;
3363  	if (!min_objects)
3364  		min_objects = 4 * (fls(nr_cpu_ids) + 1);
3365  	max_objects = order_objects(slub_max_order, size);
3366  	min_objects = min(min_objects, max_objects);
3367  
3368  	while (min_objects > 1) {
3369  		unsigned int fraction;
3370  
3371  		fraction = 16;
3372  		while (fraction >= 4) {
3373  			order = slab_order(size, min_objects,
3374  					slub_max_order, fraction);
3375  			if (order <= slub_max_order)
3376  				return order;
3377  			fraction /= 2;
3378  		}
3379  		min_objects--;
3380  	}
3381  
3382  	/*
3383  	 * We were unable to place multiple objects in a slab. Now
3384  	 * let's see if we can place a single object there.
3385  	 */
3386  	order = slab_order(size, 1, slub_max_order, 1);
3387  	if (order <= slub_max_order)
3388  		return order;
3389  
3390  	/*
3391  	 * Doh this slab cannot be placed using slub_max_order.
3392  	 */
3393  	order = slab_order(size, 1, MAX_ORDER, 1);
3394  	if (order < MAX_ORDER)
3395  		return order;
3396  	return -ENOSYS;
3397  }
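/*
 * Worked example (editorial note, assuming 4K pages and the default
 * slub_min_order of 0): for a 700 byte object with min_objects = 8,
 * slab_order() starts at order 1 since get_order(8 * 700) == 1.  An order-1
 * slab holds 11 objects and wastes 8192 % 700 = 492 bytes, below the 1/16
 * threshold of 8192 / 16 = 512, so order 1 is chosen.  An order-0 slab would
 * hold only 5 objects and waste 596 bytes, more than 4096 / 16 = 256.
 */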
3398  
3399  static void
3400  init_kmem_cache_node(struct kmem_cache_node *n)
3401  {
3402  	n->nr_partial = 0;
3403  	spin_lock_init(&n->list_lock);
3404  	INIT_LIST_HEAD(&n->partial);
3405  #ifdef CONFIG_SLUB_DEBUG
3406  	atomic_long_set(&n->nr_slabs, 0);
3407  	atomic_long_set(&n->total_objects, 0);
3408  	INIT_LIST_HEAD(&n->full);
3409  #endif
3410  }
3411  
3412  static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3413  {
3414  	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3415  			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3416  
3417  	/*
3418  	 * Must align to double word boundary for the double cmpxchg
3419  	 * instructions to work; see __pcpu_double_call_return_bool().
3420  	 */
3421  	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3422  				     2 * sizeof(void *));
3423  
3424  	if (!s->cpu_slab)
3425  		return 0;
3426  
3427  	init_kmem_cache_cpus(s);
3428  
3429  	return 1;
3430  }
3431  
3432  static struct kmem_cache *kmem_cache_node;
3433  
3434  /*
3435   * No kmalloc_node yet so do it by hand. We know that this is the first
3436   * slab on the node for this slabcache. There are no concurrent accesses
3437   * possible.
3438   *
3439   * Note that this function only works on the kmem_cache_node
3440   * when allocating for the kmem_cache_node. This is used for bootstrapping
3441   * memory on a fresh node that has no slab structures yet.
3442   */
3443  static void early_kmem_cache_node_alloc(int node)
3444  {
3445  	struct page *page;
3446  	struct kmem_cache_node *n;
3447  
3448  	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3449  
3450  	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3451  
3452  	BUG_ON(!page);
3453  	if (page_to_nid(page) != node) {
3454  		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3455  		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3456  	}
3457  
3458  	n = page->freelist;
3459  	BUG_ON(!n);
3460  #ifdef CONFIG_SLUB_DEBUG
3461  	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3462  	init_tracking(kmem_cache_node, n);
3463  #endif
3464  	n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3465  		      GFP_KERNEL);
3466  	page->freelist = get_freepointer(kmem_cache_node, n);
3467  	page->inuse = 1;
3468  	page->frozen = 0;
3469  	kmem_cache_node->node[node] = n;
3470  	init_kmem_cache_node(n);
3471  	inc_slabs_node(kmem_cache_node, node, page->objects);
3472  
3473  	/*
3474  	 * No locks need to be taken here as it has just been
3475  	 * initialized and there is no concurrent access.
3476  	 */
3477  	__add_partial(n, page, DEACTIVATE_TO_HEAD);
3478  }
3479  
3480  static void free_kmem_cache_nodes(struct kmem_cache *s)
3481  {
3482  	int node;
3483  	struct kmem_cache_node *n;
3484  
3485  	for_each_kmem_cache_node(s, node, n) {
3486  		s->node[node] = NULL;
3487  		kmem_cache_free(kmem_cache_node, n);
3488  	}
3489  }
3490  
3491  void __kmem_cache_release(struct kmem_cache *s)
3492  {
3493  	cache_random_seq_destroy(s);
3494  	free_percpu(s->cpu_slab);
3495  	free_kmem_cache_nodes(s);
3496  }
3497  
3498  static int init_kmem_cache_nodes(struct kmem_cache *s)
3499  {
3500  	int node;
3501  
3502  	for_each_node_state(node, N_NORMAL_MEMORY) {
3503  		struct kmem_cache_node *n;
3504  
3505  		if (slab_state == DOWN) {
3506  			early_kmem_cache_node_alloc(node);
3507  			continue;
3508  		}
3509  		n = kmem_cache_alloc_node(kmem_cache_node,
3510  						GFP_KERNEL, node);
3511  
3512  		if (!n) {
3513  			free_kmem_cache_nodes(s);
3514  			return 0;
3515  		}
3516  
3517  		init_kmem_cache_node(n);
3518  		s->node[node] = n;
3519  	}
3520  	return 1;
3521  }
3522  
3523  static void set_min_partial(struct kmem_cache *s, unsigned long min)
3524  {
3525  	if (min < MIN_PARTIAL)
3526  		min = MIN_PARTIAL;
3527  	else if (min > MAX_PARTIAL)
3528  		min = MAX_PARTIAL;
3529  	s->min_partial = min;
3530  }
3531  
3532  static void set_cpu_partial(struct kmem_cache *s)
3533  {
3534  #ifdef CONFIG_SLUB_CPU_PARTIAL
3535  	/*
3536  	 * cpu_partial determines the maximum number of objects kept in the
3537  	 * per cpu partial lists of a processor.
3538  	 *
3539  	 * Per cpu partial lists mainly contain slabs that just have one
3540  	 * object freed. If they are used for allocation then they can be
3541  	 * filled up again with minimal effort. The slab will never hit the
3542  	 * per node partial lists and therefore no locking will be required.
3543  	 *
3544  	 * This setting also determines
3545  	 *
3546  	 * A) The number of objects from per cpu partial slabs dumped to the
3547  	 *    per node list when we reach the limit.
3548  	 * B) The number of objects in cpu partial slabs to extract from the
3549  	 *    per node list when we run out of per cpu objects. We only fetch
3550  	 *    50% to keep some capacity around for frees.
3551  	 */
3552  	if (!kmem_cache_has_cpu_partial(s))
3553  		slub_set_cpu_partial(s, 0);
3554  	else if (s->size >= PAGE_SIZE)
3555  		slub_set_cpu_partial(s, 2);
3556  	else if (s->size >= 1024)
3557  		slub_set_cpu_partial(s, 6);
3558  	else if (s->size >= 256)
3559  		slub_set_cpu_partial(s, 13);
3560  	else
3561  		slub_set_cpu_partial(s, 30);
3562  #endif
3563  }
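/*
 * Worked example (editorial note): a cache with s->size = 512 falls into the
 * "s->size >= 256" bucket above, so slub_cpu_partial(s) becomes 13.
 * put_cpu_partial() then drains the per cpu partial list to the node list
 * once more than 13 free objects have accumulated, and get_partial_node()
 * stops taking extra partial slabs once it has gathered more than 13 / 2
 * free objects.
 */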
3564  
3565  /*
3566   * calculate_sizes() determines the order and the distribution of data within
3567   * a slab object.
3568   */
3569  static int calculate_sizes(struct kmem_cache *s, int forced_order)
3570  {
3571  	slab_flags_t flags = s->flags;
3572  	unsigned int size = s->object_size;
3573  	unsigned int freepointer_area;
3574  	unsigned int order;
3575  
3576  	/*
3577  	 * Round up object size to the next word boundary. We can only
3578  	 * place the free pointer at word boundaries and this determines
3579  	 * the possible location of the free pointer.
3580  	 */
3581  	size = ALIGN(size, sizeof(void *));
3582  	/*
3583  	 * This is the area of the object where a freepointer can be
3584  	 * safely written. If redzoning adds more to the inuse size, we
3585  	 * can't use that portion for writing the freepointer, so
3586  	 * s->offset must be limited within this for the general case.
3587  	 */
3588  	freepointer_area = size;
3589  
3590  #ifdef CONFIG_SLUB_DEBUG
3591  	/*
3592  	 * Determine if we can poison the object itself. If the user of
3593  	 * the slab may touch the object after free or before allocation
3594  	 * then we should never poison the object itself.
3595  	 */
3596  	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3597  			!s->ctor)
3598  		s->flags |= __OBJECT_POISON;
3599  	else
3600  		s->flags &= ~__OBJECT_POISON;
3601  
3602  
3603  	/*
3604  	 * If we are Redzoning, check whether there is some space between the
3605  	 * end of the object and the free pointer. If not, add an additional
3606  	 * word so that there are some bytes to store Redzone information.
3607  	 */
3608  	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3609  		size += sizeof(void *);
3610  #endif
3611  
3612  	/*
3613  	 * With that we have determined the number of bytes in actual use
3614  	 * by the object. This is the potential offset to the free pointer.
3615  	 */
3616  	s->inuse = size;
3617  
3618  	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3619  		s->ctor)) {
3620  		/*
3621  		 * Relocate free pointer after the object if it is not
3622  		 * permitted to overwrite the first word of the object on
3623  		 * kmem_cache_free.
3624  		 *
3625  		 * This is the case if we do RCU, have a constructor or
3626  		 * destructor or are poisoning the objects.
3627  		 *
3628  		 * The assumption that s->offset >= s->inuse means free
3629  		 * pointer is outside of the object is used in the
3630  		 * freeptr_outside_object() function. If that is no
3631  		 * longer true, the function needs to be modified.
3632  		 */
3633  		s->offset = size;
3634  		size += sizeof(void *);
3635  	} else if (freepointer_area > sizeof(void *)) {
3636  		/*
3637  		 * Store freelist pointer near middle of object to keep
3638  		 * it away from the edges of the object to avoid small
3639  		 * sized over/underflows from neighboring allocations.
3640  		 */
3641  		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
3642  	}
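	/*
	 * To illustrate the middle placement above (a sketch assuming a
	 * 64-bit build and no ctor/RCU/poisoning): a 64-byte object has
	 * freepointer_area == 64, so s->offset = ALIGN(32, 8) == 32 and
	 * the freelist pointer occupies bytes 32..39 of a free object,
	 * away from both edges.
	 */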
3643  
3644  #ifdef CONFIG_SLUB_DEBUG
3645  	if (flags & SLAB_STORE_USER)
3646  		/*
3647  		 * Need to store information about allocs and frees after
3648  		 * the object.
3649  		 */
3650  		size += 2 * sizeof(struct track);
3651  #endif
3652  
3653  	kasan_cache_create(s, &size, &s->flags);
3654  #ifdef CONFIG_SLUB_DEBUG
3655  	if (flags & SLAB_RED_ZONE) {
3656  		/*
3657  		 * Add some empty padding so that we can catch
3658  		 * overwrites from earlier objects rather than let
3659  		 * tracking information or the free pointer be
3660  		 * corrupted if a user writes before the start
3661  		 * of the object.
3662  		 */
3663  		size += sizeof(void *);
3664  
3665  		s->red_left_pad = sizeof(void *);
3666  		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3667  		size += s->red_left_pad;
3668  	}
3669  #endif
3670  
3671  	/*
3672  	 * SLUB stores one object immediately after another beginning from
3673  	 * offset 0. In order to align the objects we simply size each
3674  	 * object up to conform to the requested alignment.
3675  	 */
3676  	size = ALIGN(size, s->align);
3677  	s->size = size;
3678  	if (forced_order >= 0)
3679  		order = forced_order;
3680  	else
3681  		order = calculate_order(size);
3682  
3683  	if ((int)order < 0)
3684  		return 0;
3685  
3686  	s->allocflags = 0;
3687  	if (order)
3688  		s->allocflags |= __GFP_COMP;
3689  
3690  	if (s->flags & SLAB_CACHE_DMA)
3691  		s->allocflags |= GFP_DMA;
3692  
3693  	if (s->flags & SLAB_CACHE_DMA32)
3694  		s->allocflags |= GFP_DMA32;
3695  
3696  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3697  		s->allocflags |= __GFP_RECLAIMABLE;
3698  
3699  	/*
3700  	 * Determine the number of objects per slab
3701  	 */
3702  	s->oo = oo_make(order, size);
3703  	s->min = oo_make(get_order(size), size);
3704  	if (oo_objects(s->oo) > oo_objects(s->max))
3705  		s->max = s->oo;
3706  
3707  	return !!oo_objects(s->oo);
3708  }
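/*
 * Sketch of the resulting layout for a debug cache (field widths vary
 * with the config, so this is indicative only):
 *
 *   [red_left_pad][object][red zone word][free pointer, if relocated]
 *   [2 x struct track][KASAN metadata, if any]
 *
 * with the total rounded up to s->align. For caches without a ctor, RCU
 * or poisoning the free pointer instead overlays the middle of the
 * object itself, as set up above.
 */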
3709  
3710  static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3711  {
3712  	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3713  #ifdef CONFIG_SLAB_FREELIST_HARDENED
3714  	s->random = get_random_long();
3715  #endif
3716  
3717  	if (!calculate_sizes(s, -1))
3718  		goto error;
3719  	if (disable_higher_order_debug) {
3720  		/*
3721  		 * Disable debugging flags that store metadata if the min slab
3722  		 * order increased.
3723  		 */
3724  		if (get_order(s->size) > get_order(s->object_size)) {
3725  			s->flags &= ~DEBUG_METADATA_FLAGS;
3726  			s->offset = 0;
3727  			if (!calculate_sizes(s, -1))
3728  				goto error;
3729  		}
3730  	}
3731  
3732  #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3733      defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3734  	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3735  		/* Enable fast mode */
3736  		s->flags |= __CMPXCHG_DOUBLE;
3737  #endif
3738  
3739  	/*
3740  	 * The larger the object size is, the more pages we want on the partial
3741  	 * list to avoid pounding the page allocator excessively.
3742  	 */
3743  	set_min_partial(s, ilog2(s->size) / 2);
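	/*
	 * E.g. ilog2(4096) / 2 == 6, so a cache of 4K objects keeps at
	 * least six partial slabs per node; set_min_partial() raises the
	 * value for small caches to MIN_PARTIAL and caps it at MAX_PARTIAL.
	 */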
3744  
3745  	set_cpu_partial(s);
3746  
3747  #ifdef CONFIG_NUMA
3748  	s->remote_node_defrag_ratio = 1000;
3749  #endif
3750  
3751  	/* Initialize the pre-computed randomized freelist if slab is up */
3752  	if (slab_state >= UP) {
3753  		if (init_cache_random_seq(s))
3754  			goto error;
3755  	}
3756  
3757  	if (!init_kmem_cache_nodes(s))
3758  		goto error;
3759  
3760  	if (alloc_kmem_cache_cpus(s))
3761  		return 0;
3762  
3763  	free_kmem_cache_nodes(s);
3764  error:
3765  	return -EINVAL;
3766  }
3767  
3768  static void list_slab_objects(struct kmem_cache *s, struct page *page,
3769  			      const char *text, unsigned long *map)
3770  {
3771  #ifdef CONFIG_SLUB_DEBUG
3772  	void *addr = page_address(page);
3773  	void *p;
3774  
3775  	if (!map)
3776  		return;
3777  
3778  	slab_err(s, page, text, s->name);
3779  	slab_lock(page);
3780  
3781  	map = get_map(s, page);
3782  	for_each_object(p, s, addr, page->objects) {
3783  
3784  		if (!test_bit(slab_index(p, s, addr), map)) {
3785  			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3786  			print_tracking(s, p);
3787  		}
3788  	}
3789  	slab_unlock(page);
3790  #endif
3791  }
3792  
3793  /*
3794   * Attempt to free all partial slabs on a node.
3795   * This is called from __kmem_cache_shutdown(). We must take list_lock
3796   * because sysfs files might still access the partial list after the shutdown.
3797   */
3798  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3799  {
3800  	LIST_HEAD(discard);
3801  	struct page *page, *h;
3802  	unsigned long *map = NULL;
3803  
3804  #ifdef CONFIG_SLUB_DEBUG
3805  	map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
3806  #endif
3807  
3808  	BUG_ON(irqs_disabled());
3809  	spin_lock_irq(&n->list_lock);
3810  	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3811  		if (!page->inuse) {
3812  			remove_partial(n, page);
3813  			list_add(&page->slab_list, &discard);
3814  		} else {
3815  			list_slab_objects(s, page,
3816  			  "Objects remaining in %s on __kmem_cache_shutdown()",
3817  			  map);
3818  		}
3819  	}
3820  	spin_unlock_irq(&n->list_lock);
3821  
3822  #ifdef CONFIG_SLUB_DEBUG
3823  	bitmap_free(map);
3824  #endif
3825  
3826  	list_for_each_entry_safe(page, h, &discard, slab_list)
3827  		discard_slab(s, page);
3828  }
3829  
3830  bool __kmem_cache_empty(struct kmem_cache *s)
3831  {
3832  	int node;
3833  	struct kmem_cache_node *n;
3834  
3835  	for_each_kmem_cache_node(s, node, n)
3836  		if (n->nr_partial || slabs_node(s, node))
3837  			return false;
3838  	return true;
3839  }
3840  
3841  /*
3842   * Release all resources used by a slab cache.
3843   */
3844  int __kmem_cache_shutdown(struct kmem_cache *s)
3845  {
3846  	int node;
3847  	struct kmem_cache_node *n;
3848  
3849  	flush_all(s);
3850  	/* Attempt to free all objects */
3851  	for_each_kmem_cache_node(s, node, n) {
3852  		free_partial(s, n);
3853  		if (n->nr_partial || slabs_node(s, node))
3854  			return 1;
3855  	}
3856  	sysfs_slab_remove(s);
3857  	return 0;
3858  }
3859  
3860  /********************************************************************
3861   *		Kmalloc subsystem
3862   *******************************************************************/
3863  
3864  static int __init setup_slub_min_order(char *str)
3865  {
3866  	get_option(&str, (int *)&slub_min_order);
3867  
3868  	return 1;
3869  }
3870  
3871  __setup("slub_min_order=", setup_slub_min_order);
3872  
3873  static int __init setup_slub_max_order(char *str)
3874  {
3875  	get_option(&str, (int *)&slub_max_order);
3876  	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
3877  
3878  	return 1;
3879  }
3880  
3881  __setup("slub_max_order=", setup_slub_max_order);
3882  
3883  static int __init setup_slub_min_objects(char *str)
3884  {
3885  	get_option(&str, (int *)&slub_min_objects);
3886  
3887  	return 1;
3888  }
3889  
3890  __setup("slub_min_objects=", setup_slub_min_objects);
3891  
3892  void *__kmalloc(size_t size, gfp_t flags)
3893  {
3894  	struct kmem_cache *s;
3895  	void *ret;
3896  
3897  	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3898  		return kmalloc_large(size, flags);
3899  
3900  	s = kmalloc_slab(size, flags);
3901  
3902  	if (unlikely(ZERO_OR_NULL_PTR(s)))
3903  		return s;
3904  
3905  	ret = slab_alloc(s, flags, _RET_IP_);
3906  
3907  	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3908  
3909  	ret = kasan_kmalloc(s, ret, size, flags);
3910  
3911  	return ret;
3912  }
3913  EXPORT_SYMBOL(__kmalloc);
3914  
3915  #ifdef CONFIG_NUMA
3916  static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3917  {
3918  	struct page *page;
3919  	void *ptr = NULL;
3920  	unsigned int order = get_order(size);
3921  
3922  	flags |= __GFP_COMP;
3923  	page = alloc_pages_node(node, flags, order);
3924  	if (page) {
3925  		ptr = page_address(page);
3926  		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
3927  				    1 << order);
3928  	}
3929  
3930  	return kmalloc_large_node_hook(ptr, size, flags);
3931  }
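/*
 * For example (illustrative, assuming 4K pages):
 * kmalloc_large_node(10000, GFP_KERNEL, node) computes
 * get_order(10000) == 2, allocates an order-2 (16K) compound page,
 * accounts it as 4 NR_SLAB_UNRECLAIMABLE pages and returns its address.
 */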
3932  
3933  void *__kmalloc_node(size_t size, gfp_t flags, int node)
3934  {
3935  	struct kmem_cache *s;
3936  	void *ret;
3937  
3938  	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3939  		ret = kmalloc_large_node(size, flags, node);
3940  
3941  		trace_kmalloc_node(_RET_IP_, ret,
3942  				   size, PAGE_SIZE << get_order(size),
3943  				   flags, node);
3944  
3945  		return ret;
3946  	}
3947  
3948  	s = kmalloc_slab(size, flags);
3949  
3950  	if (unlikely(ZERO_OR_NULL_PTR(s)))
3951  		return s;
3952  
3953  	ret = slab_alloc_node(s, flags, node, _RET_IP_);
3954  
3955  	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3956  
3957  	ret = kasan_kmalloc(s, ret, size, flags);
3958  
3959  	return ret;
3960  }
3961  EXPORT_SYMBOL(__kmalloc_node);
3962  #endif	/* CONFIG_NUMA */
3963  
3964  #ifdef CONFIG_HARDENED_USERCOPY
3965  /*
3966   * Rejects incorrectly sized objects and objects that are to be copied
3967   * to/from userspace but do not fall entirely within the containing slab
3968   * cache's usercopy region.
3969   *
3970   * Returns normally if the check passes; on failure it reports the
3971   * offending cache via usercopy_warn() or aborts via usercopy_abort().
3972   */
3973  void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3974  			 bool to_user)
3975  {
3976  	struct kmem_cache *s;
3977  	unsigned int offset;
3978  	size_t object_size;
3979  
3980  	ptr = kasan_reset_tag(ptr);
3981  
3982  	/* Find object and usable object size. */
3983  	s = page->slab_cache;
3984  
3985  	/* Reject impossible pointers. */
3986  	if (ptr < page_address(page))
3987  		usercopy_abort("SLUB object not in SLUB page?!", NULL,
3988  			       to_user, 0, n);
3989  
3990  	/* Find offset within object. */
3991  	offset = (ptr - page_address(page)) % s->size;
3992  
3993  	/* Adjust for redzone and reject if within the redzone. */
3994  	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3995  		if (offset < s->red_left_pad)
3996  			usercopy_abort("SLUB object in left red zone",
3997  				       s->name, to_user, offset, n);
3998  		offset -= s->red_left_pad;
3999  	}
4000  
4001  	/* Allow address range falling entirely within usercopy region. */
4002  	if (offset >= s->useroffset &&
4003  	    offset - s->useroffset <= s->usersize &&
4004  	    n <= s->useroffset - offset + s->usersize)
4005  		return;
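	/*
	 * Worked example of the check above (hypothetical numbers): with
	 * s->useroffset == 16 and s->usersize == 32, a copy of n == 20
	 * bytes at offset == 24 passes (24 >= 16, 24 - 16 <= 32 and
	 * 20 <= 16 - 24 + 32 == 24), while a 32-byte copy at the same
	 * offset falls through to the checks below.
	 */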
4006  
4007  	/*
4008  	 * If the copy is still within the allocated object, produce
4009  	 * a warning instead of rejecting the copy. This is intended
4010  	 * to be a temporary method to find any missing usercopy
4011  	 * whitelists.
4012  	 */
4013  	object_size = slab_ksize(s);
4014  	if (usercopy_fallback &&
4015  	    offset <= object_size && n <= object_size - offset) {
4016  		usercopy_warn("SLUB object", s->name, to_user, offset, n);
4017  		return;
4018  	}
4019  
4020  	usercopy_abort("SLUB object", s->name, to_user, offset, n);
4021  }
4022  #endif /* CONFIG_HARDENED_USERCOPY */
4023  
4024  size_t __ksize(const void *object)
4025  {
4026  	struct page *page;
4027  
4028  	if (unlikely(object == ZERO_SIZE_PTR))
4029  		return 0;
4030  
4031  	page = virt_to_head_page(object);
4032  
4033  	if (unlikely(!PageSlab(page))) {
4034  		WARN_ON(!PageCompound(page));
4035  		return page_size(page);
4036  	}
4037  
4038  	return slab_ksize(page->slab_cache);
4039  }
4040  EXPORT_SYMBOL(__ksize);
4041  
4042  void kfree(const void *x)
4043  {
4044  	struct page *page;
4045  	void *object = (void *)x;
4046  
4047  	trace_kfree(_RET_IP_, x);
4048  
4049  	if (unlikely(ZERO_OR_NULL_PTR(x)))
4050  		return;
4051  
4052  	page = virt_to_head_page(x);
4053  	if (unlikely(!PageSlab(page))) {
4054  		unsigned int order = compound_order(page);
4055  
4056  		BUG_ON(!PageCompound(page));
4057  		kfree_hook(object);
4058  		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
4059  				    -(1 << order));
4060  		__free_pages(page, order);
4061  		return;
4062  	}
4063  	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4064  }
4065  EXPORT_SYMBOL(kfree);
4066  
4067  #define SHRINK_PROMOTE_MAX 32
4068  
4069  /*
4070   * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4071   * up most to the head of the partial lists. New allocations will then
4072   * fill those up and thus they can be removed from the partial lists.
4073   *
4074   * The slabs with the fewest objects in use are placed last. This results
4075   * in them being allocated from last, increasing the chance that their
4076   * remaining objects are freed and the slabs can eventually be discarded.
4077   */
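/*
 * Concretely (a sketch): a partial slab with page->objects == 32 and
 * page->inuse == 30 has free == 2 and is moved to promote[1]. The splice
 * loop below walks promote[] from SHRINK_PROMOTE_MAX - 1 down to 0, so
 * that slab ends up near the head of n->partial, just behind any slabs
 * that have a single free object.
 */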
4078  int __kmem_cache_shrink(struct kmem_cache *s)
4079  {
4080  	int node;
4081  	int i;
4082  	struct kmem_cache_node *n;
4083  	struct page *page;
4084  	struct page *t;
4085  	struct list_head discard;
4086  	struct list_head promote[SHRINK_PROMOTE_MAX];
4087  	unsigned long flags;
4088  	int ret = 0;
4089  
4090  	flush_all(s);
4091  	for_each_kmem_cache_node(s, node, n) {
4092  		INIT_LIST_HEAD(&discard);
4093  		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
4094  			INIT_LIST_HEAD(promote + i);
4095  
4096  		spin_lock_irqsave(&n->list_lock, flags);
4097  
4098  		/*
4099  		 * Build lists of slabs to discard or promote.
4100  		 *
4101  		 * Note that concurrent frees may occur while we hold the
4102  		 * list_lock. page->inuse here is the upper limit.
4103  		 */
4104  		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
4105  			int free = page->objects - page->inuse;
4106  
4107  			/* Do not reread page->inuse */
4108  			barrier();
4109  
4110  			/* We do not keep full slabs on the list */
4111  			BUG_ON(free <= 0);
4112  
4113  			if (free == page->objects) {
4114  				list_move(&page->slab_list, &discard);
4115  				n->nr_partial--;
4116  			} else if (free <= SHRINK_PROMOTE_MAX)
4117  				list_move(&page->slab_list, promote + free - 1);
4118  		}
4119  
4120  		/*
4121  		 * Promote the slabs filled up most to the head of the
4122  		 * partial list.
4123  		 */
4124  		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4125  			list_splice(promote + i, &n->partial);
4126  
4127  		spin_unlock_irqrestore(&n->list_lock, flags);
4128  
4129  		/* Release empty slabs */
4130  		list_for_each_entry_safe(page, t, &discard, slab_list)
4131  			discard_slab(s, page);
4132  
4133  		if (slabs_node(s, node))
4134  			ret = 1;
4135  	}
4136  
4137  	return ret;
4138  }
4139  
4140  #ifdef CONFIG_MEMCG
4141  void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
4142  {
4143  	/*
4144  	 * Called with all the locks held after a sched RCU grace period.
4145  	 * Even if @s becomes empty after shrinking, we can't know that @s
4146  	 * doesn't have allocations already in-flight and thus can't
4147  	 * destroy @s until the associated memcg is released.
4148  	 *
4149  	 * However, let's remove the sysfs files for empty caches here.
4150  	 * Each cache has a lot of interface files which aren't
4151  	 * particularly useful for empty, draining caches; without this, we can
4152  	 * easily end up with millions of unnecessary sysfs files on
4153  	 * systems which have a lot of memory and transient cgroups.
4154  	 */
4155  	if (!__kmem_cache_shrink(s))
4156  		sysfs_slab_remove(s);
4157  }
4158  
4159  void __kmemcg_cache_deactivate(struct kmem_cache *s)
4160  {
4161  	/*
4162  	 * Disable empty slabs caching. Used to avoid pinning offline
4163  	 * memory cgroups by kmem pages that can be freed.
4164  	 */
4165  	slub_set_cpu_partial(s, 0);
4166  	s->min_partial = 0;
4167  }
4168  #endif	/* CONFIG_MEMCG */
4169  
4170  static int slab_mem_going_offline_callback(void *arg)
4171  {
4172  	struct kmem_cache *s;
4173  
4174  	mutex_lock(&slab_mutex);
4175  	list_for_each_entry(s, &slab_caches, list)
4176  		__kmem_cache_shrink(s);
4177  	mutex_unlock(&slab_mutex);
4178  
4179  	return 0;
4180  }
4181  
4182  static void slab_mem_offline_callback(void *arg)
4183  {
4184  	struct kmem_cache_node *n;
4185  	struct kmem_cache *s;
4186  	struct memory_notify *marg = arg;
4187  	int offline_node;
4188  
4189  	offline_node = marg->status_change_nid_normal;
4190  
4191  	/*
4192  	 * If the node still has available memory, we still need the
4193  	 * kmem_cache_node structure for it, so there is nothing to do here.
4194  	 */
4195  	if (offline_node < 0)
4196  		return;
4197  
4198  	mutex_lock(&slab_mutex);
4199  	list_for_each_entry(s, &slab_caches, list) {
4200  		n = get_node(s, offline_node);
4201  		if (n) {
4202  			/*
4203  			 * if n->nr_slabs > 0, slabs still exist on the node
4204  			 * that is going down. We were unable to free them,
4205  			 * and the offline_pages() function shouldn't call this
4206  			 * callback. So, we must fail.
4207  			 */
4208  			BUG_ON(slabs_node(s, offline_node));
4209  
4210  			s->node[offline_node] = NULL;
4211  			kmem_cache_free(kmem_cache_node, n);
4212  		}
4213  	}
4214  	mutex_unlock(&slab_mutex);
4215  }
4216  
4217  static int slab_mem_going_online_callback(void *arg)
4218  {
4219  	struct kmem_cache_node *n;
4220  	struct kmem_cache *s;
4221  	struct memory_notify *marg = arg;
4222  	int nid = marg->status_change_nid_normal;
4223  	int ret = 0;
4224  
4225  	/*
4226  	 * If the node's memory is already available, then kmem_cache_node is
4227  	 * already created. Nothing to do.
4228  	 */
4229  	if (nid < 0)
4230  		return 0;
4231  
4232  	/*
4233  	 * We are bringing a node online. No memory is available yet. We must
4234  	 * allocate a kmem_cache_node structure in order to bring the node
4235  	 * online.
4236  	 */
4237  	mutex_lock(&slab_mutex);
4238  	list_for_each_entry(s, &slab_caches, list) {
4239  		/*
4240  		 * XXX: kmem_cache_alloc_node will fall back to other nodes
4241  		 *      since memory is not yet available from the node that
4242  		 *      is brought up.
4243  		 */
4244  		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4245  		if (!n) {
4246  			ret = -ENOMEM;
4247  			goto out;
4248  		}
4249  		init_kmem_cache_node(n);
4250  		s->node[nid] = n;
4251  	}
4252  out:
4253  	mutex_unlock(&slab_mutex);
4254  	return ret;
4255  }
4256  
4257  static int slab_memory_callback(struct notifier_block *self,
4258  				unsigned long action, void *arg)
4259  {
4260  	int ret = 0;
4261  
4262  	switch (action) {
4263  	case MEM_GOING_ONLINE:
4264  		ret = slab_mem_going_online_callback(arg);
4265  		break;
4266  	case MEM_GOING_OFFLINE:
4267  		ret = slab_mem_going_offline_callback(arg);
4268  		break;
4269  	case MEM_OFFLINE:
4270  	case MEM_CANCEL_ONLINE:
4271  		slab_mem_offline_callback(arg);
4272  		break;
4273  	case MEM_ONLINE:
4274  	case MEM_CANCEL_OFFLINE:
4275  		break;
4276  	}
4277  	if (ret)
4278  		ret = notifier_from_errno(ret);
4279  	else
4280  		ret = NOTIFY_OK;
4281  	return ret;
4282  }
4283  
4284  static struct notifier_block slab_memory_callback_nb = {
4285  	.notifier_call = slab_memory_callback,
4286  	.priority = SLAB_CALLBACK_PRI,
4287  };
4288  
4289  /********************************************************************
4290   *			Basic setup of slabs
4291   *******************************************************************/
4292  
4293  /*
4294   * Used for early kmem_cache structures that were allocated using
4295   * the page allocator. Allocate them properly then fix up the pointers
4296   * that may be pointing to the wrong kmem_cache structure.
4297   */
4298  
4299  static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4300  {
4301  	int node;
4302  	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4303  	struct kmem_cache_node *n;
4304  
4305  	memcpy(s, static_cache, kmem_cache->object_size);
4306  
4307  	/*
4308  	 * This runs very early, and only the boot processor is supposed to be
4309  	 * up.  Even if that weren't true, IRQs are not yet enabled, so we
4310  	 * couldn't send IPIs around anyway.
4311  	 */
4312  	__flush_cpu_slab(s, smp_processor_id());
4313  	for_each_kmem_cache_node(s, node, n) {
4314  		struct page *p;
4315  
4316  		list_for_each_entry(p, &n->partial, slab_list)
4317  			p->slab_cache = s;
4318  
4319  #ifdef CONFIG_SLUB_DEBUG
4320  		list_for_each_entry(p, &n->full, slab_list)
4321  			p->slab_cache = s;
4322  #endif
4323  	}
4324  	slab_init_memcg_params(s);
4325  	list_add(&s->list, &slab_caches);
4326  	memcg_link_cache(s, NULL);
4327  	return s;
4328  }
4329  
4330  void __init kmem_cache_init(void)
4331  {
4332  	static __initdata struct kmem_cache boot_kmem_cache,
4333  		boot_kmem_cache_node;
4334  
4335  	if (debug_guardpage_minorder())
4336  		slub_max_order = 0;
4337  
4338  	kmem_cache_node = &boot_kmem_cache_node;
4339  	kmem_cache = &boot_kmem_cache;
4340  
4341  	create_boot_cache(kmem_cache_node, "kmem_cache_node",
4342  		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4343  
4344  	register_hotmemory_notifier(&slab_memory_callback_nb);
4345  
4346  	/* Able to allocate the per node structures */
4347  	slab_state = PARTIAL;
4348  
4349  	create_boot_cache(kmem_cache, "kmem_cache",
4350  			offsetof(struct kmem_cache, node) +
4351  				nr_node_ids * sizeof(struct kmem_cache_node *),
4352  		       SLAB_HWCACHE_ALIGN, 0, 0);
4353  
4354  	kmem_cache = bootstrap(&boot_kmem_cache);
4355  	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4356  
4357  	/* Now we can use the kmem_cache to allocate kmalloc slabs */
4358  	setup_kmalloc_cache_index_table();
4359  	create_kmalloc_caches(0);
4360  
4361  	/* Setup random freelists for each cache */
4362  	init_freelist_randomization();
4363  
4364  	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4365  				  slub_cpu_dead);
4366  
4367  	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
4368  		cache_line_size(),
4369  		slub_min_order, slub_max_order, slub_min_objects,
4370  		nr_cpu_ids, nr_node_ids);
4371  }
4372  
4373  void __init kmem_cache_init_late(void)
4374  {
4375  }
4376  
4377  struct kmem_cache *
4378  __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4379  		   slab_flags_t flags, void (*ctor)(void *))
4380  {
4381  	struct kmem_cache *s, *c;
4382  
4383  	s = find_mergeable(size, align, flags, name, ctor);
4384  	if (s) {
4385  		s->refcount++;
4386  
4387  		/*
4388  		 * Adjust the object sizes so that we clear
4389  		 * the complete object on kzalloc.
4390  		 */
4391  		s->object_size = max(s->object_size, size);
4392  		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4393  
4394  		for_each_memcg_cache(c, s) {
4395  			c->object_size = s->object_size;
4396  			c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
4397  		}
4398  
4399  		if (sysfs_slab_alias(s, name)) {
4400  			s->refcount--;
4401  			s = NULL;
4402  		}
4403  	}
4404  
4405  	return s;
4406  }
4407  
4408  int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4409  {
4410  	int err;
4411  
4412  	err = kmem_cache_open(s, flags);
4413  	if (err)
4414  		return err;
4415  
4416  	/* Mutex is not taken during early boot */
4417  	if (slab_state <= UP)
4418  		return 0;
4419  
4420  	memcg_propagate_slab_attrs(s);
4421  	err = sysfs_slab_add(s);
4422  	if (err)
4423  		__kmem_cache_release(s);
4424  
4425  	return err;
4426  }
4427  
4428  void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4429  {
4430  	struct kmem_cache *s;
4431  	void *ret;
4432  
4433  	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4434  		return kmalloc_large(size, gfpflags);
4435  
4436  	s = kmalloc_slab(size, gfpflags);
4437  
4438  	if (unlikely(ZERO_OR_NULL_PTR(s)))
4439  		return s;
4440  
4441  	ret = slab_alloc(s, gfpflags, caller);
4442  
4443  	/* Honor the call site pointer we received. */
4444  	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4445  
4446  	return ret;
4447  }
4448  EXPORT_SYMBOL(__kmalloc_track_caller);
4449  
4450  #ifdef CONFIG_NUMA
4451  void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4452  					int node, unsigned long caller)
4453  {
4454  	struct kmem_cache *s;
4455  	void *ret;
4456  
4457  	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4458  		ret = kmalloc_large_node(size, gfpflags, node);
4459  
4460  		trace_kmalloc_node(caller, ret,
4461  				   size, PAGE_SIZE << get_order(size),
4462  				   gfpflags, node);
4463  
4464  		return ret;
4465  	}
4466  
4467  	s = kmalloc_slab(size, gfpflags);
4468  
4469  	if (unlikely(ZERO_OR_NULL_PTR(s)))
4470  		return s;
4471  
4472  	ret = slab_alloc_node(s, gfpflags, node, caller);
4473  
4474  	/* Honor the call site pointer we received. */
4475  	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4476  
4477  	return ret;
4478  }
4479  EXPORT_SYMBOL(__kmalloc_node_track_caller);
4480  #endif
4481  
4482  #ifdef CONFIG_SYSFS
4483  static int count_inuse(struct page *page)
4484  {
4485  	return page->inuse;
4486  }
4487  
4488  static int count_total(struct page *page)
4489  {
4490  	return page->objects;
4491  }
4492  #endif
4493  
4494  #ifdef CONFIG_SLUB_DEBUG
4495  static void validate_slab(struct kmem_cache *s, struct page *page)
4496  {
4497  	void *p;
4498  	void *addr = page_address(page);
4499  	unsigned long *map;
4500  
4501  	slab_lock(page);
4502  
4503  	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
4504  		goto unlock;
4505  
4506  	/* Now we know that a valid freelist exists */
4507  	map = get_map(s, page);
4508  	for_each_object(p, s, addr, page->objects) {
4509  		u8 val = test_bit(slab_index(p, s, addr), map) ?
4510  			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
4511  
4512  		if (!check_object(s, page, p, val))
4513  			break;
4514  	}
4515  	put_map(map);
4516  unlock:
4517  	slab_unlock(page);
4518  }
4519  
4520  static int validate_slab_node(struct kmem_cache *s,
4521  		struct kmem_cache_node *n)
4522  {
4523  	unsigned long count = 0;
4524  	struct page *page;
4525  	unsigned long flags;
4526  
4527  	spin_lock_irqsave(&n->list_lock, flags);
4528  
4529  	list_for_each_entry(page, &n->partial, slab_list) {
4530  		validate_slab(s, page);
4531  		count++;
4532  	}
4533  	if (count != n->nr_partial)
4534  		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4535  		       s->name, count, n->nr_partial);
4536  
4537  	if (!(s->flags & SLAB_STORE_USER))
4538  		goto out;
4539  
4540  	list_for_each_entry(page, &n->full, slab_list) {
4541  		validate_slab(s, page);
4542  		count++;
4543  	}
4544  	if (count != atomic_long_read(&n->nr_slabs))
4545  		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4546  		       s->name, count, atomic_long_read(&n->nr_slabs));
4547  
4548  out:
4549  	spin_unlock_irqrestore(&n->list_lock, flags);
4550  	return count;
4551  }
4552  
4553  static long validate_slab_cache(struct kmem_cache *s)
4554  {
4555  	int node;
4556  	unsigned long count = 0;
4557  	struct kmem_cache_node *n;
4558  
4559  	flush_all(s);
4560  	for_each_kmem_cache_node(s, node, n)
4561  		count += validate_slab_node(s, n);
4562  
4563  	return count;
4564  }
4565  /*
4566   * Generate lists of code addresses where slabcache objects are allocated
4567   * and freed.
4568   */
4569  
4570  struct location {
4571  	unsigned long count;
4572  	unsigned long addr;
4573  	long long sum_time;
4574  	long min_time;
4575  	long max_time;
4576  	long min_pid;
4577  	long max_pid;
4578  	DECLARE_BITMAP(cpus, NR_CPUS);
4579  	nodemask_t nodes;
4580  };
4581  
4582  struct loc_track {
4583  	unsigned long max;
4584  	unsigned long count;
4585  	struct location *loc;
4586  };
4587  
4588  static void free_loc_track(struct loc_track *t)
4589  {
4590  	if (t->max)
4591  		free_pages((unsigned long)t->loc,
4592  			get_order(sizeof(struct location) * t->max));
4593  }
4594  
4595  static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4596  {
4597  	struct location *l;
4598  	int order;
4599  
4600  	order = get_order(sizeof(struct location) * max);
4601  
4602  	l = (void *)__get_free_pages(flags, order);
4603  	if (!l)
4604  		return 0;
4605  
4606  	if (t->count) {
4607  		memcpy(l, t->loc, sizeof(struct location) * t->count);
4608  		free_loc_track(t);
4609  	}
4610  	t->max = max;
4611  	t->loc = l;
4612  	return 1;
4613  }
4614  
4615  static int add_location(struct loc_track *t, struct kmem_cache *s,
4616  				const struct track *track)
4617  {
4618  	long start, end, pos;
4619  	struct location *l;
4620  	unsigned long caddr;
4621  	unsigned long age = jiffies - track->when;
4622  
4623  	start = -1;
4624  	end = t->count;
4625  
4626  	for ( ; ; ) {
4627  		pos = start + (end - start + 1) / 2;
4628  
4629  		/*
4630  		 * There is nothing at "end". If we end up there
4631  		 * we need to insert the new element before "end".
4632  		 */
4633  		if (pos == end)
4634  			break;
4635  
4636  		caddr = t->loc[pos].addr;
4637  		if (track->addr == caddr) {
4638  
4639  			l = &t->loc[pos];
4640  			l->count++;
4641  			if (track->when) {
4642  				l->sum_time += age;
4643  				if (age < l->min_time)
4644  					l->min_time = age;
4645  				if (age > l->max_time)
4646  					l->max_time = age;
4647  
4648  				if (track->pid < l->min_pid)
4649  					l->min_pid = track->pid;
4650  				if (track->pid > l->max_pid)
4651  					l->max_pid = track->pid;
4652  
4653  				cpumask_set_cpu(track->cpu,
4654  						to_cpumask(l->cpus));
4655  			}
4656  			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4657  			return 1;
4658  		}
4659  
4660  		if (track->addr < caddr)
4661  			end = pos;
4662  		else
4663  			start = pos;
4664  	}
4665  
4666  	/*
4667  	 * Not found. Insert new tracking element.
4668  	 */
4669  	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4670  		return 0;
4671  
4672  	l = t->loc + pos;
4673  	if (pos < t->count)
4674  		memmove(l + 1, l,
4675  			(t->count - pos) * sizeof(struct location));
4676  	t->count++;
4677  	l->count = 1;
4678  	l->addr = track->addr;
4679  	l->sum_time = age;
4680  	l->min_time = age;
4681  	l->max_time = age;
4682  	l->min_pid = track->pid;
4683  	l->max_pid = track->pid;
4684  	cpumask_clear(to_cpumask(l->cpus));
4685  	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4686  	nodes_clear(l->nodes);
4687  	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4688  	return 1;
4689  }
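/*
 * add_location() thus keeps t->loc[] sorted by call-site address: with
 * existing entries at addresses A and C (A < C), a new track at address B
 * bisects to the slot between them and the tail of the array is
 * memmove()d up to make room; the array is doubled with GFP_ATOMIC once
 * it fills up.
 */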
4690  
4691  static void process_slab(struct loc_track *t, struct kmem_cache *s,
4692  		struct page *page, enum track_item alloc)
4693  {
4694  	void *addr = page_address(page);
4695  	void *p;
4696  	unsigned long *map;
4697  
4698  	map = get_map(s, page);
4699  	for_each_object(p, s, addr, page->objects)
4700  		if (!test_bit(slab_index(p, s, addr), map))
4701  			add_location(t, s, get_track(s, p, alloc));
4702  	put_map(map);
4703  }
4704  
4705  static int list_locations(struct kmem_cache *s, char *buf,
4706  					enum track_item alloc)
4707  {
4708  	int len = 0;
4709  	unsigned long i;
4710  	struct loc_track t = { 0, 0, NULL };
4711  	int node;
4712  	struct kmem_cache_node *n;
4713  
4714  	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4715  			     GFP_KERNEL)) {
4716  		return sprintf(buf, "Out of memory\n");
4717  	}
4718  	/* Push back cpu slabs */
4719  	flush_all(s);
4720  
4721  	for_each_kmem_cache_node(s, node, n) {
4722  		unsigned long flags;
4723  		struct page *page;
4724  
4725  		if (!atomic_long_read(&n->nr_slabs))
4726  			continue;
4727  
4728  		spin_lock_irqsave(&n->list_lock, flags);
4729  		list_for_each_entry(page, &n->partial, slab_list)
4730  			process_slab(&t, s, page, alloc);
4731  		list_for_each_entry(page, &n->full, slab_list)
4732  			process_slab(&t, s, page, alloc);
4733  		spin_unlock_irqrestore(&n->list_lock, flags);
4734  	}
4735  
4736  	for (i = 0; i < t.count; i++) {
4737  		struct location *l = &t.loc[i];
4738  
4739  		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4740  			break;
4741  		len += sprintf(buf + len, "%7ld ", l->count);
4742  
4743  		if (l->addr)
4744  			len += sprintf(buf + len, "%pS", (void *)l->addr);
4745  		else
4746  			len += sprintf(buf + len, "<not-available>");
4747  
4748  		if (l->sum_time != l->min_time) {
4749  			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4750  				l->min_time,
4751  				(long)div_u64(l->sum_time, l->count),
4752  				l->max_time);
4753  		} else
4754  			len += sprintf(buf + len, " age=%ld",
4755  				l->min_time);
4756  
4757  		if (l->min_pid != l->max_pid)
4758  			len += sprintf(buf + len, " pid=%ld-%ld",
4759  				l->min_pid, l->max_pid);
4760  		else
4761  			len += sprintf(buf + len, " pid=%ld",
4762  				l->min_pid);
4763  
4764  		if (num_online_cpus() > 1 &&
4765  				!cpumask_empty(to_cpumask(l->cpus)) &&
4766  				len < PAGE_SIZE - 60)
4767  			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4768  					 " cpus=%*pbl",
4769  					 cpumask_pr_args(to_cpumask(l->cpus)));
4770  
4771  		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4772  				len < PAGE_SIZE - 60)
4773  			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4774  					 " nodes=%*pbl",
4775  					 nodemask_pr_args(&l->nodes));
4776  
4777  		len += sprintf(buf + len, "\n");
4778  	}
4779  
4780  	free_loc_track(&t);
4781  	if (!t.count)
4782  		len += sprintf(buf, "No data\n");
4783  	return len;
4784  }
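/*
 * A line emitted above might look like (values hypothetical):
 *
 *    4384 kmem_cache_alloc+0x1a/0x90 age=3/153/3922 pid=1-2043 cpus=0-3 nodes=0
 *
 * i.e. the hit count, the call site, min/avg/max age in jiffies, the pid
 * range, and the cpu and node masks recorded for that location.
 */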
4785  #endif	/* CONFIG_SLUB_DEBUG */
4786  
4787  #ifdef SLUB_RESILIENCY_TEST
4788  static void __init resiliency_test(void)
4789  {
4790  	u8 *p;
4791  	int type = KMALLOC_NORMAL;
4792  
4793  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4794  
4795  	pr_err("SLUB resiliency testing\n");
4796  	pr_err("-----------------------\n");
4797  	pr_err("A. Corruption after allocation\n");
4798  
4799  	p = kzalloc(16, GFP_KERNEL);
4800  	p[16] = 0x12;
4801  	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4802  	       p + 16);
4803  
4804  	validate_slab_cache(kmalloc_caches[type][4]);
4805  
4806  	/* Hmmm... The next two are dangerous */
4807  	p = kzalloc(32, GFP_KERNEL);
4808  	p[32 + sizeof(void *)] = 0x34;
4809  	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> 0x%p\n",
4810  	       p);
4811  	pr_err("If allocated object is overwritten then not detectable\n\n");
4812  
4813  	validate_slab_cache(kmalloc_caches[type][5]);
4814  	p = kzalloc(64, GFP_KERNEL);
4815  	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4816  	*p = 0x56;
4817  	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4818  	       p);
4819  	pr_err("If allocated object is overwritten then not detectable\n\n");
4820  	validate_slab_cache(kmalloc_caches[type][6]);
4821  
4822  	pr_err("\nB. Corruption after free\n");
4823  	p = kzalloc(128, GFP_KERNEL);
4824  	kfree(p);
4825  	*p = 0x78;
4826  	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4827  	validate_slab_cache(kmalloc_caches[type][7]);
4828  
4829  	p = kzalloc(256, GFP_KERNEL);
4830  	kfree(p);
4831  	p[50] = 0x9a;
4832  	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4833  	validate_slab_cache(kmalloc_caches[type][8]);
4834  
4835  	p = kzalloc(512, GFP_KERNEL);
4836  	kfree(p);
4837  	p[512] = 0xab;
4838  	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4839  	validate_slab_cache(kmalloc_caches[type][9]);
4840  }
4841  #else
4842  #ifdef CONFIG_SYSFS
4843  static void resiliency_test(void) {}
4844  #endif
4845  #endif	/* SLUB_RESILIENCY_TEST */
4846  
4847  #ifdef CONFIG_SYSFS
4848  enum slab_stat_type {
4849  	SL_ALL,			/* All slabs */
4850  	SL_PARTIAL,		/* Only partially allocated slabs */
4851  	SL_CPU,			/* Only slabs used for cpu caches */
4852  	SL_OBJECTS,		/* Determine allocated objects not slabs */
4853  	SL_TOTAL		/* Determine object capacity not slabs */
4854  };
4855  
4856  #define SO_ALL		(1 << SL_ALL)
4857  #define SO_PARTIAL	(1 << SL_PARTIAL)
4858  #define SO_CPU		(1 << SL_CPU)
4859  #define SO_OBJECTS	(1 << SL_OBJECTS)
4860  #define SO_TOTAL	(1 << SL_TOTAL)
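/*
 * The attribute handlers below combine these, e.g. objects_show() passes
 * SO_ALL|SO_OBJECTS to count allocated objects across all slabs, while
 * objects_partial_show() passes SO_PARTIAL|SO_OBJECTS to count only the
 * objects in use on the partial lists.
 */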
4861  
4862  #ifdef CONFIG_MEMCG
4863  static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4864  
4865  static int __init setup_slub_memcg_sysfs(char *str)
4866  {
4867  	int v;
4868  
4869  	if (get_option(&str, &v) > 0)
4870  		memcg_sysfs_enabled = v;
4871  
4872  	return 1;
4873  }
4874  
4875  __setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4876  #endif
4877  
4878  static ssize_t show_slab_objects(struct kmem_cache *s,
4879  			    char *buf, unsigned long flags)
4880  {
4881  	unsigned long total = 0;
4882  	int node;
4883  	int x;
4884  	unsigned long *nodes;
4885  
4886  	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
4887  	if (!nodes)
4888  		return -ENOMEM;
4889  
4890  	if (flags & SO_CPU) {
4891  		int cpu;
4892  
4893  		for_each_possible_cpu(cpu) {
4894  			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4895  							       cpu);
4896  			int node;
4897  			struct page *page;
4898  
4899  			page = READ_ONCE(c->page);
4900  			if (!page)
4901  				continue;
4902  
4903  			node = page_to_nid(page);
4904  			if (flags & SO_TOTAL)
4905  				x = page->objects;
4906  			else if (flags & SO_OBJECTS)
4907  				x = page->inuse;
4908  			else
4909  				x = 1;
4910  
4911  			total += x;
4912  			nodes[node] += x;
4913  
4914  			page = slub_percpu_partial_read_once(c);
4915  			if (page) {
4916  				node = page_to_nid(page);
4917  				if (flags & SO_TOTAL)
4918  					WARN_ON_ONCE(1);
4919  				else if (flags & SO_OBJECTS)
4920  					WARN_ON_ONCE(1);
4921  				else
4922  					x = page->pages;
4923  				total += x;
4924  				nodes[node] += x;
4925  			}
4926  		}
4927  	}
4928  
4929  	/*
4930  	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
4931  	 * already held which will conflict with an existing lock order:
4932  	 *
4933  	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
4934  	 *
4935  	 * We don't really need mem_hotplug_lock (to hold off
4936  	 * slab_mem_going_offline_callback) here because slab's memory hot
4937  	 * unplug code doesn't destroy the kmem_cache->node[] data.
4938  	 */
4939  
4940  #ifdef CONFIG_SLUB_DEBUG
4941  	if (flags & SO_ALL) {
4942  		struct kmem_cache_node *n;
4943  
4944  		for_each_kmem_cache_node(s, node, n) {
4945  
4946  			if (flags & SO_TOTAL)
4947  				x = atomic_long_read(&n->total_objects);
4948  			else if (flags & SO_OBJECTS)
4949  				x = atomic_long_read(&n->total_objects) -
4950  					count_partial(n, count_free);
4951  			else
4952  				x = atomic_long_read(&n->nr_slabs);
4953  			total += x;
4954  			nodes[node] += x;
4955  		}
4956  
4957  	} else
4958  #endif
4959  	if (flags & SO_PARTIAL) {
4960  		struct kmem_cache_node *n;
4961  
4962  		for_each_kmem_cache_node(s, node, n) {
4963  			if (flags & SO_TOTAL)
4964  				x = count_partial(n, count_total);
4965  			else if (flags & SO_OBJECTS)
4966  				x = count_partial(n, count_inuse);
4967  			else
4968  				x = n->nr_partial;
4969  			total += x;
4970  			nodes[node] += x;
4971  		}
4972  	}
4973  	x = sprintf(buf, "%lu", total);
4974  #ifdef CONFIG_NUMA
4975  	for (node = 0; node < nr_node_ids; node++)
4976  		if (nodes[node])
4977  			x += sprintf(buf + x, " N%d=%lu",
4978  					node, nodes[node]);
4979  #endif
4980  	kfree(nodes);
4981  	return x + sprintf(buf + x, "\n");
4982  }
4983  
4984  #ifdef CONFIG_SLUB_DEBUG
4985  static int any_slab_objects(struct kmem_cache *s)
4986  {
4987  	int node;
4988  	struct kmem_cache_node *n;
4989  
4990  	for_each_kmem_cache_node(s, node, n)
4991  		if (atomic_long_read(&n->total_objects))
4992  			return 1;
4993  
4994  	return 0;
4995  }
4996  #endif
4997  
4998  #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4999  #define to_slab(n) container_of(n, struct kmem_cache, kobj)
5000  
5001  struct slab_attribute {
5002  	struct attribute attr;
5003  	ssize_t (*show)(struct kmem_cache *s, char *buf);
5004  	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5005  };
5006  
5007  #define SLAB_ATTR_RO(_name) \
5008  	static struct slab_attribute _name##_attr = \
5009  	__ATTR(_name, 0400, _name##_show, NULL)
5010  
5011  #define SLAB_ATTR(_name) \
5012  	static struct slab_attribute _name##_attr =  \
5013  	__ATTR(_name, 0600, _name##_show, _name##_store)
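/*
 * So, for instance, SLAB_ATTR_RO(align) below creates a read-only (0400)
 * sysfs file named "align" backed by align_show(), while SLAB_ATTR(order)
 * wires order_show() and order_store() to a 0600 file.
 */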
5014  
5015  static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
5016  {
5017  	return sprintf(buf, "%u\n", s->size);
5018  }
5019  SLAB_ATTR_RO(slab_size);
5020  
5021  static ssize_t align_show(struct kmem_cache *s, char *buf)
5022  {
5023  	return sprintf(buf, "%u\n", s->align);
5024  }
5025  SLAB_ATTR_RO(align);
5026  
5027  static ssize_t object_size_show(struct kmem_cache *s, char *buf)
5028  {
5029  	return sprintf(buf, "%u\n", s->object_size);
5030  }
5031  SLAB_ATTR_RO(object_size);
5032  
5033  static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
5034  {
5035  	return sprintf(buf, "%u\n", oo_objects(s->oo));
5036  }
5037  SLAB_ATTR_RO(objs_per_slab);
5038  
5039  static ssize_t order_store(struct kmem_cache *s,
5040  				const char *buf, size_t length)
5041  {
5042  	unsigned int order;
5043  	int err;
5044  
5045  	err = kstrtouint(buf, 10, &order);
5046  	if (err)
5047  		return err;
5048  
5049  	if (order > slub_max_order || order < slub_min_order)
5050  		return -EINVAL;
5051  
5052  	calculate_sizes(s, order);
5053  	return length;
5054  }
5055  
5056  static ssize_t order_show(struct kmem_cache *s, char *buf)
5057  {
5058  	return sprintf(buf, "%u\n", oo_order(s->oo));
5059  }
5060  SLAB_ATTR(order);
5061  
5062  static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
5063  {
5064  	return sprintf(buf, "%lu\n", s->min_partial);
5065  }
5066  
5067  static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
5068  				 size_t length)
5069  {
5070  	unsigned long min;
5071  	int err;
5072  
5073  	err = kstrtoul(buf, 10, &min);
5074  	if (err)
5075  		return err;
5076  
5077  	set_min_partial(s, min);
5078  	return length;
5079  }
5080  SLAB_ATTR(min_partial);
5081  
5082  static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
5083  {
5084  	return sprintf(buf, "%u\n", slub_cpu_partial(s));
5085  }
5086  
5087  static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
5088  				 size_t length)
5089  {
5090  	unsigned int objects;
5091  	int err;
5092  
5093  	err = kstrtouint(buf, 10, &objects);
5094  	if (err)
5095  		return err;
5096  	if (objects && !kmem_cache_has_cpu_partial(s))
5097  		return -EINVAL;
5098  
5099  	slub_set_cpu_partial(s, objects);
5100  	flush_all(s);
5101  	return length;
5102  }
5103  SLAB_ATTR(cpu_partial);
5104  
5105  static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5106  {
5107  	if (!s->ctor)
5108  		return 0;
5109  	return sprintf(buf, "%pS\n", s->ctor);
5110  }
5111  SLAB_ATTR_RO(ctor);
5112  
5113  static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5114  {
5115  	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5116  }
5117  SLAB_ATTR_RO(aliases);
5118  
5119  static ssize_t partial_show(struct kmem_cache *s, char *buf)
5120  {
5121  	return show_slab_objects(s, buf, SO_PARTIAL);
5122  }
5123  SLAB_ATTR_RO(partial);
5124  
5125  static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5126  {
5127  	return show_slab_objects(s, buf, SO_CPU);
5128  }
5129  SLAB_ATTR_RO(cpu_slabs);
5130  
5131  static ssize_t objects_show(struct kmem_cache *s, char *buf)
5132  {
5133  	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5134  }
5135  SLAB_ATTR_RO(objects);
5136  
5137  static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5138  {
5139  	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5140  }
5141  SLAB_ATTR_RO(objects_partial);
5142  
5143  static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5144  {
5145  	int objects = 0;
5146  	int pages = 0;
5147  	int cpu;
5148  	int len;
5149  
5150  	for_each_online_cpu(cpu) {
5151  		struct page *page;
5152  
5153  		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5154  
5155  		if (page) {
5156  			pages += page->pages;
5157  			objects += page->pobjects;
5158  		}
5159  	}
5160  
5161  	len = sprintf(buf, "%d(%d)", objects, pages);
5162  
5163  #ifdef CONFIG_SMP
5164  	for_each_online_cpu(cpu) {
5165  		struct page *page;
5166  
5167  		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5168  
5169  		if (page && len < PAGE_SIZE - 20)
5170  			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5171  				page->pobjects, page->pages);
5172  	}
5173  #endif
5174  	return len + sprintf(buf + len, "\n");
5175  }
5176  SLAB_ATTR_RO(slabs_cpu_partial);
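/*
 * Reading this file yields something like "96(4) C0=24(1) C2=72(3)"
 * (values hypothetical): the aggregate objects(pages) first, then a
 * per-cpu breakdown for each online CPU that holds partial pages.
 */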
5177  
5178  static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5179  {
5180  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5181  }
5182  
5183  static ssize_t reclaim_account_store(struct kmem_cache *s,
5184  				const char *buf, size_t length)
5185  {
5186  	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5187  	if (buf[0] == '1')
5188  		s->flags |= SLAB_RECLAIM_ACCOUNT;
5189  	return length;
5190  }
5191  SLAB_ATTR(reclaim_account);
5192  
5193  static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5194  {
5195  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5196  }
5197  SLAB_ATTR_RO(hwcache_align);
5198  
5199  #ifdef CONFIG_ZONE_DMA
5200  static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5201  {
5202  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5203  }
5204  SLAB_ATTR_RO(cache_dma);
5205  #endif
5206  
5207  static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5208  {
5209  	return sprintf(buf, "%u\n", s->usersize);
5210  }
5211  SLAB_ATTR_RO(usersize);
5212  
5213  static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5214  {
5215  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5216  }
5217  SLAB_ATTR_RO(destroy_by_rcu);
5218  
5219  #ifdef CONFIG_SLUB_DEBUG
5220  static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5221  {
5222  	return show_slab_objects(s, buf, SO_ALL);
5223  }
5224  SLAB_ATTR_RO(slabs);
5225  
5226  static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5227  {
5228  	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5229  }
5230  SLAB_ATTR_RO(total_objects);
5231  
5232  static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5233  {
5234  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5235  }
5236  
5237  static ssize_t sanity_checks_store(struct kmem_cache *s,
5238  				const char *buf, size_t length)
5239  {
5240  	s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5241  	if (buf[0] == '1') {
5242  		s->flags &= ~__CMPXCHG_DOUBLE;
5243  		s->flags |= SLAB_CONSISTENCY_CHECKS;
5244  	}
5245  	return length;
5246  }
5247  SLAB_ATTR(sanity_checks);
5248  
5249  static ssize_t trace_show(struct kmem_cache *s, char *buf)
5250  {
5251  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5252  }
5253  
5254  static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5255  							size_t length)
5256  {
5257  	/*
5258  	 * Tracing a merged cache is going to give confusing results
5259  	 * as well as cause other issues like converting a mergeable
5260  	 * cache into an unmergeable one.
5261  	 */
5262  	if (s->refcount > 1)
5263  		return -EINVAL;
5264  
5265  	s->flags &= ~SLAB_TRACE;
5266  	if (buf[0] == '1') {
5267  		s->flags &= ~__CMPXCHG_DOUBLE;
5268  		s->flags |= SLAB_TRACE;
5269  	}
5270  	return length;
5271  }
5272  SLAB_ATTR(trace);
5273  
5274  static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5275  {
5276  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5277  }
5278  
5279  static ssize_t red_zone_store(struct kmem_cache *s,
5280  				const char *buf, size_t length)
5281  {
5282  	if (any_slab_objects(s))
5283  		return -EBUSY;
5284  
5285  	s->flags &= ~SLAB_RED_ZONE;
5286  	if (buf[0] == '1') {
5287  		s->flags |= SLAB_RED_ZONE;
5288  	}
5289  	calculate_sizes(s, -1);
5290  	return length;
5291  }
5292  SLAB_ATTR(red_zone);
5293  
5294  static ssize_t poison_show(struct kmem_cache *s, char *buf)
5295  {
5296  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5297  }
5298  
5299  static ssize_t poison_store(struct kmem_cache *s,
5300  				const char *buf, size_t length)
5301  {
5302  	if (any_slab_objects(s))
5303  		return -EBUSY;
5304  
5305  	s->flags &= ~SLAB_POISON;
5306  	if (buf[0] == '1') {
5307  		s->flags |= SLAB_POISON;
5308  	}
5309  	calculate_sizes(s, -1);
5310  	return length;
5311  }
5312  SLAB_ATTR(poison);
5313  
5314  static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5315  {
5316  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5317  }
5318  
5319  static ssize_t store_user_store(struct kmem_cache *s,
5320  				const char *buf, size_t length)
5321  {
5322  	if (any_slab_objects(s))
5323  		return -EBUSY;
5324  
5325  	s->flags &= ~SLAB_STORE_USER;
5326  	if (buf[0] == '1') {
5327  		s->flags &= ~__CMPXCHG_DOUBLE;
5328  		s->flags |= SLAB_STORE_USER;
5329  	}
5330  	calculate_sizes(s, -1);
5331  	return length;
5332  }
5333  SLAB_ATTR(store_user);
5334  
5335  static ssize_t validate_show(struct kmem_cache *s, char *buf)
5336  {
5337  	return 0;
5338  }
5339  
5340  static ssize_t validate_store(struct kmem_cache *s,
5341  			const char *buf, size_t length)
5342  {
5343  	int ret = -EINVAL;
5344  
5345  	if (buf[0] == '1') {
5346  		ret = validate_slab_cache(s);
5347  		if (ret >= 0)
5348  			ret = length;
5349  	}
5350  	return ret;
5351  }
5352  SLAB_ATTR(validate);
5353  
5354  static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5355  {
5356  	if (!(s->flags & SLAB_STORE_USER))
5357  		return -ENOSYS;
5358  	return list_locations(s, buf, TRACK_ALLOC);
5359  }
5360  SLAB_ATTR_RO(alloc_calls);
5361  
5362  static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5363  {
5364  	if (!(s->flags & SLAB_STORE_USER))
5365  		return -ENOSYS;
5366  	return list_locations(s, buf, TRACK_FREE);
5367  }
5368  SLAB_ATTR_RO(free_calls);
5369  #endif /* CONFIG_SLUB_DEBUG */
5370  
5371  #ifdef CONFIG_FAILSLAB
5372  static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5373  {
5374  	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5375  }
5376  
5377  static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5378  							size_t length)
5379  {
5380  	if (s->refcount > 1)
5381  		return -EINVAL;
5382  
5383  	s->flags &= ~SLAB_FAILSLAB;
5384  	if (buf[0] == '1')
5385  		s->flags |= SLAB_FAILSLAB;
5386  	return length;
5387  }
5388  SLAB_ATTR(failslab);
5389  #endif
5390  
5391  static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5392  {
5393  	return 0;
5394  }
5395  
5396  static ssize_t shrink_store(struct kmem_cache *s,
5397  			const char *buf, size_t length)
5398  {
5399  	if (buf[0] == '1')
5400  		kmem_cache_shrink_all(s);
5401  	else
5402  		return -EINVAL;
5403  	return length;
5404  }
5405  SLAB_ATTR(shrink);
5406  
5407  #ifdef CONFIG_NUMA
5408  static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5409  {
5410  	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5411  }
5412  
5413  static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5414  				const char *buf, size_t length)
5415  {
5416  	unsigned int ratio;
5417  	int err;
5418  
5419  	err = kstrtouint(buf, 10, &ratio);
5420  	if (err)
5421  		return err;
5422  	if (ratio > 100)
5423  		return -ERANGE;
5424  
5425  	s->remote_node_defrag_ratio = ratio * 10;
5426  
5427  	return length;
5428  }
5429  SLAB_ATTR(remote_node_defrag_ratio);
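/*
 * Note the factor of 10 above: writing 20 to this file stores 200 in
 * s->remote_node_defrag_ratio, and reading divides by 10 again. The
 * default set in kmem_cache_open() is 1000, shown as 100.
 */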
5430  #endif
5431  
5432  #ifdef CONFIG_SLUB_STATS
5433  static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5434  {
5435  	unsigned long sum  = 0;
5436  	int cpu;
5437  	int len;
5438  	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
5439  
5440  	if (!data)
5441  		return -ENOMEM;
5442  
5443  	for_each_online_cpu(cpu) {
5444  		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5445  
5446  		data[cpu] = x;
5447  		sum += x;
5448  	}
5449  
5450  	len = sprintf(buf, "%lu", sum);
5451  
5452  #ifdef CONFIG_SMP
5453  	for_each_online_cpu(cpu) {
5454  		if (data[cpu] && len < PAGE_SIZE - 20)
5455  			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5456  	}
5457  #endif
5458  	kfree(data);
5459  	return len + sprintf(buf + len, "\n");
5460  }
5461  
5462  static void clear_stat(struct kmem_cache *s, enum stat_item si)
5463  {
5464  	int cpu;
5465  
5466  	for_each_online_cpu(cpu)
5467  		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5468  }
5469  
5470  #define STAT_ATTR(si, text) 					\
5471  static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5472  {								\
5473  	return show_stat(s, buf, si);				\
5474  }								\
5475  static ssize_t text##_store(struct kmem_cache *s,		\
5476  				const char *buf, size_t length)	\
5477  {								\
5478  	if (buf[0] != '0')					\
5479  		return -EINVAL;					\
5480  	clear_stat(s, si);					\
5481  	return length;						\
5482  }								\
5483  SLAB_ATTR(text);						\
5484  
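/*
 * Each STAT_ATTR() use below therefore generates a show/store pair such
 * as alloc_fastpath_show()/alloc_fastpath_store(): reading the sysfs
 * file sums the per-cpu counter via show_stat(), and writing '0' clears
 * it via clear_stat().
 */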
5485  STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5486  STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5487  STAT_ATTR(FREE_FASTPATH, free_fastpath);
5488  STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5489  STAT_ATTR(FREE_FROZEN, free_frozen);
5490  STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5491  STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5492  STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5493  STAT_ATTR(ALLOC_SLAB, alloc_slab);
5494  STAT_ATTR(ALLOC_REFILL, alloc_refill);
5495  STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5496  STAT_ATTR(FREE_SLAB, free_slab);
5497  STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5498  STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5499  STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5500  STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5501  STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5502  STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5503  STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5504  STAT_ATTR(ORDER_FALLBACK, order_fallback);
5505  STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5506  STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5507  STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5508  STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5509  STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5510  STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5511  #endif	/* CONFIG_SLUB_STATS */
5512  
5513  static struct attribute *slab_attrs[] = {
5514  	&slab_size_attr.attr,
5515  	&object_size_attr.attr,
5516  	&objs_per_slab_attr.attr,
5517  	&order_attr.attr,
5518  	&min_partial_attr.attr,
5519  	&cpu_partial_attr.attr,
5520  	&objects_attr.attr,
5521  	&objects_partial_attr.attr,
5522  	&partial_attr.attr,
5523  	&cpu_slabs_attr.attr,
5524  	&ctor_attr.attr,
5525  	&aliases_attr.attr,
5526  	&align_attr.attr,
5527  	&hwcache_align_attr.attr,
5528  	&reclaim_account_attr.attr,
5529  	&destroy_by_rcu_attr.attr,
5530  	&shrink_attr.attr,
5531  	&slabs_cpu_partial_attr.attr,
5532  #ifdef CONFIG_SLUB_DEBUG
5533  	&total_objects_attr.attr,
5534  	&slabs_attr.attr,
5535  	&sanity_checks_attr.attr,
5536  	&trace_attr.attr,
5537  	&red_zone_attr.attr,
5538  	&poison_attr.attr,
5539  	&store_user_attr.attr,
5540  	&validate_attr.attr,
5541  	&alloc_calls_attr.attr,
5542  	&free_calls_attr.attr,
5543  #endif
5544  #ifdef CONFIG_ZONE_DMA
5545  	&cache_dma_attr.attr,
5546  #endif
5547  #ifdef CONFIG_NUMA
5548  	&remote_node_defrag_ratio_attr.attr,
5549  #endif
5550  #ifdef CONFIG_SLUB_STATS
5551  	&alloc_fastpath_attr.attr,
5552  	&alloc_slowpath_attr.attr,
5553  	&free_fastpath_attr.attr,
5554  	&free_slowpath_attr.attr,
5555  	&free_frozen_attr.attr,
5556  	&free_add_partial_attr.attr,
5557  	&free_remove_partial_attr.attr,
5558  	&alloc_from_partial_attr.attr,
5559  	&alloc_slab_attr.attr,
5560  	&alloc_refill_attr.attr,
5561  	&alloc_node_mismatch_attr.attr,
5562  	&free_slab_attr.attr,
5563  	&cpuslab_flush_attr.attr,
5564  	&deactivate_full_attr.attr,
5565  	&deactivate_empty_attr.attr,
5566  	&deactivate_to_head_attr.attr,
5567  	&deactivate_to_tail_attr.attr,
5568  	&deactivate_remote_frees_attr.attr,
5569  	&deactivate_bypass_attr.attr,
5570  	&order_fallback_attr.attr,
5571  	&cmpxchg_double_fail_attr.attr,
5572  	&cmpxchg_double_cpu_fail_attr.attr,
5573  	&cpu_partial_alloc_attr.attr,
5574  	&cpu_partial_free_attr.attr,
5575  	&cpu_partial_node_attr.attr,
5576  	&cpu_partial_drain_attr.attr,
5577  #endif
5578  #ifdef CONFIG_FAILSLAB
5579  	&failslab_attr.attr,
5580  #endif
5581  	&usersize_attr.attr,
5582  
5583  	NULL
5584  };
5585  
5586  static const struct attribute_group slab_attr_group = {
5587  	.attrs = slab_attrs,
5588  };
5589  
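/*
 * sysfs_ops dispatchers: map reads and writes of /sys/kernel/slab/<cache>/
 * attributes to the ->show()/->store() handlers of the slab_attribute.
 */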
5590  static ssize_t slab_attr_show(struct kobject *kobj,
5591  				struct attribute *attr,
5592  				char *buf)
5593  {
5594  	struct slab_attribute *attribute;
5595  	struct kmem_cache *s;
5596  	int err;
5597  
5598  	attribute = to_slab_attr(attr);
5599  	s = to_slab(kobj);
5600  
5601  	if (!attribute->show)
5602  		return -EIO;
5603  
5604  	err = attribute->show(s, buf);
5605  
5606  	return err;
5607  }
5608  
5609  static ssize_t slab_attr_store(struct kobject *kobj,
5610  				struct attribute *attr,
5611  				const char *buf, size_t len)
5612  {
5613  	struct slab_attribute *attribute;
5614  	struct kmem_cache *s;
5615  	int err;
5616  
5617  	attribute = to_slab_attr(attr);
5618  	s = to_slab(kobj);
5619  
5620  	if (!attribute->store)
5621  		return -EIO;
5622  
5623  	err = attribute->store(s, buf, len);
5624  #ifdef CONFIG_MEMCG
5625  	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5626  		struct kmem_cache *c;
5627  
5628  		mutex_lock(&slab_mutex);
5629  		if (s->max_attr_size < len)
5630  			s->max_attr_size = len;
5631  
5632  		/*
5633  		 * This is a best-effort propagation, so this function's return
5634  		 * value will be determined by the parent cache only. This is
5635  		 * basically because not all attributes have well-defined
5636  		 * semantics for rollbacks - most of the actions will
5637  		 * have permanent effects.
5638  		 *
5639  		 * Returning the error value of any of the children that fail
5640  		 * is not 100% defined, in the sense that users seeing the
5641  		 * error code won't be able to know anything about the state of
5642  		 * the cache.
5643  		 *
5644  		 * Only returning the error code for the parent cache at least
5645  		 * has well-defined semantics. The cache being written to
5646  		 * directly either failed or succeeded; only on success do we
5647  		 * loop through the descendants with best-effort propagation.
5648  		 */
5649  		for_each_memcg_cache(c, s)
5650  			attribute->store(c, buf, len);
5651  		mutex_unlock(&slab_mutex);
5652  	}
5653  #endif
5654  	return err;
5655  }
5656  
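/*
 * When a new memcg child cache is created, copy each attribute value from
 * its root cache (->show() on the root, ->store() on the child) so the
 * child starts out with the same settings. Skipped entirely if nothing was
 * ever written to the root cache's attributes.
 */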
5657  static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5658  {
5659  #ifdef CONFIG_MEMCG
5660  	int i;
5661  	char *buffer = NULL;
5662  	struct kmem_cache *root_cache;
5663  
5664  	if (is_root_cache(s))
5665  		return;
5666  
5667  	root_cache = s->memcg_params.root_cache;
5668  
5669  	/*
5670  	 * This means this cache had no attributes written. Therefore, there is
5671  	 * no point in copying default values around.
5672  	 */
5673  	if (!root_cache->max_attr_size)
5674  		return;
5675  
5676  	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5677  		char mbuf[64];
5678  		char *buf;
5679  		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5680  		ssize_t len;
5681  
5682  		if (!attr || !attr->store || !attr->show)
5683  			continue;
5684  
5685  		/*
5686  		 * It is really bad that we have to allocate here, so we will
5687  		 * do it only as a fallback. If we actually allocate, though,
5688  		 * we can just use the allocated buffer until the end.
5689  		 *
5690  		 * Most of the slub attributes will tend to be very small in
5691  		 * size, but sysfs allows buffers up to a page, so larger
5692  		 * values can theoretically occur.
5693  		 */
5694  		if (buffer)
5695  			buf = buffer;
5696  		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) &&
5697  			 !IS_ENABLED(CONFIG_SLUB_STATS))
5698  			buf = mbuf;
5699  		else {
5700  			buffer = (char *) get_zeroed_page(GFP_KERNEL);
5701  			if (WARN_ON(!buffer))
5702  				continue;
5703  			buf = buffer;
5704  		}
5705  
5706  		len = attr->show(root_cache, buf);
5707  		if (len > 0)
5708  			attr->store(s, buf, len);
5709  	}
5710  
5711  	if (buffer)
5712  		free_page((unsigned long)buffer);
5713  #endif	/* CONFIG_MEMCG */
5714  }
5715  
5716  static void kmem_cache_release(struct kobject *k)
5717  {
5718  	slab_kmem_cache_release(to_slab(k));
5719  }
5720  
5721  static const struct sysfs_ops slab_sysfs_ops = {
5722  	.show = slab_attr_show,
5723  	.store = slab_attr_store,
5724  };
5725  
5726  static struct kobj_type slab_ktype = {
5727  	.sysfs_ops = &slab_sysfs_ops,
5728  	.release = kmem_cache_release,
5729  };
5730  
5731  static struct kset *slab_kset;
5732  
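/*
 * Child memcg caches are registered under their root cache's "cgroup"
 * kset rather than directly under /sys/kernel/slab.
 */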
5733  static inline struct kset *cache_kset(struct kmem_cache *s)
5734  {
5735  #ifdef CONFIG_MEMCG
5736  	if (!is_root_cache(s))
5737  		return s->memcg_params.root_cache->memcg_kset;
5738  #endif
5739  	return slab_kset;
5740  }
5741  
5742  #define ID_STR_LENGTH 64
5743  
5744  /*
5745   * Create a unique string id for a slab cache.
5746   * Format	:[flags-]size
5747   */
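/*
 * For example, a cache with SLAB_CACHE_DMA and SLAB_ACCOUNT set and a
 * size of 192 bytes would get the id ":dA-0000192".
 */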
5748  static char *create_unique_id(struct kmem_cache *s)
5749  {
5750  	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5751  	char *p = name;
5752  
5753  	BUG_ON(!name);
5754  
5755  	*p++ = ':';
5756  	/*
5757  	 * First flags affecting slabcache operations. We will only
5758  	 * get here for aliasable slabs so we do not need to support
5759  	 * too many flags. The flags here must cover all flags that
5760  	 * are matched during merging to guarantee that the id is
5761  	 * unique.
5762  	 */
5763  	if (s->flags & SLAB_CACHE_DMA)
5764  		*p++ = 'd';
5765  	if (s->flags & SLAB_CACHE_DMA32)
5766  		*p++ = 'D';
5767  	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5768  		*p++ = 'a';
5769  	if (s->flags & SLAB_CONSISTENCY_CHECKS)
5770  		*p++ = 'F';
5771  	if (s->flags & SLAB_ACCOUNT)
5772  		*p++ = 'A';
5773  	if (p != name + 1)
5774  		*p++ = '-';
5775  	p += sprintf(p, "%07u", s->size);
5776  
5777  	BUG_ON(p > name + ID_STR_LENGTH - 1);
5778  	return name;
5779  }
5780  
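/*
 * Deferred sysfs teardown, run from a workqueue: unregister the memcg
 * "cgroup" kset (if any) and drop the reference taken by
 * sysfs_slab_remove().
 */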
5781  static void sysfs_slab_remove_workfn(struct work_struct *work)
5782  {
5783  	struct kmem_cache *s =
5784  		container_of(work, struct kmem_cache, kobj_remove_work);
5785  
5786  	if (!s->kobj.state_in_sysfs)
5787  		/*
5788  		 * For a memcg cache, this may be called during
5789  		 * deactivation and again on shutdown.  Remove only once.
5790  		 * A cache is never shut down before deactivation is
5791  		 * complete, so no need to worry about synchronization.
5792  		 */
5793  		goto out;
5794  
5795  #ifdef CONFIG_MEMCG
5796  	kset_unregister(s->memcg_kset);
5797  #endif
5798  out:
5799  	kobject_put(&s->kobj);
5800  }
5801  
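/*
 * Register a cache in sysfs. Unmergeable caches appear under their own
 * name; mergeable caches get a generated unique id, with the human-readable
 * name added afterwards as a symlink alias.
 */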
5802  static int sysfs_slab_add(struct kmem_cache *s)
5803  {
5804  	int err;
5805  	const char *name;
5806  	struct kset *kset = cache_kset(s);
5807  	int unmergeable = slab_unmergeable(s);
5808  
5809  	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5810  
5811  	if (!kset) {
5812  		kobject_init(&s->kobj, &slab_ktype);
5813  		return 0;
5814  	}
5815  
5816  	if (!unmergeable && disable_higher_order_debug &&
5817  			(slub_debug & DEBUG_METADATA_FLAGS))
5818  		unmergeable = 1;
5819  
5820  	if (unmergeable) {
5821  		/*
5822  		 * This slab cache can never be merged, so we can use its name as-is.
5823  		 * This is typically the case for debug situations. In that
5824  		 * case we can catch duplicate names easily.
5825  		 */
5826  		sysfs_remove_link(&slab_kset->kobj, s->name);
5827  		name = s->name;
5828  	} else {
5829  		/*
5830  		 * Create a unique name for the slab as a target
5831  		 * for the symlinks.
5832  		 */
5833  		name = create_unique_id(s);
5834  	}
5835  
5836  	s->kobj.kset = kset;
5837  	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5838  	if (err) {
5839  		kobject_put(&s->kobj);
5840  		goto out;
5841  	}
5842  
5843  	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5844  	if (err)
5845  		goto out_del_kobj;
5846  
5847  #ifdef CONFIG_MEMCG
5848  	if (is_root_cache(s) && memcg_sysfs_enabled) {
5849  		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5850  		if (!s->memcg_kset) {
5851  			err = -ENOMEM;
5852  			goto out_del_kobj;
5853  		}
5854  	}
5855  #endif
5856  
5857  	if (!unmergeable) {
5858  		/* Setup first alias */
5859  		sysfs_slab_alias(s, s->name);
5860  	}
5861  out:
5862  	if (!unmergeable)
5863  		kfree(name);
5864  	return err;
5865  out_del_kobj:
5866  	kobject_del(&s->kobj);
5867  	goto out;
5868  }
5869  
5870  static void sysfs_slab_remove(struct kmem_cache *s)
5871  {
5872  	if (slab_state < FULL)
5873  		/*
5874  		 * Sysfs has not been setup yet so no need to remove the
5875  		 * Sysfs has not been set up yet, so there is no need to
5876  		 * remove the cache from sysfs.
5877  		return;
5878  
5879  	kobject_get(&s->kobj);
5880  	schedule_work(&s->kobj_remove_work);
5881  }
5882  
5883  void sysfs_slab_unlink(struct kmem_cache *s)
5884  {
5885  	if (slab_state >= FULL)
5886  		kobject_del(&s->kobj);
5887  }
5888  
5889  void sysfs_slab_release(struct kmem_cache *s)
5890  {
5891  	if (slab_state >= FULL)
5892  		kobject_put(&s->kobj);
5893  }
5894  
5895  /*
5896   * Need to buffer aliases during bootup until sysfs becomes
5897   * available lest we lose that information.
5898   */
5899  struct saved_alias {
5900  	struct kmem_cache *s;
5901  	const char *name;
5902  	struct saved_alias *next;
5903  };
5904  
5905  static struct saved_alias *alias_list;
5906  
5907  static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5908  {
5909  	struct saved_alias *al;
5910  
5911  	if (slab_state == FULL) {
5912  		/*
5913  		 * If we have a leftover link then remove it.
5914  		 */
5915  		sysfs_remove_link(&slab_kset->kobj, name);
5916  		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5917  	}
5918  
5919  	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5920  	if (!al)
5921  		return -ENOMEM;
5922  
5923  	al->s = s;
5924  	al->name = name;
5925  	al->next = alias_list;
5926  	alias_list = al;
5927  	return 0;
5928  }
5929  
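/*
 * Run as an initcall once sysfs is available: create /sys/kernel/slab,
 * register every cache that was set up earlier during boot, and create the
 * symlinks saved on the alias list.
 */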
5930  static int __init slab_sysfs_init(void)
5931  {
5932  	struct kmem_cache *s;
5933  	int err;
5934  
5935  	mutex_lock(&slab_mutex);
5936  
5937  	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
5938  	if (!slab_kset) {
5939  		mutex_unlock(&slab_mutex);
5940  		pr_err("Cannot register slab subsystem.\n");
5941  		return -ENOSYS;
5942  	}
5943  
5944  	slab_state = FULL;
5945  
5946  	list_for_each_entry(s, &slab_caches, list) {
5947  		err = sysfs_slab_add(s);
5948  		if (err)
5949  			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5950  			       s->name);
5951  	}
5952  
5953  	while (alias_list) {
5954  		struct saved_alias *al = alias_list;
5955  
5956  		alias_list = alias_list->next;
5957  		err = sysfs_slab_alias(al->s, al->name);
5958  		if (err)
5959  			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5960  			       al->name);
5961  		kfree(al);
5962  	}
5963  
5964  	mutex_unlock(&slab_mutex);
5965  	resiliency_test();
5966  	return 0;
5967  }
5968  
5969  __initcall(slab_sysfs_init);
5970  #endif /* CONFIG_SYSFS */
5971  
5972  /*
5973   * The /proc/slabinfo ABI
5974   */
5975  #ifdef CONFIG_SLUB_DEBUG
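/*
 * Fill in the /proc/slabinfo counters by walking every node and summing
 * its slab, object and free-object counts.
 */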
5976  void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5977  {
5978  	unsigned long nr_slabs = 0;
5979  	unsigned long nr_objs = 0;
5980  	unsigned long nr_free = 0;
5981  	int node;
5982  	struct kmem_cache_node *n;
5983  
5984  	for_each_kmem_cache_node(s, node, n) {
5985  		nr_slabs += node_nr_slabs(n);
5986  		nr_objs += node_nr_objs(n);
5987  		nr_free += count_partial(n, count_free);
5988  	}
5989  
5990  	sinfo->active_objs = nr_objs - nr_free;
5991  	sinfo->num_objs = nr_objs;
5992  	sinfo->active_slabs = nr_slabs;
5993  	sinfo->num_slabs = nr_slabs;
5994  	sinfo->objects_per_slab = oo_objects(s->oo);
5995  	sinfo->cache_order = oo_order(s->oo);
5996  }
5997  
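/*
 * SLUB exposes its statistics and tunables through sysfs, so there are no
 * extra stats to print here and writes to /proc/slabinfo are rejected.
 */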
5998  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5999  {
6000  }
6001  
6002  ssize_t slabinfo_write(struct file *file, const char __user *buffer,
6003  		       size_t count, loff_t *ppos)
6004  {
6005  	return -EIO;
6006  }
6007  #endif /* CONFIG_SLUB_DEBUG */
6008