1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include <linux/sched/isolation.h>
67 #include "internal.h"
68 #include <net/sock.h>
69 #include <net/ip.h>
70 #include "slab.h"
71 #include "swap.h"
72 
73 #include <linux/uaccess.h>
74 
75 #include <trace/events/vmscan.h>
76 
77 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
78 EXPORT_SYMBOL(memory_cgrp_subsys);
79 
80 struct mem_cgroup *root_mem_cgroup __read_mostly;
81 
82 /* Active memory cgroup to use from an interrupt context */
83 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
84 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
85 
86 /* Socket memory accounting disabled? */
87 static bool cgroup_memory_nosocket __ro_after_init;
88 
89 /* Kernel memory accounting disabled? */
90 static bool cgroup_memory_nokmem __ro_after_init;
91 
92 /* BPF memory accounting disabled? */
93 static bool cgroup_memory_nobpf __ro_after_init;
94 
95 #ifdef CONFIG_CGROUP_WRITEBACK
96 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
97 #endif
98 
99 /* Whether legacy memory+swap accounting is active */
100 static bool do_memsw_account(void)
101 {
102 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
103 }
104 
105 #define THRESHOLDS_EVENTS_TARGET 128
106 #define SOFTLIMIT_EVENTS_TARGET 1024
107 
108 /*
109  * Cgroups above their limits are maintained in an RB-tree, independent of
110  * their hierarchy representation.
111  */
112 
113 struct mem_cgroup_tree_per_node {
114 	struct rb_root rb_root;
115 	struct rb_node *rb_rightmost;
116 	spinlock_t lock;
117 };
118 
119 struct mem_cgroup_tree {
120 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
121 };
122 
123 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
124 
125 /* for OOM */
126 struct mem_cgroup_eventfd_list {
127 	struct list_head list;
128 	struct eventfd_ctx *eventfd;
129 };
130 
131 /*
132  * cgroup_event represents events which userspace wants to receive.
133  */
134 struct mem_cgroup_event {
135 	/*
136 	 * memcg which the event belongs to.
137 	 */
138 	struct mem_cgroup *memcg;
139 	/*
140 	 * eventfd to signal userspace about the event.
141 	 */
142 	struct eventfd_ctx *eventfd;
143 	/*
144 	 * Each of these is stored in a list by the cgroup.
145 	 */
146 	struct list_head list;
147 	/*
148 	 * register_event() callback will be used to add a new userspace
149 	 * waiter for changes related to this event.  Use eventfd_signal()
150 	 * on eventfd to send a notification to userspace.
151 	 */
152 	int (*register_event)(struct mem_cgroup *memcg,
153 			      struct eventfd_ctx *eventfd, const char *args);
154 	/*
155 	 * unregister_event() callback will be called when userspace closes
156 	 * the eventfd or when the cgroup is removed.  This callback must be
157 	 * set if you want to provide notification functionality.
158 	 */
159 	void (*unregister_event)(struct mem_cgroup *memcg,
160 				 struct eventfd_ctx *eventfd);
161 	/*
162 	 * All fields below are needed to unregister the event when
163 	 * userspace closes the eventfd.
164 	 */
165 	poll_table pt;
166 	wait_queue_head_t *wqh;
167 	wait_queue_entry_t wait;
168 	struct work_struct remove;
169 };
170 
171 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
172 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
173 
174 /* Stuff for moving charges at task migration. */
175 /*
176  * Types of charges to be moved.
177  */
178 #define MOVE_ANON	0x1U
179 #define MOVE_FILE	0x2U
180 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
181 
182 /* "mc" and its members are protected by cgroup_mutex */
183 static struct move_charge_struct {
184 	spinlock_t	  lock; /* for from, to */
185 	struct mm_struct  *mm;
186 	struct mem_cgroup *from;
187 	struct mem_cgroup *to;
188 	unsigned long flags;
189 	unsigned long precharge;
190 	unsigned long moved_charge;
191 	unsigned long moved_swap;
192 	struct task_struct *moving_task;	/* a task moving charges */
193 	wait_queue_head_t waitq;		/* a waitq for other context */
194 } mc = {
195 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
196 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
197 };
198 
199 /*
200  * Maximum loops in mem_cgroup_soft_reclaim(), used for soft limit
201  * reclaim to prevent infinite loops, if they ever occur.
202  */
203 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
204 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
205 
206 /* for encoding cft->private value on file */
207 enum res_type {
208 	_MEM,
209 	_MEMSWAP,
210 	_KMEM,
211 	_TCP,
212 };
213 
214 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
215 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
216 #define MEMFILE_ATTR(val)	((val) & 0xffff)
217 
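/*
 * Illustration of the encoding above (not part of the interface): the
 * res_type goes in the upper 16 bits and the per-counter attribute in the
 * lower 16 bits of cft->private. Assuming an attribute value of 3:
 *
 *	MEMFILE_PRIVATE(_MEMSWAP, 3)	== (1 << 16) | 3 == 0x10003
 *	MEMFILE_TYPE(0x10003)		== 1 == _MEMSWAP
 *	MEMFILE_ATTR(0x10003)		== 3
 */
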
218 /*
219  * Iteration constructs for visiting all cgroups (under a tree).  If
220  * loops are exited prematurely (break), mem_cgroup_iter_break() must
221  * be used for reference counting.
222  */
223 #define for_each_mem_cgroup_tree(iter, root)		\
224 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
225 	     iter != NULL;				\
226 	     iter = mem_cgroup_iter(root, iter, NULL))
227 
228 #define for_each_mem_cgroup(iter)			\
229 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
230 	     iter != NULL;				\
231 	     iter = mem_cgroup_iter(NULL, iter, NULL))
232 
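/*
 * Illustrative sketch of the reference-counting rule above: if the walk is
 * cut short, the iterator still holds a css reference that must be dropped
 * with mem_cgroup_iter_break(). should_stop() is a hypothetical predicate.
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
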
233 static inline bool task_is_dying(void)
234 {
235 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
236 		(current->flags & PF_EXITING);
237 }
238 
239 /* Some nice accessors for the vmpressure. */
240 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
241 {
242 	if (!memcg)
243 		memcg = root_mem_cgroup;
244 	return &memcg->vmpressure;
245 }
246 
247 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
248 {
249 	return container_of(vmpr, struct mem_cgroup, vmpressure);
250 }
251 
252 #ifdef CONFIG_MEMCG_KMEM
253 static DEFINE_SPINLOCK(objcg_lock);
254 
255 bool mem_cgroup_kmem_disabled(void)
256 {
257 	return cgroup_memory_nokmem;
258 }
259 
260 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
261 				      unsigned int nr_pages);
262 
263 static void obj_cgroup_release(struct percpu_ref *ref)
264 {
265 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
266 	unsigned int nr_bytes;
267 	unsigned int nr_pages;
268 	unsigned long flags;
269 
270 	/*
271 	 * At this point all allocated objects are freed, and
272 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
273 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
274 	 *
275 	 * The following sequence can lead to it:
276 	 * 1) CPU0: objcg == stock->cached_objcg
277 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
278 	 *          PAGE_SIZE bytes are charged
279 	 * 3) CPU1: a process from another memcg is allocating something,
280 	 *          the stock is flushed,
281 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
282 	 * 4) CPU0: we release this object,
283 	 *          92 bytes are added to stock->nr_bytes
284 	 * 5) CPU0: stock is flushed,
285 	 *          92 bytes are added to objcg->nr_charged_bytes
286 	 *
287 	 * As a result, nr_charged_bytes == PAGE_SIZE.
288 	 * This page will be uncharged in obj_cgroup_release().
289 	 */
290 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
291 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
292 	nr_pages = nr_bytes >> PAGE_SHIFT;
293 
294 	if (nr_pages)
295 		obj_cgroup_uncharge_pages(objcg, nr_pages);
296 
297 	spin_lock_irqsave(&objcg_lock, flags);
298 	list_del(&objcg->list);
299 	spin_unlock_irqrestore(&objcg_lock, flags);
300 
301 	percpu_ref_exit(ref);
302 	kfree_rcu(objcg, rcu);
303 }
304 
305 static struct obj_cgroup *obj_cgroup_alloc(void)
306 {
307 	struct obj_cgroup *objcg;
308 	int ret;
309 
310 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
311 	if (!objcg)
312 		return NULL;
313 
314 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
315 			      GFP_KERNEL);
316 	if (ret) {
317 		kfree(objcg);
318 		return NULL;
319 	}
320 	INIT_LIST_HEAD(&objcg->list);
321 	return objcg;
322 }
323 
324 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
325 				  struct mem_cgroup *parent)
326 {
327 	struct obj_cgroup *objcg, *iter;
328 
329 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
330 
331 	spin_lock_irq(&objcg_lock);
332 
333 	/* 1) Ready to reparent active objcg. */
334 	list_add(&objcg->list, &memcg->objcg_list);
335 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
336 	list_for_each_entry(iter, &memcg->objcg_list, list)
337 		WRITE_ONCE(iter->memcg, parent);
338 	/* 3) Move already reparented objcgs to the parent's list */
339 	list_splice(&memcg->objcg_list, &parent->objcg_list);
340 
341 	spin_unlock_irq(&objcg_lock);
342 
343 	percpu_ref_kill(&objcg->refcnt);
344 }
345 
346 /*
347  * A lot of the calls to the cache allocation functions are expected to be
348  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
349  * conditional on this static branch, we'll have to allow modules that do
350  * kmem_cache_alloc and the like to see this symbol as well.
351  */
352 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
353 EXPORT_SYMBOL(memcg_kmem_online_key);
354 
355 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
356 EXPORT_SYMBOL(memcg_bpf_enabled_key);
357 #endif
358 
359 /**
360  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
361  * @folio: folio of interest
362  *
363  * If memcg is bound to the default hierarchy, css of the memcg associated
364  * with @folio is returned.  The returned css remains associated with @folio
365  * until it is released.
366  *
367  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
368  * is returned.
369  */
370 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
371 {
372 	struct mem_cgroup *memcg = folio_memcg(folio);
373 
374 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
375 		memcg = root_mem_cgroup;
376 
377 	return &memcg->css;
378 }
379 
380 /**
381  * page_cgroup_ino - return inode number of the memcg a page is charged to
382  * @page: the page
383  *
384  * Look up the closest online ancestor of the memory cgroup @page is charged to
385  * and return its inode number or 0 if @page is not charged to any cgroup. It
386  * is safe to call this function without holding a reference to @page.
387  *
388  * Note, this function is inherently racy, because there is nothing to prevent
389  * the cgroup inode from getting torn down and potentially reallocated a moment
390  * after page_cgroup_ino() returns, so it should only be used by callers that
391  * do not care (such as procfs interfaces).
392  */
393 ino_t page_cgroup_ino(struct page *page)
394 {
395 	struct mem_cgroup *memcg;
396 	unsigned long ino = 0;
397 
398 	rcu_read_lock();
399 	memcg = page_memcg_check(page);
400 
401 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
402 		memcg = parent_mem_cgroup(memcg);
403 	if (memcg)
404 		ino = cgroup_ino(memcg->css.cgroup);
405 	rcu_read_unlock();
406 	return ino;
407 }
408 
409 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
410 					 struct mem_cgroup_tree_per_node *mctz,
411 					 unsigned long new_usage_in_excess)
412 {
413 	struct rb_node **p = &mctz->rb_root.rb_node;
414 	struct rb_node *parent = NULL;
415 	struct mem_cgroup_per_node *mz_node;
416 	bool rightmost = true;
417 
418 	if (mz->on_tree)
419 		return;
420 
421 	mz->usage_in_excess = new_usage_in_excess;
422 	if (!mz->usage_in_excess)
423 		return;
424 	while (*p) {
425 		parent = *p;
426 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
427 					tree_node);
428 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
429 			p = &(*p)->rb_left;
430 			rightmost = false;
431 		} else {
432 			p = &(*p)->rb_right;
433 		}
434 	}
435 
436 	if (rightmost)
437 		mctz->rb_rightmost = &mz->tree_node;
438 
439 	rb_link_node(&mz->tree_node, parent, p);
440 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
441 	mz->on_tree = true;
442 }
443 
444 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
445 					 struct mem_cgroup_tree_per_node *mctz)
446 {
447 	if (!mz->on_tree)
448 		return;
449 
450 	if (&mz->tree_node == mctz->rb_rightmost)
451 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
452 
453 	rb_erase(&mz->tree_node, &mctz->rb_root);
454 	mz->on_tree = false;
455 }
456 
457 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
458 				       struct mem_cgroup_tree_per_node *mctz)
459 {
460 	unsigned long flags;
461 
462 	spin_lock_irqsave(&mctz->lock, flags);
463 	__mem_cgroup_remove_exceeded(mz, mctz);
464 	spin_unlock_irqrestore(&mctz->lock, flags);
465 }
466 
467 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
468 {
469 	unsigned long nr_pages = page_counter_read(&memcg->memory);
470 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
471 	unsigned long excess = 0;
472 
473 	if (nr_pages > soft_limit)
474 		excess = nr_pages - soft_limit;
475 
476 	return excess;
477 }
478 
479 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
480 {
481 	unsigned long excess;
482 	struct mem_cgroup_per_node *mz;
483 	struct mem_cgroup_tree_per_node *mctz;
484 
485 	if (lru_gen_enabled()) {
486 		if (soft_limit_excess(memcg))
487 			lru_gen_soft_reclaim(&memcg->nodeinfo[nid]->lruvec);
488 		return;
489 	}
490 
491 	mctz = soft_limit_tree.rb_tree_per_node[nid];
492 	if (!mctz)
493 		return;
494 	/*
495 	 * Necessary to update all ancestors when the hierarchy is used,
496 	 * because their event counters are not touched.
497 	 */
498 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
499 		mz = memcg->nodeinfo[nid];
500 		excess = soft_limit_excess(memcg);
501 		/*
502 		 * We have to update the tree if mz is on the RB-tree or
503 		 * memcg is over its soft limit.
504 		 */
505 		if (excess || mz->on_tree) {
506 			unsigned long flags;
507 
508 			spin_lock_irqsave(&mctz->lock, flags);
509 			/* if on-tree, remove it */
510 			if (mz->on_tree)
511 				__mem_cgroup_remove_exceeded(mz, mctz);
512 			/*
513 			 * Insert again. mz->usage_in_excess will be updated.
514 			 * If excess is 0, no tree ops.
515 			 */
516 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
517 			spin_unlock_irqrestore(&mctz->lock, flags);
518 		}
519 	}
520 }
521 
522 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
523 {
524 	struct mem_cgroup_tree_per_node *mctz;
525 	struct mem_cgroup_per_node *mz;
526 	int nid;
527 
528 	for_each_node(nid) {
529 		mz = memcg->nodeinfo[nid];
530 		mctz = soft_limit_tree.rb_tree_per_node[nid];
531 		if (mctz)
532 			mem_cgroup_remove_exceeded(mz, mctz);
533 	}
534 }
535 
536 static struct mem_cgroup_per_node *
537 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
538 {
539 	struct mem_cgroup_per_node *mz;
540 
541 retry:
542 	mz = NULL;
543 	if (!mctz->rb_rightmost)
544 		goto done;		/* Nothing to reclaim from */
545 
546 	mz = rb_entry(mctz->rb_rightmost,
547 		      struct mem_cgroup_per_node, tree_node);
548 	/*
549 	 * Remove the node now but someone else can add it back;
550 	 * we will add it back at the end of reclaim to its correct
551 	 * position in the tree.
552 	 */
553 	__mem_cgroup_remove_exceeded(mz, mctz);
554 	if (!soft_limit_excess(mz->memcg) ||
555 	    !css_tryget(&mz->memcg->css))
556 		goto retry;
557 done:
558 	return mz;
559 }
560 
561 static struct mem_cgroup_per_node *
562 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
563 {
564 	struct mem_cgroup_per_node *mz;
565 
566 	spin_lock_irq(&mctz->lock);
567 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
568 	spin_unlock_irq(&mctz->lock);
569 	return mz;
570 }
571 
572 /*
573  * memcg and lruvec stats flushing
574  *
575  * Many codepaths leading to stats updates or reads are performance sensitive,
576  * and adding stats flushing in such codepaths is not desirable. So, to
577  * optimize flushing, the kernel does the following:
578  *
579  * 1) Periodically and asynchronously flush the stats every 2 seconds so that
580  *    the rstat update tree does not grow unbounded.
581  *
582  * 2) Flush the stats synchronously on the reader side only when there are more
583  *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the stats be
584  *    out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) update events, but
585  *    only for 2 seconds due to (1).
586  */
587 static void flush_memcg_stats_dwork(struct work_struct *w);
588 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
589 static DEFINE_SPINLOCK(stats_flush_lock);
590 static DEFINE_PER_CPU(unsigned int, stats_updates);
591 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
592 static u64 flush_next_time;
593 
594 #define FLUSH_TIME (2UL*HZ)
595 
596 /*
597  * Accessors to ensure that preemption is disabled on PREEMPT_RT because it
598  * cannot be relied upon as part of an acquired spinlock_t lock. These
599  * functions are never used in hardirq context on PREEMPT_RT and therefore
600  * disabling preemption is sufficient.
601  */
602 static void memcg_stats_lock(void)
603 {
604 	preempt_disable_nested();
605 	VM_WARN_ON_IRQS_ENABLED();
606 }
607 
608 static void __memcg_stats_lock(void)
609 {
610 	preempt_disable_nested();
611 }
612 
613 static void memcg_stats_unlock(void)
614 {
615 	preempt_enable_nested();
616 }
617 
618 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
619 {
620 	unsigned int x;
621 
622 	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
623 
624 	x = __this_cpu_add_return(stats_updates, abs(val));
625 	if (x > MEMCG_CHARGE_BATCH) {
626 		/*
627 		 * If stats_flush_threshold exceeds the threshold
628 		 * (>num_online_cpus()), a cgroup stats flush will be triggered
629 		 * in __mem_cgroup_flush_stats(). Increasing this var further
630 		 * is redundant and simply adds overhead in the atomic update.
631 		 */
632 		if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
633 			atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
634 		__this_cpu_write(stats_updates, 0);
635 	}
636 }
637 
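/*
 * Rough illustration of the heuristic above, assuming MEMCG_CHARGE_BATCH is
 * 64: each CPU batches updates in the per-cpu stats_updates counter; once a
 * CPU accumulates more than 64 pending updates it contributes x / 64 to
 * stats_flush_threshold and resets its local counter. Readers only pay for a
 * synchronous flush once stats_flush_threshold exceeds num_online_cpus(),
 * i.e. roughly MEMCG_CHARGE_BATCH * nr_cpus update events system-wide.
 */
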
638 static void __mem_cgroup_flush_stats(void)
639 {
640 	unsigned long flag;
641 
642 	if (!spin_trylock_irqsave(&stats_flush_lock, flag))
643 		return;
644 
645 	flush_next_time = jiffies_64 + 2*FLUSH_TIME;
646 	cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
647 	atomic_set(&stats_flush_threshold, 0);
648 	spin_unlock_irqrestore(&stats_flush_lock, flag);
649 }
650 
651 void mem_cgroup_flush_stats(void)
652 {
653 	if (atomic_read(&stats_flush_threshold) > num_online_cpus())
654 		__mem_cgroup_flush_stats();
655 }
656 
657 void mem_cgroup_flush_stats_delayed(void)
658 {
659 	if (time_after64(jiffies_64, flush_next_time))
660 		mem_cgroup_flush_stats();
661 }
662 
663 static void flush_memcg_stats_dwork(struct work_struct *w)
664 {
665 	__mem_cgroup_flush_stats();
666 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
667 }
668 
669 /* Subset of vm_event_item to report for memcg event stats */
670 static const unsigned int memcg_vm_event_stat[] = {
671 	PGPGIN,
672 	PGPGOUT,
673 	PGSCAN_KSWAPD,
674 	PGSCAN_DIRECT,
675 	PGSCAN_KHUGEPAGED,
676 	PGSTEAL_KSWAPD,
677 	PGSTEAL_DIRECT,
678 	PGSTEAL_KHUGEPAGED,
679 	PGFAULT,
680 	PGMAJFAULT,
681 	PGREFILL,
682 	PGACTIVATE,
683 	PGDEACTIVATE,
684 	PGLAZYFREE,
685 	PGLAZYFREED,
686 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
687 	ZSWPIN,
688 	ZSWPOUT,
689 #endif
690 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
691 	THP_FAULT_ALLOC,
692 	THP_COLLAPSE_ALLOC,
693 #endif
694 };
695 
696 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
697 static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
698 
699 static void init_memcg_events(void)
700 {
701 	int i;
702 
703 	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
704 		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
705 }
706 
707 static inline int memcg_events_index(enum vm_event_item idx)
708 {
709 	return mem_cgroup_events_index[idx] - 1;
710 }
711 
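/*
 * Example of the sentinel scheme above: init_memcg_events() stores index + 1
 * in mem_cgroup_events_index[], so a zero entry means "event not tracked by
 * memcg" and memcg_events_index() returns -1 for it. For a tracked event
 * such as PGFAULT, the value returned is its position in
 * memcg_vm_event_stat[].
 */
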
712 struct memcg_vmstats_percpu {
713 	/* Local (CPU and cgroup) page state & events */
714 	long			state[MEMCG_NR_STAT];
715 	unsigned long		events[NR_MEMCG_EVENTS];
716 
717 	/* Delta calculation for lockless upward propagation */
718 	long			state_prev[MEMCG_NR_STAT];
719 	unsigned long		events_prev[NR_MEMCG_EVENTS];
720 
721 	/* Cgroup1: threshold notifications & softlimit tree updates */
722 	unsigned long		nr_page_events;
723 	unsigned long		targets[MEM_CGROUP_NTARGETS];
724 };
725 
726 struct memcg_vmstats {
727 	/* Aggregated (CPU and subtree) page state & events */
728 	long			state[MEMCG_NR_STAT];
729 	unsigned long		events[NR_MEMCG_EVENTS];
730 
731 	/* Pending child counts during tree propagation */
732 	long			state_pending[MEMCG_NR_STAT];
733 	unsigned long		events_pending[NR_MEMCG_EVENTS];
734 };
735 
736 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
737 {
738 	long x = READ_ONCE(memcg->vmstats->state[idx]);
739 #ifdef CONFIG_SMP
740 	if (x < 0)
741 		x = 0;
742 #endif
743 	return x;
744 }
745 
746 /**
747  * __mod_memcg_state - update cgroup memory statistics
748  * @memcg: the memory cgroup
749  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
750  * @val: delta to add to the counter, can be negative
751  */
752 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
753 {
754 	if (mem_cgroup_disabled())
755 		return;
756 
757 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
758 	memcg_rstat_updated(memcg, val);
759 }
760 
761 /* idx can be of type enum memcg_stat_item or node_stat_item. */
762 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
763 {
764 	long x = 0;
765 	int cpu;
766 
767 	for_each_possible_cpu(cpu)
768 		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
769 #ifdef CONFIG_SMP
770 	if (x < 0)
771 		x = 0;
772 #endif
773 	return x;
774 }
775 
776 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
777 			      int val)
778 {
779 	struct mem_cgroup_per_node *pn;
780 	struct mem_cgroup *memcg;
781 
782 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
783 	memcg = pn->memcg;
784 
785 	/*
786 	 * Callers from rmap rely on disabled preemption because they never
787 	 * update their counters from interrupt context. For these counters we
788 	 * check that the update is never performed from an interrupt context,
789 	 * while other callers need to have interrupts disabled.
790 	 */
791 	__memcg_stats_lock();
792 	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
793 		switch (idx) {
794 		case NR_ANON_MAPPED:
795 		case NR_FILE_MAPPED:
796 		case NR_ANON_THPS:
797 		case NR_SHMEM_PMDMAPPED:
798 		case NR_FILE_PMDMAPPED:
799 			WARN_ON_ONCE(!in_task());
800 			break;
801 		default:
802 			VM_WARN_ON_IRQS_ENABLED();
803 		}
804 	}
805 
806 	/* Update memcg */
807 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
808 
809 	/* Update lruvec */
810 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
811 
812 	memcg_rstat_updated(memcg, val);
813 	memcg_stats_unlock();
814 }
815 
816 /**
817  * __mod_lruvec_state - update lruvec memory statistics
818  * @lruvec: the lruvec
819  * @idx: the stat item
820  * @val: delta to add to the counter, can be negative
821  *
822  * The lruvec is the intersection of the NUMA node and a cgroup. This
823  * function updates all three counters that are affected by a
824  * change of state at this level: per-node, per-cgroup, per-lruvec.
825  */
826 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
827 			int val)
828 {
829 	/* Update node */
830 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
831 
832 	/* Update memcg and lruvec */
833 	if (!mem_cgroup_disabled())
834 		__mod_memcg_lruvec_state(lruvec, idx, val);
835 }
836 
837 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
838 			     int val)
839 {
840 	struct page *head = compound_head(page); /* rmap on tail pages */
841 	struct mem_cgroup *memcg;
842 	pg_data_t *pgdat = page_pgdat(page);
843 	struct lruvec *lruvec;
844 
845 	rcu_read_lock();
846 	memcg = page_memcg(head);
847 	/* Untracked pages have no memcg, no lruvec. Update only the node */
848 	if (!memcg) {
849 		rcu_read_unlock();
850 		__mod_node_page_state(pgdat, idx, val);
851 		return;
852 	}
853 
854 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
855 	__mod_lruvec_state(lruvec, idx, val);
856 	rcu_read_unlock();
857 }
858 EXPORT_SYMBOL(__mod_lruvec_page_state);
859 
860 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
861 {
862 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
863 	struct mem_cgroup *memcg;
864 	struct lruvec *lruvec;
865 
866 	rcu_read_lock();
867 	memcg = mem_cgroup_from_slab_obj(p);
868 
869 	/*
870 	 * Untracked pages have no memcg, no lruvec. Update only the
871 	 * node. If we reparent the slab objects to the root memcg,
872 	 * when we free the slab object, we need to update the per-memcg
873 	 * vmstats to keep it correct for the root memcg.
874 	 */
875 	if (!memcg) {
876 		__mod_node_page_state(pgdat, idx, val);
877 	} else {
878 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
879 		__mod_lruvec_state(lruvec, idx, val);
880 	}
881 	rcu_read_unlock();
882 }
883 
884 /**
885  * __count_memcg_events - account VM events in a cgroup
886  * @memcg: the memory cgroup
887  * @idx: the event item
888  * @count: the number of events that occurred
889  */
890 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
891 			  unsigned long count)
892 {
893 	int index = memcg_events_index(idx);
894 
895 	if (mem_cgroup_disabled() || index < 0)
896 		return;
897 
898 	memcg_stats_lock();
899 	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
900 	memcg_rstat_updated(memcg, count);
901 	memcg_stats_unlock();
902 }
903 
904 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
905 {
906 	int index = memcg_events_index(event);
907 
908 	if (index < 0)
909 		return 0;
910 	return READ_ONCE(memcg->vmstats->events[index]);
911 }
912 
913 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
914 {
915 	long x = 0;
916 	int cpu;
917 	int index = memcg_events_index(event);
918 
919 	if (index < 0)
920 		return 0;
921 
922 	for_each_possible_cpu(cpu)
923 		x += per_cpu(memcg->vmstats_percpu->events[index], cpu);
924 	return x;
925 }
926 
927 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
928 					 int nr_pages)
929 {
930 	/* a pagein of a big page counts as one event, so ignore the page size */
931 	if (nr_pages > 0)
932 		__count_memcg_events(memcg, PGPGIN, 1);
933 	else {
934 		__count_memcg_events(memcg, PGPGOUT, 1);
935 		nr_pages = -nr_pages; /* for event */
936 	}
937 
938 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
939 }
940 
941 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
942 				       enum mem_cgroup_events_target target)
943 {
944 	unsigned long val, next;
945 
946 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
947 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
948 	/* from time_after() in jiffies.h */
949 	if ((long)(next - val) < 0) {
950 		switch (target) {
951 		case MEM_CGROUP_TARGET_THRESH:
952 			next = val + THRESHOLDS_EVENTS_TARGET;
953 			break;
954 		case MEM_CGROUP_TARGET_SOFTLIMIT:
955 			next = val + SOFTLIMIT_EVENTS_TARGET;
956 			break;
957 		default:
958 			break;
959 		}
960 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
961 		return true;
962 	}
963 	return false;
964 }
965 
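/*
 * Worked example of the wrap-safe comparison above: with a target of
 * THRESHOLDS_EVENTS_TARGET (128), the threshold check fires once the
 * per-cpu nr_page_events counter has advanced at least 128 events past the
 * previously recorded target, and the (long)(next - val) < 0 form keeps the
 * comparison correct even if the counter wraps around.
 */
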
966 /*
967  * Check events in order.
969  */
970 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
971 {
972 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
973 		return;
974 
975 	/* threshold event is triggered in finer grain than soft limit */
976 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
977 						MEM_CGROUP_TARGET_THRESH))) {
978 		bool do_softlimit;
979 
980 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
981 						MEM_CGROUP_TARGET_SOFTLIMIT);
982 		mem_cgroup_threshold(memcg);
983 		if (unlikely(do_softlimit))
984 			mem_cgroup_update_tree(memcg, nid);
985 	}
986 }
987 
988 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
989 {
990 	/*
991 	 * mm_update_next_owner() may clear mm->owner to NULL
992 	 * if it races with swapoff, page migration, etc.
993 	 * So this can be called with p == NULL.
994 	 */
995 	if (unlikely(!p))
996 		return NULL;
997 
998 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
999 }
1000 EXPORT_SYMBOL(mem_cgroup_from_task);
1001 
1002 static __always_inline struct mem_cgroup *active_memcg(void)
1003 {
1004 	if (!in_task())
1005 		return this_cpu_read(int_active_memcg);
1006 	else
1007 		return current->active_memcg;
1008 }
1009 
1010 /**
1011  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1012  * @mm: mm from which memcg should be extracted. It can be NULL.
1013  *
1014  * Obtain a reference on @mm's memcg and return it if successful. If @mm
1015  * is NULL, then the memcg is chosen as follows:
1016  * 1) The active memcg, if set.
1017  * 2) current->mm->memcg, if available.
1018  * 3) The root memcg.
1019  * If mem_cgroup is disabled, NULL is returned.
1020  */
1021 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1022 {
1023 	struct mem_cgroup *memcg;
1024 
1025 	if (mem_cgroup_disabled())
1026 		return NULL;
1027 
1028 	/*
1029 	 * Page cache insertions can happen without an
1030 	 * actual mm context, e.g. during disk probing
1031 	 * on boot, loopback IO, acct() writes etc.
1032 	 *
1033 	 * No need to css_get on root memcg as the reference
1034 	 * counting is disabled on the root level in the
1035 	 * cgroup core. See CSS_NO_REF.
1036 	 */
1037 	if (unlikely(!mm)) {
1038 		memcg = active_memcg();
1039 		if (unlikely(memcg)) {
1040 			/* remote memcg must hold a ref */
1041 			css_get(&memcg->css);
1042 			return memcg;
1043 		}
1044 		mm = current->mm;
1045 		if (unlikely(!mm))
1046 			return root_mem_cgroup;
1047 	}
1048 
1049 	rcu_read_lock();
1050 	do {
1051 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1052 		if (unlikely(!memcg))
1053 			memcg = root_mem_cgroup;
1054 	} while (!css_tryget(&memcg->css));
1055 	rcu_read_unlock();
1056 	return memcg;
1057 }
1058 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1059 
1060 static __always_inline bool memcg_kmem_bypass(void)
1061 {
1062 	/* Allow remote memcg charging from any context. */
1063 	if (unlikely(active_memcg()))
1064 		return false;
1065 
1066 	/* Memcg to charge can't be determined. */
1067 	if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
1068 		return true;
1069 
1070 	return false;
1071 }
1072 
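/*
 * In practical terms (illustrative summary, not an exhaustive rule):
 * allocations from interrupt context or from kernel threads are normally not
 * charged to a memcg, unless a remote memcg was made active first, e.g. via
 * set_active_memcg() around an allocation done on behalf of a specific
 * cgroup.
 */
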
1073 /**
1074  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1075  * @root: hierarchy root
1076  * @prev: previously returned memcg, NULL on first invocation
1077  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1078  *
1079  * Returns references to children of the hierarchy below @root, or
1080  * @root itself, or %NULL after a full round-trip.
1081  *
1082  * Caller must pass the return value in @prev on subsequent
1083  * invocations for reference counting, or use mem_cgroup_iter_break()
1084  * to cancel a hierarchy walk before the round-trip is complete.
1085  *
1086  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1087  * in the hierarchy among all concurrent reclaimers operating on the
1088  * same node.
1089  */
1090 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1091 				   struct mem_cgroup *prev,
1092 				   struct mem_cgroup_reclaim_cookie *reclaim)
1093 {
1094 	struct mem_cgroup_reclaim_iter *iter;
1095 	struct cgroup_subsys_state *css = NULL;
1096 	struct mem_cgroup *memcg = NULL;
1097 	struct mem_cgroup *pos = NULL;
1098 
1099 	if (mem_cgroup_disabled())
1100 		return NULL;
1101 
1102 	if (!root)
1103 		root = root_mem_cgroup;
1104 
1105 	rcu_read_lock();
1106 
1107 	if (reclaim) {
1108 		struct mem_cgroup_per_node *mz;
1109 
1110 		mz = root->nodeinfo[reclaim->pgdat->node_id];
1111 		iter = &mz->iter;
1112 
1113 		/*
1114 		 * On start, join the current reclaim iteration cycle.
1115 		 * Exit when a concurrent walker completes it.
1116 		 */
1117 		if (!prev)
1118 			reclaim->generation = iter->generation;
1119 		else if (reclaim->generation != iter->generation)
1120 			goto out_unlock;
1121 
1122 		while (1) {
1123 			pos = READ_ONCE(iter->position);
1124 			if (!pos || css_tryget(&pos->css))
1125 				break;
1126 			/*
1127 			 * css reference reached zero, so iter->position will
1128 			 * be cleared by ->css_released. However, we should not
1129 			 * rely on this happening soon, because ->css_released
1130 			 * is called from a work queue, and by busy-waiting we
1131 			 * might block it. So we clear iter->position right
1132 			 * away.
1133 			 */
1134 			(void)cmpxchg(&iter->position, pos, NULL);
1135 		}
1136 	} else if (prev) {
1137 		pos = prev;
1138 	}
1139 
1140 	if (pos)
1141 		css = &pos->css;
1142 
1143 	for (;;) {
1144 		css = css_next_descendant_pre(css, &root->css);
1145 		if (!css) {
1146 			/*
1147 			 * Reclaimers share the hierarchy walk, and a
1148 			 * new one might jump in right at the end of
1149 			 * the hierarchy - make sure they see at least
1150 			 * one group and restart from the beginning.
1151 			 */
1152 			if (!prev)
1153 				continue;
1154 			break;
1155 		}
1156 
1157 		/*
1158 		 * Verify the css and acquire a reference.  The root
1159 		 * is provided by the caller, so we know it's alive
1160 		 * and kicking, and don't take an extra reference.
1161 		 */
1162 		if (css == &root->css || css_tryget(css)) {
1163 			memcg = mem_cgroup_from_css(css);
1164 			break;
1165 		}
1166 	}
1167 
1168 	if (reclaim) {
1169 		/*
1170 		 * The position could have already been updated by a competing
1171 		 * thread, so check that the value hasn't changed since we read
1172 		 * it to avoid reclaiming from the same cgroup twice.
1173 		 */
1174 		(void)cmpxchg(&iter->position, pos, memcg);
1175 
1176 		if (pos)
1177 			css_put(&pos->css);
1178 
1179 		if (!memcg)
1180 			iter->generation++;
1181 	}
1182 
1183 out_unlock:
1184 	rcu_read_unlock();
1185 	if (prev && prev != root)
1186 		css_put(&prev->css);
1187 
1188 	return memcg;
1189 }
1190 
1191 /**
1192  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1193  * @root: hierarchy root
1194  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1195  */
1196 void mem_cgroup_iter_break(struct mem_cgroup *root,
1197 			   struct mem_cgroup *prev)
1198 {
1199 	if (!root)
1200 		root = root_mem_cgroup;
1201 	if (prev && prev != root)
1202 		css_put(&prev->css);
1203 }
1204 
1205 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1206 					struct mem_cgroup *dead_memcg)
1207 {
1208 	struct mem_cgroup_reclaim_iter *iter;
1209 	struct mem_cgroup_per_node *mz;
1210 	int nid;
1211 
1212 	for_each_node(nid) {
1213 		mz = from->nodeinfo[nid];
1214 		iter = &mz->iter;
1215 		cmpxchg(&iter->position, dead_memcg, NULL);
1216 	}
1217 }
1218 
1219 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1220 {
1221 	struct mem_cgroup *memcg = dead_memcg;
1222 	struct mem_cgroup *last;
1223 
1224 	do {
1225 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1226 		last = memcg;
1227 	} while ((memcg = parent_mem_cgroup(memcg)));
1228 
1229 	/*
1230 	 * When cgroup1 non-hierarchy mode is used,
1231 	 * parent_mem_cgroup() does not walk all the way up to the
1232 	 * cgroup root (root_mem_cgroup). So we have to handle
1233 	 * dead_memcg from cgroup root separately.
1234 	 */
1235 	if (!mem_cgroup_is_root(last))
1236 		__invalidate_reclaim_iterators(root_mem_cgroup,
1237 						dead_memcg);
1238 }
1239 
1240 /**
1241  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1242  * @memcg: hierarchy root
1243  * @fn: function to call for each task
1244  * @arg: argument passed to @fn
1245  *
1246  * This function iterates over tasks attached to @memcg or to any of its
1247  * descendants and calls @fn for each task. If @fn returns a non-zero
1248  * value, the function breaks the iteration loop and returns the value.
1249  * Otherwise, it will iterate over all tasks and return 0.
1250  *
1251  * This function must not be called for the root memory cgroup.
1252  */
1253 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1254 			  int (*fn)(struct task_struct *, void *), void *arg)
1255 {
1256 	struct mem_cgroup *iter;
1257 	int ret = 0;
1258 
1259 	BUG_ON(mem_cgroup_is_root(memcg));
1260 
1261 	for_each_mem_cgroup_tree(iter, memcg) {
1262 		struct css_task_iter it;
1263 		struct task_struct *task;
1264 
1265 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1266 		while (!ret && (task = css_task_iter_next(&it)))
1267 			ret = fn(task, arg);
1268 		css_task_iter_end(&it);
1269 		if (ret) {
1270 			mem_cgroup_iter_break(memcg, iter);
1271 			break;
1272 		}
1273 	}
1274 	return ret;
1275 }
1276 
1277 #ifdef CONFIG_DEBUG_VM
1278 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1279 {
1280 	struct mem_cgroup *memcg;
1281 
1282 	if (mem_cgroup_disabled())
1283 		return;
1284 
1285 	memcg = folio_memcg(folio);
1286 
1287 	if (!memcg)
1288 		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1289 	else
1290 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1291 }
1292 #endif
1293 
1294 /**
1295  * folio_lruvec_lock - Lock the lruvec for a folio.
1296  * @folio: Pointer to the folio.
1297  *
1298  * These functions are safe to use under any of the following conditions:
1299  * - folio locked
1300  * - folio_test_lru false
1301  * - folio_memcg_lock()
1302  * - folio frozen (refcount of 0)
1303  *
1304  * Return: The lruvec this folio is on with its lock held.
1305  */
1306 struct lruvec *folio_lruvec_lock(struct folio *folio)
1307 {
1308 	struct lruvec *lruvec = folio_lruvec(folio);
1309 
1310 	spin_lock(&lruvec->lru_lock);
1311 	lruvec_memcg_debug(lruvec, folio);
1312 
1313 	return lruvec;
1314 }
1315 
1316 /**
1317  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1318  * @folio: Pointer to the folio.
1319  *
1320  * These functions are safe to use under any of the following conditions:
1321  * - folio locked
1322  * - folio_test_lru false
1323  * - folio_memcg_lock()
1324  * - folio frozen (refcount of 0)
1325  *
1326  * Return: The lruvec this folio is on with its lock held and interrupts
1327  * disabled.
1328  */
1329 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1330 {
1331 	struct lruvec *lruvec = folio_lruvec(folio);
1332 
1333 	spin_lock_irq(&lruvec->lru_lock);
1334 	lruvec_memcg_debug(lruvec, folio);
1335 
1336 	return lruvec;
1337 }
1338 
1339 /**
1340  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1341  * @folio: Pointer to the folio.
1342  * @flags: Pointer to irqsave flags.
1343  *
1344  * These functions are safe to use under any of the following conditions:
1345  * - folio locked
1346  * - folio_test_lru false
1347  * - folio_memcg_lock()
1348  * - folio frozen (refcount of 0)
1349  *
1350  * Return: The lruvec this folio is on with its lock held and interrupts
1351  * disabled.
1352  */
1353 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1354 		unsigned long *flags)
1355 {
1356 	struct lruvec *lruvec = folio_lruvec(folio);
1357 
1358 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1359 	lruvec_memcg_debug(lruvec, folio);
1360 
1361 	return lruvec;
1362 }
1363 
1364 /**
1365  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1366  * @lruvec: mem_cgroup per zone lru vector
1367  * @lru: index of lru list the page is sitting on
1368  * @zid: zone id of the accounted pages
1369  * @nr_pages: positive when adding or negative when removing
1370  *
1371  * This function must be called under lru_lock, just before a page is added
1372  * to or just after a page is removed from an lru list.
1373  */
1374 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1375 				int zid, int nr_pages)
1376 {
1377 	struct mem_cgroup_per_node *mz;
1378 	unsigned long *lru_size;
1379 	long size;
1380 
1381 	if (mem_cgroup_disabled())
1382 		return;
1383 
1384 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1385 	lru_size = &mz->lru_zone_size[zid][lru];
1386 
1387 	if (nr_pages < 0)
1388 		*lru_size += nr_pages;
1389 
1390 	size = *lru_size;
1391 	if (WARN_ONCE(size < 0,
1392 		"%s(%p, %d, %d): lru_size %ld\n",
1393 		__func__, lruvec, lru, nr_pages, size)) {
1394 		VM_BUG_ON(1);
1395 		*lru_size = 0;
1396 	}
1397 
1398 	if (nr_pages > 0)
1399 		*lru_size += nr_pages;
1400 }
1401 
1402 /**
1403  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1404  * @memcg: the memory cgroup
1405  *
1406  * Returns the maximum amount of memory @memcg can be charged with, in
1407  * pages.
1408  */
1409 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1410 {
1411 	unsigned long margin = 0;
1412 	unsigned long count;
1413 	unsigned long limit;
1414 
1415 	count = page_counter_read(&memcg->memory);
1416 	limit = READ_ONCE(memcg->memory.max);
1417 	if (count < limit)
1418 		margin = limit - count;
1419 
1420 	if (do_memsw_account()) {
1421 		count = page_counter_read(&memcg->memsw);
1422 		limit = READ_ONCE(memcg->memsw.max);
1423 		if (count < limit)
1424 			margin = min(margin, limit - count);
1425 		else
1426 			margin = 0;
1427 	}
1428 
1429 	return margin;
1430 }
1431 
1432 /*
1433  * A routine for checking whether "mem" is under move_account() or not.
1434  *
1435  * Checks whether a cgroup is mc.from or mc.to, or under the hierarchy of
1436  * the moving cgroups. This is for waiting at high memory pressure
1437  * caused by "move".
1438  */
1439 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1440 {
1441 	struct mem_cgroup *from;
1442 	struct mem_cgroup *to;
1443 	bool ret = false;
1444 	/*
1445 	 * Unlike task_move routines, we don't access mc.to and mc.from under
1446 	 * the mutual exclusion of cgroup_mutex. Here, we take the spinlock instead.
1447 	 */
1448 	spin_lock(&mc.lock);
1449 	from = mc.from;
1450 	to = mc.to;
1451 	if (!from)
1452 		goto unlock;
1453 
1454 	ret = mem_cgroup_is_descendant(from, memcg) ||
1455 		mem_cgroup_is_descendant(to, memcg);
1456 unlock:
1457 	spin_unlock(&mc.lock);
1458 	return ret;
1459 }
1460 
1461 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1462 {
1463 	if (mc.moving_task && current != mc.moving_task) {
1464 		if (mem_cgroup_under_move(memcg)) {
1465 			DEFINE_WAIT(wait);
1466 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1467 			/* moving charge context might have finished. */
1468 			if (mc.moving_task)
1469 				schedule();
1470 			finish_wait(&mc.waitq, &wait);
1471 			return true;
1472 		}
1473 	}
1474 	return false;
1475 }
1476 
1477 struct memory_stat {
1478 	const char *name;
1479 	unsigned int idx;
1480 };
1481 
1482 static const struct memory_stat memory_stats[] = {
1483 	{ "anon",			NR_ANON_MAPPED			},
1484 	{ "file",			NR_FILE_PAGES			},
1485 	{ "kernel",			MEMCG_KMEM			},
1486 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1487 	{ "pagetables",			NR_PAGETABLE			},
1488 	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1489 	{ "percpu",			MEMCG_PERCPU_B			},
1490 	{ "sock",			MEMCG_SOCK			},
1491 	{ "vmalloc",			MEMCG_VMALLOC			},
1492 	{ "shmem",			NR_SHMEM			},
1493 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1494 	{ "zswap",			MEMCG_ZSWAP_B			},
1495 	{ "zswapped",			MEMCG_ZSWAPPED			},
1496 #endif
1497 	{ "file_mapped",		NR_FILE_MAPPED			},
1498 	{ "file_dirty",			NR_FILE_DIRTY			},
1499 	{ "file_writeback",		NR_WRITEBACK			},
1500 #ifdef CONFIG_SWAP
1501 	{ "swapcached",			NR_SWAPCACHE			},
1502 #endif
1503 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1504 	{ "anon_thp",			NR_ANON_THPS			},
1505 	{ "file_thp",			NR_FILE_THPS			},
1506 	{ "shmem_thp",			NR_SHMEM_THPS			},
1507 #endif
1508 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1509 	{ "active_anon",		NR_ACTIVE_ANON			},
1510 	{ "inactive_file",		NR_INACTIVE_FILE		},
1511 	{ "active_file",		NR_ACTIVE_FILE			},
1512 	{ "unevictable",		NR_UNEVICTABLE			},
1513 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1514 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1515 
1516 	/* The memory events */
1517 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1518 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1519 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1520 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1521 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1522 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1523 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1524 };
1525 
1526 /* Translate stat items to the correct unit for memory.stat output */
1527 static int memcg_page_state_unit(int item)
1528 {
1529 	switch (item) {
1530 	case MEMCG_PERCPU_B:
1531 	case MEMCG_ZSWAP_B:
1532 	case NR_SLAB_RECLAIMABLE_B:
1533 	case NR_SLAB_UNRECLAIMABLE_B:
1534 	case WORKINGSET_REFAULT_ANON:
1535 	case WORKINGSET_REFAULT_FILE:
1536 	case WORKINGSET_ACTIVATE_ANON:
1537 	case WORKINGSET_ACTIVATE_FILE:
1538 	case WORKINGSET_RESTORE_ANON:
1539 	case WORKINGSET_RESTORE_FILE:
1540 	case WORKINGSET_NODERECLAIM:
1541 		return 1;
1542 	case NR_KERNEL_STACK_KB:
1543 		return SZ_1K;
1544 	default:
1545 		return PAGE_SIZE;
1546 	}
1547 }
1548 
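/*
 * Example of the unit translation above: byte-counted items such as
 * NR_SLAB_RECLAIMABLE_B are multiplied by 1, NR_KERNEL_STACK_KB by SZ_1K,
 * and page-counted items such as NR_SHMEM by PAGE_SIZE, so
 * memcg_page_state_output() always reports bytes in memory.stat.
 */
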
1549 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1550 						    int item)
1551 {
1552 	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1553 }
1554 
1555 static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
1556 {
1557 	struct seq_buf s;
1558 	int i;
1559 
1560 	seq_buf_init(&s, buf, bufsize);
1561 
1562 	/*
1563 	 * Provide statistics on the state of the memory subsystem as
1564 	 * well as cumulative event counters that show past behavior.
1565 	 *
1566 	 * This list is ordered following a combination of these gradients:
1567 	 * 1) generic big picture -> specifics and details
1568 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1569 	 *
1570 	 * Current memory state:
1571 	 */
1572 	mem_cgroup_flush_stats();
1573 
1574 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1575 		u64 size;
1576 
1577 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1578 		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1579 
1580 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1581 			size += memcg_page_state_output(memcg,
1582 							NR_SLAB_RECLAIMABLE_B);
1583 			seq_buf_printf(&s, "slab %llu\n", size);
1584 		}
1585 	}
1586 
1587 	/* Accumulated memory events */
1588 	seq_buf_printf(&s, "pgscan %lu\n",
1589 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1590 		       memcg_events(memcg, PGSCAN_DIRECT) +
1591 		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1592 	seq_buf_printf(&s, "pgsteal %lu\n",
1593 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1594 		       memcg_events(memcg, PGSTEAL_DIRECT) +
1595 		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1596 
1597 	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1598 		if (memcg_vm_event_stat[i] == PGPGIN ||
1599 		    memcg_vm_event_stat[i] == PGPGOUT)
1600 			continue;
1601 
1602 		seq_buf_printf(&s, "%s %lu\n",
1603 			       vm_event_name(memcg_vm_event_stat[i]),
1604 			       memcg_events(memcg, memcg_vm_event_stat[i]));
1605 	}
1606 
1607 	/* The above should easily fit into one page */
1608 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1609 }
1610 
1611 #define K(x) ((x) << (PAGE_SHIFT-10))
1612 /**
1613  * mem_cgroup_print_oom_context: Print OOM information relevant to
1614  * memory controller.
1615  * @memcg: The memory cgroup that went over limit
1616  * @p: Task that is going to be killed
1617  *
1618  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1619  * enabled
1620  */
1621 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1622 {
1623 	rcu_read_lock();
1624 
1625 	if (memcg) {
1626 		pr_cont(",oom_memcg=");
1627 		pr_cont_cgroup_path(memcg->css.cgroup);
1628 	} else
1629 		pr_cont(",global_oom");
1630 	if (p) {
1631 		pr_cont(",task_memcg=");
1632 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1633 	}
1634 	rcu_read_unlock();
1635 }
1636 
1637 /**
1638  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1639  * memory controller.
1640  * @memcg: The memory cgroup that went over limit
1641  */
1642 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1643 {
1644 	/* Use a static buffer, since the caller is holding oom_lock. */
1645 	static char buf[PAGE_SIZE];
1646 
1647 	lockdep_assert_held(&oom_lock);
1648 
1649 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1650 		K((u64)page_counter_read(&memcg->memory)),
1651 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1652 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1653 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1654 			K((u64)page_counter_read(&memcg->swap)),
1655 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1656 	else {
1657 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1658 			K((u64)page_counter_read(&memcg->memsw)),
1659 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1660 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1661 			K((u64)page_counter_read(&memcg->kmem)),
1662 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1663 	}
1664 
1665 	pr_info("Memory cgroup stats for ");
1666 	pr_cont_cgroup_path(memcg->css.cgroup);
1667 	pr_cont(":");
1668 	memory_stat_format(memcg, buf, sizeof(buf));
1669 	pr_info("%s", buf);
1670 }
1671 
1672 /*
1673  * Return the memory (and swap, if configured) limit for a memcg.
1674  */
1675 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1676 {
1677 	unsigned long max = READ_ONCE(memcg->memory.max);
1678 
1679 	if (do_memsw_account()) {
1680 		if (mem_cgroup_swappiness(memcg)) {
1681 			/* Calculate swap excess capacity from memsw limit */
1682 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1683 
1684 			max += min(swap, (unsigned long)total_swap_pages);
1685 		}
1686 	} else {
1687 		if (mem_cgroup_swappiness(memcg))
1688 			max += min(READ_ONCE(memcg->swap.max),
1689 				   (unsigned long)total_swap_pages);
1690 	}
1691 	return max;
1692 }
1693 
1694 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
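/*
 * Worked example for the cgroup1 memsw case above (assuming non-zero
 * swappiness): with memory.limit_in_bytes = 1G and
 * memory.memsw.limit_in_bytes = 1.5G, the swap headroom is 0.5G and the
 * reported maximum is 1G + min(0.5G, total_swap_pages).
 */
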
1695 {
1696 	return page_counter_read(&memcg->memory);
1697 }
1698 
1699 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1700 				     int order)
1701 {
1702 	struct oom_control oc = {
1703 		.zonelist = NULL,
1704 		.nodemask = NULL,
1705 		.memcg = memcg,
1706 		.gfp_mask = gfp_mask,
1707 		.order = order,
1708 	};
1709 	bool ret = true;
1710 
1711 	if (mutex_lock_killable(&oom_lock))
1712 		return true;
1713 
1714 	if (mem_cgroup_margin(memcg) >= (1 << order))
1715 		goto unlock;
1716 
1717 	/*
1718 	 * A few threads which were not waiting at mutex_lock_killable() can
1719 	 * fail to bail out. Therefore, check again after holding oom_lock.
1720 	 */
1721 	ret = task_is_dying() || out_of_memory(&oc);
1722 
1723 unlock:
1724 	mutex_unlock(&oom_lock);
1725 	return ret;
1726 }
1727 
1728 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1729 				   pg_data_t *pgdat,
1730 				   gfp_t gfp_mask,
1731 				   unsigned long *total_scanned)
1732 {
1733 	struct mem_cgroup *victim = NULL;
1734 	int total = 0;
1735 	int loop = 0;
1736 	unsigned long excess;
1737 	unsigned long nr_scanned;
1738 	struct mem_cgroup_reclaim_cookie reclaim = {
1739 		.pgdat = pgdat,
1740 	};
1741 
1742 	excess = soft_limit_excess(root_memcg);
1743 
1744 	while (1) {
1745 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1746 		if (!victim) {
1747 			loop++;
1748 			if (loop >= 2) {
1749 				/*
1750 				 * If we have not been able to reclaim
1751 				 * anything, it might be because there are
1752 				 * no reclaimable pages under this hierarchy.
1753 				 */
1754 				if (!total)
1755 					break;
1756 				/*
1757 				 * We want to do more targeted reclaim.
1758 				 * excess >> 2 is not too excessive so as to
1759 				 * reclaim too much, nor too little that we keep
1760 				 * coming back to reclaim from this cgroup.
1761 				 */
1762 				if (total >= (excess >> 2) ||
1763 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1764 					break;
1765 			}
1766 			continue;
1767 		}
1768 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1769 					pgdat, &nr_scanned);
1770 		*total_scanned += nr_scanned;
1771 		if (!soft_limit_excess(root_memcg))
1772 			break;
1773 	}
1774 	mem_cgroup_iter_break(root_memcg, victim);
1775 	return total;
1776 }
1777 
1778 #ifdef CONFIG_LOCKDEP
1779 static struct lockdep_map memcg_oom_lock_dep_map = {
1780 	.name = "memcg_oom_lock",
1781 };
1782 #endif
1783 
1784 static DEFINE_SPINLOCK(memcg_oom_lock);
1785 
1786 /*
1787  * Check whether the OOM killer is already running under our hierarchy.
1788  * If someone else is already running it, return false.
1789  */
1790 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1791 {
1792 	struct mem_cgroup *iter, *failed = NULL;
1793 
1794 	spin_lock(&memcg_oom_lock);
1795 
1796 	for_each_mem_cgroup_tree(iter, memcg) {
1797 		if (iter->oom_lock) {
1798 			/*
1799 			 * this subtree of our hierarchy is already locked
1800 			 * so we cannot grab the lock.
1801 			 */
1802 			failed = iter;
1803 			mem_cgroup_iter_break(memcg, iter);
1804 			break;
1805 		} else
1806 			iter->oom_lock = true;
1807 	}
1808 
1809 	if (failed) {
1810 		/*
1811 		 * OK, we failed to lock the whole subtree so we have
1812 		 * to clean up what we already set up, up to the failing cgroup.
1813 		 */
1814 		for_each_mem_cgroup_tree(iter, memcg) {
1815 			if (iter == failed) {
1816 				mem_cgroup_iter_break(memcg, iter);
1817 				break;
1818 			}
1819 			iter->oom_lock = false;
1820 		}
1821 	} else
1822 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1823 
1824 	spin_unlock(&memcg_oom_lock);
1825 
1826 	return !failed;
1827 }
1828 
1829 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1830 {
1831 	struct mem_cgroup *iter;
1832 
1833 	spin_lock(&memcg_oom_lock);
1834 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1835 	for_each_mem_cgroup_tree(iter, memcg)
1836 		iter->oom_lock = false;
1837 	spin_unlock(&memcg_oom_lock);
1838 }
1839 
1840 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1841 {
1842 	struct mem_cgroup *iter;
1843 
1844 	spin_lock(&memcg_oom_lock);
1845 	for_each_mem_cgroup_tree(iter, memcg)
1846 		iter->under_oom++;
1847 	spin_unlock(&memcg_oom_lock);
1848 }
1849 
1850 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1851 {
1852 	struct mem_cgroup *iter;
1853 
1854 	/*
1855 	 * Be careful about under_oom underflows because a child memcg
1856 	 * could have been added after mem_cgroup_mark_under_oom.
1857 	 */
1858 	spin_lock(&memcg_oom_lock);
1859 	for_each_mem_cgroup_tree(iter, memcg)
1860 		if (iter->under_oom > 0)
1861 			iter->under_oom--;
1862 	spin_unlock(&memcg_oom_lock);
1863 }
1864 
1865 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1866 
1867 struct oom_wait_info {
1868 	struct mem_cgroup *memcg;
1869 	wait_queue_entry_t	wait;
1870 };
1871 
1872 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1873 	unsigned mode, int sync, void *arg)
1874 {
1875 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1876 	struct mem_cgroup *oom_wait_memcg;
1877 	struct oom_wait_info *oom_wait_info;
1878 
1879 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1880 	oom_wait_memcg = oom_wait_info->memcg;
1881 
1882 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1883 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1884 		return 0;
1885 	return autoremove_wake_function(wait, mode, sync, arg);
1886 }
1887 
1888 static void memcg_oom_recover(struct mem_cgroup *memcg)
1889 {
1890 	/*
1891 	 * For the following lockless ->under_oom test, the only required
1892 	 * guarantee is that it must see the state asserted by an OOM when
1893 	 * this function is called as a result of userland actions
1894 	 * triggered by the notification of the OOM.  This is trivially
1895 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1896 	 * triggering notification.
1897 	 */
1898 	if (memcg && memcg->under_oom)
1899 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1900 }
1901 
1902 /*
1903  * Returns true if successfully killed one or more processes. Though in some
1904  * corner cases it can return true even without killing any process.
1905  */
1906 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1907 {
1908 	bool locked, ret;
1909 
1910 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1911 		return false;
1912 
1913 	memcg_memory_event(memcg, MEMCG_OOM);
1914 
1915 	/*
1916 	 * We are in the middle of the charge context here, so we
1917 	 * don't want to block when potentially sitting on a callstack
1918 	 * that holds all kinds of filesystem and mm locks.
1919 	 *
1920 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1921 	 * handling until the charge can succeed; remember the context and put
1922 	 * the task to sleep at the end of the page fault when all locks are
1923 	 * released.
1924 	 *
1925 	 * On the other hand, in-kernel OOM killer allows for an async victim
1926 	 * memory reclaim (oom_reaper) and that means that we are not solely
1927 	 * relying on the oom victim to make a forward progress and we can
1928 	 * invoke the oom killer here.
1929 	 *
1930 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1931 	 * victim and then we have to bail out from the charge path.
1932 	 */
1933 	if (READ_ONCE(memcg->oom_kill_disable)) {
1934 		if (current->in_user_fault) {
1935 			css_get(&memcg->css);
1936 			current->memcg_in_oom = memcg;
1937 			current->memcg_oom_gfp_mask = mask;
1938 			current->memcg_oom_order = order;
1939 		}
1940 		return false;
1941 	}
1942 
1943 	mem_cgroup_mark_under_oom(memcg);
1944 
1945 	locked = mem_cgroup_oom_trylock(memcg);
1946 
1947 	if (locked)
1948 		mem_cgroup_oom_notify(memcg);
1949 
1950 	mem_cgroup_unmark_under_oom(memcg);
1951 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1952 
1953 	if (locked)
1954 		mem_cgroup_oom_unlock(memcg);
1955 
1956 	return ret;
1957 }
1958 
1959 /**
1960  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1961  * @handle: actually kill/wait or just clean up the OOM state
1962  *
1963  * This has to be called at the end of a page fault if the memcg OOM
1964  * handler was enabled.
1965  *
1966  * Memcg supports userspace OOM handling where failed allocations must
1967  * sleep on a waitqueue until the userspace task resolves the
1968  * situation.  Sleeping directly in the charge context with all kinds
1969  * of locks held is not a good idea, instead we remember an OOM state
1970  * in the task and mem_cgroup_oom_synchronize() has to be called at
1971  * the end of the page fault to complete the OOM handling.
1972  *
1973  * Returns %true if an ongoing memcg OOM situation was detected and
1974  * completed, %false otherwise.
1975  */
1976 bool mem_cgroup_oom_synchronize(bool handle)
1977 {
1978 	struct mem_cgroup *memcg = current->memcg_in_oom;
1979 	struct oom_wait_info owait;
1980 	bool locked;
1981 
1982 	/* OOM is global, do not handle */
1983 	if (!memcg)
1984 		return false;
1985 
1986 	if (!handle)
1987 		goto cleanup;
1988 
1989 	owait.memcg = memcg;
1990 	owait.wait.flags = 0;
1991 	owait.wait.func = memcg_oom_wake_function;
1992 	owait.wait.private = current;
1993 	INIT_LIST_HEAD(&owait.wait.entry);
1994 
1995 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1996 	mem_cgroup_mark_under_oom(memcg);
1997 
1998 	locked = mem_cgroup_oom_trylock(memcg);
1999 
2000 	if (locked)
2001 		mem_cgroup_oom_notify(memcg);
2002 
2003 	if (locked && !READ_ONCE(memcg->oom_kill_disable)) {
2004 		mem_cgroup_unmark_under_oom(memcg);
2005 		finish_wait(&memcg_oom_waitq, &owait.wait);
2006 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
2007 					 current->memcg_oom_order);
2008 	} else {
2009 		schedule();
2010 		mem_cgroup_unmark_under_oom(memcg);
2011 		finish_wait(&memcg_oom_waitq, &owait.wait);
2012 	}
2013 
2014 	if (locked) {
2015 		mem_cgroup_oom_unlock(memcg);
2016 		/*
2017 		 * There is no guarantee that an OOM-lock contender
2018 		 * sees the wakeups triggered by the OOM kill
2019 		 * uncharges.  Wake any sleepers explicitly.
2020 		 */
2021 		memcg_oom_recover(memcg);
2022 	}
2023 cleanup:
2024 	current->memcg_in_oom = NULL;
2025 	css_put(&memcg->css);
2026 	return true;
2027 }
2028 
2029 /**
2030  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2031  * @victim: task to be killed by the OOM killer
2032  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2033  *
2034  * Returns a pointer to a memory cgroup, which has to be cleaned up
2035  * by killing all belonging OOM-killable tasks.
2036  *
2037  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2038  */
2039 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2040 					    struct mem_cgroup *oom_domain)
2041 {
2042 	struct mem_cgroup *oom_group = NULL;
2043 	struct mem_cgroup *memcg;
2044 
2045 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2046 		return NULL;
2047 
2048 	if (!oom_domain)
2049 		oom_domain = root_mem_cgroup;
2050 
2051 	rcu_read_lock();
2052 
2053 	memcg = mem_cgroup_from_task(victim);
2054 	if (mem_cgroup_is_root(memcg))
2055 		goto out;
2056 
2057 	/*
2058 	 * If the victim task has been asynchronously moved to a different
2059 	 * memory cgroup, we might end up killing tasks outside oom_domain.
2060 	 * In this case it's better to ignore memory.oom.group.
2061 	 */
2062 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2063 		goto out;
2064 
2065 	/*
2066 	 * Traverse the memory cgroup hierarchy from the victim task's
2067 	 * cgroup up to the OOMing cgroup (or root) to find the
2068 	 * highest-level memory cgroup with oom.group set.
2069 	 */
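	/*
	 * Illustrative example: with a hierarchy A/B/C, oom_domain == A, the
	 * victim in C, and oom.group enabled on B and C, the walk below
	 * visits C, then B, then stops at A and returns B - the
	 * highest-level cgroup with oom.group set within the OOM domain.
	 */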
2070 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2071 		if (READ_ONCE(memcg->oom_group))
2072 			oom_group = memcg;
2073 
2074 		if (memcg == oom_domain)
2075 			break;
2076 	}
2077 
2078 	if (oom_group)
2079 		css_get(&oom_group->css);
2080 out:
2081 	rcu_read_unlock();
2082 
2083 	return oom_group;
2084 }
2085 
2086 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2087 {
2088 	pr_info("Tasks in ");
2089 	pr_cont_cgroup_path(memcg->css.cgroup);
2090 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2091 }
2092 
2093 /**
2094  * folio_memcg_lock - Bind a folio to its memcg.
2095  * @folio: The folio.
2096  *
2097  * This function prevents unlocked LRU folios from being moved to
2098  * another cgroup.
2099  *
2100  * It ensures lifetime of the bound memcg.  The caller is responsible
2101  * for the lifetime of the folio.
2102  */
2103 void folio_memcg_lock(struct folio *folio)
2104 {
2105 	struct mem_cgroup *memcg;
2106 	unsigned long flags;
2107 
2108 	/*
2109 	 * The RCU lock is held throughout the transaction.  The fast
2110 	 * path can get away without acquiring the memcg->move_lock
2111 	 * because page moving starts with an RCU grace period.
2112 	 */
2113 	rcu_read_lock();
2114 
2115 	if (mem_cgroup_disabled())
2116 		return;
2117 again:
2118 	memcg = folio_memcg(folio);
2119 	if (unlikely(!memcg))
2120 		return;
2121 
2122 #ifdef CONFIG_PROVE_LOCKING
2123 	local_irq_save(flags);
2124 	might_lock(&memcg->move_lock);
2125 	local_irq_restore(flags);
2126 #endif
2127 
2128 	if (atomic_read(&memcg->moving_account) <= 0)
2129 		return;
2130 
2131 	spin_lock_irqsave(&memcg->move_lock, flags);
2132 	if (memcg != folio_memcg(folio)) {
2133 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2134 		goto again;
2135 	}
2136 
2137 	/*
2138 	 * When charge migration first begins, we can have multiple
2139 	 * critical sections holding the fast-path RCU lock and one
2140 	 * holding the slowpath move_lock. Track the task that holds the
2141 	 * move_lock for unlock_page_memcg().
2142 	 */
2143 	memcg->move_lock_task = current;
2144 	memcg->move_lock_flags = flags;
2145 }
2146 
2147 void lock_page_memcg(struct page *page)
2148 {
2149 	folio_memcg_lock(page_folio(page));
2150 }
2151 
2152 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2153 {
2154 	if (memcg && memcg->move_lock_task == current) {
2155 		unsigned long flags = memcg->move_lock_flags;
2156 
2157 		memcg->move_lock_task = NULL;
2158 		memcg->move_lock_flags = 0;
2159 
2160 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2161 	}
2162 
2163 	rcu_read_unlock();
2164 }
2165 
2166 /**
2167  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2168  * @folio: The folio.
2169  *
2170  * This releases the binding created by folio_memcg_lock().  This does
2171  * not change the accounting of this folio to its memcg, but it does
2172  * permit others to change it.
2173  */
2174 void folio_memcg_unlock(struct folio *folio)
2175 {
2176 	__folio_memcg_unlock(folio_memcg(folio));
2177 }
2178 
2179 void unlock_page_memcg(struct page *page)
2180 {
2181 	folio_memcg_unlock(page_folio(page));
2182 }
2183 
2184 struct memcg_stock_pcp {
2185 	local_lock_t stock_lock;
2186 	struct mem_cgroup *cached; /* this is never the root cgroup */
2187 	unsigned int nr_pages;
2188 
2189 #ifdef CONFIG_MEMCG_KMEM
2190 	struct obj_cgroup *cached_objcg;
2191 	struct pglist_data *cached_pgdat;
2192 	unsigned int nr_bytes;
2193 	int nr_slab_reclaimable_b;
2194 	int nr_slab_unreclaimable_b;
2195 #endif
2196 
2197 	struct work_struct work;
2198 	unsigned long flags;
2199 #define FLUSHING_CACHED_CHARGE	0
2200 };
2201 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2202 	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2203 };
2204 static DEFINE_MUTEX(percpu_charge_mutex);
2205 
2206 #ifdef CONFIG_MEMCG_KMEM
2207 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2208 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2209 				     struct mem_cgroup *root_memcg);
2210 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2211 
2212 #else
2213 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2214 {
2215 	return NULL;
2216 }
2217 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2218 				     struct mem_cgroup *root_memcg)
2219 {
2220 	return false;
2221 }
2222 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2223 {
2224 }
2225 #endif
2226 
2227 /**
2228  * consume_stock: Try to consume stocked charge on this cpu.
2229  * @memcg: memcg to consume from.
2230  * @nr_pages: how many pages to charge.
2231  *
2232  * The charges will only happen if @memcg matches the current cpu's memcg
2233  * stock, and at least @nr_pages are available in that stock.  Failure to
2234  * service an allocation will refill the stock.
2235  *
2236  * returns true if successful, false otherwise.
2237  */
2238 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2239 {
2240 	struct memcg_stock_pcp *stock;
2241 	unsigned long flags;
2242 	bool ret = false;
2243 
2244 	if (nr_pages > MEMCG_CHARGE_BATCH)
2245 		return ret;
2246 
2247 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2248 
2249 	stock = this_cpu_ptr(&memcg_stock);
2250 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2251 		stock->nr_pages -= nr_pages;
2252 		ret = true;
2253 	}
2254 
2255 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2256 
2257 	return ret;
2258 }
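
/*
 * Note (illustrative): consume_stock() pairs with the done_restock path in
 * try_charge_memcg() below. A small charge that misses the stock charges a
 * full MEMCG_CHARGE_BATCH to the page counters and stocks the remainder
 * here, so subsequent small charges from the same memcg on this CPU are
 * served without touching the shared page counters.
 */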
2259 
2260 /*
2261  * Returns the stocked charges to the page counters and resets the cached information.
2262  */
2263 static void drain_stock(struct memcg_stock_pcp *stock)
2264 {
2265 	struct mem_cgroup *old = stock->cached;
2266 
2267 	if (!old)
2268 		return;
2269 
2270 	if (stock->nr_pages) {
2271 		page_counter_uncharge(&old->memory, stock->nr_pages);
2272 		if (do_memsw_account())
2273 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2274 		stock->nr_pages = 0;
2275 	}
2276 
2277 	css_put(&old->css);
2278 	stock->cached = NULL;
2279 }
2280 
2281 static void drain_local_stock(struct work_struct *dummy)
2282 {
2283 	struct memcg_stock_pcp *stock;
2284 	struct obj_cgroup *old = NULL;
2285 	unsigned long flags;
2286 
2287 	/*
2288 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2289 	 * drain_stock races is that we always operate on local CPU stock
2290 	 * here with IRQ disabled
2291 	 */
2292 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2293 
2294 	stock = this_cpu_ptr(&memcg_stock);
2295 	old = drain_obj_stock(stock);
2296 	drain_stock(stock);
2297 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2298 
2299 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2300 	if (old)
2301 		obj_cgroup_put(old);
2302 }
2303 
2304 /*
2305  * Cache charges (nr_pages) to the local per-cpu area.
2306  * They will be consumed by consume_stock() later.
2307  */
2308 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2309 {
2310 	struct memcg_stock_pcp *stock;
2311 
2312 	stock = this_cpu_ptr(&memcg_stock);
2313 	if (stock->cached != memcg) { /* reset if necessary */
2314 		drain_stock(stock);
2315 		css_get(&memcg->css);
2316 		stock->cached = memcg;
2317 	}
2318 	stock->nr_pages += nr_pages;
2319 
2320 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2321 		drain_stock(stock);
2322 }
2323 
2324 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2325 {
2326 	unsigned long flags;
2327 
2328 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2329 	__refill_stock(memcg, nr_pages);
2330 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2331 }
2332 
2333 /*
2334  * Drains all per-CPU charge caches for the given root_memcg and the
2335  * subtree of the hierarchy under it.
2336  */
2337 static void drain_all_stock(struct mem_cgroup *root_memcg)
2338 {
2339 	int cpu, curcpu;
2340 
2341 	/* If someone's already draining, avoid adding more workers. */
2342 	if (!mutex_trylock(&percpu_charge_mutex))
2343 		return;
2344 	/*
2345 	 * Notify other cpus that system-wide "drain" is running
2346 	 * We do not care about races with the cpu hotplug because cpu down
2347 	 * as well as workers from this path always operate on the local
2348 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2349 	 */
2350 	migrate_disable();
2351 	curcpu = smp_processor_id();
2352 	for_each_online_cpu(cpu) {
2353 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2354 		struct mem_cgroup *memcg;
2355 		bool flush = false;
2356 
2357 		rcu_read_lock();
2358 		memcg = stock->cached;
2359 		if (memcg && stock->nr_pages &&
2360 		    mem_cgroup_is_descendant(memcg, root_memcg))
2361 			flush = true;
2362 		else if (obj_stock_flush_required(stock, root_memcg))
2363 			flush = true;
2364 		rcu_read_unlock();
2365 
2366 		if (flush &&
2367 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2368 			if (cpu == curcpu)
2369 				drain_local_stock(&stock->work);
2370 			else if (!cpu_is_isolated(cpu))
2371 				schedule_work_on(cpu, &stock->work);
2372 		}
2373 	}
2374 	migrate_enable();
2375 	mutex_unlock(&percpu_charge_mutex);
2376 }
2377 
2378 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2379 {
2380 	struct memcg_stock_pcp *stock;
2381 
2382 	stock = &per_cpu(memcg_stock, cpu);
2383 	drain_stock(stock);
2384 
2385 	return 0;
2386 }
2387 
2388 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2389 				  unsigned int nr_pages,
2390 				  gfp_t gfp_mask)
2391 {
2392 	unsigned long nr_reclaimed = 0;
2393 
2394 	do {
2395 		unsigned long pflags;
2396 
2397 		if (page_counter_read(&memcg->memory) <=
2398 		    READ_ONCE(memcg->memory.high))
2399 			continue;
2400 
2401 		memcg_memory_event(memcg, MEMCG_HIGH);
2402 
2403 		psi_memstall_enter(&pflags);
2404 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2405 							gfp_mask,
2406 							MEMCG_RECLAIM_MAY_SWAP);
2407 		psi_memstall_leave(&pflags);
2408 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2409 		 !mem_cgroup_is_root(memcg));
2410 
2411 	return nr_reclaimed;
2412 }
2413 
2414 static void high_work_func(struct work_struct *work)
2415 {
2416 	struct mem_cgroup *memcg;
2417 
2418 	memcg = container_of(work, struct mem_cgroup, high_work);
2419 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2420 }
2421 
2422 /*
2423  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2424  * enough to still cause a significant slowdown in most cases, while still
2425  * allowing diagnostics and tracing to proceed without becoming stuck.
2426  */
2427 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2428 
2429 /*
2430  * When calculating the delay, we use these on either side of the exponentiation to
2431  * maintain precision and scale to a reasonable number of jiffies (see the table
2432  * below).
2433  *
2434  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2435  *   overage ratio to a delay.
2436  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2437  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2438  *   to produce a reasonable delay curve.
2439  *
2440  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2441  * reasonable delay curve compared to precision-adjusted overage, not
2442  * penalising heavily at first, but still making sure that growth beyond the
2443  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2444  * example, with a high of 100 megabytes:
2445  *
2446  *  +-------+------------------------+
2447  *  | usage | time to allocate in ms |
2448  *  +-------+------------------------+
2449  *  | 100M  |                      0 |
2450  *  | 101M  |                      6 |
2451  *  | 102M  |                     25 |
2452  *  | 103M  |                     57 |
2453  *  | 104M  |                    102 |
2454  *  | 105M  |                    159 |
2455  *  | 106M  |                    230 |
2456  *  | 107M  |                    313 |
2457  *  | 108M  |                    409 |
2458  *  | 109M  |                    518 |
2459  *  | 110M  |                    639 |
2460  *  | 111M  |                    774 |
2461  *  | 112M  |                    921 |
2462  *  | 113M  |                   1081 |
2463  *  | 114M  |                   1254 |
2464  *  | 115M  |                   1439 |
2465  *  | 116M  |                   1638 |
2466  *  | 117M  |                   1849 |
2467  *  | 118M  |                   2000 |
2468  *  | 119M  |                   2000 |
2469  *  | 120M  |                   2000 |
2470  *  +-------+------------------------+
2471  */
2472 #define MEMCG_DELAY_PRECISION_SHIFT 20
2473 #define MEMCG_DELAY_SCALING_SHIFT 14
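
/*
 * Worked example (illustrative, assuming HZ == 1000 and a charge of a full
 * MEMCG_CHARGE_BATCH): with memory.high = 100M and usage = 110M,
 * calculate_overage() returns (10M / 100M) << MEMCG_DELAY_PRECISION_SHIFT,
 * i.e. ~104857, and calculate_high_delay() turns that into
 * 104857^2 * HZ >> 20 >> 14 ~= 639 jiffies, matching the 110M row of the
 * table above.
 */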
2474 
2475 static u64 calculate_overage(unsigned long usage, unsigned long high)
2476 {
2477 	u64 overage;
2478 
2479 	if (usage <= high)
2480 		return 0;
2481 
2482 	/*
2483 	 * Prevent division by 0 in overage calculation by acting as if
2484 	 * it was a threshold of 1 page
2485 	 */
2486 	high = max(high, 1UL);
2487 
2488 	overage = usage - high;
2489 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2490 	return div64_u64(overage, high);
2491 }
2492 
2493 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2494 {
2495 	u64 overage, max_overage = 0;
2496 
2497 	do {
2498 		overage = calculate_overage(page_counter_read(&memcg->memory),
2499 					    READ_ONCE(memcg->memory.high));
2500 		max_overage = max(overage, max_overage);
2501 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2502 		 !mem_cgroup_is_root(memcg));
2503 
2504 	return max_overage;
2505 }
2506 
2507 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2508 {
2509 	u64 overage, max_overage = 0;
2510 
2511 	do {
2512 		overage = calculate_overage(page_counter_read(&memcg->swap),
2513 					    READ_ONCE(memcg->swap.high));
2514 		if (overage)
2515 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2516 		max_overage = max(overage, max_overage);
2517 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2518 		 !mem_cgroup_is_root(memcg));
2519 
2520 	return max_overage;
2521 }
2522 
2523 /*
2524  * Get the number of jiffies that we should penalise a mischievous cgroup which
2525  * is exceeding its memory.high by checking both it and its ancestors.
2526  */
2527 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2528 					  unsigned int nr_pages,
2529 					  u64 max_overage)
2530 {
2531 	unsigned long penalty_jiffies;
2532 
2533 	if (!max_overage)
2534 		return 0;
2535 
2536 	/*
2537 	 * We use overage compared to memory.high to calculate the number of
2538 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2539 	 * fairly lenient on small overages, and increasingly harsh when the
2540 	 * memcg in question makes it clear that it has no intention of stopping
2541 	 * its crazy behaviour, so we exponentially increase the delay based on
2542 	 * overage amount.
2543 	 */
2544 	penalty_jiffies = max_overage * max_overage * HZ;
2545 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2546 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2547 
2548 	/*
2549 	 * Factor in the task's own contribution to the overage, such that four
2550 	 * N-sized allocations are throttled approximately the same as one
2551 	 * 4N-sized allocation.
2552 	 *
2553 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2554 	 * larger the current charge batch is than that.
2555 	 */
2556 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2557 }
2558 
2559 /*
2560  * Scheduled by try_charge() to be executed from the userland return path
2561  * and reclaims memory over the high limit.
2562  */
2563 void mem_cgroup_handle_over_high(void)
2564 {
2565 	unsigned long penalty_jiffies;
2566 	unsigned long pflags;
2567 	unsigned long nr_reclaimed;
2568 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2569 	int nr_retries = MAX_RECLAIM_RETRIES;
2570 	struct mem_cgroup *memcg;
2571 	bool in_retry = false;
2572 
2573 	if (likely(!nr_pages))
2574 		return;
2575 
2576 	memcg = get_mem_cgroup_from_mm(current->mm);
2577 	current->memcg_nr_pages_over_high = 0;
2578 
2579 retry_reclaim:
2580 	/*
2581 	 * The allocating task should reclaim at least the batch size, but for
2582 	 * subsequent retries we only want to do what's necessary to prevent oom
2583 	 * or breaching resource isolation.
2584 	 *
2585 	 * This is distinct from memory.max or page allocator behaviour because
2586 	 * memory.high is currently batched, whereas memory.max and the page
2587 	 * allocator run every time an allocation is made.
2588 	 */
2589 	nr_reclaimed = reclaim_high(memcg,
2590 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2591 				    GFP_KERNEL);
2592 
2593 	/*
2594 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2595 	 * allocators proactively to slow down excessive growth.
2596 	 */
2597 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2598 					       mem_find_max_overage(memcg));
2599 
2600 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2601 						swap_find_max_overage(memcg));
2602 
2603 	/*
2604 	 * Clamp the max delay per usermode return so as to still keep the
2605 	 * application moving forwards and also permit diagnostics, albeit
2606 	 * extremely slowly.
2607 	 */
2608 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2609 
2610 	/*
2611 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2612 	 * that it's not even worth doing, in an attempt to be nice to those who
2613 	 * go only a small amount over their memory.high value and maybe haven't
2614 	 * been aggressively reclaimed enough yet.
2615 	 */
2616 	if (penalty_jiffies <= HZ / 100)
2617 		goto out;
2618 
2619 	/*
2620 	 * If reclaim is making forward progress but we're still over
2621 	 * memory.high, we want to encourage that rather than doing allocator
2622 	 * throttling.
2623 	 */
2624 	if (nr_reclaimed || nr_retries--) {
2625 		in_retry = true;
2626 		goto retry_reclaim;
2627 	}
2628 
2629 	/*
2630 	 * If we exit early, we're guaranteed to die (since
2631 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2632 	 * need to account for any ill-begotten jiffies to pay them off later.
2633 	 */
2634 	psi_memstall_enter(&pflags);
2635 	schedule_timeout_killable(penalty_jiffies);
2636 	psi_memstall_leave(&pflags);
2637 
2638 out:
2639 	css_put(&memcg->css);
2640 }
2641 
2642 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2643 			unsigned int nr_pages)
2644 {
2645 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2646 	int nr_retries = MAX_RECLAIM_RETRIES;
2647 	struct mem_cgroup *mem_over_limit;
2648 	struct page_counter *counter;
2649 	unsigned long nr_reclaimed;
2650 	bool passed_oom = false;
2651 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2652 	bool drained = false;
2653 	bool raised_max_event = false;
2654 	unsigned long pflags;
2655 
2656 retry:
2657 	if (consume_stock(memcg, nr_pages))
2658 		return 0;
2659 
2660 	if (!do_memsw_account() ||
2661 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2662 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2663 			goto done_restock;
2664 		if (do_memsw_account())
2665 			page_counter_uncharge(&memcg->memsw, batch);
2666 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2667 	} else {
2668 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2669 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2670 	}
2671 
2672 	if (batch > nr_pages) {
2673 		batch = nr_pages;
2674 		goto retry;
2675 	}
2676 
2677 	/*
2678 	 * Prevent unbounded recursion when reclaim operations need to
2679 	 * allocate memory. This might exceed the limits temporarily,
2680 	 * but we prefer facilitating memory reclaim and getting back
2681 	 * under the limit over triggering OOM kills in these cases.
2682 	 */
2683 	if (unlikely(current->flags & PF_MEMALLOC))
2684 		goto force;
2685 
2686 	if (unlikely(task_in_memcg_oom(current)))
2687 		goto nomem;
2688 
2689 	if (!gfpflags_allow_blocking(gfp_mask))
2690 		goto nomem;
2691 
2692 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2693 	raised_max_event = true;
2694 
2695 	psi_memstall_enter(&pflags);
2696 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2697 						    gfp_mask, reclaim_options);
2698 	psi_memstall_leave(&pflags);
2699 
2700 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2701 		goto retry;
2702 
2703 	if (!drained) {
2704 		drain_all_stock(mem_over_limit);
2705 		drained = true;
2706 		goto retry;
2707 	}
2708 
2709 	if (gfp_mask & __GFP_NORETRY)
2710 		goto nomem;
2711 	/*
2712 	 * Even though the limit is exceeded at this point, reclaim
2713 	 * may have been able to free some pages.  Retry the charge
2714 	 * before killing the task.
2715 	 *
2716 	 * Only for regular pages, though: huge pages are rather
2717 	 * unlikely to succeed so close to the limit, and we fall back
2718 	 * to regular pages anyway in case of failure.
2719 	 */
2720 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2721 		goto retry;
2722 	/*
2723 	 * During task move, charges can temporarily be double counted. So it's
2724 	 * better to wait until the end of the task move if one is in progress.
2725 	 */
2726 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2727 		goto retry;
2728 
2729 	if (nr_retries--)
2730 		goto retry;
2731 
2732 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2733 		goto nomem;
2734 
2735 	/* Avoid endless loop for tasks bypassed by the oom killer */
2736 	if (passed_oom && task_is_dying())
2737 		goto nomem;
2738 
2739 	/*
2740 	 * Keep retrying as long as the memcg OOM killer is able to make
2741 	 * forward progress, or bypass the charge if the OOM killer
2742 	 * couldn't make any progress.
2743 	 */
2744 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2745 			   get_order(nr_pages * PAGE_SIZE))) {
2746 		passed_oom = true;
2747 		nr_retries = MAX_RECLAIM_RETRIES;
2748 		goto retry;
2749 	}
2750 nomem:
2751 	/*
2752 	 * Memcg doesn't have a dedicated reserve for atomic
2753 	 * allocations. But like the global atomic pool, we need to
2754 	 * put the burden of reclaim on regular allocation requests
2755 	 * and let these go through as privileged allocations.
2756 	 */
2757 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2758 		return -ENOMEM;
2759 force:
2760 	/*
2761 	 * If the allocation has to be enforced, don't forget to raise
2762 	 * a MEMCG_MAX event.
2763 	 */
2764 	if (!raised_max_event)
2765 		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2766 
2767 	/*
2768 	 * The allocation either can't fail or will lead to more memory
2769 	 * being freed very soon.  Allow memory usage to go over the limit
2770 	 * temporarily by force charging it.
2771 	 */
2772 	page_counter_charge(&memcg->memory, nr_pages);
2773 	if (do_memsw_account())
2774 		page_counter_charge(&memcg->memsw, nr_pages);
2775 
2776 	return 0;
2777 
2778 done_restock:
2779 	if (batch > nr_pages)
2780 		refill_stock(memcg, batch - nr_pages);
2781 
2782 	/*
2783 	 * If the hierarchy is above the normal consumption range, schedule
2784 	 * reclaim on returning to userland.  We can perform reclaim here
2785 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2786 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2787 	 * not recorded as it most likely matches current's and won't
2788 	 * change in the meantime.  As high limit is checked again before
2789 	 * reclaim, the cost of mismatch is negligible.
2790 	 */
2791 	do {
2792 		bool mem_high, swap_high;
2793 
2794 		mem_high = page_counter_read(&memcg->memory) >
2795 			READ_ONCE(memcg->memory.high);
2796 		swap_high = page_counter_read(&memcg->swap) >
2797 			READ_ONCE(memcg->swap.high);
2798 
2799 		/* Don't bother a random interrupted task */
2800 		if (!in_task()) {
2801 			if (mem_high) {
2802 				schedule_work(&memcg->high_work);
2803 				break;
2804 			}
2805 			continue;
2806 		}
2807 
2808 		if (mem_high || swap_high) {
2809 			/*
2810 			 * The allocating tasks in this cgroup will need to do
2811 			 * reclaim or be throttled to prevent further growth
2812 			 * of the memory or swap footprints.
2813 			 *
2814 			 * Target some best-effort fairness between the tasks,
2815 			 * and distribute reclaim work and delay penalties
2816 			 * based on how much each task is actually allocating.
2817 			 */
2818 			current->memcg_nr_pages_over_high += batch;
2819 			set_notify_resume(current);
2820 			break;
2821 		}
2822 	} while ((memcg = parent_mem_cgroup(memcg)));
2823 
2824 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2825 	    !(current->flags & PF_MEMALLOC) &&
2826 	    gfpflags_allow_blocking(gfp_mask)) {
2827 		mem_cgroup_handle_over_high();
2828 	}
2829 	return 0;
2830 }
2831 
2832 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2833 			     unsigned int nr_pages)
2834 {
2835 	if (mem_cgroup_is_root(memcg))
2836 		return 0;
2837 
2838 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2839 }
2840 
2841 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2842 {
2843 	if (mem_cgroup_is_root(memcg))
2844 		return;
2845 
2846 	page_counter_uncharge(&memcg->memory, nr_pages);
2847 	if (do_memsw_account())
2848 		page_counter_uncharge(&memcg->memsw, nr_pages);
2849 }
2850 
2851 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2852 {
2853 	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2854 	/*
2855 	 * Any of the following ensures the folio's memcg stability:
2856 	 *
2857 	 * - the page lock
2858 	 * - LRU isolation
2859 	 * - lock_page_memcg()
2860 	 * - exclusive reference
2861 	 * - mem_cgroup_trylock_pages()
2862 	 */
2863 	folio->memcg_data = (unsigned long)memcg;
2864 }
2865 
2866 #ifdef CONFIG_MEMCG_KMEM
2867 /*
2868  * The allocated objcg pointers array is not accounted directly.
2869  * Moreover, it should not come from a DMA buffer and is not readily
2870  * reclaimable. So those GFP bits should be masked off.
2871  */
2872 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2873 
2874 /*
2875  * mod_objcg_mlstate() may be called with irq enabled, so
2876  * mod_objcg_mlstate() may be called with irqs enabled, so the
2877  * irq-safe mod_memcg_lruvec_state() should be used.
2878 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2879 				     struct pglist_data *pgdat,
2880 				     enum node_stat_item idx, int nr)
2881 {
2882 	struct mem_cgroup *memcg;
2883 	struct lruvec *lruvec;
2884 
2885 	rcu_read_lock();
2886 	memcg = obj_cgroup_memcg(objcg);
2887 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2888 	mod_memcg_lruvec_state(lruvec, idx, nr);
2889 	rcu_read_unlock();
2890 }
2891 
2892 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2893 				 gfp_t gfp, bool new_slab)
2894 {
2895 	unsigned int objects = objs_per_slab(s, slab);
2896 	unsigned long memcg_data;
2897 	void *vec;
2898 
2899 	gfp &= ~OBJCGS_CLEAR_MASK;
2900 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2901 			   slab_nid(slab));
2902 	if (!vec)
2903 		return -ENOMEM;
2904 
2905 	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2906 	if (new_slab) {
2907 		/*
2908 		 * If the slab is brand new and nobody can yet access its
2909 		 * memcg_data, no synchronization is required and memcg_data can
2910 		 * be simply assigned.
2911 		 */
2912 		slab->memcg_data = memcg_data;
2913 	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2914 		/*
2915 		 * If the slab is already in use, somebody can allocate and
2916 		 * assign obj_cgroups in parallel. In this case the existing
2917 		 * objcg vector should be reused.
2918 		 */
2919 		kfree(vec);
2920 		return 0;
2921 	}
2922 
2923 	kmemleak_not_leak(vec);
2924 	return 0;
2925 }
2926 
2927 static __always_inline
2928 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2929 {
2930 	/*
2931 	 * Slab objects are accounted individually, not per-page.
2932 	 * Memcg membership data for each individual object is saved in
2933 	 * slab->memcg_data.
2934 	 */
2935 	if (folio_test_slab(folio)) {
2936 		struct obj_cgroup **objcgs;
2937 		struct slab *slab;
2938 		unsigned int off;
2939 
2940 		slab = folio_slab(folio);
2941 		objcgs = slab_objcgs(slab);
2942 		if (!objcgs)
2943 			return NULL;
2944 
2945 		off = obj_to_index(slab->slab_cache, slab, p);
2946 		if (objcgs[off])
2947 			return obj_cgroup_memcg(objcgs[off]);
2948 
2949 		return NULL;
2950 	}
2951 
2952 	/*
2953 	 * folio_memcg_check() is used here, because in theory we can encounter
2954 	 * a folio where the slab flag has been cleared already, but
2955 	 * slab->memcg_data has not been freed yet.
2956 	 * folio_memcg_check() will guarantee that a proper memory
2957 	 * cgroup pointer or NULL will be returned.
2958 	 */
2959 	return folio_memcg_check(folio);
2960 }
2961 
2962 /*
2963  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2964  *
2965  * A passed kernel object can be a slab object, vmalloc object or a generic
2966  * kernel page, so different mechanisms for getting the memory cgroup pointer
2967  * should be used.
2968  *
2969  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2970  * cannot know for sure how the kernel object is implemented.
2971  * mem_cgroup_from_obj() can be safely used in such cases.
2972  *
2973  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2974  * cgroup_mutex, etc.
2975  */
2976 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2977 {
2978 	struct folio *folio;
2979 
2980 	if (mem_cgroup_disabled())
2981 		return NULL;
2982 
2983 	if (unlikely(is_vmalloc_addr(p)))
2984 		folio = page_folio(vmalloc_to_page(p));
2985 	else
2986 		folio = virt_to_folio(p);
2987 
2988 	return mem_cgroup_from_obj_folio(folio, p);
2989 }
2990 
2991 /*
2992  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2993  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
2994  * allocated using vmalloc().
2995  *
2996  * A passed kernel object must be a slab object or a generic kernel page.
2997  *
2998  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2999  * cgroup_mutex, etc.
3000  */
3001 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3002 {
3003 	if (mem_cgroup_disabled())
3004 		return NULL;
3005 
3006 	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3007 }
3008 
3009 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3010 {
3011 	struct obj_cgroup *objcg = NULL;
3012 
3013 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3014 		objcg = rcu_dereference(memcg->objcg);
3015 		if (objcg && obj_cgroup_tryget(objcg))
3016 			break;
3017 		objcg = NULL;
3018 	}
3019 	return objcg;
3020 }
3021 
3022 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3023 {
3024 	struct obj_cgroup *objcg = NULL;
3025 	struct mem_cgroup *memcg;
3026 
3027 	if (memcg_kmem_bypass())
3028 		return NULL;
3029 
3030 	rcu_read_lock();
3031 	if (unlikely(active_memcg()))
3032 		memcg = active_memcg();
3033 	else
3034 		memcg = mem_cgroup_from_task(current);
3035 	objcg = __get_obj_cgroup_from_memcg(memcg);
3036 	rcu_read_unlock();
3037 	return objcg;
3038 }
3039 
3040 struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
3041 {
3042 	struct obj_cgroup *objcg;
3043 
3044 	if (!memcg_kmem_online())
3045 		return NULL;
3046 
3047 	if (PageMemcgKmem(page)) {
3048 		objcg = __folio_objcg(page_folio(page));
3049 		obj_cgroup_get(objcg);
3050 	} else {
3051 		struct mem_cgroup *memcg;
3052 
3053 		rcu_read_lock();
3054 		memcg = __folio_memcg(page_folio(page));
3055 		if (memcg)
3056 			objcg = __get_obj_cgroup_from_memcg(memcg);
3057 		else
3058 			objcg = NULL;
3059 		rcu_read_unlock();
3060 	}
3061 	return objcg;
3062 }
3063 
3064 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3065 {
3066 	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3067 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3068 		if (nr_pages > 0)
3069 			page_counter_charge(&memcg->kmem, nr_pages);
3070 		else
3071 			page_counter_uncharge(&memcg->kmem, -nr_pages);
3072 	}
3073 }
3074 
3075 
3076 /*
3077  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3078  * @objcg: object cgroup to uncharge
3079  * @nr_pages: number of pages to uncharge
3080  */
3081 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3082 				      unsigned int nr_pages)
3083 {
3084 	struct mem_cgroup *memcg;
3085 
3086 	memcg = get_mem_cgroup_from_objcg(objcg);
3087 
3088 	memcg_account_kmem(memcg, -nr_pages);
3089 	refill_stock(memcg, nr_pages);
3090 
3091 	css_put(&memcg->css);
3092 }
3093 
3094 /*
3095  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3096  * @objcg: object cgroup to charge
3097  * @gfp: reclaim mode
3098  * @nr_pages: number of pages to charge
3099  *
3100  * Returns 0 on success, an error code on failure.
3101  */
3102 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3103 				   unsigned int nr_pages)
3104 {
3105 	struct mem_cgroup *memcg;
3106 	int ret;
3107 
3108 	memcg = get_mem_cgroup_from_objcg(objcg);
3109 
3110 	ret = try_charge_memcg(memcg, gfp, nr_pages);
3111 	if (ret)
3112 		goto out;
3113 
3114 	memcg_account_kmem(memcg, nr_pages);
3115 out:
3116 	css_put(&memcg->css);
3117 
3118 	return ret;
3119 }
3120 
3121 /**
3122  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3123  * @page: page to charge
3124  * @gfp: reclaim mode
3125  * @order: allocation order
3126  *
3127  * Returns 0 on success, an error code on failure.
3128  */
3129 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3130 {
3131 	struct obj_cgroup *objcg;
3132 	int ret = 0;
3133 
3134 	objcg = get_obj_cgroup_from_current();
3135 	if (objcg) {
3136 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3137 		if (!ret) {
3138 			page->memcg_data = (unsigned long)objcg |
3139 				MEMCG_DATA_KMEM;
3140 			return 0;
3141 		}
3142 		obj_cgroup_put(objcg);
3143 	}
3144 	return ret;
3145 }
3146 
3147 /**
3148  * __memcg_kmem_uncharge_page: uncharge a kmem page
3149  * @page: page to uncharge
3150  * @order: allocation order
3151  */
3152 void __memcg_kmem_uncharge_page(struct page *page, int order)
3153 {
3154 	struct folio *folio = page_folio(page);
3155 	struct obj_cgroup *objcg;
3156 	unsigned int nr_pages = 1 << order;
3157 
3158 	if (!folio_memcg_kmem(folio))
3159 		return;
3160 
3161 	objcg = __folio_objcg(folio);
3162 	obj_cgroup_uncharge_pages(objcg, nr_pages);
3163 	folio->memcg_data = 0;
3164 	obj_cgroup_put(objcg);
3165 }
3166 
3167 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3168 		     enum node_stat_item idx, int nr)
3169 {
3170 	struct memcg_stock_pcp *stock;
3171 	struct obj_cgroup *old = NULL;
3172 	unsigned long flags;
3173 	int *bytes;
3174 
3175 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3176 	stock = this_cpu_ptr(&memcg_stock);
3177 
3178 	/*
3179 	 * Save vmstat data in stock and skip vmstat array update unless
3180 	 * accumulating over a page of vmstat data or when pgdat or idx
3181 	 * changes.
3182 	 */
3183 	if (stock->cached_objcg != objcg) {
3184 		old = drain_obj_stock(stock);
3185 		obj_cgroup_get(objcg);
3186 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3187 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3188 		stock->cached_objcg = objcg;
3189 		stock->cached_pgdat = pgdat;
3190 	} else if (stock->cached_pgdat != pgdat) {
3191 		/* Flush the existing cached vmstat data */
3192 		struct pglist_data *oldpg = stock->cached_pgdat;
3193 
3194 		if (stock->nr_slab_reclaimable_b) {
3195 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3196 					  stock->nr_slab_reclaimable_b);
3197 			stock->nr_slab_reclaimable_b = 0;
3198 		}
3199 		if (stock->nr_slab_unreclaimable_b) {
3200 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3201 					  stock->nr_slab_unreclaimable_b);
3202 			stock->nr_slab_unreclaimable_b = 0;
3203 		}
3204 		stock->cached_pgdat = pgdat;
3205 	}
3206 
3207 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3208 					       : &stock->nr_slab_unreclaimable_b;
3209 	/*
3210 	 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
3211 	 * cached locally at least once before pushing it out.
3212 	 */
3213 	if (!*bytes) {
3214 		*bytes = nr;
3215 		nr = 0;
3216 	} else {
3217 		*bytes += nr;
3218 		if (abs(*bytes) > PAGE_SIZE) {
3219 			nr = *bytes;
3220 			*bytes = 0;
3221 		} else {
3222 			nr = 0;
3223 		}
3224 	}
3225 	if (nr)
3226 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3227 
3228 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3229 	if (old)
3230 		obj_cgroup_put(old);
3231 }
3232 
3233 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3234 {
3235 	struct memcg_stock_pcp *stock;
3236 	unsigned long flags;
3237 	bool ret = false;
3238 
3239 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3240 
3241 	stock = this_cpu_ptr(&memcg_stock);
3242 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3243 		stock->nr_bytes -= nr_bytes;
3244 		ret = true;
3245 	}
3246 
3247 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3248 
3249 	return ret;
3250 }
3251 
3252 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3253 {
3254 	struct obj_cgroup *old = stock->cached_objcg;
3255 
3256 	if (!old)
3257 		return NULL;
3258 
3259 	if (stock->nr_bytes) {
3260 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3261 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3262 
3263 		if (nr_pages) {
3264 			struct mem_cgroup *memcg;
3265 
3266 			memcg = get_mem_cgroup_from_objcg(old);
3267 
3268 			memcg_account_kmem(memcg, -nr_pages);
3269 			__refill_stock(memcg, nr_pages);
3270 
3271 			css_put(&memcg->css);
3272 		}
3273 
3274 		/*
3275 		 * The leftover is flushed to the centralized per-memcg value.
3276 		 * On the next attempt to refill obj stock it will be moved
3277 		 * to a per-cpu stock (probably on another CPU), see
3278 		 * refill_obj_stock().
3279 		 *
3280 		 * How often it's flushed is a trade-off between the memory
3281 		 * limit enforcement accuracy and potential CPU contention,
3282 		 * so it might be changed in the future.
3283 		 */
3284 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3285 		stock->nr_bytes = 0;
3286 	}
3287 
3288 	/*
3289 	 * Flush the vmstat data in current stock
3290 	 */
3291 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3292 		if (stock->nr_slab_reclaimable_b) {
3293 			mod_objcg_mlstate(old, stock->cached_pgdat,
3294 					  NR_SLAB_RECLAIMABLE_B,
3295 					  stock->nr_slab_reclaimable_b);
3296 			stock->nr_slab_reclaimable_b = 0;
3297 		}
3298 		if (stock->nr_slab_unreclaimable_b) {
3299 			mod_objcg_mlstate(old, stock->cached_pgdat,
3300 					  NR_SLAB_UNRECLAIMABLE_B,
3301 					  stock->nr_slab_unreclaimable_b);
3302 			stock->nr_slab_unreclaimable_b = 0;
3303 		}
3304 		stock->cached_pgdat = NULL;
3305 	}
3306 
3307 	stock->cached_objcg = NULL;
3308 	/*
3309 	 * The `old' objcg needs to be released by the caller via
3310 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3311 	 */
3312 	return old;
3313 }
3314 
3315 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3316 				     struct mem_cgroup *root_memcg)
3317 {
3318 	struct mem_cgroup *memcg;
3319 
3320 	if (stock->cached_objcg) {
3321 		memcg = obj_cgroup_memcg(stock->cached_objcg);
3322 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3323 			return true;
3324 	}
3325 
3326 	return false;
3327 }
3328 
3329 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3330 			     bool allow_uncharge)
3331 {
3332 	struct memcg_stock_pcp *stock;
3333 	struct obj_cgroup *old = NULL;
3334 	unsigned long flags;
3335 	unsigned int nr_pages = 0;
3336 
3337 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3338 
3339 	stock = this_cpu_ptr(&memcg_stock);
3340 	if (stock->cached_objcg != objcg) { /* reset if necessary */
3341 		old = drain_obj_stock(stock);
3342 		obj_cgroup_get(objcg);
3343 		stock->cached_objcg = objcg;
3344 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3345 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3346 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3347 	}
3348 	stock->nr_bytes += nr_bytes;
3349 
3350 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3351 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3352 		stock->nr_bytes &= (PAGE_SIZE - 1);
3353 	}
3354 
3355 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3356 	if (old)
3357 		obj_cgroup_put(old);
3358 
3359 	if (nr_pages)
3360 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3361 }
3362 
3363 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3364 {
3365 	unsigned int nr_pages, nr_bytes;
3366 	int ret;
3367 
3368 	if (consume_obj_stock(objcg, size))
3369 		return 0;
3370 
3371 	/*
3372 	 * In theory, objcg->nr_charged_bytes can have enough
3373 	 * pre-charged bytes to satisfy the allocation. However,
3374 	 * flushing objcg->nr_charged_bytes requires two atomic
3375 	 * operations, and objcg->nr_charged_bytes can't be big.
3376 	 * The shared objcg->nr_charged_bytes can also become a
3377 	 * performance bottleneck if all tasks of the same memcg are
3378 	 * trying to update it. So it's better to ignore it and try
3379 	 * trying to update it. So it's better to ignore it and try to
3380 	 * grab some new pages. The stock's nr_bytes will be flushed to
3381 	 * objcg->nr_charged_bytes later on when objcg changes.
3382 	 *
3383 	 * The stock's nr_bytes may contain enough pre-charged bytes
3384 	 * to allow one less page to be charged, but we can't rely
3385 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3386 	 * pre-charged bytes as well when charging pages. To avoid a
3387 	 * page uncharge right after a page charge, we set the
3388 	 * allow_uncharge flag to false when calling refill_obj_stock()
3389 	 * to temporarily allow the pre-charged bytes to exceed the page
3390 	 * size limit. The maximum reachable value of the pre-charged
3391 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3392 	 * race.
3393 	 */
3394 	nr_pages = size >> PAGE_SHIFT;
3395 	nr_bytes = size & (PAGE_SIZE - 1);
3396 
3397 	if (nr_bytes)
3398 		nr_pages += 1;
3399 
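	/*
	 * Illustrative example: for size == PAGE_SIZE + 700, nr_pages ends
	 * up as 2 and nr_bytes as 700; two whole pages are charged below and
	 * the unused PAGE_SIZE - 700 bytes are put back into the per-cpu
	 * objcg stock by refill_obj_stock().
	 */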
3400 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3401 	if (!ret && nr_bytes)
3402 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3403 
3404 	return ret;
3405 }
3406 
3407 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3408 {
3409 	refill_obj_stock(objcg, size, true);
3410 }
3411 
3412 #endif /* CONFIG_MEMCG_KMEM */
3413 
3414 /*
3415  * Because page_memcg(head) is not set on tails, set it now.
3416  */
3417 void split_page_memcg(struct page *head, unsigned int nr)
3418 {
3419 	struct folio *folio = page_folio(head);
3420 	struct mem_cgroup *memcg = folio_memcg(folio);
3421 	int i;
3422 
3423 	if (mem_cgroup_disabled() || !memcg)
3424 		return;
3425 
3426 	for (i = 1; i < nr; i++)
3427 		folio_page(folio, i)->memcg_data = folio->memcg_data;
3428 
3429 	if (folio_memcg_kmem(folio))
3430 		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3431 	else
3432 		css_get_many(&memcg->css, nr - 1);
3433 }
3434 
3435 #ifdef CONFIG_SWAP
3436 /**
3437  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3438  * @entry: swap entry to be moved
3439  * @from:  mem_cgroup which the entry is moved from
3440  * @to:  mem_cgroup which the entry is moved to
3441  *
3442  * It succeeds only when the swap_cgroup's record for this entry is the same
3443  * as the mem_cgroup's id of @from.
3444  *
3445  * Returns 0 on success, -EINVAL on failure.
3446  *
3447  * The caller must have charged to @to, IOW, called page_counter_charge() for
3448  * both res and memsw, and called css_get().
3449  */
3450 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3451 				struct mem_cgroup *from, struct mem_cgroup *to)
3452 {
3453 	unsigned short old_id, new_id;
3454 
3455 	old_id = mem_cgroup_id(from);
3456 	new_id = mem_cgroup_id(to);
3457 
3458 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3459 		mod_memcg_state(from, MEMCG_SWAP, -1);
3460 		mod_memcg_state(to, MEMCG_SWAP, 1);
3461 		return 0;
3462 	}
3463 	return -EINVAL;
3464 }
3465 #else
3466 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3467 				struct mem_cgroup *from, struct mem_cgroup *to)
3468 {
3469 	return -EINVAL;
3470 }
3471 #endif
3472 
3473 static DEFINE_MUTEX(memcg_max_mutex);
3474 
3475 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3476 				 unsigned long max, bool memsw)
3477 {
3478 	bool enlarge = false;
3479 	bool drained = false;
3480 	int ret;
3481 	bool limits_invariant;
3482 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3483 
3484 	do {
3485 		if (signal_pending(current)) {
3486 			ret = -EINTR;
3487 			break;
3488 		}
3489 
3490 		mutex_lock(&memcg_max_mutex);
3491 		/*
3492 		 * Make sure that the new limit (memsw or memory limit) doesn't
3493 		 * break our basic invariant rule memory.max <= memsw.max.
3494 		 */
3495 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3496 					   max <= memcg->memsw.max;
3497 		if (!limits_invariant) {
3498 			mutex_unlock(&memcg_max_mutex);
3499 			ret = -EINVAL;
3500 			break;
3501 		}
3502 		if (max > counter->max)
3503 			enlarge = true;
3504 		ret = page_counter_set_max(counter, max);
3505 		mutex_unlock(&memcg_max_mutex);
3506 
3507 		if (!ret)
3508 			break;
3509 
3510 		if (!drained) {
3511 			drain_all_stock(memcg);
3512 			drained = true;
3513 			continue;
3514 		}
3515 
3516 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3517 					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3518 			ret = -EBUSY;
3519 			break;
3520 		}
3521 	} while (true);
3522 
3523 	if (!ret && enlarge)
3524 		memcg_oom_recover(memcg);
3525 
3526 	return ret;
3527 }
3528 
3529 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3530 					    gfp_t gfp_mask,
3531 					    unsigned long *total_scanned)
3532 {
3533 	unsigned long nr_reclaimed = 0;
3534 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3535 	unsigned long reclaimed;
3536 	int loop = 0;
3537 	struct mem_cgroup_tree_per_node *mctz;
3538 	unsigned long excess;
3539 
3540 	if (lru_gen_enabled())
3541 		return 0;
3542 
3543 	if (order > 0)
3544 		return 0;
3545 
3546 	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3547 
3548 	/*
3549 	 * Do not even bother to check the largest node if the root
3550 	 * is empty. Do it lockless to prevent lock bouncing. Races
3551 	 * are acceptable as soft limit is best effort anyway.
3552 	 */
3553 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3554 		return 0;
3555 
3556 	/*
3557 	 * This loop can run a while, especially if mem_cgroups continuously
3558 	 * keep exceeding their soft limit and putting the system under
3559 	 * pressure.
3560 	 */
3561 	do {
3562 		if (next_mz)
3563 			mz = next_mz;
3564 		else
3565 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3566 		if (!mz)
3567 			break;
3568 
3569 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3570 						    gfp_mask, total_scanned);
3571 		nr_reclaimed += reclaimed;
3572 		spin_lock_irq(&mctz->lock);
3573 
3574 		/*
3575 		 * If we failed to reclaim anything from this memory cgroup,
3576 		 * it is time to move on to the next cgroup.
3577 		 */
3578 		next_mz = NULL;
3579 		if (!reclaimed)
3580 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3581 
3582 		excess = soft_limit_excess(mz->memcg);
3583 		/*
3584 		 * One school of thought says that we should not add
3585 		 * back the node to the tree if reclaim returns 0.
3586 		 * But our reclaim could return 0 simply because, due
3587 		 * to priority, we are exposing a smaller subset of
3588 		 * memory to reclaim from. Consider this a longer
3589 		 * term TODO.
3590 		 */
3591 		/* If excess == 0, no tree ops */
3592 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3593 		spin_unlock_irq(&mctz->lock);
3594 		css_put(&mz->memcg->css);
3595 		loop++;
3596 		/*
3597 		 * Could not reclaim anything and there are no more
3598 		 * mem cgroups to try or we seem to be looping without
3599 		 * reclaiming anything.
3600 		 */
3601 		if (!nr_reclaimed &&
3602 			(next_mz == NULL ||
3603 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3604 			break;
3605 	} while (!nr_reclaimed);
3606 	if (next_mz)
3607 		css_put(&next_mz->memcg->css);
3608 	return nr_reclaimed;
3609 }
3610 
3611 /*
3612  * Reclaims as many pages from the given memcg as possible.
3613  *
3614  * Caller is responsible for holding css reference for memcg.
3615  */
3616 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3617 {
3618 	int nr_retries = MAX_RECLAIM_RETRIES;
3619 
3620 	/* we call try-to-free pages to make this cgroup empty */
3621 	lru_add_drain_all();
3622 
3623 	drain_all_stock(memcg);
3624 
3625 	/* try to free all pages in this cgroup */
3626 	while (nr_retries && page_counter_read(&memcg->memory)) {
3627 		if (signal_pending(current))
3628 			return -EINTR;
3629 
3630 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3631 						  MEMCG_RECLAIM_MAY_SWAP))
3632 			nr_retries--;
3633 	}
3634 
3635 	return 0;
3636 }
3637 
3638 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3639 					    char *buf, size_t nbytes,
3640 					    loff_t off)
3641 {
3642 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3643 
3644 	if (mem_cgroup_is_root(memcg))
3645 		return -EINVAL;
3646 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3647 }
3648 
3649 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3650 				     struct cftype *cft)
3651 {
3652 	return 1;
3653 }
3654 
3655 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3656 				      struct cftype *cft, u64 val)
3657 {
3658 	if (val == 1)
3659 		return 0;
3660 
3661 	pr_warn_once("Non-hierarchical mode is deprecated. "
3662 		     "Please report your usecase to linux-mm@kvack.org if you "
3663 		     "depend on this functionality.\n");
3664 
3665 	return -EINVAL;
3666 }
3667 
3668 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3669 {
3670 	unsigned long val;
3671 
3672 	if (mem_cgroup_is_root(memcg)) {
3673 		mem_cgroup_flush_stats();
3674 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3675 			memcg_page_state(memcg, NR_ANON_MAPPED);
3676 		if (swap)
3677 			val += memcg_page_state(memcg, MEMCG_SWAP);
3678 	} else {
3679 		if (!swap)
3680 			val = page_counter_read(&memcg->memory);
3681 		else
3682 			val = page_counter_read(&memcg->memsw);
3683 	}
3684 	return val;
3685 }
3686 
3687 enum {
3688 	RES_USAGE,
3689 	RES_LIMIT,
3690 	RES_MAX_USAGE,
3691 	RES_FAILCNT,
3692 	RES_SOFT_LIMIT,
3693 };
3694 
3695 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3696 			       struct cftype *cft)
3697 {
3698 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3699 	struct page_counter *counter;
3700 
3701 	switch (MEMFILE_TYPE(cft->private)) {
3702 	case _MEM:
3703 		counter = &memcg->memory;
3704 		break;
3705 	case _MEMSWAP:
3706 		counter = &memcg->memsw;
3707 		break;
3708 	case _KMEM:
3709 		counter = &memcg->kmem;
3710 		break;
3711 	case _TCP:
3712 		counter = &memcg->tcpmem;
3713 		break;
3714 	default:
3715 		BUG();
3716 	}
3717 
3718 	switch (MEMFILE_ATTR(cft->private)) {
3719 	case RES_USAGE:
3720 		if (counter == &memcg->memory)
3721 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3722 		if (counter == &memcg->memsw)
3723 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3724 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3725 	case RES_LIMIT:
3726 		return (u64)counter->max * PAGE_SIZE;
3727 	case RES_MAX_USAGE:
3728 		return (u64)counter->watermark * PAGE_SIZE;
3729 	case RES_FAILCNT:
3730 		return counter->failcnt;
3731 	case RES_SOFT_LIMIT:
3732 		return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3733 	default:
3734 		BUG();
3735 	}
3736 }
3737 
3738 #ifdef CONFIG_MEMCG_KMEM
3739 static int memcg_online_kmem(struct mem_cgroup *memcg)
3740 {
3741 	struct obj_cgroup *objcg;
3742 
3743 	if (mem_cgroup_kmem_disabled())
3744 		return 0;
3745 
3746 	if (unlikely(mem_cgroup_is_root(memcg)))
3747 		return 0;
3748 
3749 	objcg = obj_cgroup_alloc();
3750 	if (!objcg)
3751 		return -ENOMEM;
3752 
3753 	objcg->memcg = memcg;
3754 	rcu_assign_pointer(memcg->objcg, objcg);
3755 
3756 	static_branch_enable(&memcg_kmem_online_key);
3757 
3758 	memcg->kmemcg_id = memcg->id.id;
3759 
3760 	return 0;
3761 }
3762 
3763 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3764 {
3765 	struct mem_cgroup *parent;
3766 
3767 	if (mem_cgroup_kmem_disabled())
3768 		return;
3769 
3770 	if (unlikely(mem_cgroup_is_root(memcg)))
3771 		return;
3772 
3773 	parent = parent_mem_cgroup(memcg);
3774 	if (!parent)
3775 		parent = root_mem_cgroup;
3776 
3777 	memcg_reparent_objcgs(memcg, parent);
3778 
3779 	/*
3780 	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3781 	 * corresponding to this cgroup are guaranteed to remain empty.
3782 	 * The ordering is imposed by list_lru_node->lock taken by
3783 	 * memcg_reparent_list_lrus().
3784 	 */
3785 	memcg_reparent_list_lrus(memcg, parent);
3786 }
3787 #else
3788 static int memcg_online_kmem(struct mem_cgroup *memcg)
3789 {
3790 	return 0;
3791 }
3792 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3793 {
3794 }
3795 #endif /* CONFIG_MEMCG_KMEM */
3796 
3797 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3798 {
3799 	int ret;
3800 
3801 	mutex_lock(&memcg_max_mutex);
3802 
3803 	ret = page_counter_set_max(&memcg->tcpmem, max);
3804 	if (ret)
3805 		goto out;
3806 
3807 	if (!memcg->tcpmem_active) {
3808 		/*
3809 		 * The active flag needs to be written after the static_key
3810 		 * update. This is what guarantees that the socket activation
3811 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3812 		 * for details, and note that we don't mark any socket as
3813 		 * belonging to this memcg until that flag is up.
3814 		 *
3815 		 * We need to do this, because static_keys will span multiple
3816 		 * sites, but we can't control their order. If we mark a socket
3817 		 * as accounted, but the accounting functions are not patched in
3818 		 * yet, we'll lose accounting.
3819 		 *
3820 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3821 	 * because when this value changes, the code to process it is not
3822 		 * patched in yet.
3823 		 */
3824 		static_branch_inc(&memcg_sockets_enabled_key);
3825 		memcg->tcpmem_active = true;
3826 	}
3827 out:
3828 	mutex_unlock(&memcg_max_mutex);
3829 	return ret;
3830 }
3831 
3832 /*
3833  * The users of this function are the RES_LIMIT and RES_SOFT_LIMIT
3834  * write handlers.
3835  */
3836 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3837 				char *buf, size_t nbytes, loff_t off)
3838 {
3839 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3840 	unsigned long nr_pages;
3841 	int ret;
3842 
3843 	buf = strstrip(buf);
3844 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3845 	if (ret)
3846 		return ret;
3847 
3848 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3849 	case RES_LIMIT:
3850 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3851 			ret = -EINVAL;
3852 			break;
3853 		}
3854 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3855 		case _MEM:
3856 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3857 			break;
3858 		case _MEMSWAP:
3859 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3860 			break;
3861 		case _KMEM:
3862 			/* kmem.limit_in_bytes is deprecated. */
3863 			ret = -EOPNOTSUPP;
3864 			break;
3865 		case _TCP:
3866 			ret = memcg_update_tcp_max(memcg, nr_pages);
3867 			break;
3868 		}
3869 		break;
3870 	case RES_SOFT_LIMIT:
3871 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3872 			ret = -EOPNOTSUPP;
3873 		} else {
3874 			WRITE_ONCE(memcg->soft_limit, nr_pages);
3875 			ret = 0;
3876 		}
3877 		break;
3878 	}
3879 	return ret ?: nbytes;
3880 }
3881 
3882 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3883 				size_t nbytes, loff_t off)
3884 {
3885 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3886 	struct page_counter *counter;
3887 
3888 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3889 	case _MEM:
3890 		counter = &memcg->memory;
3891 		break;
3892 	case _MEMSWAP:
3893 		counter = &memcg->memsw;
3894 		break;
3895 	case _KMEM:
3896 		counter = &memcg->kmem;
3897 		break;
3898 	case _TCP:
3899 		counter = &memcg->tcpmem;
3900 		break;
3901 	default:
3902 		BUG();
3903 	}
3904 
3905 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3906 	case RES_MAX_USAGE:
3907 		page_counter_reset_watermark(counter);
3908 		break;
3909 	case RES_FAILCNT:
3910 		counter->failcnt = 0;
3911 		break;
3912 	default:
3913 		BUG();
3914 	}
3915 
3916 	return nbytes;
3917 }
3918 
3919 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3920 					struct cftype *cft)
3921 {
3922 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3923 }
3924 
3925 #ifdef CONFIG_MMU
3926 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3927 					struct cftype *cft, u64 val)
3928 {
3929 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3930 
3931 	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
3932 		     "Please report your usecase to linux-mm@kvack.org if you "
3933 		     "depend on this functionality.\n");
3934 
3935 	if (val & ~MOVE_MASK)
3936 		return -EINVAL;
3937 
3938 	/*
3939 	 * No locking is needed here, because ->can_attach() will check this
3940 	 * value once at the beginning of the process and then carry on with
3941 	 * stale data. This means that changes to this value will only
3942 	 * affect task migrations starting after the change.
3943 	 */
3944 	memcg->move_charge_at_immigrate = val;
3945 	return 0;
3946 }
3947 #else
3948 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3949 					struct cftype *cft, u64 val)
3950 {
3951 	return -ENOSYS;
3952 }
3953 #endif
3954 
3955 #ifdef CONFIG_NUMA
3956 
3957 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3958 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3959 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3960 
3961 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3962 				int nid, unsigned int lru_mask, bool tree)
3963 {
3964 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3965 	unsigned long nr = 0;
3966 	enum lru_list lru;
3967 
3968 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3969 
3970 	for_each_lru(lru) {
3971 		if (!(BIT(lru) & lru_mask))
3972 			continue;
3973 		if (tree)
3974 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3975 		else
3976 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3977 	}
3978 	return nr;
3979 }
3980 
3981 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3982 					     unsigned int lru_mask,
3983 					     bool tree)
3984 {
3985 	unsigned long nr = 0;
3986 	enum lru_list lru;
3987 
3988 	for_each_lru(lru) {
3989 		if (!(BIT(lru) & lru_mask))
3990 			continue;
3991 		if (tree)
3992 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3993 		else
3994 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3995 	}
3996 	return nr;
3997 }
3998 
3999 static int memcg_numa_stat_show(struct seq_file *m, void *v)
4000 {
4001 	struct numa_stat {
4002 		const char *name;
4003 		unsigned int lru_mask;
4004 	};
4005 
4006 	static const struct numa_stat stats[] = {
4007 		{ "total", LRU_ALL },
4008 		{ "file", LRU_ALL_FILE },
4009 		{ "anon", LRU_ALL_ANON },
4010 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4011 	};
4012 	const struct numa_stat *stat;
4013 	int nid;
4014 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4015 
4016 	mem_cgroup_flush_stats();
4017 
4018 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4019 		seq_printf(m, "%s=%lu", stat->name,
4020 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4021 						   false));
4022 		for_each_node_state(nid, N_MEMORY)
4023 			seq_printf(m, " N%d=%lu", nid,
4024 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4025 							stat->lru_mask, false));
4026 		seq_putc(m, '\n');
4027 	}
4028 
4029 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4030 
4031 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4032 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4033 						   true));
4034 		for_each_node_state(nid, N_MEMORY)
4035 			seq_printf(m, " N%d=%lu", nid,
4036 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4037 							stat->lru_mask, true));
4038 		seq_putc(m, '\n');
4039 	}
4040 
4041 	return 0;
4042 }
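
/*
 * Sketch of the memory.numa_stat output produced above (values are in
 * pages; node numbers depend on the system):
 *
 *	total=<pages> N0=<pages> N1=<pages> ...
 *	file=<pages> N0=<pages> ...
 *	anon=<pages> N0=<pages> ...
 *	unevictable=<pages> N0=<pages> ...
 *	hierarchical_total=<pages> N0=<pages> ...
 *	hierarchical_file=<pages> ...
 *	hierarchical_anon=<pages> ...
 *	hierarchical_unevictable=<pages> ...
 */
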
4043 #endif /* CONFIG_NUMA */
4044 
4045 static const unsigned int memcg1_stats[] = {
4046 	NR_FILE_PAGES,
4047 	NR_ANON_MAPPED,
4048 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4049 	NR_ANON_THPS,
4050 #endif
4051 	NR_SHMEM,
4052 	NR_FILE_MAPPED,
4053 	NR_FILE_DIRTY,
4054 	NR_WRITEBACK,
4055 	WORKINGSET_REFAULT_ANON,
4056 	WORKINGSET_REFAULT_FILE,
4057 	MEMCG_SWAP,
4058 };
4059 
4060 static const char *const memcg1_stat_names[] = {
4061 	"cache",
4062 	"rss",
4063 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4064 	"rss_huge",
4065 #endif
4066 	"shmem",
4067 	"mapped_file",
4068 	"dirty",
4069 	"writeback",
4070 	"workingset_refault_anon",
4071 	"workingset_refault_file",
4072 	"swap",
4073 };
4074 
4075 /* Universal VM events cgroup1 shows, original sort order */
4076 static const unsigned int memcg1_events[] = {
4077 	PGPGIN,
4078 	PGPGOUT,
4079 	PGFAULT,
4080 	PGMAJFAULT,
4081 };
4082 
4083 static int memcg_stat_show(struct seq_file *m, void *v)
4084 {
4085 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4086 	unsigned long memory, memsw;
4087 	struct mem_cgroup *mi;
4088 	unsigned int i;
4089 
4090 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4091 
4092 	mem_cgroup_flush_stats();
4093 
4094 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4095 		unsigned long nr;
4096 
4097 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4098 			continue;
4099 		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4100 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
4101 			   nr * memcg_page_state_unit(memcg1_stats[i]));
4102 	}
4103 
4104 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4105 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4106 			   memcg_events_local(memcg, memcg1_events[i]));
4107 
4108 	for (i = 0; i < NR_LRU_LISTS; i++)
4109 		seq_printf(m, "%s %lu\n", lru_list_name(i),
4110 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4111 			   PAGE_SIZE);
4112 
4113 	/* Hierarchical information */
4114 	memory = memsw = PAGE_COUNTER_MAX;
4115 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4116 		memory = min(memory, READ_ONCE(mi->memory.max));
4117 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4118 	}
4119 	seq_printf(m, "hierarchical_memory_limit %llu\n",
4120 		   (u64)memory * PAGE_SIZE);
4121 	if (do_memsw_account())
4122 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4123 			   (u64)memsw * PAGE_SIZE);
4124 
4125 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4126 		unsigned long nr;
4127 
4128 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4129 			continue;
4130 		nr = memcg_page_state(memcg, memcg1_stats[i]);
4131 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4132 			   (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
4133 	}
4134 
4135 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4136 		seq_printf(m, "total_%s %llu\n",
4137 			   vm_event_name(memcg1_events[i]),
4138 			   (u64)memcg_events(memcg, memcg1_events[i]));
4139 
4140 	for (i = 0; i < NR_LRU_LISTS; i++)
4141 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4142 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4143 			   PAGE_SIZE);
4144 
4145 #ifdef CONFIG_DEBUG_VM
4146 	{
4147 		pg_data_t *pgdat;
4148 		struct mem_cgroup_per_node *mz;
4149 		unsigned long anon_cost = 0;
4150 		unsigned long file_cost = 0;
4151 
4152 		for_each_online_pgdat(pgdat) {
4153 			mz = memcg->nodeinfo[pgdat->node_id];
4154 
4155 			anon_cost += mz->lruvec.anon_cost;
4156 			file_cost += mz->lruvec.file_cost;
4157 		}
4158 		seq_printf(m, "anon_cost %lu\n", anon_cost);
4159 		seq_printf(m, "file_cost %lu\n", file_cost);
4160 	}
4161 #endif
4162 
4163 	return 0;
4164 }
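
/*
 * Rough shape of the memory.stat (cgroup v1) output assembled above
 * (sketch; "..." elides entries):
 *
 *	cache <bytes>
 *	rss <bytes>
 *	...
 *	pgpgin <count>
 *	pgpgout <count>
 *	...
 *	inactive_anon <bytes>
 *	...
 *	hierarchical_memory_limit <bytes>
 *	total_cache <bytes>
 *	...
 *	anon_cost <value>	(CONFIG_DEBUG_VM only)
 *	file_cost <value>
 */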
4165 
4166 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4167 				      struct cftype *cft)
4168 {
4169 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4170 
4171 	return mem_cgroup_swappiness(memcg);
4172 }
4173 
4174 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4175 				       struct cftype *cft, u64 val)
4176 {
4177 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4178 
4179 	if (val > 200)
4180 		return -EINVAL;
4181 
4182 	if (!mem_cgroup_is_root(memcg))
4183 		WRITE_ONCE(memcg->swappiness, val);
4184 	else
4185 		WRITE_ONCE(vm_swappiness, val);
4186 
4187 	return 0;
4188 }
4189 
4190 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4191 {
4192 	struct mem_cgroup_threshold_ary *t;
4193 	unsigned long usage;
4194 	int i;
4195 
4196 	rcu_read_lock();
4197 	if (!swap)
4198 		t = rcu_dereference(memcg->thresholds.primary);
4199 	else
4200 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4201 
4202 	if (!t)
4203 		goto unlock;
4204 
4205 	usage = mem_cgroup_usage(memcg, swap);
4206 
4207 	/*
4208 	 * current_threshold points to the threshold just below or equal to
4209 	 * usage. If that is not true, a threshold was crossed after the last
4210 	 * call of __mem_cgroup_threshold().
4211 	 */
4212 	i = t->current_threshold;
4213 
4214 	/*
4215 	 * Iterate backward over the array of thresholds starting from
4216 	 * current_threshold and check if a threshold is crossed.
4217 	 * If none of the thresholds below usage is crossed, we read
4218 	 * only one element of the array here.
4219 	 */
4220 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4221 		eventfd_signal(t->entries[i].eventfd, 1);
4222 
4223 	/* i = current_threshold + 1 */
4224 	i++;
4225 
4226 	/*
4227 	 * Iterate forward over the array of thresholds starting from
4228 	 * current_threshold+1 and check if a threshold is crossed.
4229 	 * If none of the thresholds above usage is crossed, we read
4230 	 * only one element of the array here.
4231 	 */
4232 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4233 		eventfd_signal(t->entries[i].eventfd, 1);
4234 
4235 	/* Update current_threshold */
4236 	t->current_threshold = i - 1;
4237 unlock:
4238 	rcu_read_unlock();
4239 }
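
/*
 * Worked example of the traversal above (hypothetical numbers): with
 * thresholds {4M, 8M, 16M} and current_threshold pointing at 8M, a
 * usage of 20M makes the backward loop a no-op, the forward loop
 * signals the 16M eventfd, and current_threshold ends up at 16M.  If
 * usage later drops to 6M, the backward loop signals 16M and 8M (they
 * were crossed downwards) and current_threshold moves back to 4M.
 */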
4240 
4241 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4242 {
4243 	while (memcg) {
4244 		__mem_cgroup_threshold(memcg, false);
4245 		if (do_memsw_account())
4246 			__mem_cgroup_threshold(memcg, true);
4247 
4248 		memcg = parent_mem_cgroup(memcg);
4249 	}
4250 }
4251 
4252 static int compare_thresholds(const void *a, const void *b)
4253 {
4254 	const struct mem_cgroup_threshold *_a = a;
4255 	const struct mem_cgroup_threshold *_b = b;
4256 
4257 	if (_a->threshold > _b->threshold)
4258 		return 1;
4259 
4260 	if (_a->threshold < _b->threshold)
4261 		return -1;
4262 
4263 	return 0;
4264 }
4265 
4266 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4267 {
4268 	struct mem_cgroup_eventfd_list *ev;
4269 
4270 	spin_lock(&memcg_oom_lock);
4271 
4272 	list_for_each_entry(ev, &memcg->oom_notify, list)
4273 		eventfd_signal(ev->eventfd, 1);
4274 
4275 	spin_unlock(&memcg_oom_lock);
4276 	return 0;
4277 }
4278 
4279 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4280 {
4281 	struct mem_cgroup *iter;
4282 
4283 	for_each_mem_cgroup_tree(iter, memcg)
4284 		mem_cgroup_oom_notify_cb(iter);
4285 }
4286 
4287 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4288 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4289 {
4290 	struct mem_cgroup_thresholds *thresholds;
4291 	struct mem_cgroup_threshold_ary *new;
4292 	unsigned long threshold;
4293 	unsigned long usage;
4294 	int i, size, ret;
4295 
4296 	ret = page_counter_memparse(args, "-1", &threshold);
4297 	if (ret)
4298 		return ret;
4299 
4300 	mutex_lock(&memcg->thresholds_lock);
4301 
4302 	if (type == _MEM) {
4303 		thresholds = &memcg->thresholds;
4304 		usage = mem_cgroup_usage(memcg, false);
4305 	} else if (type == _MEMSWAP) {
4306 		thresholds = &memcg->memsw_thresholds;
4307 		usage = mem_cgroup_usage(memcg, true);
4308 	} else
4309 		BUG();
4310 
4311 	/* Check if a threshold crossed before adding a new one */
4312 	/* Check if a threshold was crossed before adding a new one */
4313 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4314 
4315 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4316 
4317 	/* Allocate memory for new array of thresholds */
4318 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4319 	if (!new) {
4320 		ret = -ENOMEM;
4321 		goto unlock;
4322 	}
4323 	new->size = size;
4324 
4325 	/* Copy thresholds (if any) to new array */
4326 	if (thresholds->primary)
4327 		memcpy(new->entries, thresholds->primary->entries,
4328 		       flex_array_size(new, entries, size - 1));
4329 
4330 	/* Add new threshold */
4331 	new->entries[size - 1].eventfd = eventfd;
4332 	new->entries[size - 1].threshold = threshold;
4333 
4334 	/* Sort thresholds. Registering a new threshold isn't time-critical */
4335 	sort(new->entries, size, sizeof(*new->entries),
4336 			compare_thresholds, NULL);
4337 
4338 	/* Find current threshold */
4339 	new->current_threshold = -1;
4340 	for (i = 0; i < size; i++) {
4341 		if (new->entries[i].threshold <= usage) {
4342 			/*
4343 			 * new->current_threshold will not be used until
4344 			 * rcu_assign_pointer(), so it's safe to increment
4345 			 * it here.
4346 			 */
4347 			++new->current_threshold;
4348 		} else
4349 			break;
4350 	}
4351 
4352 	/* Free old spare buffer and save old primary buffer as spare */
4353 	kfree(thresholds->spare);
4354 	thresholds->spare = thresholds->primary;
4355 
4356 	rcu_assign_pointer(thresholds->primary, new);
4357 
4358 	/* Make sure nobody still uses the old thresholds array */
4359 	synchronize_rcu();
4360 
4361 unlock:
4362 	mutex_unlock(&memcg->thresholds_lock);
4363 
4364 	return ret;
4365 }
4366 
4367 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4368 	struct eventfd_ctx *eventfd, const char *args)
4369 {
4370 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4371 }
4372 
4373 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4374 	struct eventfd_ctx *eventfd, const char *args)
4375 {
4376 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4377 }
4378 
4379 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4380 	struct eventfd_ctx *eventfd, enum res_type type)
4381 {
4382 	struct mem_cgroup_thresholds *thresholds;
4383 	struct mem_cgroup_threshold_ary *new;
4384 	unsigned long usage;
4385 	int i, j, size, entries;
4386 
4387 	mutex_lock(&memcg->thresholds_lock);
4388 
4389 	if (type == _MEM) {
4390 		thresholds = &memcg->thresholds;
4391 		usage = mem_cgroup_usage(memcg, false);
4392 	} else if (type == _MEMSWAP) {
4393 		thresholds = &memcg->memsw_thresholds;
4394 		usage = mem_cgroup_usage(memcg, true);
4395 	} else
4396 		BUG();
4397 
4398 	if (!thresholds->primary)
4399 		goto unlock;
4400 
4401 	/* Check if a threshold was crossed before removing */
4402 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4403 
4404 	/* Calculate the new number of thresholds */
4405 	size = entries = 0;
4406 	for (i = 0; i < thresholds->primary->size; i++) {
4407 		if (thresholds->primary->entries[i].eventfd != eventfd)
4408 			size++;
4409 		else
4410 			entries++;
4411 	}
4412 
4413 	new = thresholds->spare;
4414 
4415 	/* If no items related to eventfd have been cleared, nothing to do */
4416 	/* If no entries related to eventfd were found, nothing to do */
4417 		goto unlock;
4418 
4419 	/* Set thresholds array to NULL if we don't have thresholds */
4420 	if (!size) {
4421 		kfree(new);
4422 		new = NULL;
4423 		goto swap_buffers;
4424 	}
4425 
4426 	new->size = size;
4427 
4428 	/* Copy thresholds and find current threshold */
4429 	new->current_threshold = -1;
4430 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4431 		if (thresholds->primary->entries[i].eventfd == eventfd)
4432 			continue;
4433 
4434 		new->entries[j] = thresholds->primary->entries[i];
4435 		if (new->entries[j].threshold <= usage) {
4436 			/*
4437 			 * new->current_threshold will not be used
4438 			 * until rcu_assign_pointer(), so it's safe to increment
4439 			 * it here.
4440 			 */
4441 			++new->current_threshold;
4442 		}
4443 		j++;
4444 	}
4445 
4446 swap_buffers:
4447 	/* Swap primary and spare array */
4448 	thresholds->spare = thresholds->primary;
4449 
4450 	rcu_assign_pointer(thresholds->primary, new);
4451 
4452 	/* Make sure nobody still uses the old thresholds array */
4453 	synchronize_rcu();
4454 
4455 	/* If all events are unregistered, free the spare array */
4456 	if (!new) {
4457 		kfree(thresholds->spare);
4458 		thresholds->spare = NULL;
4459 	}
4460 unlock:
4461 	mutex_unlock(&memcg->thresholds_lock);
4462 }
4463 
4464 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4465 	struct eventfd_ctx *eventfd)
4466 {
4467 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4468 }
4469 
4470 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4471 	struct eventfd_ctx *eventfd)
4472 {
4473 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4474 }
4475 
4476 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4477 	struct eventfd_ctx *eventfd, const char *args)
4478 {
4479 	struct mem_cgroup_eventfd_list *event;
4480 
4481 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4482 	if (!event)
4483 		return -ENOMEM;
4484 
4485 	spin_lock(&memcg_oom_lock);
4486 
4487 	event->eventfd = eventfd;
4488 	list_add(&event->list, &memcg->oom_notify);
4489 
4490 	/* already in OOM? */
4491 	if (memcg->under_oom)
4492 		eventfd_signal(eventfd, 1);
4493 	spin_unlock(&memcg_oom_lock);
4494 
4495 	return 0;
4496 }
4497 
4498 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4499 	struct eventfd_ctx *eventfd)
4500 {
4501 	struct mem_cgroup_eventfd_list *ev, *tmp;
4502 
4503 	spin_lock(&memcg_oom_lock);
4504 
4505 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4506 		if (ev->eventfd == eventfd) {
4507 			list_del(&ev->list);
4508 			kfree(ev);
4509 		}
4510 	}
4511 
4512 	spin_unlock(&memcg_oom_lock);
4513 }
4514 
4515 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4516 {
4517 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4518 
4519 	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4520 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4521 	seq_printf(sf, "oom_kill %lu\n",
4522 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4523 	return 0;
4524 }
4525 
4526 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4527 	struct cftype *cft, u64 val)
4528 {
4529 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4530 
4531 	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4532 	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4533 		return -EINVAL;
4534 
4535 	WRITE_ONCE(memcg->oom_kill_disable, val);
4536 	if (!val)
4537 		memcg_oom_recover(memcg);
4538 
4539 	return 0;
4540 }
4541 
4542 #ifdef CONFIG_CGROUP_WRITEBACK
4543 
4544 #include <trace/events/writeback.h>
4545 
4546 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4547 {
4548 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4549 }
4550 
4551 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4552 {
4553 	wb_domain_exit(&memcg->cgwb_domain);
4554 }
4555 
4556 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4557 {
4558 	wb_domain_size_changed(&memcg->cgwb_domain);
4559 }
4560 
4561 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4562 {
4563 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4564 
4565 	if (!memcg->css.parent)
4566 		return NULL;
4567 
4568 	return &memcg->cgwb_domain;
4569 }
4570 
4571 /**
4572  * mem_cgroup_wb_stats - retrieve writeback-related stats from its memcg
4573  * @wb: bdi_writeback in question
4574  * @pfilepages: out parameter for number of file pages
4575  * @pheadroom: out parameter for number of allocatable pages according to memcg
4576  * @pdirty: out parameter for number of dirty pages
4577  * @pwriteback: out parameter for number of pages under writeback
4578  *
4579  * Determine the numbers of file, headroom, dirty, and writeback pages in
4580  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4581  * is a bit more involved.
4582  *
4583  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4584  * headroom is calculated as the lowest headroom of itself and the
4585  * ancestors.  Note that this doesn't consider the actual amount of
4586  * available memory in the system.  The caller should further cap
4587  * *@pheadroom accordingly.
4588  */
4589 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4590 			 unsigned long *pheadroom, unsigned long *pdirty,
4591 			 unsigned long *pwriteback)
4592 {
4593 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4594 	struct mem_cgroup *parent;
4595 
4596 	mem_cgroup_flush_stats();
4597 
4598 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4599 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4600 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4601 			memcg_page_state(memcg, NR_ACTIVE_FILE);
4602 
4603 	*pheadroom = PAGE_COUNTER_MAX;
4604 	while ((parent = parent_mem_cgroup(memcg))) {
4605 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4606 					    READ_ONCE(memcg->memory.high));
4607 		unsigned long used = page_counter_read(&memcg->memory);
4608 
4609 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4610 		memcg = parent;
4611 	}
4612 }
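
/*
 * Headroom walk illustration (hypothetical numbers): a memcg with
 * max=1G, high=512M and 300M used contributes min(1G, 512M) - 300M =
 * 212M; its parent with max=2G, high=max and 1.5G used contributes
 * 512M.  *pheadroom becomes min(212M, 512M) = 212M, i.e. the tightest
 * constraint along the ancestry wins.
 */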
4613 
4614 /*
4615  * Foreign dirty flushing
4616  *
4617  * There's an inherent mismatch between memcg and writeback.  The former
4618  * tracks ownership per-page while the latter per-inode.  This was a
4619  * deliberate design decision because honoring per-page ownership in the
4620  * writeback path is complicated, may lead to higher CPU and IO overheads,
4621  * and is deemed unnecessary given that write-sharing an inode across
4622  * different cgroups isn't a common use-case.
4623  *
4624  * Combined with inode majority-writer ownership switching, this works well
4625  * enough in most cases but there are some pathological cases.  For
4626  * example, let's say there are two cgroups A and B which keep writing to
4627  * different but confined parts of the same inode.  B owns the inode and
4628  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4629  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4630  * triggering background writeback.  A will be slowed down without a way to
4631  * make writeback of the dirty pages happen.
4632  *
4633  * Conditions like the above can lead to a cgroup getting repeatedly and
4634  * severely throttled, making only a little progress each
4635  * dirty_expire_interval, while the underlying IO device is almost
4636  * completely idle.
4637  *
4638  * Solving this problem completely requires matching the ownership tracking
4639  * granularities between memcg and writeback in either direction.  However,
4640  * the more egregious behaviors can be avoided by simply remembering the
4641  * most recent foreign dirtying events and initiating remote flushes on
4642  * them when local writeback isn't enough to keep the memory clean enough.
4643  *
4644  * The following two functions implement such mechanism.  When a foreign
4645  * page - a page whose memcg and writeback ownerships don't match - is
4646  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4647  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4648  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4649  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4650  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4651  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4652  * limited to MEMCG_CGWB_FRN_CNT.
4653  *
4654  * The mechanism only remembers IDs and doesn't hold any object references.
4655  * As being wrong occasionally doesn't matter, updates and accesses to the
4656  * records are lockless and racy.
4657  */
4658 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4659 					     struct bdi_writeback *wb)
4660 {
4661 	struct mem_cgroup *memcg = folio_memcg(folio);
4662 	struct memcg_cgwb_frn *frn;
4663 	u64 now = get_jiffies_64();
4664 	u64 oldest_at = now;
4665 	int oldest = -1;
4666 	int i;
4667 
4668 	trace_track_foreign_dirty(folio, wb);
4669 
4670 	/*
4671 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4672 	 * using it.  If not, replace the oldest one which isn't being
4673 	 * written out.
4674 	 */
4675 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4676 		frn = &memcg->cgwb_frn[i];
4677 		if (frn->bdi_id == wb->bdi->id &&
4678 		    frn->memcg_id == wb->memcg_css->id)
4679 			break;
4680 		if (time_before64(frn->at, oldest_at) &&
4681 		    atomic_read(&frn->done.cnt) == 1) {
4682 			oldest = i;
4683 			oldest_at = frn->at;
4684 		}
4685 	}
4686 
4687 	if (i < MEMCG_CGWB_FRN_CNT) {
4688 		/*
4689 		 * Re-using an existing one.  Update timestamp lazily to
4690 		 * avoid making the cacheline hot.  We want the timestamps to be
4691 		 * reasonably up-to-date and the update interval to be significantly
4692 		 * shorter than dirty_expire_interval as that's what expires the record.
4693 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4694 		 */
4695 		unsigned long update_intv =
4696 			min_t(unsigned long, HZ,
4697 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4698 
4699 		if (time_before64(frn->at, now - update_intv))
4700 			frn->at = now;
4701 	} else if (oldest >= 0) {
4702 		/* replace the oldest free one */
4703 		frn = &memcg->cgwb_frn[oldest];
4704 		frn->bdi_id = wb->bdi->id;
4705 		frn->memcg_id = wb->memcg_css->id;
4706 		frn->at = now;
4707 	}
4708 }
4709 
4710 /* issue foreign writeback flushes for recorded foreign dirtying events */
4711 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4712 {
4713 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4714 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4715 	u64 now = jiffies_64;
4716 	int i;
4717 
4718 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4719 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4720 
4721 		/*
4722 		 * If the record is older than dirty_expire_interval,
4723 		 * writeback on it has already started.  No need to kick it
4724 		 * off again.  Also, don't start a new one if there's
4725 		 * already one in flight.
4726 		 */
4727 		if (time_after64(frn->at, now - intv) &&
4728 		    atomic_read(&frn->done.cnt) == 1) {
4729 			frn->at = 0;
4730 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4731 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4732 					       WB_REASON_FOREIGN_FLUSH,
4733 					       &frn->done);
4734 		}
4735 	}
4736 }
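
/*
 * Timing illustration, assuming the default dirty_expire_interval of
 * 30 seconds (the value is kept in centiseconds, hence the "* 10"
 * conversions to msecs above): a foreign-dirty record is refreshed at
 * most once per second (update_intv = min(1s, 30s / 8)) and a remote
 * flush is only kicked off while the record is younger than 30 seconds
 * and no prior flush for that slot is still in flight.
 */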
4737 
4738 #else	/* CONFIG_CGROUP_WRITEBACK */
4739 
4740 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4741 {
4742 	return 0;
4743 }
4744 
4745 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4746 {
4747 }
4748 
4749 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4750 {
4751 }
4752 
4753 #endif	/* CONFIG_CGROUP_WRITEBACK */
4754 
4755 /*
4756  * DO NOT USE IN NEW FILES.
4757  *
4758  * "cgroup.event_control" implementation.
4759  *
4760  * This is way over-engineered.  It tries to support fully configurable
4761  * events for each user.  Such a level of flexibility is completely
4762  * unnecessary, especially in light of the planned unified hierarchy.
4763  *
4764  * Please deprecate this and replace with something simpler if at all
4765  * possible.
4766  */
4767 
4768 /*
4769  * Unregister event and free resources.
4770  *
4771  * Gets called from workqueue.
4772  */
4773 static void memcg_event_remove(struct work_struct *work)
4774 {
4775 	struct mem_cgroup_event *event =
4776 		container_of(work, struct mem_cgroup_event, remove);
4777 	struct mem_cgroup *memcg = event->memcg;
4778 
4779 	remove_wait_queue(event->wqh, &event->wait);
4780 
4781 	event->unregister_event(memcg, event->eventfd);
4782 
4783 	/* Notify userspace the event is going away. */
4784 	eventfd_signal(event->eventfd, 1);
4785 
4786 	eventfd_ctx_put(event->eventfd);
4787 	kfree(event);
4788 	css_put(&memcg->css);
4789 }
4790 
4791 /*
4792  * Gets called on EPOLLHUP on eventfd when user closes it.
4793  *
4794  * Called with wqh->lock held and interrupts disabled.
4795  */
4796 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4797 			    int sync, void *key)
4798 {
4799 	struct mem_cgroup_event *event =
4800 		container_of(wait, struct mem_cgroup_event, wait);
4801 	struct mem_cgroup *memcg = event->memcg;
4802 	__poll_t flags = key_to_poll(key);
4803 
4804 	if (flags & EPOLLHUP) {
4805 		/*
4806 		 * If the event has been detached at cgroup removal, we
4807 		 * can simply return knowing the other side will clean up
4808 		 * for us.
4809 		 *
4810 		 * We can't race against event freeing since the other
4811 		 * side will require wqh->lock via remove_wait_queue(),
4812 		 * which we hold.
4813 		 */
4814 		spin_lock(&memcg->event_list_lock);
4815 		if (!list_empty(&event->list)) {
4816 			list_del_init(&event->list);
4817 			/*
4818 			 * We are in atomic context, but memcg_event_remove()
4819 			 * may sleep, so we have to call it from a workqueue.
4820 			 */
4821 			schedule_work(&event->remove);
4822 		}
4823 		spin_unlock(&memcg->event_list_lock);
4824 	}
4825 
4826 	return 0;
4827 }
4828 
4829 static void memcg_event_ptable_queue_proc(struct file *file,
4830 		wait_queue_head_t *wqh, poll_table *pt)
4831 {
4832 	struct mem_cgroup_event *event =
4833 		container_of(pt, struct mem_cgroup_event, pt);
4834 
4835 	event->wqh = wqh;
4836 	add_wait_queue(wqh, &event->wait);
4837 }
4838 
4839 /*
4840  * DO NOT USE IN NEW FILES.
4841  *
4842  * Parse input and register a new cgroup event handler.
4843  *
4844  * Input must be in the format '<event_fd> <control_fd> <args>'.
4845  * Interpretation of args is defined by the control file implementation.
4846  */
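
/*
 * Minimal userspace sketch of the registration handshake parsed below
 * (illustration only; error handling omitted, paths are hypothetical,
 * needs <sys/eventfd.h>, <fcntl.h>, <unistd.h>, <stdio.h>, <string.h>):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ecfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *			O_WRONLY);
 *	char cmd[64];
 *
 *	snprintf(cmd, sizeof(cmd), "%d %d %s", efd, cfd, "1048576");
 *	write(ecfd, cmd, strlen(cmd));
 *	(a subsequent read(efd, ...) then blocks until the 1 MiB usage
 *	threshold is crossed)
 *
 * The trailing "1048576" is the <args> part handed to the control
 * file's register_event callback (a byte threshold in this case).
 */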
4847 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4848 					 char *buf, size_t nbytes, loff_t off)
4849 {
4850 	struct cgroup_subsys_state *css = of_css(of);
4851 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4852 	struct mem_cgroup_event *event;
4853 	struct cgroup_subsys_state *cfile_css;
4854 	unsigned int efd, cfd;
4855 	struct fd efile;
4856 	struct fd cfile;
4857 	struct dentry *cdentry;
4858 	const char *name;
4859 	char *endp;
4860 	int ret;
4861 
4862 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
4863 		return -EOPNOTSUPP;
4864 
4865 	buf = strstrip(buf);
4866 
4867 	efd = simple_strtoul(buf, &endp, 10);
4868 	if (*endp != ' ')
4869 		return -EINVAL;
4870 	buf = endp + 1;
4871 
4872 	cfd = simple_strtoul(buf, &endp, 10);
4873 	if ((*endp != ' ') && (*endp != '\0'))
4874 		return -EINVAL;
4875 	buf = endp + 1;
4876 
4877 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4878 	if (!event)
4879 		return -ENOMEM;
4880 
4881 	event->memcg = memcg;
4882 	INIT_LIST_HEAD(&event->list);
4883 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4884 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4885 	INIT_WORK(&event->remove, memcg_event_remove);
4886 
4887 	efile = fdget(efd);
4888 	if (!efile.file) {
4889 		ret = -EBADF;
4890 		goto out_kfree;
4891 	}
4892 
4893 	event->eventfd = eventfd_ctx_fileget(efile.file);
4894 	if (IS_ERR(event->eventfd)) {
4895 		ret = PTR_ERR(event->eventfd);
4896 		goto out_put_efile;
4897 	}
4898 
4899 	cfile = fdget(cfd);
4900 	if (!cfile.file) {
4901 		ret = -EBADF;
4902 		goto out_put_eventfd;
4903 	}
4904 
4905 	/* the process needs read permission on the control file */
4906 	/* AV: shouldn't we check that it's been opened for read instead? */
4907 	ret = file_permission(cfile.file, MAY_READ);
4908 	if (ret < 0)
4909 		goto out_put_cfile;
4910 
4911 	/*
4912 	 * The control file must be a regular cgroup1 file. As a regular cgroup
4913 	 * file can't be renamed, it's safe to access its name afterwards.
4914 	 */
4915 	cdentry = cfile.file->f_path.dentry;
4916 	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
4917 		ret = -EINVAL;
4918 		goto out_put_cfile;
4919 	}
4920 
4921 	/*
4922 	 * Determine the event callbacks and set them in @event.  This used
4923 	 * to be done via struct cftype but cgroup core no longer knows
4924 	 * about these events.  The following is crude but the whole thing
4925 	 * is for compatibility anyway.
4926 	 *
4927 	 * DO NOT ADD NEW FILES.
4928 	 */
4929 	name = cdentry->d_name.name;
4930 
4931 	if (!strcmp(name, "memory.usage_in_bytes")) {
4932 		event->register_event = mem_cgroup_usage_register_event;
4933 		event->unregister_event = mem_cgroup_usage_unregister_event;
4934 	} else if (!strcmp(name, "memory.oom_control")) {
4935 		event->register_event = mem_cgroup_oom_register_event;
4936 		event->unregister_event = mem_cgroup_oom_unregister_event;
4937 	} else if (!strcmp(name, "memory.pressure_level")) {
4938 		event->register_event = vmpressure_register_event;
4939 		event->unregister_event = vmpressure_unregister_event;
4940 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4941 		event->register_event = memsw_cgroup_usage_register_event;
4942 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4943 	} else {
4944 		ret = -EINVAL;
4945 		goto out_put_cfile;
4946 	}
4947 
4948 	/*
4949 	 * Verify that @cfile belongs to @css.  Also, remaining events are
4950 	 * automatically removed on cgroup destruction but the removal is
4951 	 * asynchronous, so take an extra ref on @css.
4952 	 */
4953 	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
4954 					       &memory_cgrp_subsys);
4955 	ret = -EINVAL;
4956 	if (IS_ERR(cfile_css))
4957 		goto out_put_cfile;
4958 	if (cfile_css != css) {
4959 		css_put(cfile_css);
4960 		goto out_put_cfile;
4961 	}
4962 
4963 	ret = event->register_event(memcg, event->eventfd, buf);
4964 	if (ret)
4965 		goto out_put_css;
4966 
4967 	vfs_poll(efile.file, &event->pt);
4968 
4969 	spin_lock_irq(&memcg->event_list_lock);
4970 	list_add(&event->list, &memcg->event_list);
4971 	spin_unlock_irq(&memcg->event_list_lock);
4972 
4973 	fdput(cfile);
4974 	fdput(efile);
4975 
4976 	return nbytes;
4977 
4978 out_put_css:
4979 	css_put(css);
4980 out_put_cfile:
4981 	fdput(cfile);
4982 out_put_eventfd:
4983 	eventfd_ctx_put(event->eventfd);
4984 out_put_efile:
4985 	fdput(efile);
4986 out_kfree:
4987 	kfree(event);
4988 
4989 	return ret;
4990 }
4991 
4992 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4993 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4994 {
4995 	/*
4996 	 * Please take a look at tools/cgroup/memcg_slabinfo.py instead.
4997 	 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
4998 	 */
4999 	return 0;
5000 }
5001 #endif
5002 
5003 static struct cftype mem_cgroup_legacy_files[] = {
5004 	{
5005 		.name = "usage_in_bytes",
5006 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5007 		.read_u64 = mem_cgroup_read_u64,
5008 	},
5009 	{
5010 		.name = "max_usage_in_bytes",
5011 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5012 		.write = mem_cgroup_reset,
5013 		.read_u64 = mem_cgroup_read_u64,
5014 	},
5015 	{
5016 		.name = "limit_in_bytes",
5017 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5018 		.write = mem_cgroup_write,
5019 		.read_u64 = mem_cgroup_read_u64,
5020 	},
5021 	{
5022 		.name = "soft_limit_in_bytes",
5023 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5024 		.write = mem_cgroup_write,
5025 		.read_u64 = mem_cgroup_read_u64,
5026 	},
5027 	{
5028 		.name = "failcnt",
5029 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5030 		.write = mem_cgroup_reset,
5031 		.read_u64 = mem_cgroup_read_u64,
5032 	},
5033 	{
5034 		.name = "stat",
5035 		.seq_show = memcg_stat_show,
5036 	},
5037 	{
5038 		.name = "force_empty",
5039 		.write = mem_cgroup_force_empty_write,
5040 	},
5041 	{
5042 		.name = "use_hierarchy",
5043 		.write_u64 = mem_cgroup_hierarchy_write,
5044 		.read_u64 = mem_cgroup_hierarchy_read,
5045 	},
5046 	{
5047 		.name = "cgroup.event_control",		/* XXX: for compat */
5048 		.write = memcg_write_event_control,
5049 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5050 	},
5051 	{
5052 		.name = "swappiness",
5053 		.read_u64 = mem_cgroup_swappiness_read,
5054 		.write_u64 = mem_cgroup_swappiness_write,
5055 	},
5056 	{
5057 		.name = "move_charge_at_immigrate",
5058 		.read_u64 = mem_cgroup_move_charge_read,
5059 		.write_u64 = mem_cgroup_move_charge_write,
5060 	},
5061 	{
5062 		.name = "oom_control",
5063 		.seq_show = mem_cgroup_oom_control_read,
5064 		.write_u64 = mem_cgroup_oom_control_write,
5065 	},
5066 	{
5067 		.name = "pressure_level",
5068 	},
5069 #ifdef CONFIG_NUMA
5070 	{
5071 		.name = "numa_stat",
5072 		.seq_show = memcg_numa_stat_show,
5073 	},
5074 #endif
5075 	{
5076 		.name = "kmem.limit_in_bytes",
5077 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5078 		.write = mem_cgroup_write,
5079 		.read_u64 = mem_cgroup_read_u64,
5080 	},
5081 	{
5082 		.name = "kmem.usage_in_bytes",
5083 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5084 		.read_u64 = mem_cgroup_read_u64,
5085 	},
5086 	{
5087 		.name = "kmem.failcnt",
5088 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5089 		.write = mem_cgroup_reset,
5090 		.read_u64 = mem_cgroup_read_u64,
5091 	},
5092 	{
5093 		.name = "kmem.max_usage_in_bytes",
5094 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5095 		.write = mem_cgroup_reset,
5096 		.read_u64 = mem_cgroup_read_u64,
5097 	},
5098 #if defined(CONFIG_MEMCG_KMEM) && \
5099 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5100 	{
5101 		.name = "kmem.slabinfo",
5102 		.seq_show = mem_cgroup_slab_show,
5103 	},
5104 #endif
5105 	{
5106 		.name = "kmem.tcp.limit_in_bytes",
5107 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5108 		.write = mem_cgroup_write,
5109 		.read_u64 = mem_cgroup_read_u64,
5110 	},
5111 	{
5112 		.name = "kmem.tcp.usage_in_bytes",
5113 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5114 		.read_u64 = mem_cgroup_read_u64,
5115 	},
5116 	{
5117 		.name = "kmem.tcp.failcnt",
5118 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5119 		.write = mem_cgroup_reset,
5120 		.read_u64 = mem_cgroup_read_u64,
5121 	},
5122 	{
5123 		.name = "kmem.tcp.max_usage_in_bytes",
5124 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5125 		.write = mem_cgroup_reset,
5126 		.read_u64 = mem_cgroup_read_u64,
5127 	},
5128 	{ },	/* terminate */
5129 };
5130 
5131 /*
5132  * Private memory cgroup IDR
5133  *
5134  * Swap-out records and page cache shadow entries need to store memcg
5135  * references in constrained space, so we maintain an ID space that is
5136  * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5137  * memory-controlled cgroups to 64k.
5138  *
5139  * However, there usually are many references to the offline CSS after
5140  * the cgroup has been destroyed, such as page cache or reclaimable
5141  * slab objects, that don't need to hang on to the ID. We want to keep
5142  * those dead CSSes from occupying IDs, or we might quickly exhaust the
5143  * relatively small ID space and prevent the creation of new cgroups
5144  * even when there are far fewer than 64k cgroups - possibly none.
5145  *
5146  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5147  * be freed and recycled when it's no longer needed, which is usually
5148  * when the CSS is offlined.
5149  *
5150  * The only exceptions to that are records of swapped out tmpfs/shmem
5151  * pages that need to be attributed to live ancestors on swapin. But
5152  * those references are manageable from userspace.
5153  */
5154 
5155 static DEFINE_IDR(mem_cgroup_idr);
5156 
5157 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5158 {
5159 	if (memcg->id.id > 0) {
5160 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5161 		memcg->id.id = 0;
5162 	}
5163 }
5164 
5165 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5166 						  unsigned int n)
5167 {
5168 	refcount_add(n, &memcg->id.ref);
5169 }
5170 
5171 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5172 {
5173 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5174 		mem_cgroup_id_remove(memcg);
5175 
5176 		/* Memcg ID pins CSS */
5177 		css_put(&memcg->css);
5178 	}
5179 }
5180 
5181 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5182 {
5183 	mem_cgroup_id_put_many(memcg, 1);
5184 }
5185 
5186 /**
5187  * mem_cgroup_from_id - look up a memcg from a memcg id
5188  * @id: the memcg id to look up
5189  *
5190  * Caller must hold rcu_read_lock().
5191  */
5192 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5193 {
5194 	WARN_ON_ONCE(!rcu_read_lock_held());
5195 	return idr_find(&mem_cgroup_idr, id);
5196 }
5197 
5198 #ifdef CONFIG_SHRINKER_DEBUG
5199 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5200 {
5201 	struct cgroup *cgrp;
5202 	struct cgroup_subsys_state *css;
5203 	struct mem_cgroup *memcg;
5204 
5205 	cgrp = cgroup_get_from_id(ino);
5206 	if (IS_ERR(cgrp))
5207 		return ERR_CAST(cgrp);
5208 
5209 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5210 	if (css)
5211 		memcg = container_of(css, struct mem_cgroup, css);
5212 	else
5213 		memcg = ERR_PTR(-ENOENT);
5214 
5215 	cgroup_put(cgrp);
5216 
5217 	return memcg;
5218 }
5219 #endif
5220 
5221 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5222 {
5223 	struct mem_cgroup_per_node *pn;
5224 
5225 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5226 	if (!pn)
5227 		return 1;
5228 
5229 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5230 						   GFP_KERNEL_ACCOUNT);
5231 	if (!pn->lruvec_stats_percpu) {
5232 		kfree(pn);
5233 		return 1;
5234 	}
5235 
5236 	lruvec_init(&pn->lruvec);
5237 	pn->memcg = memcg;
5238 
5239 	memcg->nodeinfo[node] = pn;
5240 	return 0;
5241 }
5242 
5243 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5244 {
5245 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5246 
5247 	if (!pn)
5248 		return;
5249 
5250 	free_percpu(pn->lruvec_stats_percpu);
5251 	kfree(pn);
5252 }
5253 
5254 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5255 {
5256 	int node;
5257 
5258 	for_each_node(node)
5259 		free_mem_cgroup_per_node_info(memcg, node);
5260 	kfree(memcg->vmstats);
5261 	free_percpu(memcg->vmstats_percpu);
5262 	kfree(memcg);
5263 }
5264 
5265 static void mem_cgroup_free(struct mem_cgroup *memcg)
5266 {
5267 	lru_gen_exit_memcg(memcg);
5268 	memcg_wb_domain_exit(memcg);
5269 	__mem_cgroup_free(memcg);
5270 }
5271 
5272 static struct mem_cgroup *mem_cgroup_alloc(void)
5273 {
5274 	struct mem_cgroup *memcg;
5275 	int node;
5276 	int __maybe_unused i;
5277 	long error = -ENOMEM;
5278 
5279 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5280 	if (!memcg)
5281 		return ERR_PTR(error);
5282 
5283 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5284 				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5285 	if (memcg->id.id < 0) {
5286 		error = memcg->id.id;
5287 		goto fail;
5288 	}
5289 
5290 	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5291 	if (!memcg->vmstats)
5292 		goto fail;
5293 
5294 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5295 						 GFP_KERNEL_ACCOUNT);
5296 	if (!memcg->vmstats_percpu)
5297 		goto fail;
5298 
5299 	for_each_node(node)
5300 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5301 			goto fail;
5302 
5303 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5304 		goto fail;
5305 
5306 	INIT_WORK(&memcg->high_work, high_work_func);
5307 	INIT_LIST_HEAD(&memcg->oom_notify);
5308 	mutex_init(&memcg->thresholds_lock);
5309 	spin_lock_init(&memcg->move_lock);
5310 	vmpressure_init(&memcg->vmpressure);
5311 	INIT_LIST_HEAD(&memcg->event_list);
5312 	spin_lock_init(&memcg->event_list_lock);
5313 	memcg->socket_pressure = jiffies;
5314 #ifdef CONFIG_MEMCG_KMEM
5315 	memcg->kmemcg_id = -1;
5316 	INIT_LIST_HEAD(&memcg->objcg_list);
5317 #endif
5318 #ifdef CONFIG_CGROUP_WRITEBACK
5319 	INIT_LIST_HEAD(&memcg->cgwb_list);
5320 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5321 		memcg->cgwb_frn[i].done =
5322 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5323 #endif
5324 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5325 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5326 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5327 	memcg->deferred_split_queue.split_queue_len = 0;
5328 #endif
5329 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5330 	lru_gen_init_memcg(memcg);
5331 	return memcg;
5332 fail:
5333 	mem_cgroup_id_remove(memcg);
5334 	__mem_cgroup_free(memcg);
5335 	return ERR_PTR(error);
5336 }
5337 
5338 static struct cgroup_subsys_state * __ref
5339 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5340 {
5341 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5342 	struct mem_cgroup *memcg, *old_memcg;
5343 
5344 	old_memcg = set_active_memcg(parent);
5345 	memcg = mem_cgroup_alloc();
5346 	set_active_memcg(old_memcg);
5347 	if (IS_ERR(memcg))
5348 		return ERR_CAST(memcg);
5349 
5350 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5351 	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5352 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5353 	memcg->zswap_max = PAGE_COUNTER_MAX;
5354 #endif
5355 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5356 	if (parent) {
5357 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5358 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5359 
5360 		page_counter_init(&memcg->memory, &parent->memory);
5361 		page_counter_init(&memcg->swap, &parent->swap);
5362 		page_counter_init(&memcg->kmem, &parent->kmem);
5363 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5364 	} else {
5365 		init_memcg_events();
5366 		page_counter_init(&memcg->memory, NULL);
5367 		page_counter_init(&memcg->swap, NULL);
5368 		page_counter_init(&memcg->kmem, NULL);
5369 		page_counter_init(&memcg->tcpmem, NULL);
5370 
5371 		root_mem_cgroup = memcg;
5372 		return &memcg->css;
5373 	}
5374 
5375 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5376 		static_branch_inc(&memcg_sockets_enabled_key);
5377 
5378 #if defined(CONFIG_MEMCG_KMEM)
5379 	if (!cgroup_memory_nobpf)
5380 		static_branch_inc(&memcg_bpf_enabled_key);
5381 #endif
5382 
5383 	return &memcg->css;
5384 }
5385 
5386 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5387 {
5388 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5389 
5390 	if (memcg_online_kmem(memcg))
5391 		goto remove_id;
5392 
5393 	/*
5394 	 * A memcg must be visible to expand_shrinker_info()
5395 	 * by the time the maps are allocated, so we allocate the maps
5396 	 * here, when for_each_mem_cgroup() can't skip it.
5397 	 */
5398 	if (alloc_shrinker_info(memcg))
5399 		goto offline_kmem;
5400 
5401 	/* Online state pins memcg ID, memcg ID pins CSS */
5402 	refcount_set(&memcg->id.ref, 1);
5403 	css_get(css);
5404 
5405 	if (unlikely(mem_cgroup_is_root(memcg)))
5406 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5407 				   2UL*HZ);
5408 	lru_gen_online_memcg(memcg);
5409 	return 0;
5410 offline_kmem:
5411 	memcg_offline_kmem(memcg);
5412 remove_id:
5413 	mem_cgroup_id_remove(memcg);
5414 	return -ENOMEM;
5415 }
5416 
5417 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5418 {
5419 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5420 	struct mem_cgroup_event *event, *tmp;
5421 
5422 	/*
5423 	 * Unregister events and notify userspace.
5424 	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5425 	 * directory to avoid a race between userspace and kernelspace.
5426 	 */
5427 	spin_lock_irq(&memcg->event_list_lock);
5428 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5429 		list_del_init(&event->list);
5430 		schedule_work(&event->remove);
5431 	}
5432 	spin_unlock_irq(&memcg->event_list_lock);
5433 
5434 	page_counter_set_min(&memcg->memory, 0);
5435 	page_counter_set_low(&memcg->memory, 0);
5436 
5437 	memcg_offline_kmem(memcg);
5438 	reparent_shrinker_deferred(memcg);
5439 	wb_memcg_offline(memcg);
5440 	lru_gen_offline_memcg(memcg);
5441 
5442 	drain_all_stock(memcg);
5443 
5444 	mem_cgroup_id_put(memcg);
5445 }
5446 
5447 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5448 {
5449 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5450 
5451 	invalidate_reclaim_iterators(memcg);
5452 	lru_gen_release_memcg(memcg);
5453 }
5454 
5455 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5456 {
5457 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5458 	int __maybe_unused i;
5459 
5460 #ifdef CONFIG_CGROUP_WRITEBACK
5461 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5462 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5463 #endif
5464 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5465 		static_branch_dec(&memcg_sockets_enabled_key);
5466 
5467 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5468 		static_branch_dec(&memcg_sockets_enabled_key);
5469 
5470 #if defined(CONFIG_MEMCG_KMEM)
5471 	if (!cgroup_memory_nobpf)
5472 		static_branch_dec(&memcg_bpf_enabled_key);
5473 #endif
5474 
5475 	vmpressure_cleanup(&memcg->vmpressure);
5476 	cancel_work_sync(&memcg->high_work);
5477 	mem_cgroup_remove_from_trees(memcg);
5478 	free_shrinker_info(memcg);
5479 	mem_cgroup_free(memcg);
5480 }
5481 
5482 /**
5483  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5484  * @css: the target css
5485  *
5486  * Reset the states of the mem_cgroup associated with @css.  This is
5487  * invoked when the userland requests disabling on the default hierarchy
5488  * but the memcg is pinned through dependency.  The memcg should stop
5489  * applying policies and should revert to the vanilla state as it may be
5490  * made visible again.
5491  *
5492  * The current implementation only resets the essential configurations.
5493  * This needs to be expanded to cover all the visible parts.
5494  */
5495 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5496 {
5497 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5498 
5499 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5500 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5501 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5502 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5503 	page_counter_set_min(&memcg->memory, 0);
5504 	page_counter_set_low(&memcg->memory, 0);
5505 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5506 	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5507 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5508 	memcg_wb_domain_size_changed(memcg);
5509 }
5510 
5511 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5512 {
5513 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5514 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5515 	struct memcg_vmstats_percpu *statc;
5516 	long delta, v;
5517 	int i, nid;
5518 
5519 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5520 
5521 	for (i = 0; i < MEMCG_NR_STAT; i++) {
5522 		/*
5523 		 * Collect the aggregated propagation counts of groups
5524 		 * below us. We're in a per-cpu loop here and this is
5525 		 * a global counter, so the first cycle will get them.
5526 		 */
5527 		delta = memcg->vmstats->state_pending[i];
5528 		if (delta)
5529 			memcg->vmstats->state_pending[i] = 0;
5530 
5531 		/* Add CPU changes on this level since the last flush */
5532 		v = READ_ONCE(statc->state[i]);
5533 		if (v != statc->state_prev[i]) {
5534 			delta += v - statc->state_prev[i];
5535 			statc->state_prev[i] = v;
5536 		}
5537 
5538 		if (!delta)
5539 			continue;
5540 
5541 		/* Aggregate counts on this level and propagate upwards */
5542 		memcg->vmstats->state[i] += delta;
5543 		if (parent)
5544 			parent->vmstats->state_pending[i] += delta;
5545 	}
5546 
5547 	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5548 		delta = memcg->vmstats->events_pending[i];
5549 		if (delta)
5550 			memcg->vmstats->events_pending[i] = 0;
5551 
5552 		v = READ_ONCE(statc->events[i]);
5553 		if (v != statc->events_prev[i]) {
5554 			delta += v - statc->events_prev[i];
5555 			statc->events_prev[i] = v;
5556 		}
5557 
5558 		if (!delta)
5559 			continue;
5560 
5561 		memcg->vmstats->events[i] += delta;
5562 		if (parent)
5563 			parent->vmstats->events_pending[i] += delta;
5564 	}
5565 
5566 	for_each_node_state(nid, N_MEMORY) {
5567 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5568 		struct mem_cgroup_per_node *ppn = NULL;
5569 		struct lruvec_stats_percpu *lstatc;
5570 
5571 		if (parent)
5572 			ppn = parent->nodeinfo[nid];
5573 
5574 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5575 
5576 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5577 			delta = pn->lruvec_stats.state_pending[i];
5578 			if (delta)
5579 				pn->lruvec_stats.state_pending[i] = 0;
5580 
5581 			v = READ_ONCE(lstatc->state[i]);
5582 			if (v != lstatc->state_prev[i]) {
5583 				delta += v - lstatc->state_prev[i];
5584 				lstatc->state_prev[i] = v;
5585 			}
5586 
5587 			if (!delta)
5588 				continue;
5589 
5590 			pn->lruvec_stats.state[i] += delta;
5591 			if (ppn)
5592 				ppn->lruvec_stats.state_pending[i] += delta;
5593 		}
5594 	}
5595 }
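/*
 * A small worked example of the flush above, with illustrative numbers only:
 * suppose CPU 3's per-cpu counter for NR_FILE_PAGES was last flushed at
 * state_prev = 10 and has since moved to 40.  The flush computes
 * delta = 40 - 10 = 30, records 40 as the new state_prev, folds the 30 into
 * this memcg's vmstats->state[NR_FILE_PAGES], and queues the same 30 in the
 * parent's state_pending[NR_FILE_PAGES] so the parent folds it in during its
 * own pass.
 */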
5596 
5597 #ifdef CONFIG_MMU
5598 /* Handlers for move charge at task migration. */
5599 static int mem_cgroup_do_precharge(unsigned long count)
5600 {
5601 	int ret;
5602 
5603 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5604 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5605 	if (!ret) {
5606 		mc.precharge += count;
5607 		return ret;
5608 	}
5609 
5610 	/* Try charges one by one with reclaim, but do not retry */
5611 	while (count--) {
5612 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5613 		if (ret)
5614 			return ret;
5615 		mc.precharge++;
5616 		cond_resched();
5617 	}
5618 	return 0;
5619 }
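/*
 * A rough illustration of the precharge strategy above (the numbers are made
 * up): for count == 512, the first try_charge() asks for all 512 pages with
 * GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, i.e. it may wake kswapd but will not
 * reclaim synchronously.  Only if that bulk attempt fails do we fall back to
 * 512 individual try_charge(..., 1) calls that may reclaim directly but use
 * __GFP_NORETRY, so a single hard failure aborts the whole precharge.
 */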
5620 
5621 union mc_target {
5622 	struct page	*page;
5623 	swp_entry_t	ent;
5624 };
5625 
5626 enum mc_target_type {
5627 	MC_TARGET_NONE = 0,
5628 	MC_TARGET_PAGE,
5629 	MC_TARGET_SWAP,
5630 	MC_TARGET_DEVICE,
5631 };
5632 
5633 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5634 						unsigned long addr, pte_t ptent)
5635 {
5636 	struct page *page = vm_normal_page(vma, addr, ptent);
5637 
5638 	if (!page || !page_mapped(page))
5639 		return NULL;
5640 	if (PageAnon(page)) {
5641 		if (!(mc.flags & MOVE_ANON))
5642 			return NULL;
5643 	} else {
5644 		if (!(mc.flags & MOVE_FILE))
5645 			return NULL;
5646 	}
5647 	if (!get_page_unless_zero(page))
5648 		return NULL;
5649 
5650 	return page;
5651 }
5652 
5653 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5654 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5655 			pte_t ptent, swp_entry_t *entry)
5656 {
5657 	struct page *page = NULL;
5658 	swp_entry_t ent = pte_to_swp_entry(ptent);
5659 
5660 	if (!(mc.flags & MOVE_ANON))
5661 		return NULL;
5662 
5663 	/*
5664 	 * Handle device private pages that are not accessible by the CPU, but
5665 	 * stored as special swap entries in the page table.
5666 	 */
5667 	if (is_device_private_entry(ent)) {
5668 		page = pfn_swap_entry_to_page(ent);
5669 		if (!get_page_unless_zero(page))
5670 			return NULL;
5671 		return page;
5672 	}
5673 
5674 	if (non_swap_entry(ent))
5675 		return NULL;
5676 
5677 	/*
5678 	 * Because swap_cache_get_folio() updates some statistics counters,
5679 	 * we call find_get_page() with swapper_space directly.
5680 	 */
5681 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5682 	entry->val = ent.val;
5683 
5684 	return page;
5685 }
5686 #else
5687 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5688 			pte_t ptent, swp_entry_t *entry)
5689 {
5690 	return NULL;
5691 }
5692 #endif
5693 
5694 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5695 			unsigned long addr, pte_t ptent)
5696 {
5697 	unsigned long index;
5698 	struct folio *folio;
5699 
5700 	if (!vma->vm_file) /* anonymous vma */
5701 		return NULL;
5702 	if (!(mc.flags & MOVE_FILE))
5703 		return NULL;
5704 
5705 	/* folio is moved even if it's not RSS of this task (page-faulted). */
5706 	/* shmem/tmpfs may report page out on swap: account for that too. */
5707 	index = linear_page_index(vma, addr);
5708 	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5709 	if (IS_ERR(folio))
5710 		return NULL;
5711 	return folio_file_page(folio, index);
5712 }
5713 
5714 /**
5715  * mem_cgroup_move_account - move account of the page
5716  * @page: the page
5717  * @compound: charge the page as compound or small page
5718  * @from: mem_cgroup which the page is moved from.
5719  * @to:	mem_cgroup which the page is moved to. @from != @to.
5720  *
5721  * The page must be locked and not on the LRU.
5722  *
5723  * This function doesn't "charge" the new cgroup and doesn't "uncharge" the
5724  * old cgroup.
5725  */
5726 static int mem_cgroup_move_account(struct page *page,
5727 				   bool compound,
5728 				   struct mem_cgroup *from,
5729 				   struct mem_cgroup *to)
5730 {
5731 	struct folio *folio = page_folio(page);
5732 	struct lruvec *from_vec, *to_vec;
5733 	struct pglist_data *pgdat;
5734 	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5735 	int nid, ret;
5736 
5737 	VM_BUG_ON(from == to);
5738 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5739 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5740 	VM_BUG_ON(compound && !folio_test_large(folio));
5741 
5742 	ret = -EINVAL;
5743 	if (folio_memcg(folio) != from)
5744 		goto out;
5745 
5746 	pgdat = folio_pgdat(folio);
5747 	from_vec = mem_cgroup_lruvec(from, pgdat);
5748 	to_vec = mem_cgroup_lruvec(to, pgdat);
5749 
5750 	folio_memcg_lock(folio);
5751 
5752 	if (folio_test_anon(folio)) {
5753 		if (folio_mapped(folio)) {
5754 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5755 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5756 			if (folio_test_transhuge(folio)) {
5757 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5758 						   -nr_pages);
5759 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5760 						   nr_pages);
5761 			}
5762 		}
5763 	} else {
5764 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5765 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5766 
5767 		if (folio_test_swapbacked(folio)) {
5768 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5769 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5770 		}
5771 
5772 		if (folio_mapped(folio)) {
5773 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5774 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5775 		}
5776 
5777 		if (folio_test_dirty(folio)) {
5778 			struct address_space *mapping = folio_mapping(folio);
5779 
5780 			if (mapping_can_writeback(mapping)) {
5781 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5782 						   -nr_pages);
5783 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5784 						   nr_pages);
5785 			}
5786 		}
5787 	}
5788 
5789 #ifdef CONFIG_SWAP
5790 	if (folio_test_swapcache(folio)) {
5791 		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
5792 		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
5793 	}
5794 #endif
5795 	if (folio_test_writeback(folio)) {
5796 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5797 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5798 	}
5799 
5800 	/*
5801 	 * All state has been migrated, let's switch to the new memcg.
5802 	 *
5803 	 * It is safe to change page's memcg here because the page
5804 	 * is referenced, charged, isolated, and locked: we can't race
5805 	 * with (un)charging, migration, LRU putback, or anything else
5806 	 * that would rely on a stable page's memory cgroup.
5807 	 *
5808 	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
5809 	 * to save space. As soon as we switch page's memory cgroup to a
5810 	 * new memcg that isn't locked, the above state can change
5811 	 * concurrently again. Make sure we're truly done with it.
5812 	 */
5813 	smp_mb();
5814 
5815 	css_get(&to->css);
5816 	css_put(&from->css);
5817 
5818 	folio->memcg_data = (unsigned long)to;
5819 
5820 	__folio_memcg_unlock(from);
5821 
5822 	ret = 0;
5823 	nid = folio_nid(folio);
5824 
5825 	local_irq_disable();
5826 	mem_cgroup_charge_statistics(to, nr_pages);
5827 	memcg_check_events(to, nid);
5828 	mem_cgroup_charge_statistics(from, -nr_pages);
5829 	memcg_check_events(from, nid);
5830 	local_irq_enable();
5831 out:
5832 	return ret;
5833 }
5834 
5835 /**
5836  * get_mctgt_type - get target type of moving charge
5837  * @vma: the vma the pte to be checked belongs
5838  * @addr: the address corresponding to the pte to be checked
5839  * @ptent: the pte to be checked
5840  * @target: the pointer in which the target page or swap entry will be stored (can be NULL)
5841  *
5842  * Returns
5843  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5844  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5845  *     move charge. If @target is not NULL, the page is stored in target->page
5846  *     with an extra refcount taken (callers should handle it).
5847  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5848  *     target for charge migration. If @target is not NULL, the entry is stored
5849  *     in target->ent.
5850  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is device memory and
5851  *     thus not on the LRU.
5852  *     For now such a page is charged like a regular page would be, as for all
5853  *     intents and purposes it is just special memory taking the place of a
5854  *     regular page.
5855  *
5856  *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5857  *
5858  * Called with pte lock held.
5859  */
5860 
5861 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5862 		unsigned long addr, pte_t ptent, union mc_target *target)
5863 {
5864 	struct page *page = NULL;
5865 	enum mc_target_type ret = MC_TARGET_NONE;
5866 	swp_entry_t ent = { .val = 0 };
5867 
5868 	if (pte_present(ptent))
5869 		page = mc_handle_present_pte(vma, addr, ptent);
5870 	else if (pte_none_mostly(ptent))
5871 		/*
5872 		 * PTE markers should be treated as a none pte here, separated
5873 		 * from other swap handling below.
5874 		 */
5875 		page = mc_handle_file_pte(vma, addr, ptent);
5876 	else if (is_swap_pte(ptent))
5877 		page = mc_handle_swap_pte(vma, ptent, &ent);
5878 
5879 	if (target && page) {
5880 		if (!trylock_page(page)) {
5881 			put_page(page);
5882 			return ret;
5883 		}
5884 		/*
5885 		 * page_mapped() must be stable during the move. This
5886 		 * pte is locked, so if it's present, the page cannot
5887 		 * become unmapped. If it isn't, we have only partial
5888 		 * control over the mapped state: the page lock will
5889 		 * prevent new faults against pagecache and swapcache,
5890 		 * so an unmapped page cannot become mapped. However,
5891 		 * if the page is already mapped elsewhere, it can
5892 		 * unmap, and there is nothing we can do about it.
5893 		 * Alas, skip moving the page in this case.
5894 		 */
5895 		if (!pte_present(ptent) && page_mapped(page)) {
5896 			unlock_page(page);
5897 			put_page(page);
5898 			return ret;
5899 		}
5900 	}
5901 
5902 	if (!page && !ent.val)
5903 		return ret;
5904 	if (page) {
5905 		/*
5906 		 * Do only a loose check without serialization.
5907 		 * mem_cgroup_move_account() checks whether the page is valid
5908 		 * under LRU exclusion.
5909 		 */
5910 		if (page_memcg(page) == mc.from) {
5911 			ret = MC_TARGET_PAGE;
5912 			if (is_device_private_page(page) ||
5913 			    is_device_coherent_page(page))
5914 				ret = MC_TARGET_DEVICE;
5915 			if (target)
5916 				target->page = page;
5917 		}
5918 		if (!ret || !target) {
5919 			if (target)
5920 				unlock_page(page);
5921 			put_page(page);
5922 		}
5923 	}
5924 	/*
5925 	 * There is a swap entry and a page doesn't exist or isn't charged.
5926 	 * But we cannot move a tail-page in a THP.
5927 	 */
5928 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5929 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5930 		ret = MC_TARGET_SWAP;
5931 		if (target)
5932 			target->ent = ent;
5933 	}
5934 	return ret;
5935 }
5936 
5937 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5938 /*
5939  * We don't consider PMD mapped swapping or file mapped pages because THP does
5940  * not support them for now.
5941  * Caller should make sure that pmd_trans_huge(pmd) is true.
5942  */
5943 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5944 		unsigned long addr, pmd_t pmd, union mc_target *target)
5945 {
5946 	struct page *page = NULL;
5947 	enum mc_target_type ret = MC_TARGET_NONE;
5948 
5949 	if (unlikely(is_swap_pmd(pmd))) {
5950 		VM_BUG_ON(thp_migration_supported() &&
5951 				  !is_pmd_migration_entry(pmd));
5952 		return ret;
5953 	}
5954 	page = pmd_page(pmd);
5955 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5956 	if (!(mc.flags & MOVE_ANON))
5957 		return ret;
5958 	if (page_memcg(page) == mc.from) {
5959 		ret = MC_TARGET_PAGE;
5960 		if (target) {
5961 			get_page(page);
5962 			if (!trylock_page(page)) {
5963 				put_page(page);
5964 				return MC_TARGET_NONE;
5965 			}
5966 			target->page = page;
5967 		}
5968 	}
5969 	return ret;
5970 }
5971 #else
5972 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5973 		unsigned long addr, pmd_t pmd, union mc_target *target)
5974 {
5975 	return MC_TARGET_NONE;
5976 }
5977 #endif
5978 
5979 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5980 					unsigned long addr, unsigned long end,
5981 					struct mm_walk *walk)
5982 {
5983 	struct vm_area_struct *vma = walk->vma;
5984 	pte_t *pte;
5985 	spinlock_t *ptl;
5986 
5987 	ptl = pmd_trans_huge_lock(pmd, vma);
5988 	if (ptl) {
5989 		/*
5990 		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
5991 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
5992 		 * this might change.
5993 		 */
5994 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5995 			mc.precharge += HPAGE_PMD_NR;
5996 		spin_unlock(ptl);
5997 		return 0;
5998 	}
5999 
6000 	if (pmd_trans_unstable(pmd))
6001 		return 0;
6002 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6003 	for (; addr != end; pte++, addr += PAGE_SIZE)
6004 		if (get_mctgt_type(vma, addr, *pte, NULL))
6005 			mc.precharge++;	/* increment precharge temporarily */
6006 	pte_unmap_unlock(pte - 1, ptl);
6007 	cond_resched();
6008 
6009 	return 0;
6010 }
6011 
6012 static const struct mm_walk_ops precharge_walk_ops = {
6013 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
6014 };
6015 
6016 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6017 {
6018 	unsigned long precharge;
6019 
6020 	mmap_read_lock(mm);
6021 	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6022 	mmap_read_unlock(mm);
6023 
6024 	precharge = mc.precharge;
6025 	mc.precharge = 0;
6026 
6027 	return precharge;
6028 }
6029 
6030 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6031 {
6032 	unsigned long precharge = mem_cgroup_count_precharge(mm);
6033 
6034 	VM_BUG_ON(mc.moving_task);
6035 	mc.moving_task = current;
6036 	return mem_cgroup_do_precharge(precharge);
6037 }
6038 
6039 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6040 static void __mem_cgroup_clear_mc(void)
6041 {
6042 	struct mem_cgroup *from = mc.from;
6043 	struct mem_cgroup *to = mc.to;
6044 
6045 	/* we must uncharge all the leftover precharges from mc.to */
6046 	if (mc.precharge) {
6047 		cancel_charge(mc.to, mc.precharge);
6048 		mc.precharge = 0;
6049 	}
6050 	/*
6051 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6052 	 * we must uncharge here.
6053 	 */
6054 	if (mc.moved_charge) {
6055 		cancel_charge(mc.from, mc.moved_charge);
6056 		mc.moved_charge = 0;
6057 	}
6058 	/* we must fixup refcnts and charges */
6059 	if (mc.moved_swap) {
6060 		/* uncharge swap account from the old cgroup */
6061 		if (!mem_cgroup_is_root(mc.from))
6062 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6063 
6064 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6065 
6066 		/*
6067 		 * we charged both to->memory and to->memsw, so we
6068 		 * should uncharge to->memory.
6069 		 */
6070 		if (!mem_cgroup_is_root(mc.to))
6071 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6072 
6073 		mc.moved_swap = 0;
6074 	}
6075 	memcg_oom_recover(from);
6076 	memcg_oom_recover(to);
6077 	wake_up_all(&mc.waitq);
6078 }
6079 
6080 static void mem_cgroup_clear_mc(void)
6081 {
6082 	struct mm_struct *mm = mc.mm;
6083 
6084 	/*
6085 	 * we must clear moving_task before waking up waiters at the end of
6086 	 * task migration.
6087 	 */
6088 	mc.moving_task = NULL;
6089 	__mem_cgroup_clear_mc();
6090 	spin_lock(&mc.lock);
6091 	mc.from = NULL;
6092 	mc.to = NULL;
6093 	mc.mm = NULL;
6094 	spin_unlock(&mc.lock);
6095 
6096 	mmput(mm);
6097 }
6098 
6099 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6100 {
6101 	struct cgroup_subsys_state *css;
6102 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6103 	struct mem_cgroup *from;
6104 	struct task_struct *leader, *p;
6105 	struct mm_struct *mm;
6106 	unsigned long move_flags;
6107 	int ret = 0;
6108 
6109 	/* charge immigration isn't supported on the default hierarchy */
6110 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6111 		return 0;
6112 
6113 	/*
6114 	 * Multi-process migrations only happen on the default hierarchy
6115 	 * where charge immigration is not used.  Perform charge
6116 	 * immigration if @tset contains a leader and whine if there are
6117 	 * multiple.
6118 	 */
6119 	p = NULL;
6120 	cgroup_taskset_for_each_leader(leader, css, tset) {
6121 		WARN_ON_ONCE(p);
6122 		p = leader;
6123 		memcg = mem_cgroup_from_css(css);
6124 	}
6125 	if (!p)
6126 		return 0;
6127 
6128 	/*
6129 	 * We are now committed to this value whatever it is. Changes in this
6130 	 * tunable will only affect upcoming migrations, not the current one.
6131 	 * So we need to save it, and stick with it for this migration.
6132 	 */
6133 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6134 	if (!move_flags)
6135 		return 0;
6136 
6137 	from = mem_cgroup_from_task(p);
6138 
6139 	VM_BUG_ON(from == memcg);
6140 
6141 	mm = get_task_mm(p);
6142 	if (!mm)
6143 		return 0;
6144 	/* We move charges only when we move an owner of the mm */
6145 	if (mm->owner == p) {
6146 		VM_BUG_ON(mc.from);
6147 		VM_BUG_ON(mc.to);
6148 		VM_BUG_ON(mc.precharge);
6149 		VM_BUG_ON(mc.moved_charge);
6150 		VM_BUG_ON(mc.moved_swap);
6151 
6152 		spin_lock(&mc.lock);
6153 		mc.mm = mm;
6154 		mc.from = from;
6155 		mc.to = memcg;
6156 		mc.flags = move_flags;
6157 		spin_unlock(&mc.lock);
6158 		/* We set mc.moving_task later */
6159 
6160 		ret = mem_cgroup_precharge_mc(mm);
6161 		if (ret)
6162 			mem_cgroup_clear_mc();
6163 	} else {
6164 		mmput(mm);
6165 	}
6166 	return ret;
6167 }
6168 
6169 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6170 {
6171 	if (mc.to)
6172 		mem_cgroup_clear_mc();
6173 }
6174 
6175 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6176 				unsigned long addr, unsigned long end,
6177 				struct mm_walk *walk)
6178 {
6179 	int ret = 0;
6180 	struct vm_area_struct *vma = walk->vma;
6181 	pte_t *pte;
6182 	spinlock_t *ptl;
6183 	enum mc_target_type target_type;
6184 	union mc_target target;
6185 	struct page *page;
6186 
6187 	ptl = pmd_trans_huge_lock(pmd, vma);
6188 	if (ptl) {
6189 		if (mc.precharge < HPAGE_PMD_NR) {
6190 			spin_unlock(ptl);
6191 			return 0;
6192 		}
6193 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6194 		if (target_type == MC_TARGET_PAGE) {
6195 			page = target.page;
6196 			if (isolate_lru_page(page)) {
6197 				if (!mem_cgroup_move_account(page, true,
6198 							     mc.from, mc.to)) {
6199 					mc.precharge -= HPAGE_PMD_NR;
6200 					mc.moved_charge += HPAGE_PMD_NR;
6201 				}
6202 				putback_lru_page(page);
6203 			}
6204 			unlock_page(page);
6205 			put_page(page);
6206 		} else if (target_type == MC_TARGET_DEVICE) {
6207 			page = target.page;
6208 			if (!mem_cgroup_move_account(page, true,
6209 						     mc.from, mc.to)) {
6210 				mc.precharge -= HPAGE_PMD_NR;
6211 				mc.moved_charge += HPAGE_PMD_NR;
6212 			}
6213 			unlock_page(page);
6214 			put_page(page);
6215 		}
6216 		spin_unlock(ptl);
6217 		return 0;
6218 	}
6219 
6220 	if (pmd_trans_unstable(pmd))
6221 		return 0;
6222 retry:
6223 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6224 	for (; addr != end; addr += PAGE_SIZE) {
6225 		pte_t ptent = *(pte++);
6226 		bool device = false;
6227 		swp_entry_t ent;
6228 
6229 		if (!mc.precharge)
6230 			break;
6231 
6232 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6233 		case MC_TARGET_DEVICE:
6234 			device = true;
6235 			fallthrough;
6236 		case MC_TARGET_PAGE:
6237 			page = target.page;
6238 			/*
6239 			 * We can have a part of a split pmd here. Moving it
6240 			 * could be done, but it would be too convoluted, so simply
6241 			 * ignore such a partial THP and keep it in the original
6242 			 * memcg. There should be somebody mapping the head.
6243 			 */
6244 			if (PageTransCompound(page))
6245 				goto put;
6246 			if (!device && !isolate_lru_page(page))
6247 				goto put;
6248 			if (!mem_cgroup_move_account(page, false,
6249 						mc.from, mc.to)) {
6250 				mc.precharge--;
6251 				/* we uncharge from mc.from later. */
6252 				mc.moved_charge++;
6253 			}
6254 			if (!device)
6255 				putback_lru_page(page);
6256 put:			/* get_mctgt_type() gets & locks the page */
6257 			unlock_page(page);
6258 			put_page(page);
6259 			break;
6260 		case MC_TARGET_SWAP:
6261 			ent = target.ent;
6262 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6263 				mc.precharge--;
6264 				mem_cgroup_id_get_many(mc.to, 1);
6265 				/* we fixup other refcnts and charges later. */
6266 				mc.moved_swap++;
6267 			}
6268 			break;
6269 		default:
6270 			break;
6271 		}
6272 	}
6273 	pte_unmap_unlock(pte - 1, ptl);
6274 	cond_resched();
6275 
6276 	if (addr != end) {
6277 		/*
6278 		 * We have consumed all precharges we got in can_attach().
6279 		 * We try to charge one by one, but don't do any additional
6280 		 * charges to mc.to if we have already failed a charge in the
6281 		 * attach() phase.
6282 		 */
6283 		ret = mem_cgroup_do_precharge(1);
6284 		if (!ret)
6285 			goto retry;
6286 	}
6287 
6288 	return ret;
6289 }
6290 
6291 static const struct mm_walk_ops charge_walk_ops = {
6292 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6293 };
6294 
6295 static void mem_cgroup_move_charge(void)
6296 {
6297 	lru_add_drain_all();
6298 	/*
6299 	 * Signal lock_page_memcg() to take the memcg's move_lock
6300 	 * while we're moving its pages to another memcg. Then wait
6301 	 * for already started RCU-only updates to finish.
6302 	 */
6303 	atomic_inc(&mc.from->moving_account);
6304 	synchronize_rcu();
6305 retry:
6306 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6307 		/*
6308 		 * Someone who is holding the mmap_lock might be waiting in
6309 		 * waitq. So we cancel all extra charges, wake up all waiters,
6310 		 * and retry. Because we cancel precharges, we might not be able
6311 		 * to move enough charges, but moving charge is a best-effort
6312 		 * feature anyway, so it wouldn't be a big problem.
6313 		 */
6314 		__mem_cgroup_clear_mc();
6315 		cond_resched();
6316 		goto retry;
6317 	}
6318 	/*
6319 	 * When we have consumed all precharges and failed in doing
6320 	 * additional charge, the page walk just aborts.
6321 	 */
6322 	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6323 	mmap_read_unlock(mc.mm);
6324 	atomic_dec(&mc.from->moving_account);
6325 }
6326 
6327 static void mem_cgroup_move_task(void)
6328 {
6329 	if (mc.to) {
6330 		mem_cgroup_move_charge();
6331 		mem_cgroup_clear_mc();
6332 	}
6333 }
6334 #else	/* !CONFIG_MMU */
6335 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6336 {
6337 	return 0;
6338 }
6339 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6340 {
6341 }
6342 static void mem_cgroup_move_task(void)
6343 {
6344 }
6345 #endif
6346 
6347 #ifdef CONFIG_LRU_GEN
6348 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6349 {
6350 	struct task_struct *task;
6351 	struct cgroup_subsys_state *css;
6352 
6353 	/* find the first leader if there is any */
6354 	cgroup_taskset_for_each_leader(task, css, tset)
6355 		break;
6356 
6357 	if (!task)
6358 		return;
6359 
6360 	task_lock(task);
6361 	if (task->mm && READ_ONCE(task->mm->owner) == task)
6362 		lru_gen_migrate_mm(task->mm);
6363 	task_unlock(task);
6364 }
6365 #else
6366 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6367 {
6368 }
6369 #endif /* CONFIG_LRU_GEN */
6370 
6371 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6372 {
6373 	if (value == PAGE_COUNTER_MAX)
6374 		seq_puts(m, "max\n");
6375 	else
6376 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6377 
6378 	return 0;
6379 }
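/*
 * Output format of the helper above, for illustration: the tunable is stored
 * in pages, so a memory.high of 32768 pages prints as 134217728 (bytes) on a
 * system with 4K pages, while the "no limit" value PAGE_COUNTER_MAX prints
 * as "max".
 */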
6380 
6381 static u64 memory_current_read(struct cgroup_subsys_state *css,
6382 			       struct cftype *cft)
6383 {
6384 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6385 
6386 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6387 }
6388 
6389 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6390 			    struct cftype *cft)
6391 {
6392 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6393 
6394 	return (u64)memcg->memory.watermark * PAGE_SIZE;
6395 }
6396 
6397 static int memory_min_show(struct seq_file *m, void *v)
6398 {
6399 	return seq_puts_memcg_tunable(m,
6400 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6401 }
6402 
6403 static ssize_t memory_min_write(struct kernfs_open_file *of,
6404 				char *buf, size_t nbytes, loff_t off)
6405 {
6406 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6407 	unsigned long min;
6408 	int err;
6409 
6410 	buf = strstrip(buf);
6411 	err = page_counter_memparse(buf, "max", &min);
6412 	if (err)
6413 		return err;
6414 
6415 	page_counter_set_min(&memcg->memory, min);
6416 
6417 	return nbytes;
6418 }
6419 
6420 static int memory_low_show(struct seq_file *m, void *v)
6421 {
6422 	return seq_puts_memcg_tunable(m,
6423 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6424 }
6425 
6426 static ssize_t memory_low_write(struct kernfs_open_file *of,
6427 				char *buf, size_t nbytes, loff_t off)
6428 {
6429 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6430 	unsigned long low;
6431 	int err;
6432 
6433 	buf = strstrip(buf);
6434 	err = page_counter_memparse(buf, "max", &low);
6435 	if (err)
6436 		return err;
6437 
6438 	page_counter_set_low(&memcg->memory, low);
6439 
6440 	return nbytes;
6441 }
6442 
6443 static int memory_high_show(struct seq_file *m, void *v)
6444 {
6445 	return seq_puts_memcg_tunable(m,
6446 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6447 }
6448 
6449 static ssize_t memory_high_write(struct kernfs_open_file *of,
6450 				 char *buf, size_t nbytes, loff_t off)
6451 {
6452 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6453 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6454 	bool drained = false;
6455 	unsigned long high;
6456 	int err;
6457 
6458 	buf = strstrip(buf);
6459 	err = page_counter_memparse(buf, "max", &high);
6460 	if (err)
6461 		return err;
6462 
6463 	page_counter_set_high(&memcg->memory, high);
6464 
6465 	for (;;) {
6466 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6467 		unsigned long reclaimed;
6468 
6469 		if (nr_pages <= high)
6470 			break;
6471 
6472 		if (signal_pending(current))
6473 			break;
6474 
6475 		if (!drained) {
6476 			drain_all_stock(memcg);
6477 			drained = true;
6478 			continue;
6479 		}
6480 
6481 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6482 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6483 
6484 		if (!reclaimed && !nr_retries--)
6485 			break;
6486 	}
6487 
6488 	memcg_wb_domain_size_changed(memcg);
6489 	return nbytes;
6490 }
6491 
6492 static int memory_max_show(struct seq_file *m, void *v)
6493 {
6494 	return seq_puts_memcg_tunable(m,
6495 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6496 }
6497 
6498 static ssize_t memory_max_write(struct kernfs_open_file *of,
6499 				char *buf, size_t nbytes, loff_t off)
6500 {
6501 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6502 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6503 	bool drained = false;
6504 	unsigned long max;
6505 	int err;
6506 
6507 	buf = strstrip(buf);
6508 	err = page_counter_memparse(buf, "max", &max);
6509 	if (err)
6510 		return err;
6511 
6512 	xchg(&memcg->memory.max, max);
6513 
6514 	for (;;) {
6515 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6516 
6517 		if (nr_pages <= max)
6518 			break;
6519 
6520 		if (signal_pending(current))
6521 			break;
6522 
6523 		if (!drained) {
6524 			drain_all_stock(memcg);
6525 			drained = true;
6526 			continue;
6527 		}
6528 
6529 		if (nr_reclaims) {
6530 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6531 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6532 				nr_reclaims--;
6533 			continue;
6534 		}
6535 
6536 		memcg_memory_event(memcg, MEMCG_OOM);
6537 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6538 			break;
6539 	}
6540 
6541 	memcg_wb_domain_size_changed(memcg);
6542 	return nbytes;
6543 }
6544 
6545 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6546 {
6547 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6548 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6549 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6550 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6551 	seq_printf(m, "oom_kill %lu\n",
6552 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6553 	seq_printf(m, "oom_group_kill %lu\n",
6554 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6555 }
6556 
6557 static int memory_events_show(struct seq_file *m, void *v)
6558 {
6559 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6560 
6561 	__memory_events_show(m, memcg->memory_events);
6562 	return 0;
6563 }
6564 
6565 static int memory_events_local_show(struct seq_file *m, void *v)
6566 {
6567 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6568 
6569 	__memory_events_show(m, memcg->memory_events_local);
6570 	return 0;
6571 }
6572 
6573 static int memory_stat_show(struct seq_file *m, void *v)
6574 {
6575 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6576 	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6577 
6578 	if (!buf)
6579 		return -ENOMEM;
6580 	memory_stat_format(memcg, buf, PAGE_SIZE);
6581 	seq_puts(m, buf);
6582 	kfree(buf);
6583 	return 0;
6584 }
6585 
6586 #ifdef CONFIG_NUMA
6587 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6588 						     int item)
6589 {
6590 	return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6591 }
6592 
6593 static int memory_numa_stat_show(struct seq_file *m, void *v)
6594 {
6595 	int i;
6596 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6597 
6598 	mem_cgroup_flush_stats();
6599 
6600 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6601 		int nid;
6602 
6603 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6604 			continue;
6605 
6606 		seq_printf(m, "%s", memory_stats[i].name);
6607 		for_each_node_state(nid, N_MEMORY) {
6608 			u64 size;
6609 			struct lruvec *lruvec;
6610 
6611 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6612 			size = lruvec_page_state_output(lruvec,
6613 							memory_stats[i].idx);
6614 			seq_printf(m, " N%d=%llu", nid, size);
6615 		}
6616 		seq_putc(m, '\n');
6617 	}
6618 
6619 	return 0;
6620 }
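/*
 * The resulting memory.numa_stat output looks roughly like (values are
 * illustrative only):
 *
 *	anon N0=1052672 N1=0
 *	file N0=4096 N1=8192
 *	...
 *
 * one line per entry of memory_stats[] that has a per-node counterpart, with
 * byte counts for each node that has memory.
 */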
6621 #endif
6622 
6623 static int memory_oom_group_show(struct seq_file *m, void *v)
6624 {
6625 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6626 
6627 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6628 
6629 	return 0;
6630 }
6631 
6632 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6633 				      char *buf, size_t nbytes, loff_t off)
6634 {
6635 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6636 	int ret, oom_group;
6637 
6638 	buf = strstrip(buf);
6639 	if (!buf)
6640 		return -EINVAL;
6641 
6642 	ret = kstrtoint(buf, 0, &oom_group);
6643 	if (ret)
6644 		return ret;
6645 
6646 	if (oom_group != 0 && oom_group != 1)
6647 		return -EINVAL;
6648 
6649 	WRITE_ONCE(memcg->oom_group, oom_group);
6650 
6651 	return nbytes;
6652 }
6653 
6654 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6655 			      size_t nbytes, loff_t off)
6656 {
6657 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6658 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6659 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6660 	unsigned int reclaim_options;
6661 	int err;
6662 
6663 	buf = strstrip(buf);
6664 	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6665 	if (err)
6666 		return err;
6667 
6668 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6669 	while (nr_reclaimed < nr_to_reclaim) {
6670 		unsigned long reclaimed;
6671 
6672 		if (signal_pending(current))
6673 			return -EINTR;
6674 
6675 		/*
6676 		 * This is the final attempt, drain percpu lru caches in the
6677 		 * hope of introducing more evictable pages for
6678 		 * try_to_free_mem_cgroup_pages().
6679 		 */
6680 		if (!nr_retries)
6681 			lru_add_drain_all();
6682 
6683 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6684 						nr_to_reclaim - nr_reclaimed,
6685 						GFP_KERNEL, reclaim_options);
6686 
6687 		if (!reclaimed && !nr_retries--)
6688 			return -EAGAIN;
6689 
6690 		nr_reclaimed += reclaimed;
6691 	}
6692 
6693 	return nbytes;
6694 }
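/*
 * Illustrative use of the memory.reclaim interface implemented above (the
 * path and amount are examples only):
 *
 *	# echo "128M" > /sys/fs/cgroup/example/memory.reclaim
 *
 * asks the kernel to proactively reclaim 128M from the cgroup; the write
 * returns -EAGAIN if that amount could not be reclaimed within
 * MAX_RECLAIM_RETRIES attempts, and -EINTR if a pending signal interrupts it.
 */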
6695 
6696 static struct cftype memory_files[] = {
6697 	{
6698 		.name = "current",
6699 		.flags = CFTYPE_NOT_ON_ROOT,
6700 		.read_u64 = memory_current_read,
6701 	},
6702 	{
6703 		.name = "peak",
6704 		.flags = CFTYPE_NOT_ON_ROOT,
6705 		.read_u64 = memory_peak_read,
6706 	},
6707 	{
6708 		.name = "min",
6709 		.flags = CFTYPE_NOT_ON_ROOT,
6710 		.seq_show = memory_min_show,
6711 		.write = memory_min_write,
6712 	},
6713 	{
6714 		.name = "low",
6715 		.flags = CFTYPE_NOT_ON_ROOT,
6716 		.seq_show = memory_low_show,
6717 		.write = memory_low_write,
6718 	},
6719 	{
6720 		.name = "high",
6721 		.flags = CFTYPE_NOT_ON_ROOT,
6722 		.seq_show = memory_high_show,
6723 		.write = memory_high_write,
6724 	},
6725 	{
6726 		.name = "max",
6727 		.flags = CFTYPE_NOT_ON_ROOT,
6728 		.seq_show = memory_max_show,
6729 		.write = memory_max_write,
6730 	},
6731 	{
6732 		.name = "events",
6733 		.flags = CFTYPE_NOT_ON_ROOT,
6734 		.file_offset = offsetof(struct mem_cgroup, events_file),
6735 		.seq_show = memory_events_show,
6736 	},
6737 	{
6738 		.name = "events.local",
6739 		.flags = CFTYPE_NOT_ON_ROOT,
6740 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6741 		.seq_show = memory_events_local_show,
6742 	},
6743 	{
6744 		.name = "stat",
6745 		.seq_show = memory_stat_show,
6746 	},
6747 #ifdef CONFIG_NUMA
6748 	{
6749 		.name = "numa_stat",
6750 		.seq_show = memory_numa_stat_show,
6751 	},
6752 #endif
6753 	{
6754 		.name = "oom.group",
6755 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6756 		.seq_show = memory_oom_group_show,
6757 		.write = memory_oom_group_write,
6758 	},
6759 	{
6760 		.name = "reclaim",
6761 		.flags = CFTYPE_NS_DELEGATABLE,
6762 		.write = memory_reclaim,
6763 	},
6764 	{ }	/* terminate */
6765 };
6766 
6767 struct cgroup_subsys memory_cgrp_subsys = {
6768 	.css_alloc = mem_cgroup_css_alloc,
6769 	.css_online = mem_cgroup_css_online,
6770 	.css_offline = mem_cgroup_css_offline,
6771 	.css_released = mem_cgroup_css_released,
6772 	.css_free = mem_cgroup_css_free,
6773 	.css_reset = mem_cgroup_css_reset,
6774 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
6775 	.can_attach = mem_cgroup_can_attach,
6776 	.attach = mem_cgroup_attach,
6777 	.cancel_attach = mem_cgroup_cancel_attach,
6778 	.post_attach = mem_cgroup_move_task,
6779 	.dfl_cftypes = memory_files,
6780 	.legacy_cftypes = mem_cgroup_legacy_files,
6781 	.early_init = 0,
6782 };
6783 
6784 /*
6785  * This function calculates an individual cgroup's effective
6786  * protection which is derived from its own memory.min/low, its
6787  * parent's and siblings' settings, as well as the actual memory
6788  * distribution in the tree.
6789  *
6790  * The following rules apply to the effective protection values:
6791  *
6792  * 1. At the first level of reclaim, effective protection is equal to
6793  *    the declared protection in memory.min and memory.low.
6794  *
6795  * 2. To enable safe delegation of the protection configuration, at
6796  *    subsequent levels the effective protection is capped to the
6797  *    parent's effective protection.
6798  *
6799  * 3. To make complex and dynamic subtrees easier to configure, the
6800  *    user is allowed to overcommit the declared protection at a given
6801  *    level. If that is the case, the parent's effective protection is
6802  *    distributed to the children in proportion to how much protection
6803  *    they have declared and how much of it they are utilizing.
6804  *
6805  *    This makes distribution proportional, but also work-conserving:
6806  *    if one cgroup claims much more protection than it uses memory,
6807  *    the unused remainder is available to its siblings.
6808  *
6809  * 4. Conversely, when the declared protection is undercommitted at a
6810  *    given level, the distribution of the larger parental protection
6811  *    budget is NOT proportional. A cgroup's protection from a sibling
6812  *    is capped to its own memory.min/low setting.
6813  *
6814  * 5. However, to allow protecting recursive subtrees from each other
6815  *    without having to declare each individual cgroup's fixed share
6816  *    of the ancestor's claim to protection, any unutilized -
6817  *    "floating" - protection from up the tree is distributed in
6818  *    proportion to each cgroup's *usage*. This makes the protection
6819  *    neutral wrt sibling cgroups and lets them compete freely over
6820  *    the shared parental protection budget, but it protects the
6821  *    subtree as a whole from neighboring subtrees.
6822  *
6823  * Note that 4. and 5. are not in conflict: 4. is about protecting
6824  * against immediate siblings whereas 5. is about protecting against
6825  * neighboring subtrees.
6826  */
6827 static unsigned long effective_protection(unsigned long usage,
6828 					  unsigned long parent_usage,
6829 					  unsigned long setting,
6830 					  unsigned long parent_effective,
6831 					  unsigned long siblings_protected)
6832 {
6833 	unsigned long protected;
6834 	unsigned long ep;
6835 
6836 	protected = min(usage, setting);
6837 	/*
6838 	 * If all cgroups at this level combined claim and use more
6839 	 * protection than what the parent affords them, distribute
6840 	 * shares in proportion to utilization.
6841 	 *
6842 	 * We are using actual utilization rather than the statically
6843 	 * claimed protection in order to be work-conserving: claimed
6844 	 * but unused protection is available to siblings that would
6845 	 * otherwise get a smaller chunk than what they claimed.
6846 	 */
6847 	if (siblings_protected > parent_effective)
6848 		return protected * parent_effective / siblings_protected;
6849 
6850 	/*
6851 	 * Ok, utilized protection of all children is within what the
6852 	 * parent affords them, so we know whatever this child claims
6853 	 * and utilizes is effectively protected.
6854 	 *
6855 	 * If there is unprotected usage beyond this value, reclaim
6856 	 * will apply pressure in proportion to that amount.
6857 	 *
6858 	 * If there is unutilized protection, the cgroup will be fully
6859 	 * shielded from reclaim, but we do return a smaller value for
6860 	 * protection than what the group could enjoy in theory. This
6861 	 * is okay. With the overcommit distribution above, effective
6862 	 * protection is always dependent on how memory is actually
6863 	 * consumed among the siblings anyway.
6864 	 */
6865 	ep = protected;
6866 
6867 	/*
6868 	 * If the children aren't claiming (all of) the protection
6869 	 * afforded to them by the parent, distribute the remainder in
6870 	 * proportion to the (unprotected) memory of each cgroup. That
6871 	 * way, cgroups that aren't explicitly prioritized wrt each
6872 	 * other compete freely over the allowance, but they are
6873 	 * collectively protected from neighboring trees.
6874 	 *
6875 	 * We're using unprotected memory for the weight so that if
6876 	 * some cgroups DO claim explicit protection, we don't protect
6877 	 * the same bytes twice.
6878 	 *
6879 	 * Check both usage and parent_usage against the respective
6880 	 * protected values. One should imply the other, but they
6881 	 * aren't read atomically - make sure the division is sane.
6882 	 */
6883 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6884 		return ep;
6885 	if (parent_effective > siblings_protected &&
6886 	    parent_usage > siblings_protected &&
6887 	    usage > protected) {
6888 		unsigned long unclaimed;
6889 
6890 		unclaimed = parent_effective - siblings_protected;
6891 		unclaimed *= usage - protected;
6892 		unclaimed /= parent_usage - siblings_protected;
6893 
6894 		ep += unclaimed;
6895 	}
6896 
6897 	return ep;
6898 }
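/*
 * A worked example of the overcommit rule above, with illustrative numbers:
 * a parent has an effective protection of 100 pages and two children, A with
 * memory.low = 80 and usage 80, and B with memory.low = 80 and usage 40.
 * siblings_protected = min(80, 80) + min(40, 80) = 120 > 100, so the parent's
 * budget is split in proportion to utilization: A gets 80 * 100 / 120 = 66
 * pages of effective protection and B gets 40 * 100 / 120 = 33 pages.
 */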
6899 
6900 /**
6901  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6902  * @root: the top ancestor of the sub-tree being checked
6903  * @memcg: the memory cgroup to check
6904  *
6905  * WARNING: This function is not stateless! It can only be used as part
6906  *          of a top-down tree iteration, not for isolated queries.
6907  */
6908 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6909 				     struct mem_cgroup *memcg)
6910 {
6911 	unsigned long usage, parent_usage;
6912 	struct mem_cgroup *parent;
6913 
6914 	if (mem_cgroup_disabled())
6915 		return;
6916 
6917 	if (!root)
6918 		root = root_mem_cgroup;
6919 
6920 	/*
6921 	 * Effective values of the reclaim targets are ignored so they
6922 	 * can be stale. Have a look at mem_cgroup_protection for more
6923 	 * details.
6924 	 * TODO: calculation should be more robust so that we do not need
6925 	 * that special casing.
6926 	 */
6927 	if (memcg == root)
6928 		return;
6929 
6930 	usage = page_counter_read(&memcg->memory);
6931 	if (!usage)
6932 		return;
6933 
6934 	parent = parent_mem_cgroup(memcg);
6935 
6936 	if (parent == root) {
6937 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6938 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6939 		return;
6940 	}
6941 
6942 	parent_usage = page_counter_read(&parent->memory);
6943 
6944 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6945 			READ_ONCE(memcg->memory.min),
6946 			READ_ONCE(parent->memory.emin),
6947 			atomic_long_read(&parent->memory.children_min_usage)));
6948 
6949 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6950 			READ_ONCE(memcg->memory.low),
6951 			READ_ONCE(parent->memory.elow),
6952 			atomic_long_read(&parent->memory.children_low_usage)));
6953 }
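/*
 * A sketch of the intended caller pattern (simplified; the real user is the
 * reclaim path in mm/vmscan.c, whose helpers differ between kernel versions):
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(target, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(target, memcg);
 *		... consult memcg->memory.emin/elow before reclaiming ...
 *	} while ((memcg = mem_cgroup_iter(target, memcg, NULL)));
 *
 * mem_cgroup_iter() walks the tree top-down, which is what makes the
 * parent-then-child ordering required by this function hold.
 */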
6954 
6955 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6956 			gfp_t gfp)
6957 {
6958 	long nr_pages = folio_nr_pages(folio);
6959 	int ret;
6960 
6961 	ret = try_charge(memcg, gfp, nr_pages);
6962 	if (ret)
6963 		goto out;
6964 
6965 	css_get(&memcg->css);
6966 	commit_charge(folio, memcg);
6967 
6968 	local_irq_disable();
6969 	mem_cgroup_charge_statistics(memcg, nr_pages);
6970 	memcg_check_events(memcg, folio_nid(folio));
6971 	local_irq_enable();
6972 out:
6973 	return ret;
6974 }
6975 
6976 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6977 {
6978 	struct mem_cgroup *memcg;
6979 	int ret;
6980 
6981 	memcg = get_mem_cgroup_from_mm(mm);
6982 	ret = charge_memcg(folio, memcg, gfp);
6983 	css_put(&memcg->css);
6984 
6985 	return ret;
6986 }
6987 
6988 /**
6989  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
6990  * @folio: folio to charge.
6991  * @mm: mm context of the victim
6992  * @gfp: reclaim mode
6993  * @entry: swap entry for which the folio is allocated
6994  *
6995  * This function charges a folio allocated for swapin. Please call this before
6996  * adding the folio to the swapcache.
6997  *
6998  * Returns 0 on success. Otherwise, an error code is returned.
6999  */
7000 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7001 				  gfp_t gfp, swp_entry_t entry)
7002 {
7003 	struct mem_cgroup *memcg;
7004 	unsigned short id;
7005 	int ret;
7006 
7007 	if (mem_cgroup_disabled())
7008 		return 0;
7009 
7010 	id = lookup_swap_cgroup_id(entry);
7011 	rcu_read_lock();
7012 	memcg = mem_cgroup_from_id(id);
7013 	if (!memcg || !css_tryget_online(&memcg->css))
7014 		memcg = get_mem_cgroup_from_mm(mm);
7015 	rcu_read_unlock();
7016 
7017 	ret = charge_memcg(folio, memcg, gfp);
7018 
7019 	css_put(&memcg->css);
7020 	return ret;
7021 }
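/*
 * Ordering expected by the helper above, roughly (the actual swapin path in
 * mm/swap_state.c has more failure handling than shown here):
 *
 *   1. allocate the new folio for the swap slot
 *   2. mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry)
 *   3. add the folio to the swapcache
 *   4. mem_cgroup_swapin_uncharge_swap(entry) to drop the swap charge that
 *      cgroup1's memsw counter is still holding for the slot
 */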
7022 
7023 /*
7024  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7025  * @entry: swap entry for which the page is charged
7026  *
7027  * Call this function after successfully adding the charged page to swapcache.
7028  *
7029  * Note: This function assumes the page for which swap slot is being uncharged
7030  * Note: This function assumes the page for which the swap slot is being
7031  * uncharged is an order-0 page.
7032 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7033 {
7034 	/*
7035 	 * Cgroup1's unified memory+swap counter has been charged with the
7036 	 * new swapcache page, finish the transfer by uncharging the swap
7037 	 * slot. The swap slot would also get uncharged when it dies, but
7038 	 * it can stick around indefinitely and we'd count the page twice
7039 	 * the entire time.
7040 	 *
7041 	 * Cgroup2 has separate resource counters for memory and swap,
7042 	 * so this is a non-issue here. Memory and swap charge lifetimes
7043 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
7044 	 * page to memory here, and uncharge swap when the slot is freed.
7045 	 */
7046 	if (!mem_cgroup_disabled() && do_memsw_account()) {
7047 		/*
7048 		 * The swap entry might not get freed for a long time,
7049 		 * let's not wait for it.  The page already received a
7050 		 * memory+swap charge, drop the swap entry duplicate.
7051 		 */
7052 		mem_cgroup_uncharge_swap(entry, 1);
7053 	}
7054 }
7055 
7056 struct uncharge_gather {
7057 	struct mem_cgroup *memcg;
7058 	unsigned long nr_memory;
7059 	unsigned long pgpgout;
7060 	unsigned long nr_kmem;
7061 	int nid;
7062 };
7063 
7064 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7065 {
7066 	memset(ug, 0, sizeof(*ug));
7067 }
7068 
7069 static void uncharge_batch(const struct uncharge_gather *ug)
7070 {
7071 	unsigned long flags;
7072 
7073 	if (ug->nr_memory) {
7074 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7075 		if (do_memsw_account())
7076 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7077 		if (ug->nr_kmem)
7078 			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7079 		memcg_oom_recover(ug->memcg);
7080 	}
7081 
7082 	local_irq_save(flags);
7083 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7084 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7085 	memcg_check_events(ug->memcg, ug->nid);
7086 	local_irq_restore(flags);
7087 
7088 	/* drop reference from uncharge_folio */
7089 	css_put(&ug->memcg->css);
7090 }
7091 
7092 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7093 {
7094 	long nr_pages;
7095 	struct mem_cgroup *memcg;
7096 	struct obj_cgroup *objcg;
7097 
7098 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7099 
7100 	/*
7101 	 * Nobody should be changing or seriously looking at
7102 	 * folio memcg or objcg at this point, we have fully
7103 	 * exclusive access to the folio.
7104 	 */
7105 	if (folio_memcg_kmem(folio)) {
7106 		objcg = __folio_objcg(folio);
7107 		/*
7108 		 * This get matches the put at the end of the function and
7109 		 * kmem pages do not hold memcg references anymore.
7110 		 */
7111 		memcg = get_mem_cgroup_from_objcg(objcg);
7112 	} else {
7113 		memcg = __folio_memcg(folio);
7114 	}
7115 
7116 	if (!memcg)
7117 		return;
7118 
7119 	if (ug->memcg != memcg) {
7120 		if (ug->memcg) {
7121 			uncharge_batch(ug);
7122 			uncharge_gather_clear(ug);
7123 		}
7124 		ug->memcg = memcg;
7125 		ug->nid = folio_nid(folio);
7126 
7127 		/* pairs with css_put in uncharge_batch */
7128 		css_get(&memcg->css);
7129 	}
7130 
7131 	nr_pages = folio_nr_pages(folio);
7132 
7133 	if (folio_memcg_kmem(folio)) {
7134 		ug->nr_memory += nr_pages;
7135 		ug->nr_kmem += nr_pages;
7136 
7137 		folio->memcg_data = 0;
7138 		obj_cgroup_put(objcg);
7139 	} else {
7140 		/* LRU pages aren't accounted at the root level */
7141 		if (!mem_cgroup_is_root(memcg))
7142 			ug->nr_memory += nr_pages;
7143 		ug->pgpgout++;
7144 
7145 		folio->memcg_data = 0;
7146 	}
7147 
7148 	css_put(&memcg->css);
7149 }
7150 
7151 void __mem_cgroup_uncharge(struct folio *folio)
7152 {
7153 	struct uncharge_gather ug;
7154 
7155 	/* Don't touch folio->lru of any random page, pre-check: */
7156 	if (!folio_memcg(folio))
7157 		return;
7158 
7159 	uncharge_gather_clear(&ug);
7160 	uncharge_folio(folio, &ug);
7161 	uncharge_batch(&ug);
7162 }
7163 
7164 /**
7165  * __mem_cgroup_uncharge_list - uncharge a list of pages
7166  * @page_list: list of pages to uncharge
7167  *
7168  * Uncharge a list of pages previously charged with
7169  * __mem_cgroup_charge().
7170  */
7171 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7172 {
7173 	struct uncharge_gather ug;
7174 	struct folio *folio;
7175 
7176 	uncharge_gather_clear(&ug);
7177 	list_for_each_entry(folio, page_list, lru)
7178 		uncharge_folio(folio, &ug);
7179 	if (ug.memcg)
7180 		uncharge_batch(&ug);
7181 }
7182 
7183 /**
7184  * mem_cgroup_migrate - Charge a folio's replacement.
7185  * @old: Currently circulating folio.
7186  * @new: Replacement folio.
7187  *
7188  * Charge @new as a replacement folio for @old. @old will
7189  * be uncharged upon free.
7190  *
7191  * Both folios must be locked, @new->mapping must be set up.
7192  */
7193 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7194 {
7195 	struct mem_cgroup *memcg;
7196 	long nr_pages = folio_nr_pages(new);
7197 	unsigned long flags;
7198 
7199 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7200 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7201 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7202 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7203 
7204 	if (mem_cgroup_disabled())
7205 		return;
7206 
7207 	/* Page cache replacement: new folio already charged? */
7208 	if (folio_memcg(new))
7209 		return;
7210 
7211 	memcg = folio_memcg(old);
7212 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7213 	if (!memcg)
7214 		return;
7215 
7216 	/* Force-charge the new page. The old one will be freed soon */
7217 	if (!mem_cgroup_is_root(memcg)) {
7218 		page_counter_charge(&memcg->memory, nr_pages);
7219 		if (do_memsw_account())
7220 			page_counter_charge(&memcg->memsw, nr_pages);
7221 	}
7222 
7223 	css_get(&memcg->css);
7224 	commit_charge(new, memcg);
7225 
7226 	local_irq_save(flags);
7227 	mem_cgroup_charge_statistics(memcg, nr_pages);
7228 	memcg_check_events(memcg, folio_nid(new));
7229 	local_irq_restore(flags);
7230 }
7231 
7232 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7233 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7234 
7235 void mem_cgroup_sk_alloc(struct sock *sk)
7236 {
7237 	struct mem_cgroup *memcg;
7238 
7239 	if (!mem_cgroup_sockets_enabled)
7240 		return;
7241 
7242 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7243 	if (!in_task())
7244 		return;
7245 
7246 	rcu_read_lock();
7247 	memcg = mem_cgroup_from_task(current);
7248 	if (mem_cgroup_is_root(memcg))
7249 		goto out;
7250 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7251 		goto out;
7252 	if (css_tryget(&memcg->css))
7253 		sk->sk_memcg = memcg;
7254 out:
7255 	rcu_read_unlock();
7256 }
7257 
7258 void mem_cgroup_sk_free(struct sock *sk)
7259 {
7260 	if (sk->sk_memcg)
7261 		css_put(&sk->sk_memcg->css);
7262 }
7263 
7264 /**
7265  * mem_cgroup_charge_skmem - charge socket memory
7266  * @memcg: memcg to charge
7267  * @nr_pages: number of pages to charge
7268  * @gfp_mask: reclaim mode
7269  *
7270  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7271  * @memcg's configured limit, %false if it doesn't.
7272  */
7273 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7274 			     gfp_t gfp_mask)
7275 {
7276 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7277 		struct page_counter *fail;
7278 
7279 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7280 			memcg->tcpmem_pressure = 0;
7281 			return true;
7282 		}
7283 		memcg->tcpmem_pressure = 1;
7284 		if (gfp_mask & __GFP_NOFAIL) {
7285 			page_counter_charge(&memcg->tcpmem, nr_pages);
7286 			return true;
7287 		}
7288 		return false;
7289 	}
7290 
7291 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7292 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7293 		return true;
7294 	}
7295 
7296 	return false;
7297 }
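
/*
 * Illustrative sketch (assumed caller, not from this file): a protocol
 * that grows a socket buffer by @amt pages only when the socket's memcg
 * can absorb the charge, with mem_cgroup_uncharge_skmem() called for the
 * same amount when the buffer shrinks:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt, GFP_NOWAIT))
 *		return -ENOMEM;
 */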
7298 
7299 /**
7300  * mem_cgroup_uncharge_skmem - uncharge socket memory
7301  * @memcg: memcg to uncharge
7302  * @nr_pages: number of pages to uncharge
7303  */
7304 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7305 {
7306 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7307 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7308 		return;
7309 	}
7310 
7311 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7312 
7313 	refill_stock(memcg, nr_pages);
7314 }
7315 
7316 static int __init cgroup_memory(char *s)
7317 {
7318 	char *token;
7319 
7320 	while ((token = strsep(&s, ",")) != NULL) {
7321 		if (!*token)
7322 			continue;
7323 		if (!strcmp(token, "nosocket"))
7324 			cgroup_memory_nosocket = true;
7325 		if (!strcmp(token, "nokmem"))
7326 			cgroup_memory_nokmem = true;
7327 		if (!strcmp(token, "nobpf"))
7328 			cgroup_memory_nobpf = true;
7329 	}
7330 	return 1;
7331 }
7332 __setup("cgroup.memory=", cgroup_memory);
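
/*
 * Example: booting with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables socket memory accounting and kernel memory accounting.
 * Tokens are comma-separated; empty and unrecognized tokens are
 * silently ignored by the parser above.
 */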
7333 
7334 /*
7335  * subsys_initcall() for memory controller.
7336  *
7337  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7338  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7339  * basically everything that doesn't depend on a specific mem_cgroup structure
7340  * should be initialized from here.
7341  */
7342 static int __init mem_cgroup_init(void)
7343 {
7344 	int cpu, node;
7345 
7346 	/*
7347 	 * An s32 type (see struct batched_lruvec_stat) is currently used for
7348 	 * per-memcg-per-cpu caching of per-node statistics. For that to keep
7349 	 * working, the overfill threshold must never be able to exceed
7350 	 * S32_MAX / PAGE_SIZE.
7351 	 */
7352 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7353 
7354 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7355 				  memcg_hotplug_cpu_dead);
7356 
7357 	for_each_possible_cpu(cpu)
7358 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7359 			  drain_local_stock);
7360 
7361 	for_each_node(node) {
7362 		struct mem_cgroup_tree_per_node *rtpn;
7363 
7364 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7365 				    node_online(node) ? node : NUMA_NO_NODE);
7366 
7367 		rtpn->rb_root = RB_ROOT;
7368 		rtpn->rb_rightmost = NULL;
7369 		spin_lock_init(&rtpn->lock);
7370 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7371 	}
7372 
7373 	return 0;
7374 }
7375 subsys_initcall(mem_cgroup_init);
7376 
7377 #ifdef CONFIG_SWAP
7378 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7379 {
7380 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7381 		/*
7382 		 * The root cgroup cannot be destroyed, so its refcount must
7383 		 * always be >= 1.
7384 		 */
7385 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7386 			VM_BUG_ON(1);
7387 			break;
7388 		}
7389 		memcg = parent_mem_cgroup(memcg);
7390 		if (!memcg)
7391 			memcg = root_mem_cgroup;
7392 	}
7393 	return memcg;
7394 }
7395 
7396 /**
7397  * mem_cgroup_swapout - transfer a memsw charge to swap
7398  * @folio: folio whose memsw charge to transfer
7399  * @entry: swap entry to move the charge to
7400  *
7401  * Transfer the memsw charge of @folio to @entry.
7402  */
7403 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7404 {
7405 	struct mem_cgroup *memcg, *swap_memcg;
7406 	unsigned int nr_entries;
7407 	unsigned short oldid;
7408 
7409 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7410 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7411 
7412 	if (mem_cgroup_disabled())
7413 		return;
7414 
7415 	if (!do_memsw_account())
7416 		return;
7417 
7418 	memcg = folio_memcg(folio);
7419 
7420 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7421 	if (!memcg)
7422 		return;
7423 
7424 	/*
7425 	 * In case the memcg owning these pages has been offlined and doesn't
7426 	 * have an ID allocated to it anymore, charge the closest online
7427 	 * ancestor for the swap instead and transfer the memory+swap charge.
7428 	 */
7429 	swap_memcg = mem_cgroup_id_get_online(memcg);
7430 	nr_entries = folio_nr_pages(folio);
7431 	/* Get references for the tail pages, too */
7432 	if (nr_entries > 1)
7433 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7434 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7435 				   nr_entries);
7436 	VM_BUG_ON_FOLIO(oldid, folio);
7437 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7438 
7439 	folio->memcg_data = 0;
7440 
7441 	if (!mem_cgroup_is_root(memcg))
7442 		page_counter_uncharge(&memcg->memory, nr_entries);
7443 
7444 	if (memcg != swap_memcg) {
7445 		if (!mem_cgroup_is_root(swap_memcg))
7446 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7447 		page_counter_uncharge(&memcg->memsw, nr_entries);
7448 	}
7449 
7450 	/*
7451 	 * Interrupts should be disabled here because the caller holds the
7452 	 * i_pages lock, which is taken with interrupts off. Keeping them
7453 	 * disabled matters because it is the only synchronisation we have
7454 	 * for updating the per-CPU variables.
7455 	 */
7456 	memcg_stats_lock();
7457 	mem_cgroup_charge_statistics(memcg, -nr_entries);
7458 	memcg_stats_unlock();
7459 	memcg_check_events(memcg, folio_nid(folio));
7460 
7461 	css_put(&memcg->css);
7462 }
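
/*
 * Illustrative sketch (assumed reclaim-side caller, not from this file):
 * the swap-out path is expected to call this with the folio unmapped,
 * off the LRU, and with the i_pages lock held, which is what the
 * interrupt comment above relies on:
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	mem_cgroup_swapout(folio, swap);
 *	... delete the folio from the swap cache ...
 *	xa_unlock_irq(&mapping->i_pages);
 */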
7463 
7464 /**
7465  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7466  * @folio: folio being added to swap
7467  * @entry: swap entry to charge
7468  *
7469  * Try to charge @folio's memcg for the swap space at @entry.
7470  *
7471  * Returns 0 on success, -ENOMEM on failure.
7472  */
7473 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7474 {
7475 	unsigned int nr_pages = folio_nr_pages(folio);
7476 	struct page_counter *counter;
7477 	struct mem_cgroup *memcg;
7478 	unsigned short oldid;
7479 
7480 	if (do_memsw_account())
7481 		return 0;
7482 
7483 	memcg = folio_memcg(folio);
7484 
7485 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7486 	if (!memcg)
7487 		return 0;
7488 
7489 	if (!entry.val) {
7490 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7491 		return 0;
7492 	}
7493 
7494 	memcg = mem_cgroup_id_get_online(memcg);
7495 
7496 	if (!mem_cgroup_is_root(memcg) &&
7497 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7498 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7499 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7500 		mem_cgroup_id_put(memcg);
7501 		return -ENOMEM;
7502 	}
7503 
7504 	/* Get references for the tail pages, too */
7505 	if (nr_pages > 1)
7506 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7507 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7508 	VM_BUG_ON_FOLIO(oldid, folio);
7509 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7510 
7511 	return 0;
7512 }
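
/*
 * Illustrative sketch (assumption): the swap allocation path reaches this
 * through the mem_cgroup_try_charge_swap() wrapper right after a swap
 * slot has been reserved for the folio, and gives the slot back if the
 * memcg's swap limit is already exhausted:
 *
 *	if (mem_cgroup_try_charge_swap(folio, entry)) {
 *		... release the just-allocated swap slot ...
 *		return 0;
 *	}
 */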
7513 
7514 /**
7515  * __mem_cgroup_uncharge_swap - uncharge swap space
7516  * @entry: swap entry to uncharge
7517  * @nr_pages: the amount of swap space to uncharge
7518  */
7519 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7520 {
7521 	struct mem_cgroup *memcg;
7522 	unsigned short id;
7523 
7524 	if (mem_cgroup_disabled())
7525 		return;
7526 
7527 	id = swap_cgroup_record(entry, 0, nr_pages);
7528 	rcu_read_lock();
7529 	memcg = mem_cgroup_from_id(id);
7530 	if (memcg) {
7531 		if (!mem_cgroup_is_root(memcg)) {
7532 			if (do_memsw_account())
7533 				page_counter_uncharge(&memcg->memsw, nr_pages);
7534 			else
7535 				page_counter_uncharge(&memcg->swap, nr_pages);
7536 		}
7537 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7538 		mem_cgroup_id_put_many(memcg, nr_pages);
7539 	}
7540 	rcu_read_unlock();
7541 }
7542 
7543 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7544 {
7545 	long nr_swap_pages = get_nr_swap_pages();
7546 
7547 	if (mem_cgroup_disabled() || do_memsw_account())
7548 		return nr_swap_pages;
7549 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7550 		nr_swap_pages = min_t(long, nr_swap_pages,
7551 				      READ_ONCE(memcg->swap.max) -
7552 				      page_counter_read(&memcg->swap));
7553 	return nr_swap_pages;
7554 }
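
/*
 * Worked example for the helper above: with 1000 free swap pages
 * globally, a memcg with swap.max == 512 and 500 pages of swap already
 * charged can swap out at most min(1000, 512 - 500) == 12 more pages;
 * the tightest limit along the ancestry wins.
 */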
7555 
7556 bool mem_cgroup_swap_full(struct folio *folio)
7557 {
7558 	struct mem_cgroup *memcg;
7559 
7560 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7561 
7562 	if (vm_swap_full())
7563 		return true;
7564 	if (do_memsw_account())
7565 		return false;
7566 
7567 	memcg = folio_memcg(folio);
7568 	if (!memcg)
7569 		return false;
7570 
7571 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7572 		unsigned long usage = page_counter_read(&memcg->swap);
7573 
7574 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7575 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7576 			return true;
7577 	}
7578 
7579 	return false;
7580 }
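
/*
 * Worked example for the check above: with swap.max == 200 pages and
 * usage == 100 pages, usage * 2 == 200 >= max, so swap is considered
 * full once a memcg is at half of its limit; the same 50% rule applies
 * to swap.high, and crossing it in any ancestor is enough.
 */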
7581 
7582 static int __init setup_swap_account(char *s)
7583 {
7584 	pr_warn_once("The swapaccount= commandline option is deprecated. "
7585 		     "Please report your usecase to linux-mm@kvack.org if you "
7586 		     "depend on this functionality.\n");
7587 	return 1;
7588 }
7589 __setup("swapaccount=", setup_swap_account);
7590 
7591 static u64 swap_current_read(struct cgroup_subsys_state *css,
7592 			     struct cftype *cft)
7593 {
7594 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7595 
7596 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7597 }
7598 
7599 static int swap_high_show(struct seq_file *m, void *v)
7600 {
7601 	return seq_puts_memcg_tunable(m,
7602 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7603 }
7604 
7605 static ssize_t swap_high_write(struct kernfs_open_file *of,
7606 			       char *buf, size_t nbytes, loff_t off)
7607 {
7608 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7609 	unsigned long high;
7610 	int err;
7611 
7612 	buf = strstrip(buf);
7613 	err = page_counter_memparse(buf, "max", &high);
7614 	if (err)
7615 		return err;
7616 
7617 	page_counter_set_high(&memcg->swap, high);
7618 
7619 	return nbytes;
7620 }
7621 
7622 static int swap_max_show(struct seq_file *m, void *v)
7623 {
7624 	return seq_puts_memcg_tunable(m,
7625 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7626 }
7627 
7628 static ssize_t swap_max_write(struct kernfs_open_file *of,
7629 			      char *buf, size_t nbytes, loff_t off)
7630 {
7631 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7632 	unsigned long max;
7633 	int err;
7634 
7635 	buf = strstrip(buf);
7636 	err = page_counter_memparse(buf, "max", &max);
7637 	if (err)
7638 		return err;
7639 
7640 	xchg(&memcg->swap.max, max);
7641 
7642 	return nbytes;
7643 }
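
/*
 * Example (cgroup v2 interface backed by the handlers above):
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.high
 *	echo 1G > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max > /sys/fs/cgroup/<group>/memory.swap.max
 *
 * Values go through page_counter_memparse(), so byte counts with
 * K/M/G suffixes and the literal "max" are accepted; writing "max"
 * removes the limit again.
 */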
7644 
7645 static int swap_events_show(struct seq_file *m, void *v)
7646 {
7647 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7648 
7649 	seq_printf(m, "high %lu\n",
7650 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7651 	seq_printf(m, "max %lu\n",
7652 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7653 	seq_printf(m, "fail %lu\n",
7654 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7655 
7656 	return 0;
7657 }
7658 
7659 static struct cftype swap_files[] = {
7660 	{
7661 		.name = "swap.current",
7662 		.flags = CFTYPE_NOT_ON_ROOT,
7663 		.read_u64 = swap_current_read,
7664 	},
7665 	{
7666 		.name = "swap.high",
7667 		.flags = CFTYPE_NOT_ON_ROOT,
7668 		.seq_show = swap_high_show,
7669 		.write = swap_high_write,
7670 	},
7671 	{
7672 		.name = "swap.max",
7673 		.flags = CFTYPE_NOT_ON_ROOT,
7674 		.seq_show = swap_max_show,
7675 		.write = swap_max_write,
7676 	},
7677 	{
7678 		.name = "swap.events",
7679 		.flags = CFTYPE_NOT_ON_ROOT,
7680 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7681 		.seq_show = swap_events_show,
7682 	},
7683 	{ }	/* terminate */
7684 };
7685 
7686 static struct cftype memsw_files[] = {
7687 	{
7688 		.name = "memsw.usage_in_bytes",
7689 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7690 		.read_u64 = mem_cgroup_read_u64,
7691 	},
7692 	{
7693 		.name = "memsw.max_usage_in_bytes",
7694 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7695 		.write = mem_cgroup_reset,
7696 		.read_u64 = mem_cgroup_read_u64,
7697 	},
7698 	{
7699 		.name = "memsw.limit_in_bytes",
7700 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7701 		.write = mem_cgroup_write,
7702 		.read_u64 = mem_cgroup_read_u64,
7703 	},
7704 	{
7705 		.name = "memsw.failcnt",
7706 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7707 		.write = mem_cgroup_reset,
7708 		.read_u64 = mem_cgroup_read_u64,
7709 	},
7710 	{ },	/* terminate */
7711 };
7712 
7713 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7714 /**
7715  * obj_cgroup_may_zswap - check if this cgroup can zswap
7716  * @objcg: the object cgroup
7717  *
7718  * Check if the hierarchical zswap limit has been reached.
7719  *
7720  * This doesn't check for specific headroom, and it is not atomic
7721  * either. But with zswap, the size of the allocation is only known
7722  * once compression has occurred, and this optimistic pre-check avoids
7723  * spending cycles on compression when there is already no room left
7724  * or zswap is disabled altogether somewhere in the hierarchy.
7725  */
7726 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7727 {
7728 	struct mem_cgroup *memcg, *original_memcg;
7729 	bool ret = true;
7730 
7731 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7732 		return true;
7733 
7734 	original_memcg = get_mem_cgroup_from_objcg(objcg);
7735 	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
7736 	     memcg = parent_mem_cgroup(memcg)) {
7737 		unsigned long max = READ_ONCE(memcg->zswap_max);
7738 		unsigned long pages;
7739 
7740 		if (max == PAGE_COUNTER_MAX)
7741 			continue;
7742 		if (max == 0) {
7743 			ret = false;
7744 			break;
7745 		}
7746 
7747 		cgroup_rstat_flush(memcg->css.cgroup);
7748 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7749 		if (pages < max)
7750 			continue;
7751 		ret = false;
7752 		break;
7753 	}
7754 	mem_cgroup_put(original_memcg);
7755 	return ret;
7756 }
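
/*
 * Illustrative sketch (assumed zswap store path, not from this file):
 * the pre-check above is meant to run before compression, with the
 * actual charge applied only once the compressed size is known:
 *
 *	if (!obj_cgroup_may_zswap(objcg))
 *		goto reject;
 *	... compress the page, producing compressed_len bytes ...
 *	obj_cgroup_charge_zswap(objcg, compressed_len);
 */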
7757 
7758 /**
7759  * obj_cgroup_charge_zswap - charge compression backend memory
7760  * @objcg: the object cgroup
7761  * @size: size of compressed object
7762  *
7763  * This forces the charge after obj_cgroup_may_zswap() allowed
7764  * compression and storage in zswap for this cgroup to go ahead.
7765  */
7766 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7767 {
7768 	struct mem_cgroup *memcg;
7769 
7770 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7771 		return;
7772 
7773 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7774 
7775 	/* PF_MEMALLOC context, charging must succeed */
7776 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7777 		VM_WARN_ON_ONCE(1);
7778 
7779 	rcu_read_lock();
7780 	memcg = obj_cgroup_memcg(objcg);
7781 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7782 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7783 	rcu_read_unlock();
7784 }
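
/*
 * The matching obj_cgroup_uncharge_zswap() call below is expected with
 * the same @size once the compressed object is freed (e.g. on swapin or
 * invalidation), keeping the MEMCG_ZSWAP_B and MEMCG_ZSWAPPED counters
 * balanced.
 */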
7785 
7786 /**
7787  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7788  * @objcg: the object cgroup
7789  * @size: size of compressed object
7790  *
7791  * Uncharges zswap memory on page in.
7792  */
7793 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
7794 {
7795 	struct mem_cgroup *memcg;
7796 
7797 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7798 		return;
7799 
7800 	obj_cgroup_uncharge(objcg, size);
7801 
7802 	rcu_read_lock();
7803 	memcg = obj_cgroup_memcg(objcg);
7804 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
7805 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
7806 	rcu_read_unlock();
7807 }
7808 
7809 static u64 zswap_current_read(struct cgroup_subsys_state *css,
7810 			      struct cftype *cft)
7811 {
7812 	cgroup_rstat_flush(css->cgroup);
7813 	return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
7814 }
7815 
7816 static int zswap_max_show(struct seq_file *m, void *v)
7817 {
7818 	return seq_puts_memcg_tunable(m,
7819 		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
7820 }
7821 
7822 static ssize_t zswap_max_write(struct kernfs_open_file *of,
7823 			       char *buf, size_t nbytes, loff_t off)
7824 {
7825 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7826 	unsigned long max;
7827 	int err;
7828 
7829 	buf = strstrip(buf);
7830 	err = page_counter_memparse(buf, "max", &max);
7831 	if (err)
7832 		return err;
7833 
7834 	xchg(&memcg->zswap_max, max);
7835 
7836 	return nbytes;
7837 }
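
/*
 * Example (cgroup v2 interface backed by the handler above):
 *
 *	echo 100M > /sys/fs/cgroup/<group>/memory.zswap.max
 *	echo 0 > /sys/fs/cgroup/<group>/memory.zswap.max
 *
 * Writing 0 makes obj_cgroup_may_zswap() refuse all stores for this
 * subtree, effectively disabling zswap for it.
 */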
7838 
7839 static struct cftype zswap_files[] = {
7840 	{
7841 		.name = "zswap.current",
7842 		.flags = CFTYPE_NOT_ON_ROOT,
7843 		.read_u64 = zswap_current_read,
7844 	},
7845 	{
7846 		.name = "zswap.max",
7847 		.flags = CFTYPE_NOT_ON_ROOT,
7848 		.seq_show = zswap_max_show,
7849 		.write = zswap_max_write,
7850 	},
7851 	{ }	/* terminate */
7852 };
7853 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
7854 
7855 static int __init mem_cgroup_swap_init(void)
7856 {
7857 	if (mem_cgroup_disabled())
7858 		return 0;
7859 
7860 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7861 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7862 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7863 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
7864 #endif
7865 	return 0;
7866 }
7867 subsys_initcall(mem_cgroup_swap_init);
7868 
7869 #endif /* CONFIG_SWAP */
7870