xref: /openbmc/linux/mm/memcontrol.c (revision 54a611b6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70 #include "swap.h"
71 
72 #include <linux/uaccess.h>
73 
74 #include <trace/events/vmscan.h>
75 
76 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
77 EXPORT_SYMBOL(memory_cgrp_subsys);
78 
79 struct mem_cgroup *root_mem_cgroup __read_mostly;
80 
81 /* Active memory cgroup to use from an interrupt context */
82 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
83 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
84 
85 /* Socket memory accounting disabled? */
86 static bool cgroup_memory_nosocket __ro_after_init;
87 
88 /* Kernel memory accounting disabled? */
89 static bool cgroup_memory_nokmem __ro_after_init;
90 
91 /* Whether the swap controller is active */
92 #ifdef CONFIG_MEMCG_SWAP
93 static bool cgroup_memory_noswap __ro_after_init;
94 #else
95 #define cgroup_memory_noswap		1
96 #endif
97 
98 #ifdef CONFIG_CGROUP_WRITEBACK
99 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
100 #endif
101 
102 /* Whether legacy memory+swap accounting is active */
103 static bool do_memsw_account(void)
104 {
105 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
106 }
107 
108 #define THRESHOLDS_EVENTS_TARGET 128
109 #define SOFTLIMIT_EVENTS_TARGET 1024
110 
111 /*
112  * Cgroups above their limits are maintained in a RB-Tree, independent of
113  * their hierarchy representation
114  */
115 
116 struct mem_cgroup_tree_per_node {
117 	struct rb_root rb_root;
118 	struct rb_node *rb_rightmost;
119 	spinlock_t lock;
120 };
121 
122 struct mem_cgroup_tree {
123 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
124 };
125 
126 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
127 
128 /* for OOM */
129 struct mem_cgroup_eventfd_list {
130 	struct list_head list;
131 	struct eventfd_ctx *eventfd;
132 };
133 
134 /*
135  * cgroup_event represents events which userspace want to receive.
136  */
137 struct mem_cgroup_event {
138 	/*
139 	 * memcg which the event belongs to.
140 	 */
141 	struct mem_cgroup *memcg;
142 	/*
143 	 * eventfd to signal userspace about the event.
144 	 */
145 	struct eventfd_ctx *eventfd;
146 	/*
147 	 * Each of these is stored in a list by the cgroup.
148 	 */
149 	struct list_head list;
150 	/*
151 	 * register_event() callback will be used to add a new userspace
152 	 * waiter for changes related to this event.  Use eventfd_signal()
153 	 * on eventfd to send notification to userspace.
154 	 */
155 	int (*register_event)(struct mem_cgroup *memcg,
156 			      struct eventfd_ctx *eventfd, const char *args);
157 	/*
158 	 * unregister_event() callback will be called when userspace closes
159 	 * the eventfd or when the cgroup is removed.  This callback must be
160 	 * set if you want to provide notification functionality.
161 	 */
162 	void (*unregister_event)(struct mem_cgroup *memcg,
163 				 struct eventfd_ctx *eventfd);
164 	/*
165 	 * All fields below needed to unregister event when
166 	 * userspace closes eventfd.
167 	 */
168 	poll_table pt;
169 	wait_queue_head_t *wqh;
170 	wait_queue_entry_t wait;
171 	struct work_struct remove;
172 };
173 
174 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
175 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
176 
177 /* Stuff for moving charges at task migration. */
178 /*
179  * Types of charges to be moved.
180  */
181 #define MOVE_ANON	0x1U
182 #define MOVE_FILE	0x2U
183 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
184 
185 /* "mc" and its members are protected by cgroup_mutex */
186 static struct move_charge_struct {
187 	spinlock_t	  lock; /* for from, to */
188 	struct mm_struct  *mm;
189 	struct mem_cgroup *from;
190 	struct mem_cgroup *to;
191 	unsigned long flags;
192 	unsigned long precharge;
193 	unsigned long moved_charge;
194 	unsigned long moved_swap;
195 	struct task_struct *moving_task;	/* a task moving charges */
196 	wait_queue_head_t waitq;		/* a waitq for other context */
197 } mc = {
198 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
199 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
200 };
201 
202 /*
203  * Maximum loops in mem_cgroup_soft_reclaim(), used for soft limit
204  * reclaim to prevent infinite loops, if they ever occur.
205  */
206 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
207 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
208 
209 /* for encoding cft->private value on file */
210 enum res_type {
211 	_MEM,
212 	_MEMSWAP,
213 	_KMEM,
214 	_TCP,
215 };
216 
217 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
218 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
219 #define MEMFILE_ATTR(val)	((val) & 0xffff)
220 
221 /*
222  * Iteration constructs for visiting all cgroups (under a tree).  If
223  * loops are exited prematurely (break), mem_cgroup_iter_break() must
224  * be used for reference counting.
225  */
226 #define for_each_mem_cgroup_tree(iter, root)		\
227 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
228 	     iter != NULL;				\
229 	     iter = mem_cgroup_iter(root, iter, NULL))
230 
231 #define for_each_mem_cgroup(iter)			\
232 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
233 	     iter != NULL;				\
234 	     iter = mem_cgroup_iter(NULL, iter, NULL))
235 
236 static inline bool task_is_dying(void)
237 {
238 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
239 		(current->flags & PF_EXITING);
240 }
241 
242 /* Some nice accessors for the vmpressure. */
243 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
244 {
245 	if (!memcg)
246 		memcg = root_mem_cgroup;
247 	return &memcg->vmpressure;
248 }
249 
250 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
251 {
252 	return container_of(vmpr, struct mem_cgroup, vmpressure);
253 }
254 
255 #ifdef CONFIG_MEMCG_KMEM
256 static DEFINE_SPINLOCK(objcg_lock);
257 
258 bool mem_cgroup_kmem_disabled(void)
259 {
260 	return cgroup_memory_nokmem;
261 }
262 
263 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
264 				      unsigned int nr_pages);
265 
266 static void obj_cgroup_release(struct percpu_ref *ref)
267 {
268 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
269 	unsigned int nr_bytes;
270 	unsigned int nr_pages;
271 	unsigned long flags;
272 
273 	/*
274 	 * At this point all allocated objects are freed, and
275 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
276 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
277 	 *
278 	 * The following sequence can lead to it:
279 	 * 1) CPU0: objcg == stock->cached_objcg
280 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
281 	 *          PAGE_SIZE bytes are charged
282 	 * 3) CPU1: a process from another memcg is allocating something,
283 	 *          the stock is flushed,
284 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
285 	 * 4) CPU0: we release this object,
286 	 *          92 bytes are added to stock->nr_bytes
287 	 * 5) CPU0: stock is flushed,
288 	 *          92 bytes are added to objcg->nr_charged_bytes
289 	 *
290 	 * As a result, nr_charged_bytes == PAGE_SIZE.
291 	 * This page will be uncharged in obj_cgroup_release().
292 	 */
293 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
294 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
295 	nr_pages = nr_bytes >> PAGE_SHIFT;
296 
297 	if (nr_pages)
298 		obj_cgroup_uncharge_pages(objcg, nr_pages);
299 
300 	spin_lock_irqsave(&objcg_lock, flags);
301 	list_del(&objcg->list);
302 	spin_unlock_irqrestore(&objcg_lock, flags);
303 
304 	percpu_ref_exit(ref);
305 	kfree_rcu(objcg, rcu);
306 }
307 
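/*
 * Allocate a new obj_cgroup and initialize its percpu reference counter;
 * the counter's release callback is obj_cgroup_release() above.
 */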
308 static struct obj_cgroup *obj_cgroup_alloc(void)
309 {
310 	struct obj_cgroup *objcg;
311 	int ret;
312 
313 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
314 	if (!objcg)
315 		return NULL;
316 
317 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
318 			      GFP_KERNEL);
319 	if (ret) {
320 		kfree(objcg);
321 		return NULL;
322 	}
323 	INIT_LIST_HEAD(&objcg->list);
324 	return objcg;
325 }
326 
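/*
 * Detach the active objcg from @memcg, re-point it and any already
 * reparented objcgs at @parent, splice them onto @parent's list, and
 * drop the initial reference on the detached objcg.
 */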
327 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
328 				  struct mem_cgroup *parent)
329 {
330 	struct obj_cgroup *objcg, *iter;
331 
332 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
333 
334 	spin_lock_irq(&objcg_lock);
335 
336 	/* 1) Ready to reparent active objcg. */
337 	list_add(&objcg->list, &memcg->objcg_list);
338 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
339 	list_for_each_entry(iter, &memcg->objcg_list, list)
340 		WRITE_ONCE(iter->memcg, parent);
341 	/* 3) Move already reparented objcgs to the parent's list */
342 	list_splice(&memcg->objcg_list, &parent->objcg_list);
343 
344 	spin_unlock_irq(&objcg_lock);
345 
346 	percpu_ref_kill(&objcg->refcnt);
347 }
348 
349 /*
350  * A lot of the calls to the cache allocation functions are expected to be
351  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
352  * conditional on this static branch, we'll have to allow modules that do
353  * kmem_cache_alloc and the like to see this symbol as well.
354  */
355 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
356 EXPORT_SYMBOL(memcg_kmem_enabled_key);
357 #endif
358 
359 /**
360  * mem_cgroup_css_from_page - css of the memcg associated with a page
361  * @page: page of interest
362  *
363  * If memcg is bound to the default hierarchy, css of the memcg associated
364  * with @page is returned.  The returned css remains associated with @page
365  * until it is released.
366  *
367  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
368  * is returned.
369  */
370 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
371 {
372 	struct mem_cgroup *memcg;
373 
374 	memcg = page_memcg(page);
375 
376 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
377 		memcg = root_mem_cgroup;
378 
379 	return &memcg->css;
380 }
381 
382 /**
383  * page_cgroup_ino - return inode number of the memcg a page is charged to
384  * @page: the page
385  *
386  * Look up the closest online ancestor of the memory cgroup @page is charged to
387  * and return its inode number or 0 if @page is not charged to any cgroup. It
388  * is safe to call this function without holding a reference to @page.
389  *
390  * Note, this function is inherently racy, because there is nothing to prevent
391  * the cgroup inode from getting torn down and potentially reallocated a moment
392  * after page_cgroup_ino() returns, so it should only be used by callers that
393  * do not care (such as procfs interfaces).
394  */
395 ino_t page_cgroup_ino(struct page *page)
396 {
397 	struct mem_cgroup *memcg;
398 	unsigned long ino = 0;
399 
400 	rcu_read_lock();
401 	memcg = page_memcg_check(page);
402 
403 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
404 		memcg = parent_mem_cgroup(memcg);
405 	if (memcg)
406 		ino = cgroup_ino(memcg->css.cgroup);
407 	rcu_read_unlock();
408 	return ino;
409 }
410 
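/*
 * Insert @mz into the per-node soft limit RB-tree, ordered by how far the
 * memcg's usage exceeds its soft limit. Caller must hold @mctz->lock.
 */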
411 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
412 					 struct mem_cgroup_tree_per_node *mctz,
413 					 unsigned long new_usage_in_excess)
414 {
415 	struct rb_node **p = &mctz->rb_root.rb_node;
416 	struct rb_node *parent = NULL;
417 	struct mem_cgroup_per_node *mz_node;
418 	bool rightmost = true;
419 
420 	if (mz->on_tree)
421 		return;
422 
423 	mz->usage_in_excess = new_usage_in_excess;
424 	if (!mz->usage_in_excess)
425 		return;
426 	while (*p) {
427 		parent = *p;
428 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
429 					tree_node);
430 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
431 			p = &(*p)->rb_left;
432 			rightmost = false;
433 		} else {
434 			p = &(*p)->rb_right;
435 		}
436 	}
437 
438 	if (rightmost)
439 		mctz->rb_rightmost = &mz->tree_node;
440 
441 	rb_link_node(&mz->tree_node, parent, p);
442 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
443 	mz->on_tree = true;
444 }
445 
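/* Remove @mz from the soft limit RB-tree. Caller must hold @mctz->lock. */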
446 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
447 					 struct mem_cgroup_tree_per_node *mctz)
448 {
449 	if (!mz->on_tree)
450 		return;
451 
452 	if (&mz->tree_node == mctz->rb_rightmost)
453 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
454 
455 	rb_erase(&mz->tree_node, &mctz->rb_root);
456 	mz->on_tree = false;
457 }
458 
459 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
460 				       struct mem_cgroup_tree_per_node *mctz)
461 {
462 	unsigned long flags;
463 
464 	spin_lock_irqsave(&mctz->lock, flags);
465 	__mem_cgroup_remove_exceeded(mz, mctz);
466 	spin_unlock_irqrestore(&mctz->lock, flags);
467 }
468 
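/*
 * Return the number of pages by which @memcg's usage exceeds its soft
 * limit, or 0 if the usage is below the limit.
 */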
469 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
470 {
471 	unsigned long nr_pages = page_counter_read(&memcg->memory);
472 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
473 	unsigned long excess = 0;
474 
475 	if (nr_pages > soft_limit)
476 		excess = nr_pages - soft_limit;
477 
478 	return excess;
479 }
480 
481 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
482 {
483 	unsigned long excess;
484 	struct mem_cgroup_per_node *mz;
485 	struct mem_cgroup_tree_per_node *mctz;
486 
487 	mctz = soft_limit_tree.rb_tree_per_node[nid];
488 	if (!mctz)
489 		return;
490 	/*
491 	 * Necessary to update all ancestors when the hierarchy is used,
492 	 * because their event counters are not touched.
493 	 */
494 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
495 		mz = memcg->nodeinfo[nid];
496 		excess = soft_limit_excess(memcg);
497 		/*
498 		 * We have to update the tree if mz is on the RB-tree or
499 		 * the memcg is over its soft limit.
500 		 */
501 		if (excess || mz->on_tree) {
502 			unsigned long flags;
503 
504 			spin_lock_irqsave(&mctz->lock, flags);
505 			/* if on-tree, remove it */
506 			if (mz->on_tree)
507 				__mem_cgroup_remove_exceeded(mz, mctz);
508 			/*
509 			 * Insert again. mz->usage_in_excess will be updated.
510 			 * If excess is 0, no tree ops.
511 			 */
512 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
513 			spin_unlock_irqrestore(&mctz->lock, flags);
514 		}
515 	}
516 }
517 
518 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
519 {
520 	struct mem_cgroup_tree_per_node *mctz;
521 	struct mem_cgroup_per_node *mz;
522 	int nid;
523 
524 	for_each_node(nid) {
525 		mz = memcg->nodeinfo[nid];
526 		mctz = soft_limit_tree.rb_tree_per_node[nid];
527 		if (mctz)
528 			mem_cgroup_remove_exceeded(mz, mctz);
529 	}
530 }
531 
532 static struct mem_cgroup_per_node *
533 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
534 {
535 	struct mem_cgroup_per_node *mz;
536 
537 retry:
538 	mz = NULL;
539 	if (!mctz->rb_rightmost)
540 		goto done;		/* Nothing to reclaim from */
541 
542 	mz = rb_entry(mctz->rb_rightmost,
543 		      struct mem_cgroup_per_node, tree_node);
544 	/*
545 	 * Remove the node now but someone else can add it back;
546 	 * we will add it back at the end of reclaim to its correct
547 	 * position in the tree.
548 	 */
549 	__mem_cgroup_remove_exceeded(mz, mctz);
550 	if (!soft_limit_excess(mz->memcg) ||
551 	    !css_tryget(&mz->memcg->css))
552 		goto retry;
553 done:
554 	return mz;
555 }
556 
557 static struct mem_cgroup_per_node *
558 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
559 {
560 	struct mem_cgroup_per_node *mz;
561 
562 	spin_lock_irq(&mctz->lock);
563 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
564 	spin_unlock_irq(&mctz->lock);
565 	return mz;
566 }
567 
568 /*
569  * memcg and lruvec stats flushing
570  *
571  * Many codepaths leading to a stats update or read are performance sensitive,
572  * and adding stats flushing in such codepaths is not desirable. So, to optimize
573  * flushing, the kernel does the following:
574  *
575  * 1) Periodically and asynchronously flush the stats every 2 seconds so the
576  *    rstat update tree does not grow unbounded.
577  *
578  * 2) Flush the stats synchronously on the reader side only when there are more
579  *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This can let the stats
580  *    be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) update events,
581  *    but only for 2 seconds due to (1).
582  */
583 static void flush_memcg_stats_dwork(struct work_struct *w);
584 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
585 static DEFINE_SPINLOCK(stats_flush_lock);
586 static DEFINE_PER_CPU(unsigned int, stats_updates);
587 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
588 static u64 flush_next_time;
589 
590 #define FLUSH_TIME (2UL*HZ)
591 
592 /*
593  * Accessors to ensure that preemption is disabled on PREEMPT_RT, because an
594  * acquired spinlock_t lock does not imply it there. These functions are never
595  * used in hardirq context on PREEMPT_RT and therefore disabling preemption
596  * is sufficient.
597  */
598 static void memcg_stats_lock(void)
599 {
600 #ifdef CONFIG_PREEMPT_RT
601 	preempt_disable();
602 #else
603 	VM_BUG_ON(!irqs_disabled());
604 #endif
605 }
606 
607 static void __memcg_stats_lock(void)
608 {
609 #ifdef CONFIG_PREEMPT_RT
610 	preempt_disable();
611 #endif
612 }
613 
614 static void memcg_stats_unlock(void)
615 {
616 #ifdef CONFIG_PREEMPT_RT
617 	preempt_enable();
618 #endif
619 }
620 
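/*
 * Record that @memcg's stats were updated on this CPU and accumulate the
 * magnitude of the change; once enough updates have been batched, bump
 * stats_flush_threshold, which readers use to decide when to flush.
 */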
621 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
622 {
623 	unsigned int x;
624 
625 	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
626 
627 	x = __this_cpu_add_return(stats_updates, abs(val));
628 	if (x > MEMCG_CHARGE_BATCH) {
629 		/*
630 		 * If stats_flush_threshold exceeds the threshold
631 		 * (> num_online_cpus()), a cgroup stats flush will be triggered
632 		 * in __mem_cgroup_flush_stats(). Increasing this var further
633 		 * is redundant and simply adds overhead in the atomic update.
634 		 */
635 		if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
636 			atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
637 		__this_cpu_write(stats_updates, 0);
638 	}
639 }
640 
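/*
 * Flush the rstat tree for the entire memcg hierarchy. Contending flushers
 * simply bail out (trylock); a successful flush resets stats_flush_threshold
 * and pushes back the periodic flush deadline.
 */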
641 static void __mem_cgroup_flush_stats(void)
642 {
643 	unsigned long flag;
644 
645 	if (!spin_trylock_irqsave(&stats_flush_lock, flag))
646 		return;
647 
648 	flush_next_time = jiffies_64 + 2*FLUSH_TIME;
649 	cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
650 	atomic_set(&stats_flush_threshold, 0);
651 	spin_unlock_irqrestore(&stats_flush_lock, flag);
652 }
653 
654 void mem_cgroup_flush_stats(void)
655 {
656 	if (atomic_read(&stats_flush_threshold) > num_online_cpus())
657 		__mem_cgroup_flush_stats();
658 }
659 
660 void mem_cgroup_flush_stats_delayed(void)
661 {
662 	if (time_after64(jiffies_64, flush_next_time))
663 		mem_cgroup_flush_stats();
664 }
665 
666 static void flush_memcg_stats_dwork(struct work_struct *w)
667 {
668 	__mem_cgroup_flush_stats();
669 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
670 }
671 
672 /**
673  * __mod_memcg_state - update cgroup memory statistics
674  * @memcg: the memory cgroup
675  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
676  * @val: delta to add to the counter, can be negative
677  */
678 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
679 {
680 	if (mem_cgroup_disabled())
681 		return;
682 
683 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
684 	memcg_rstat_updated(memcg, val);
685 }
686 
687 /* idx can be of type enum memcg_stat_item or node_stat_item. */
688 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
689 {
690 	long x = 0;
691 	int cpu;
692 
693 	for_each_possible_cpu(cpu)
694 		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
695 #ifdef CONFIG_SMP
696 	if (x < 0)
697 		x = 0;
698 #endif
699 	return x;
700 }
701 
702 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
703 			      int val)
704 {
705 	struct mem_cgroup_per_node *pn;
706 	struct mem_cgroup *memcg;
707 
708 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
709 	memcg = pn->memcg;
710 
711 	/*
712 	 * Callers from rmap rely on disabled preemption because they never
713 	 * update their counters from in-interrupt context. For those
714 	 * counters we check that the update is never performed from an
715 	 * interrupt context, while other callers need to have interrupts disabled.
716 	 */
717 	__memcg_stats_lock();
718 	if (IS_ENABLED(CONFIG_DEBUG_VM) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
719 		switch (idx) {
720 		case NR_ANON_MAPPED:
721 		case NR_FILE_MAPPED:
722 		case NR_ANON_THPS:
723 		case NR_SHMEM_PMDMAPPED:
724 		case NR_FILE_PMDMAPPED:
725 			WARN_ON_ONCE(!in_task());
726 			break;
727 		default:
728 			WARN_ON_ONCE(!irqs_disabled());
729 		}
730 	}
731 
732 	/* Update memcg */
733 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
734 
735 	/* Update lruvec */
736 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
737 
738 	memcg_rstat_updated(memcg, val);
739 	memcg_stats_unlock();
740 }
741 
742 /**
743  * __mod_lruvec_state - update lruvec memory statistics
744  * @lruvec: the lruvec
745  * @idx: the stat item
746  * @val: delta to add to the counter, can be negative
747  *
748  * The lruvec is the intersection of the NUMA node and a cgroup. This
749  * function updates the all three counters that are affected by a
750  * function updates all three counters that are affected by a
751  */
752 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
753 			int val)
754 {
755 	/* Update node */
756 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
757 
758 	/* Update memcg and lruvec */
759 	if (!mem_cgroup_disabled())
760 		__mod_memcg_lruvec_state(lruvec, idx, val);
761 }
762 
763 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
764 			     int val)
765 {
766 	struct page *head = compound_head(page); /* rmap on tail pages */
767 	struct mem_cgroup *memcg;
768 	pg_data_t *pgdat = page_pgdat(page);
769 	struct lruvec *lruvec;
770 
771 	rcu_read_lock();
772 	memcg = page_memcg(head);
773 	/* Untracked pages have no memcg, no lruvec. Update only the node */
774 	if (!memcg) {
775 		rcu_read_unlock();
776 		__mod_node_page_state(pgdat, idx, val);
777 		return;
778 	}
779 
780 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
781 	__mod_lruvec_state(lruvec, idx, val);
782 	rcu_read_unlock();
783 }
784 EXPORT_SYMBOL(__mod_lruvec_page_state);
785 
786 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
787 {
788 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
789 	struct mem_cgroup *memcg;
790 	struct lruvec *lruvec;
791 
792 	rcu_read_lock();
793 	memcg = mem_cgroup_from_slab_obj(p);
794 
795 	/*
796 	 * Untracked pages have no memcg, no lruvec. Update only the
797 	 * node. If the slab objects were reparented to the root memcg, then
798 	 * when we free the slab object, we still need to update the per-memcg
799 	 * vmstats to keep them correct for the root memcg.
800 	 */
801 	if (!memcg) {
802 		__mod_node_page_state(pgdat, idx, val);
803 	} else {
804 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
805 		__mod_lruvec_state(lruvec, idx, val);
806 	}
807 	rcu_read_unlock();
808 }
809 
810 /**
811  * __count_memcg_events - account VM events in a cgroup
812  * @memcg: the memory cgroup
813  * @idx: the event item
814  * @count: the number of events that occurred
815  */
816 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
817 			  unsigned long count)
818 {
819 	if (mem_cgroup_disabled())
820 		return;
821 
822 	memcg_stats_lock();
823 	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
824 	memcg_rstat_updated(memcg, count);
825 	memcg_stats_unlock();
826 }
827 
828 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
829 {
830 	return READ_ONCE(memcg->vmstats.events[event]);
831 }
832 
833 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
834 {
835 	long x = 0;
836 	int cpu;
837 
838 	for_each_possible_cpu(cpu)
839 		x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
840 	return x;
841 }
842 
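/*
 * Account a charge (@nr_pages > 0) or uncharge (@nr_pages < 0) as a single
 * PGPGIN/PGPGOUT event and add the page count to the per-CPU event counter
 * used by mem_cgroup_event_ratelimit().
 */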
843 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
844 					 int nr_pages)
845 {
846 	/* pagein of a big page is a single event, so ignore the page size */
847 	if (nr_pages > 0)
848 		__count_memcg_events(memcg, PGPGIN, 1);
849 	else {
850 		__count_memcg_events(memcg, PGPGOUT, 1);
851 		nr_pages = -nr_pages; /* for event */
852 	}
853 
854 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
855 }
856 
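/*
 * Return true if enough page events have occurred since the last time
 * @target fired, and arm the next target value; used to rate-limit
 * threshold and soft limit processing.
 */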
857 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
858 				       enum mem_cgroup_events_target target)
859 {
860 	unsigned long val, next;
861 
862 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
863 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
864 	/* from time_after() in jiffies.h */
865 	if ((long)(next - val) < 0) {
866 		switch (target) {
867 		case MEM_CGROUP_TARGET_THRESH:
868 			next = val + THRESHOLDS_EVENTS_TARGET;
869 			break;
870 		case MEM_CGROUP_TARGET_SOFTLIMIT:
871 			next = val + SOFTLIMIT_EVENTS_TARGET;
872 			break;
873 		default:
874 			break;
875 		}
876 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
877 		return true;
878 	}
879 	return false;
880 }
881 
882 /*
883  * Check events in order.
884  */
886 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
887 {
888 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
889 		return;
890 
891 	/* threshold event is triggered in finer grain than soft limit */
892 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
893 						MEM_CGROUP_TARGET_THRESH))) {
894 		bool do_softlimit;
895 
896 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
897 						MEM_CGROUP_TARGET_SOFTLIMIT);
898 		mem_cgroup_threshold(memcg);
899 		if (unlikely(do_softlimit))
900 			mem_cgroup_update_tree(memcg, nid);
901 	}
902 }
903 
904 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
905 {
906 	/*
907 	 * mm_update_next_owner() may clear mm->owner to NULL
908 	 * if it races with swapoff, page migration, etc.
909 	 * So this can be called with p == NULL.
910 	 */
911 	if (unlikely(!p))
912 		return NULL;
913 
914 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
915 }
916 EXPORT_SYMBOL(mem_cgroup_from_task);
917 
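/*
 * Return the memcg that overrides the charge target for the current
 * context: the per-CPU int_active_memcg when not in task context,
 * current->active_memcg otherwise.
 */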
918 static __always_inline struct mem_cgroup *active_memcg(void)
919 {
920 	if (!in_task())
921 		return this_cpu_read(int_active_memcg);
922 	else
923 		return current->active_memcg;
924 }
925 
926 /**
927  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
928  * @mm: mm from which memcg should be extracted. It can be NULL.
929  *
930  * Obtain a reference on mm->memcg and return it if successful. If mm
931  * is NULL, then the memcg is chosen as follows:
932  * 1) The active memcg, if set.
933  * 2) current->mm->memcg, if available
934  * 3) root memcg
935  * If mem_cgroup is disabled, NULL is returned.
936  */
937 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
938 {
939 	struct mem_cgroup *memcg;
940 
941 	if (mem_cgroup_disabled())
942 		return NULL;
943 
944 	/*
945 	 * Page cache insertions can happen without an
946 	 * actual mm context, e.g. during disk probing
947 	 * on boot, loopback IO, acct() writes etc.
948 	 *
949 	 * No need to css_get on root memcg as the reference
950 	 * counting is disabled on the root level in the
951 	 * cgroup core. See CSS_NO_REF.
952 	 */
953 	if (unlikely(!mm)) {
954 		memcg = active_memcg();
955 		if (unlikely(memcg)) {
956 			/* remote memcg must hold a ref */
957 			css_get(&memcg->css);
958 			return memcg;
959 		}
960 		mm = current->mm;
961 		if (unlikely(!mm))
962 			return root_mem_cgroup;
963 	}
964 
965 	rcu_read_lock();
966 	do {
967 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
968 		if (unlikely(!memcg))
969 			memcg = root_mem_cgroup;
970 	} while (!css_tryget(&memcg->css));
971 	rcu_read_unlock();
972 	return memcg;
973 }
974 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
975 
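/*
 * Return true when a kernel memory charge cannot be attributed to a memcg:
 * no remote memcg is active and the current context has no usable mm
 * (interrupt context, kernel threads).
 */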
976 static __always_inline bool memcg_kmem_bypass(void)
977 {
978 	/* Allow remote memcg charging from any context. */
979 	if (unlikely(active_memcg()))
980 		return false;
981 
982 	/* Memcg to charge can't be determined. */
983 	if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
984 		return true;
985 
986 	return false;
987 }
988 
989 /**
990  * mem_cgroup_iter - iterate over memory cgroup hierarchy
991  * @root: hierarchy root
992  * @prev: previously returned memcg, NULL on first invocation
993  * @reclaim: cookie for shared reclaim walks, NULL for full walks
994  *
995  * Returns references to children of the hierarchy below @root, or
996  * @root itself, or %NULL after a full round-trip.
997  *
998  * Caller must pass the return value in @prev on subsequent
999  * invocations for reference counting, or use mem_cgroup_iter_break()
1000  * to cancel a hierarchy walk before the round-trip is complete.
1001  *
1002  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1003  * in the hierarchy among all concurrent reclaimers operating on the
1004  * same node.
1005  */
1006 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1007 				   struct mem_cgroup *prev,
1008 				   struct mem_cgroup_reclaim_cookie *reclaim)
1009 {
1010 	struct mem_cgroup_reclaim_iter *iter;
1011 	struct cgroup_subsys_state *css = NULL;
1012 	struct mem_cgroup *memcg = NULL;
1013 	struct mem_cgroup *pos = NULL;
1014 
1015 	if (mem_cgroup_disabled())
1016 		return NULL;
1017 
1018 	if (!root)
1019 		root = root_mem_cgroup;
1020 
1021 	rcu_read_lock();
1022 
1023 	if (reclaim) {
1024 		struct mem_cgroup_per_node *mz;
1025 
1026 		mz = root->nodeinfo[reclaim->pgdat->node_id];
1027 		iter = &mz->iter;
1028 
1029 		/*
1030 		 * On start, join the current reclaim iteration cycle.
1031 		 * Exit when a concurrent walker completes it.
1032 		 */
1033 		if (!prev)
1034 			reclaim->generation = iter->generation;
1035 		else if (reclaim->generation != iter->generation)
1036 			goto out_unlock;
1037 
1038 		while (1) {
1039 			pos = READ_ONCE(iter->position);
1040 			if (!pos || css_tryget(&pos->css))
1041 				break;
1042 			/*
1043 			 * css reference reached zero, so iter->position will
1044 			 * be cleared by ->css_released. However, we should not
1045 			 * rely on this happening soon, because ->css_released
1046 			 * is called from a work queue, and by busy-waiting we
1047 			 * might block it. So we clear iter->position right
1048 			 * away.
1049 			 */
1050 			(void)cmpxchg(&iter->position, pos, NULL);
1051 		}
1052 	} else if (prev) {
1053 		pos = prev;
1054 	}
1055 
1056 	if (pos)
1057 		css = &pos->css;
1058 
1059 	for (;;) {
1060 		css = css_next_descendant_pre(css, &root->css);
1061 		if (!css) {
1062 			/*
1063 			 * Reclaimers share the hierarchy walk, and a
1064 			 * new one might jump in right at the end of
1065 			 * the hierarchy - make sure they see at least
1066 			 * one group and restart from the beginning.
1067 			 */
1068 			if (!prev)
1069 				continue;
1070 			break;
1071 		}
1072 
1073 		/*
1074 		 * Verify the css and acquire a reference.  The root
1075 		 * is provided by the caller, so we know it's alive
1076 		 * and kicking, and don't take an extra reference.
1077 		 */
1078 		if (css == &root->css || css_tryget(css)) {
1079 			memcg = mem_cgroup_from_css(css);
1080 			break;
1081 		}
1082 	}
1083 
1084 	if (reclaim) {
1085 		/*
1086 		 * The position could have already been updated by a competing
1087 		 * thread, so check that the value hasn't changed since we read
1088 		 * it to avoid reclaiming from the same cgroup twice.
1089 		 */
1090 		(void)cmpxchg(&iter->position, pos, memcg);
1091 
1092 		if (pos)
1093 			css_put(&pos->css);
1094 
1095 		if (!memcg)
1096 			iter->generation++;
1097 	}
1098 
1099 out_unlock:
1100 	rcu_read_unlock();
1101 	if (prev && prev != root)
1102 		css_put(&prev->css);
1103 
1104 	return memcg;
1105 }
1106 
1107 /**
1108  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1109  * @root: hierarchy root
1110  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1111  */
1112 void mem_cgroup_iter_break(struct mem_cgroup *root,
1113 			   struct mem_cgroup *prev)
1114 {
1115 	if (!root)
1116 		root = root_mem_cgroup;
1117 	if (prev && prev != root)
1118 		css_put(&prev->css);
1119 }
1120 
1121 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1122 					struct mem_cgroup *dead_memcg)
1123 {
1124 	struct mem_cgroup_reclaim_iter *iter;
1125 	struct mem_cgroup_per_node *mz;
1126 	int nid;
1127 
1128 	for_each_node(nid) {
1129 		mz = from->nodeinfo[nid];
1130 		iter = &mz->iter;
1131 		cmpxchg(&iter->position, dead_memcg, NULL);
1132 	}
1133 }
1134 
1135 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1136 {
1137 	struct mem_cgroup *memcg = dead_memcg;
1138 	struct mem_cgroup *last;
1139 
1140 	do {
1141 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1142 		last = memcg;
1143 	} while ((memcg = parent_mem_cgroup(memcg)));
1144 
1145 	/*
1146 	 * When cgroup1 non-hierarchy mode is used,
1147 	 * parent_mem_cgroup() does not walk all the way up to the
1148 	 * cgroup root (root_mem_cgroup). So we have to handle
1149 	 * dead_memcg from cgroup root separately.
1150 	 */
1151 	if (last != root_mem_cgroup)
1152 		__invalidate_reclaim_iterators(root_mem_cgroup,
1153 						dead_memcg);
1154 }
1155 
1156 /**
1157  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1158  * @memcg: hierarchy root
1159  * @fn: function to call for each task
1160  * @arg: argument passed to @fn
1161  *
1162  * This function iterates over tasks attached to @memcg or to any of its
1163  * descendants and calls @fn for each task. If @fn returns a non-zero
1164  * value, the function breaks the iteration loop and returns the value.
1165  * Otherwise, it will iterate over all tasks and return 0.
1166  *
1167  * This function must not be called for the root memory cgroup.
1168  */
1169 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1170 			  int (*fn)(struct task_struct *, void *), void *arg)
1171 {
1172 	struct mem_cgroup *iter;
1173 	int ret = 0;
1174 
1175 	BUG_ON(memcg == root_mem_cgroup);
1176 
1177 	for_each_mem_cgroup_tree(iter, memcg) {
1178 		struct css_task_iter it;
1179 		struct task_struct *task;
1180 
1181 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1182 		while (!ret && (task = css_task_iter_next(&it)))
1183 			ret = fn(task, arg);
1184 		css_task_iter_end(&it);
1185 		if (ret) {
1186 			mem_cgroup_iter_break(memcg, iter);
1187 			break;
1188 		}
1189 	}
1190 	return ret;
1191 }
1192 
1193 #ifdef CONFIG_DEBUG_VM
1194 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1195 {
1196 	struct mem_cgroup *memcg;
1197 
1198 	if (mem_cgroup_disabled())
1199 		return;
1200 
1201 	memcg = folio_memcg(folio);
1202 
1203 	if (!memcg)
1204 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
1205 	else
1206 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1207 }
1208 #endif
1209 
1210 /**
1211  * folio_lruvec_lock - Lock the lruvec for a folio.
1212  * @folio: Pointer to the folio.
1213  *
1214  * These functions are safe to use under any of the following conditions:
1215  * - folio locked
1216  * - folio_test_lru false
1217  * - folio_memcg_lock()
1218  * - folio frozen (refcount of 0)
1219  *
1220  * Return: The lruvec this folio is on with its lock held.
1221  */
1222 struct lruvec *folio_lruvec_lock(struct folio *folio)
1223 {
1224 	struct lruvec *lruvec = folio_lruvec(folio);
1225 
1226 	spin_lock(&lruvec->lru_lock);
1227 	lruvec_memcg_debug(lruvec, folio);
1228 
1229 	return lruvec;
1230 }
1231 
1232 /**
1233  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1234  * @folio: Pointer to the folio.
1235  *
1236  * These functions are safe to use under any of the following conditions:
1237  * - folio locked
1238  * - folio_test_lru false
1239  * - folio_memcg_lock()
1240  * - folio frozen (refcount of 0)
1241  *
1242  * Return: The lruvec this folio is on with its lock held and interrupts
1243  * disabled.
1244  */
1245 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1246 {
1247 	struct lruvec *lruvec = folio_lruvec(folio);
1248 
1249 	spin_lock_irq(&lruvec->lru_lock);
1250 	lruvec_memcg_debug(lruvec, folio);
1251 
1252 	return lruvec;
1253 }
1254 
1255 /**
1256  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1257  * @folio: Pointer to the folio.
1258  * @flags: Pointer to irqsave flags.
1259  *
1260  * These functions are safe to use under any of the following conditions:
1261  * - folio locked
1262  * - folio_test_lru false
1263  * - folio_memcg_lock()
1264  * - folio frozen (refcount of 0)
1265  *
1266  * Return: The lruvec this folio is on with its lock held and interrupts
1267  * disabled.
1268  */
1269 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1270 		unsigned long *flags)
1271 {
1272 	struct lruvec *lruvec = folio_lruvec(folio);
1273 
1274 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1275 	lruvec_memcg_debug(lruvec, folio);
1276 
1277 	return lruvec;
1278 }
1279 
1280 /**
1281  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1282  * @lruvec: mem_cgroup per zone lru vector
1283  * @lru: index of lru list the page is sitting on
1284  * @zid: zone id of the accounted pages
1285  * @nr_pages: positive when adding or negative when removing
1286  *
1287  * This function must be called under lru_lock, just before a page is added
1288  * to or just after a page is removed from an lru list.
1289  */
1290 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1291 				int zid, int nr_pages)
1292 {
1293 	struct mem_cgroup_per_node *mz;
1294 	unsigned long *lru_size;
1295 	long size;
1296 
1297 	if (mem_cgroup_disabled())
1298 		return;
1299 
1300 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1301 	lru_size = &mz->lru_zone_size[zid][lru];
1302 
1303 	if (nr_pages < 0)
1304 		*lru_size += nr_pages;
1305 
1306 	size = *lru_size;
1307 	if (WARN_ONCE(size < 0,
1308 		"%s(%p, %d, %d): lru_size %ld\n",
1309 		__func__, lruvec, lru, nr_pages, size)) {
1310 		VM_BUG_ON(1);
1311 		*lru_size = 0;
1312 	}
1313 
1314 	if (nr_pages > 0)
1315 		*lru_size += nr_pages;
1316 }
1317 
1318 /**
1319  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1320  * @memcg: the memory cgroup
1321  *
1322  * Returns the maximum amount of memory @memcg can be charged with, in
1323  * pages.
1324  */
1325 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1326 {
1327 	unsigned long margin = 0;
1328 	unsigned long count;
1329 	unsigned long limit;
1330 
1331 	count = page_counter_read(&memcg->memory);
1332 	limit = READ_ONCE(memcg->memory.max);
1333 	if (count < limit)
1334 		margin = limit - count;
1335 
1336 	if (do_memsw_account()) {
1337 		count = page_counter_read(&memcg->memsw);
1338 		limit = READ_ONCE(memcg->memsw.max);
1339 		if (count < limit)
1340 			margin = min(margin, limit - count);
1341 		else
1342 			margin = 0;
1343 	}
1344 
1345 	return margin;
1346 }
1347 
1348 /*
1349  * A routine for checking whether "memcg" is under move_account() or not.
1350  *
1351  * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of
1352  * the moving cgroups. This is for waiting at high memory pressure
1353  * caused by "move".
1354  */
1355 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1356 {
1357 	struct mem_cgroup *from;
1358 	struct mem_cgroup *to;
1359 	bool ret = false;
1360 	/*
1361 	 * Unlike the task_move routines, we access mc.to and mc.from without
1362 	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1363 	 */
1364 	spin_lock(&mc.lock);
1365 	from = mc.from;
1366 	to = mc.to;
1367 	if (!from)
1368 		goto unlock;
1369 
1370 	ret = mem_cgroup_is_descendant(from, memcg) ||
1371 		mem_cgroup_is_descendant(to, memcg);
1372 unlock:
1373 	spin_unlock(&mc.lock);
1374 	return ret;
1375 }
1376 
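/*
 * If another task is moving charges and @memcg is involved in that move,
 * wait on mc.waitq until the move completes. Returns true if the caller
 * had to wait.
 */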
1377 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1378 {
1379 	if (mc.moving_task && current != mc.moving_task) {
1380 		if (mem_cgroup_under_move(memcg)) {
1381 			DEFINE_WAIT(wait);
1382 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1383 			/* moving charge context might have finished. */
1384 			if (mc.moving_task)
1385 				schedule();
1386 			finish_wait(&mc.waitq, &wait);
1387 			return true;
1388 		}
1389 	}
1390 	return false;
1391 }
1392 
1393 struct memory_stat {
1394 	const char *name;
1395 	unsigned int idx;
1396 };
1397 
1398 static const struct memory_stat memory_stats[] = {
1399 	{ "anon",			NR_ANON_MAPPED			},
1400 	{ "file",			NR_FILE_PAGES			},
1401 	{ "kernel",			MEMCG_KMEM			},
1402 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1403 	{ "pagetables",			NR_PAGETABLE			},
1404 	{ "percpu",			MEMCG_PERCPU_B			},
1405 	{ "sock",			MEMCG_SOCK			},
1406 	{ "vmalloc",			MEMCG_VMALLOC			},
1407 	{ "shmem",			NR_SHMEM			},
1408 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1409 	{ "zswap",			MEMCG_ZSWAP_B			},
1410 	{ "zswapped",			MEMCG_ZSWAPPED			},
1411 #endif
1412 	{ "file_mapped",		NR_FILE_MAPPED			},
1413 	{ "file_dirty",			NR_FILE_DIRTY			},
1414 	{ "file_writeback",		NR_WRITEBACK			},
1415 #ifdef CONFIG_SWAP
1416 	{ "swapcached",			NR_SWAPCACHE			},
1417 #endif
1418 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1419 	{ "anon_thp",			NR_ANON_THPS			},
1420 	{ "file_thp",			NR_FILE_THPS			},
1421 	{ "shmem_thp",			NR_SHMEM_THPS			},
1422 #endif
1423 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1424 	{ "active_anon",		NR_ACTIVE_ANON			},
1425 	{ "inactive_file",		NR_INACTIVE_FILE		},
1426 	{ "active_file",		NR_ACTIVE_FILE			},
1427 	{ "unevictable",		NR_UNEVICTABLE			},
1428 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1429 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1430 
1431 	/* The memory events */
1432 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1433 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1434 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1435 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1436 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1437 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1438 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1439 };
1440 
1441 /* Translate stat items to the correct unit for memory.stat output */
1442 static int memcg_page_state_unit(int item)
1443 {
1444 	switch (item) {
1445 	case MEMCG_PERCPU_B:
1446 	case MEMCG_ZSWAP_B:
1447 	case NR_SLAB_RECLAIMABLE_B:
1448 	case NR_SLAB_UNRECLAIMABLE_B:
1449 	case WORKINGSET_REFAULT_ANON:
1450 	case WORKINGSET_REFAULT_FILE:
1451 	case WORKINGSET_ACTIVATE_ANON:
1452 	case WORKINGSET_ACTIVATE_FILE:
1453 	case WORKINGSET_RESTORE_ANON:
1454 	case WORKINGSET_RESTORE_FILE:
1455 	case WORKINGSET_NODERECLAIM:
1456 		return 1;
1457 	case NR_KERNEL_STACK_KB:
1458 		return SZ_1K;
1459 	default:
1460 		return PAGE_SIZE;
1461 	}
1462 }
1463 
1464 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1465 						    int item)
1466 {
1467 	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1468 }
1469 
1470 /* Subset of vm_event_item to report for memcg event stats */
1471 static const unsigned int memcg_vm_event_stat[] = {
1472 	PGSCAN_KSWAPD,
1473 	PGSCAN_DIRECT,
1474 	PGSTEAL_KSWAPD,
1475 	PGSTEAL_DIRECT,
1476 	PGFAULT,
1477 	PGMAJFAULT,
1478 	PGREFILL,
1479 	PGACTIVATE,
1480 	PGDEACTIVATE,
1481 	PGLAZYFREE,
1482 	PGLAZYFREED,
1483 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1484 	ZSWPIN,
1485 	ZSWPOUT,
1486 #endif
1487 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1488 	THP_FAULT_ALLOC,
1489 	THP_COLLAPSE_ALLOC,
1490 #endif
1491 };
1492 
1493 static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
1494 {
1495 	struct seq_buf s;
1496 	int i;
1497 
1498 	seq_buf_init(&s, buf, bufsize);
1499 
1500 	/*
1501 	 * Provide statistics on the state of the memory subsystem as
1502 	 * well as cumulative event counters that show past behavior.
1503 	 *
1504 	 * This list is ordered following a combination of these gradients:
1505 	 * 1) generic big picture -> specifics and details
1506 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1507 	 *
1508 	 * Current memory state:
1509 	 */
1510 	mem_cgroup_flush_stats();
1511 
1512 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1513 		u64 size;
1514 
1515 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1516 		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1517 
1518 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1519 			size += memcg_page_state_output(memcg,
1520 							NR_SLAB_RECLAIMABLE_B);
1521 			seq_buf_printf(&s, "slab %llu\n", size);
1522 		}
1523 	}
1524 
1525 	/* Accumulated memory events */
1526 	seq_buf_printf(&s, "pgscan %lu\n",
1527 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1528 		       memcg_events(memcg, PGSCAN_DIRECT));
1529 	seq_buf_printf(&s, "pgsteal %lu\n",
1530 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1531 		       memcg_events(memcg, PGSTEAL_DIRECT));
1532 
1533 	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++)
1534 		seq_buf_printf(&s, "%s %lu\n",
1535 			       vm_event_name(memcg_vm_event_stat[i]),
1536 			       memcg_events(memcg, memcg_vm_event_stat[i]));
1537 
1538 	/* The above should easily fit into one page */
1539 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1540 }
1541 
1542 #define K(x) ((x) << (PAGE_SHIFT-10))
1543 /**
1544  * mem_cgroup_print_oom_context: Print OOM information relevant to
1545  * the memory controller.
1546  * @memcg: The memory cgroup that went over limit
1547  * @p: Task that is going to be killed
1548  *
1549  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1550  * enabled
1551  */
1552 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1553 {
1554 	rcu_read_lock();
1555 
1556 	if (memcg) {
1557 		pr_cont(",oom_memcg=");
1558 		pr_cont_cgroup_path(memcg->css.cgroup);
1559 	} else
1560 		pr_cont(",global_oom");
1561 	if (p) {
1562 		pr_cont(",task_memcg=");
1563 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1564 	}
1565 	rcu_read_unlock();
1566 }
1567 
1568 /**
1569  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1570  * the memory controller.
1571  * @memcg: The memory cgroup that went over limit
1572  */
1573 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1574 {
1575 	/* Use a static buffer, as the caller is holding oom_lock. */
1576 	static char buf[PAGE_SIZE];
1577 
1578 	lockdep_assert_held(&oom_lock);
1579 
1580 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1581 		K((u64)page_counter_read(&memcg->memory)),
1582 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1583 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1584 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1585 			K((u64)page_counter_read(&memcg->swap)),
1586 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1587 	else {
1588 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1589 			K((u64)page_counter_read(&memcg->memsw)),
1590 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1591 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1592 			K((u64)page_counter_read(&memcg->kmem)),
1593 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1594 	}
1595 
1596 	pr_info("Memory cgroup stats for ");
1597 	pr_cont_cgroup_path(memcg->css.cgroup);
1598 	pr_cont(":");
1599 	memory_stat_format(memcg, buf, sizeof(buf));
1600 	pr_info("%s", buf);
1601 }
1602 
1603 /*
1604  * Return the memory (and swap, if configured) limit for a memcg.
1605  */
1606 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1607 {
1608 	unsigned long max = READ_ONCE(memcg->memory.max);
1609 
1610 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1611 		if (mem_cgroup_swappiness(memcg))
1612 			max += min(READ_ONCE(memcg->swap.max),
1613 				   (unsigned long)total_swap_pages);
1614 	} else { /* v1 */
1615 		if (mem_cgroup_swappiness(memcg)) {
1616 			/* Calculate swap excess capacity from memsw limit */
1617 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1618 
1619 			max += min(swap, (unsigned long)total_swap_pages);
1620 		}
1621 	}
1622 	return max;
1623 }
1624 
1625 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1626 {
1627 	return page_counter_read(&memcg->memory);
1628 }
1629 
1630 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1631 				     int order)
1632 {
1633 	struct oom_control oc = {
1634 		.zonelist = NULL,
1635 		.nodemask = NULL,
1636 		.memcg = memcg,
1637 		.gfp_mask = gfp_mask,
1638 		.order = order,
1639 	};
1640 	bool ret = true;
1641 
1642 	if (mutex_lock_killable(&oom_lock))
1643 		return true;
1644 
1645 	if (mem_cgroup_margin(memcg) >= (1 << order))
1646 		goto unlock;
1647 
1648 	/*
1649 	 * A few threads which were not waiting at mutex_lock_killable() can
1650 	 * fail to bail out. Therefore, check again after holding oom_lock.
1651 	 */
1652 	ret = task_is_dying() || out_of_memory(&oc);
1653 
1654 unlock:
1655 	mutex_unlock(&oom_lock);
1656 	return ret;
1657 }
1658 
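/*
 * Reclaim from the subtree of @root_memcg on @pgdat until the soft limit
 * excess of @root_memcg is gone or the loop limit is reached. Returns the
 * number of reclaimed pages; the number of pages scanned is added to
 * @total_scanned.
 */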
1659 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1660 				   pg_data_t *pgdat,
1661 				   gfp_t gfp_mask,
1662 				   unsigned long *total_scanned)
1663 {
1664 	struct mem_cgroup *victim = NULL;
1665 	int total = 0;
1666 	int loop = 0;
1667 	unsigned long excess;
1668 	unsigned long nr_scanned;
1669 	struct mem_cgroup_reclaim_cookie reclaim = {
1670 		.pgdat = pgdat,
1671 	};
1672 
1673 	excess = soft_limit_excess(root_memcg);
1674 
1675 	while (1) {
1676 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1677 		if (!victim) {
1678 			loop++;
1679 			if (loop >= 2) {
1680 				/*
1681 				 * If we have not been able to reclaim
1682 				 * anything, it might be because there are
1683 				 * no reclaimable pages under this hierarchy
1684 				 */
1685 				if (!total)
1686 					break;
1687 				/*
1688 				 * We want to do more targeted reclaim.
1689 				 * excess >> 2 is not so large as to
1690 				 * reclaim too much, nor so small that we keep
1691 				 * coming back to reclaim from this cgroup
1692 				 */
1693 				if (total >= (excess >> 2) ||
1694 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1695 					break;
1696 			}
1697 			continue;
1698 		}
1699 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1700 					pgdat, &nr_scanned);
1701 		*total_scanned += nr_scanned;
1702 		if (!soft_limit_excess(root_memcg))
1703 			break;
1704 	}
1705 	mem_cgroup_iter_break(root_memcg, victim);
1706 	return total;
1707 }
1708 
1709 #ifdef CONFIG_LOCKDEP
1710 static struct lockdep_map memcg_oom_lock_dep_map = {
1711 	.name = "memcg_oom_lock",
1712 };
1713 #endif
1714 
1715 static DEFINE_SPINLOCK(memcg_oom_lock);
1716 
1717 /*
1718  * Check whether the OOM killer is already running under our hierarchy.
1719  * If someone is running it, return false.
1720  */
1721 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1722 {
1723 	struct mem_cgroup *iter, *failed = NULL;
1724 
1725 	spin_lock(&memcg_oom_lock);
1726 
1727 	for_each_mem_cgroup_tree(iter, memcg) {
1728 		if (iter->oom_lock) {
1729 			/*
1730 			 * this subtree of our hierarchy is already locked,
1731 			 * so we cannot take the lock.
1732 			 */
1733 			failed = iter;
1734 			mem_cgroup_iter_break(memcg, iter);
1735 			break;
1736 		} else
1737 			iter->oom_lock = true;
1738 	}
1739 
1740 	if (failed) {
1741 		/*
1742 		 * OK, we failed to lock the whole subtree so we have
1743 		 * to clean up what we already set up, up to the failing subtree.
1744 		 */
1745 		for_each_mem_cgroup_tree(iter, memcg) {
1746 			if (iter == failed) {
1747 				mem_cgroup_iter_break(memcg, iter);
1748 				break;
1749 			}
1750 			iter->oom_lock = false;
1751 		}
1752 	} else
1753 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1754 
1755 	spin_unlock(&memcg_oom_lock);
1756 
1757 	return !failed;
1758 }
1759 
1760 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1761 {
1762 	struct mem_cgroup *iter;
1763 
1764 	spin_lock(&memcg_oom_lock);
1765 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1766 	for_each_mem_cgroup_tree(iter, memcg)
1767 		iter->oom_lock = false;
1768 	spin_unlock(&memcg_oom_lock);
1769 }
1770 
1771 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1772 {
1773 	struct mem_cgroup *iter;
1774 
1775 	spin_lock(&memcg_oom_lock);
1776 	for_each_mem_cgroup_tree(iter, memcg)
1777 		iter->under_oom++;
1778 	spin_unlock(&memcg_oom_lock);
1779 }
1780 
1781 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1782 {
1783 	struct mem_cgroup *iter;
1784 
1785 	/*
1786 	 * Be careful about under_oom underflows because a child memcg
1787 	 * could have been added after mem_cgroup_mark_under_oom.
1788 	 */
1789 	spin_lock(&memcg_oom_lock);
1790 	for_each_mem_cgroup_tree(iter, memcg)
1791 		if (iter->under_oom > 0)
1792 			iter->under_oom--;
1793 	spin_unlock(&memcg_oom_lock);
1794 }
1795 
1796 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1797 
1798 struct oom_wait_info {
1799 	struct mem_cgroup *memcg;
1800 	wait_queue_entry_t	wait;
1801 };
1802 
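/*
 * Wake-up callback for memcg_oom_waitq: only wake waiters whose memcg is
 * an ancestor or descendant of the memcg that triggered the OOM wakeup.
 */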
1803 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1804 	unsigned mode, int sync, void *arg)
1805 {
1806 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1807 	struct mem_cgroup *oom_wait_memcg;
1808 	struct oom_wait_info *oom_wait_info;
1809 
1810 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1811 	oom_wait_memcg = oom_wait_info->memcg;
1812 
1813 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1814 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1815 		return 0;
1816 	return autoremove_wake_function(wait, mode, sync, arg);
1817 }
1818 
1819 static void memcg_oom_recover(struct mem_cgroup *memcg)
1820 {
1821 	/*
1822 	 * For the following lockless ->under_oom test, the only required
1823 	 * guarantee is that it must see the state asserted by an OOM when
1824 	 * this function is called as a result of userland actions
1825 	 * triggered by the notification of the OOM.  This is trivially
1826 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1827 	 * triggering notification.
1828 	 */
1829 	if (memcg && memcg->under_oom)
1830 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1831 }
1832 
1833 /*
1834  * Returns true if successfully killed one or more processes. Though in some
1835  * corner cases it can return true even without killing any process.
1836  */
1837 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1838 {
1839 	bool locked, ret;
1840 
1841 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1842 		return false;
1843 
1844 	memcg_memory_event(memcg, MEMCG_OOM);
1845 
1846 	/*
1847 	 * We are in the middle of the charge context here, so we
1848 	 * don't want to block when potentially sitting on a callstack
1849 	 * that holds all kinds of filesystem and mm locks.
1850 	 *
1851 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1852 	 * handling until the charge can succeed; remember the context and put
1853 	 * the task to sleep at the end of the page fault when all locks are
1854 	 * released.
1855 	 *
1856 	 * On the other hand, in-kernel OOM killer allows for an async victim
1857 	 * memory reclaim (oom_reaper) and that means that we are not solely
1858 	 * relying on the oom victim to make a forward progress and we can
1859 	 * invoke the oom killer here.
1860 	 *
1861 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1862 	 * victim and then we have to bail out from the charge path.
1863 	 */
1864 	if (memcg->oom_kill_disable) {
1865 		if (current->in_user_fault) {
1866 			css_get(&memcg->css);
1867 			current->memcg_in_oom = memcg;
1868 			current->memcg_oom_gfp_mask = mask;
1869 			current->memcg_oom_order = order;
1870 		}
1871 		return false;
1872 	}
1873 
1874 	mem_cgroup_mark_under_oom(memcg);
1875 
1876 	locked = mem_cgroup_oom_trylock(memcg);
1877 
1878 	if (locked)
1879 		mem_cgroup_oom_notify(memcg);
1880 
1881 	mem_cgroup_unmark_under_oom(memcg);
1882 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1883 
1884 	if (locked)
1885 		mem_cgroup_oom_unlock(memcg);
1886 
1887 	return ret;
1888 }
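
/*
 * A worked illustration of the order check above, assuming 4 KiB base
 * pages and PAGE_ALLOC_COSTLY_ORDER == 3 (the usual values): a failing
 * order-4 charge (16 pages, 64 KiB) returns false here without invoking
 * the OOM killer, and try_charge_memcg() simply fails the charge with
 * -ENOMEM rather than killing tasks.  A failing order-0 charge, by
 * contrast, may end up in mem_cgroup_out_of_memory().
 */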
1889 
1890 /**
1891  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1892  * @handle: actually kill/wait or just clean up the OOM state
1893  *
1894  * This has to be called at the end of a page fault if the memcg OOM
1895  * handler was enabled.
1896  *
1897  * Memcg supports userspace OOM handling where failed allocations must
1898  * sleep on a waitqueue until the userspace task resolves the
1899  * situation.  Sleeping directly in the charge context with all kinds
1900  * of locks held is not a good idea, instead we remember an OOM state
1901  * in the task and mem_cgroup_oom_synchronize() has to be called at
1902  * the end of the page fault to complete the OOM handling.
1903  *
1904  * Returns %true if an ongoing memcg OOM situation was detected and
1905  * completed, %false otherwise.
1906  */
1907 bool mem_cgroup_oom_synchronize(bool handle)
1908 {
1909 	struct mem_cgroup *memcg = current->memcg_in_oom;
1910 	struct oom_wait_info owait;
1911 	bool locked;
1912 
1913 	/* OOM is global, do not handle */
1914 	if (!memcg)
1915 		return false;
1916 
1917 	if (!handle)
1918 		goto cleanup;
1919 
1920 	owait.memcg = memcg;
1921 	owait.wait.flags = 0;
1922 	owait.wait.func = memcg_oom_wake_function;
1923 	owait.wait.private = current;
1924 	INIT_LIST_HEAD(&owait.wait.entry);
1925 
1926 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1927 	mem_cgroup_mark_under_oom(memcg);
1928 
1929 	locked = mem_cgroup_oom_trylock(memcg);
1930 
1931 	if (locked)
1932 		mem_cgroup_oom_notify(memcg);
1933 
1934 	if (locked && !memcg->oom_kill_disable) {
1935 		mem_cgroup_unmark_under_oom(memcg);
1936 		finish_wait(&memcg_oom_waitq, &owait.wait);
1937 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1938 					 current->memcg_oom_order);
1939 	} else {
1940 		schedule();
1941 		mem_cgroup_unmark_under_oom(memcg);
1942 		finish_wait(&memcg_oom_waitq, &owait.wait);
1943 	}
1944 
1945 	if (locked) {
1946 		mem_cgroup_oom_unlock(memcg);
1947 		/*
1948 		 * There is no guarantee that an OOM-lock contender
1949 		 * sees the wakeups triggered by the OOM kill
1950 		 * uncharges.  Wake any sleepers explicitly.
1951 		 */
1952 		memcg_oom_recover(memcg);
1953 	}
1954 cleanup:
1955 	current->memcg_in_oom = NULL;
1956 	css_put(&memcg->css);
1957 	return true;
1958 }
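
/*
 * One way the pieces above fit together under cgroup1, for illustration:
 * with the OOM killer disabled through memory.oom_control, a page fault
 * that fails to charge has its memcg recorded in current->memcg_in_oom
 * by mem_cgroup_oom(), and the fault path calls
 * mem_cgroup_oom_synchronize(true) on its way out.  The task then sleeps
 * killably on memcg_oom_waitq while a monitoring process, notified via
 * the oom eventfd, raises the limit or frees memory; once the limit is
 * enlarged, mem_cgroup_resize_max() calls memcg_oom_recover(), waking
 * the sleeper so the fault can be retried.
 */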
1959 
1960 /**
1961  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1962  * @victim: task to be killed by the OOM killer
1963  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1964  *
1965  * Returns a pointer to a memory cgroup, which has to be cleaned up
1966  * by killing all of its OOM-killable tasks.
1967  *
1968  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1969  */
1970 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1971 					    struct mem_cgroup *oom_domain)
1972 {
1973 	struct mem_cgroup *oom_group = NULL;
1974 	struct mem_cgroup *memcg;
1975 
1976 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1977 		return NULL;
1978 
1979 	if (!oom_domain)
1980 		oom_domain = root_mem_cgroup;
1981 
1982 	rcu_read_lock();
1983 
1984 	memcg = mem_cgroup_from_task(victim);
1985 	if (memcg == root_mem_cgroup)
1986 		goto out;
1987 
1988 	/*
1989 	 * If the victim task has been asynchronously moved to a different
1990 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1991 	 * In this case it's better to ignore memory.group.oom.
1992 	 */
1993 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1994 		goto out;
1995 
1996 	/*
1997 	 * Traverse the memory cgroup hierarchy from the victim task's
1998 	 * cgroup up to the OOMing cgroup (or root) to find the
1999 	 * highest-level memory cgroup with oom.group set.
2000 	 */
2001 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2002 		if (memcg->oom_group)
2003 			oom_group = memcg;
2004 
2005 		if (memcg == oom_domain)
2006 			break;
2007 	}
2008 
2009 	if (oom_group)
2010 		css_get(&oom_group->css);
2011 out:
2012 	rcu_read_unlock();
2013 
2014 	return oom_group;
2015 }
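
/*
 * For example, take a hypothetical hierarchy A/B/C with the victim task
 * in C and a memcg OOM scoped at A (oom_domain == A): if both A and B
 * have memory.oom.group set, the loop above keeps overwriting oom_group
 * on the way up and returns A, so the whole of A is cleaned up.  If only
 * C has it set, C itself is returned; if none do, NULL is returned and
 * only the selected victim is killed.
 */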
2016 
2017 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2018 {
2019 	pr_info("Tasks in ");
2020 	pr_cont_cgroup_path(memcg->css.cgroup);
2021 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2022 }
2023 
2024 /**
2025  * folio_memcg_lock - Bind a folio to its memcg.
2026  * @folio: The folio.
2027  *
2028  * This function prevents unlocked LRU folios from being moved to
2029  * another cgroup.
2030  *
2031  * It ensures the lifetime of the bound memcg.  The caller is responsible
2032  * for the lifetime of the folio.
2033  */
2034 void folio_memcg_lock(struct folio *folio)
2035 {
2036 	struct mem_cgroup *memcg;
2037 	unsigned long flags;
2038 
2039 	/*
2040 	 * The RCU lock is held throughout the transaction.  The fast
2041 	 * path can get away without acquiring the memcg->move_lock
2042 	 * because page moving starts with an RCU grace period.
2043 	 */
2044 	rcu_read_lock();
2045 
2046 	if (mem_cgroup_disabled())
2047 		return;
2048 again:
2049 	memcg = folio_memcg(folio);
2050 	if (unlikely(!memcg))
2051 		return;
2052 
2053 #ifdef CONFIG_PROVE_LOCKING
2054 	local_irq_save(flags);
2055 	might_lock(&memcg->move_lock);
2056 	local_irq_restore(flags);
2057 #endif
2058 
2059 	if (atomic_read(&memcg->moving_account) <= 0)
2060 		return;
2061 
2062 	spin_lock_irqsave(&memcg->move_lock, flags);
2063 	if (memcg != folio_memcg(folio)) {
2064 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2065 		goto again;
2066 	}
2067 
2068 	/*
2069 	 * When charge migration first begins, we can have multiple
2070 	 * critical sections holding the fast-path RCU lock and one
2071 	 * holding the slowpath move_lock. Track the task who has the
2072 	 * move_lock for unlock_page_memcg().
2073 	 */
2074 	memcg->move_lock_task = current;
2075 	memcg->move_lock_flags = flags;
2076 }
2077 
2078 void lock_page_memcg(struct page *page)
2079 {
2080 	folio_memcg_lock(page_folio(page));
2081 }
2082 
2083 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2084 {
2085 	if (memcg && memcg->move_lock_task == current) {
2086 		unsigned long flags = memcg->move_lock_flags;
2087 
2088 		memcg->move_lock_task = NULL;
2089 		memcg->move_lock_flags = 0;
2090 
2091 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2092 	}
2093 
2094 	rcu_read_unlock();
2095 }
2096 
2097 /**
2098  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2099  * @folio: The folio.
2100  *
2101  * This releases the binding created by folio_memcg_lock().  This does
2102  * not change the accounting of this folio to its memcg, but it does
2103  * permit others to change it.
2104  */
2105 void folio_memcg_unlock(struct folio *folio)
2106 {
2107 	__folio_memcg_unlock(folio_memcg(folio));
2108 }
2109 
2110 void unlock_page_memcg(struct page *page)
2111 {
2112 	folio_memcg_unlock(page_folio(page));
2113 }
2114 
2115 struct memcg_stock_pcp {
2116 	local_lock_t stock_lock;
2117 	struct mem_cgroup *cached; /* this is never the root cgroup */
2118 	unsigned int nr_pages;
2119 
2120 #ifdef CONFIG_MEMCG_KMEM
2121 	struct obj_cgroup *cached_objcg;
2122 	struct pglist_data *cached_pgdat;
2123 	unsigned int nr_bytes;
2124 	int nr_slab_reclaimable_b;
2125 	int nr_slab_unreclaimable_b;
2126 #endif
2127 
2128 	struct work_struct work;
2129 	unsigned long flags;
2130 #define FLUSHING_CACHED_CHARGE	0
2131 };
2132 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2133 	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2134 };
2135 static DEFINE_MUTEX(percpu_charge_mutex);
2136 
2137 #ifdef CONFIG_MEMCG_KMEM
2138 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2139 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2140 				     struct mem_cgroup *root_memcg);
2141 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2142 
2143 #else
2144 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2145 {
2146 	return NULL;
2147 }
2148 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2149 				     struct mem_cgroup *root_memcg)
2150 {
2151 	return false;
2152 }
2153 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2154 {
2155 }
2156 #endif
2157 
2158 /**
2159  * consume_stock: Try to consume stocked charge on this cpu.
2160  * @memcg: memcg to consume from.
2161  * @nr_pages: how many pages to charge.
2162  *
2163  * The charges will only happen if @memcg matches the current cpu's memcg
2164  * stock, and at least @nr_pages are available in that stock.  Failure to
2165  * service an allocation will refill the stock.
2166  *
2167  * returns true if successful, false otherwise.
2168  */
2169 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2170 {
2171 	struct memcg_stock_pcp *stock;
2172 	unsigned long flags;
2173 	bool ret = false;
2174 
2175 	if (nr_pages > MEMCG_CHARGE_BATCH)
2176 		return ret;
2177 
2178 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2179 
2180 	stock = this_cpu_ptr(&memcg_stock);
2181 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2182 		stock->nr_pages -= nr_pages;
2183 		ret = true;
2184 	}
2185 
2186 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2187 
2188 	return ret;
2189 }
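
/*
 * For illustration, assuming MEMCG_CHARGE_BATCH is 64 pages (the actual
 * value is defined in memcontrol.h): the first single-page charge on a
 * CPU whose stock is empty or caches another memcg misses here, so
 * try_charge_memcg() charges a full 64-page batch to the page counters
 * and parks the 63 unused pages in this CPU's stock via refill_stock().
 * The next 63 single-page charges for the same memcg on this CPU are
 * then satisfied from the stock alone, without touching the shared
 * page counters.
 */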
2190 
2191 /*
2192  * Return stocks cached in percpu back to the counters and reset cached information.
2193  */
2194 static void drain_stock(struct memcg_stock_pcp *stock)
2195 {
2196 	struct mem_cgroup *old = stock->cached;
2197 
2198 	if (!old)
2199 		return;
2200 
2201 	if (stock->nr_pages) {
2202 		page_counter_uncharge(&old->memory, stock->nr_pages);
2203 		if (do_memsw_account())
2204 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2205 		stock->nr_pages = 0;
2206 	}
2207 
2208 	css_put(&old->css);
2209 	stock->cached = NULL;
2210 }
2211 
2212 static void drain_local_stock(struct work_struct *dummy)
2213 {
2214 	struct memcg_stock_pcp *stock;
2215 	struct obj_cgroup *old = NULL;
2216 	unsigned long flags;
2217 
2218 	/*
2219 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2220 	 * drain_stock races is that we always operate on local CPU stock
2221 	 * here with IRQ disabled
2222 	 */
2223 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2224 
2225 	stock = this_cpu_ptr(&memcg_stock);
2226 	old = drain_obj_stock(stock);
2227 	drain_stock(stock);
2228 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2229 
2230 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2231 	if (old)
2232 		obj_cgroup_put(old);
2233 }
2234 
2235 /*
2236  * Cache charges (nr_pages) to the local per-cpu area.
2237  * They will be consumed by consume_stock() later.
2238  */
2239 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2240 {
2241 	struct memcg_stock_pcp *stock;
2242 
2243 	stock = this_cpu_ptr(&memcg_stock);
2244 	if (stock->cached != memcg) { /* reset if necessary */
2245 		drain_stock(stock);
2246 		css_get(&memcg->css);
2247 		stock->cached = memcg;
2248 	}
2249 	stock->nr_pages += nr_pages;
2250 
2251 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2252 		drain_stock(stock);
2253 }
2254 
2255 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2256 {
2257 	unsigned long flags;
2258 
2259 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2260 	__refill_stock(memcg, nr_pages);
2261 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2262 }
2263 
2264 /*
2265  * Drain all per-CPU charge caches for the given root_memcg and the subtree
2266  * of the hierarchy under it.
2267  */
2268 static void drain_all_stock(struct mem_cgroup *root_memcg)
2269 {
2270 	int cpu, curcpu;
2271 
2272 	/* If someone's already draining, avoid running more workers. */
2273 	if (!mutex_trylock(&percpu_charge_mutex))
2274 		return;
2275 	/*
2276 	 * Notify other cpus that system-wide "drain" is running
2277 	 * We do not care about races with the cpu hotplug because cpu down
2278 	 * as well as workers from this path always operate on the local
2279 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2280 	 */
2281 	migrate_disable();
2282 	curcpu = smp_processor_id();
2283 	for_each_online_cpu(cpu) {
2284 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2285 		struct mem_cgroup *memcg;
2286 		bool flush = false;
2287 
2288 		rcu_read_lock();
2289 		memcg = stock->cached;
2290 		if (memcg && stock->nr_pages &&
2291 		    mem_cgroup_is_descendant(memcg, root_memcg))
2292 			flush = true;
2293 		else if (obj_stock_flush_required(stock, root_memcg))
2294 			flush = true;
2295 		rcu_read_unlock();
2296 
2297 		if (flush &&
2298 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2299 			if (cpu == curcpu)
2300 				drain_local_stock(&stock->work);
2301 			else
2302 				schedule_work_on(cpu, &stock->work);
2303 		}
2304 	}
2305 	migrate_enable();
2306 	mutex_unlock(&percpu_charge_mutex);
2307 }
2308 
2309 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2310 {
2311 	struct memcg_stock_pcp *stock;
2312 
2313 	stock = &per_cpu(memcg_stock, cpu);
2314 	drain_stock(stock);
2315 
2316 	return 0;
2317 }
2318 
2319 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2320 				  unsigned int nr_pages,
2321 				  gfp_t gfp_mask)
2322 {
2323 	unsigned long nr_reclaimed = 0;
2324 
2325 	do {
2326 		unsigned long pflags;
2327 
2328 		if (page_counter_read(&memcg->memory) <=
2329 		    READ_ONCE(memcg->memory.high))
2330 			continue;
2331 
2332 		memcg_memory_event(memcg, MEMCG_HIGH);
2333 
2334 		psi_memstall_enter(&pflags);
2335 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2336 							gfp_mask,
2337 							MEMCG_RECLAIM_MAY_SWAP);
2338 		psi_memstall_leave(&pflags);
2339 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2340 		 !mem_cgroup_is_root(memcg));
2341 
2342 	return nr_reclaimed;
2343 }
2344 
2345 static void high_work_func(struct work_struct *work)
2346 {
2347 	struct mem_cgroup *memcg;
2348 
2349 	memcg = container_of(work, struct mem_cgroup, high_work);
2350 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2351 }
2352 
2353 /*
2354  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2355  * long enough to cause a significant slowdown in most cases, while still
2356  * allowing diagnostics and tracing to proceed without becoming stuck.
2357  */
2358 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2359 
2360 /*
2361  * When calculating the delay, we use these on either side of the exponentiation to
2362  * maintain precision and scale to a reasonable number of jiffies (see the table
2363  * below).
2364  *
2365  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2366  *   overage ratio to a delay.
2367  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2368  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2369  *   to produce a reasonable delay curve.
2370  *
2371  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2372  * reasonable delay curve compared to precision-adjusted overage, not
2373  * penalising heavily at first, but still making sure that growth beyond the
2374  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2375  * example, with a high of 100 megabytes:
2376  *
2377  *  +-------+------------------------+
2378  *  | usage | time to allocate in ms |
2379  *  +-------+------------------------+
2380  *  | 100M  |                      0 |
2381  *  | 101M  |                      6 |
2382  *  | 102M  |                     25 |
2383  *  | 103M  |                     57 |
2384  *  | 104M  |                    102 |
2385  *  | 105M  |                    159 |
2386  *  | 106M  |                    230 |
2387  *  | 107M  |                    313 |
2388  *  | 108M  |                    409 |
2389  *  | 109M  |                    518 |
2390  *  | 110M  |                    639 |
2391  *  | 111M  |                    774 |
2392  *  | 112M  |                    921 |
2393  *  | 113M  |                   1081 |
2394  *  | 114M  |                   1254 |
2395  *  | 115M  |                   1439 |
2396  *  | 116M  |                   1638 |
2397  *  | 117M  |                   1849 |
2398  *  | 118M  |                   2000 |
2399  *  | 119M  |                   2000 |
2400  *  | 120M  |                   2000 |
2401  *  +-------+------------------------+
2402  */
2403 #define MEMCG_DELAY_PRECISION_SHIFT 20
2404 #define MEMCG_DELAY_SCALING_SHIFT 14
2405 
2406 static u64 calculate_overage(unsigned long usage, unsigned long high)
2407 {
2408 	u64 overage;
2409 
2410 	if (usage <= high)
2411 		return 0;
2412 
2413 	/*
2414 	 * Prevent division by 0 in overage calculation by acting as if
2415 	 * it was a threshold of 1 page
2416 	 */
2417 	high = max(high, 1UL);
2418 
2419 	overage = usage - high;
2420 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2421 	return div64_u64(overage, high);
2422 }
2423 
2424 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2425 {
2426 	u64 overage, max_overage = 0;
2427 
2428 	do {
2429 		overage = calculate_overage(page_counter_read(&memcg->memory),
2430 					    READ_ONCE(memcg->memory.high));
2431 		max_overage = max(overage, max_overage);
2432 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2433 		 !mem_cgroup_is_root(memcg));
2434 
2435 	return max_overage;
2436 }
2437 
2438 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2439 {
2440 	u64 overage, max_overage = 0;
2441 
2442 	do {
2443 		overage = calculate_overage(page_counter_read(&memcg->swap),
2444 					    READ_ONCE(memcg->swap.high));
2445 		if (overage)
2446 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2447 		max_overage = max(overage, max_overage);
2448 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2449 		 !mem_cgroup_is_root(memcg));
2450 
2451 	return max_overage;
2452 }
2453 
2454 /*
2455  * Get the number of jiffies for which we should penalise a misbehaving cgroup
2456  * that is exceeding its memory.high, checking both it and its ancestors.
2457  */
2458 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2459 					  unsigned int nr_pages,
2460 					  u64 max_overage)
2461 {
2462 	unsigned long penalty_jiffies;
2463 
2464 	if (!max_overage)
2465 		return 0;
2466 
2467 	/*
2468 	 * We use overage compared to memory.high to calculate the number of
2469 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2470 	 * fairly lenient on small overages, and increasingly harsh when the
2471 	 * memcg in question makes it clear that it has no intention of stopping
2472 	 * its crazy behaviour, so we exponentially increase the delay based on
2473 	 * overage amount.
2474 	 */
2475 	penalty_jiffies = max_overage * max_overage * HZ;
2476 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2477 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2478 
2479 	/*
2480 	 * Factor in the task's own contribution to the overage, such that four
2481 	 * N-sized allocations are throttled approximately the same as one
2482 	 * 4N-sized allocation.
2483 	 *
2484 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2485 	 * larger the current charge patch is than that.
2486 	 */
2487 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2488 }
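
/*
 * A worked example of the curve, written as it might appear in a quick
 * user-space sanity check (assuming HZ == 1000, 4 KiB pages and a charge
 * of MEMCG_CHARGE_BATCH pages, i.e. a scaling factor of 1; the values
 * are illustrative only):
 *
 *	u64 high = 25600, usage = 25856;	// 100 MiB vs 101 MiB, in pages
 *	u64 overage = ((usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high;
 *	u64 penalty = overage * overage * 1000;	// overage == 10485, HZ == 1000
 *	penalty >>= MEMCG_DELAY_PRECISION_SHIFT;
 *	penalty >>= MEMCG_DELAY_SCALING_SHIFT;	// 6 jiffies, the ~6ms table row
 *
 * The same arithmetic at 110 MiB of usage gives an overage of 104857 and
 * a delay of 639 jiffies, matching the table above.
 */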
2489 
2490 /*
2491  * Scheduled by try_charge() to be executed from the userland return path
2492  * and reclaims memory over the high limit.
2493  */
2494 void mem_cgroup_handle_over_high(void)
2495 {
2496 	unsigned long penalty_jiffies;
2497 	unsigned long pflags;
2498 	unsigned long nr_reclaimed;
2499 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2500 	int nr_retries = MAX_RECLAIM_RETRIES;
2501 	struct mem_cgroup *memcg;
2502 	bool in_retry = false;
2503 
2504 	if (likely(!nr_pages))
2505 		return;
2506 
2507 	memcg = get_mem_cgroup_from_mm(current->mm);
2508 	current->memcg_nr_pages_over_high = 0;
2509 
2510 retry_reclaim:
2511 	/*
2512 	 * The allocating task should reclaim at least the batch size, but for
2513 	 * subsequent retries we only want to do what's necessary to prevent oom
2514 	 * or breaching resource isolation.
2515 	 *
2516 	 * This is distinct from memory.max or page allocator behaviour because
2517 	 * memory.high is currently batched, whereas memory.max and the page
2518 	 * allocator run every time an allocation is made.
2519 	 */
2520 	nr_reclaimed = reclaim_high(memcg,
2521 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2522 				    GFP_KERNEL);
2523 
2524 	/*
2525 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2526 	 * allocators proactively to slow down excessive growth.
2527 	 */
2528 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2529 					       mem_find_max_overage(memcg));
2530 
2531 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2532 						swap_find_max_overage(memcg));
2533 
2534 	/*
2535 	 * Clamp the max delay per usermode return so as to still keep the
2536 	 * application moving forwards and also permit diagnostics, albeit
2537 	 * extremely slowly.
2538 	 */
2539 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2540 
2541 	/*
2542 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2543 	 * that it's not even worth doing, in an attempt to be nice to those who
2544 	 * go only a small amount over their memory.high value and maybe haven't
2545 	 * been aggressively reclaimed enough yet.
2546 	 */
2547 	if (penalty_jiffies <= HZ / 100)
2548 		goto out;
2549 
2550 	/*
2551 	 * If reclaim is making forward progress but we're still over
2552 	 * memory.high, we want to encourage that rather than doing allocator
2553 	 * throttling.
2554 	 */
2555 	if (nr_reclaimed || nr_retries--) {
2556 		in_retry = true;
2557 		goto retry_reclaim;
2558 	}
2559 
2560 	/*
2561 	 * If we exit early, we're guaranteed to die (since
2562 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2563 	 * need to account for any ill-begotten jiffies to pay them off later.
2564 	 */
2565 	psi_memstall_enter(&pflags);
2566 	schedule_timeout_killable(penalty_jiffies);
2567 	psi_memstall_leave(&pflags);
2568 
2569 out:
2570 	css_put(&memcg->css);
2571 }
2572 
2573 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2574 			unsigned int nr_pages)
2575 {
2576 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2577 	int nr_retries = MAX_RECLAIM_RETRIES;
2578 	struct mem_cgroup *mem_over_limit;
2579 	struct page_counter *counter;
2580 	unsigned long nr_reclaimed;
2581 	bool passed_oom = false;
2582 	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2583 	bool drained = false;
2584 	bool raised_max_event = false;
2585 	unsigned long pflags;
2586 
2587 retry:
2588 	if (consume_stock(memcg, nr_pages))
2589 		return 0;
2590 
2591 	if (!do_memsw_account() ||
2592 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2593 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2594 			goto done_restock;
2595 		if (do_memsw_account())
2596 			page_counter_uncharge(&memcg->memsw, batch);
2597 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2598 	} else {
2599 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2600 		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2601 	}
2602 
2603 	if (batch > nr_pages) {
2604 		batch = nr_pages;
2605 		goto retry;
2606 	}
2607 
2608 	/*
2609 	 * Prevent unbounded recursion when reclaim operations need to
2610 	 * allocate memory. This might exceed the limits temporarily,
2611 	 * but we prefer facilitating memory reclaim and getting back
2612 	 * under the limit over triggering OOM kills in these cases.
2613 	 */
2614 	if (unlikely(current->flags & PF_MEMALLOC))
2615 		goto force;
2616 
2617 	if (unlikely(task_in_memcg_oom(current)))
2618 		goto nomem;
2619 
2620 	if (!gfpflags_allow_blocking(gfp_mask))
2621 		goto nomem;
2622 
2623 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2624 	raised_max_event = true;
2625 
2626 	psi_memstall_enter(&pflags);
2627 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2628 						    gfp_mask, reclaim_options);
2629 	psi_memstall_leave(&pflags);
2630 
2631 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2632 		goto retry;
2633 
2634 	if (!drained) {
2635 		drain_all_stock(mem_over_limit);
2636 		drained = true;
2637 		goto retry;
2638 	}
2639 
2640 	if (gfp_mask & __GFP_NORETRY)
2641 		goto nomem;
2642 	/*
2643 	 * Even though the limit is exceeded at this point, reclaim
2644 	 * may have been able to free some pages.  Retry the charge
2645 	 * before killing the task.
2646 	 *
2647 	 * Only for regular pages, though: huge pages are rather
2648 	 * unlikely to succeed so close to the limit, and we fall back
2649 	 * to regular pages anyway in case of failure.
2650 	 */
2651 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2652 		goto retry;
2653 	/*
2654 	 * At task move, charge accounts can be doubly counted. So, it's
2655 	 * better to wait until the end of task_move if something is going on.
2656 	 */
2657 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2658 		goto retry;
2659 
2660 	if (nr_retries--)
2661 		goto retry;
2662 
2663 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2664 		goto nomem;
2665 
2666 	/* Avoid endless loop for tasks bypassed by the oom killer */
2667 	if (passed_oom && task_is_dying())
2668 		goto nomem;
2669 
2670 	/*
2671 	 * Keep retrying as long as the memcg oom killer is able to make
2672 	 * forward progress, or bypass the charge if the oom killer
2673 	 * couldn't make any progress.
2674 	 */
2675 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2676 			   get_order(nr_pages * PAGE_SIZE))) {
2677 		passed_oom = true;
2678 		nr_retries = MAX_RECLAIM_RETRIES;
2679 		goto retry;
2680 	}
2681 nomem:
2682 	/*
2683 	 * Memcg doesn't have a dedicated reserve for atomic
2684 	 * allocations. But like the global atomic pool, we need to
2685 	 * put the burden of reclaim on regular allocation requests
2686 	 * and let these go through as privileged allocations.
2687 	 */
2688 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2689 		return -ENOMEM;
2690 force:
2691 	/*
2692 	 * If the allocation has to be enforced, don't forget to raise
2693 	 * a MEMCG_MAX event.
2694 	 */
2695 	if (!raised_max_event)
2696 		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2697 
2698 	/*
2699 	 * The allocation either can't fail or will lead to more memory
2700 	 * being freed very soon.  Allow memory usage go over the limit
2701 	 * temporarily by force charging it.
2702 	 */
2703 	page_counter_charge(&memcg->memory, nr_pages);
2704 	if (do_memsw_account())
2705 		page_counter_charge(&memcg->memsw, nr_pages);
2706 
2707 	return 0;
2708 
2709 done_restock:
2710 	if (batch > nr_pages)
2711 		refill_stock(memcg, batch - nr_pages);
2712 
2713 	/*
2714 	 * If the hierarchy is above the normal consumption range, schedule
2715 	 * reclaim on returning to userland.  We can perform reclaim here
2716 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2717 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2718 	 * not recorded as it most likely matches current's and won't
2719 	 * change in the meantime.  As high limit is checked again before
2720 	 * reclaim, the cost of mismatch is negligible.
2721 	 */
2722 	do {
2723 		bool mem_high, swap_high;
2724 
2725 		mem_high = page_counter_read(&memcg->memory) >
2726 			READ_ONCE(memcg->memory.high);
2727 		swap_high = page_counter_read(&memcg->swap) >
2728 			READ_ONCE(memcg->swap.high);
2729 
2730 		/* Don't bother a random interrupted task */
2731 		if (!in_task()) {
2732 			if (mem_high) {
2733 				schedule_work(&memcg->high_work);
2734 				break;
2735 			}
2736 			continue;
2737 		}
2738 
2739 		if (mem_high || swap_high) {
2740 			/*
2741 			 * The allocating tasks in this cgroup will need to do
2742 			 * reclaim or be throttled to prevent further growth
2743 			 * of the memory or swap footprints.
2744 			 *
2745 			 * Target some best-effort fairness between the tasks,
2746 			 * and distribute reclaim work and delay penalties
2747 			 * based on how much each task is actually allocating.
2748 			 */
2749 			current->memcg_nr_pages_over_high += batch;
2750 			set_notify_resume(current);
2751 			break;
2752 		}
2753 	} while ((memcg = parent_mem_cgroup(memcg)));
2754 
2755 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2756 	    !(current->flags & PF_MEMALLOC) &&
2757 	    gfpflags_allow_blocking(gfp_mask)) {
2758 		mem_cgroup_handle_over_high();
2759 	}
2760 	return 0;
2761 }
2762 
2763 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2764 			     unsigned int nr_pages)
2765 {
2766 	if (mem_cgroup_is_root(memcg))
2767 		return 0;
2768 
2769 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2770 }
2771 
2772 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2773 {
2774 	if (mem_cgroup_is_root(memcg))
2775 		return;
2776 
2777 	page_counter_uncharge(&memcg->memory, nr_pages);
2778 	if (do_memsw_account())
2779 		page_counter_uncharge(&memcg->memsw, nr_pages);
2780 }
2781 
2782 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2783 {
2784 	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2785 	/*
2786 	 * Any of the following ensures page's memcg stability:
2787 	 *
2788 	 * - the page lock
2789 	 * - LRU isolation
2790 	 * - lock_page_memcg()
2791 	 * - exclusive reference
2792 	 * - mem_cgroup_trylock_pages()
2793 	 */
2794 	folio->memcg_data = (unsigned long)memcg;
2795 }
2796 
2797 #ifdef CONFIG_MEMCG_KMEM
2798 /*
2799  * The allocated objcg pointers array is not accounted directly.
2800  * Moreover, it should not come from a DMA buffer and is not readily
2801  * reclaimable. So those GFP bits should be masked off.
2802  */
2803 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2804 
2805 /*
2806  * mod_objcg_mlstate() may be called with irq enabled, so
2807  * mod_memcg_lruvec_state() should be used.
2808  */
2809 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2810 				     struct pglist_data *pgdat,
2811 				     enum node_stat_item idx, int nr)
2812 {
2813 	struct mem_cgroup *memcg;
2814 	struct lruvec *lruvec;
2815 
2816 	rcu_read_lock();
2817 	memcg = obj_cgroup_memcg(objcg);
2818 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2819 	mod_memcg_lruvec_state(lruvec, idx, nr);
2820 	rcu_read_unlock();
2821 }
2822 
2823 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2824 				 gfp_t gfp, bool new_slab)
2825 {
2826 	unsigned int objects = objs_per_slab(s, slab);
2827 	unsigned long memcg_data;
2828 	void *vec;
2829 
2830 	gfp &= ~OBJCGS_CLEAR_MASK;
2831 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2832 			   slab_nid(slab));
2833 	if (!vec)
2834 		return -ENOMEM;
2835 
2836 	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2837 	if (new_slab) {
2838 		/*
2839 		 * If the slab is brand new and nobody can yet access its
2840 		 * memcg_data, no synchronization is required and memcg_data can
2841 		 * be simply assigned.
2842 		 */
2843 		slab->memcg_data = memcg_data;
2844 	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2845 		/*
2846 		 * If the slab is already in use, somebody can allocate and
2847 		 * assign obj_cgroups in parallel. In this case the existing
2848 		 * objcg vector should be reused.
2849 		 */
2850 		kfree(vec);
2851 		return 0;
2852 	}
2853 
2854 	kmemleak_not_leak(vec);
2855 	return 0;
2856 }
2857 
2858 static __always_inline
2859 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2860 {
2861 	/*
2862 	 * Slab objects are accounted individually, not per-page.
2863 	 * Memcg membership data for each individual object is saved in
2864 	 * slab->memcg_data.
2865 	 */
2866 	if (folio_test_slab(folio)) {
2867 		struct obj_cgroup **objcgs;
2868 		struct slab *slab;
2869 		unsigned int off;
2870 
2871 		slab = folio_slab(folio);
2872 		objcgs = slab_objcgs(slab);
2873 		if (!objcgs)
2874 			return NULL;
2875 
2876 		off = obj_to_index(slab->slab_cache, slab, p);
2877 		if (objcgs[off])
2878 			return obj_cgroup_memcg(objcgs[off]);
2879 
2880 		return NULL;
2881 	}
2882 
2883 	/*
2884 	 * page_memcg_check() is used here, because in theory we can encounter
2885 	 * a folio where the slab flag has been cleared already, but
2886 	 * slab->memcg_data has not been freed yet.
2887 	 * page_memcg_check(page) will guarantee that a proper memory
2888 	 * cgroup pointer or NULL will be returned.
2889 	 */
2890 	return page_memcg_check(folio_page(folio, 0));
2891 }
2892 
2893 /*
2894  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2895  *
2896  * A passed kernel object can be a slab object, vmalloc object or a generic
2897  * kernel page, so different mechanisms for getting the memory cgroup pointer
2898  * should be used.
2899  *
2900  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2901  * can not know for sure how the kernel object is implemented.
2902  * mem_cgroup_from_obj() can be safely used in such cases.
2903  *
2904  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2905  * cgroup_mutex, etc.
2906  */
2907 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2908 {
2909 	struct folio *folio;
2910 
2911 	if (mem_cgroup_disabled())
2912 		return NULL;
2913 
2914 	if (unlikely(is_vmalloc_addr(p)))
2915 		folio = page_folio(vmalloc_to_page(p));
2916 	else
2917 		folio = virt_to_folio(p);
2918 
2919 	return mem_cgroup_from_obj_folio(folio, p);
2920 }
2921 
2922 /*
2923  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2924  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
2925  * allocated using vmalloc().
2926  *
2927  * A passed kernel object must be a slab object or a generic kernel page.
2928  *
2929  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2930  * cgroup_mutex, etc.
2931  */
2932 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2933 {
2934 	if (mem_cgroup_disabled())
2935 		return NULL;
2936 
2937 	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2938 }
2939 
2940 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2941 {
2942 	struct obj_cgroup *objcg = NULL;
2943 
2944 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2945 		objcg = rcu_dereference(memcg->objcg);
2946 		if (objcg && obj_cgroup_tryget(objcg))
2947 			break;
2948 		objcg = NULL;
2949 	}
2950 	return objcg;
2951 }
2952 
2953 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2954 {
2955 	struct obj_cgroup *objcg = NULL;
2956 	struct mem_cgroup *memcg;
2957 
2958 	if (memcg_kmem_bypass())
2959 		return NULL;
2960 
2961 	rcu_read_lock();
2962 	if (unlikely(active_memcg()))
2963 		memcg = active_memcg();
2964 	else
2965 		memcg = mem_cgroup_from_task(current);
2966 	objcg = __get_obj_cgroup_from_memcg(memcg);
2967 	rcu_read_unlock();
2968 	return objcg;
2969 }
2970 
2971 struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
2972 {
2973 	struct obj_cgroup *objcg;
2974 
2975 	if (!memcg_kmem_enabled() || memcg_kmem_bypass())
2976 		return NULL;
2977 
2978 	if (PageMemcgKmem(page)) {
2979 		objcg = __folio_objcg(page_folio(page));
2980 		obj_cgroup_get(objcg);
2981 	} else {
2982 		struct mem_cgroup *memcg;
2983 
2984 		rcu_read_lock();
2985 		memcg = __folio_memcg(page_folio(page));
2986 		if (memcg)
2987 			objcg = __get_obj_cgroup_from_memcg(memcg);
2988 		else
2989 			objcg = NULL;
2990 		rcu_read_unlock();
2991 	}
2992 	return objcg;
2993 }
2994 
2995 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2996 {
2997 	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2998 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
2999 		if (nr_pages > 0)
3000 			page_counter_charge(&memcg->kmem, nr_pages);
3001 		else
3002 			page_counter_uncharge(&memcg->kmem, -nr_pages);
3003 	}
3004 }
3005 
3006 
3007 /*
3008  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3009  * @objcg: object cgroup to uncharge
3010  * @nr_pages: number of pages to uncharge
3011  */
3012 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3013 				      unsigned int nr_pages)
3014 {
3015 	struct mem_cgroup *memcg;
3016 
3017 	memcg = get_mem_cgroup_from_objcg(objcg);
3018 
3019 	memcg_account_kmem(memcg, -nr_pages);
3020 	refill_stock(memcg, nr_pages);
3021 
3022 	css_put(&memcg->css);
3023 }
3024 
3025 /*
3026  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3027  * @objcg: object cgroup to charge
3028  * @gfp: reclaim mode
3029  * @nr_pages: number of pages to charge
3030  *
3031  * Returns 0 on success, an error code on failure.
3032  */
3033 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3034 				   unsigned int nr_pages)
3035 {
3036 	struct mem_cgroup *memcg;
3037 	int ret;
3038 
3039 	memcg = get_mem_cgroup_from_objcg(objcg);
3040 
3041 	ret = try_charge_memcg(memcg, gfp, nr_pages);
3042 	if (ret)
3043 		goto out;
3044 
3045 	memcg_account_kmem(memcg, nr_pages);
3046 out:
3047 	css_put(&memcg->css);
3048 
3049 	return ret;
3050 }
3051 
3052 /**
3053  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3054  * @page: page to charge
3055  * @gfp: reclaim mode
3056  * @order: allocation order
3057  *
3058  * Returns 0 on success, an error code on failure.
3059  */
3060 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3061 {
3062 	struct obj_cgroup *objcg;
3063 	int ret = 0;
3064 
3065 	objcg = get_obj_cgroup_from_current();
3066 	if (objcg) {
3067 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3068 		if (!ret) {
3069 			page->memcg_data = (unsigned long)objcg |
3070 				MEMCG_DATA_KMEM;
3071 			return 0;
3072 		}
3073 		obj_cgroup_put(objcg);
3074 	}
3075 	return ret;
3076 }
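
/*
 * For example, an allocation such as alloc_pages(GFP_KERNEL_ACCOUNT, 2)
 * reaches this function from the page allocator with order == 2: four
 * pages are charged to the current task's objcg, and the objcg pointer
 * is stashed in page->memcg_data with MEMCG_DATA_KMEM set so that
 * __memcg_kmem_uncharge_page() can find it again when the pages are
 * freed.
 */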
3077 
3078 /**
3079  * __memcg_kmem_uncharge_page: uncharge a kmem page
3080  * @page: page to uncharge
3081  * @order: allocation order
3082  */
3083 void __memcg_kmem_uncharge_page(struct page *page, int order)
3084 {
3085 	struct folio *folio = page_folio(page);
3086 	struct obj_cgroup *objcg;
3087 	unsigned int nr_pages = 1 << order;
3088 
3089 	if (!folio_memcg_kmem(folio))
3090 		return;
3091 
3092 	objcg = __folio_objcg(folio);
3093 	obj_cgroup_uncharge_pages(objcg, nr_pages);
3094 	folio->memcg_data = 0;
3095 	obj_cgroup_put(objcg);
3096 }
3097 
3098 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3099 		     enum node_stat_item idx, int nr)
3100 {
3101 	struct memcg_stock_pcp *stock;
3102 	struct obj_cgroup *old = NULL;
3103 	unsigned long flags;
3104 	int *bytes;
3105 
3106 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3107 	stock = this_cpu_ptr(&memcg_stock);
3108 
3109 	/*
3110 	 * Save vmstat data in stock and skip vmstat array update unless
3111 	 * accumulating over a page of vmstat data or when pgdat or idx
3112 	 * changes.
3113 	 */
3114 	if (stock->cached_objcg != objcg) {
3115 		old = drain_obj_stock(stock);
3116 		obj_cgroup_get(objcg);
3117 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3118 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3119 		stock->cached_objcg = objcg;
3120 		stock->cached_pgdat = pgdat;
3121 	} else if (stock->cached_pgdat != pgdat) {
3122 		/* Flush the existing cached vmstat data */
3123 		struct pglist_data *oldpg = stock->cached_pgdat;
3124 
3125 		if (stock->nr_slab_reclaimable_b) {
3126 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3127 					  stock->nr_slab_reclaimable_b);
3128 			stock->nr_slab_reclaimable_b = 0;
3129 		}
3130 		if (stock->nr_slab_unreclaimable_b) {
3131 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3132 					  stock->nr_slab_unreclaimable_b);
3133 			stock->nr_slab_unreclaimable_b = 0;
3134 		}
3135 		stock->cached_pgdat = pgdat;
3136 	}
3137 
3138 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3139 					       : &stock->nr_slab_unreclaimable_b;
3140 	/*
3141 	 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
3142 	 * cached locally at least once before pushing it out.
3143 	 */
3144 	if (!*bytes) {
3145 		*bytes = nr;
3146 		nr = 0;
3147 	} else {
3148 		*bytes += nr;
3149 		if (abs(*bytes) > PAGE_SIZE) {
3150 			nr = *bytes;
3151 			*bytes = 0;
3152 		} else {
3153 			nr = 0;
3154 		}
3155 	}
3156 	if (nr)
3157 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3158 
3159 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3160 	if (old)
3161 		obj_cgroup_put(old);
3162 }
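
/*
 * For illustration, assuming 4 KiB pages: accounted slab allocations on
 * the same node that each add 512 bytes to NR_SLAB_UNRECLAIMABLE_B only
 * bump stock->nr_slab_unreclaimable_b for the first eight calls (the sum
 * reaches exactly PAGE_SIZE); the ninth call pushes the accumulated 4608
 * bytes out with a single mod_objcg_mlstate() call.  The cached sum is
 * also flushed when the cached objcg or pgdat changes, or when the stock
 * is drained.
 */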
3163 
3164 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3165 {
3166 	struct memcg_stock_pcp *stock;
3167 	unsigned long flags;
3168 	bool ret = false;
3169 
3170 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3171 
3172 	stock = this_cpu_ptr(&memcg_stock);
3173 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3174 		stock->nr_bytes -= nr_bytes;
3175 		ret = true;
3176 	}
3177 
3178 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3179 
3180 	return ret;
3181 }
3182 
3183 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3184 {
3185 	struct obj_cgroup *old = stock->cached_objcg;
3186 
3187 	if (!old)
3188 		return NULL;
3189 
3190 	if (stock->nr_bytes) {
3191 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3192 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3193 
3194 		if (nr_pages) {
3195 			struct mem_cgroup *memcg;
3196 
3197 			memcg = get_mem_cgroup_from_objcg(old);
3198 
3199 			memcg_account_kmem(memcg, -nr_pages);
3200 			__refill_stock(memcg, nr_pages);
3201 
3202 			css_put(&memcg->css);
3203 		}
3204 
3205 		/*
3206 		 * The leftover is flushed to the centralized per-memcg value.
3207 		 * On the next attempt to refill obj stock it will be moved
3208 		 * to a per-cpu stock (probably on another CPU), see
3209 		 * refill_obj_stock().
3210 		 *
3211 		 * How often it's flushed is a trade-off between the memory
3212 		 * limit enforcement accuracy and potential CPU contention,
3213 		 * so it might be changed in the future.
3214 		 */
3215 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3216 		stock->nr_bytes = 0;
3217 	}
3218 
3219 	/*
3220 	 * Flush the vmstat data in current stock
3221 	 */
3222 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3223 		if (stock->nr_slab_reclaimable_b) {
3224 			mod_objcg_mlstate(old, stock->cached_pgdat,
3225 					  NR_SLAB_RECLAIMABLE_B,
3226 					  stock->nr_slab_reclaimable_b);
3227 			stock->nr_slab_reclaimable_b = 0;
3228 		}
3229 		if (stock->nr_slab_unreclaimable_b) {
3230 			mod_objcg_mlstate(old, stock->cached_pgdat,
3231 					  NR_SLAB_UNRECLAIMABLE_B,
3232 					  stock->nr_slab_unreclaimable_b);
3233 			stock->nr_slab_unreclaimable_b = 0;
3234 		}
3235 		stock->cached_pgdat = NULL;
3236 	}
3237 
3238 	stock->cached_objcg = NULL;
3239 	/*
3240 	 * The `old' object needs to be released by the caller via
3241 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3242 	 */
3243 	return old;
3244 }
3245 
3246 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3247 				     struct mem_cgroup *root_memcg)
3248 {
3249 	struct mem_cgroup *memcg;
3250 
3251 	if (stock->cached_objcg) {
3252 		memcg = obj_cgroup_memcg(stock->cached_objcg);
3253 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3254 			return true;
3255 	}
3256 
3257 	return false;
3258 }
3259 
3260 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3261 			     bool allow_uncharge)
3262 {
3263 	struct memcg_stock_pcp *stock;
3264 	struct obj_cgroup *old = NULL;
3265 	unsigned long flags;
3266 	unsigned int nr_pages = 0;
3267 
3268 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3269 
3270 	stock = this_cpu_ptr(&memcg_stock);
3271 	if (stock->cached_objcg != objcg) { /* reset if necessary */
3272 		old = drain_obj_stock(stock);
3273 		obj_cgroup_get(objcg);
3274 		stock->cached_objcg = objcg;
3275 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3276 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3277 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3278 	}
3279 	stock->nr_bytes += nr_bytes;
3280 
3281 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3282 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3283 		stock->nr_bytes &= (PAGE_SIZE - 1);
3284 	}
3285 
3286 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3287 	if (old)
3288 		obj_cgroup_put(old);
3289 
3290 	if (nr_pages)
3291 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3292 }
3293 
3294 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3295 {
3296 	unsigned int nr_pages, nr_bytes;
3297 	int ret;
3298 
3299 	if (consume_obj_stock(objcg, size))
3300 		return 0;
3301 
3302 	/*
3303 	 * In theory, objcg->nr_charged_bytes can have enough
3304 	 * pre-charged bytes to satisfy the allocation. However,
3305 	 * flushing objcg->nr_charged_bytes requires two atomic
3306 	 * operations, and objcg->nr_charged_bytes can't be big.
3307 	 * The shared objcg->nr_charged_bytes can also become a
3308 	 * performance bottleneck if all tasks of the same memcg are
3309 	 * trying to update it. So it's better to ignore it and try
3310 	 * grab some new pages. The stock's nr_bytes will be flushed to
3311 	 * objcg->nr_charged_bytes later on when objcg changes.
3312 	 *
3313 	 * The stock's nr_bytes may contain enough pre-charged bytes
3314 	 * to allow one less page from being charged, but we can't rely
3315 	 * on the pre-charged bytes not being changed outside of
3316 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3317 	 * pre-charged bytes as well when charging pages. To avoid a
3318 	 * page uncharge right after a page charge, we set the
3319 	 * allow_uncharge flag to false when calling refill_obj_stock()
3320 	 * to temporarily allow the pre-charged bytes to exceed the page
3321 	 * size limit. The maximum reachable value of the pre-charged
3322 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3323 	 * race.
3324 	 */
3325 	nr_pages = size >> PAGE_SHIFT;
3326 	nr_bytes = size & (PAGE_SIZE - 1);
3327 
3328 	if (nr_bytes)
3329 		nr_pages += 1;
3330 
3331 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3332 	if (!ret && nr_bytes)
3333 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3334 
3335 	return ret;
3336 }
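
/*
 * Worked example, assuming 4 KiB pages: charging a 700-byte object with
 * an empty stock misses consume_obj_stock(), so nr_pages becomes 1 and a
 * whole page is charged via obj_cgroup_charge_pages(); the unused
 * 4096 - 700 = 3396 bytes are then parked in the per-cpu stock by
 * refill_obj_stock(..., false).  The next few 700-byte charges on the
 * same CPU and objcg are served entirely from those pre-charged bytes,
 * and obj_cgroup_uncharge() later returns freed bytes to the stock,
 * uncharging whole pages only once the stock grows past PAGE_SIZE.
 */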
3337 
3338 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3339 {
3340 	refill_obj_stock(objcg, size, true);
3341 }
3342 
3343 #endif /* CONFIG_MEMCG_KMEM */
3344 
3345 /*
3346  * Because page_memcg(head) is not set on tails, set it now.
3347  */
3348 void split_page_memcg(struct page *head, unsigned int nr)
3349 {
3350 	struct folio *folio = page_folio(head);
3351 	struct mem_cgroup *memcg = folio_memcg(folio);
3352 	int i;
3353 
3354 	if (mem_cgroup_disabled() || !memcg)
3355 		return;
3356 
3357 	for (i = 1; i < nr; i++)
3358 		folio_page(folio, i)->memcg_data = folio->memcg_data;
3359 
3360 	if (folio_memcg_kmem(folio))
3361 		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3362 	else
3363 		css_get_many(&memcg->css, nr - 1);
3364 }
3365 
3366 #ifdef CONFIG_MEMCG_SWAP
3367 /**
3368  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3369  * @entry: swap entry to be moved
3370  * @from:  mem_cgroup which the entry is moved from
3371  * @to:  mem_cgroup which the entry is moved to
3372  *
3373  * It succeeds only when the swap_cgroup's record for this entry is the same
3374  * as the mem_cgroup's id of @from.
3375  *
3376  * Returns 0 on success, -EINVAL on failure.
3377  *
3378  * The caller must have charged to @to, IOW, called page_counter_charge() about
3379  * both res and memsw, and called css_get().
3380  */
3381 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3382 				struct mem_cgroup *from, struct mem_cgroup *to)
3383 {
3384 	unsigned short old_id, new_id;
3385 
3386 	old_id = mem_cgroup_id(from);
3387 	new_id = mem_cgroup_id(to);
3388 
3389 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3390 		mod_memcg_state(from, MEMCG_SWAP, -1);
3391 		mod_memcg_state(to, MEMCG_SWAP, 1);
3392 		return 0;
3393 	}
3394 	return -EINVAL;
3395 }
3396 #else
3397 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3398 				struct mem_cgroup *from, struct mem_cgroup *to)
3399 {
3400 	return -EINVAL;
3401 }
3402 #endif
3403 
3404 static DEFINE_MUTEX(memcg_max_mutex);
3405 
3406 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3407 				 unsigned long max, bool memsw)
3408 {
3409 	bool enlarge = false;
3410 	bool drained = false;
3411 	int ret;
3412 	bool limits_invariant;
3413 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3414 
3415 	do {
3416 		if (signal_pending(current)) {
3417 			ret = -EINTR;
3418 			break;
3419 		}
3420 
3421 		mutex_lock(&memcg_max_mutex);
3422 		/*
3423 		 * Make sure that the new limit (memsw or memory limit) doesn't
3424 		 * break our basic invariant rule memory.max <= memsw.max.
3425 		 */
3426 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3427 					   max <= memcg->memsw.max;
3428 		if (!limits_invariant) {
3429 			mutex_unlock(&memcg_max_mutex);
3430 			ret = -EINVAL;
3431 			break;
3432 		}
3433 		if (max > counter->max)
3434 			enlarge = true;
3435 		ret = page_counter_set_max(counter, max);
3436 		mutex_unlock(&memcg_max_mutex);
3437 
3438 		if (!ret)
3439 			break;
3440 
3441 		if (!drained) {
3442 			drain_all_stock(memcg);
3443 			drained = true;
3444 			continue;
3445 		}
3446 
3447 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3448 					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3449 			ret = -EBUSY;
3450 			break;
3451 		}
3452 	} while (true);
3453 
3454 	if (!ret && enlarge)
3455 		memcg_oom_recover(memcg);
3456 
3457 	return ret;
3458 }
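
/*
 * For example, shrinking the cgroup1 memory.limit_in_bytes below current
 * usage makes page_counter_set_max() fail, so the loop above drains the
 * per-cpu stocks and then reclaims with try_to_free_mem_cgroup_pages()
 * until the usage fits under the new limit; if reclaim stops making
 * progress the write fails with -EBUSY, and a pending fatal signal
 * aborts it with -EINTR.  Trying to set memsw.limit_in_bytes below
 * memory.limit_in_bytes (or vice versa) trips the invariant check and
 * returns -EINVAL instead.
 */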
3459 
3460 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3461 					    gfp_t gfp_mask,
3462 					    unsigned long *total_scanned)
3463 {
3464 	unsigned long nr_reclaimed = 0;
3465 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3466 	unsigned long reclaimed;
3467 	int loop = 0;
3468 	struct mem_cgroup_tree_per_node *mctz;
3469 	unsigned long excess;
3470 
3471 	if (order > 0)
3472 		return 0;
3473 
3474 	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3475 
3476 	/*
3477 	 * Do not even bother to check the largest node if the root
3478 	 * is empty. Do it lockless to prevent lock bouncing. Races
3479 	 * are acceptable as soft limit is best effort anyway.
3480 	 */
3481 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3482 		return 0;
3483 
3484 	/*
3485 	 * This loop can run a while, especially if mem_cgroups continuously
3486 	 * keep exceeding their soft limit and putting the system under
3487 	 * pressure.
3488 	 */
3489 	do {
3490 		if (next_mz)
3491 			mz = next_mz;
3492 		else
3493 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3494 		if (!mz)
3495 			break;
3496 
3497 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3498 						    gfp_mask, total_scanned);
3499 		nr_reclaimed += reclaimed;
3500 		spin_lock_irq(&mctz->lock);
3501 
3502 		/*
3503 		 * If we failed to reclaim anything from this memory cgroup
3504 		 * it is time to move on to the next cgroup
3505 		 */
3506 		next_mz = NULL;
3507 		if (!reclaimed)
3508 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3509 
3510 		excess = soft_limit_excess(mz->memcg);
3511 		/*
3512 		 * One school of thought says that we should not add
3513 		 * back the node to the tree if reclaim returns 0.
3514 		 * But our reclaim could return 0 simply because, due
3515 		 * to priority, we are exposing a smaller subset of
3516 		 * memory to reclaim from. Consider this as a longer
3517 		 * term TODO.
3518 		 */
3519 		/* If excess == 0, no tree ops */
3520 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3521 		spin_unlock_irq(&mctz->lock);
3522 		css_put(&mz->memcg->css);
3523 		loop++;
3524 		/*
3525 		 * Could not reclaim anything and there are no more
3526 		 * mem cgroups to try or we seem to be looping without
3527 		 * reclaiming anything.
3528 		 */
3529 		if (!nr_reclaimed &&
3530 			(next_mz == NULL ||
3531 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3532 			break;
3533 	} while (!nr_reclaimed);
3534 	if (next_mz)
3535 		css_put(&next_mz->memcg->css);
3536 	return nr_reclaimed;
3537 }
3538 
3539 /*
3540  * Reclaims as many pages from the given memcg as possible.
3541  *
3542  * Caller is responsible for holding css reference for memcg.
3543  */
3544 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3545 {
3546 	int nr_retries = MAX_RECLAIM_RETRIES;
3547 
3548 	/* we call try-to-free pages to make this cgroup empty */
3549 	lru_add_drain_all();
3550 
3551 	drain_all_stock(memcg);
3552 
3553 	/* try to free all pages in this cgroup */
3554 	while (nr_retries && page_counter_read(&memcg->memory)) {
3555 		if (signal_pending(current))
3556 			return -EINTR;
3557 
3558 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3559 						  MEMCG_RECLAIM_MAY_SWAP))
3560 			nr_retries--;
3561 	}
3562 
3563 	return 0;
3564 }
3565 
3566 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3567 					    char *buf, size_t nbytes,
3568 					    loff_t off)
3569 {
3570 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3571 
3572 	if (mem_cgroup_is_root(memcg))
3573 		return -EINVAL;
3574 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3575 }
3576 
3577 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3578 				     struct cftype *cft)
3579 {
3580 	return 1;
3581 }
3582 
3583 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3584 				      struct cftype *cft, u64 val)
3585 {
3586 	if (val == 1)
3587 		return 0;
3588 
3589 	pr_warn_once("Non-hierarchical mode is deprecated. "
3590 		     "Please report your usecase to linux-mm@kvack.org if you "
3591 		     "depend on this functionality.\n");
3592 
3593 	return -EINVAL;
3594 }
3595 
3596 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3597 {
3598 	unsigned long val;
3599 
3600 	if (mem_cgroup_is_root(memcg)) {
3601 		mem_cgroup_flush_stats();
3602 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3603 			memcg_page_state(memcg, NR_ANON_MAPPED);
3604 		if (swap)
3605 			val += memcg_page_state(memcg, MEMCG_SWAP);
3606 	} else {
3607 		if (!swap)
3608 			val = page_counter_read(&memcg->memory);
3609 		else
3610 			val = page_counter_read(&memcg->memsw);
3611 	}
3612 	return val;
3613 }
3614 
3615 enum {
3616 	RES_USAGE,
3617 	RES_LIMIT,
3618 	RES_MAX_USAGE,
3619 	RES_FAILCNT,
3620 	RES_SOFT_LIMIT,
3621 };
3622 
3623 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3624 			       struct cftype *cft)
3625 {
3626 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3627 	struct page_counter *counter;
3628 
3629 	switch (MEMFILE_TYPE(cft->private)) {
3630 	case _MEM:
3631 		counter = &memcg->memory;
3632 		break;
3633 	case _MEMSWAP:
3634 		counter = &memcg->memsw;
3635 		break;
3636 	case _KMEM:
3637 		counter = &memcg->kmem;
3638 		break;
3639 	case _TCP:
3640 		counter = &memcg->tcpmem;
3641 		break;
3642 	default:
3643 		BUG();
3644 	}
3645 
3646 	switch (MEMFILE_ATTR(cft->private)) {
3647 	case RES_USAGE:
3648 		if (counter == &memcg->memory)
3649 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3650 		if (counter == &memcg->memsw)
3651 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3652 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3653 	case RES_LIMIT:
3654 		return (u64)counter->max * PAGE_SIZE;
3655 	case RES_MAX_USAGE:
3656 		return (u64)counter->watermark * PAGE_SIZE;
3657 	case RES_FAILCNT:
3658 		return counter->failcnt;
3659 	case RES_SOFT_LIMIT:
3660 		return (u64)memcg->soft_limit * PAGE_SIZE;
3661 	default:
3662 		BUG();
3663 	}
3664 }
3665 
3666 #ifdef CONFIG_MEMCG_KMEM
3667 static int memcg_online_kmem(struct mem_cgroup *memcg)
3668 {
3669 	struct obj_cgroup *objcg;
3670 
3671 	if (mem_cgroup_kmem_disabled())
3672 		return 0;
3673 
3674 	if (unlikely(mem_cgroup_is_root(memcg)))
3675 		return 0;
3676 
3677 	objcg = obj_cgroup_alloc();
3678 	if (!objcg)
3679 		return -ENOMEM;
3680 
3681 	objcg->memcg = memcg;
3682 	rcu_assign_pointer(memcg->objcg, objcg);
3683 
3684 	static_branch_enable(&memcg_kmem_enabled_key);
3685 
3686 	memcg->kmemcg_id = memcg->id.id;
3687 
3688 	return 0;
3689 }
3690 
3691 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3692 {
3693 	struct mem_cgroup *parent;
3694 
3695 	if (mem_cgroup_kmem_disabled())
3696 		return;
3697 
3698 	if (unlikely(mem_cgroup_is_root(memcg)))
3699 		return;
3700 
3701 	parent = parent_mem_cgroup(memcg);
3702 	if (!parent)
3703 		parent = root_mem_cgroup;
3704 
3705 	memcg_reparent_objcgs(memcg, parent);
3706 
3707 	/*
3708 	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3709 	 * corresponding to this cgroup are guaranteed to remain empty.
3710 	 * The ordering is imposed by list_lru_node->lock taken by
3711 	 * memcg_reparent_list_lrus().
3712 	 */
3713 	memcg_reparent_list_lrus(memcg, parent);
3714 }
3715 #else
3716 static int memcg_online_kmem(struct mem_cgroup *memcg)
3717 {
3718 	return 0;
3719 }
3720 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3721 {
3722 }
3723 #endif /* CONFIG_MEMCG_KMEM */
3724 
3725 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3726 {
3727 	int ret;
3728 
3729 	mutex_lock(&memcg_max_mutex);
3730 
3731 	ret = page_counter_set_max(&memcg->tcpmem, max);
3732 	if (ret)
3733 		goto out;
3734 
3735 	if (!memcg->tcpmem_active) {
3736 		/*
3737 		 * The active flag needs to be written after the static_key
3738 		 * update. This is what guarantees that the socket activation
3739 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3740 		 * for details, and note that we don't mark any socket as
3741 		 * belonging to this memcg until that flag is up.
3742 		 *
3743 		 * We need to do this, because static_keys will span multiple
3744 		 * sites, but we can't control their order. If we mark a socket
3745 		 * as accounted, but the accounting functions are not patched in
3746 		 * yet, we'll lose accounting.
3747 		 *
3748 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3749 		 * because when this value changes, the code to process it is not
3750 		 * patched in yet.
3751 		 */
3752 		static_branch_inc(&memcg_sockets_enabled_key);
3753 		memcg->tcpmem_active = true;
3754 	}
3755 out:
3756 	mutex_unlock(&memcg_max_mutex);
3757 	return ret;
3758 }
3759 
3760 /*
3761  * The user of this function is...
3762  * RES_LIMIT.
3763  */
3764 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3765 				char *buf, size_t nbytes, loff_t off)
3766 {
3767 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3768 	unsigned long nr_pages;
3769 	int ret;
3770 
3771 	buf = strstrip(buf);
3772 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3773 	if (ret)
3774 		return ret;
3775 
3776 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3777 	case RES_LIMIT:
3778 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3779 			ret = -EINVAL;
3780 			break;
3781 		}
3782 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3783 		case _MEM:
3784 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3785 			break;
3786 		case _MEMSWAP:
3787 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3788 			break;
3789 		case _KMEM:
3790 			/* kmem.limit_in_bytes is deprecated. */
3791 			ret = -EOPNOTSUPP;
3792 			break;
3793 		case _TCP:
3794 			ret = memcg_update_tcp_max(memcg, nr_pages);
3795 			break;
3796 		}
3797 		break;
3798 	case RES_SOFT_LIMIT:
3799 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3800 			ret = -EOPNOTSUPP;
3801 		} else {
3802 			memcg->soft_limit = nr_pages;
3803 			ret = 0;
3804 		}
3805 		break;
3806 	}
3807 	return ret ?: nbytes;
3808 }
3809 
3810 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3811 				size_t nbytes, loff_t off)
3812 {
3813 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3814 	struct page_counter *counter;
3815 
3816 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3817 	case _MEM:
3818 		counter = &memcg->memory;
3819 		break;
3820 	case _MEMSWAP:
3821 		counter = &memcg->memsw;
3822 		break;
3823 	case _KMEM:
3824 		counter = &memcg->kmem;
3825 		break;
3826 	case _TCP:
3827 		counter = &memcg->tcpmem;
3828 		break;
3829 	default:
3830 		BUG();
3831 	}
3832 
3833 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3834 	case RES_MAX_USAGE:
3835 		page_counter_reset_watermark(counter);
3836 		break;
3837 	case RES_FAILCNT:
3838 		counter->failcnt = 0;
3839 		break;
3840 	default:
3841 		BUG();
3842 	}
3843 
3844 	return nbytes;
3845 }
3846 
3847 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3848 					struct cftype *cft)
3849 {
3850 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3851 }
3852 
3853 #ifdef CONFIG_MMU
3854 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3855 					struct cftype *cft, u64 val)
3856 {
3857 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3858 
3859 	if (val & ~MOVE_MASK)
3860 		return -EINVAL;
3861 
3862 	/*
3863 	 * No locking is needed here, because ->can_attach() will
3864 	 * check this value once at the beginning of the process, and then carry
3865 	 * on with stale data. This means that changes to this value will only
3866 	 * affect task migrations starting after the change.
3867 	 */
3868 	memcg->move_charge_at_immigrate = val;
3869 	return 0;
3870 }
3871 #else
3872 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3873 					struct cftype *cft, u64 val)
3874 {
3875 	return -ENOSYS;
3876 }
3877 #endif
3878 
3879 #ifdef CONFIG_NUMA
3880 
3881 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3882 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3883 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3884 
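/*
 * Sum the sizes of the LRU lists selected by @lru_mask on node @nid, using
 * either the hierarchical (tree) counters or the local ones.
 */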
3885 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3886 				int nid, unsigned int lru_mask, bool tree)
3887 {
3888 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3889 	unsigned long nr = 0;
3890 	enum lru_list lru;
3891 
3892 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3893 
3894 	for_each_lru(lru) {
3895 		if (!(BIT(lru) & lru_mask))
3896 			continue;
3897 		if (tree)
3898 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3899 		else
3900 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3901 	}
3902 	return nr;
3903 }
3904 
3905 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3906 					     unsigned int lru_mask,
3907 					     bool tree)
3908 {
3909 	unsigned long nr = 0;
3910 	enum lru_list lru;
3911 
3912 	for_each_lru(lru) {
3913 		if (!(BIT(lru) & lru_mask))
3914 			continue;
3915 		if (tree)
3916 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3917 		else
3918 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3919 	}
3920 	return nr;
3921 }
3922 
3923 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3924 {
3925 	struct numa_stat {
3926 		const char *name;
3927 		unsigned int lru_mask;
3928 	};
3929 
3930 	static const struct numa_stat stats[] = {
3931 		{ "total", LRU_ALL },
3932 		{ "file", LRU_ALL_FILE },
3933 		{ "anon", LRU_ALL_ANON },
3934 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3935 	};
3936 	const struct numa_stat *stat;
3937 	int nid;
3938 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3939 
3940 	mem_cgroup_flush_stats();
3941 
3942 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3943 		seq_printf(m, "%s=%lu", stat->name,
3944 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3945 						   false));
3946 		for_each_node_state(nid, N_MEMORY)
3947 			seq_printf(m, " N%d=%lu", nid,
3948 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3949 							stat->lru_mask, false));
3950 		seq_putc(m, '\n');
3951 	}
3952 
3953 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3954 
3955 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
3956 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3957 						   true));
3958 		for_each_node_state(nid, N_MEMORY)
3959 			seq_printf(m, " N%d=%lu", nid,
3960 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3961 							stat->lru_mask, true));
3962 		seq_putc(m, '\n');
3963 	}
3964 
3965 	return 0;
3966 }
3967 #endif /* CONFIG_NUMA */
3968 
3969 static const unsigned int memcg1_stats[] = {
3970 	NR_FILE_PAGES,
3971 	NR_ANON_MAPPED,
3972 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3973 	NR_ANON_THPS,
3974 #endif
3975 	NR_SHMEM,
3976 	NR_FILE_MAPPED,
3977 	NR_FILE_DIRTY,
3978 	NR_WRITEBACK,
3979 	WORKINGSET_REFAULT_ANON,
3980 	WORKINGSET_REFAULT_FILE,
3981 	MEMCG_SWAP,
3982 };
3983 
3984 static const char *const memcg1_stat_names[] = {
3985 	"cache",
3986 	"rss",
3987 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3988 	"rss_huge",
3989 #endif
3990 	"shmem",
3991 	"mapped_file",
3992 	"dirty",
3993 	"writeback",
3994 	"workingset_refault_anon",
3995 	"workingset_refault_file",
3996 	"swap",
3997 };
3998 
3999 /* Universal VM events cgroup1 shows, original sort order */
4000 static const unsigned int memcg1_events[] = {
4001 	PGPGIN,
4002 	PGPGOUT,
4003 	PGFAULT,
4004 	PGMAJFAULT,
4005 };
4006 
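/*
 * memory.stat for cgroup v1: local counters, events and LRU sizes first,
 * then the hierarchical limits and the "total_"-prefixed hierarchical
 * counterparts, plus anon/file cost under CONFIG_DEBUG_VM.
 */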
4007 static int memcg_stat_show(struct seq_file *m, void *v)
4008 {
4009 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4010 	unsigned long memory, memsw;
4011 	struct mem_cgroup *mi;
4012 	unsigned int i;
4013 
4014 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4015 
4016 	mem_cgroup_flush_stats();
4017 
4018 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4019 		unsigned long nr;
4020 
4021 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4022 			continue;
4023 		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4024 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
4025 			   nr * memcg_page_state_unit(memcg1_stats[i]));
4026 	}
4027 
4028 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4029 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4030 			   memcg_events_local(memcg, memcg1_events[i]));
4031 
4032 	for (i = 0; i < NR_LRU_LISTS; i++)
4033 		seq_printf(m, "%s %lu\n", lru_list_name(i),
4034 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4035 			   PAGE_SIZE);
4036 
4037 	/* Hierarchical information */
4038 	memory = memsw = PAGE_COUNTER_MAX;
4039 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4040 		memory = min(memory, READ_ONCE(mi->memory.max));
4041 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4042 	}
4043 	seq_printf(m, "hierarchical_memory_limit %llu\n",
4044 		   (u64)memory * PAGE_SIZE);
4045 	if (do_memsw_account())
4046 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4047 			   (u64)memsw * PAGE_SIZE);
4048 
4049 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4050 		unsigned long nr;
4051 
4052 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4053 			continue;
4054 		nr = memcg_page_state(memcg, memcg1_stats[i]);
4055 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4056 			   (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
4057 	}
4058 
4059 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4060 		seq_printf(m, "total_%s %llu\n",
4061 			   vm_event_name(memcg1_events[i]),
4062 			   (u64)memcg_events(memcg, memcg1_events[i]));
4063 
4064 	for (i = 0; i < NR_LRU_LISTS; i++)
4065 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4066 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4067 			   PAGE_SIZE);
4068 
4069 #ifdef CONFIG_DEBUG_VM
4070 	{
4071 		pg_data_t *pgdat;
4072 		struct mem_cgroup_per_node *mz;
4073 		unsigned long anon_cost = 0;
4074 		unsigned long file_cost = 0;
4075 
4076 		for_each_online_pgdat(pgdat) {
4077 			mz = memcg->nodeinfo[pgdat->node_id];
4078 
4079 			anon_cost += mz->lruvec.anon_cost;
4080 			file_cost += mz->lruvec.file_cost;
4081 		}
4082 		seq_printf(m, "anon_cost %lu\n", anon_cost);
4083 		seq_printf(m, "file_cost %lu\n", file_cost);
4084 	}
4085 #endif
4086 
4087 	return 0;
4088 }
4089 
4090 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4091 				      struct cftype *cft)
4092 {
4093 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4094 
4095 	return mem_cgroup_swappiness(memcg);
4096 }
4097 
4098 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4099 				       struct cftype *cft, u64 val)
4100 {
4101 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4102 
4103 	if (val > 200)
4104 		return -EINVAL;
4105 
4106 	if (!mem_cgroup_is_root(memcg))
4107 		memcg->swappiness = val;
4108 	else
4109 		vm_swappiness = val;
4110 
4111 	return 0;
4112 }
4113 
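/*
 * Signal the eventfd of every threshold that was crossed - in either
 * direction - since the last invocation, and record the new
 * current_threshold.
 */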
4114 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4115 {
4116 	struct mem_cgroup_threshold_ary *t;
4117 	unsigned long usage;
4118 	int i;
4119 
4120 	rcu_read_lock();
4121 	if (!swap)
4122 		t = rcu_dereference(memcg->thresholds.primary);
4123 	else
4124 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4125 
4126 	if (!t)
4127 		goto unlock;
4128 
4129 	usage = mem_cgroup_usage(memcg, swap);
4130 
4131 	/*
4132 	 * current_threshold points to the threshold just below or equal to
4133 	 * usage. If that is not the case, a threshold was crossed after the
4134 	 * last call of __mem_cgroup_threshold().
4135 	 */
4136 	i = t->current_threshold;
4137 
4138 	/*
4139 	 * Iterate backward over the array of thresholds, starting from
4140 	 * current_threshold, and check if a threshold is crossed.
4141 	 * If none of the thresholds below usage is crossed, we read
4142 	 * only one element of the array here.
4143 	 */
4144 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4145 		eventfd_signal(t->entries[i].eventfd, 1);
4146 
4147 	/* i = current_threshold + 1 */
4148 	i++;
4149 
4150 	/*
4151 	 * Iterate forward over the array of thresholds, starting from
4152 	 * current_threshold+1, and check if a threshold is crossed.
4153 	 * If none of the thresholds above usage is crossed, we read
4154 	 * only one element of the array here.
4155 	 */
4156 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4157 		eventfd_signal(t->entries[i].eventfd, 1);
4158 
4159 	/* Update current_threshold */
4160 	t->current_threshold = i - 1;
4161 unlock:
4162 	rcu_read_unlock();
4163 }
4164 
4165 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4166 {
4167 	while (memcg) {
4168 		__mem_cgroup_threshold(memcg, false);
4169 		if (do_memsw_account())
4170 			__mem_cgroup_threshold(memcg, true);
4171 
4172 		memcg = parent_mem_cgroup(memcg);
4173 	}
4174 }
4175 
4176 static int compare_thresholds(const void *a, const void *b)
4177 {
4178 	const struct mem_cgroup_threshold *_a = a;
4179 	const struct mem_cgroup_threshold *_b = b;
4180 
4181 	if (_a->threshold > _b->threshold)
4182 		return 1;
4183 
4184 	if (_a->threshold < _b->threshold)
4185 		return -1;
4186 
4187 	return 0;
4188 }
4189 
4190 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4191 {
4192 	struct mem_cgroup_eventfd_list *ev;
4193 
4194 	spin_lock(&memcg_oom_lock);
4195 
4196 	list_for_each_entry(ev, &memcg->oom_notify, list)
4197 		eventfd_signal(ev->eventfd, 1);
4198 
4199 	spin_unlock(&memcg_oom_lock);
4200 	return 0;
4201 }
4202 
4203 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4204 {
4205 	struct mem_cgroup *iter;
4206 
4207 	for_each_mem_cgroup_tree(iter, memcg)
4208 		mem_cgroup_oom_notify_cb(iter);
4209 }
4210 
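/*
 * Register a usage threshold: build a larger copy of the thresholds array
 * with the new entry added, sort it, publish it with rcu_assign_pointer()
 * and keep the old array around as the spare buffer.
 */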
4211 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4212 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4213 {
4214 	struct mem_cgroup_thresholds *thresholds;
4215 	struct mem_cgroup_threshold_ary *new;
4216 	unsigned long threshold;
4217 	unsigned long usage;
4218 	int i, size, ret;
4219 
4220 	ret = page_counter_memparse(args, "-1", &threshold);
4221 	if (ret)
4222 		return ret;
4223 
4224 	mutex_lock(&memcg->thresholds_lock);
4225 
4226 	if (type == _MEM) {
4227 		thresholds = &memcg->thresholds;
4228 		usage = mem_cgroup_usage(memcg, false);
4229 	} else if (type == _MEMSWAP) {
4230 		thresholds = &memcg->memsw_thresholds;
4231 		usage = mem_cgroup_usage(memcg, true);
4232 	} else
4233 		BUG();
4234 
4235 	/* Check if a threshold was crossed before adding a new one */
4236 	if (thresholds->primary)
4237 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4238 
4239 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4240 
4241 	/* Allocate memory for new array of thresholds */
4242 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4243 	if (!new) {
4244 		ret = -ENOMEM;
4245 		goto unlock;
4246 	}
4247 	new->size = size;
4248 
4249 	/* Copy thresholds (if any) to new array */
4250 	if (thresholds->primary)
4251 		memcpy(new->entries, thresholds->primary->entries,
4252 		       flex_array_size(new, entries, size - 1));
4253 
4254 	/* Add new threshold */
4255 	new->entries[size - 1].eventfd = eventfd;
4256 	new->entries[size - 1].threshold = threshold;
4257 
4258 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4259 	sort(new->entries, size, sizeof(*new->entries),
4260 			compare_thresholds, NULL);
4261 
4262 	/* Find current threshold */
4263 	new->current_threshold = -1;
4264 	for (i = 0; i < size; i++) {
4265 		if (new->entries[i].threshold <= usage) {
4266 			/*
4267 			 * new->current_threshold will not be used until
4268 			 * rcu_assign_pointer(), so it's safe to increment
4269 			 * it here.
4270 			 */
4271 			++new->current_threshold;
4272 		} else
4273 			break;
4274 	}
4275 
4276 	/* Free old spare buffer and save old primary buffer as spare */
4277 	kfree(thresholds->spare);
4278 	thresholds->spare = thresholds->primary;
4279 
4280 	rcu_assign_pointer(thresholds->primary, new);
4281 
4282 	/* To be sure that nobody uses thresholds */
4283 	synchronize_rcu();
4284 
4285 unlock:
4286 	mutex_unlock(&memcg->thresholds_lock);
4287 
4288 	return ret;
4289 }
4290 
4291 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4292 	struct eventfd_ctx *eventfd, const char *args)
4293 {
4294 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4295 }
4296 
4297 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4298 	struct eventfd_ctx *eventfd, const char *args)
4299 {
4300 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4301 }
4302 
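/*
 * Unregister a usage threshold: rebuild the thresholds array in the spare
 * buffer without the entries that belong to @eventfd and publish it via
 * RCU. The spare buffer is freed once the last event is gone.
 */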
4303 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4304 	struct eventfd_ctx *eventfd, enum res_type type)
4305 {
4306 	struct mem_cgroup_thresholds *thresholds;
4307 	struct mem_cgroup_threshold_ary *new;
4308 	unsigned long usage;
4309 	int i, j, size, entries;
4310 
4311 	mutex_lock(&memcg->thresholds_lock);
4312 
4313 	if (type == _MEM) {
4314 		thresholds = &memcg->thresholds;
4315 		usage = mem_cgroup_usage(memcg, false);
4316 	} else if (type == _MEMSWAP) {
4317 		thresholds = &memcg->memsw_thresholds;
4318 		usage = mem_cgroup_usage(memcg, true);
4319 	} else
4320 		BUG();
4321 
4322 	if (!thresholds->primary)
4323 		goto unlock;
4324 
4325 	/* Check if a threshold was crossed before removing */
4326 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4327 
4328 	/* Calculate new number of threshold */
4329 	size = entries = 0;
4330 	for (i = 0; i < thresholds->primary->size; i++) {
4331 		if (thresholds->primary->entries[i].eventfd != eventfd)
4332 			size++;
4333 		else
4334 			entries++;
4335 	}
4336 
4337 	new = thresholds->spare;
4338 
4339 	/* If no items related to eventfd have been cleared, nothing to do */
4340 	if (!entries)
4341 		goto unlock;
4342 
4343 	/* Set thresholds array to NULL if we don't have thresholds */
4344 	if (!size) {
4345 		kfree(new);
4346 		new = NULL;
4347 		goto swap_buffers;
4348 	}
4349 
4350 	new->size = size;
4351 
4352 	/* Copy thresholds and find current threshold */
4353 	new->current_threshold = -1;
4354 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4355 		if (thresholds->primary->entries[i].eventfd == eventfd)
4356 			continue;
4357 
4358 		new->entries[j] = thresholds->primary->entries[i];
4359 		if (new->entries[j].threshold <= usage) {
4360 			/*
4361 			 * new->current_threshold will not be used
4362 			 * until rcu_assign_pointer(), so it's safe to increment
4363 			 * it here.
4364 			 */
4365 			++new->current_threshold;
4366 		}
4367 		j++;
4368 	}
4369 
4370 swap_buffers:
4371 	/* Swap primary and spare array */
4372 	thresholds->spare = thresholds->primary;
4373 
4374 	rcu_assign_pointer(thresholds->primary, new);
4375 
4376 	/* To be sure that nobody uses thresholds */
4377 	synchronize_rcu();
4378 
4379 	/* If all events are unregistered, free the spare array */
4380 	if (!new) {
4381 		kfree(thresholds->spare);
4382 		thresholds->spare = NULL;
4383 	}
4384 unlock:
4385 	mutex_unlock(&memcg->thresholds_lock);
4386 }
4387 
4388 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4389 	struct eventfd_ctx *eventfd)
4390 {
4391 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4392 }
4393 
4394 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4395 	struct eventfd_ctx *eventfd)
4396 {
4397 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4398 }
4399 
4400 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4401 	struct eventfd_ctx *eventfd, const char *args)
4402 {
4403 	struct mem_cgroup_eventfd_list *event;
4404 
4405 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4406 	if (!event)
4407 		return -ENOMEM;
4408 
4409 	spin_lock(&memcg_oom_lock);
4410 
4411 	event->eventfd = eventfd;
4412 	list_add(&event->list, &memcg->oom_notify);
4413 
4414 	/* already in OOM ? */
4415 	if (memcg->under_oom)
4416 		eventfd_signal(eventfd, 1);
4417 	spin_unlock(&memcg_oom_lock);
4418 
4419 	return 0;
4420 }
4421 
4422 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4423 	struct eventfd_ctx *eventfd)
4424 {
4425 	struct mem_cgroup_eventfd_list *ev, *tmp;
4426 
4427 	spin_lock(&memcg_oom_lock);
4428 
4429 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4430 		if (ev->eventfd == eventfd) {
4431 			list_del(&ev->list);
4432 			kfree(ev);
4433 		}
4434 	}
4435 
4436 	spin_unlock(&memcg_oom_lock);
4437 }
4438 
4439 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4440 {
4441 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4442 
4443 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4444 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4445 	seq_printf(sf, "oom_kill %lu\n",
4446 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4447 	return 0;
4448 }
4449 
4450 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4451 	struct cftype *cft, u64 val)
4452 {
4453 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4454 
4455 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4456 	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4457 		return -EINVAL;
4458 
4459 	memcg->oom_kill_disable = val;
4460 	if (!val)
4461 		memcg_oom_recover(memcg);
4462 
4463 	return 0;
4464 }
4465 
4466 #ifdef CONFIG_CGROUP_WRITEBACK
4467 
4468 #include <trace/events/writeback.h>
4469 
4470 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4471 {
4472 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4473 }
4474 
4475 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4476 {
4477 	wb_domain_exit(&memcg->cgwb_domain);
4478 }
4479 
4480 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4481 {
4482 	wb_domain_size_changed(&memcg->cgwb_domain);
4483 }
4484 
4485 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4486 {
4487 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4488 
4489 	if (!memcg->css.parent)
4490 		return NULL;
4491 
4492 	return &memcg->cgwb_domain;
4493 }
4494 
4495 /**
4496  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4497  * @wb: bdi_writeback in question
4498  * @pfilepages: out parameter for number of file pages
4499  * @pheadroom: out parameter for number of allocatable pages according to memcg
4500  * @pdirty: out parameter for number of dirty pages
4501  * @pwriteback: out parameter for number of pages under writeback
4502  *
4503  * Determine the numbers of file, headroom, dirty, and writeback pages in
4504  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4505  * is a bit more involved.
4506  *
4507  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4508  * headroom is calculated as the lowest headroom of itself and the
4509  * ancestors.  Note that this doesn't consider the actual amount of
4510  * available memory in the system.  The caller should further cap
4511  * *@pheadroom accordingly.
4512  */
4513 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4514 			 unsigned long *pheadroom, unsigned long *pdirty,
4515 			 unsigned long *pwriteback)
4516 {
4517 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4518 	struct mem_cgroup *parent;
4519 
4520 	mem_cgroup_flush_stats();
4521 
4522 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4523 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4524 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4525 			memcg_page_state(memcg, NR_ACTIVE_FILE);
4526 
4527 	*pheadroom = PAGE_COUNTER_MAX;
4528 	while ((parent = parent_mem_cgroup(memcg))) {
4529 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4530 					    READ_ONCE(memcg->memory.high));
4531 		unsigned long used = page_counter_read(&memcg->memory);
4532 
4533 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4534 		memcg = parent;
4535 	}
4536 }
4537 
4538 /*
4539  * Foreign dirty flushing
4540  *
4541  * There's an inherent mismatch between memcg and writeback.  The former
4542  * tracks ownership per-page while the latter per-inode.  This was a
4543  * deliberate design decision because honoring per-page ownership in the
4544  * writeback path is complicated, may lead to higher CPU and IO overheads
4545  * and deemed unnecessary given that write-sharing an inode across
4546  * different cgroups isn't a common use-case.
4547  *
4548  * Combined with inode majority-writer ownership switching, this works well
4549  * enough in most cases but there are some pathological cases.  For
4550  * example, let's say there are two cgroups A and B which keep writing to
4551  * different but confined parts of the same inode.  B owns the inode and
4552  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4553  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4554  * triggering background writeback.  A will be slowed down without a way to
4555  * make writeback of the dirty pages happen.
4556  *
4557  * Conditions like the above can lead to a cgroup getting repeatedly and
4558  * severely throttled after making some progress after each
4559  * dirty_expire_interval while the underlying IO device is almost
4560  * completely idle.
4561  *
4562  * Solving this problem completely requires matching the ownership tracking
4563  * granularities between memcg and writeback in either direction.  However,
4564  * the more egregious behaviors can be avoided by simply remembering the
4565  * most recent foreign dirtying events and initiating remote flushes on
4566  * them when local writeback isn't enough to keep the memory clean enough.
4567  *
4568  * The following two functions implement such mechanism.  When a foreign
4569  * page - a page whose memcg and writeback ownerships don't match - is
4570  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4571  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4572  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4573  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4574  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4575  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4576  * limited to MEMCG_CGWB_FRN_CNT.
4577  *
4578  * The mechanism only remembers IDs and doesn't hold any object references.
4579  * As being wrong occasionally doesn't matter, updates and accesses to the
4580  * records are lockless and racy.
4581  */
4582 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4583 					     struct bdi_writeback *wb)
4584 {
4585 	struct mem_cgroup *memcg = folio_memcg(folio);
4586 	struct memcg_cgwb_frn *frn;
4587 	u64 now = get_jiffies_64();
4588 	u64 oldest_at = now;
4589 	int oldest = -1;
4590 	int i;
4591 
4592 	trace_track_foreign_dirty(folio, wb);
4593 
4594 	/*
4595 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4596 	 * using it.  If not, replace the oldest one which isn't being
4597 	 * written out.
4598 	 */
4599 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4600 		frn = &memcg->cgwb_frn[i];
4601 		if (frn->bdi_id == wb->bdi->id &&
4602 		    frn->memcg_id == wb->memcg_css->id)
4603 			break;
4604 		if (time_before64(frn->at, oldest_at) &&
4605 		    atomic_read(&frn->done.cnt) == 1) {
4606 			oldest = i;
4607 			oldest_at = frn->at;
4608 		}
4609 	}
4610 
4611 	if (i < MEMCG_CGWB_FRN_CNT) {
4612 		/*
4613 		 * Re-using an existing one.  Update timestamp lazily to
4614 		 * avoid making the cacheline hot.  We want them to be
4615 		 * reasonably up-to-date and significantly shorter than
4616 		 * dirty_expire_interval as that's what expires the record.
4617 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4618 		 */
4619 		unsigned long update_intv =
4620 			min_t(unsigned long, HZ,
4621 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4622 
4623 		if (time_before64(frn->at, now - update_intv))
4624 			frn->at = now;
4625 	} else if (oldest >= 0) {
4626 		/* replace the oldest free one */
4627 		frn = &memcg->cgwb_frn[oldest];
4628 		frn->bdi_id = wb->bdi->id;
4629 		frn->memcg_id = wb->memcg_css->id;
4630 		frn->at = now;
4631 	}
4632 }
4633 
4634 /* issue foreign writeback flushes for recorded foreign dirtying events */
4635 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4636 {
4637 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4638 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4639 	u64 now = jiffies_64;
4640 	int i;
4641 
4642 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4643 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4644 
4645 		/*
4646 		 * If the record is older than dirty_expire_interval,
4647 		 * writeback on it has already started.  No need to kick it
4648 		 * off again.  Also, don't start a new one if there's
4649 		 * already one in flight.
4650 		 */
4651 		if (time_after64(frn->at, now - intv) &&
4652 		    atomic_read(&frn->done.cnt) == 1) {
4653 			frn->at = 0;
4654 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4655 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4656 					       WB_REASON_FOREIGN_FLUSH,
4657 					       &frn->done);
4658 		}
4659 	}
4660 }
4661 
4662 #else	/* CONFIG_CGROUP_WRITEBACK */
4663 
4664 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4665 {
4666 	return 0;
4667 }
4668 
4669 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4670 {
4671 }
4672 
4673 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4674 {
4675 }
4676 
4677 #endif	/* CONFIG_CGROUP_WRITEBACK */
4678 
4679 /*
4680  * DO NOT USE IN NEW FILES.
4681  *
4682  * "cgroup.event_control" implementation.
4683  *
4684  * This is way over-engineered.  It tries to support fully configurable
4685  * events for each user.  Such a level of flexibility is completely
4686  * unnecessary, especially in light of the planned unified hierarchy.
4687  *
4688  * Please deprecate this and replace with something simpler if at all
4689  * possible.
4690  */
4691 
4692 /*
4693  * Unregister event and free resources.
4694  *
4695  * Gets called from workqueue.
4696  */
4697 static void memcg_event_remove(struct work_struct *work)
4698 {
4699 	struct mem_cgroup_event *event =
4700 		container_of(work, struct mem_cgroup_event, remove);
4701 	struct mem_cgroup *memcg = event->memcg;
4702 
4703 	remove_wait_queue(event->wqh, &event->wait);
4704 
4705 	event->unregister_event(memcg, event->eventfd);
4706 
4707 	/* Notify userspace the event is going away. */
4708 	eventfd_signal(event->eventfd, 1);
4709 
4710 	eventfd_ctx_put(event->eventfd);
4711 	kfree(event);
4712 	css_put(&memcg->css);
4713 }
4714 
4715 /*
4716  * Gets called on EPOLLHUP on eventfd when user closes it.
4717  *
4718  * Called with wqh->lock held and interrupts disabled.
4719  */
4720 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4721 			    int sync, void *key)
4722 {
4723 	struct mem_cgroup_event *event =
4724 		container_of(wait, struct mem_cgroup_event, wait);
4725 	struct mem_cgroup *memcg = event->memcg;
4726 	__poll_t flags = key_to_poll(key);
4727 
4728 	if (flags & EPOLLHUP) {
4729 		/*
4730 		 * If the event has been detached at cgroup removal, we
4731 		 * can simply return knowing the other side will clean up
4732 		 * for us.
4733 		 *
4734 		 * We can't race against event freeing since the other
4735 		 * side will require wqh->lock via remove_wait_queue(),
4736 		 * which we hold.
4737 		 */
4738 		spin_lock(&memcg->event_list_lock);
4739 		if (!list_empty(&event->list)) {
4740 			list_del_init(&event->list);
4741 			/*
4742 			 * We are in atomic context, but memcg_event_remove()
4743 			 * may sleep, so we have to call it from a workqueue.
4744 			 */
4745 			schedule_work(&event->remove);
4746 		}
4747 		spin_unlock(&memcg->event_list_lock);
4748 	}
4749 
4750 	return 0;
4751 }
4752 
4753 static void memcg_event_ptable_queue_proc(struct file *file,
4754 		wait_queue_head_t *wqh, poll_table *pt)
4755 {
4756 	struct mem_cgroup_event *event =
4757 		container_of(pt, struct mem_cgroup_event, pt);
4758 
4759 	event->wqh = wqh;
4760 	add_wait_queue(wqh, &event->wait);
4761 }
4762 
4763 /*
4764  * DO NOT USE IN NEW FILES.
4765  *
4766  * Parse input and register new cgroup event handler.
4767  *
4768  * Input must be in format '<event_fd> <control_fd> <args>'.
4769  * Interpretation of args is defined by control file implementation.
4770  */
4771 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4772 					 char *buf, size_t nbytes, loff_t off)
4773 {
4774 	struct cgroup_subsys_state *css = of_css(of);
4775 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4776 	struct mem_cgroup_event *event;
4777 	struct cgroup_subsys_state *cfile_css;
4778 	unsigned int efd, cfd;
4779 	struct fd efile;
4780 	struct fd cfile;
4781 	const char *name;
4782 	char *endp;
4783 	int ret;
4784 
4785 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
4786 		return -EOPNOTSUPP;
4787 
4788 	buf = strstrip(buf);
4789 
4790 	efd = simple_strtoul(buf, &endp, 10);
4791 	if (*endp != ' ')
4792 		return -EINVAL;
4793 	buf = endp + 1;
4794 
4795 	cfd = simple_strtoul(buf, &endp, 10);
4796 	if ((*endp != ' ') && (*endp != '\0'))
4797 		return -EINVAL;
4798 	buf = endp + 1;
4799 
4800 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4801 	if (!event)
4802 		return -ENOMEM;
4803 
4804 	event->memcg = memcg;
4805 	INIT_LIST_HEAD(&event->list);
4806 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4807 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4808 	INIT_WORK(&event->remove, memcg_event_remove);
4809 
4810 	efile = fdget(efd);
4811 	if (!efile.file) {
4812 		ret = -EBADF;
4813 		goto out_kfree;
4814 	}
4815 
4816 	event->eventfd = eventfd_ctx_fileget(efile.file);
4817 	if (IS_ERR(event->eventfd)) {
4818 		ret = PTR_ERR(event->eventfd);
4819 		goto out_put_efile;
4820 	}
4821 
4822 	cfile = fdget(cfd);
4823 	if (!cfile.file) {
4824 		ret = -EBADF;
4825 		goto out_put_eventfd;
4826 	}
4827 
4828 	/* the process needs read permission on the control file */
4829 	/* AV: shouldn't we check that it's been opened for read instead? */
4830 	ret = file_permission(cfile.file, MAY_READ);
4831 	if (ret < 0)
4832 		goto out_put_cfile;
4833 
4834 	/*
4835 	 * Determine the event callbacks and set them in @event.  This used
4836 	 * to be done via struct cftype but cgroup core no longer knows
4837 	 * about these events.  The following is crude but the whole thing
4838 	 * is for compatibility anyway.
4839 	 *
4840 	 * DO NOT ADD NEW FILES.
4841 	 */
4842 	name = cfile.file->f_path.dentry->d_name.name;
4843 
4844 	if (!strcmp(name, "memory.usage_in_bytes")) {
4845 		event->register_event = mem_cgroup_usage_register_event;
4846 		event->unregister_event = mem_cgroup_usage_unregister_event;
4847 	} else if (!strcmp(name, "memory.oom_control")) {
4848 		event->register_event = mem_cgroup_oom_register_event;
4849 		event->unregister_event = mem_cgroup_oom_unregister_event;
4850 	} else if (!strcmp(name, "memory.pressure_level")) {
4851 		event->register_event = vmpressure_register_event;
4852 		event->unregister_event = vmpressure_unregister_event;
4853 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4854 		event->register_event = memsw_cgroup_usage_register_event;
4855 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4856 	} else {
4857 		ret = -EINVAL;
4858 		goto out_put_cfile;
4859 	}
4860 
4861 	/*
4862 	 * Verify that @cfile belongs to @css.  Also, remaining events are
4863 	 * automatically removed on cgroup destruction but the removal is
4864 	 * asynchronous, so take an extra ref on @css.
4865 	 */
4866 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4867 					       &memory_cgrp_subsys);
4868 	ret = -EINVAL;
4869 	if (IS_ERR(cfile_css))
4870 		goto out_put_cfile;
4871 	if (cfile_css != css) {
4872 		css_put(cfile_css);
4873 		goto out_put_cfile;
4874 	}
4875 
4876 	ret = event->register_event(memcg, event->eventfd, buf);
4877 	if (ret)
4878 		goto out_put_css;
4879 
4880 	vfs_poll(efile.file, &event->pt);
4881 
4882 	spin_lock_irq(&memcg->event_list_lock);
4883 	list_add(&event->list, &memcg->event_list);
4884 	spin_unlock_irq(&memcg->event_list_lock);
4885 
4886 	fdput(cfile);
4887 	fdput(efile);
4888 
4889 	return nbytes;
4890 
4891 out_put_css:
4892 	css_put(css);
4893 out_put_cfile:
4894 	fdput(cfile);
4895 out_put_eventfd:
4896 	eventfd_ctx_put(event->eventfd);
4897 out_put_efile:
4898 	fdput(efile);
4899 out_kfree:
4900 	kfree(event);
4901 
4902 	return ret;
4903 }
4904 
4905 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4906 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4907 {
4908 	/*
4909 	 * Deprecated.
4910 	 * Please take a look at tools/cgroup/memcg_slabinfo.py.
4911 	 */
4912 	return 0;
4913 }
4914 #endif
4915 
4916 static struct cftype mem_cgroup_legacy_files[] = {
4917 	{
4918 		.name = "usage_in_bytes",
4919 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4920 		.read_u64 = mem_cgroup_read_u64,
4921 	},
4922 	{
4923 		.name = "max_usage_in_bytes",
4924 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4925 		.write = mem_cgroup_reset,
4926 		.read_u64 = mem_cgroup_read_u64,
4927 	},
4928 	{
4929 		.name = "limit_in_bytes",
4930 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4931 		.write = mem_cgroup_write,
4932 		.read_u64 = mem_cgroup_read_u64,
4933 	},
4934 	{
4935 		.name = "soft_limit_in_bytes",
4936 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4937 		.write = mem_cgroup_write,
4938 		.read_u64 = mem_cgroup_read_u64,
4939 	},
4940 	{
4941 		.name = "failcnt",
4942 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4943 		.write = mem_cgroup_reset,
4944 		.read_u64 = mem_cgroup_read_u64,
4945 	},
4946 	{
4947 		.name = "stat",
4948 		.seq_show = memcg_stat_show,
4949 	},
4950 	{
4951 		.name = "force_empty",
4952 		.write = mem_cgroup_force_empty_write,
4953 	},
4954 	{
4955 		.name = "use_hierarchy",
4956 		.write_u64 = mem_cgroup_hierarchy_write,
4957 		.read_u64 = mem_cgroup_hierarchy_read,
4958 	},
4959 	{
4960 		.name = "cgroup.event_control",		/* XXX: for compat */
4961 		.write = memcg_write_event_control,
4962 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4963 	},
4964 	{
4965 		.name = "swappiness",
4966 		.read_u64 = mem_cgroup_swappiness_read,
4967 		.write_u64 = mem_cgroup_swappiness_write,
4968 	},
4969 	{
4970 		.name = "move_charge_at_immigrate",
4971 		.read_u64 = mem_cgroup_move_charge_read,
4972 		.write_u64 = mem_cgroup_move_charge_write,
4973 	},
4974 	{
4975 		.name = "oom_control",
4976 		.seq_show = mem_cgroup_oom_control_read,
4977 		.write_u64 = mem_cgroup_oom_control_write,
4978 	},
4979 	{
4980 		.name = "pressure_level",
4981 	},
4982 #ifdef CONFIG_NUMA
4983 	{
4984 		.name = "numa_stat",
4985 		.seq_show = memcg_numa_stat_show,
4986 	},
4987 #endif
4988 	{
4989 		.name = "kmem.limit_in_bytes",
4990 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4991 		.write = mem_cgroup_write,
4992 		.read_u64 = mem_cgroup_read_u64,
4993 	},
4994 	{
4995 		.name = "kmem.usage_in_bytes",
4996 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4997 		.read_u64 = mem_cgroup_read_u64,
4998 	},
4999 	{
5000 		.name = "kmem.failcnt",
5001 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5002 		.write = mem_cgroup_reset,
5003 		.read_u64 = mem_cgroup_read_u64,
5004 	},
5005 	{
5006 		.name = "kmem.max_usage_in_bytes",
5007 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5008 		.write = mem_cgroup_reset,
5009 		.read_u64 = mem_cgroup_read_u64,
5010 	},
5011 #if defined(CONFIG_MEMCG_KMEM) && \
5012 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5013 	{
5014 		.name = "kmem.slabinfo",
5015 		.seq_show = mem_cgroup_slab_show,
5016 	},
5017 #endif
5018 	{
5019 		.name = "kmem.tcp.limit_in_bytes",
5020 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5021 		.write = mem_cgroup_write,
5022 		.read_u64 = mem_cgroup_read_u64,
5023 	},
5024 	{
5025 		.name = "kmem.tcp.usage_in_bytes",
5026 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5027 		.read_u64 = mem_cgroup_read_u64,
5028 	},
5029 	{
5030 		.name = "kmem.tcp.failcnt",
5031 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5032 		.write = mem_cgroup_reset,
5033 		.read_u64 = mem_cgroup_read_u64,
5034 	},
5035 	{
5036 		.name = "kmem.tcp.max_usage_in_bytes",
5037 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5038 		.write = mem_cgroup_reset,
5039 		.read_u64 = mem_cgroup_read_u64,
5040 	},
5041 	{ },	/* terminate */
5042 };
5043 
5044 /*
5045  * Private memory cgroup IDR
5046  *
5047  * Swap-out records and page cache shadow entries need to store memcg
5048  * references in constrained space, so we maintain an ID space that is
5049  * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5050  * memory-controlled cgroups to 64k.
5051  *
5052  * However, there usually are many references to the offline CSS after
5053  * the cgroup has been destroyed, such as page cache or reclaimable
5054  * slab objects, that don't need to hang on to the ID. We want to keep
5055  * those dead CSS from occupying IDs, or we might quickly exhaust the
5056  * relatively small ID space and prevent the creation of new cgroups
5057  * even when there are much fewer than 64k cgroups - possibly none.
5058  *
5059  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5060  * be freed and recycled when it's no longer needed, which is usually
5061  * when the CSS is offlined.
5062  *
5063  * The only exception to that are records of swapped out tmpfs/shmem
5064  * pages that need to be attributed to live ancestors on swapin. But
5065  * those references are manageable from userspace.
5066  */
5067 
5068 static DEFINE_IDR(mem_cgroup_idr);
5069 
5070 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5071 {
5072 	if (memcg->id.id > 0) {
5073 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5074 		memcg->id.id = 0;
5075 	}
5076 }
5077 
5078 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5079 						  unsigned int n)
5080 {
5081 	refcount_add(n, &memcg->id.ref);
5082 }
5083 
5084 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5085 {
5086 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5087 		mem_cgroup_id_remove(memcg);
5088 
5089 		/* Memcg ID pins CSS */
5090 		css_put(&memcg->css);
5091 	}
5092 }
5093 
5094 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5095 {
5096 	mem_cgroup_id_put_many(memcg, 1);
5097 }
5098 
5099 /**
5100  * mem_cgroup_from_id - look up a memcg from a memcg id
5101  * @id: the memcg id to look up
5102  *
5103  * Caller must hold rcu_read_lock().
5104  */
5105 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5106 {
5107 	WARN_ON_ONCE(!rcu_read_lock_held());
5108 	return idr_find(&mem_cgroup_idr, id);
5109 }
5110 
5111 #ifdef CONFIG_SHRINKER_DEBUG
5112 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5113 {
5114 	struct cgroup *cgrp;
5115 	struct cgroup_subsys_state *css;
5116 	struct mem_cgroup *memcg;
5117 
5118 	cgrp = cgroup_get_from_id(ino);
5119 	if (!cgrp)
5120 		return ERR_PTR(-ENOENT);
5121 
5122 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5123 	if (css)
5124 		memcg = container_of(css, struct mem_cgroup, css);
5125 	else
5126 		memcg = ERR_PTR(-ENOENT);
5127 
5128 	cgroup_put(cgrp);
5129 
5130 	return memcg;
5131 }
5132 #endif
5133 
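/* Returns 0 on success, 1 if the per-node info could not be allocated. */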
5134 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5135 {
5136 	struct mem_cgroup_per_node *pn;
5137 
5138 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5139 	if (!pn)
5140 		return 1;
5141 
5142 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5143 						   GFP_KERNEL_ACCOUNT);
5144 	if (!pn->lruvec_stats_percpu) {
5145 		kfree(pn);
5146 		return 1;
5147 	}
5148 
5149 	lruvec_init(&pn->lruvec);
5150 	pn->memcg = memcg;
5151 
5152 	memcg->nodeinfo[node] = pn;
5153 	return 0;
5154 }
5155 
5156 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5157 {
5158 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5159 
5160 	if (!pn)
5161 		return;
5162 
5163 	free_percpu(pn->lruvec_stats_percpu);
5164 	kfree(pn);
5165 }
5166 
5167 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5168 {
5169 	int node;
5170 
5171 	for_each_node(node)
5172 		free_mem_cgroup_per_node_info(memcg, node);
5173 	free_percpu(memcg->vmstats_percpu);
5174 	kfree(memcg);
5175 }
5176 
5177 static void mem_cgroup_free(struct mem_cgroup *memcg)
5178 {
5179 	lru_gen_exit_memcg(memcg);
5180 	memcg_wb_domain_exit(memcg);
5181 	__mem_cgroup_free(memcg);
5182 }
5183 
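/*
 * Allocate a mem_cgroup skeleton: reserve an ID, allocate the per-cpu and
 * per-node statistics, and initialize locks, lists and the writeback
 * domain. The ID slot is only pointed at the new memcg at the very end.
 */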
5184 static struct mem_cgroup *mem_cgroup_alloc(void)
5185 {
5186 	struct mem_cgroup *memcg;
5187 	int node;
5188 	int __maybe_unused i;
5189 	long error = -ENOMEM;
5190 
5191 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5192 	if (!memcg)
5193 		return ERR_PTR(error);
5194 
5195 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5196 				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5197 	if (memcg->id.id < 0) {
5198 		error = memcg->id.id;
5199 		goto fail;
5200 	}
5201 
5202 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5203 						 GFP_KERNEL_ACCOUNT);
5204 	if (!memcg->vmstats_percpu)
5205 		goto fail;
5206 
5207 	for_each_node(node)
5208 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5209 			goto fail;
5210 
5211 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5212 		goto fail;
5213 
5214 	INIT_WORK(&memcg->high_work, high_work_func);
5215 	INIT_LIST_HEAD(&memcg->oom_notify);
5216 	mutex_init(&memcg->thresholds_lock);
5217 	spin_lock_init(&memcg->move_lock);
5218 	vmpressure_init(&memcg->vmpressure);
5219 	INIT_LIST_HEAD(&memcg->event_list);
5220 	spin_lock_init(&memcg->event_list_lock);
5221 	memcg->socket_pressure = jiffies;
5222 #ifdef CONFIG_MEMCG_KMEM
5223 	memcg->kmemcg_id = -1;
5224 	INIT_LIST_HEAD(&memcg->objcg_list);
5225 #endif
5226 #ifdef CONFIG_CGROUP_WRITEBACK
5227 	INIT_LIST_HEAD(&memcg->cgwb_list);
5228 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5229 		memcg->cgwb_frn[i].done =
5230 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5231 #endif
5232 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5233 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5234 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5235 	memcg->deferred_split_queue.split_queue_len = 0;
5236 #endif
5237 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5238 	lru_gen_init_memcg(memcg);
5239 	return memcg;
5240 fail:
5241 	mem_cgroup_id_remove(memcg);
5242 	__mem_cgroup_free(memcg);
5243 	return ERR_PTR(error);
5244 }
5245 
5246 static struct cgroup_subsys_state * __ref
5247 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5248 {
5249 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5250 	struct mem_cgroup *memcg, *old_memcg;
5251 
5252 	old_memcg = set_active_memcg(parent);
5253 	memcg = mem_cgroup_alloc();
5254 	set_active_memcg(old_memcg);
5255 	if (IS_ERR(memcg))
5256 		return ERR_CAST(memcg);
5257 
5258 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5259 	memcg->soft_limit = PAGE_COUNTER_MAX;
5260 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5261 	memcg->zswap_max = PAGE_COUNTER_MAX;
5262 #endif
5263 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5264 	if (parent) {
5265 		memcg->swappiness = mem_cgroup_swappiness(parent);
5266 		memcg->oom_kill_disable = parent->oom_kill_disable;
5267 
5268 		page_counter_init(&memcg->memory, &parent->memory);
5269 		page_counter_init(&memcg->swap, &parent->swap);
5270 		page_counter_init(&memcg->kmem, &parent->kmem);
5271 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5272 	} else {
5273 		page_counter_init(&memcg->memory, NULL);
5274 		page_counter_init(&memcg->swap, NULL);
5275 		page_counter_init(&memcg->kmem, NULL);
5276 		page_counter_init(&memcg->tcpmem, NULL);
5277 
5278 		root_mem_cgroup = memcg;
5279 		return &memcg->css;
5280 	}
5281 
5282 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5283 		static_branch_inc(&memcg_sockets_enabled_key);
5284 
5285 	return &memcg->css;
5286 }
5287 
5288 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5289 {
5290 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5291 
5292 	if (memcg_online_kmem(memcg))
5293 		goto remove_id;
5294 
5295 	/*
5296 	 * A memcg must be visible to expand_shrinker_info()
5297 	 * by the time the maps are allocated. So we allocate the maps
5298 	 * here, where for_each_mem_cgroup() can no longer skip it.
5299 	 */
5300 	if (alloc_shrinker_info(memcg))
5301 		goto offline_kmem;
5302 
5303 	/* Online state pins memcg ID, memcg ID pins CSS */
5304 	refcount_set(&memcg->id.ref, 1);
5305 	css_get(css);
5306 
5307 	if (unlikely(mem_cgroup_is_root(memcg)))
5308 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5309 				   2UL*HZ);
5310 	return 0;
5311 offline_kmem:
5312 	memcg_offline_kmem(memcg);
5313 remove_id:
5314 	mem_cgroup_id_remove(memcg);
5315 	return -ENOMEM;
5316 }
5317 
5318 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5319 {
5320 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5321 	struct mem_cgroup_event *event, *tmp;
5322 
5323 	/*
5324 	 * Unregister events and notify userspace.
5325 	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5326 	 * directory to avoid a race between userspace and kernelspace.
5327 	 */
5328 	spin_lock_irq(&memcg->event_list_lock);
5329 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5330 		list_del_init(&event->list);
5331 		schedule_work(&event->remove);
5332 	}
5333 	spin_unlock_irq(&memcg->event_list_lock);
5334 
5335 	page_counter_set_min(&memcg->memory, 0);
5336 	page_counter_set_low(&memcg->memory, 0);
5337 
5338 	memcg_offline_kmem(memcg);
5339 	reparent_shrinker_deferred(memcg);
5340 	wb_memcg_offline(memcg);
5341 
5342 	drain_all_stock(memcg);
5343 
5344 	mem_cgroup_id_put(memcg);
5345 }
5346 
5347 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5348 {
5349 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5350 
5351 	invalidate_reclaim_iterators(memcg);
5352 }
5353 
5354 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5355 {
5356 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5357 	int __maybe_unused i;
5358 
5359 #ifdef CONFIG_CGROUP_WRITEBACK
5360 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5361 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5362 #endif
5363 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5364 		static_branch_dec(&memcg_sockets_enabled_key);
5365 
5366 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5367 		static_branch_dec(&memcg_sockets_enabled_key);
5368 
5369 	vmpressure_cleanup(&memcg->vmpressure);
5370 	cancel_work_sync(&memcg->high_work);
5371 	mem_cgroup_remove_from_trees(memcg);
5372 	free_shrinker_info(memcg);
5373 	mem_cgroup_free(memcg);
5374 }
5375 
5376 /**
5377  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5378  * @css: the target css
5379  *
5380  * Reset the states of the mem_cgroup associated with @css.  This is
5381  * invoked when the userland requests disabling on the default hierarchy
5382  * but the memcg is pinned through dependency.  The memcg should stop
5383  * applying policies and should revert to the vanilla state as it may be
5384  * made visible again.
5385  *
5386  * The current implementation only resets the essential configurations.
5387  * This needs to be expanded to cover all the visible parts.
5388  */
5389 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5390 {
5391 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5392 
5393 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5394 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5395 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5396 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5397 	page_counter_set_min(&memcg->memory, 0);
5398 	page_counter_set_low(&memcg->memory, 0);
5399 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5400 	memcg->soft_limit = PAGE_COUNTER_MAX;
5401 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5402 	memcg_wb_domain_size_changed(memcg);
5403 }
5404 
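/*
 * rstat flush callback: fold this CPU's deltas since the previous flush
 * into the memcg's aggregated vmstats, events and per-node lruvec stats,
 * and queue the same deltas as pending updates on the parent.
 */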
5405 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5406 {
5407 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5408 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5409 	struct memcg_vmstats_percpu *statc;
5410 	long delta, v;
5411 	int i, nid;
5412 
5413 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5414 
5415 	for (i = 0; i < MEMCG_NR_STAT; i++) {
5416 		/*
5417 		 * Collect the aggregated propagation counts of groups
5418 		 * below us. We're in a per-cpu loop here and this is
5419 		 * a global counter, so the first cycle will get them.
5420 		 */
5421 		delta = memcg->vmstats.state_pending[i];
5422 		if (delta)
5423 			memcg->vmstats.state_pending[i] = 0;
5424 
5425 		/* Add CPU changes on this level since the last flush */
5426 		v = READ_ONCE(statc->state[i]);
5427 		if (v != statc->state_prev[i]) {
5428 			delta += v - statc->state_prev[i];
5429 			statc->state_prev[i] = v;
5430 		}
5431 
5432 		if (!delta)
5433 			continue;
5434 
5435 		/* Aggregate counts on this level and propagate upwards */
5436 		memcg->vmstats.state[i] += delta;
5437 		if (parent)
5438 			parent->vmstats.state_pending[i] += delta;
5439 	}
5440 
5441 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5442 		delta = memcg->vmstats.events_pending[i];
5443 		if (delta)
5444 			memcg->vmstats.events_pending[i] = 0;
5445 
5446 		v = READ_ONCE(statc->events[i]);
5447 		if (v != statc->events_prev[i]) {
5448 			delta += v - statc->events_prev[i];
5449 			statc->events_prev[i] = v;
5450 		}
5451 
5452 		if (!delta)
5453 			continue;
5454 
5455 		memcg->vmstats.events[i] += delta;
5456 		if (parent)
5457 			parent->vmstats.events_pending[i] += delta;
5458 	}
5459 
5460 	for_each_node_state(nid, N_MEMORY) {
5461 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5462 		struct mem_cgroup_per_node *ppn = NULL;
5463 		struct lruvec_stats_percpu *lstatc;
5464 
5465 		if (parent)
5466 			ppn = parent->nodeinfo[nid];
5467 
5468 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5469 
5470 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5471 			delta = pn->lruvec_stats.state_pending[i];
5472 			if (delta)
5473 				pn->lruvec_stats.state_pending[i] = 0;
5474 
5475 			v = READ_ONCE(lstatc->state[i]);
5476 			if (v != lstatc->state_prev[i]) {
5477 				delta += v - lstatc->state_prev[i];
5478 				lstatc->state_prev[i] = v;
5479 			}
5480 
5481 			if (!delta)
5482 				continue;
5483 
5484 			pn->lruvec_stats.state[i] += delta;
5485 			if (ppn)
5486 				ppn->lruvec_stats.state_pending[i] += delta;
5487 		}
5488 	}
5489 }
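
/*
 * Illustrative example of the aggregation above: if a child memcg bumped one
 * of its per-CPU counters by 5 on the CPU being flushed, delta is 5, the
 * child's vmstats.state[] grows by 5 and the parent's state_pending[] is
 * credited with 5; the parent's own flush then folds that 5 into its
 * vmstats.state[] and propagates it further up the tree.
 */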
5490 
5491 #ifdef CONFIG_MMU
5492 /* Handlers for move charge at task migration. */
5493 static int mem_cgroup_do_precharge(unsigned long count)
5494 {
5495 	int ret;
5496 
5497 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5498 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5499 	if (!ret) {
5500 		mc.precharge += count;
5501 		return ret;
5502 	}
5503 
5504 	/* Try charges one by one with reclaim, but do not retry */
5505 	while (count--) {
5506 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5507 		if (ret)
5508 			return ret;
5509 		mc.precharge++;
5510 		cond_resched();
5511 	}
5512 	return 0;
5513 }
5514 
5515 union mc_target {
5516 	struct page	*page;
5517 	swp_entry_t	ent;
5518 };
5519 
5520 enum mc_target_type {
5521 	MC_TARGET_NONE = 0,
5522 	MC_TARGET_PAGE,
5523 	MC_TARGET_SWAP,
5524 	MC_TARGET_DEVICE,
5525 };
5526 
5527 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5528 						unsigned long addr, pte_t ptent)
5529 {
5530 	struct page *page = vm_normal_page(vma, addr, ptent);
5531 
5532 	if (!page || !page_mapped(page))
5533 		return NULL;
5534 	if (PageAnon(page)) {
5535 		if (!(mc.flags & MOVE_ANON))
5536 			return NULL;
5537 	} else {
5538 		if (!(mc.flags & MOVE_FILE))
5539 			return NULL;
5540 	}
5541 	if (!get_page_unless_zero(page))
5542 		return NULL;
5543 
5544 	return page;
5545 }
5546 
5547 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5548 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5549 			pte_t ptent, swp_entry_t *entry)
5550 {
5551 	struct page *page = NULL;
5552 	swp_entry_t ent = pte_to_swp_entry(ptent);
5553 
5554 	if (!(mc.flags & MOVE_ANON))
5555 		return NULL;
5556 
5557 	/*
5558 	 * Handle device private pages that are not accessible by the CPU, but
5559 	 * stored as special swap entries in the page table.
5560 	 */
5561 	if (is_device_private_entry(ent)) {
5562 		page = pfn_swap_entry_to_page(ent);
5563 		if (!get_page_unless_zero(page))
5564 			return NULL;
5565 		return page;
5566 	}
5567 
5568 	if (non_swap_entry(ent))
5569 		return NULL;
5570 
5571 	/*
5572 	 * Because lookup_swap_cache() updates some statistics counter,
5573 	 * we call find_get_page() with swapper_space directly.
5574 	 */
5575 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5576 	entry->val = ent.val;
5577 
5578 	return page;
5579 }
5580 #else
5581 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5582 			pte_t ptent, swp_entry_t *entry)
5583 {
5584 	return NULL;
5585 }
5586 #endif
5587 
5588 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5589 			unsigned long addr, pte_t ptent)
5590 {
5591 	if (!vma->vm_file) /* anonymous vma */
5592 		return NULL;
5593 	if (!(mc.flags & MOVE_FILE))
5594 		return NULL;
5595 
5596 	/* page is moved even if it's not RSS of this task (page-faulted). */
5597 	/* shmem/tmpfs may report page out on swap: account for that too. */
5598 	return find_get_incore_page(vma->vm_file->f_mapping,
5599 			linear_page_index(vma, addr));
5600 }
5601 
5602 /**
5603  * mem_cgroup_move_account - move account of the page
5604  * @page: the page
5605  * @compound: charge the page as compound or small page
5606  * @from: mem_cgroup which the page is moved from.
5607  * @to:	mem_cgroup which the page is moved to. @from != @to.
5608  *
5609  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5610  *
5611  * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
5612  * from the old cgroup.
5613  */
5614 static int mem_cgroup_move_account(struct page *page,
5615 				   bool compound,
5616 				   struct mem_cgroup *from,
5617 				   struct mem_cgroup *to)
5618 {
5619 	struct folio *folio = page_folio(page);
5620 	struct lruvec *from_vec, *to_vec;
5621 	struct pglist_data *pgdat;
5622 	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5623 	int nid, ret;
5624 
5625 	VM_BUG_ON(from == to);
5626 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5627 	VM_BUG_ON(compound && !folio_test_large(folio));
5628 
5629 	/*
5630 	 * Prevent mem_cgroup_migrate() from looking at
5631 	 * page's memory cgroup of its source page while we change it.
5632 	 */
5633 	ret = -EBUSY;
5634 	if (!folio_trylock(folio))
5635 		goto out;
5636 
5637 	ret = -EINVAL;
5638 	if (folio_memcg(folio) != from)
5639 		goto out_unlock;
5640 
5641 	pgdat = folio_pgdat(folio);
5642 	from_vec = mem_cgroup_lruvec(from, pgdat);
5643 	to_vec = mem_cgroup_lruvec(to, pgdat);
5644 
5645 	folio_memcg_lock(folio);
5646 
5647 	if (folio_test_anon(folio)) {
5648 		if (folio_mapped(folio)) {
5649 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5650 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5651 			if (folio_test_transhuge(folio)) {
5652 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5653 						   -nr_pages);
5654 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5655 						   nr_pages);
5656 			}
5657 		}
5658 	} else {
5659 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5660 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5661 
5662 		if (folio_test_swapbacked(folio)) {
5663 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5664 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5665 		}
5666 
5667 		if (folio_mapped(folio)) {
5668 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5669 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5670 		}
5671 
5672 		if (folio_test_dirty(folio)) {
5673 			struct address_space *mapping = folio_mapping(folio);
5674 
5675 			if (mapping_can_writeback(mapping)) {
5676 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5677 						   -nr_pages);
5678 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5679 						   nr_pages);
5680 			}
5681 		}
5682 	}
5683 
5684 	if (folio_test_writeback(folio)) {
5685 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5686 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5687 	}
5688 
5689 	/*
5690 	 * All state has been migrated, let's switch to the new memcg.
5691 	 *
5692 	 * It is safe to change page's memcg here because the page
5693 	 * is referenced, charged, isolated, and locked: we can't race
5694 	 * with (un)charging, migration, LRU putback, or anything else
5695 	 * that would rely on a stable page's memory cgroup.
5696 	 *
5697 	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5698 	 * to save space. As soon as we switch page's memory cgroup to a
5699 	 * new memcg that isn't locked, the above state can change
5700 	 * concurrently again. Make sure we're truly done with it.
5701 	 */
5702 	smp_mb();
5703 
5704 	css_get(&to->css);
5705 	css_put(&from->css);
5706 
5707 	folio->memcg_data = (unsigned long)to;
5708 
5709 	__folio_memcg_unlock(from);
5710 
5711 	ret = 0;
5712 	nid = folio_nid(folio);
5713 
5714 	local_irq_disable();
5715 	mem_cgroup_charge_statistics(to, nr_pages);
5716 	memcg_check_events(to, nid);
5717 	mem_cgroup_charge_statistics(from, -nr_pages);
5718 	memcg_check_events(from, nid);
5719 	local_irq_enable();
5720 out_unlock:
5721 	folio_unlock(folio);
5722 out:
5723 	return ret;
5724 }
5725 
5726 /**
5727  * get_mctgt_type - get target type of moving charge
5728  * @vma: the vma the pte to be checked belongs to
5729  * @addr: the address corresponding to the pte to be checked
5730  * @ptent: the pte to be checked
5731  * @target: the pointer in which the target page or swap entry will be stored (can be NULL)
5732  *
5733  * Returns
5734  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5735  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5736  *     move charge. If @target is not NULL, the page is stored in target->page
5737  *     with an extra refcount taken (callers should handle it).
5738  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5739  *     target for charge migration. If @target is not NULL, the entry is stored
5740  *     in target->ent.
5741  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is device memory and
5742  *     thus not on the lru.
5743  *     For now such a page is charged like a regular page would be, as for all
5744  *     intents and purposes it is just special memory taking the place of a
5745  *     regular page.
5746  *
5747  *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5748  *
5749  * Called with pte lock held.
5750  */
5751 
5752 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5753 		unsigned long addr, pte_t ptent, union mc_target *target)
5754 {
5755 	struct page *page = NULL;
5756 	enum mc_target_type ret = MC_TARGET_NONE;
5757 	swp_entry_t ent = { .val = 0 };
5758 
5759 	if (pte_present(ptent))
5760 		page = mc_handle_present_pte(vma, addr, ptent);
5761 	else if (pte_none_mostly(ptent))
5762 		/*
5763 		 * PTE markers should be treated as a none pte here, separated
5764 		 * from other swap handling below.
5765 		 */
5766 		page = mc_handle_file_pte(vma, addr, ptent);
5767 	else if (is_swap_pte(ptent))
5768 		page = mc_handle_swap_pte(vma, ptent, &ent);
5769 
5770 	if (!page && !ent.val)
5771 		return ret;
5772 	if (page) {
5773 		/*
5774 		 * Do only loose check w/o serialization.
5775 		 * mem_cgroup_move_account() checks the page is valid or
5776 		 * not under LRU exclusion.
5777 		 */
5778 		if (page_memcg(page) == mc.from) {
5779 			ret = MC_TARGET_PAGE;
5780 			if (is_device_private_page(page) ||
5781 			    is_device_coherent_page(page))
5782 				ret = MC_TARGET_DEVICE;
5783 			if (target)
5784 				target->page = page;
5785 		}
5786 		if (!ret || !target)
5787 			put_page(page);
5788 	}
5789 	/*
5790 	 * There is a swap entry and a page doesn't exist or isn't charged.
5791 	 * But we cannot move a tail-page in a THP.
5792 	 */
5793 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5794 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5795 		ret = MC_TARGET_SWAP;
5796 		if (target)
5797 			target->ent = ent;
5798 	}
5799 	return ret;
5800 }
5801 
5802 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5803 /*
5804  * We don't consider PMD mapped swapping or file mapped pages because THP does
5805  * not support them for now.
5806  * Caller should make sure that pmd_trans_huge(pmd) is true.
5807  */
5808 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5809 		unsigned long addr, pmd_t pmd, union mc_target *target)
5810 {
5811 	struct page *page = NULL;
5812 	enum mc_target_type ret = MC_TARGET_NONE;
5813 
5814 	if (unlikely(is_swap_pmd(pmd))) {
5815 		VM_BUG_ON(thp_migration_supported() &&
5816 				  !is_pmd_migration_entry(pmd));
5817 		return ret;
5818 	}
5819 	page = pmd_page(pmd);
5820 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5821 	if (!(mc.flags & MOVE_ANON))
5822 		return ret;
5823 	if (page_memcg(page) == mc.from) {
5824 		ret = MC_TARGET_PAGE;
5825 		if (target) {
5826 			get_page(page);
5827 			target->page = page;
5828 		}
5829 	}
5830 	return ret;
5831 }
5832 #else
5833 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5834 		unsigned long addr, pmd_t pmd, union mc_target *target)
5835 {
5836 	return MC_TARGET_NONE;
5837 }
5838 #endif
5839 
5840 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5841 					unsigned long addr, unsigned long end,
5842 					struct mm_walk *walk)
5843 {
5844 	struct vm_area_struct *vma = walk->vma;
5845 	pte_t *pte;
5846 	spinlock_t *ptl;
5847 
5848 	ptl = pmd_trans_huge_lock(pmd, vma);
5849 	if (ptl) {
5850 		/*
5851 		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
5852 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
5853 		 * this might change.
5854 		 */
5855 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5856 			mc.precharge += HPAGE_PMD_NR;
5857 		spin_unlock(ptl);
5858 		return 0;
5859 	}
5860 
5861 	if (pmd_trans_unstable(pmd))
5862 		return 0;
5863 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5864 	for (; addr != end; pte++, addr += PAGE_SIZE)
5865 		if (get_mctgt_type(vma, addr, *pte, NULL))
5866 			mc.precharge++;	/* increment precharge temporarily */
5867 	pte_unmap_unlock(pte - 1, ptl);
5868 	cond_resched();
5869 
5870 	return 0;
5871 }
5872 
5873 static const struct mm_walk_ops precharge_walk_ops = {
5874 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5875 };
5876 
5877 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5878 {
5879 	unsigned long precharge;
5880 
5881 	mmap_read_lock(mm);
5882 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5883 	mmap_read_unlock(mm);
5884 
5885 	precharge = mc.precharge;
5886 	mc.precharge = 0;
5887 
5888 	return precharge;
5889 }
5890 
5891 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5892 {
5893 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5894 
5895 	VM_BUG_ON(mc.moving_task);
5896 	mc.moving_task = current;
5897 	return mem_cgroup_do_precharge(precharge);
5898 }
5899 
5900 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5901 static void __mem_cgroup_clear_mc(void)
5902 {
5903 	struct mem_cgroup *from = mc.from;
5904 	struct mem_cgroup *to = mc.to;
5905 
5906 	/* we must uncharge all the leftover precharges from mc.to */
5907 	if (mc.precharge) {
5908 		cancel_charge(mc.to, mc.precharge);
5909 		mc.precharge = 0;
5910 	}
5911 	/*
5912 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5913 	 * we must uncharge here.
5914 	 */
5915 	if (mc.moved_charge) {
5916 		cancel_charge(mc.from, mc.moved_charge);
5917 		mc.moved_charge = 0;
5918 	}
5919 	/* we must fixup refcnts and charges */
5920 	if (mc.moved_swap) {
5921 		/* uncharge swap account from the old cgroup */
5922 		if (!mem_cgroup_is_root(mc.from))
5923 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5924 
5925 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5926 
5927 		/*
5928 		 * we charged both to->memory and to->memsw, so we
5929 		 * should uncharge to->memory.
5930 		 */
5931 		if (!mem_cgroup_is_root(mc.to))
5932 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5933 
5934 		mc.moved_swap = 0;
5935 	}
5936 	memcg_oom_recover(from);
5937 	memcg_oom_recover(to);
5938 	wake_up_all(&mc.waitq);
5939 }
5940 
5941 static void mem_cgroup_clear_mc(void)
5942 {
5943 	struct mm_struct *mm = mc.mm;
5944 
5945 	/*
5946 	 * we must clear moving_task before waking up waiters at the end of
5947 	 * task migration.
5948 	 */
5949 	mc.moving_task = NULL;
5950 	__mem_cgroup_clear_mc();
5951 	spin_lock(&mc.lock);
5952 	mc.from = NULL;
5953 	mc.to = NULL;
5954 	mc.mm = NULL;
5955 	spin_unlock(&mc.lock);
5956 
5957 	mmput(mm);
5958 }
5959 
5960 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5961 {
5962 	struct cgroup_subsys_state *css;
5963 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5964 	struct mem_cgroup *from;
5965 	struct task_struct *leader, *p;
5966 	struct mm_struct *mm;
5967 	unsigned long move_flags;
5968 	int ret = 0;
5969 
5970 	/* charge immigration isn't supported on the default hierarchy */
5971 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5972 		return 0;
5973 
5974 	/*
5975 	 * Multi-process migrations only happen on the default hierarchy
5976 	 * where charge immigration is not used.  Perform charge
5977 	 * immigration if @tset contains a leader and whine if there are
5978 	 * multiple.
5979 	 */
5980 	p = NULL;
5981 	cgroup_taskset_for_each_leader(leader, css, tset) {
5982 		WARN_ON_ONCE(p);
5983 		p = leader;
5984 		memcg = mem_cgroup_from_css(css);
5985 	}
5986 	if (!p)
5987 		return 0;
5988 
5989 	/*
5990 	 * We are now committed to this value whatever it is. Changes in this
5991 	 * tunable will only affect upcoming migrations, not the current one.
5992 	 * So we need to save it, and carry it through the migration.
5993 	 */
5994 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5995 	if (!move_flags)
5996 		return 0;
5997 
5998 	from = mem_cgroup_from_task(p);
5999 
6000 	VM_BUG_ON(from == memcg);
6001 
6002 	mm = get_task_mm(p);
6003 	if (!mm)
6004 		return 0;
6005 	/* We move charges only when we move an owner of the mm */
6006 	if (mm->owner == p) {
6007 		VM_BUG_ON(mc.from);
6008 		VM_BUG_ON(mc.to);
6009 		VM_BUG_ON(mc.precharge);
6010 		VM_BUG_ON(mc.moved_charge);
6011 		VM_BUG_ON(mc.moved_swap);
6012 
6013 		spin_lock(&mc.lock);
6014 		mc.mm = mm;
6015 		mc.from = from;
6016 		mc.to = memcg;
6017 		mc.flags = move_flags;
6018 		spin_unlock(&mc.lock);
6019 		/* We set mc.moving_task later */
6020 
6021 		ret = mem_cgroup_precharge_mc(mm);
6022 		if (ret)
6023 			mem_cgroup_clear_mc();
6024 	} else {
6025 		mmput(mm);
6026 	}
6027 	return ret;
6028 }
6029 
6030 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6031 {
6032 	if (mc.to)
6033 		mem_cgroup_clear_mc();
6034 }
6035 
6036 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6037 				unsigned long addr, unsigned long end,
6038 				struct mm_walk *walk)
6039 {
6040 	int ret = 0;
6041 	struct vm_area_struct *vma = walk->vma;
6042 	pte_t *pte;
6043 	spinlock_t *ptl;
6044 	enum mc_target_type target_type;
6045 	union mc_target target;
6046 	struct page *page;
6047 
6048 	ptl = pmd_trans_huge_lock(pmd, vma);
6049 	if (ptl) {
6050 		if (mc.precharge < HPAGE_PMD_NR) {
6051 			spin_unlock(ptl);
6052 			return 0;
6053 		}
6054 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6055 		if (target_type == MC_TARGET_PAGE) {
6056 			page = target.page;
6057 			if (!isolate_lru_page(page)) {
6058 				if (!mem_cgroup_move_account(page, true,
6059 							     mc.from, mc.to)) {
6060 					mc.precharge -= HPAGE_PMD_NR;
6061 					mc.moved_charge += HPAGE_PMD_NR;
6062 				}
6063 				putback_lru_page(page);
6064 			}
6065 			put_page(page);
6066 		} else if (target_type == MC_TARGET_DEVICE) {
6067 			page = target.page;
6068 			if (!mem_cgroup_move_account(page, true,
6069 						     mc.from, mc.to)) {
6070 				mc.precharge -= HPAGE_PMD_NR;
6071 				mc.moved_charge += HPAGE_PMD_NR;
6072 			}
6073 			put_page(page);
6074 		}
6075 		spin_unlock(ptl);
6076 		return 0;
6077 	}
6078 
6079 	if (pmd_trans_unstable(pmd))
6080 		return 0;
6081 retry:
6082 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6083 	for (; addr != end; addr += PAGE_SIZE) {
6084 		pte_t ptent = *(pte++);
6085 		bool device = false;
6086 		swp_entry_t ent;
6087 
6088 		if (!mc.precharge)
6089 			break;
6090 
6091 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6092 		case MC_TARGET_DEVICE:
6093 			device = true;
6094 			fallthrough;
6095 		case MC_TARGET_PAGE:
6096 			page = target.page;
6097 			/*
6098 			 * We can have a part of the split pmd here. Moving it
6099 			 * can be done but it would be too convoluted so simply
6100 			 * ignore such a partial THP and keep it in original
6101 			 * memcg. There should be somebody mapping the head.
6102 			 */
6103 			if (PageTransCompound(page))
6104 				goto put;
6105 			if (!device && isolate_lru_page(page))
6106 				goto put;
6107 			if (!mem_cgroup_move_account(page, false,
6108 						mc.from, mc.to)) {
6109 				mc.precharge--;
6110 				/* we uncharge from mc.from later. */
6111 				mc.moved_charge++;
6112 			}
6113 			if (!device)
6114 				putback_lru_page(page);
6115 put:			/* get_mctgt_type() gets the page */
6116 			put_page(page);
6117 			break;
6118 		case MC_TARGET_SWAP:
6119 			ent = target.ent;
6120 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6121 				mc.precharge--;
6122 				mem_cgroup_id_get_many(mc.to, 1);
6123 				/* we fixup other refcnts and charges later. */
6124 				mc.moved_swap++;
6125 			}
6126 			break;
6127 		default:
6128 			break;
6129 		}
6130 	}
6131 	pte_unmap_unlock(pte - 1, ptl);
6132 	cond_resched();
6133 
6134 	if (addr != end) {
6135 		/*
6136 		 * We have consumed all precharges we got in can_attach().
6137 		 * We try charging one by one, but don't do any additional
6138 		 * charges to mc.to if we have already failed to charge once in the
6139 		 * attach() phase.
6140 		 */
6141 		ret = mem_cgroup_do_precharge(1);
6142 		if (!ret)
6143 			goto retry;
6144 	}
6145 
6146 	return ret;
6147 }
6148 
6149 static const struct mm_walk_ops charge_walk_ops = {
6150 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6151 };
6152 
6153 static void mem_cgroup_move_charge(void)
6154 {
6155 	lru_add_drain_all();
6156 	/*
6157 	 * Signal lock_page_memcg() to take the memcg's move_lock
6158 	 * while we're moving its pages to another memcg. Then wait
6159 	 * for already started RCU-only updates to finish.
6160 	 */
6161 	atomic_inc(&mc.from->moving_account);
6162 	synchronize_rcu();
6163 retry:
6164 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6165 		/*
6166 		 * Someone who is holding the mmap_lock might be waiting in
6167 		 * waitq. So we cancel all extra charges, wake up all waiters,
6168 		 * and retry. Because we cancel precharges, we might not be able
6169 		 * to move enough charges, but moving charge is a best-effort
6170 		 * feature anyway, so it wouldn't be a big problem.
6171 		 */
6172 		__mem_cgroup_clear_mc();
6173 		cond_resched();
6174 		goto retry;
6175 	}
6176 	/*
6177 	 * When we have consumed all precharges and failed in doing
6178 	 * additional charge, the page walk just aborts.
6179 	 */
6180 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6181 			NULL);
6182 
6183 	mmap_read_unlock(mc.mm);
6184 	atomic_dec(&mc.from->moving_account);
6185 }
6186 
6187 static void mem_cgroup_move_task(void)
6188 {
6189 	if (mc.to) {
6190 		mem_cgroup_move_charge();
6191 		mem_cgroup_clear_mc();
6192 	}
6193 }
6194 #else	/* !CONFIG_MMU */
6195 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6196 {
6197 	return 0;
6198 }
6199 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6200 {
6201 }
6202 static void mem_cgroup_move_task(void)
6203 {
6204 }
6205 #endif
6206 
6207 #ifdef CONFIG_LRU_GEN
6208 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6209 {
6210 	struct task_struct *task;
6211 	struct cgroup_subsys_state *css;
6212 
6213 	/* find the first leader if there is any */
6214 	cgroup_taskset_for_each_leader(task, css, tset)
6215 		break;
6216 
6217 	if (!task)
6218 		return;
6219 
6220 	task_lock(task);
6221 	if (task->mm && READ_ONCE(task->mm->owner) == task)
6222 		lru_gen_migrate_mm(task->mm);
6223 	task_unlock(task);
6224 }
6225 #else
6226 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6227 {
6228 }
6229 #endif /* CONFIG_LRU_GEN */
6230 
6231 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6232 {
6233 	if (value == PAGE_COUNTER_MAX)
6234 		seq_puts(m, "max\n");
6235 	else
6236 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6237 
6238 	return 0;
6239 }
6240 
6241 static u64 memory_current_read(struct cgroup_subsys_state *css,
6242 			       struct cftype *cft)
6243 {
6244 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6245 
6246 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6247 }
6248 
6249 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6250 			    struct cftype *cft)
6251 {
6252 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6253 
6254 	return (u64)memcg->memory.watermark * PAGE_SIZE;
6255 }
6256 
6257 static int memory_min_show(struct seq_file *m, void *v)
6258 {
6259 	return seq_puts_memcg_tunable(m,
6260 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6261 }
6262 
6263 static ssize_t memory_min_write(struct kernfs_open_file *of,
6264 				char *buf, size_t nbytes, loff_t off)
6265 {
6266 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6267 	unsigned long min;
6268 	int err;
6269 
6270 	buf = strstrip(buf);
6271 	err = page_counter_memparse(buf, "max", &min);
6272 	if (err)
6273 		return err;
6274 
6275 	page_counter_set_min(&memcg->memory, min);
6276 
6277 	return nbytes;
6278 }
6279 
6280 static int memory_low_show(struct seq_file *m, void *v)
6281 {
6282 	return seq_puts_memcg_tunable(m,
6283 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6284 }
6285 
6286 static ssize_t memory_low_write(struct kernfs_open_file *of,
6287 				char *buf, size_t nbytes, loff_t off)
6288 {
6289 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6290 	unsigned long low;
6291 	int err;
6292 
6293 	buf = strstrip(buf);
6294 	err = page_counter_memparse(buf, "max", &low);
6295 	if (err)
6296 		return err;
6297 
6298 	page_counter_set_low(&memcg->memory, low);
6299 
6300 	return nbytes;
6301 }
6302 
6303 static int memory_high_show(struct seq_file *m, void *v)
6304 {
6305 	return seq_puts_memcg_tunable(m,
6306 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6307 }
6308 
6309 static ssize_t memory_high_write(struct kernfs_open_file *of,
6310 				 char *buf, size_t nbytes, loff_t off)
6311 {
6312 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6313 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6314 	bool drained = false;
6315 	unsigned long high;
6316 	int err;
6317 
6318 	buf = strstrip(buf);
6319 	err = page_counter_memparse(buf, "max", &high);
6320 	if (err)
6321 		return err;
6322 
6323 	page_counter_set_high(&memcg->memory, high);
6324 
6325 	for (;;) {
6326 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6327 		unsigned long reclaimed;
6328 
6329 		if (nr_pages <= high)
6330 			break;
6331 
6332 		if (signal_pending(current))
6333 			break;
6334 
6335 		if (!drained) {
6336 			drain_all_stock(memcg);
6337 			drained = true;
6338 			continue;
6339 		}
6340 
6341 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6342 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6343 
6344 		if (!reclaimed && !nr_retries--)
6345 			break;
6346 	}
6347 
6348 	memcg_wb_domain_size_changed(memcg);
6349 	return nbytes;
6350 }
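
/*
 * Example usage of the memory.high interface implemented above (illustrative,
 * assuming the default cgroup v2 mount point): writing
 * "echo 256M > /sys/fs/cgroup/<group>/memory.high" sets the throttle limit
 * and synchronously reclaims the group down towards it; writing "max" clears
 * the limit again.
 */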
6351 
6352 static int memory_max_show(struct seq_file *m, void *v)
6353 {
6354 	return seq_puts_memcg_tunable(m,
6355 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6356 }
6357 
6358 static ssize_t memory_max_write(struct kernfs_open_file *of,
6359 				char *buf, size_t nbytes, loff_t off)
6360 {
6361 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6362 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6363 	bool drained = false;
6364 	unsigned long max;
6365 	int err;
6366 
6367 	buf = strstrip(buf);
6368 	err = page_counter_memparse(buf, "max", &max);
6369 	if (err)
6370 		return err;
6371 
6372 	xchg(&memcg->memory.max, max);
6373 
6374 	for (;;) {
6375 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6376 
6377 		if (nr_pages <= max)
6378 			break;
6379 
6380 		if (signal_pending(current))
6381 			break;
6382 
6383 		if (!drained) {
6384 			drain_all_stock(memcg);
6385 			drained = true;
6386 			continue;
6387 		}
6388 
6389 		if (nr_reclaims) {
6390 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6391 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6392 				nr_reclaims--;
6393 			continue;
6394 		}
6395 
6396 		memcg_memory_event(memcg, MEMCG_OOM);
6397 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6398 			break;
6399 	}
6400 
6401 	memcg_wb_domain_size_changed(memcg);
6402 	return nbytes;
6403 }
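
/*
 * Example usage of memory.max (illustrative, assuming the default cgroup v2
 * mount point): "echo 512M > /sys/fs/cgroup/<group>/memory.max" installs the
 * hard limit; as the loop above shows, usage beyond it is first reclaimed
 * and, if that keeps failing, resolved with an OOM kill inside the group.
 */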
6404 
6405 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6406 {
6407 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6408 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6409 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6410 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6411 	seq_printf(m, "oom_kill %lu\n",
6412 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6413 	seq_printf(m, "oom_group_kill %lu\n",
6414 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6415 }
6416 
6417 static int memory_events_show(struct seq_file *m, void *v)
6418 {
6419 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6420 
6421 	__memory_events_show(m, memcg->memory_events);
6422 	return 0;
6423 }
6424 
6425 static int memory_events_local_show(struct seq_file *m, void *v)
6426 {
6427 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6428 
6429 	__memory_events_show(m, memcg->memory_events_local);
6430 	return 0;
6431 }
6432 
6433 static int memory_stat_show(struct seq_file *m, void *v)
6434 {
6435 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6436 	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6437 
6438 	if (!buf)
6439 		return -ENOMEM;
6440 	memory_stat_format(memcg, buf, PAGE_SIZE);
6441 	seq_puts(m, buf);
6442 	kfree(buf);
6443 	return 0;
6444 }
6445 
6446 #ifdef CONFIG_NUMA
6447 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6448 						     int item)
6449 {
6450 	return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6451 }
6452 
6453 static int memory_numa_stat_show(struct seq_file *m, void *v)
6454 {
6455 	int i;
6456 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6457 
6458 	mem_cgroup_flush_stats();
6459 
6460 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6461 		int nid;
6462 
6463 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6464 			continue;
6465 
6466 		seq_printf(m, "%s", memory_stats[i].name);
6467 		for_each_node_state(nid, N_MEMORY) {
6468 			u64 size;
6469 			struct lruvec *lruvec;
6470 
6471 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6472 			size = lruvec_page_state_output(lruvec,
6473 							memory_stats[i].idx);
6474 			seq_printf(m, " N%d=%llu", nid, size);
6475 		}
6476 		seq_putc(m, '\n');
6477 	}
6478 
6479 	return 0;
6480 }
6481 #endif
6482 
6483 static int memory_oom_group_show(struct seq_file *m, void *v)
6484 {
6485 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6486 
6487 	seq_printf(m, "%d\n", memcg->oom_group);
6488 
6489 	return 0;
6490 }
6491 
6492 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6493 				      char *buf, size_t nbytes, loff_t off)
6494 {
6495 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6496 	int ret, oom_group;
6497 
6498 	buf = strstrip(buf);
6499 	if (!buf)
6500 		return -EINVAL;
6501 
6502 	ret = kstrtoint(buf, 0, &oom_group);
6503 	if (ret)
6504 		return ret;
6505 
6506 	if (oom_group != 0 && oom_group != 1)
6507 		return -EINVAL;
6508 
6509 	memcg->oom_group = oom_group;
6510 
6511 	return nbytes;
6512 }
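
/*
 * Example usage of memory.oom.group (illustrative):
 * "echo 1 > /sys/fs/cgroup/<group>/memory.oom.group" marks the group as an
 * indivisible workload, so an OOM kill triggered in it takes down the group
 * as a whole rather than a single task.
 */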
6513 
6514 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6515 			      size_t nbytes, loff_t off)
6516 {
6517 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6518 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6519 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6520 	unsigned int reclaim_options;
6521 	int err;
6522 
6523 	buf = strstrip(buf);
6524 	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6525 	if (err)
6526 		return err;
6527 
6528 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6529 	while (nr_reclaimed < nr_to_reclaim) {
6530 		unsigned long reclaimed;
6531 
6532 		if (signal_pending(current))
6533 			return -EINTR;
6534 
6535 		/*
6536 		 * This is the final attempt, drain percpu lru caches in the
6537 		 * hope of introducing more evictable pages for
6538 		 * try_to_free_mem_cgroup_pages().
6539 		 */
6540 		if (!nr_retries)
6541 			lru_add_drain_all();
6542 
6543 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6544 						nr_to_reclaim - nr_reclaimed,
6545 						GFP_KERNEL, reclaim_options);
6546 
6547 		if (!reclaimed && !nr_retries--)
6548 			return -EAGAIN;
6549 
6550 		nr_reclaimed += reclaimed;
6551 	}
6552 
6553 	return nbytes;
6554 }
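
/*
 * Example usage of memory.reclaim (illustrative):
 * "echo 1G > /sys/fs/cgroup/<group>/memory.reclaim" asks for up to 1G of
 * proactive reclaim from the group; per the loop above, the write returns
 * -EAGAIN if the target cannot be met within MAX_RECLAIM_RETRIES attempts.
 */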
6555 
6556 static struct cftype memory_files[] = {
6557 	{
6558 		.name = "current",
6559 		.flags = CFTYPE_NOT_ON_ROOT,
6560 		.read_u64 = memory_current_read,
6561 	},
6562 	{
6563 		.name = "peak",
6564 		.flags = CFTYPE_NOT_ON_ROOT,
6565 		.read_u64 = memory_peak_read,
6566 	},
6567 	{
6568 		.name = "min",
6569 		.flags = CFTYPE_NOT_ON_ROOT,
6570 		.seq_show = memory_min_show,
6571 		.write = memory_min_write,
6572 	},
6573 	{
6574 		.name = "low",
6575 		.flags = CFTYPE_NOT_ON_ROOT,
6576 		.seq_show = memory_low_show,
6577 		.write = memory_low_write,
6578 	},
6579 	{
6580 		.name = "high",
6581 		.flags = CFTYPE_NOT_ON_ROOT,
6582 		.seq_show = memory_high_show,
6583 		.write = memory_high_write,
6584 	},
6585 	{
6586 		.name = "max",
6587 		.flags = CFTYPE_NOT_ON_ROOT,
6588 		.seq_show = memory_max_show,
6589 		.write = memory_max_write,
6590 	},
6591 	{
6592 		.name = "events",
6593 		.flags = CFTYPE_NOT_ON_ROOT,
6594 		.file_offset = offsetof(struct mem_cgroup, events_file),
6595 		.seq_show = memory_events_show,
6596 	},
6597 	{
6598 		.name = "events.local",
6599 		.flags = CFTYPE_NOT_ON_ROOT,
6600 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6601 		.seq_show = memory_events_local_show,
6602 	},
6603 	{
6604 		.name = "stat",
6605 		.seq_show = memory_stat_show,
6606 	},
6607 #ifdef CONFIG_NUMA
6608 	{
6609 		.name = "numa_stat",
6610 		.seq_show = memory_numa_stat_show,
6611 	},
6612 #endif
6613 	{
6614 		.name = "oom.group",
6615 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6616 		.seq_show = memory_oom_group_show,
6617 		.write = memory_oom_group_write,
6618 	},
6619 	{
6620 		.name = "reclaim",
6621 		.flags = CFTYPE_NS_DELEGATABLE,
6622 		.write = memory_reclaim,
6623 	},
6624 	{ }	/* terminate */
6625 };
6626 
6627 struct cgroup_subsys memory_cgrp_subsys = {
6628 	.css_alloc = mem_cgroup_css_alloc,
6629 	.css_online = mem_cgroup_css_online,
6630 	.css_offline = mem_cgroup_css_offline,
6631 	.css_released = mem_cgroup_css_released,
6632 	.css_free = mem_cgroup_css_free,
6633 	.css_reset = mem_cgroup_css_reset,
6634 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
6635 	.can_attach = mem_cgroup_can_attach,
6636 	.attach = mem_cgroup_attach,
6637 	.cancel_attach = mem_cgroup_cancel_attach,
6638 	.post_attach = mem_cgroup_move_task,
6639 	.dfl_cftypes = memory_files,
6640 	.legacy_cftypes = mem_cgroup_legacy_files,
6641 	.early_init = 0,
6642 };
6643 
6644 /*
6645  * This function calculates an individual cgroup's effective
6646  * protection which is derived from its own memory.min/low, its
6647  * parent's and siblings' settings, as well as the actual memory
6648  * distribution in the tree.
6649  *
6650  * The following rules apply to the effective protection values:
6651  *
6652  * 1. At the first level of reclaim, effective protection is equal to
6653  *    the declared protection in memory.min and memory.low.
6654  *
6655  * 2. To enable safe delegation of the protection configuration, at
6656  *    subsequent levels the effective protection is capped to the
6657  *    parent's effective protection.
6658  *
6659  * 3. To make complex and dynamic subtrees easier to configure, the
6660  *    user is allowed to overcommit the declared protection at a given
6661  *    level. If that is the case, the parent's effective protection is
6662  *    distributed to the children in proportion to how much protection
6663  *    they have declared and how much of it they are utilizing.
6664  *
6665  *    This makes distribution proportional, but also work-conserving:
6666  *    if one cgroup claims much more protection than it uses memory,
6667  *    the unused remainder is available to its siblings.
6668  *
6669  * 4. Conversely, when the declared protection is undercommitted at a
6670  *    given level, the distribution of the larger parental protection
6671  *    budget is NOT proportional. A cgroup's protection from a sibling
6672  *    is capped to its own memory.min/low setting.
6673  *
6674  * 5. However, to allow protecting recursive subtrees from each other
6675  *    without having to declare each individual cgroup's fixed share
6676  *    of the ancestor's claim to protection, any unutilized -
6677  *    "floating" - protection from up the tree is distributed in
6678  *    proportion to each cgroup's *usage*. This makes the protection
6679  *    neutral wrt sibling cgroups and lets them compete freely over
6680  *    the shared parental protection budget, but it protects the
6681  *    subtree as a whole from neighboring subtrees.
6682  *
6683  * Note that 4. and 5. are not in conflict: 4. is about protecting
6684  * against immediate siblings whereas 5. is about protecting against
6685  * neighboring subtrees.
6686  */
6687 static unsigned long effective_protection(unsigned long usage,
6688 					  unsigned long parent_usage,
6689 					  unsigned long setting,
6690 					  unsigned long parent_effective,
6691 					  unsigned long siblings_protected)
6692 {
6693 	unsigned long protected;
6694 	unsigned long ep;
6695 
6696 	protected = min(usage, setting);
6697 	/*
6698 	 * If all cgroups at this level combined claim and use more
6699 	 * protection than what the parent affords them, distribute
6700 	 * shares in proportion to utilization.
6701 	 *
6702 	 * We are using actual utilization rather than the statically
6703 	 * claimed protection in order to be work-conserving: claimed
6704 	 * but unused protection is available to siblings that would
6705 	 * otherwise get a smaller chunk than what they claimed.
6706 	 */
6707 	if (siblings_protected > parent_effective)
6708 		return protected * parent_effective / siblings_protected;
6709 
6710 	/*
6711 	 * Ok, utilized protection of all children is within what the
6712 	 * parent affords them, so we know whatever this child claims
6713 	 * and utilizes is effectively protected.
6714 	 *
6715 	 * If there is unprotected usage beyond this value, reclaim
6716 	 * will apply pressure in proportion to that amount.
6717 	 *
6718 	 * If there is unutilized protection, the cgroup will be fully
6719 	 * shielded from reclaim, but we do return a smaller value for
6720 	 * protection than what the group could enjoy in theory. This
6721 	 * is okay. With the overcommit distribution above, effective
6722 	 * protection is always dependent on how memory is actually
6723 	 * consumed among the siblings anyway.
6724 	 */
6725 	ep = protected;
6726 
6727 	/*
6728 	 * If the children aren't claiming (all of) the protection
6729 	 * afforded to them by the parent, distribute the remainder in
6730 	 * proportion to the (unprotected) memory of each cgroup. That
6731 	 * way, cgroups that aren't explicitly prioritized wrt each
6732 	 * other compete freely over the allowance, but they are
6733 	 * collectively protected from neighboring trees.
6734 	 *
6735 	 * We're using unprotected memory for the weight so that if
6736 	 * some cgroups DO claim explicit protection, we don't protect
6737 	 * the same bytes twice.
6738 	 *
6739 	 * Check both usage and parent_usage against the respective
6740 	 * protected values. One should imply the other, but they
6741 	 * aren't read atomically - make sure the division is sane.
6742 	 */
6743 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6744 		return ep;
6745 	if (parent_effective > siblings_protected &&
6746 	    parent_usage > siblings_protected &&
6747 	    usage > protected) {
6748 		unsigned long unclaimed;
6749 
6750 		unclaimed = parent_effective - siblings_protected;
6751 		unclaimed *= usage - protected;
6752 		unclaimed /= parent_usage - siblings_protected;
6753 
6754 		ep += unclaimed;
6755 	}
6756 
6757 	return ep;
6758 }
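
/*
 * Worked example of the overcommit case above (illustrative numbers): with
 * parent_effective = 1G and two siblings that each declare and use 768M of
 * protection, siblings_protected = 1536M exceeds the parent's budget, so each
 * child's effective protection is scaled to 768M * 1G / 1536M = 512M.
 */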
6759 
6760 /**
6761  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6762  * @root: the top ancestor of the sub-tree being checked
6763  * @memcg: the memory cgroup to check
6764  *
6765  * WARNING: This function is not stateless! It can only be used as part
6766  *          of a top-down tree iteration, not for isolated queries.
6767  */
6768 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6769 				     struct mem_cgroup *memcg)
6770 {
6771 	unsigned long usage, parent_usage;
6772 	struct mem_cgroup *parent;
6773 
6774 	if (mem_cgroup_disabled())
6775 		return;
6776 
6777 	if (!root)
6778 		root = root_mem_cgroup;
6779 
6780 	/*
6781 	 * Effective values of the reclaim targets are ignored so they
6782 	 * can be stale. Have a look at mem_cgroup_protection for more
6783 	 * details.
6784 	 * TODO: calculation should be more robust so that we do not need
6785 	 * that special casing.
6786 	 */
6787 	if (memcg == root)
6788 		return;
6789 
6790 	usage = page_counter_read(&memcg->memory);
6791 	if (!usage)
6792 		return;
6793 
6794 	parent = parent_mem_cgroup(memcg);
6795 
6796 	if (parent == root) {
6797 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6798 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6799 		return;
6800 	}
6801 
6802 	parent_usage = page_counter_read(&parent->memory);
6803 
6804 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6805 			READ_ONCE(memcg->memory.min),
6806 			READ_ONCE(parent->memory.emin),
6807 			atomic_long_read(&parent->memory.children_min_usage)));
6808 
6809 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6810 			READ_ONCE(memcg->memory.low),
6811 			READ_ONCE(parent->memory.elow),
6812 			atomic_long_read(&parent->memory.children_low_usage)));
6813 }
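
/*
 * Sketch of the intended top-down usage (illustrative; the real caller sits
 * in the reclaim path):
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		...scan the memcg's LRUs according to emin/elow...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */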
6814 
6815 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6816 			gfp_t gfp)
6817 {
6818 	long nr_pages = folio_nr_pages(folio);
6819 	int ret;
6820 
6821 	ret = try_charge(memcg, gfp, nr_pages);
6822 	if (ret)
6823 		goto out;
6824 
6825 	css_get(&memcg->css);
6826 	commit_charge(folio, memcg);
6827 
6828 	local_irq_disable();
6829 	mem_cgroup_charge_statistics(memcg, nr_pages);
6830 	memcg_check_events(memcg, folio_nid(folio));
6831 	local_irq_enable();
6832 out:
6833 	return ret;
6834 }
6835 
6836 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6837 {
6838 	struct mem_cgroup *memcg;
6839 	int ret;
6840 
6841 	memcg = get_mem_cgroup_from_mm(mm);
6842 	ret = charge_memcg(folio, memcg, gfp);
6843 	css_put(&memcg->css);
6844 
6845 	return ret;
6846 }
6847 
6848 /**
6849  * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6850  * @page: page to charge
6851  * @mm: mm context of the victim
6852  * @gfp: reclaim mode
6853  * @entry: swap entry for which the page is allocated
6854  *
6855  * This function charges a page allocated for swapin. Please call this before
6856  * adding the page to the swapcache.
6857  *
6858  * Returns 0 on success. Otherwise, an error code is returned.
6859  */
6860 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6861 				  gfp_t gfp, swp_entry_t entry)
6862 {
6863 	struct folio *folio = page_folio(page);
6864 	struct mem_cgroup *memcg;
6865 	unsigned short id;
6866 	int ret;
6867 
6868 	if (mem_cgroup_disabled())
6869 		return 0;
6870 
6871 	id = lookup_swap_cgroup_id(entry);
6872 	rcu_read_lock();
6873 	memcg = mem_cgroup_from_id(id);
6874 	if (!memcg || !css_tryget_online(&memcg->css))
6875 		memcg = get_mem_cgroup_from_mm(mm);
6876 	rcu_read_unlock();
6877 
6878 	ret = charge_memcg(folio, memcg, gfp);
6879 
6880 	css_put(&memcg->css);
6881 	return ret;
6882 }
6883 
6884 /*
6885  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6886  * @entry: swap entry for which the page is charged
6887  *
6888  * Call this function after successfully adding the charged page to swapcache.
6889  *
6890  * Note: This function assumes the page for which the swap slot is being
6891  * uncharged is an order-0 page.
6892  */
6893 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6894 {
6895 	/*
6896 	 * Cgroup1's unified memory+swap counter has been charged with the
6897 	 * new swapcache page, finish the transfer by uncharging the swap
6898 	 * slot. The swap slot would also get uncharged when it dies, but
6899 	 * it can stick around indefinitely and we'd count the page twice
6900 	 * the entire time.
6901 	 *
6902 	 * Cgroup2 has separate resource counters for memory and swap,
6903 	 * so this is a non-issue here. Memory and swap charge lifetimes
6904 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
6905 	 * page to memory here, and uncharge swap when the slot is freed.
6906 	 */
6907 	if (!mem_cgroup_disabled() && do_memsw_account()) {
6908 		/*
6909 		 * The swap entry might not get freed for a long time,
6910 		 * let's not wait for it.  The page already received a
6911 		 * memory+swap charge, drop the swap entry duplicate.
6912 		 */
6913 		mem_cgroup_uncharge_swap(entry, 1);
6914 	}
6915 }
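
/*
 * Sketch of the call order a swapin site is expected to follow (illustrative;
 * the actual callers live in the swap cache code):
 *
 *	if (mem_cgroup_swapin_charge_page(page, mm, gfp, entry))
 *		...fail the swapin...
 *	...add the page to the swap cache...
 *	mem_cgroup_swapin_uncharge_swap(entry);
 */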
6916 
6917 struct uncharge_gather {
6918 	struct mem_cgroup *memcg;
6919 	unsigned long nr_memory;
6920 	unsigned long pgpgout;
6921 	unsigned long nr_kmem;
6922 	int nid;
6923 };
6924 
6925 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6926 {
6927 	memset(ug, 0, sizeof(*ug));
6928 }
6929 
6930 static void uncharge_batch(const struct uncharge_gather *ug)
6931 {
6932 	unsigned long flags;
6933 
6934 	if (ug->nr_memory) {
6935 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6936 		if (do_memsw_account())
6937 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6938 		if (ug->nr_kmem)
6939 			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
6940 		memcg_oom_recover(ug->memcg);
6941 	}
6942 
6943 	local_irq_save(flags);
6944 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6945 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6946 	memcg_check_events(ug->memcg, ug->nid);
6947 	local_irq_restore(flags);
6948 
6949 	/* drop reference from uncharge_folio */
6950 	css_put(&ug->memcg->css);
6951 }
6952 
6953 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
6954 {
6955 	long nr_pages;
6956 	struct mem_cgroup *memcg;
6957 	struct obj_cgroup *objcg;
6958 
6959 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
6960 
6961 	/*
6962 	 * Nobody should be changing or seriously looking at
6963 	 * folio memcg or objcg at this point, we have fully
6964 	 * exclusive access to the folio.
6965 	 */
6966 	if (folio_memcg_kmem(folio)) {
6967 		objcg = __folio_objcg(folio);
6968 		/*
6969 		 * This get matches the put at the end of the function and
6970 		 * kmem pages do not hold memcg references anymore.
6971 		 */
6972 		memcg = get_mem_cgroup_from_objcg(objcg);
6973 	} else {
6974 		memcg = __folio_memcg(folio);
6975 	}
6976 
6977 	if (!memcg)
6978 		return;
6979 
6980 	if (ug->memcg != memcg) {
6981 		if (ug->memcg) {
6982 			uncharge_batch(ug);
6983 			uncharge_gather_clear(ug);
6984 		}
6985 		ug->memcg = memcg;
6986 		ug->nid = folio_nid(folio);
6987 
6988 		/* pairs with css_put in uncharge_batch */
6989 		css_get(&memcg->css);
6990 	}
6991 
6992 	nr_pages = folio_nr_pages(folio);
6993 
6994 	if (folio_memcg_kmem(folio)) {
6995 		ug->nr_memory += nr_pages;
6996 		ug->nr_kmem += nr_pages;
6997 
6998 		folio->memcg_data = 0;
6999 		obj_cgroup_put(objcg);
7000 	} else {
7001 		/* LRU pages aren't accounted at the root level */
7002 		if (!mem_cgroup_is_root(memcg))
7003 			ug->nr_memory += nr_pages;
7004 		ug->pgpgout++;
7005 
7006 		folio->memcg_data = 0;
7007 	}
7008 
7009 	css_put(&memcg->css);
7010 }
7011 
7012 void __mem_cgroup_uncharge(struct folio *folio)
7013 {
7014 	struct uncharge_gather ug;
7015 
7016 	/* Don't touch folio->lru of any random page, pre-check: */
7017 	if (!folio_memcg(folio))
7018 		return;
7019 
7020 	uncharge_gather_clear(&ug);
7021 	uncharge_folio(folio, &ug);
7022 	uncharge_batch(&ug);
7023 }
7024 
7025 /**
7026  * __mem_cgroup_uncharge_list - uncharge a list of pages
7027  * @page_list: list of pages to uncharge
7028  *
7029  * Uncharge a list of pages previously charged with
7030  * __mem_cgroup_charge().
7031  */
7032 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7033 {
7034 	struct uncharge_gather ug;
7035 	struct folio *folio;
7036 
7037 	uncharge_gather_clear(&ug);
7038 	list_for_each_entry(folio, page_list, lru)
7039 		uncharge_folio(folio, &ug);
7040 	if (ug.memcg)
7041 		uncharge_batch(&ug);
7042 }
7043 
7044 /**
7045  * mem_cgroup_migrate - Charge a folio's replacement.
7046  * @old: Currently circulating folio.
7047  * @new: Replacement folio.
7048  *
7049  * Charge @new as a replacement folio for @old. @old will
7050  * be uncharged upon free.
7051  *
7052  * Both folios must be locked, @new->mapping must be set up.
7053  */
7054 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7055 {
7056 	struct mem_cgroup *memcg;
7057 	long nr_pages = folio_nr_pages(new);
7058 	unsigned long flags;
7059 
7060 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7061 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7062 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7063 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7064 
7065 	if (mem_cgroup_disabled())
7066 		return;
7067 
7068 	/* Page cache replacement: new folio already charged? */
7069 	if (folio_memcg(new))
7070 		return;
7071 
7072 	memcg = folio_memcg(old);
7073 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7074 	if (!memcg)
7075 		return;
7076 
7077 	/* Force-charge the new page. The old one will be freed soon */
7078 	if (!mem_cgroup_is_root(memcg)) {
7079 		page_counter_charge(&memcg->memory, nr_pages);
7080 		if (do_memsw_account())
7081 			page_counter_charge(&memcg->memsw, nr_pages);
7082 	}
7083 
7084 	css_get(&memcg->css);
7085 	commit_charge(new, memcg);
7086 
7087 	local_irq_save(flags);
7088 	mem_cgroup_charge_statistics(memcg, nr_pages);
7089 	memcg_check_events(memcg, folio_nid(new));
7090 	local_irq_restore(flags);
7091 }
7092 
7093 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7094 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7095 
7096 void mem_cgroup_sk_alloc(struct sock *sk)
7097 {
7098 	struct mem_cgroup *memcg;
7099 
7100 	if (!mem_cgroup_sockets_enabled)
7101 		return;
7102 
7103 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7104 	if (!in_task())
7105 		return;
7106 
7107 	rcu_read_lock();
7108 	memcg = mem_cgroup_from_task(current);
7109 	if (memcg == root_mem_cgroup)
7110 		goto out;
7111 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7112 		goto out;
7113 	if (css_tryget(&memcg->css))
7114 		sk->sk_memcg = memcg;
7115 out:
7116 	rcu_read_unlock();
7117 }
7118 
7119 void mem_cgroup_sk_free(struct sock *sk)
7120 {
7121 	if (sk->sk_memcg)
7122 		css_put(&sk->sk_memcg->css);
7123 }
7124 
7125 /**
7126  * mem_cgroup_charge_skmem - charge socket memory
7127  * @memcg: memcg to charge
7128  * @nr_pages: number of pages to charge
7129  * @gfp_mask: reclaim mode
7130  *
7131  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7132  * @memcg's configured limit, %false if it doesn't.
7133  */
7134 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7135 			     gfp_t gfp_mask)
7136 {
7137 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7138 		struct page_counter *fail;
7139 
7140 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7141 			memcg->tcpmem_pressure = 0;
7142 			return true;
7143 		}
7144 		memcg->tcpmem_pressure = 1;
7145 		if (gfp_mask & __GFP_NOFAIL) {
7146 			page_counter_charge(&memcg->tcpmem, nr_pages);
7147 			return true;
7148 		}
7149 		return false;
7150 	}
7151 
7152 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7153 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7154 		return true;
7155 	}
7156 
7157 	return false;
7158 }
7159 
7160 /**
7161  * mem_cgroup_uncharge_skmem - uncharge socket memory
7162  * @memcg: memcg to uncharge
7163  * @nr_pages: number of pages to uncharge
7164  */
7165 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7166 {
7167 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7168 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7169 		return;
7170 	}
7171 
7172 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7173 
7174 	refill_stock(memcg, nr_pages);
7175 }
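
/*
 * Sketch of how a socket memory charge site is expected to pair the two
 * helpers above (illustrative; the real callers live in the networking core):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		...back off, the cgroup did not accept the charge...
 *
 * and later, when the memory is released:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */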
7176 
7177 static int __init cgroup_memory(char *s)
7178 {
7179 	char *token;
7180 
7181 	while ((token = strsep(&s, ",")) != NULL) {
7182 		if (!*token)
7183 			continue;
7184 		if (!strcmp(token, "nosocket"))
7185 			cgroup_memory_nosocket = true;
7186 		if (!strcmp(token, "nokmem"))
7187 			cgroup_memory_nokmem = true;
7188 	}
7189 	return 1;
7190 }
7191 __setup("cgroup.memory=", cgroup_memory);
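
/*
 * Example (illustrative): booting with "cgroup.memory=nosocket,nokmem" on the
 * kernel command line disables both socket and kernel memory accounting via
 * the parser above.
 */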
7192 
7193 /*
7194  * subsys_initcall() for memory controller.
7195  *
7196  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7197  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7198  * basically everything that doesn't depend on a specific mem_cgroup structure
7199  * should be initialized from here.
7200  */
7201 static int __init mem_cgroup_init(void)
7202 {
7203 	int cpu, node;
7204 
7205 	/*
7206 	 * Currently the s32 type (see struct batched_lruvec_stat) is
7207 	 * used for per-memcg-per-cpu caching of per-node statistics. For this
7208 	 * to work correctly, we must make sure that the overfill threshold can't
7209 	 * exceed S32_MAX / PAGE_SIZE.
7210 	 */
7211 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7212 
7213 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7214 				  memcg_hotplug_cpu_dead);
7215 
7216 	for_each_possible_cpu(cpu)
7217 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7218 			  drain_local_stock);
7219 
7220 	for_each_node(node) {
7221 		struct mem_cgroup_tree_per_node *rtpn;
7222 
7223 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7224 				    node_online(node) ? node : NUMA_NO_NODE);
7225 
7226 		rtpn->rb_root = RB_ROOT;
7227 		rtpn->rb_rightmost = NULL;
7228 		spin_lock_init(&rtpn->lock);
7229 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7230 	}
7231 
7232 	return 0;
7233 }
7234 subsys_initcall(mem_cgroup_init);
7235 
7236 #ifdef CONFIG_MEMCG_SWAP
7237 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7238 {
7239 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7240 		/*
7241 		 * The root cgroup cannot be destroyed, so its refcount must
7242 		 * always be >= 1.
7243 		 */
7244 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7245 			VM_BUG_ON(1);
7246 			break;
7247 		}
7248 		memcg = parent_mem_cgroup(memcg);
7249 		if (!memcg)
7250 			memcg = root_mem_cgroup;
7251 	}
7252 	return memcg;
7253 }
7254 
7255 /**
7256  * mem_cgroup_swapout - transfer a memsw charge to swap
7257  * @folio: folio whose memsw charge to transfer
7258  * @entry: swap entry to move the charge to
7259  *
7260  * Transfer the memsw charge of @folio to @entry.
7261  */
7262 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7263 {
7264 	struct mem_cgroup *memcg, *swap_memcg;
7265 	unsigned int nr_entries;
7266 	unsigned short oldid;
7267 
7268 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7269 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7270 
7271 	if (mem_cgroup_disabled())
7272 		return;
7273 
7274 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7275 		return;
7276 
7277 	memcg = folio_memcg(folio);
7278 
7279 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7280 	if (!memcg)
7281 		return;
7282 
7283 	/*
7284 	 * In case the memcg owning these pages has been offlined and doesn't
7285 	 * have an ID allocated to it anymore, charge the closest online
7286 	 * ancestor for the swap instead and transfer the memory+swap charge.
7287 	 */
7288 	swap_memcg = mem_cgroup_id_get_online(memcg);
7289 	nr_entries = folio_nr_pages(folio);
7290 	/* Get references for the tail pages, too */
7291 	if (nr_entries > 1)
7292 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7293 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7294 				   nr_entries);
7295 	VM_BUG_ON_FOLIO(oldid, folio);
7296 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7297 
7298 	folio->memcg_data = 0;
7299 
7300 	if (!mem_cgroup_is_root(memcg))
7301 		page_counter_uncharge(&memcg->memory, nr_entries);
7302 
7303 	if (!cgroup_memory_noswap && memcg != swap_memcg) {
7304 		if (!mem_cgroup_is_root(swap_memcg))
7305 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7306 		page_counter_uncharge(&memcg->memsw, nr_entries);
7307 	}
7308 
7309 	/*
7310 	 * Interrupts should be disabled here because the caller holds the
7311 	 * i_pages lock, which is taken with interrupts off. Keeping
7312 	 * interrupts disabled matters because it is the only
7313 	 * synchronisation we have for updating the per-CPU variables.
7314 	 */
7315 	memcg_stats_lock();
7316 	mem_cgroup_charge_statistics(memcg, -nr_entries);
7317 	memcg_stats_unlock();
7318 	memcg_check_events(memcg, folio_nid(folio));
7319 
7320 	css_put(&memcg->css);
7321 }
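/*
 * Usage sketch (simplified, assuming the reclaim-side caller): once a
 * swapcache folio has been unmapped and is about to be dropped from its
 * address space, with the i_pages lock held and interrupts off, the charge
 * is handed over to the swap entry roughly like:
 *
 *	swp_entry_t swap = folio_swap_entry(folio);
 *
 *	mem_cgroup_swapout(folio, swap);
 *	// ...the folio is then removed from the swap cache and freed...
 */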
7322 
7323 /**
7324  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7325  * @folio: folio being added to swap
7326  * @entry: swap entry to charge
7327  *
7328  * Try to charge @folio's memcg for the swap space at @entry.
7329  *
7330  * Returns 0 on success, -ENOMEM on failure.
7331  */
7332 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7333 {
7334 	unsigned int nr_pages = folio_nr_pages(folio);
7335 	struct page_counter *counter;
7336 	struct mem_cgroup *memcg;
7337 	unsigned short oldid;
7338 
7339 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7340 		return 0;
7341 
7342 	memcg = folio_memcg(folio);
7343 
7344 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7345 	if (!memcg)
7346 		return 0;
7347 
7348 	if (!entry.val) {
7349 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7350 		return 0;
7351 	}
7352 
7353 	memcg = mem_cgroup_id_get_online(memcg);
7354 
7355 	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7356 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7357 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7358 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7359 		mem_cgroup_id_put(memcg);
7360 		return -ENOMEM;
7361 	}
7362 
7363 	/* Get references for the tail pages, too */
7364 	if (nr_pages > 1)
7365 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7366 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7367 	VM_BUG_ON_FOLIO(oldid, folio);
7368 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7369 
7370 	return 0;
7371 }
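/*
 * For illustration (paraphrased, not a verbatim copy of the header):
 * callers normally go through a static inline wrapper that short-circuits
 * when the controller is disabled, roughly:
 *
 *	static inline int mem_cgroup_try_charge_swap(struct folio *folio,
 *						     swp_entry_t entry)
 *	{
 *		if (mem_cgroup_disabled())
 *			return 0;
 *		return __mem_cgroup_try_charge_swap(folio, entry);
 *	}
 */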
7372 
7373 /**
7374  * __mem_cgroup_uncharge_swap - uncharge swap space
7375  * @entry: swap entry to uncharge
7376  * @nr_pages: the amount of swap space to uncharge
7377  */
7378 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7379 {
7380 	struct mem_cgroup *memcg;
7381 	unsigned short id;
7382 
7383 	id = swap_cgroup_record(entry, 0, nr_pages);
7384 	rcu_read_lock();
7385 	memcg = mem_cgroup_from_id(id);
7386 	if (memcg) {
7387 		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7388 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7389 				page_counter_uncharge(&memcg->swap, nr_pages);
7390 			else
7391 				page_counter_uncharge(&memcg->memsw, nr_pages);
7392 		}
7393 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7394 		mem_cgroup_id_put_many(memcg, nr_pages);
7395 	}
7396 	rcu_read_unlock();
7397 }
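/*
 * Worked example: swap_cgroup_record(entry, 0, nr_pages) above both clears
 * the recorded owner and returns the previously recorded memcg ID. So when
 * the last references to 8 swap entries recorded for cgroup ID 42 are
 * freed, the lookup yields that cgroup, 8 pages come off its swap (or
 * memsw) counter, MEMCG_SWAP drops by 8, and the 8 ID references taken at
 * charge time are put.
 */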
7398 
7399 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7400 {
7401 	long nr_swap_pages = get_nr_swap_pages();
7402 
7403 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7404 		return nr_swap_pages;
7405 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7406 		nr_swap_pages = min_t(long, nr_swap_pages,
7407 				      READ_ONCE(memcg->swap.max) -
7408 				      page_counter_read(&memcg->swap));
7409 	return nr_swap_pages;
7410 }
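/*
 * Worked example: with 1,000,000 free swap pages system-wide, a parent
 * whose swap.max leaves room for 200,000 more pages and a child whose
 * limit leaves room for 50,000, the walk above returns 50,000 -- the
 * tightest constraint along the ancestry wins.
 */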
7411 
7412 bool mem_cgroup_swap_full(struct page *page)
7413 {
7414 	struct mem_cgroup *memcg;
7415 
7416 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7417 
7418 	if (vm_swap_full())
7419 		return true;
7420 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7421 		return false;
7422 
7423 	memcg = page_memcg(page);
7424 	if (!memcg)
7425 		return false;
7426 
7427 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7428 		unsigned long usage = page_counter_read(&memcg->swap);
7429 
7430 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7431 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7432 			return true;
7433 	}
7434 
7435 	return false;
7436 }
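/*
 * Worked example: with memory.swap.max set to 1 GiB (262144 pages at
 * 4 KiB) and swap.high left at "max", a cgroup with 512 MiB (131072 pages)
 * of swap in use already satisfies usage * 2 >= max, so the check above
 * reports its swap as full even if vm_swap_full() is false system-wide.
 */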
7437 
7438 static int __init setup_swap_account(char *s)
7439 {
7440 	if (!strcmp(s, "1"))
7441 		cgroup_memory_noswap = false;
7442 	else if (!strcmp(s, "0"))
7443 		cgroup_memory_noswap = true;
7444 	return 1;
7445 }
7446 __setup("swapaccount=", setup_swap_account);
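/*
 * For example, booting with "swapaccount=0" on the kernel command line sets
 * cgroup_memory_noswap and disables swap accounting entirely, while
 * "swapaccount=1" keeps it enabled (the default, unless memcg itself is
 * disabled; see mem_cgroup_swap_init() below).
 */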
7447 
7448 static u64 swap_current_read(struct cgroup_subsys_state *css,
7449 			     struct cftype *cft)
7450 {
7451 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7452 
7453 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7454 }
7455 
7456 static int swap_high_show(struct seq_file *m, void *v)
7457 {
7458 	return seq_puts_memcg_tunable(m,
7459 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7460 }
7461 
7462 static ssize_t swap_high_write(struct kernfs_open_file *of,
7463 			       char *buf, size_t nbytes, loff_t off)
7464 {
7465 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7466 	unsigned long high;
7467 	int err;
7468 
7469 	buf = strstrip(buf);
7470 	err = page_counter_memparse(buf, "max", &high);
7471 	if (err)
7472 		return err;
7473 
7474 	page_counter_set_high(&memcg->swap, high);
7475 
7476 	return nbytes;
7477 }
7478 
7479 static int swap_max_show(struct seq_file *m, void *v)
7480 {
7481 	return seq_puts_memcg_tunable(m,
7482 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7483 }
7484 
7485 static ssize_t swap_max_write(struct kernfs_open_file *of,
7486 			      char *buf, size_t nbytes, loff_t off)
7487 {
7488 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7489 	unsigned long max;
7490 	int err;
7491 
7492 	buf = strstrip(buf);
7493 	err = page_counter_memparse(buf, "max", &max);
7494 	if (err)
7495 		return err;
7496 
7497 	xchg(&memcg->swap.max, max);
7498 
7499 	return nbytes;
7500 }
7501 
7502 static int swap_events_show(struct seq_file *m, void *v)
7503 {
7504 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7505 
7506 	seq_printf(m, "high %lu\n",
7507 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7508 	seq_printf(m, "max %lu\n",
7509 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7510 	seq_printf(m, "fail %lu\n",
7511 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7512 
7513 	return 0;
7514 }
7515 
7516 static struct cftype swap_files[] = {
7517 	{
7518 		.name = "swap.current",
7519 		.flags = CFTYPE_NOT_ON_ROOT,
7520 		.read_u64 = swap_current_read,
7521 	},
7522 	{
7523 		.name = "swap.high",
7524 		.flags = CFTYPE_NOT_ON_ROOT,
7525 		.seq_show = swap_high_show,
7526 		.write = swap_high_write,
7527 	},
7528 	{
7529 		.name = "swap.max",
7530 		.flags = CFTYPE_NOT_ON_ROOT,
7531 		.seq_show = swap_max_show,
7532 		.write = swap_max_write,
7533 	},
7534 	{
7535 		.name = "swap.events",
7536 		.flags = CFTYPE_NOT_ON_ROOT,
7537 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7538 		.seq_show = swap_events_show,
7539 	},
7540 	{ }	/* terminate */
7541 };
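/*
 * Illustrative userspace use of the v2 files registered above. The cgroup2
 * mount point and the "job" group name are assumptions for the example;
 * writes accept "max" or byte values with K/M/G suffixes, as parsed by
 * page_counter_memparse() in the handlers above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_memcg_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	/* This write() ends up in swap_max_write()/swap_high_write(). */
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Cap the group's swap at 512M and set the high watermark at 256M. */
	if (write_memcg_file("/sys/fs/cgroup/job/memory.swap.max", "512M") ||
	    write_memcg_file("/sys/fs/cgroup/job/memory.swap.high", "256M"))
		perror("memory.swap tunables");
	return 0;
}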
7542 
7543 static struct cftype memsw_files[] = {
7544 	{
7545 		.name = "memsw.usage_in_bytes",
7546 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7547 		.read_u64 = mem_cgroup_read_u64,
7548 	},
7549 	{
7550 		.name = "memsw.max_usage_in_bytes",
7551 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7552 		.write = mem_cgroup_reset,
7553 		.read_u64 = mem_cgroup_read_u64,
7554 	},
7555 	{
7556 		.name = "memsw.limit_in_bytes",
7557 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7558 		.write = mem_cgroup_write,
7559 		.read_u64 = mem_cgroup_read_u64,
7560 	},
7561 	{
7562 		.name = "memsw.failcnt",
7563 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7564 		.write = mem_cgroup_reset,
7565 		.read_u64 = mem_cgroup_read_u64,
7566 	},
7567 	{ }	/* terminate */
7568 };
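/*
 * These legacy files appear as memory.memsw.* in cgroup v1 hierarchies when
 * swap accounting is enabled, mirroring the memory.* counters but backed by
 * the combined memory+swap (memsw) page counter.
 */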
7569 
7570 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7571 /**
7572  * obj_cgroup_may_zswap - check if this cgroup can zswap
7573  * @objcg: the object cgroup
7574  *
7575  * Check if the hierarchical zswap limit has been reached.
7576  *
7577  * This doesn't check for specific headroom, and it is not atomic
7578  * either. But with zswap, the size of the allocation is only known
7579  * once compression has occurred, and this optimistic pre-check avoids
7580  * spending cycles on compression when there is already no room left
7581  * or zswap is disabled altogether somewhere in the hierarchy.
7582  */
7583 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7584 {
7585 	struct mem_cgroup *memcg, *original_memcg;
7586 	bool ret = true;
7587 
7588 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7589 		return true;
7590 
7591 	original_memcg = get_mem_cgroup_from_objcg(objcg);
7592 	for (memcg = original_memcg; memcg != root_mem_cgroup;
7593 	     memcg = parent_mem_cgroup(memcg)) {
7594 		unsigned long max = READ_ONCE(memcg->zswap_max);
7595 		unsigned long pages;
7596 
7597 		if (max == PAGE_COUNTER_MAX)
7598 			continue;
7599 		if (max == 0) {
7600 			ret = false;
7601 			break;
7602 		}
7603 
7604 		cgroup_rstat_flush(memcg->css.cgroup);
7605 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7606 		if (pages < max)
7607 			continue;
7608 		ret = false;
7609 		break;
7610 	}
7611 	mem_cgroup_put(original_memcg);
7612 	return ret;
7613 }
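/*
 * Worked example: a cgroup whose memory.zswap.max is 100 MiB and whose
 * flushed MEMCG_ZSWAP_B usage is already at or above 100 MiB makes this
 * return false, as does a zero limit anywhere up the ancestry; an ancestor
 * left at the default "max" is skipped without forcing an rstat flush.
 */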
7614 
7615 /**
7616  * obj_cgroup_charge_zswap - charge compression backend memory
7617  * @objcg: the object cgroup
7618  * @size: size of compressed object
7619  *
7620  * This forces the charge after obj_cgroup_may_zswap() allowed
7621  * compression and storage in zswap for this cgroup to go ahead.
7622  */
7623 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7624 {
7625 	struct mem_cgroup *memcg;
7626 
7627 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7628 		return;
7629 
7630 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7631 
7632 	/* PF_MEMALLOC context, charging must succeed */
7633 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7634 		VM_WARN_ON_ONCE(1);
7635 
7636 	rcu_read_lock();
7637 	memcg = obj_cgroup_memcg(objcg);
7638 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7639 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7640 	rcu_read_unlock();
7641 }
7642 
7643 /**
7644  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7645  * @objcg: the object cgroup
7646  * @size: size of compressed object
7647  *
7648  * Uncharges zswap memory when the page is swapped back in.
7649  */
7650 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
7651 {
7652 	struct mem_cgroup *memcg;
7653 
7654 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7655 		return;
7656 
7657 	obj_cgroup_uncharge(objcg, size);
7658 
7659 	rcu_read_lock();
7660 	memcg = obj_cgroup_memcg(objcg);
7661 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
7662 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
7663 	rcu_read_unlock();
7664 }
7665 
7666 static u64 zswap_current_read(struct cgroup_subsys_state *css,
7667 			      struct cftype *cft)
7668 {
7669 	cgroup_rstat_flush(css->cgroup);
7670 	return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
7671 }
7672 
7673 static int zswap_max_show(struct seq_file *m, void *v)
7674 {
7675 	return seq_puts_memcg_tunable(m,
7676 		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
7677 }
7678 
7679 static ssize_t zswap_max_write(struct kernfs_open_file *of,
7680 			       char *buf, size_t nbytes, loff_t off)
7681 {
7682 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7683 	unsigned long max;
7684 	int err;
7685 
7686 	buf = strstrip(buf);
7687 	err = page_counter_memparse(buf, "max", &max);
7688 	if (err)
7689 		return err;
7690 
7691 	xchg(&memcg->zswap_max, max);
7692 
7693 	return nbytes;
7694 }
7695 
7696 static struct cftype zswap_files[] = {
7697 	{
7698 		.name = "zswap.current",
7699 		.flags = CFTYPE_NOT_ON_ROOT,
7700 		.read_u64 = zswap_current_read,
7701 	},
7702 	{
7703 		.name = "zswap.max",
7704 		.flags = CFTYPE_NOT_ON_ROOT,
7705 		.seq_show = zswap_max_show,
7706 		.write = zswap_max_write,
7707 	},
7708 	{ }	/* terminate */
7709 };
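/*
 * Usage note: writing "0" to memory.zswap.max turns zswap off for that
 * subtree (see the max == 0 check in obj_cgroup_may_zswap() above), while
 * writing "max" removes the limit again.
 */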
7710 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
7711 
7712 /*
7713  * If mem_cgroup_swap_init() were implemented as a subsys_initcall()
7714  * instead of a core_initcall(), cgroup_memory_noswap could remain false
7715  * even when memcg is disabled via the "cgroup_disable=memory" boot
7716  * parameter. This may result in a premature OOPS inside
7717  * mem_cgroup_get_nr_swap_pages() in corner cases.
7718  */
7719 static int __init mem_cgroup_swap_init(void)
7720 {
7721 	/* No memory control -> no swap control */
7722 	if (mem_cgroup_disabled())
7723 		cgroup_memory_noswap = true;
7724 
7725 	if (cgroup_memory_noswap)
7726 		return 0;
7727 
7728 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7729 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7730 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7731 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
7732 #endif
7733 	return 0;
7734 }
7735 core_initcall(mem_cgroup_swap_init);
7736 
7737 #endif /* CONFIG_MEMCG_SWAP */
7738