xref: /openbmc/linux/mm/memcontrol.c (revision 2fe60ec9)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70 #include "swap.h"
71 
72 #include <linux/uaccess.h>
73 
74 #include <trace/events/vmscan.h>
75 
76 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
77 EXPORT_SYMBOL(memory_cgrp_subsys);
78 
79 struct mem_cgroup *root_mem_cgroup __read_mostly;
80 
81 /* Active memory cgroup to use from an interrupt context */
82 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
83 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
84 
85 /* Socket memory accounting disabled? */
86 static bool cgroup_memory_nosocket __ro_after_init;
87 
88 /* Kernel memory accounting disabled? */
89 static bool cgroup_memory_nokmem __ro_after_init;
90 
91 /* Whether the swap controller is active */
92 #ifdef CONFIG_MEMCG_SWAP
93 static bool cgroup_memory_noswap __ro_after_init;
94 #else
95 #define cgroup_memory_noswap		1
96 #endif
97 
98 #ifdef CONFIG_CGROUP_WRITEBACK
99 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
100 #endif
101 
102 /* Whether legacy memory+swap accounting is active */
103 static bool do_memsw_account(void)
104 {
105 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
106 }
107 
108 #define THRESHOLDS_EVENTS_TARGET 128
109 #define SOFTLIMIT_EVENTS_TARGET 1024
110 
111 /*
112  * Cgroups above their limits are maintained in a RB-Tree, independent of
113  * their hierarchy representation
114  */
115 
116 struct mem_cgroup_tree_per_node {
117 	struct rb_root rb_root;
118 	struct rb_node *rb_rightmost;
119 	spinlock_t lock;
120 };
121 
122 struct mem_cgroup_tree {
123 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
124 };
125 
126 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
127 
128 /* for OOM */
129 struct mem_cgroup_eventfd_list {
130 	struct list_head list;
131 	struct eventfd_ctx *eventfd;
132 };
133 
134 /*
135  * cgroup_event represents events which userspace want to receive.
136  */
137 struct mem_cgroup_event {
138 	/*
139 	 * memcg which the event belongs to.
140 	 */
141 	struct mem_cgroup *memcg;
142 	/*
143 	 * eventfd to signal userspace about the event.
144 	 */
145 	struct eventfd_ctx *eventfd;
146 	/*
147 	 * Each of these stored in a list by the cgroup.
148 	 */
149 	struct list_head list;
150 	/*
151 	 * register_event() callback will be used to add new userspace
152 	 * waiter for changes related to this event.  Use eventfd_signal()
153 	 * on eventfd to send notification to userspace.
154 	 */
155 	int (*register_event)(struct mem_cgroup *memcg,
156 			      struct eventfd_ctx *eventfd, const char *args);
157 	/*
158 	 * unregister_event() callback will be called when userspace closes
159 	 * the eventfd or on cgroup removing.  This callback must be set,
160 	 * if you want provide notification functionality.
161 	 */
162 	void (*unregister_event)(struct mem_cgroup *memcg,
163 				 struct eventfd_ctx *eventfd);
164 	/*
165 	 * All fields below needed to unregister event when
166 	 * userspace closes eventfd.
167 	 */
168 	poll_table pt;
169 	wait_queue_head_t *wqh;
170 	wait_queue_entry_t wait;
171 	struct work_struct remove;
172 };
173 
174 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
175 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
176 
177 /* Stuffs for move charges at task migration. */
178 /*
179  * Types of charges to be moved.
180  */
181 #define MOVE_ANON	0x1U
182 #define MOVE_FILE	0x2U
183 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
184 
185 /* "mc" and its members are protected by cgroup_mutex */
186 static struct move_charge_struct {
187 	spinlock_t	  lock; /* for from, to */
188 	struct mm_struct  *mm;
189 	struct mem_cgroup *from;
190 	struct mem_cgroup *to;
191 	unsigned long flags;
192 	unsigned long precharge;
193 	unsigned long moved_charge;
194 	unsigned long moved_swap;
195 	struct task_struct *moving_task;	/* a task moving charges */
196 	wait_queue_head_t waitq;		/* a waitq for other context */
197 } mc = {
198 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
199 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
200 };
201 
202 /*
203  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
204  * limit reclaim to prevent infinite loops, if they ever occur.
205  */
206 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
207 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
208 
209 /* for encoding cft->private value on file */
210 enum res_type {
211 	_MEM,
212 	_MEMSWAP,
213 	_KMEM,
214 	_TCP,
215 };
216 
217 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
218 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
219 #define MEMFILE_ATTR(val)	((val) & 0xffff)
220 
221 /*
222  * Iteration constructs for visiting all cgroups (under a tree).  If
223  * loops are exited prematurely (break), mem_cgroup_iter_break() must
224  * be used for reference counting.
225  */
226 #define for_each_mem_cgroup_tree(iter, root)		\
227 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
228 	     iter != NULL;				\
229 	     iter = mem_cgroup_iter(root, iter, NULL))
230 
231 #define for_each_mem_cgroup(iter)			\
232 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
233 	     iter != NULL;				\
234 	     iter = mem_cgroup_iter(NULL, iter, NULL))
235 
236 static inline bool task_is_dying(void)
237 {
238 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
239 		(current->flags & PF_EXITING);
240 }
241 
242 /* Some nice accessors for the vmpressure. */
243 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
244 {
245 	if (!memcg)
246 		memcg = root_mem_cgroup;
247 	return &memcg->vmpressure;
248 }
249 
250 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
251 {
252 	return container_of(vmpr, struct mem_cgroup, vmpressure);
253 }
254 
255 #ifdef CONFIG_MEMCG_KMEM
256 static DEFINE_SPINLOCK(objcg_lock);
257 
258 bool mem_cgroup_kmem_disabled(void)
259 {
260 	return cgroup_memory_nokmem;
261 }
262 
263 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
264 				      unsigned int nr_pages);
265 
266 static void obj_cgroup_release(struct percpu_ref *ref)
267 {
268 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
269 	unsigned int nr_bytes;
270 	unsigned int nr_pages;
271 	unsigned long flags;
272 
273 	/*
274 	 * At this point all allocated objects are freed, and
275 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
276 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
277 	 *
278 	 * The following sequence can lead to it:
279 	 * 1) CPU0: objcg == stock->cached_objcg
280 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
281 	 *          PAGE_SIZE bytes are charged
282 	 * 3) CPU1: a process from another memcg is allocating something,
283 	 *          the stock if flushed,
284 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
285 	 * 5) CPU0: we do release this object,
286 	 *          92 bytes are added to stock->nr_bytes
287 	 * 6) CPU0: stock is flushed,
288 	 *          92 bytes are added to objcg->nr_charged_bytes
289 	 *
290 	 * In the result, nr_charged_bytes == PAGE_SIZE.
291 	 * This page will be uncharged in obj_cgroup_release().
292 	 */
293 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
294 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
295 	nr_pages = nr_bytes >> PAGE_SHIFT;
296 
297 	if (nr_pages)
298 		obj_cgroup_uncharge_pages(objcg, nr_pages);
299 
300 	spin_lock_irqsave(&objcg_lock, flags);
301 	list_del(&objcg->list);
302 	spin_unlock_irqrestore(&objcg_lock, flags);
303 
304 	percpu_ref_exit(ref);
305 	kfree_rcu(objcg, rcu);
306 }
307 
308 static struct obj_cgroup *obj_cgroup_alloc(void)
309 {
310 	struct obj_cgroup *objcg;
311 	int ret;
312 
313 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
314 	if (!objcg)
315 		return NULL;
316 
317 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
318 			      GFP_KERNEL);
319 	if (ret) {
320 		kfree(objcg);
321 		return NULL;
322 	}
323 	INIT_LIST_HEAD(&objcg->list);
324 	return objcg;
325 }
326 
327 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
328 				  struct mem_cgroup *parent)
329 {
330 	struct obj_cgroup *objcg, *iter;
331 
332 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
333 
334 	spin_lock_irq(&objcg_lock);
335 
336 	/* 1) Ready to reparent active objcg. */
337 	list_add(&objcg->list, &memcg->objcg_list);
338 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
339 	list_for_each_entry(iter, &memcg->objcg_list, list)
340 		WRITE_ONCE(iter->memcg, parent);
341 	/* 3) Move already reparented objcgs to the parent's list */
342 	list_splice(&memcg->objcg_list, &parent->objcg_list);
343 
344 	spin_unlock_irq(&objcg_lock);
345 
346 	percpu_ref_kill(&objcg->refcnt);
347 }
348 
349 /*
350  * A lot of the calls to the cache allocation functions are expected to be
351  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
352  * conditional to this static branch, we'll have to allow modules that does
353  * kmem_cache_alloc and the such to see this symbol as well
354  */
355 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
356 EXPORT_SYMBOL(memcg_kmem_enabled_key);
357 #endif
358 
359 /**
360  * mem_cgroup_css_from_page - css of the memcg associated with a page
361  * @page: page of interest
362  *
363  * If memcg is bound to the default hierarchy, css of the memcg associated
364  * with @page is returned.  The returned css remains associated with @page
365  * until it is released.
366  *
367  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
368  * is returned.
369  */
370 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
371 {
372 	struct mem_cgroup *memcg;
373 
374 	memcg = page_memcg(page);
375 
376 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
377 		memcg = root_mem_cgroup;
378 
379 	return &memcg->css;
380 }
381 
382 /**
383  * page_cgroup_ino - return inode number of the memcg a page is charged to
384  * @page: the page
385  *
386  * Look up the closest online ancestor of the memory cgroup @page is charged to
387  * and return its inode number or 0 if @page is not charged to any cgroup. It
388  * is safe to call this function without holding a reference to @page.
389  *
390  * Note, this function is inherently racy, because there is nothing to prevent
391  * the cgroup inode from getting torn down and potentially reallocated a moment
392  * after page_cgroup_ino() returns, so it only should be used by callers that
393  * do not care (such as procfs interfaces).
394  */
395 ino_t page_cgroup_ino(struct page *page)
396 {
397 	struct mem_cgroup *memcg;
398 	unsigned long ino = 0;
399 
400 	rcu_read_lock();
401 	memcg = page_memcg_check(page);
402 
403 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
404 		memcg = parent_mem_cgroup(memcg);
405 	if (memcg)
406 		ino = cgroup_ino(memcg->css.cgroup);
407 	rcu_read_unlock();
408 	return ino;
409 }
410 
411 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
412 					 struct mem_cgroup_tree_per_node *mctz,
413 					 unsigned long new_usage_in_excess)
414 {
415 	struct rb_node **p = &mctz->rb_root.rb_node;
416 	struct rb_node *parent = NULL;
417 	struct mem_cgroup_per_node *mz_node;
418 	bool rightmost = true;
419 
420 	if (mz->on_tree)
421 		return;
422 
423 	mz->usage_in_excess = new_usage_in_excess;
424 	if (!mz->usage_in_excess)
425 		return;
426 	while (*p) {
427 		parent = *p;
428 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
429 					tree_node);
430 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
431 			p = &(*p)->rb_left;
432 			rightmost = false;
433 		} else {
434 			p = &(*p)->rb_right;
435 		}
436 	}
437 
438 	if (rightmost)
439 		mctz->rb_rightmost = &mz->tree_node;
440 
441 	rb_link_node(&mz->tree_node, parent, p);
442 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
443 	mz->on_tree = true;
444 }
445 
446 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
447 					 struct mem_cgroup_tree_per_node *mctz)
448 {
449 	if (!mz->on_tree)
450 		return;
451 
452 	if (&mz->tree_node == mctz->rb_rightmost)
453 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
454 
455 	rb_erase(&mz->tree_node, &mctz->rb_root);
456 	mz->on_tree = false;
457 }
458 
459 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
460 				       struct mem_cgroup_tree_per_node *mctz)
461 {
462 	unsigned long flags;
463 
464 	spin_lock_irqsave(&mctz->lock, flags);
465 	__mem_cgroup_remove_exceeded(mz, mctz);
466 	spin_unlock_irqrestore(&mctz->lock, flags);
467 }
468 
469 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
470 {
471 	unsigned long nr_pages = page_counter_read(&memcg->memory);
472 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
473 	unsigned long excess = 0;
474 
475 	if (nr_pages > soft_limit)
476 		excess = nr_pages - soft_limit;
477 
478 	return excess;
479 }
480 
481 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
482 {
483 	unsigned long excess;
484 	struct mem_cgroup_per_node *mz;
485 	struct mem_cgroup_tree_per_node *mctz;
486 
487 	mctz = soft_limit_tree.rb_tree_per_node[nid];
488 	if (!mctz)
489 		return;
490 	/*
491 	 * Necessary to update all ancestors when hierarchy is used.
492 	 * because their event counter is not touched.
493 	 */
494 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
495 		mz = memcg->nodeinfo[nid];
496 		excess = soft_limit_excess(memcg);
497 		/*
498 		 * We have to update the tree if mz is on RB-tree or
499 		 * mem is over its softlimit.
500 		 */
501 		if (excess || mz->on_tree) {
502 			unsigned long flags;
503 
504 			spin_lock_irqsave(&mctz->lock, flags);
505 			/* if on-tree, remove it */
506 			if (mz->on_tree)
507 				__mem_cgroup_remove_exceeded(mz, mctz);
508 			/*
509 			 * Insert again. mz->usage_in_excess will be updated.
510 			 * If excess is 0, no tree ops.
511 			 */
512 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
513 			spin_unlock_irqrestore(&mctz->lock, flags);
514 		}
515 	}
516 }
517 
518 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
519 {
520 	struct mem_cgroup_tree_per_node *mctz;
521 	struct mem_cgroup_per_node *mz;
522 	int nid;
523 
524 	for_each_node(nid) {
525 		mz = memcg->nodeinfo[nid];
526 		mctz = soft_limit_tree.rb_tree_per_node[nid];
527 		if (mctz)
528 			mem_cgroup_remove_exceeded(mz, mctz);
529 	}
530 }
531 
532 static struct mem_cgroup_per_node *
533 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
534 {
535 	struct mem_cgroup_per_node *mz;
536 
537 retry:
538 	mz = NULL;
539 	if (!mctz->rb_rightmost)
540 		goto done;		/* Nothing to reclaim from */
541 
542 	mz = rb_entry(mctz->rb_rightmost,
543 		      struct mem_cgroup_per_node, tree_node);
544 	/*
545 	 * Remove the node now but someone else can add it back,
546 	 * we will to add it back at the end of reclaim to its correct
547 	 * position in the tree.
548 	 */
549 	__mem_cgroup_remove_exceeded(mz, mctz);
550 	if (!soft_limit_excess(mz->memcg) ||
551 	    !css_tryget(&mz->memcg->css))
552 		goto retry;
553 done:
554 	return mz;
555 }
556 
557 static struct mem_cgroup_per_node *
558 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
559 {
560 	struct mem_cgroup_per_node *mz;
561 
562 	spin_lock_irq(&mctz->lock);
563 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
564 	spin_unlock_irq(&mctz->lock);
565 	return mz;
566 }
567 
568 /*
569  * memcg and lruvec stats flushing
570  *
571  * Many codepaths leading to stats update or read are performance sensitive and
572  * adding stats flushing in such codepaths is not desirable. So, to optimize the
573  * flushing the kernel does:
574  *
575  * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
576  *    rstat update tree grow unbounded.
577  *
578  * 2) Flush the stats synchronously on reader side only when there are more than
579  *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
580  *    will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but
581  *    only for 2 seconds due to (1).
582  */
583 static void flush_memcg_stats_dwork(struct work_struct *w);
584 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
585 static DEFINE_SPINLOCK(stats_flush_lock);
586 static DEFINE_PER_CPU(unsigned int, stats_updates);
587 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
588 static u64 flush_next_time;
589 
590 #define FLUSH_TIME (2UL*HZ)
591 
592 /*
593  * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
594  * not rely on this as part of an acquired spinlock_t lock. These functions are
595  * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
596  * is sufficient.
597  */
598 static void memcg_stats_lock(void)
599 {
600 #ifdef CONFIG_PREEMPT_RT
601       preempt_disable();
602 #else
603       VM_BUG_ON(!irqs_disabled());
604 #endif
605 }
606 
607 static void __memcg_stats_lock(void)
608 {
609 #ifdef CONFIG_PREEMPT_RT
610       preempt_disable();
611 #endif
612 }
613 
614 static void memcg_stats_unlock(void)
615 {
616 #ifdef CONFIG_PREEMPT_RT
617       preempt_enable();
618 #endif
619 }
620 
621 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
622 {
623 	unsigned int x;
624 
625 	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
626 
627 	x = __this_cpu_add_return(stats_updates, abs(val));
628 	if (x > MEMCG_CHARGE_BATCH) {
629 		atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
630 		__this_cpu_write(stats_updates, 0);
631 	}
632 }
633 
634 static void __mem_cgroup_flush_stats(void)
635 {
636 	unsigned long flag;
637 
638 	if (!spin_trylock_irqsave(&stats_flush_lock, flag))
639 		return;
640 
641 	flush_next_time = jiffies_64 + 2*FLUSH_TIME;
642 	cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
643 	atomic_set(&stats_flush_threshold, 0);
644 	spin_unlock_irqrestore(&stats_flush_lock, flag);
645 }
646 
647 void mem_cgroup_flush_stats(void)
648 {
649 	if (atomic_read(&stats_flush_threshold) > num_online_cpus())
650 		__mem_cgroup_flush_stats();
651 }
652 
653 void mem_cgroup_flush_stats_delayed(void)
654 {
655 	if (time_after64(jiffies_64, flush_next_time))
656 		mem_cgroup_flush_stats();
657 }
658 
659 static void flush_memcg_stats_dwork(struct work_struct *w)
660 {
661 	__mem_cgroup_flush_stats();
662 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
663 }
664 
665 /**
666  * __mod_memcg_state - update cgroup memory statistics
667  * @memcg: the memory cgroup
668  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
669  * @val: delta to add to the counter, can be negative
670  */
671 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
672 {
673 	if (mem_cgroup_disabled())
674 		return;
675 
676 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
677 	memcg_rstat_updated(memcg, val);
678 }
679 
680 /* idx can be of type enum memcg_stat_item or node_stat_item. */
681 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
682 {
683 	long x = 0;
684 	int cpu;
685 
686 	for_each_possible_cpu(cpu)
687 		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
688 #ifdef CONFIG_SMP
689 	if (x < 0)
690 		x = 0;
691 #endif
692 	return x;
693 }
694 
695 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
696 			      int val)
697 {
698 	struct mem_cgroup_per_node *pn;
699 	struct mem_cgroup *memcg;
700 
701 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
702 	memcg = pn->memcg;
703 
704 	/*
705 	 * The caller from rmap relay on disabled preemption becase they never
706 	 * update their counter from in-interrupt context. For these two
707 	 * counters we check that the update is never performed from an
708 	 * interrupt context while other caller need to have disabled interrupt.
709 	 */
710 	__memcg_stats_lock();
711 	if (IS_ENABLED(CONFIG_DEBUG_VM) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
712 		switch (idx) {
713 		case NR_ANON_MAPPED:
714 		case NR_FILE_MAPPED:
715 		case NR_ANON_THPS:
716 		case NR_SHMEM_PMDMAPPED:
717 		case NR_FILE_PMDMAPPED:
718 			WARN_ON_ONCE(!in_task());
719 			break;
720 		default:
721 			WARN_ON_ONCE(!irqs_disabled());
722 		}
723 	}
724 
725 	/* Update memcg */
726 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
727 
728 	/* Update lruvec */
729 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
730 
731 	memcg_rstat_updated(memcg, val);
732 	memcg_stats_unlock();
733 }
734 
735 /**
736  * __mod_lruvec_state - update lruvec memory statistics
737  * @lruvec: the lruvec
738  * @idx: the stat item
739  * @val: delta to add to the counter, can be negative
740  *
741  * The lruvec is the intersection of the NUMA node and a cgroup. This
742  * function updates the all three counters that are affected by a
743  * change of state at this level: per-node, per-cgroup, per-lruvec.
744  */
745 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
746 			int val)
747 {
748 	/* Update node */
749 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
750 
751 	/* Update memcg and lruvec */
752 	if (!mem_cgroup_disabled())
753 		__mod_memcg_lruvec_state(lruvec, idx, val);
754 }
755 
756 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
757 			     int val)
758 {
759 	struct page *head = compound_head(page); /* rmap on tail pages */
760 	struct mem_cgroup *memcg;
761 	pg_data_t *pgdat = page_pgdat(page);
762 	struct lruvec *lruvec;
763 
764 	rcu_read_lock();
765 	memcg = page_memcg(head);
766 	/* Untracked pages have no memcg, no lruvec. Update only the node */
767 	if (!memcg) {
768 		rcu_read_unlock();
769 		__mod_node_page_state(pgdat, idx, val);
770 		return;
771 	}
772 
773 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
774 	__mod_lruvec_state(lruvec, idx, val);
775 	rcu_read_unlock();
776 }
777 EXPORT_SYMBOL(__mod_lruvec_page_state);
778 
779 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
780 {
781 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
782 	struct mem_cgroup *memcg;
783 	struct lruvec *lruvec;
784 
785 	rcu_read_lock();
786 	memcg = mem_cgroup_from_obj(p);
787 
788 	/*
789 	 * Untracked pages have no memcg, no lruvec. Update only the
790 	 * node. If we reparent the slab objects to the root memcg,
791 	 * when we free the slab object, we need to update the per-memcg
792 	 * vmstats to keep it correct for the root memcg.
793 	 */
794 	if (!memcg) {
795 		__mod_node_page_state(pgdat, idx, val);
796 	} else {
797 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
798 		__mod_lruvec_state(lruvec, idx, val);
799 	}
800 	rcu_read_unlock();
801 }
802 
803 /**
804  * __count_memcg_events - account VM events in a cgroup
805  * @memcg: the memory cgroup
806  * @idx: the event item
807  * @count: the number of events that occurred
808  */
809 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
810 			  unsigned long count)
811 {
812 	if (mem_cgroup_disabled())
813 		return;
814 
815 	memcg_stats_lock();
816 	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
817 	memcg_rstat_updated(memcg, count);
818 	memcg_stats_unlock();
819 }
820 
821 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
822 {
823 	return READ_ONCE(memcg->vmstats.events[event]);
824 }
825 
826 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
827 {
828 	long x = 0;
829 	int cpu;
830 
831 	for_each_possible_cpu(cpu)
832 		x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
833 	return x;
834 }
835 
836 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
837 					 int nr_pages)
838 {
839 	/* pagein of a big page is an event. So, ignore page size */
840 	if (nr_pages > 0)
841 		__count_memcg_events(memcg, PGPGIN, 1);
842 	else {
843 		__count_memcg_events(memcg, PGPGOUT, 1);
844 		nr_pages = -nr_pages; /* for event */
845 	}
846 
847 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
848 }
849 
850 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
851 				       enum mem_cgroup_events_target target)
852 {
853 	unsigned long val, next;
854 
855 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
856 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
857 	/* from time_after() in jiffies.h */
858 	if ((long)(next - val) < 0) {
859 		switch (target) {
860 		case MEM_CGROUP_TARGET_THRESH:
861 			next = val + THRESHOLDS_EVENTS_TARGET;
862 			break;
863 		case MEM_CGROUP_TARGET_SOFTLIMIT:
864 			next = val + SOFTLIMIT_EVENTS_TARGET;
865 			break;
866 		default:
867 			break;
868 		}
869 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
870 		return true;
871 	}
872 	return false;
873 }
874 
875 /*
876  * Check events in order.
877  *
878  */
879 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
880 {
881 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
882 		return;
883 
884 	/* threshold event is triggered in finer grain than soft limit */
885 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
886 						MEM_CGROUP_TARGET_THRESH))) {
887 		bool do_softlimit;
888 
889 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
890 						MEM_CGROUP_TARGET_SOFTLIMIT);
891 		mem_cgroup_threshold(memcg);
892 		if (unlikely(do_softlimit))
893 			mem_cgroup_update_tree(memcg, nid);
894 	}
895 }
896 
897 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
898 {
899 	/*
900 	 * mm_update_next_owner() may clear mm->owner to NULL
901 	 * if it races with swapoff, page migration, etc.
902 	 * So this can be called with p == NULL.
903 	 */
904 	if (unlikely(!p))
905 		return NULL;
906 
907 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
908 }
909 EXPORT_SYMBOL(mem_cgroup_from_task);
910 
911 static __always_inline struct mem_cgroup *active_memcg(void)
912 {
913 	if (!in_task())
914 		return this_cpu_read(int_active_memcg);
915 	else
916 		return current->active_memcg;
917 }
918 
919 /**
920  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
921  * @mm: mm from which memcg should be extracted. It can be NULL.
922  *
923  * Obtain a reference on mm->memcg and returns it if successful. If mm
924  * is NULL, then the memcg is chosen as follows:
925  * 1) The active memcg, if set.
926  * 2) current->mm->memcg, if available
927  * 3) root memcg
928  * If mem_cgroup is disabled, NULL is returned.
929  */
930 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
931 {
932 	struct mem_cgroup *memcg;
933 
934 	if (mem_cgroup_disabled())
935 		return NULL;
936 
937 	/*
938 	 * Page cache insertions can happen without an
939 	 * actual mm context, e.g. during disk probing
940 	 * on boot, loopback IO, acct() writes etc.
941 	 *
942 	 * No need to css_get on root memcg as the reference
943 	 * counting is disabled on the root level in the
944 	 * cgroup core. See CSS_NO_REF.
945 	 */
946 	if (unlikely(!mm)) {
947 		memcg = active_memcg();
948 		if (unlikely(memcg)) {
949 			/* remote memcg must hold a ref */
950 			css_get(&memcg->css);
951 			return memcg;
952 		}
953 		mm = current->mm;
954 		if (unlikely(!mm))
955 			return root_mem_cgroup;
956 	}
957 
958 	rcu_read_lock();
959 	do {
960 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
961 		if (unlikely(!memcg))
962 			memcg = root_mem_cgroup;
963 	} while (!css_tryget(&memcg->css));
964 	rcu_read_unlock();
965 	return memcg;
966 }
967 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
968 
969 static __always_inline bool memcg_kmem_bypass(void)
970 {
971 	/* Allow remote memcg charging from any context. */
972 	if (unlikely(active_memcg()))
973 		return false;
974 
975 	/* Memcg to charge can't be determined. */
976 	if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
977 		return true;
978 
979 	return false;
980 }
981 
982 /**
983  * mem_cgroup_iter - iterate over memory cgroup hierarchy
984  * @root: hierarchy root
985  * @prev: previously returned memcg, NULL on first invocation
986  * @reclaim: cookie for shared reclaim walks, NULL for full walks
987  *
988  * Returns references to children of the hierarchy below @root, or
989  * @root itself, or %NULL after a full round-trip.
990  *
991  * Caller must pass the return value in @prev on subsequent
992  * invocations for reference counting, or use mem_cgroup_iter_break()
993  * to cancel a hierarchy walk before the round-trip is complete.
994  *
995  * Reclaimers can specify a node in @reclaim to divide up the memcgs
996  * in the hierarchy among all concurrent reclaimers operating on the
997  * same node.
998  */
999 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1000 				   struct mem_cgroup *prev,
1001 				   struct mem_cgroup_reclaim_cookie *reclaim)
1002 {
1003 	struct mem_cgroup_reclaim_iter *iter;
1004 	struct cgroup_subsys_state *css = NULL;
1005 	struct mem_cgroup *memcg = NULL;
1006 	struct mem_cgroup *pos = NULL;
1007 
1008 	if (mem_cgroup_disabled())
1009 		return NULL;
1010 
1011 	if (!root)
1012 		root = root_mem_cgroup;
1013 
1014 	rcu_read_lock();
1015 
1016 	if (reclaim) {
1017 		struct mem_cgroup_per_node *mz;
1018 
1019 		mz = root->nodeinfo[reclaim->pgdat->node_id];
1020 		iter = &mz->iter;
1021 
1022 		/*
1023 		 * On start, join the current reclaim iteration cycle.
1024 		 * Exit when a concurrent walker completes it.
1025 		 */
1026 		if (!prev)
1027 			reclaim->generation = iter->generation;
1028 		else if (reclaim->generation != iter->generation)
1029 			goto out_unlock;
1030 
1031 		while (1) {
1032 			pos = READ_ONCE(iter->position);
1033 			if (!pos || css_tryget(&pos->css))
1034 				break;
1035 			/*
1036 			 * css reference reached zero, so iter->position will
1037 			 * be cleared by ->css_released. However, we should not
1038 			 * rely on this happening soon, because ->css_released
1039 			 * is called from a work queue, and by busy-waiting we
1040 			 * might block it. So we clear iter->position right
1041 			 * away.
1042 			 */
1043 			(void)cmpxchg(&iter->position, pos, NULL);
1044 		}
1045 	} else if (prev) {
1046 		pos = prev;
1047 	}
1048 
1049 	if (pos)
1050 		css = &pos->css;
1051 
1052 	for (;;) {
1053 		css = css_next_descendant_pre(css, &root->css);
1054 		if (!css) {
1055 			/*
1056 			 * Reclaimers share the hierarchy walk, and a
1057 			 * new one might jump in right at the end of
1058 			 * the hierarchy - make sure they see at least
1059 			 * one group and restart from the beginning.
1060 			 */
1061 			if (!prev)
1062 				continue;
1063 			break;
1064 		}
1065 
1066 		/*
1067 		 * Verify the css and acquire a reference.  The root
1068 		 * is provided by the caller, so we know it's alive
1069 		 * and kicking, and don't take an extra reference.
1070 		 */
1071 		if (css == &root->css || css_tryget(css)) {
1072 			memcg = mem_cgroup_from_css(css);
1073 			break;
1074 		}
1075 	}
1076 
1077 	if (reclaim) {
1078 		/*
1079 		 * The position could have already been updated by a competing
1080 		 * thread, so check that the value hasn't changed since we read
1081 		 * it to avoid reclaiming from the same cgroup twice.
1082 		 */
1083 		(void)cmpxchg(&iter->position, pos, memcg);
1084 
1085 		if (pos)
1086 			css_put(&pos->css);
1087 
1088 		if (!memcg)
1089 			iter->generation++;
1090 	}
1091 
1092 out_unlock:
1093 	rcu_read_unlock();
1094 	if (prev && prev != root)
1095 		css_put(&prev->css);
1096 
1097 	return memcg;
1098 }
1099 
1100 /**
1101  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1102  * @root: hierarchy root
1103  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1104  */
1105 void mem_cgroup_iter_break(struct mem_cgroup *root,
1106 			   struct mem_cgroup *prev)
1107 {
1108 	if (!root)
1109 		root = root_mem_cgroup;
1110 	if (prev && prev != root)
1111 		css_put(&prev->css);
1112 }
1113 
1114 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1115 					struct mem_cgroup *dead_memcg)
1116 {
1117 	struct mem_cgroup_reclaim_iter *iter;
1118 	struct mem_cgroup_per_node *mz;
1119 	int nid;
1120 
1121 	for_each_node(nid) {
1122 		mz = from->nodeinfo[nid];
1123 		iter = &mz->iter;
1124 		cmpxchg(&iter->position, dead_memcg, NULL);
1125 	}
1126 }
1127 
1128 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1129 {
1130 	struct mem_cgroup *memcg = dead_memcg;
1131 	struct mem_cgroup *last;
1132 
1133 	do {
1134 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1135 		last = memcg;
1136 	} while ((memcg = parent_mem_cgroup(memcg)));
1137 
1138 	/*
1139 	 * When cgruop1 non-hierarchy mode is used,
1140 	 * parent_mem_cgroup() does not walk all the way up to the
1141 	 * cgroup root (root_mem_cgroup). So we have to handle
1142 	 * dead_memcg from cgroup root separately.
1143 	 */
1144 	if (last != root_mem_cgroup)
1145 		__invalidate_reclaim_iterators(root_mem_cgroup,
1146 						dead_memcg);
1147 }
1148 
1149 /**
1150  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1151  * @memcg: hierarchy root
1152  * @fn: function to call for each task
1153  * @arg: argument passed to @fn
1154  *
1155  * This function iterates over tasks attached to @memcg or to any of its
1156  * descendants and calls @fn for each task. If @fn returns a non-zero
1157  * value, the function breaks the iteration loop and returns the value.
1158  * Otherwise, it will iterate over all tasks and return 0.
1159  *
1160  * This function must not be called for the root memory cgroup.
1161  */
1162 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1163 			  int (*fn)(struct task_struct *, void *), void *arg)
1164 {
1165 	struct mem_cgroup *iter;
1166 	int ret = 0;
1167 
1168 	BUG_ON(memcg == root_mem_cgroup);
1169 
1170 	for_each_mem_cgroup_tree(iter, memcg) {
1171 		struct css_task_iter it;
1172 		struct task_struct *task;
1173 
1174 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1175 		while (!ret && (task = css_task_iter_next(&it)))
1176 			ret = fn(task, arg);
1177 		css_task_iter_end(&it);
1178 		if (ret) {
1179 			mem_cgroup_iter_break(memcg, iter);
1180 			break;
1181 		}
1182 	}
1183 	return ret;
1184 }
1185 
1186 #ifdef CONFIG_DEBUG_VM
1187 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1188 {
1189 	struct mem_cgroup *memcg;
1190 
1191 	if (mem_cgroup_disabled())
1192 		return;
1193 
1194 	memcg = folio_memcg(folio);
1195 
1196 	if (!memcg)
1197 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
1198 	else
1199 		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1200 }
1201 #endif
1202 
1203 /**
1204  * folio_lruvec_lock - Lock the lruvec for a folio.
1205  * @folio: Pointer to the folio.
1206  *
1207  * These functions are safe to use under any of the following conditions:
1208  * - folio locked
1209  * - folio_test_lru false
1210  * - folio_memcg_lock()
1211  * - folio frozen (refcount of 0)
1212  *
1213  * Return: The lruvec this folio is on with its lock held.
1214  */
1215 struct lruvec *folio_lruvec_lock(struct folio *folio)
1216 {
1217 	struct lruvec *lruvec = folio_lruvec(folio);
1218 
1219 	spin_lock(&lruvec->lru_lock);
1220 	lruvec_memcg_debug(lruvec, folio);
1221 
1222 	return lruvec;
1223 }
1224 
1225 /**
1226  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1227  * @folio: Pointer to the folio.
1228  *
1229  * These functions are safe to use under any of the following conditions:
1230  * - folio locked
1231  * - folio_test_lru false
1232  * - folio_memcg_lock()
1233  * - folio frozen (refcount of 0)
1234  *
1235  * Return: The lruvec this folio is on with its lock held and interrupts
1236  * disabled.
1237  */
1238 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1239 {
1240 	struct lruvec *lruvec = folio_lruvec(folio);
1241 
1242 	spin_lock_irq(&lruvec->lru_lock);
1243 	lruvec_memcg_debug(lruvec, folio);
1244 
1245 	return lruvec;
1246 }
1247 
1248 /**
1249  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1250  * @folio: Pointer to the folio.
1251  * @flags: Pointer to irqsave flags.
1252  *
1253  * These functions are safe to use under any of the following conditions:
1254  * - folio locked
1255  * - folio_test_lru false
1256  * - folio_memcg_lock()
1257  * - folio frozen (refcount of 0)
1258  *
1259  * Return: The lruvec this folio is on with its lock held and interrupts
1260  * disabled.
1261  */
1262 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1263 		unsigned long *flags)
1264 {
1265 	struct lruvec *lruvec = folio_lruvec(folio);
1266 
1267 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1268 	lruvec_memcg_debug(lruvec, folio);
1269 
1270 	return lruvec;
1271 }
1272 
1273 /**
1274  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1275  * @lruvec: mem_cgroup per zone lru vector
1276  * @lru: index of lru list the page is sitting on
1277  * @zid: zone id of the accounted pages
1278  * @nr_pages: positive when adding or negative when removing
1279  *
1280  * This function must be called under lru_lock, just before a page is added
1281  * to or just after a page is removed from an lru list.
1282  */
1283 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1284 				int zid, int nr_pages)
1285 {
1286 	struct mem_cgroup_per_node *mz;
1287 	unsigned long *lru_size;
1288 	long size;
1289 
1290 	if (mem_cgroup_disabled())
1291 		return;
1292 
1293 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1294 	lru_size = &mz->lru_zone_size[zid][lru];
1295 
1296 	if (nr_pages < 0)
1297 		*lru_size += nr_pages;
1298 
1299 	size = *lru_size;
1300 	if (WARN_ONCE(size < 0,
1301 		"%s(%p, %d, %d): lru_size %ld\n",
1302 		__func__, lruvec, lru, nr_pages, size)) {
1303 		VM_BUG_ON(1);
1304 		*lru_size = 0;
1305 	}
1306 
1307 	if (nr_pages > 0)
1308 		*lru_size += nr_pages;
1309 }
1310 
1311 /**
1312  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1313  * @memcg: the memory cgroup
1314  *
1315  * Returns the maximum amount of memory @mem can be charged with, in
1316  * pages.
1317  */
1318 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1319 {
1320 	unsigned long margin = 0;
1321 	unsigned long count;
1322 	unsigned long limit;
1323 
1324 	count = page_counter_read(&memcg->memory);
1325 	limit = READ_ONCE(memcg->memory.max);
1326 	if (count < limit)
1327 		margin = limit - count;
1328 
1329 	if (do_memsw_account()) {
1330 		count = page_counter_read(&memcg->memsw);
1331 		limit = READ_ONCE(memcg->memsw.max);
1332 		if (count < limit)
1333 			margin = min(margin, limit - count);
1334 		else
1335 			margin = 0;
1336 	}
1337 
1338 	return margin;
1339 }
1340 
1341 /*
1342  * A routine for checking "mem" is under move_account() or not.
1343  *
1344  * Checking a cgroup is mc.from or mc.to or under hierarchy of
1345  * moving cgroups. This is for waiting at high-memory pressure
1346  * caused by "move".
1347  */
1348 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1349 {
1350 	struct mem_cgroup *from;
1351 	struct mem_cgroup *to;
1352 	bool ret = false;
1353 	/*
1354 	 * Unlike task_move routines, we access mc.to, mc.from not under
1355 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1356 	 */
1357 	spin_lock(&mc.lock);
1358 	from = mc.from;
1359 	to = mc.to;
1360 	if (!from)
1361 		goto unlock;
1362 
1363 	ret = mem_cgroup_is_descendant(from, memcg) ||
1364 		mem_cgroup_is_descendant(to, memcg);
1365 unlock:
1366 	spin_unlock(&mc.lock);
1367 	return ret;
1368 }
1369 
1370 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1371 {
1372 	if (mc.moving_task && current != mc.moving_task) {
1373 		if (mem_cgroup_under_move(memcg)) {
1374 			DEFINE_WAIT(wait);
1375 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1376 			/* moving charge context might have finished. */
1377 			if (mc.moving_task)
1378 				schedule();
1379 			finish_wait(&mc.waitq, &wait);
1380 			return true;
1381 		}
1382 	}
1383 	return false;
1384 }
1385 
1386 struct memory_stat {
1387 	const char *name;
1388 	unsigned int idx;
1389 };
1390 
1391 static const struct memory_stat memory_stats[] = {
1392 	{ "anon",			NR_ANON_MAPPED			},
1393 	{ "file",			NR_FILE_PAGES			},
1394 	{ "kernel",			MEMCG_KMEM			},
1395 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1396 	{ "pagetables",			NR_PAGETABLE			},
1397 	{ "percpu",			MEMCG_PERCPU_B			},
1398 	{ "sock",			MEMCG_SOCK			},
1399 	{ "vmalloc",			MEMCG_VMALLOC			},
1400 	{ "shmem",			NR_SHMEM			},
1401 	{ "file_mapped",		NR_FILE_MAPPED			},
1402 	{ "file_dirty",			NR_FILE_DIRTY			},
1403 	{ "file_writeback",		NR_WRITEBACK			},
1404 #ifdef CONFIG_SWAP
1405 	{ "swapcached",			NR_SWAPCACHE			},
1406 #endif
1407 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1408 	{ "anon_thp",			NR_ANON_THPS			},
1409 	{ "file_thp",			NR_FILE_THPS			},
1410 	{ "shmem_thp",			NR_SHMEM_THPS			},
1411 #endif
1412 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1413 	{ "active_anon",		NR_ACTIVE_ANON			},
1414 	{ "inactive_file",		NR_INACTIVE_FILE		},
1415 	{ "active_file",		NR_ACTIVE_FILE			},
1416 	{ "unevictable",		NR_UNEVICTABLE			},
1417 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1418 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1419 
1420 	/* The memory events */
1421 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1422 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1423 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1424 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1425 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1426 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1427 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1428 };
1429 
1430 /* Translate stat items to the correct unit for memory.stat output */
1431 static int memcg_page_state_unit(int item)
1432 {
1433 	switch (item) {
1434 	case MEMCG_PERCPU_B:
1435 	case NR_SLAB_RECLAIMABLE_B:
1436 	case NR_SLAB_UNRECLAIMABLE_B:
1437 	case WORKINGSET_REFAULT_ANON:
1438 	case WORKINGSET_REFAULT_FILE:
1439 	case WORKINGSET_ACTIVATE_ANON:
1440 	case WORKINGSET_ACTIVATE_FILE:
1441 	case WORKINGSET_RESTORE_ANON:
1442 	case WORKINGSET_RESTORE_FILE:
1443 	case WORKINGSET_NODERECLAIM:
1444 		return 1;
1445 	case NR_KERNEL_STACK_KB:
1446 		return SZ_1K;
1447 	default:
1448 		return PAGE_SIZE;
1449 	}
1450 }
1451 
1452 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1453 						    int item)
1454 {
1455 	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1456 }
1457 
1458 static char *memory_stat_format(struct mem_cgroup *memcg)
1459 {
1460 	struct seq_buf s;
1461 	int i;
1462 
1463 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1464 	if (!s.buffer)
1465 		return NULL;
1466 
1467 	/*
1468 	 * Provide statistics on the state of the memory subsystem as
1469 	 * well as cumulative event counters that show past behavior.
1470 	 *
1471 	 * This list is ordered following a combination of these gradients:
1472 	 * 1) generic big picture -> specifics and details
1473 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1474 	 *
1475 	 * Current memory state:
1476 	 */
1477 	mem_cgroup_flush_stats();
1478 
1479 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1480 		u64 size;
1481 
1482 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1483 		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1484 
1485 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1486 			size += memcg_page_state_output(memcg,
1487 							NR_SLAB_RECLAIMABLE_B);
1488 			seq_buf_printf(&s, "slab %llu\n", size);
1489 		}
1490 	}
1491 
1492 	/* Accumulated memory events */
1493 
1494 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1495 		       memcg_events(memcg, PGFAULT));
1496 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1497 		       memcg_events(memcg, PGMAJFAULT));
1498 	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1499 		       memcg_events(memcg, PGREFILL));
1500 	seq_buf_printf(&s, "pgscan %lu\n",
1501 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1502 		       memcg_events(memcg, PGSCAN_DIRECT));
1503 	seq_buf_printf(&s, "pgsteal %lu\n",
1504 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1505 		       memcg_events(memcg, PGSTEAL_DIRECT));
1506 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1507 		       memcg_events(memcg, PGACTIVATE));
1508 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1509 		       memcg_events(memcg, PGDEACTIVATE));
1510 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1511 		       memcg_events(memcg, PGLAZYFREE));
1512 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1513 		       memcg_events(memcg, PGLAZYFREED));
1514 
1515 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1516 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1517 		       memcg_events(memcg, THP_FAULT_ALLOC));
1518 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1519 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1520 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1521 
1522 	/* The above should easily fit into one page */
1523 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1524 
1525 	return s.buffer;
1526 }
1527 
1528 #define K(x) ((x) << (PAGE_SHIFT-10))
1529 /**
1530  * mem_cgroup_print_oom_context: Print OOM information relevant to
1531  * memory controller.
1532  * @memcg: The memory cgroup that went over limit
1533  * @p: Task that is going to be killed
1534  *
1535  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1536  * enabled
1537  */
1538 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1539 {
1540 	rcu_read_lock();
1541 
1542 	if (memcg) {
1543 		pr_cont(",oom_memcg=");
1544 		pr_cont_cgroup_path(memcg->css.cgroup);
1545 	} else
1546 		pr_cont(",global_oom");
1547 	if (p) {
1548 		pr_cont(",task_memcg=");
1549 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1550 	}
1551 	rcu_read_unlock();
1552 }
1553 
1554 /**
1555  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1556  * memory controller.
1557  * @memcg: The memory cgroup that went over limit
1558  */
1559 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1560 {
1561 	char *buf;
1562 
1563 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1564 		K((u64)page_counter_read(&memcg->memory)),
1565 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1566 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1567 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1568 			K((u64)page_counter_read(&memcg->swap)),
1569 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1570 	else {
1571 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1572 			K((u64)page_counter_read(&memcg->memsw)),
1573 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1574 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1575 			K((u64)page_counter_read(&memcg->kmem)),
1576 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1577 	}
1578 
1579 	pr_info("Memory cgroup stats for ");
1580 	pr_cont_cgroup_path(memcg->css.cgroup);
1581 	pr_cont(":");
1582 	buf = memory_stat_format(memcg);
1583 	if (!buf)
1584 		return;
1585 	pr_info("%s", buf);
1586 	kfree(buf);
1587 }
1588 
1589 /*
1590  * Return the memory (and swap, if configured) limit for a memcg.
1591  */
1592 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1593 {
1594 	unsigned long max = READ_ONCE(memcg->memory.max);
1595 
1596 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1597 		if (mem_cgroup_swappiness(memcg))
1598 			max += min(READ_ONCE(memcg->swap.max),
1599 				   (unsigned long)total_swap_pages);
1600 	} else { /* v1 */
1601 		if (mem_cgroup_swappiness(memcg)) {
1602 			/* Calculate swap excess capacity from memsw limit */
1603 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1604 
1605 			max += min(swap, (unsigned long)total_swap_pages);
1606 		}
1607 	}
1608 	return max;
1609 }
1610 
1611 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1612 {
1613 	return page_counter_read(&memcg->memory);
1614 }
1615 
1616 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1617 				     int order)
1618 {
1619 	struct oom_control oc = {
1620 		.zonelist = NULL,
1621 		.nodemask = NULL,
1622 		.memcg = memcg,
1623 		.gfp_mask = gfp_mask,
1624 		.order = order,
1625 	};
1626 	bool ret = true;
1627 
1628 	if (mutex_lock_killable(&oom_lock))
1629 		return true;
1630 
1631 	if (mem_cgroup_margin(memcg) >= (1 << order))
1632 		goto unlock;
1633 
1634 	/*
1635 	 * A few threads which were not waiting at mutex_lock_killable() can
1636 	 * fail to bail out. Therefore, check again after holding oom_lock.
1637 	 */
1638 	ret = task_is_dying() || out_of_memory(&oc);
1639 
1640 unlock:
1641 	mutex_unlock(&oom_lock);
1642 	return ret;
1643 }
1644 
1645 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1646 				   pg_data_t *pgdat,
1647 				   gfp_t gfp_mask,
1648 				   unsigned long *total_scanned)
1649 {
1650 	struct mem_cgroup *victim = NULL;
1651 	int total = 0;
1652 	int loop = 0;
1653 	unsigned long excess;
1654 	unsigned long nr_scanned;
1655 	struct mem_cgroup_reclaim_cookie reclaim = {
1656 		.pgdat = pgdat,
1657 	};
1658 
1659 	excess = soft_limit_excess(root_memcg);
1660 
1661 	while (1) {
1662 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1663 		if (!victim) {
1664 			loop++;
1665 			if (loop >= 2) {
1666 				/*
1667 				 * If we have not been able to reclaim
1668 				 * anything, it might because there are
1669 				 * no reclaimable pages under this hierarchy
1670 				 */
1671 				if (!total)
1672 					break;
1673 				/*
1674 				 * We want to do more targeted reclaim.
1675 				 * excess >> 2 is not to excessive so as to
1676 				 * reclaim too much, nor too less that we keep
1677 				 * coming back to reclaim from this cgroup
1678 				 */
1679 				if (total >= (excess >> 2) ||
1680 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1681 					break;
1682 			}
1683 			continue;
1684 		}
1685 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1686 					pgdat, &nr_scanned);
1687 		*total_scanned += nr_scanned;
1688 		if (!soft_limit_excess(root_memcg))
1689 			break;
1690 	}
1691 	mem_cgroup_iter_break(root_memcg, victim);
1692 	return total;
1693 }
1694 
1695 #ifdef CONFIG_LOCKDEP
1696 static struct lockdep_map memcg_oom_lock_dep_map = {
1697 	.name = "memcg_oom_lock",
1698 };
1699 #endif
1700 
1701 static DEFINE_SPINLOCK(memcg_oom_lock);
1702 
1703 /*
1704  * Check OOM-Killer is already running under our hierarchy.
1705  * If someone is running, return false.
1706  */
1707 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1708 {
1709 	struct mem_cgroup *iter, *failed = NULL;
1710 
1711 	spin_lock(&memcg_oom_lock);
1712 
1713 	for_each_mem_cgroup_tree(iter, memcg) {
1714 		if (iter->oom_lock) {
1715 			/*
1716 			 * this subtree of our hierarchy is already locked
1717 			 * so we cannot give a lock.
1718 			 */
1719 			failed = iter;
1720 			mem_cgroup_iter_break(memcg, iter);
1721 			break;
1722 		} else
1723 			iter->oom_lock = true;
1724 	}
1725 
1726 	if (failed) {
1727 		/*
1728 		 * OK, we failed to lock the whole subtree so we have
1729 		 * to clean up what we set up to the failing subtree
1730 		 */
1731 		for_each_mem_cgroup_tree(iter, memcg) {
1732 			if (iter == failed) {
1733 				mem_cgroup_iter_break(memcg, iter);
1734 				break;
1735 			}
1736 			iter->oom_lock = false;
1737 		}
1738 	} else
1739 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1740 
1741 	spin_unlock(&memcg_oom_lock);
1742 
1743 	return !failed;
1744 }
1745 
1746 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1747 {
1748 	struct mem_cgroup *iter;
1749 
1750 	spin_lock(&memcg_oom_lock);
1751 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1752 	for_each_mem_cgroup_tree(iter, memcg)
1753 		iter->oom_lock = false;
1754 	spin_unlock(&memcg_oom_lock);
1755 }
1756 
1757 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1758 {
1759 	struct mem_cgroup *iter;
1760 
1761 	spin_lock(&memcg_oom_lock);
1762 	for_each_mem_cgroup_tree(iter, memcg)
1763 		iter->under_oom++;
1764 	spin_unlock(&memcg_oom_lock);
1765 }
1766 
1767 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1768 {
1769 	struct mem_cgroup *iter;
1770 
1771 	/*
1772 	 * Be careful about under_oom underflows because a child memcg
1773 	 * could have been added after mem_cgroup_mark_under_oom.
1774 	 */
1775 	spin_lock(&memcg_oom_lock);
1776 	for_each_mem_cgroup_tree(iter, memcg)
1777 		if (iter->under_oom > 0)
1778 			iter->under_oom--;
1779 	spin_unlock(&memcg_oom_lock);
1780 }
1781 
1782 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1783 
1784 struct oom_wait_info {
1785 	struct mem_cgroup *memcg;
1786 	wait_queue_entry_t	wait;
1787 };
1788 
1789 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1790 	unsigned mode, int sync, void *arg)
1791 {
1792 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1793 	struct mem_cgroup *oom_wait_memcg;
1794 	struct oom_wait_info *oom_wait_info;
1795 
1796 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1797 	oom_wait_memcg = oom_wait_info->memcg;
1798 
1799 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1800 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1801 		return 0;
1802 	return autoremove_wake_function(wait, mode, sync, arg);
1803 }
1804 
1805 static void memcg_oom_recover(struct mem_cgroup *memcg)
1806 {
1807 	/*
1808 	 * For the following lockless ->under_oom test, the only required
1809 	 * guarantee is that it must see the state asserted by an OOM when
1810 	 * this function is called as a result of userland actions
1811 	 * triggered by the notification of the OOM.  This is trivially
1812 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1813 	 * triggering notification.
1814 	 */
1815 	if (memcg && memcg->under_oom)
1816 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1817 }
1818 
1819 /*
1820  * Returns true if successfully killed one or more processes. Though in some
1821  * corner cases it can return true even without killing any process.
1822  */
1823 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1824 {
1825 	bool locked, ret;
1826 
1827 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1828 		return false;
1829 
1830 	memcg_memory_event(memcg, MEMCG_OOM);
1831 
1832 	/*
1833 	 * We are in the middle of the charge context here, so we
1834 	 * don't want to block when potentially sitting on a callstack
1835 	 * that holds all kinds of filesystem and mm locks.
1836 	 *
1837 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1838 	 * handling until the charge can succeed; remember the context and put
1839 	 * the task to sleep at the end of the page fault when all locks are
1840 	 * released.
1841 	 *
1842 	 * On the other hand, in-kernel OOM killer allows for an async victim
1843 	 * memory reclaim (oom_reaper) and that means that we are not solely
1844 	 * relying on the oom victim to make a forward progress and we can
1845 	 * invoke the oom killer here.
1846 	 *
1847 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1848 	 * victim and then we have to bail out from the charge path.
1849 	 */
1850 	if (memcg->oom_kill_disable) {
1851 		if (current->in_user_fault) {
1852 			css_get(&memcg->css);
1853 			current->memcg_in_oom = memcg;
1854 			current->memcg_oom_gfp_mask = mask;
1855 			current->memcg_oom_order = order;
1856 		}
1857 		return false;
1858 	}
1859 
1860 	mem_cgroup_mark_under_oom(memcg);
1861 
1862 	locked = mem_cgroup_oom_trylock(memcg);
1863 
1864 	if (locked)
1865 		mem_cgroup_oom_notify(memcg);
1866 
1867 	mem_cgroup_unmark_under_oom(memcg);
1868 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1869 
1870 	if (locked)
1871 		mem_cgroup_oom_unlock(memcg);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1878  * @handle: actually kill/wait or just clean up the OOM state
1879  *
1880  * This has to be called at the end of a page fault if the memcg OOM
1881  * handler was enabled.
1882  *
1883  * Memcg supports userspace OOM handling where failed allocations must
1884  * sleep on a waitqueue until the userspace task resolves the
1885  * situation.  Sleeping directly in the charge context with all kinds
1886  * of locks held is not a good idea, instead we remember an OOM state
1887  * in the task and mem_cgroup_oom_synchronize() has to be called at
1888  * the end of the page fault to complete the OOM handling.
1889  *
1890  * Returns %true if an ongoing memcg OOM situation was detected and
1891  * completed, %false otherwise.
1892  */
1893 bool mem_cgroup_oom_synchronize(bool handle)
1894 {
1895 	struct mem_cgroup *memcg = current->memcg_in_oom;
1896 	struct oom_wait_info owait;
1897 	bool locked;
1898 
1899 	/* OOM is global, do not handle */
1900 	if (!memcg)
1901 		return false;
1902 
1903 	if (!handle)
1904 		goto cleanup;
1905 
1906 	owait.memcg = memcg;
1907 	owait.wait.flags = 0;
1908 	owait.wait.func = memcg_oom_wake_function;
1909 	owait.wait.private = current;
1910 	INIT_LIST_HEAD(&owait.wait.entry);
1911 
1912 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1913 	mem_cgroup_mark_under_oom(memcg);
1914 
1915 	locked = mem_cgroup_oom_trylock(memcg);
1916 
1917 	if (locked)
1918 		mem_cgroup_oom_notify(memcg);
1919 
1920 	if (locked && !memcg->oom_kill_disable) {
1921 		mem_cgroup_unmark_under_oom(memcg);
1922 		finish_wait(&memcg_oom_waitq, &owait.wait);
1923 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1924 					 current->memcg_oom_order);
1925 	} else {
1926 		schedule();
1927 		mem_cgroup_unmark_under_oom(memcg);
1928 		finish_wait(&memcg_oom_waitq, &owait.wait);
1929 	}
1930 
1931 	if (locked) {
1932 		mem_cgroup_oom_unlock(memcg);
1933 		/*
1934 		 * There is no guarantee that an OOM-lock contender
1935 		 * sees the wakeups triggered by the OOM kill
1936 		 * uncharges.  Wake any sleepers explicitly.
1937 		 */
1938 		memcg_oom_recover(memcg);
1939 	}
1940 cleanup:
1941 	current->memcg_in_oom = NULL;
1942 	css_put(&memcg->css);
1943 	return true;
1944 }
1945 
1946 /**
1947  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1948  * @victim: task to be killed by the OOM killer
1949  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1950  *
1951  * Returns a pointer to a memory cgroup, which has to be cleaned up
1952  * by killing all belonging OOM-killable tasks.
1953  *
1954  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1955  */
1956 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1957 					    struct mem_cgroup *oom_domain)
1958 {
1959 	struct mem_cgroup *oom_group = NULL;
1960 	struct mem_cgroup *memcg;
1961 
1962 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1963 		return NULL;
1964 
1965 	if (!oom_domain)
1966 		oom_domain = root_mem_cgroup;
1967 
1968 	rcu_read_lock();
1969 
1970 	memcg = mem_cgroup_from_task(victim);
1971 	if (memcg == root_mem_cgroup)
1972 		goto out;
1973 
1974 	/*
1975 	 * If the victim task has been asynchronously moved to a different
1976 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1977 	 * In this case it's better to ignore memory.group.oom.
1978 	 */
1979 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1980 		goto out;
1981 
1982 	/*
1983 	 * Traverse the memory cgroup hierarchy from the victim task's
1984 	 * cgroup up to the OOMing cgroup (or root) to find the
1985 	 * highest-level memory cgroup with oom.group set.
1986 	 */
1987 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1988 		if (memcg->oom_group)
1989 			oom_group = memcg;
1990 
1991 		if (memcg == oom_domain)
1992 			break;
1993 	}
1994 
1995 	if (oom_group)
1996 		css_get(&oom_group->css);
1997 out:
1998 	rcu_read_unlock();
1999 
2000 	return oom_group;
2001 }
2002 
2003 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2004 {
2005 	pr_info("Tasks in ");
2006 	pr_cont_cgroup_path(memcg->css.cgroup);
2007 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2008 }
2009 
2010 /**
2011  * folio_memcg_lock - Bind a folio to its memcg.
2012  * @folio: The folio.
2013  *
2014  * This function prevents unlocked LRU folios from being moved to
2015  * another cgroup.
2016  *
2017  * It ensures lifetime of the bound memcg.  The caller is responsible
2018  * for the lifetime of the folio.
2019  */
2020 void folio_memcg_lock(struct folio *folio)
2021 {
2022 	struct mem_cgroup *memcg;
2023 	unsigned long flags;
2024 
2025 	/*
2026 	 * The RCU lock is held throughout the transaction.  The fast
2027 	 * path can get away without acquiring the memcg->move_lock
2028 	 * because page moving starts with an RCU grace period.
2029          */
2030 	rcu_read_lock();
2031 
2032 	if (mem_cgroup_disabled())
2033 		return;
2034 again:
2035 	memcg = folio_memcg(folio);
2036 	if (unlikely(!memcg))
2037 		return;
2038 
2039 #ifdef CONFIG_PROVE_LOCKING
2040 	local_irq_save(flags);
2041 	might_lock(&memcg->move_lock);
2042 	local_irq_restore(flags);
2043 #endif
2044 
2045 	if (atomic_read(&memcg->moving_account) <= 0)
2046 		return;
2047 
2048 	spin_lock_irqsave(&memcg->move_lock, flags);
2049 	if (memcg != folio_memcg(folio)) {
2050 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2051 		goto again;
2052 	}
2053 
2054 	/*
2055 	 * When charge migration first begins, we can have multiple
2056 	 * critical sections holding the fast-path RCU lock and one
2057 	 * holding the slowpath move_lock. Track the task who has the
2058 	 * move_lock for unlock_page_memcg().
2059 	 */
2060 	memcg->move_lock_task = current;
2061 	memcg->move_lock_flags = flags;
2062 }
2063 
2064 void lock_page_memcg(struct page *page)
2065 {
2066 	folio_memcg_lock(page_folio(page));
2067 }
2068 
2069 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2070 {
2071 	if (memcg && memcg->move_lock_task == current) {
2072 		unsigned long flags = memcg->move_lock_flags;
2073 
2074 		memcg->move_lock_task = NULL;
2075 		memcg->move_lock_flags = 0;
2076 
2077 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2078 	}
2079 
2080 	rcu_read_unlock();
2081 }
2082 
2083 /**
2084  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2085  * @folio: The folio.
2086  *
2087  * This releases the binding created by folio_memcg_lock().  This does
2088  * not change the accounting of this folio to its memcg, but it does
2089  * permit others to change it.
2090  */
2091 void folio_memcg_unlock(struct folio *folio)
2092 {
2093 	__folio_memcg_unlock(folio_memcg(folio));
2094 }
2095 
2096 void unlock_page_memcg(struct page *page)
2097 {
2098 	folio_memcg_unlock(page_folio(page));
2099 }
2100 
2101 struct memcg_stock_pcp {
2102 	local_lock_t stock_lock;
2103 	struct mem_cgroup *cached; /* this never be root cgroup */
2104 	unsigned int nr_pages;
2105 
2106 #ifdef CONFIG_MEMCG_KMEM
2107 	struct obj_cgroup *cached_objcg;
2108 	struct pglist_data *cached_pgdat;
2109 	unsigned int nr_bytes;
2110 	int nr_slab_reclaimable_b;
2111 	int nr_slab_unreclaimable_b;
2112 #endif
2113 
2114 	struct work_struct work;
2115 	unsigned long flags;
2116 #define FLUSHING_CACHED_CHARGE	0
2117 };
2118 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2119 	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2120 };
2121 static DEFINE_MUTEX(percpu_charge_mutex);
2122 
2123 #ifdef CONFIG_MEMCG_KMEM
2124 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2125 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2126 				     struct mem_cgroup *root_memcg);
2127 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2128 
2129 #else
2130 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2131 {
2132 	return NULL;
2133 }
2134 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2135 				     struct mem_cgroup *root_memcg)
2136 {
2137 	return false;
2138 }
2139 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2140 {
2141 }
2142 #endif
2143 
2144 /**
2145  * consume_stock: Try to consume stocked charge on this cpu.
2146  * @memcg: memcg to consume from.
2147  * @nr_pages: how many pages to charge.
2148  *
2149  * The charges will only happen if @memcg matches the current cpu's memcg
2150  * stock, and at least @nr_pages are available in that stock.  Failure to
2151  * service an allocation will refill the stock.
2152  *
2153  * returns true if successful, false otherwise.
2154  */
2155 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2156 {
2157 	struct memcg_stock_pcp *stock;
2158 	unsigned long flags;
2159 	bool ret = false;
2160 
2161 	if (nr_pages > MEMCG_CHARGE_BATCH)
2162 		return ret;
2163 
2164 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2165 
2166 	stock = this_cpu_ptr(&memcg_stock);
2167 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2168 		stock->nr_pages -= nr_pages;
2169 		ret = true;
2170 	}
2171 
2172 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2173 
2174 	return ret;
2175 }
2176 
2177 /*
2178  * Returns stocks cached in percpu and reset cached information.
2179  */
2180 static void drain_stock(struct memcg_stock_pcp *stock)
2181 {
2182 	struct mem_cgroup *old = stock->cached;
2183 
2184 	if (!old)
2185 		return;
2186 
2187 	if (stock->nr_pages) {
2188 		page_counter_uncharge(&old->memory, stock->nr_pages);
2189 		if (do_memsw_account())
2190 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2191 		stock->nr_pages = 0;
2192 	}
2193 
2194 	css_put(&old->css);
2195 	stock->cached = NULL;
2196 }
2197 
2198 static void drain_local_stock(struct work_struct *dummy)
2199 {
2200 	struct memcg_stock_pcp *stock;
2201 	struct obj_cgroup *old = NULL;
2202 	unsigned long flags;
2203 
2204 	/*
2205 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2206 	 * drain_stock races is that we always operate on local CPU stock
2207 	 * here with IRQ disabled
2208 	 */
2209 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2210 
2211 	stock = this_cpu_ptr(&memcg_stock);
2212 	old = drain_obj_stock(stock);
2213 	drain_stock(stock);
2214 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2215 
2216 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2217 	if (old)
2218 		obj_cgroup_put(old);
2219 }
2220 
2221 /*
2222  * Cache charges(val) to local per_cpu area.
2223  * This will be consumed by consume_stock() function, later.
2224  */
2225 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2226 {
2227 	struct memcg_stock_pcp *stock;
2228 
2229 	stock = this_cpu_ptr(&memcg_stock);
2230 	if (stock->cached != memcg) { /* reset if necessary */
2231 		drain_stock(stock);
2232 		css_get(&memcg->css);
2233 		stock->cached = memcg;
2234 	}
2235 	stock->nr_pages += nr_pages;
2236 
2237 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2238 		drain_stock(stock);
2239 }
2240 
2241 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2242 {
2243 	unsigned long flags;
2244 
2245 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2246 	__refill_stock(memcg, nr_pages);
2247 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2248 }
2249 
2250 /*
2251  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2252  * of the hierarchy under it.
2253  */
2254 static void drain_all_stock(struct mem_cgroup *root_memcg)
2255 {
2256 	int cpu, curcpu;
2257 
2258 	/* If someone's already draining, avoid adding running more workers. */
2259 	if (!mutex_trylock(&percpu_charge_mutex))
2260 		return;
2261 	/*
2262 	 * Notify other cpus that system-wide "drain" is running
2263 	 * We do not care about races with the cpu hotplug because cpu down
2264 	 * as well as workers from this path always operate on the local
2265 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2266 	 */
2267 	migrate_disable();
2268 	curcpu = smp_processor_id();
2269 	for_each_online_cpu(cpu) {
2270 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2271 		struct mem_cgroup *memcg;
2272 		bool flush = false;
2273 
2274 		rcu_read_lock();
2275 		memcg = stock->cached;
2276 		if (memcg && stock->nr_pages &&
2277 		    mem_cgroup_is_descendant(memcg, root_memcg))
2278 			flush = true;
2279 		else if (obj_stock_flush_required(stock, root_memcg))
2280 			flush = true;
2281 		rcu_read_unlock();
2282 
2283 		if (flush &&
2284 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2285 			if (cpu == curcpu)
2286 				drain_local_stock(&stock->work);
2287 			else
2288 				schedule_work_on(cpu, &stock->work);
2289 		}
2290 	}
2291 	migrate_enable();
2292 	mutex_unlock(&percpu_charge_mutex);
2293 }
2294 
2295 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2296 {
2297 	struct memcg_stock_pcp *stock;
2298 
2299 	stock = &per_cpu(memcg_stock, cpu);
2300 	drain_stock(stock);
2301 
2302 	return 0;
2303 }
2304 
2305 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2306 				  unsigned int nr_pages,
2307 				  gfp_t gfp_mask)
2308 {
2309 	unsigned long nr_reclaimed = 0;
2310 
2311 	do {
2312 		unsigned long pflags;
2313 
2314 		if (page_counter_read(&memcg->memory) <=
2315 		    READ_ONCE(memcg->memory.high))
2316 			continue;
2317 
2318 		memcg_memory_event(memcg, MEMCG_HIGH);
2319 
2320 		psi_memstall_enter(&pflags);
2321 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2322 							     gfp_mask, true);
2323 		psi_memstall_leave(&pflags);
2324 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2325 		 !mem_cgroup_is_root(memcg));
2326 
2327 	return nr_reclaimed;
2328 }
2329 
2330 static void high_work_func(struct work_struct *work)
2331 {
2332 	struct mem_cgroup *memcg;
2333 
2334 	memcg = container_of(work, struct mem_cgroup, high_work);
2335 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2336 }
2337 
2338 /*
2339  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2340  * enough to still cause a significant slowdown in most cases, while still
2341  * allowing diagnostics and tracing to proceed without becoming stuck.
2342  */
2343 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2344 
2345 /*
2346  * When calculating the delay, we use these either side of the exponentiation to
2347  * maintain precision and scale to a reasonable number of jiffies (see the table
2348  * below.
2349  *
2350  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2351  *   overage ratio to a delay.
2352  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2353  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2354  *   to produce a reasonable delay curve.
2355  *
2356  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2357  * reasonable delay curve compared to precision-adjusted overage, not
2358  * penalising heavily at first, but still making sure that growth beyond the
2359  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2360  * example, with a high of 100 megabytes:
2361  *
2362  *  +-------+------------------------+
2363  *  | usage | time to allocate in ms |
2364  *  +-------+------------------------+
2365  *  | 100M  |                      0 |
2366  *  | 101M  |                      6 |
2367  *  | 102M  |                     25 |
2368  *  | 103M  |                     57 |
2369  *  | 104M  |                    102 |
2370  *  | 105M  |                    159 |
2371  *  | 106M  |                    230 |
2372  *  | 107M  |                    313 |
2373  *  | 108M  |                    409 |
2374  *  | 109M  |                    518 |
2375  *  | 110M  |                    639 |
2376  *  | 111M  |                    774 |
2377  *  | 112M  |                    921 |
2378  *  | 113M  |                   1081 |
2379  *  | 114M  |                   1254 |
2380  *  | 115M  |                   1439 |
2381  *  | 116M  |                   1638 |
2382  *  | 117M  |                   1849 |
2383  *  | 118M  |                   2000 |
2384  *  | 119M  |                   2000 |
2385  *  | 120M  |                   2000 |
2386  *  +-------+------------------------+
2387  */
2388  #define MEMCG_DELAY_PRECISION_SHIFT 20
2389  #define MEMCG_DELAY_SCALING_SHIFT 14
2390 
2391 static u64 calculate_overage(unsigned long usage, unsigned long high)
2392 {
2393 	u64 overage;
2394 
2395 	if (usage <= high)
2396 		return 0;
2397 
2398 	/*
2399 	 * Prevent division by 0 in overage calculation by acting as if
2400 	 * it was a threshold of 1 page
2401 	 */
2402 	high = max(high, 1UL);
2403 
2404 	overage = usage - high;
2405 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2406 	return div64_u64(overage, high);
2407 }
2408 
2409 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2410 {
2411 	u64 overage, max_overage = 0;
2412 
2413 	do {
2414 		overage = calculate_overage(page_counter_read(&memcg->memory),
2415 					    READ_ONCE(memcg->memory.high));
2416 		max_overage = max(overage, max_overage);
2417 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2418 		 !mem_cgroup_is_root(memcg));
2419 
2420 	return max_overage;
2421 }
2422 
2423 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2424 {
2425 	u64 overage, max_overage = 0;
2426 
2427 	do {
2428 		overage = calculate_overage(page_counter_read(&memcg->swap),
2429 					    READ_ONCE(memcg->swap.high));
2430 		if (overage)
2431 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2432 		max_overage = max(overage, max_overage);
2433 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2434 		 !mem_cgroup_is_root(memcg));
2435 
2436 	return max_overage;
2437 }
2438 
2439 /*
2440  * Get the number of jiffies that we should penalise a mischievous cgroup which
2441  * is exceeding its memory.high by checking both it and its ancestors.
2442  */
2443 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2444 					  unsigned int nr_pages,
2445 					  u64 max_overage)
2446 {
2447 	unsigned long penalty_jiffies;
2448 
2449 	if (!max_overage)
2450 		return 0;
2451 
2452 	/*
2453 	 * We use overage compared to memory.high to calculate the number of
2454 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2455 	 * fairly lenient on small overages, and increasingly harsh when the
2456 	 * memcg in question makes it clear that it has no intention of stopping
2457 	 * its crazy behaviour, so we exponentially increase the delay based on
2458 	 * overage amount.
2459 	 */
2460 	penalty_jiffies = max_overage * max_overage * HZ;
2461 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2462 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2463 
2464 	/*
2465 	 * Factor in the task's own contribution to the overage, such that four
2466 	 * N-sized allocations are throttled approximately the same as one
2467 	 * 4N-sized allocation.
2468 	 *
2469 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2470 	 * larger the current charge patch is than that.
2471 	 */
2472 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2473 }
2474 
2475 /*
2476  * Scheduled by try_charge() to be executed from the userland return path
2477  * and reclaims memory over the high limit.
2478  */
2479 void mem_cgroup_handle_over_high(void)
2480 {
2481 	unsigned long penalty_jiffies;
2482 	unsigned long pflags;
2483 	unsigned long nr_reclaimed;
2484 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2485 	int nr_retries = MAX_RECLAIM_RETRIES;
2486 	struct mem_cgroup *memcg;
2487 	bool in_retry = false;
2488 
2489 	if (likely(!nr_pages))
2490 		return;
2491 
2492 	memcg = get_mem_cgroup_from_mm(current->mm);
2493 	current->memcg_nr_pages_over_high = 0;
2494 
2495 retry_reclaim:
2496 	/*
2497 	 * The allocating task should reclaim at least the batch size, but for
2498 	 * subsequent retries we only want to do what's necessary to prevent oom
2499 	 * or breaching resource isolation.
2500 	 *
2501 	 * This is distinct from memory.max or page allocator behaviour because
2502 	 * memory.high is currently batched, whereas memory.max and the page
2503 	 * allocator run every time an allocation is made.
2504 	 */
2505 	nr_reclaimed = reclaim_high(memcg,
2506 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2507 				    GFP_KERNEL);
2508 
2509 	/*
2510 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2511 	 * allocators proactively to slow down excessive growth.
2512 	 */
2513 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2514 					       mem_find_max_overage(memcg));
2515 
2516 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2517 						swap_find_max_overage(memcg));
2518 
2519 	/*
2520 	 * Clamp the max delay per usermode return so as to still keep the
2521 	 * application moving forwards and also permit diagnostics, albeit
2522 	 * extremely slowly.
2523 	 */
2524 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2525 
2526 	/*
2527 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2528 	 * that it's not even worth doing, in an attempt to be nice to those who
2529 	 * go only a small amount over their memory.high value and maybe haven't
2530 	 * been aggressively reclaimed enough yet.
2531 	 */
2532 	if (penalty_jiffies <= HZ / 100)
2533 		goto out;
2534 
2535 	/*
2536 	 * If reclaim is making forward progress but we're still over
2537 	 * memory.high, we want to encourage that rather than doing allocator
2538 	 * throttling.
2539 	 */
2540 	if (nr_reclaimed || nr_retries--) {
2541 		in_retry = true;
2542 		goto retry_reclaim;
2543 	}
2544 
2545 	/*
2546 	 * If we exit early, we're guaranteed to die (since
2547 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2548 	 * need to account for any ill-begotten jiffies to pay them off later.
2549 	 */
2550 	psi_memstall_enter(&pflags);
2551 	schedule_timeout_killable(penalty_jiffies);
2552 	psi_memstall_leave(&pflags);
2553 
2554 out:
2555 	css_put(&memcg->css);
2556 }
2557 
2558 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2559 			unsigned int nr_pages)
2560 {
2561 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2562 	int nr_retries = MAX_RECLAIM_RETRIES;
2563 	struct mem_cgroup *mem_over_limit;
2564 	struct page_counter *counter;
2565 	unsigned long nr_reclaimed;
2566 	bool passed_oom = false;
2567 	bool may_swap = true;
2568 	bool drained = false;
2569 	unsigned long pflags;
2570 
2571 retry:
2572 	if (consume_stock(memcg, nr_pages))
2573 		return 0;
2574 
2575 	if (!do_memsw_account() ||
2576 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2577 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2578 			goto done_restock;
2579 		if (do_memsw_account())
2580 			page_counter_uncharge(&memcg->memsw, batch);
2581 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2582 	} else {
2583 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2584 		may_swap = false;
2585 	}
2586 
2587 	if (batch > nr_pages) {
2588 		batch = nr_pages;
2589 		goto retry;
2590 	}
2591 
2592 	/*
2593 	 * Prevent unbounded recursion when reclaim operations need to
2594 	 * allocate memory. This might exceed the limits temporarily,
2595 	 * but we prefer facilitating memory reclaim and getting back
2596 	 * under the limit over triggering OOM kills in these cases.
2597 	 */
2598 	if (unlikely(current->flags & PF_MEMALLOC))
2599 		goto force;
2600 
2601 	if (unlikely(task_in_memcg_oom(current)))
2602 		goto nomem;
2603 
2604 	if (!gfpflags_allow_blocking(gfp_mask))
2605 		goto nomem;
2606 
2607 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2608 
2609 	psi_memstall_enter(&pflags);
2610 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2611 						    gfp_mask, may_swap);
2612 	psi_memstall_leave(&pflags);
2613 
2614 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2615 		goto retry;
2616 
2617 	if (!drained) {
2618 		drain_all_stock(mem_over_limit);
2619 		drained = true;
2620 		goto retry;
2621 	}
2622 
2623 	if (gfp_mask & __GFP_NORETRY)
2624 		goto nomem;
2625 	/*
2626 	 * Even though the limit is exceeded at this point, reclaim
2627 	 * may have been able to free some pages.  Retry the charge
2628 	 * before killing the task.
2629 	 *
2630 	 * Only for regular pages, though: huge pages are rather
2631 	 * unlikely to succeed so close to the limit, and we fall back
2632 	 * to regular pages anyway in case of failure.
2633 	 */
2634 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2635 		goto retry;
2636 	/*
2637 	 * At task move, charge accounts can be doubly counted. So, it's
2638 	 * better to wait until the end of task_move if something is going on.
2639 	 */
2640 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2641 		goto retry;
2642 
2643 	if (nr_retries--)
2644 		goto retry;
2645 
2646 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2647 		goto nomem;
2648 
2649 	/* Avoid endless loop for tasks bypassed by the oom killer */
2650 	if (passed_oom && task_is_dying())
2651 		goto nomem;
2652 
2653 	/*
2654 	 * keep retrying as long as the memcg oom killer is able to make
2655 	 * a forward progress or bypass the charge if the oom killer
2656 	 * couldn't make any progress.
2657 	 */
2658 	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2659 			   get_order(nr_pages * PAGE_SIZE))) {
2660 		passed_oom = true;
2661 		nr_retries = MAX_RECLAIM_RETRIES;
2662 		goto retry;
2663 	}
2664 nomem:
2665 	/*
2666 	 * Memcg doesn't have a dedicated reserve for atomic
2667 	 * allocations. But like the global atomic pool, we need to
2668 	 * put the burden of reclaim on regular allocation requests
2669 	 * and let these go through as privileged allocations.
2670 	 */
2671 	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2672 		return -ENOMEM;
2673 force:
2674 	/*
2675 	 * The allocation either can't fail or will lead to more memory
2676 	 * being freed very soon.  Allow memory usage go over the limit
2677 	 * temporarily by force charging it.
2678 	 */
2679 	page_counter_charge(&memcg->memory, nr_pages);
2680 	if (do_memsw_account())
2681 		page_counter_charge(&memcg->memsw, nr_pages);
2682 
2683 	return 0;
2684 
2685 done_restock:
2686 	if (batch > nr_pages)
2687 		refill_stock(memcg, batch - nr_pages);
2688 
2689 	/*
2690 	 * If the hierarchy is above the normal consumption range, schedule
2691 	 * reclaim on returning to userland.  We can perform reclaim here
2692 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2693 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2694 	 * not recorded as it most likely matches current's and won't
2695 	 * change in the meantime.  As high limit is checked again before
2696 	 * reclaim, the cost of mismatch is negligible.
2697 	 */
2698 	do {
2699 		bool mem_high, swap_high;
2700 
2701 		mem_high = page_counter_read(&memcg->memory) >
2702 			READ_ONCE(memcg->memory.high);
2703 		swap_high = page_counter_read(&memcg->swap) >
2704 			READ_ONCE(memcg->swap.high);
2705 
2706 		/* Don't bother a random interrupted task */
2707 		if (!in_task()) {
2708 			if (mem_high) {
2709 				schedule_work(&memcg->high_work);
2710 				break;
2711 			}
2712 			continue;
2713 		}
2714 
2715 		if (mem_high || swap_high) {
2716 			/*
2717 			 * The allocating tasks in this cgroup will need to do
2718 			 * reclaim or be throttled to prevent further growth
2719 			 * of the memory or swap footprints.
2720 			 *
2721 			 * Target some best-effort fairness between the tasks,
2722 			 * and distribute reclaim work and delay penalties
2723 			 * based on how much each task is actually allocating.
2724 			 */
2725 			current->memcg_nr_pages_over_high += batch;
2726 			set_notify_resume(current);
2727 			break;
2728 		}
2729 	} while ((memcg = parent_mem_cgroup(memcg)));
2730 
2731 	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2732 	    !(current->flags & PF_MEMALLOC) &&
2733 	    gfpflags_allow_blocking(gfp_mask)) {
2734 		mem_cgroup_handle_over_high();
2735 	}
2736 	return 0;
2737 }
2738 
2739 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2740 			     unsigned int nr_pages)
2741 {
2742 	if (mem_cgroup_is_root(memcg))
2743 		return 0;
2744 
2745 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2746 }
2747 
2748 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2749 {
2750 	if (mem_cgroup_is_root(memcg))
2751 		return;
2752 
2753 	page_counter_uncharge(&memcg->memory, nr_pages);
2754 	if (do_memsw_account())
2755 		page_counter_uncharge(&memcg->memsw, nr_pages);
2756 }
2757 
2758 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2759 {
2760 	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2761 	/*
2762 	 * Any of the following ensures page's memcg stability:
2763 	 *
2764 	 * - the page lock
2765 	 * - LRU isolation
2766 	 * - lock_page_memcg()
2767 	 * - exclusive reference
2768 	 */
2769 	folio->memcg_data = (unsigned long)memcg;
2770 }
2771 
2772 #ifdef CONFIG_MEMCG_KMEM
2773 /*
2774  * The allocated objcg pointers array is not accounted directly.
2775  * Moreover, it should not come from DMA buffer and is not readily
2776  * reclaimable. So those GFP bits should be masked off.
2777  */
2778 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2779 
2780 /*
2781  * mod_objcg_mlstate() may be called with irq enabled, so
2782  * mod_memcg_lruvec_state() should be used.
2783  */
2784 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2785 				     struct pglist_data *pgdat,
2786 				     enum node_stat_item idx, int nr)
2787 {
2788 	struct mem_cgroup *memcg;
2789 	struct lruvec *lruvec;
2790 
2791 	rcu_read_lock();
2792 	memcg = obj_cgroup_memcg(objcg);
2793 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2794 	mod_memcg_lruvec_state(lruvec, idx, nr);
2795 	rcu_read_unlock();
2796 }
2797 
2798 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2799 				 gfp_t gfp, bool new_slab)
2800 {
2801 	unsigned int objects = objs_per_slab(s, slab);
2802 	unsigned long memcg_data;
2803 	void *vec;
2804 
2805 	gfp &= ~OBJCGS_CLEAR_MASK;
2806 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2807 			   slab_nid(slab));
2808 	if (!vec)
2809 		return -ENOMEM;
2810 
2811 	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2812 	if (new_slab) {
2813 		/*
2814 		 * If the slab is brand new and nobody can yet access its
2815 		 * memcg_data, no synchronization is required and memcg_data can
2816 		 * be simply assigned.
2817 		 */
2818 		slab->memcg_data = memcg_data;
2819 	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2820 		/*
2821 		 * If the slab is already in use, somebody can allocate and
2822 		 * assign obj_cgroups in parallel. In this case the existing
2823 		 * objcg vector should be reused.
2824 		 */
2825 		kfree(vec);
2826 		return 0;
2827 	}
2828 
2829 	kmemleak_not_leak(vec);
2830 	return 0;
2831 }
2832 
2833 /*
2834  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2835  *
2836  * A passed kernel object can be a slab object or a generic kernel page, so
2837  * different mechanisms for getting the memory cgroup pointer should be used.
2838  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2839  * can not know for sure how the kernel object is implemented.
2840  * mem_cgroup_from_obj() can be safely used in such cases.
2841  *
2842  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2843  * cgroup_mutex, etc.
2844  */
2845 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2846 {
2847 	struct folio *folio;
2848 
2849 	if (mem_cgroup_disabled())
2850 		return NULL;
2851 
2852 	folio = virt_to_folio(p);
2853 
2854 	/*
2855 	 * Slab objects are accounted individually, not per-page.
2856 	 * Memcg membership data for each individual object is saved in
2857 	 * slab->memcg_data.
2858 	 */
2859 	if (folio_test_slab(folio)) {
2860 		struct obj_cgroup **objcgs;
2861 		struct slab *slab;
2862 		unsigned int off;
2863 
2864 		slab = folio_slab(folio);
2865 		objcgs = slab_objcgs(slab);
2866 		if (!objcgs)
2867 			return NULL;
2868 
2869 		off = obj_to_index(slab->slab_cache, slab, p);
2870 		if (objcgs[off])
2871 			return obj_cgroup_memcg(objcgs[off]);
2872 
2873 		return NULL;
2874 	}
2875 
2876 	/*
2877 	 * page_memcg_check() is used here, because in theory we can encounter
2878 	 * a folio where the slab flag has been cleared already, but
2879 	 * slab->memcg_data has not been freed yet
2880 	 * page_memcg_check(page) will guarantee that a proper memory
2881 	 * cgroup pointer or NULL will be returned.
2882 	 */
2883 	return page_memcg_check(folio_page(folio, 0));
2884 }
2885 
2886 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2887 {
2888 	struct obj_cgroup *objcg = NULL;
2889 	struct mem_cgroup *memcg;
2890 
2891 	if (memcg_kmem_bypass())
2892 		return NULL;
2893 
2894 	rcu_read_lock();
2895 	if (unlikely(active_memcg()))
2896 		memcg = active_memcg();
2897 	else
2898 		memcg = mem_cgroup_from_task(current);
2899 
2900 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2901 		objcg = rcu_dereference(memcg->objcg);
2902 		if (objcg && obj_cgroup_tryget(objcg))
2903 			break;
2904 		objcg = NULL;
2905 	}
2906 	rcu_read_unlock();
2907 
2908 	return objcg;
2909 }
2910 
2911 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2912 {
2913 	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2914 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
2915 		if (nr_pages > 0)
2916 			page_counter_charge(&memcg->kmem, nr_pages);
2917 		else
2918 			page_counter_uncharge(&memcg->kmem, -nr_pages);
2919 	}
2920 }
2921 
2922 
2923 /*
2924  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
2925  * @objcg: object cgroup to uncharge
2926  * @nr_pages: number of pages to uncharge
2927  */
2928 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2929 				      unsigned int nr_pages)
2930 {
2931 	struct mem_cgroup *memcg;
2932 
2933 	memcg = get_mem_cgroup_from_objcg(objcg);
2934 
2935 	memcg_account_kmem(memcg, -nr_pages);
2936 	refill_stock(memcg, nr_pages);
2937 
2938 	css_put(&memcg->css);
2939 }
2940 
2941 /*
2942  * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
2943  * @objcg: object cgroup to charge
2944  * @gfp: reclaim mode
2945  * @nr_pages: number of pages to charge
2946  *
2947  * Returns 0 on success, an error code on failure.
2948  */
2949 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2950 				   unsigned int nr_pages)
2951 {
2952 	struct mem_cgroup *memcg;
2953 	int ret;
2954 
2955 	memcg = get_mem_cgroup_from_objcg(objcg);
2956 
2957 	ret = try_charge_memcg(memcg, gfp, nr_pages);
2958 	if (ret)
2959 		goto out;
2960 
2961 	memcg_account_kmem(memcg, nr_pages);
2962 out:
2963 	css_put(&memcg->css);
2964 
2965 	return ret;
2966 }
2967 
2968 /**
2969  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2970  * @page: page to charge
2971  * @gfp: reclaim mode
2972  * @order: allocation order
2973  *
2974  * Returns 0 on success, an error code on failure.
2975  */
2976 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2977 {
2978 	struct obj_cgroup *objcg;
2979 	int ret = 0;
2980 
2981 	objcg = get_obj_cgroup_from_current();
2982 	if (objcg) {
2983 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2984 		if (!ret) {
2985 			page->memcg_data = (unsigned long)objcg |
2986 				MEMCG_DATA_KMEM;
2987 			return 0;
2988 		}
2989 		obj_cgroup_put(objcg);
2990 	}
2991 	return ret;
2992 }
2993 
2994 /**
2995  * __memcg_kmem_uncharge_page: uncharge a kmem page
2996  * @page: page to uncharge
2997  * @order: allocation order
2998  */
2999 void __memcg_kmem_uncharge_page(struct page *page, int order)
3000 {
3001 	struct folio *folio = page_folio(page);
3002 	struct obj_cgroup *objcg;
3003 	unsigned int nr_pages = 1 << order;
3004 
3005 	if (!folio_memcg_kmem(folio))
3006 		return;
3007 
3008 	objcg = __folio_objcg(folio);
3009 	obj_cgroup_uncharge_pages(objcg, nr_pages);
3010 	folio->memcg_data = 0;
3011 	obj_cgroup_put(objcg);
3012 }
3013 
3014 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3015 		     enum node_stat_item idx, int nr)
3016 {
3017 	struct memcg_stock_pcp *stock;
3018 	struct obj_cgroup *old = NULL;
3019 	unsigned long flags;
3020 	int *bytes;
3021 
3022 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3023 	stock = this_cpu_ptr(&memcg_stock);
3024 
3025 	/*
3026 	 * Save vmstat data in stock and skip vmstat array update unless
3027 	 * accumulating over a page of vmstat data or when pgdat or idx
3028 	 * changes.
3029 	 */
3030 	if (stock->cached_objcg != objcg) {
3031 		old = drain_obj_stock(stock);
3032 		obj_cgroup_get(objcg);
3033 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3034 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3035 		stock->cached_objcg = objcg;
3036 		stock->cached_pgdat = pgdat;
3037 	} else if (stock->cached_pgdat != pgdat) {
3038 		/* Flush the existing cached vmstat data */
3039 		struct pglist_data *oldpg = stock->cached_pgdat;
3040 
3041 		if (stock->nr_slab_reclaimable_b) {
3042 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3043 					  stock->nr_slab_reclaimable_b);
3044 			stock->nr_slab_reclaimable_b = 0;
3045 		}
3046 		if (stock->nr_slab_unreclaimable_b) {
3047 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3048 					  stock->nr_slab_unreclaimable_b);
3049 			stock->nr_slab_unreclaimable_b = 0;
3050 		}
3051 		stock->cached_pgdat = pgdat;
3052 	}
3053 
3054 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3055 					       : &stock->nr_slab_unreclaimable_b;
3056 	/*
3057 	 * Even for large object >= PAGE_SIZE, the vmstat data will still be
3058 	 * cached locally at least once before pushing it out.
3059 	 */
3060 	if (!*bytes) {
3061 		*bytes = nr;
3062 		nr = 0;
3063 	} else {
3064 		*bytes += nr;
3065 		if (abs(*bytes) > PAGE_SIZE) {
3066 			nr = *bytes;
3067 			*bytes = 0;
3068 		} else {
3069 			nr = 0;
3070 		}
3071 	}
3072 	if (nr)
3073 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3074 
3075 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3076 	if (old)
3077 		obj_cgroup_put(old);
3078 }
3079 
3080 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3081 {
3082 	struct memcg_stock_pcp *stock;
3083 	unsigned long flags;
3084 	bool ret = false;
3085 
3086 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3087 
3088 	stock = this_cpu_ptr(&memcg_stock);
3089 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3090 		stock->nr_bytes -= nr_bytes;
3091 		ret = true;
3092 	}
3093 
3094 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3095 
3096 	return ret;
3097 }
3098 
3099 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3100 {
3101 	struct obj_cgroup *old = stock->cached_objcg;
3102 
3103 	if (!old)
3104 		return NULL;
3105 
3106 	if (stock->nr_bytes) {
3107 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3108 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3109 
3110 		if (nr_pages) {
3111 			struct mem_cgroup *memcg;
3112 
3113 			memcg = get_mem_cgroup_from_objcg(old);
3114 
3115 			memcg_account_kmem(memcg, -nr_pages);
3116 			__refill_stock(memcg, nr_pages);
3117 
3118 			css_put(&memcg->css);
3119 		}
3120 
3121 		/*
3122 		 * The leftover is flushed to the centralized per-memcg value.
3123 		 * On the next attempt to refill obj stock it will be moved
3124 		 * to a per-cpu stock (probably, on an other CPU), see
3125 		 * refill_obj_stock().
3126 		 *
3127 		 * How often it's flushed is a trade-off between the memory
3128 		 * limit enforcement accuracy and potential CPU contention,
3129 		 * so it might be changed in the future.
3130 		 */
3131 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3132 		stock->nr_bytes = 0;
3133 	}
3134 
3135 	/*
3136 	 * Flush the vmstat data in current stock
3137 	 */
3138 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3139 		if (stock->nr_slab_reclaimable_b) {
3140 			mod_objcg_mlstate(old, stock->cached_pgdat,
3141 					  NR_SLAB_RECLAIMABLE_B,
3142 					  stock->nr_slab_reclaimable_b);
3143 			stock->nr_slab_reclaimable_b = 0;
3144 		}
3145 		if (stock->nr_slab_unreclaimable_b) {
3146 			mod_objcg_mlstate(old, stock->cached_pgdat,
3147 					  NR_SLAB_UNRECLAIMABLE_B,
3148 					  stock->nr_slab_unreclaimable_b);
3149 			stock->nr_slab_unreclaimable_b = 0;
3150 		}
3151 		stock->cached_pgdat = NULL;
3152 	}
3153 
3154 	stock->cached_objcg = NULL;
3155 	/*
3156 	 * The `old' objects needs to be released by the caller via
3157 	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3158 	 */
3159 	return old;
3160 }
3161 
3162 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3163 				     struct mem_cgroup *root_memcg)
3164 {
3165 	struct mem_cgroup *memcg;
3166 
3167 	if (stock->cached_objcg) {
3168 		memcg = obj_cgroup_memcg(stock->cached_objcg);
3169 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3170 			return true;
3171 	}
3172 
3173 	return false;
3174 }
3175 
3176 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3177 			     bool allow_uncharge)
3178 {
3179 	struct memcg_stock_pcp *stock;
3180 	struct obj_cgroup *old = NULL;
3181 	unsigned long flags;
3182 	unsigned int nr_pages = 0;
3183 
3184 	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3185 
3186 	stock = this_cpu_ptr(&memcg_stock);
3187 	if (stock->cached_objcg != objcg) { /* reset if necessary */
3188 		old = drain_obj_stock(stock);
3189 		obj_cgroup_get(objcg);
3190 		stock->cached_objcg = objcg;
3191 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3192 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3193 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3194 	}
3195 	stock->nr_bytes += nr_bytes;
3196 
3197 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3198 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3199 		stock->nr_bytes &= (PAGE_SIZE - 1);
3200 	}
3201 
3202 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3203 	if (old)
3204 		obj_cgroup_put(old);
3205 
3206 	if (nr_pages)
3207 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3208 }
3209 
3210 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3211 {
3212 	unsigned int nr_pages, nr_bytes;
3213 	int ret;
3214 
3215 	if (consume_obj_stock(objcg, size))
3216 		return 0;
3217 
3218 	/*
3219 	 * In theory, objcg->nr_charged_bytes can have enough
3220 	 * pre-charged bytes to satisfy the allocation. However,
3221 	 * flushing objcg->nr_charged_bytes requires two atomic
3222 	 * operations, and objcg->nr_charged_bytes can't be big.
3223 	 * The shared objcg->nr_charged_bytes can also become a
3224 	 * performance bottleneck if all tasks of the same memcg are
3225 	 * trying to update it. So it's better to ignore it and try
3226 	 * grab some new pages. The stock's nr_bytes will be flushed to
3227 	 * objcg->nr_charged_bytes later on when objcg changes.
3228 	 *
3229 	 * The stock's nr_bytes may contain enough pre-charged bytes
3230 	 * to allow one less page from being charged, but we can't rely
3231 	 * on the pre-charged bytes not being changed outside of
3232 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3233 	 * pre-charged bytes as well when charging pages. To avoid a
3234 	 * page uncharge right after a page charge, we set the
3235 	 * allow_uncharge flag to false when calling refill_obj_stock()
3236 	 * to temporarily allow the pre-charged bytes to exceed the page
3237 	 * size limit. The maximum reachable value of the pre-charged
3238 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3239 	 * race.
3240 	 */
3241 	nr_pages = size >> PAGE_SHIFT;
3242 	nr_bytes = size & (PAGE_SIZE - 1);
3243 
3244 	if (nr_bytes)
3245 		nr_pages += 1;
3246 
3247 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3248 	if (!ret && nr_bytes)
3249 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3250 
3251 	return ret;
3252 }
3253 
3254 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3255 {
3256 	refill_obj_stock(objcg, size, true);
3257 }
3258 
3259 #endif /* CONFIG_MEMCG_KMEM */
3260 
3261 /*
3262  * Because page_memcg(head) is not set on tails, set it now.
3263  */
3264 void split_page_memcg(struct page *head, unsigned int nr)
3265 {
3266 	struct folio *folio = page_folio(head);
3267 	struct mem_cgroup *memcg = folio_memcg(folio);
3268 	int i;
3269 
3270 	if (mem_cgroup_disabled() || !memcg)
3271 		return;
3272 
3273 	for (i = 1; i < nr; i++)
3274 		folio_page(folio, i)->memcg_data = folio->memcg_data;
3275 
3276 	if (folio_memcg_kmem(folio))
3277 		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3278 	else
3279 		css_get_many(&memcg->css, nr - 1);
3280 }
3281 
3282 #ifdef CONFIG_MEMCG_SWAP
3283 /**
3284  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3285  * @entry: swap entry to be moved
3286  * @from:  mem_cgroup which the entry is moved from
3287  * @to:  mem_cgroup which the entry is moved to
3288  *
3289  * It succeeds only when the swap_cgroup's record for this entry is the same
3290  * as the mem_cgroup's id of @from.
3291  *
3292  * Returns 0 on success, -EINVAL on failure.
3293  *
3294  * The caller must have charged to @to, IOW, called page_counter_charge() about
3295  * both res and memsw, and called css_get().
3296  */
3297 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3298 				struct mem_cgroup *from, struct mem_cgroup *to)
3299 {
3300 	unsigned short old_id, new_id;
3301 
3302 	old_id = mem_cgroup_id(from);
3303 	new_id = mem_cgroup_id(to);
3304 
3305 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3306 		mod_memcg_state(from, MEMCG_SWAP, -1);
3307 		mod_memcg_state(to, MEMCG_SWAP, 1);
3308 		return 0;
3309 	}
3310 	return -EINVAL;
3311 }
3312 #else
3313 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3314 				struct mem_cgroup *from, struct mem_cgroup *to)
3315 {
3316 	return -EINVAL;
3317 }
3318 #endif
3319 
3320 static DEFINE_MUTEX(memcg_max_mutex);
3321 
3322 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3323 				 unsigned long max, bool memsw)
3324 {
3325 	bool enlarge = false;
3326 	bool drained = false;
3327 	int ret;
3328 	bool limits_invariant;
3329 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3330 
3331 	do {
3332 		if (signal_pending(current)) {
3333 			ret = -EINTR;
3334 			break;
3335 		}
3336 
3337 		mutex_lock(&memcg_max_mutex);
3338 		/*
3339 		 * Make sure that the new limit (memsw or memory limit) doesn't
3340 		 * break our basic invariant rule memory.max <= memsw.max.
3341 		 */
3342 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3343 					   max <= memcg->memsw.max;
3344 		if (!limits_invariant) {
3345 			mutex_unlock(&memcg_max_mutex);
3346 			ret = -EINVAL;
3347 			break;
3348 		}
3349 		if (max > counter->max)
3350 			enlarge = true;
3351 		ret = page_counter_set_max(counter, max);
3352 		mutex_unlock(&memcg_max_mutex);
3353 
3354 		if (!ret)
3355 			break;
3356 
3357 		if (!drained) {
3358 			drain_all_stock(memcg);
3359 			drained = true;
3360 			continue;
3361 		}
3362 
3363 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3364 					GFP_KERNEL, !memsw)) {
3365 			ret = -EBUSY;
3366 			break;
3367 		}
3368 	} while (true);
3369 
3370 	if (!ret && enlarge)
3371 		memcg_oom_recover(memcg);
3372 
3373 	return ret;
3374 }
3375 
3376 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3377 					    gfp_t gfp_mask,
3378 					    unsigned long *total_scanned)
3379 {
3380 	unsigned long nr_reclaimed = 0;
3381 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3382 	unsigned long reclaimed;
3383 	int loop = 0;
3384 	struct mem_cgroup_tree_per_node *mctz;
3385 	unsigned long excess;
3386 
3387 	if (order > 0)
3388 		return 0;
3389 
3390 	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3391 
3392 	/*
3393 	 * Do not even bother to check the largest node if the root
3394 	 * is empty. Do it lockless to prevent lock bouncing. Races
3395 	 * are acceptable as soft limit is best effort anyway.
3396 	 */
3397 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3398 		return 0;
3399 
3400 	/*
3401 	 * This loop can run a while, specially if mem_cgroup's continuously
3402 	 * keep exceeding their soft limit and putting the system under
3403 	 * pressure
3404 	 */
3405 	do {
3406 		if (next_mz)
3407 			mz = next_mz;
3408 		else
3409 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3410 		if (!mz)
3411 			break;
3412 
3413 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3414 						    gfp_mask, total_scanned);
3415 		nr_reclaimed += reclaimed;
3416 		spin_lock_irq(&mctz->lock);
3417 
3418 		/*
3419 		 * If we failed to reclaim anything from this memory cgroup
3420 		 * it is time to move on to the next cgroup
3421 		 */
3422 		next_mz = NULL;
3423 		if (!reclaimed)
3424 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3425 
3426 		excess = soft_limit_excess(mz->memcg);
3427 		/*
3428 		 * One school of thought says that we should not add
3429 		 * back the node to the tree if reclaim returns 0.
3430 		 * But our reclaim could return 0, simply because due
3431 		 * to priority we are exposing a smaller subset of
3432 		 * memory to reclaim from. Consider this as a longer
3433 		 * term TODO.
3434 		 */
3435 		/* If excess == 0, no tree ops */
3436 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3437 		spin_unlock_irq(&mctz->lock);
3438 		css_put(&mz->memcg->css);
3439 		loop++;
3440 		/*
3441 		 * Could not reclaim anything and there are no more
3442 		 * mem cgroups to try or we seem to be looping without
3443 		 * reclaiming anything.
3444 		 */
3445 		if (!nr_reclaimed &&
3446 			(next_mz == NULL ||
3447 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3448 			break;
3449 	} while (!nr_reclaimed);
3450 	if (next_mz)
3451 		css_put(&next_mz->memcg->css);
3452 	return nr_reclaimed;
3453 }
3454 
3455 /*
3456  * Reclaims as many pages from the given memcg as possible.
3457  *
3458  * Caller is responsible for holding css reference for memcg.
3459  */
3460 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3461 {
3462 	int nr_retries = MAX_RECLAIM_RETRIES;
3463 
3464 	/* we call try-to-free pages for make this cgroup empty */
3465 	lru_add_drain_all();
3466 
3467 	drain_all_stock(memcg);
3468 
3469 	/* try to free all pages in this cgroup */
3470 	while (nr_retries && page_counter_read(&memcg->memory)) {
3471 		if (signal_pending(current))
3472 			return -EINTR;
3473 
3474 		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true))
3475 			nr_retries--;
3476 	}
3477 
3478 	return 0;
3479 }
3480 
3481 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3482 					    char *buf, size_t nbytes,
3483 					    loff_t off)
3484 {
3485 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3486 
3487 	if (mem_cgroup_is_root(memcg))
3488 		return -EINVAL;
3489 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3490 }
3491 
3492 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3493 				     struct cftype *cft)
3494 {
3495 	return 1;
3496 }
3497 
3498 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3499 				      struct cftype *cft, u64 val)
3500 {
3501 	if (val == 1)
3502 		return 0;
3503 
3504 	pr_warn_once("Non-hierarchical mode is deprecated. "
3505 		     "Please report your usecase to linux-mm@kvack.org if you "
3506 		     "depend on this functionality.\n");
3507 
3508 	return -EINVAL;
3509 }
3510 
3511 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3512 {
3513 	unsigned long val;
3514 
3515 	if (mem_cgroup_is_root(memcg)) {
3516 		mem_cgroup_flush_stats();
3517 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3518 			memcg_page_state(memcg, NR_ANON_MAPPED);
3519 		if (swap)
3520 			val += memcg_page_state(memcg, MEMCG_SWAP);
3521 	} else {
3522 		if (!swap)
3523 			val = page_counter_read(&memcg->memory);
3524 		else
3525 			val = page_counter_read(&memcg->memsw);
3526 	}
3527 	return val;
3528 }
3529 
3530 enum {
3531 	RES_USAGE,
3532 	RES_LIMIT,
3533 	RES_MAX_USAGE,
3534 	RES_FAILCNT,
3535 	RES_SOFT_LIMIT,
3536 };
3537 
3538 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3539 			       struct cftype *cft)
3540 {
3541 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3542 	struct page_counter *counter;
3543 
3544 	switch (MEMFILE_TYPE(cft->private)) {
3545 	case _MEM:
3546 		counter = &memcg->memory;
3547 		break;
3548 	case _MEMSWAP:
3549 		counter = &memcg->memsw;
3550 		break;
3551 	case _KMEM:
3552 		counter = &memcg->kmem;
3553 		break;
3554 	case _TCP:
3555 		counter = &memcg->tcpmem;
3556 		break;
3557 	default:
3558 		BUG();
3559 	}
3560 
3561 	switch (MEMFILE_ATTR(cft->private)) {
3562 	case RES_USAGE:
3563 		if (counter == &memcg->memory)
3564 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3565 		if (counter == &memcg->memsw)
3566 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3567 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3568 	case RES_LIMIT:
3569 		return (u64)counter->max * PAGE_SIZE;
3570 	case RES_MAX_USAGE:
3571 		return (u64)counter->watermark * PAGE_SIZE;
3572 	case RES_FAILCNT:
3573 		return counter->failcnt;
3574 	case RES_SOFT_LIMIT:
3575 		return (u64)memcg->soft_limit * PAGE_SIZE;
3576 	default:
3577 		BUG();
3578 	}
3579 }
3580 
3581 #ifdef CONFIG_MEMCG_KMEM
3582 static int memcg_online_kmem(struct mem_cgroup *memcg)
3583 {
3584 	struct obj_cgroup *objcg;
3585 
3586 	if (cgroup_memory_nokmem)
3587 		return 0;
3588 
3589 	if (unlikely(mem_cgroup_is_root(memcg)))
3590 		return 0;
3591 
3592 	objcg = obj_cgroup_alloc();
3593 	if (!objcg)
3594 		return -ENOMEM;
3595 
3596 	objcg->memcg = memcg;
3597 	rcu_assign_pointer(memcg->objcg, objcg);
3598 
3599 	static_branch_enable(&memcg_kmem_enabled_key);
3600 
3601 	memcg->kmemcg_id = memcg->id.id;
3602 
3603 	return 0;
3604 }
3605 
3606 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3607 {
3608 	struct mem_cgroup *parent;
3609 
3610 	if (cgroup_memory_nokmem)
3611 		return;
3612 
3613 	if (unlikely(mem_cgroup_is_root(memcg)))
3614 		return;
3615 
3616 	parent = parent_mem_cgroup(memcg);
3617 	if (!parent)
3618 		parent = root_mem_cgroup;
3619 
3620 	memcg_reparent_objcgs(memcg, parent);
3621 
3622 	/*
3623 	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3624 	 * corresponding to this cgroup are guaranteed to remain empty.
3625 	 * The ordering is imposed by list_lru_node->lock taken by
3626 	 * memcg_reparent_list_lrus().
3627 	 */
3628 	memcg_reparent_list_lrus(memcg, parent);
3629 }
3630 #else
3631 static int memcg_online_kmem(struct mem_cgroup *memcg)
3632 {
3633 	return 0;
3634 }
3635 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3636 {
3637 }
3638 #endif /* CONFIG_MEMCG_KMEM */
3639 
3640 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3641 {
3642 	int ret;
3643 
3644 	mutex_lock(&memcg_max_mutex);
3645 
3646 	ret = page_counter_set_max(&memcg->tcpmem, max);
3647 	if (ret)
3648 		goto out;
3649 
3650 	if (!memcg->tcpmem_active) {
3651 		/*
3652 		 * The active flag needs to be written after the static_key
3653 		 * update. This is what guarantees that the socket activation
3654 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3655 		 * for details, and note that we don't mark any socket as
3656 		 * belonging to this memcg until that flag is up.
3657 		 *
3658 		 * We need to do this, because static_keys will span multiple
3659 		 * sites, but we can't control their order. If we mark a socket
3660 		 * as accounted, but the accounting functions are not patched in
3661 		 * yet, we'll lose accounting.
3662 		 *
3663 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3664 		 * because when this value change, the code to process it is not
3665 		 * patched in yet.
3666 		 */
3667 		static_branch_inc(&memcg_sockets_enabled_key);
3668 		memcg->tcpmem_active = true;
3669 	}
3670 out:
3671 	mutex_unlock(&memcg_max_mutex);
3672 	return ret;
3673 }
3674 
3675 /*
3676  * The user of this function is...
3677  * RES_LIMIT.
3678  */
3679 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3680 				char *buf, size_t nbytes, loff_t off)
3681 {
3682 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3683 	unsigned long nr_pages;
3684 	int ret;
3685 
3686 	buf = strstrip(buf);
3687 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3688 	if (ret)
3689 		return ret;
3690 
3691 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3692 	case RES_LIMIT:
3693 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3694 			ret = -EINVAL;
3695 			break;
3696 		}
3697 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3698 		case _MEM:
3699 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3700 			break;
3701 		case _MEMSWAP:
3702 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3703 			break;
3704 		case _KMEM:
3705 			/* kmem.limit_in_bytes is deprecated. */
3706 			ret = -EOPNOTSUPP;
3707 			break;
3708 		case _TCP:
3709 			ret = memcg_update_tcp_max(memcg, nr_pages);
3710 			break;
3711 		}
3712 		break;
3713 	case RES_SOFT_LIMIT:
3714 		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3715 			ret = -EOPNOTSUPP;
3716 		} else {
3717 			memcg->soft_limit = nr_pages;
3718 			ret = 0;
3719 		}
3720 		break;
3721 	}
3722 	return ret ?: nbytes;
3723 }
3724 
3725 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3726 				size_t nbytes, loff_t off)
3727 {
3728 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3729 	struct page_counter *counter;
3730 
3731 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3732 	case _MEM:
3733 		counter = &memcg->memory;
3734 		break;
3735 	case _MEMSWAP:
3736 		counter = &memcg->memsw;
3737 		break;
3738 	case _KMEM:
3739 		counter = &memcg->kmem;
3740 		break;
3741 	case _TCP:
3742 		counter = &memcg->tcpmem;
3743 		break;
3744 	default:
3745 		BUG();
3746 	}
3747 
3748 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3749 	case RES_MAX_USAGE:
3750 		page_counter_reset_watermark(counter);
3751 		break;
3752 	case RES_FAILCNT:
3753 		counter->failcnt = 0;
3754 		break;
3755 	default:
3756 		BUG();
3757 	}
3758 
3759 	return nbytes;
3760 }
3761 
3762 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3763 					struct cftype *cft)
3764 {
3765 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3766 }
3767 
3768 #ifdef CONFIG_MMU
3769 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3770 					struct cftype *cft, u64 val)
3771 {
3772 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3773 
3774 	if (val & ~MOVE_MASK)
3775 		return -EINVAL;
3776 
3777 	/*
3778 	 * No kind of locking is needed in here, because ->can_attach() will
3779 	 * check this value once in the beginning of the process, and then carry
3780 	 * on with stale data. This means that changes to this value will only
3781 	 * affect task migrations starting after the change.
3782 	 */
3783 	memcg->move_charge_at_immigrate = val;
3784 	return 0;
3785 }
3786 #else
3787 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3788 					struct cftype *cft, u64 val)
3789 {
3790 	return -ENOSYS;
3791 }
3792 #endif
3793 
3794 #ifdef CONFIG_NUMA
3795 
3796 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3797 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3798 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3799 
3800 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3801 				int nid, unsigned int lru_mask, bool tree)
3802 {
3803 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3804 	unsigned long nr = 0;
3805 	enum lru_list lru;
3806 
3807 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3808 
3809 	for_each_lru(lru) {
3810 		if (!(BIT(lru) & lru_mask))
3811 			continue;
3812 		if (tree)
3813 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3814 		else
3815 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3816 	}
3817 	return nr;
3818 }
3819 
3820 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3821 					     unsigned int lru_mask,
3822 					     bool tree)
3823 {
3824 	unsigned long nr = 0;
3825 	enum lru_list lru;
3826 
3827 	for_each_lru(lru) {
3828 		if (!(BIT(lru) & lru_mask))
3829 			continue;
3830 		if (tree)
3831 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3832 		else
3833 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3834 	}
3835 	return nr;
3836 }
3837 
3838 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3839 {
3840 	struct numa_stat {
3841 		const char *name;
3842 		unsigned int lru_mask;
3843 	};
3844 
3845 	static const struct numa_stat stats[] = {
3846 		{ "total", LRU_ALL },
3847 		{ "file", LRU_ALL_FILE },
3848 		{ "anon", LRU_ALL_ANON },
3849 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3850 	};
3851 	const struct numa_stat *stat;
3852 	int nid;
3853 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3854 
3855 	mem_cgroup_flush_stats();
3856 
3857 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3858 		seq_printf(m, "%s=%lu", stat->name,
3859 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3860 						   false));
3861 		for_each_node_state(nid, N_MEMORY)
3862 			seq_printf(m, " N%d=%lu", nid,
3863 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3864 							stat->lru_mask, false));
3865 		seq_putc(m, '\n');
3866 	}
3867 
3868 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3869 
3870 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
3871 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3872 						   true));
3873 		for_each_node_state(nid, N_MEMORY)
3874 			seq_printf(m, " N%d=%lu", nid,
3875 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3876 							stat->lru_mask, true));
3877 		seq_putc(m, '\n');
3878 	}
3879 
3880 	return 0;
3881 }
3882 #endif /* CONFIG_NUMA */
3883 
3884 static const unsigned int memcg1_stats[] = {
3885 	NR_FILE_PAGES,
3886 	NR_ANON_MAPPED,
3887 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3888 	NR_ANON_THPS,
3889 #endif
3890 	NR_SHMEM,
3891 	NR_FILE_MAPPED,
3892 	NR_FILE_DIRTY,
3893 	NR_WRITEBACK,
3894 	MEMCG_SWAP,
3895 };
3896 
3897 static const char *const memcg1_stat_names[] = {
3898 	"cache",
3899 	"rss",
3900 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3901 	"rss_huge",
3902 #endif
3903 	"shmem",
3904 	"mapped_file",
3905 	"dirty",
3906 	"writeback",
3907 	"swap",
3908 };
3909 
3910 /* Universal VM events cgroup1 shows, original sort order */
3911 static const unsigned int memcg1_events[] = {
3912 	PGPGIN,
3913 	PGPGOUT,
3914 	PGFAULT,
3915 	PGMAJFAULT,
3916 };
3917 
3918 static int memcg_stat_show(struct seq_file *m, void *v)
3919 {
3920 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3921 	unsigned long memory, memsw;
3922 	struct mem_cgroup *mi;
3923 	unsigned int i;
3924 
3925 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3926 
3927 	mem_cgroup_flush_stats();
3928 
3929 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3930 		unsigned long nr;
3931 
3932 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3933 			continue;
3934 		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
3935 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
3936 	}
3937 
3938 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3939 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
3940 			   memcg_events_local(memcg, memcg1_events[i]));
3941 
3942 	for (i = 0; i < NR_LRU_LISTS; i++)
3943 		seq_printf(m, "%s %lu\n", lru_list_name(i),
3944 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3945 			   PAGE_SIZE);
3946 
3947 	/* Hierarchical information */
3948 	memory = memsw = PAGE_COUNTER_MAX;
3949 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3950 		memory = min(memory, READ_ONCE(mi->memory.max));
3951 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
3952 	}
3953 	seq_printf(m, "hierarchical_memory_limit %llu\n",
3954 		   (u64)memory * PAGE_SIZE);
3955 	if (do_memsw_account())
3956 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3957 			   (u64)memsw * PAGE_SIZE);
3958 
3959 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3960 		unsigned long nr;
3961 
3962 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3963 			continue;
3964 		nr = memcg_page_state(memcg, memcg1_stats[i]);
3965 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3966 						(u64)nr * PAGE_SIZE);
3967 	}
3968 
3969 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3970 		seq_printf(m, "total_%s %llu\n",
3971 			   vm_event_name(memcg1_events[i]),
3972 			   (u64)memcg_events(memcg, memcg1_events[i]));
3973 
3974 	for (i = 0; i < NR_LRU_LISTS; i++)
3975 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
3976 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3977 			   PAGE_SIZE);
3978 
3979 #ifdef CONFIG_DEBUG_VM
3980 	{
3981 		pg_data_t *pgdat;
3982 		struct mem_cgroup_per_node *mz;
3983 		unsigned long anon_cost = 0;
3984 		unsigned long file_cost = 0;
3985 
3986 		for_each_online_pgdat(pgdat) {
3987 			mz = memcg->nodeinfo[pgdat->node_id];
3988 
3989 			anon_cost += mz->lruvec.anon_cost;
3990 			file_cost += mz->lruvec.file_cost;
3991 		}
3992 		seq_printf(m, "anon_cost %lu\n", anon_cost);
3993 		seq_printf(m, "file_cost %lu\n", file_cost);
3994 	}
3995 #endif
3996 
3997 	return 0;
3998 }
3999 
4000 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4001 				      struct cftype *cft)
4002 {
4003 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4004 
4005 	return mem_cgroup_swappiness(memcg);
4006 }
4007 
4008 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4009 				       struct cftype *cft, u64 val)
4010 {
4011 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4012 
4013 	if (val > 200)
4014 		return -EINVAL;
4015 
4016 	if (!mem_cgroup_is_root(memcg))
4017 		memcg->swappiness = val;
4018 	else
4019 		vm_swappiness = val;
4020 
4021 	return 0;
4022 }
4023 
4024 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4025 {
4026 	struct mem_cgroup_threshold_ary *t;
4027 	unsigned long usage;
4028 	int i;
4029 
4030 	rcu_read_lock();
4031 	if (!swap)
4032 		t = rcu_dereference(memcg->thresholds.primary);
4033 	else
4034 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4035 
4036 	if (!t)
4037 		goto unlock;
4038 
4039 	usage = mem_cgroup_usage(memcg, swap);
4040 
4041 	/*
4042 	 * current_threshold points to threshold just below or equal to usage.
4043 	 * If it's not true, a threshold was crossed after last
4044 	 * call of __mem_cgroup_threshold().
4045 	 */
4046 	i = t->current_threshold;
4047 
4048 	/*
4049 	 * Iterate backward over array of thresholds starting from
4050 	 * current_threshold and check if a threshold is crossed.
4051 	 * If none of thresholds below usage is crossed, we read
4052 	 * only one element of the array here.
4053 	 */
4054 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4055 		eventfd_signal(t->entries[i].eventfd, 1);
4056 
4057 	/* i = current_threshold + 1 */
4058 	i++;
4059 
4060 	/*
4061 	 * Iterate forward over array of thresholds starting from
4062 	 * current_threshold+1 and check if a threshold is crossed.
4063 	 * If none of thresholds above usage is crossed, we read
4064 	 * only one element of the array here.
4065 	 */
4066 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4067 		eventfd_signal(t->entries[i].eventfd, 1);
4068 
4069 	/* Update current_threshold */
4070 	t->current_threshold = i - 1;
4071 unlock:
4072 	rcu_read_unlock();
4073 }
4074 
4075 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4076 {
4077 	while (memcg) {
4078 		__mem_cgroup_threshold(memcg, false);
4079 		if (do_memsw_account())
4080 			__mem_cgroup_threshold(memcg, true);
4081 
4082 		memcg = parent_mem_cgroup(memcg);
4083 	}
4084 }
4085 
4086 static int compare_thresholds(const void *a, const void *b)
4087 {
4088 	const struct mem_cgroup_threshold *_a = a;
4089 	const struct mem_cgroup_threshold *_b = b;
4090 
4091 	if (_a->threshold > _b->threshold)
4092 		return 1;
4093 
4094 	if (_a->threshold < _b->threshold)
4095 		return -1;
4096 
4097 	return 0;
4098 }
4099 
4100 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4101 {
4102 	struct mem_cgroup_eventfd_list *ev;
4103 
4104 	spin_lock(&memcg_oom_lock);
4105 
4106 	list_for_each_entry(ev, &memcg->oom_notify, list)
4107 		eventfd_signal(ev->eventfd, 1);
4108 
4109 	spin_unlock(&memcg_oom_lock);
4110 	return 0;
4111 }
4112 
4113 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4114 {
4115 	struct mem_cgroup *iter;
4116 
4117 	for_each_mem_cgroup_tree(iter, memcg)
4118 		mem_cgroup_oom_notify_cb(iter);
4119 }
4120 
4121 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4122 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4123 {
4124 	struct mem_cgroup_thresholds *thresholds;
4125 	struct mem_cgroup_threshold_ary *new;
4126 	unsigned long threshold;
4127 	unsigned long usage;
4128 	int i, size, ret;
4129 
4130 	ret = page_counter_memparse(args, "-1", &threshold);
4131 	if (ret)
4132 		return ret;
4133 
4134 	mutex_lock(&memcg->thresholds_lock);
4135 
4136 	if (type == _MEM) {
4137 		thresholds = &memcg->thresholds;
4138 		usage = mem_cgroup_usage(memcg, false);
4139 	} else if (type == _MEMSWAP) {
4140 		thresholds = &memcg->memsw_thresholds;
4141 		usage = mem_cgroup_usage(memcg, true);
4142 	} else
4143 		BUG();
4144 
4145 	/* Check if a threshold crossed before adding a new one */
4146 	if (thresholds->primary)
4147 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4148 
4149 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4150 
4151 	/* Allocate memory for new array of thresholds */
4152 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4153 	if (!new) {
4154 		ret = -ENOMEM;
4155 		goto unlock;
4156 	}
4157 	new->size = size;
4158 
4159 	/* Copy thresholds (if any) to new array */
4160 	if (thresholds->primary)
4161 		memcpy(new->entries, thresholds->primary->entries,
4162 		       flex_array_size(new, entries, size - 1));
4163 
4164 	/* Add new threshold */
4165 	new->entries[size - 1].eventfd = eventfd;
4166 	new->entries[size - 1].threshold = threshold;
4167 
4168 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4169 	sort(new->entries, size, sizeof(*new->entries),
4170 			compare_thresholds, NULL);
4171 
4172 	/* Find current threshold */
4173 	new->current_threshold = -1;
4174 	for (i = 0; i < size; i++) {
4175 		if (new->entries[i].threshold <= usage) {
4176 			/*
4177 			 * new->current_threshold will not be used until
4178 			 * rcu_assign_pointer(), so it's safe to increment
4179 			 * it here.
4180 			 */
4181 			++new->current_threshold;
4182 		} else
4183 			break;
4184 	}
4185 
4186 	/* Free old spare buffer and save old primary buffer as spare */
4187 	kfree(thresholds->spare);
4188 	thresholds->spare = thresholds->primary;
4189 
4190 	rcu_assign_pointer(thresholds->primary, new);
4191 
4192 	/* To be sure that nobody uses thresholds */
4193 	synchronize_rcu();
4194 
4195 unlock:
4196 	mutex_unlock(&memcg->thresholds_lock);
4197 
4198 	return ret;
4199 }
4200 
4201 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4202 	struct eventfd_ctx *eventfd, const char *args)
4203 {
4204 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4205 }
4206 
4207 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4208 	struct eventfd_ctx *eventfd, const char *args)
4209 {
4210 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4211 }
4212 
4213 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4214 	struct eventfd_ctx *eventfd, enum res_type type)
4215 {
4216 	struct mem_cgroup_thresholds *thresholds;
4217 	struct mem_cgroup_threshold_ary *new;
4218 	unsigned long usage;
4219 	int i, j, size, entries;
4220 
4221 	mutex_lock(&memcg->thresholds_lock);
4222 
4223 	if (type == _MEM) {
4224 		thresholds = &memcg->thresholds;
4225 		usage = mem_cgroup_usage(memcg, false);
4226 	} else if (type == _MEMSWAP) {
4227 		thresholds = &memcg->memsw_thresholds;
4228 		usage = mem_cgroup_usage(memcg, true);
4229 	} else
4230 		BUG();
4231 
4232 	if (!thresholds->primary)
4233 		goto unlock;
4234 
4235 	/* Check if a threshold crossed before removing */
4236 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4237 
4238 	/* Calculate new number of threshold */
4239 	size = entries = 0;
4240 	for (i = 0; i < thresholds->primary->size; i++) {
4241 		if (thresholds->primary->entries[i].eventfd != eventfd)
4242 			size++;
4243 		else
4244 			entries++;
4245 	}
4246 
4247 	new = thresholds->spare;
4248 
4249 	/* If no items related to eventfd have been cleared, nothing to do */
4250 	if (!entries)
4251 		goto unlock;
4252 
4253 	/* Set thresholds array to NULL if we don't have thresholds */
4254 	if (!size) {
4255 		kfree(new);
4256 		new = NULL;
4257 		goto swap_buffers;
4258 	}
4259 
4260 	new->size = size;
4261 
4262 	/* Copy thresholds and find current threshold */
4263 	new->current_threshold = -1;
4264 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4265 		if (thresholds->primary->entries[i].eventfd == eventfd)
4266 			continue;
4267 
4268 		new->entries[j] = thresholds->primary->entries[i];
4269 		if (new->entries[j].threshold <= usage) {
4270 			/*
4271 			 * new->current_threshold will not be used
4272 			 * until rcu_assign_pointer(), so it's safe to increment
4273 			 * it here.
4274 			 */
4275 			++new->current_threshold;
4276 		}
4277 		j++;
4278 	}
4279 
4280 swap_buffers:
4281 	/* Swap primary and spare array */
4282 	thresholds->spare = thresholds->primary;
4283 
4284 	rcu_assign_pointer(thresholds->primary, new);
4285 
4286 	/* To be sure that nobody uses thresholds */
4287 	synchronize_rcu();
4288 
4289 	/* If all events are unregistered, free the spare array */
4290 	if (!new) {
4291 		kfree(thresholds->spare);
4292 		thresholds->spare = NULL;
4293 	}
4294 unlock:
4295 	mutex_unlock(&memcg->thresholds_lock);
4296 }
4297 
4298 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4299 	struct eventfd_ctx *eventfd)
4300 {
4301 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4302 }
4303 
4304 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4305 	struct eventfd_ctx *eventfd)
4306 {
4307 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4308 }
4309 
4310 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4311 	struct eventfd_ctx *eventfd, const char *args)
4312 {
4313 	struct mem_cgroup_eventfd_list *event;
4314 
4315 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4316 	if (!event)
4317 		return -ENOMEM;
4318 
4319 	spin_lock(&memcg_oom_lock);
4320 
4321 	event->eventfd = eventfd;
4322 	list_add(&event->list, &memcg->oom_notify);
4323 
4324 	/* already in OOM ? */
4325 	if (memcg->under_oom)
4326 		eventfd_signal(eventfd, 1);
4327 	spin_unlock(&memcg_oom_lock);
4328 
4329 	return 0;
4330 }
4331 
4332 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4333 	struct eventfd_ctx *eventfd)
4334 {
4335 	struct mem_cgroup_eventfd_list *ev, *tmp;
4336 
4337 	spin_lock(&memcg_oom_lock);
4338 
4339 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4340 		if (ev->eventfd == eventfd) {
4341 			list_del(&ev->list);
4342 			kfree(ev);
4343 		}
4344 	}
4345 
4346 	spin_unlock(&memcg_oom_lock);
4347 }
4348 
4349 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4350 {
4351 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4352 
4353 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4354 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4355 	seq_printf(sf, "oom_kill %lu\n",
4356 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4357 	return 0;
4358 }
4359 
4360 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4361 	struct cftype *cft, u64 val)
4362 {
4363 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4364 
4365 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4366 	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4367 		return -EINVAL;
4368 
4369 	memcg->oom_kill_disable = val;
4370 	if (!val)
4371 		memcg_oom_recover(memcg);
4372 
4373 	return 0;
4374 }
4375 
4376 #ifdef CONFIG_CGROUP_WRITEBACK
4377 
4378 #include <trace/events/writeback.h>
4379 
4380 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4381 {
4382 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4383 }
4384 
4385 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4386 {
4387 	wb_domain_exit(&memcg->cgwb_domain);
4388 }
4389 
4390 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4391 {
4392 	wb_domain_size_changed(&memcg->cgwb_domain);
4393 }
4394 
4395 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4396 {
4397 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4398 
4399 	if (!memcg->css.parent)
4400 		return NULL;
4401 
4402 	return &memcg->cgwb_domain;
4403 }
4404 
4405 /**
4406  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4407  * @wb: bdi_writeback in question
4408  * @pfilepages: out parameter for number of file pages
4409  * @pheadroom: out parameter for number of allocatable pages according to memcg
4410  * @pdirty: out parameter for number of dirty pages
4411  * @pwriteback: out parameter for number of pages under writeback
4412  *
4413  * Determine the numbers of file, headroom, dirty, and writeback pages in
4414  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4415  * is a bit more involved.
4416  *
4417  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4418  * headroom is calculated as the lowest headroom of itself and the
4419  * ancestors.  Note that this doesn't consider the actual amount of
4420  * available memory in the system.  The caller should further cap
4421  * *@pheadroom accordingly.
4422  */
4423 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4424 			 unsigned long *pheadroom, unsigned long *pdirty,
4425 			 unsigned long *pwriteback)
4426 {
4427 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4428 	struct mem_cgroup *parent;
4429 
4430 	mem_cgroup_flush_stats();
4431 
4432 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4433 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4434 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4435 			memcg_page_state(memcg, NR_ACTIVE_FILE);
4436 
4437 	*pheadroom = PAGE_COUNTER_MAX;
4438 	while ((parent = parent_mem_cgroup(memcg))) {
4439 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4440 					    READ_ONCE(memcg->memory.high));
4441 		unsigned long used = page_counter_read(&memcg->memory);
4442 
4443 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4444 		memcg = parent;
4445 	}
4446 }
4447 
4448 /*
4449  * Foreign dirty flushing
4450  *
4451  * There's an inherent mismatch between memcg and writeback.  The former
4452  * tracks ownership per-page while the latter per-inode.  This was a
4453  * deliberate design decision because honoring per-page ownership in the
4454  * writeback path is complicated, may lead to higher CPU and IO overheads
4455  * and deemed unnecessary given that write-sharing an inode across
4456  * different cgroups isn't a common use-case.
4457  *
4458  * Combined with inode majority-writer ownership switching, this works well
4459  * enough in most cases but there are some pathological cases.  For
4460  * example, let's say there are two cgroups A and B which keep writing to
4461  * different but confined parts of the same inode.  B owns the inode and
4462  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4463  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4464  * triggering background writeback.  A will be slowed down without a way to
4465  * make writeback of the dirty pages happen.
4466  *
4467  * Conditions like the above can lead to a cgroup getting repeatedly and
4468  * severely throttled after making some progress after each
4469  * dirty_expire_interval while the underlying IO device is almost
4470  * completely idle.
4471  *
4472  * Solving this problem completely requires matching the ownership tracking
4473  * granularities between memcg and writeback in either direction.  However,
4474  * the more egregious behaviors can be avoided by simply remembering the
4475  * most recent foreign dirtying events and initiating remote flushes on
4476  * them when local writeback isn't enough to keep the memory clean enough.
4477  *
4478  * The following two functions implement such mechanism.  When a foreign
4479  * page - a page whose memcg and writeback ownerships don't match - is
4480  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4481  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4482  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4483  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4484  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4485  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4486  * limited to MEMCG_CGWB_FRN_CNT.
4487  *
4488  * The mechanism only remembers IDs and doesn't hold any object references.
4489  * As being wrong occasionally doesn't matter, updates and accesses to the
4490  * records are lockless and racy.
4491  */
4492 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4493 					     struct bdi_writeback *wb)
4494 {
4495 	struct mem_cgroup *memcg = folio_memcg(folio);
4496 	struct memcg_cgwb_frn *frn;
4497 	u64 now = get_jiffies_64();
4498 	u64 oldest_at = now;
4499 	int oldest = -1;
4500 	int i;
4501 
4502 	trace_track_foreign_dirty(folio, wb);
4503 
4504 	/*
4505 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4506 	 * using it.  If not replace the oldest one which isn't being
4507 	 * written out.
4508 	 */
4509 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4510 		frn = &memcg->cgwb_frn[i];
4511 		if (frn->bdi_id == wb->bdi->id &&
4512 		    frn->memcg_id == wb->memcg_css->id)
4513 			break;
4514 		if (time_before64(frn->at, oldest_at) &&
4515 		    atomic_read(&frn->done.cnt) == 1) {
4516 			oldest = i;
4517 			oldest_at = frn->at;
4518 		}
4519 	}
4520 
4521 	if (i < MEMCG_CGWB_FRN_CNT) {
4522 		/*
4523 		 * Re-using an existing one.  Update timestamp lazily to
4524 		 * avoid making the cacheline hot.  We want them to be
4525 		 * reasonably up-to-date and significantly shorter than
4526 		 * dirty_expire_interval as that's what expires the record.
4527 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4528 		 */
4529 		unsigned long update_intv =
4530 			min_t(unsigned long, HZ,
4531 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4532 
4533 		if (time_before64(frn->at, now - update_intv))
4534 			frn->at = now;
4535 	} else if (oldest >= 0) {
4536 		/* replace the oldest free one */
4537 		frn = &memcg->cgwb_frn[oldest];
4538 		frn->bdi_id = wb->bdi->id;
4539 		frn->memcg_id = wb->memcg_css->id;
4540 		frn->at = now;
4541 	}
4542 }
4543 
4544 /* issue foreign writeback flushes for recorded foreign dirtying events */
4545 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4546 {
4547 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4548 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4549 	u64 now = jiffies_64;
4550 	int i;
4551 
4552 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4553 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4554 
4555 		/*
4556 		 * If the record is older than dirty_expire_interval,
4557 		 * writeback on it has already started.  No need to kick it
4558 		 * off again.  Also, don't start a new one if there's
4559 		 * already one in flight.
4560 		 */
4561 		if (time_after64(frn->at, now - intv) &&
4562 		    atomic_read(&frn->done.cnt) == 1) {
4563 			frn->at = 0;
4564 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4565 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4566 					       WB_REASON_FOREIGN_FLUSH,
4567 					       &frn->done);
4568 		}
4569 	}
4570 }
4571 
4572 #else	/* CONFIG_CGROUP_WRITEBACK */
4573 
4574 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4575 {
4576 	return 0;
4577 }
4578 
4579 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4580 {
4581 }
4582 
4583 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4584 {
4585 }
4586 
4587 #endif	/* CONFIG_CGROUP_WRITEBACK */
4588 
4589 /*
4590  * DO NOT USE IN NEW FILES.
4591  *
4592  * "cgroup.event_control" implementation.
4593  *
4594  * This is way over-engineered.  It tries to support fully configurable
4595  * events for each user.  Such level of flexibility is completely
4596  * unnecessary especially in the light of the planned unified hierarchy.
4597  *
4598  * Please deprecate this and replace with something simpler if at all
4599  * possible.
4600  */
4601 
4602 /*
4603  * Unregister event and free resources.
4604  *
4605  * Gets called from workqueue.
4606  */
4607 static void memcg_event_remove(struct work_struct *work)
4608 {
4609 	struct mem_cgroup_event *event =
4610 		container_of(work, struct mem_cgroup_event, remove);
4611 	struct mem_cgroup *memcg = event->memcg;
4612 
4613 	remove_wait_queue(event->wqh, &event->wait);
4614 
4615 	event->unregister_event(memcg, event->eventfd);
4616 
4617 	/* Notify userspace the event is going away. */
4618 	eventfd_signal(event->eventfd, 1);
4619 
4620 	eventfd_ctx_put(event->eventfd);
4621 	kfree(event);
4622 	css_put(&memcg->css);
4623 }
4624 
4625 /*
4626  * Gets called on EPOLLHUP on eventfd when user closes it.
4627  *
4628  * Called with wqh->lock held and interrupts disabled.
4629  */
4630 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4631 			    int sync, void *key)
4632 {
4633 	struct mem_cgroup_event *event =
4634 		container_of(wait, struct mem_cgroup_event, wait);
4635 	struct mem_cgroup *memcg = event->memcg;
4636 	__poll_t flags = key_to_poll(key);
4637 
4638 	if (flags & EPOLLHUP) {
4639 		/*
4640 		 * If the event has been detached at cgroup removal, we
4641 		 * can simply return knowing the other side will cleanup
4642 		 * for us.
4643 		 *
4644 		 * We can't race against event freeing since the other
4645 		 * side will require wqh->lock via remove_wait_queue(),
4646 		 * which we hold.
4647 		 */
4648 		spin_lock(&memcg->event_list_lock);
4649 		if (!list_empty(&event->list)) {
4650 			list_del_init(&event->list);
4651 			/*
4652 			 * We are in atomic context, but cgroup_event_remove()
4653 			 * may sleep, so we have to call it in workqueue.
4654 			 */
4655 			schedule_work(&event->remove);
4656 		}
4657 		spin_unlock(&memcg->event_list_lock);
4658 	}
4659 
4660 	return 0;
4661 }
4662 
4663 static void memcg_event_ptable_queue_proc(struct file *file,
4664 		wait_queue_head_t *wqh, poll_table *pt)
4665 {
4666 	struct mem_cgroup_event *event =
4667 		container_of(pt, struct mem_cgroup_event, pt);
4668 
4669 	event->wqh = wqh;
4670 	add_wait_queue(wqh, &event->wait);
4671 }
4672 
4673 /*
4674  * DO NOT USE IN NEW FILES.
4675  *
4676  * Parse input and register new cgroup event handler.
4677  *
4678  * Input must be in format '<event_fd> <control_fd> <args>'.
4679  * Interpretation of args is defined by control file implementation.
4680  */
4681 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4682 					 char *buf, size_t nbytes, loff_t off)
4683 {
4684 	struct cgroup_subsys_state *css = of_css(of);
4685 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4686 	struct mem_cgroup_event *event;
4687 	struct cgroup_subsys_state *cfile_css;
4688 	unsigned int efd, cfd;
4689 	struct fd efile;
4690 	struct fd cfile;
4691 	const char *name;
4692 	char *endp;
4693 	int ret;
4694 
4695 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
4696 		return -EOPNOTSUPP;
4697 
4698 	buf = strstrip(buf);
4699 
4700 	efd = simple_strtoul(buf, &endp, 10);
4701 	if (*endp != ' ')
4702 		return -EINVAL;
4703 	buf = endp + 1;
4704 
4705 	cfd = simple_strtoul(buf, &endp, 10);
4706 	if ((*endp != ' ') && (*endp != '\0'))
4707 		return -EINVAL;
4708 	buf = endp + 1;
4709 
4710 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4711 	if (!event)
4712 		return -ENOMEM;
4713 
4714 	event->memcg = memcg;
4715 	INIT_LIST_HEAD(&event->list);
4716 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4717 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4718 	INIT_WORK(&event->remove, memcg_event_remove);
4719 
4720 	efile = fdget(efd);
4721 	if (!efile.file) {
4722 		ret = -EBADF;
4723 		goto out_kfree;
4724 	}
4725 
4726 	event->eventfd = eventfd_ctx_fileget(efile.file);
4727 	if (IS_ERR(event->eventfd)) {
4728 		ret = PTR_ERR(event->eventfd);
4729 		goto out_put_efile;
4730 	}
4731 
4732 	cfile = fdget(cfd);
4733 	if (!cfile.file) {
4734 		ret = -EBADF;
4735 		goto out_put_eventfd;
4736 	}
4737 
4738 	/* the process need read permission on control file */
4739 	/* AV: shouldn't we check that it's been opened for read instead? */
4740 	ret = file_permission(cfile.file, MAY_READ);
4741 	if (ret < 0)
4742 		goto out_put_cfile;
4743 
4744 	/*
4745 	 * Determine the event callbacks and set them in @event.  This used
4746 	 * to be done via struct cftype but cgroup core no longer knows
4747 	 * about these events.  The following is crude but the whole thing
4748 	 * is for compatibility anyway.
4749 	 *
4750 	 * DO NOT ADD NEW FILES.
4751 	 */
4752 	name = cfile.file->f_path.dentry->d_name.name;
4753 
4754 	if (!strcmp(name, "memory.usage_in_bytes")) {
4755 		event->register_event = mem_cgroup_usage_register_event;
4756 		event->unregister_event = mem_cgroup_usage_unregister_event;
4757 	} else if (!strcmp(name, "memory.oom_control")) {
4758 		event->register_event = mem_cgroup_oom_register_event;
4759 		event->unregister_event = mem_cgroup_oom_unregister_event;
4760 	} else if (!strcmp(name, "memory.pressure_level")) {
4761 		event->register_event = vmpressure_register_event;
4762 		event->unregister_event = vmpressure_unregister_event;
4763 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4764 		event->register_event = memsw_cgroup_usage_register_event;
4765 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4766 	} else {
4767 		ret = -EINVAL;
4768 		goto out_put_cfile;
4769 	}
4770 
4771 	/*
4772 	 * Verify @cfile should belong to @css.  Also, remaining events are
4773 	 * automatically removed on cgroup destruction but the removal is
4774 	 * asynchronous, so take an extra ref on @css.
4775 	 */
4776 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4777 					       &memory_cgrp_subsys);
4778 	ret = -EINVAL;
4779 	if (IS_ERR(cfile_css))
4780 		goto out_put_cfile;
4781 	if (cfile_css != css) {
4782 		css_put(cfile_css);
4783 		goto out_put_cfile;
4784 	}
4785 
4786 	ret = event->register_event(memcg, event->eventfd, buf);
4787 	if (ret)
4788 		goto out_put_css;
4789 
4790 	vfs_poll(efile.file, &event->pt);
4791 
4792 	spin_lock_irq(&memcg->event_list_lock);
4793 	list_add(&event->list, &memcg->event_list);
4794 	spin_unlock_irq(&memcg->event_list_lock);
4795 
4796 	fdput(cfile);
4797 	fdput(efile);
4798 
4799 	return nbytes;
4800 
4801 out_put_css:
4802 	css_put(css);
4803 out_put_cfile:
4804 	fdput(cfile);
4805 out_put_eventfd:
4806 	eventfd_ctx_put(event->eventfd);
4807 out_put_efile:
4808 	fdput(efile);
4809 out_kfree:
4810 	kfree(event);
4811 
4812 	return ret;
4813 }
4814 
4815 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4816 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4817 {
4818 	/*
4819 	 * Deprecated.
4820 	 * Please, take a look at tools/cgroup/slabinfo.py .
4821 	 */
4822 	return 0;
4823 }
4824 #endif
4825 
4826 static struct cftype mem_cgroup_legacy_files[] = {
4827 	{
4828 		.name = "usage_in_bytes",
4829 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4830 		.read_u64 = mem_cgroup_read_u64,
4831 	},
4832 	{
4833 		.name = "max_usage_in_bytes",
4834 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4835 		.write = mem_cgroup_reset,
4836 		.read_u64 = mem_cgroup_read_u64,
4837 	},
4838 	{
4839 		.name = "limit_in_bytes",
4840 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4841 		.write = mem_cgroup_write,
4842 		.read_u64 = mem_cgroup_read_u64,
4843 	},
4844 	{
4845 		.name = "soft_limit_in_bytes",
4846 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4847 		.write = mem_cgroup_write,
4848 		.read_u64 = mem_cgroup_read_u64,
4849 	},
4850 	{
4851 		.name = "failcnt",
4852 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4853 		.write = mem_cgroup_reset,
4854 		.read_u64 = mem_cgroup_read_u64,
4855 	},
4856 	{
4857 		.name = "stat",
4858 		.seq_show = memcg_stat_show,
4859 	},
4860 	{
4861 		.name = "force_empty",
4862 		.write = mem_cgroup_force_empty_write,
4863 	},
4864 	{
4865 		.name = "use_hierarchy",
4866 		.write_u64 = mem_cgroup_hierarchy_write,
4867 		.read_u64 = mem_cgroup_hierarchy_read,
4868 	},
4869 	{
4870 		.name = "cgroup.event_control",		/* XXX: for compat */
4871 		.write = memcg_write_event_control,
4872 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4873 	},
4874 	{
4875 		.name = "swappiness",
4876 		.read_u64 = mem_cgroup_swappiness_read,
4877 		.write_u64 = mem_cgroup_swappiness_write,
4878 	},
4879 	{
4880 		.name = "move_charge_at_immigrate",
4881 		.read_u64 = mem_cgroup_move_charge_read,
4882 		.write_u64 = mem_cgroup_move_charge_write,
4883 	},
4884 	{
4885 		.name = "oom_control",
4886 		.seq_show = mem_cgroup_oom_control_read,
4887 		.write_u64 = mem_cgroup_oom_control_write,
4888 	},
4889 	{
4890 		.name = "pressure_level",
4891 	},
4892 #ifdef CONFIG_NUMA
4893 	{
4894 		.name = "numa_stat",
4895 		.seq_show = memcg_numa_stat_show,
4896 	},
4897 #endif
4898 	{
4899 		.name = "kmem.limit_in_bytes",
4900 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4901 		.write = mem_cgroup_write,
4902 		.read_u64 = mem_cgroup_read_u64,
4903 	},
4904 	{
4905 		.name = "kmem.usage_in_bytes",
4906 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4907 		.read_u64 = mem_cgroup_read_u64,
4908 	},
4909 	{
4910 		.name = "kmem.failcnt",
4911 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4912 		.write = mem_cgroup_reset,
4913 		.read_u64 = mem_cgroup_read_u64,
4914 	},
4915 	{
4916 		.name = "kmem.max_usage_in_bytes",
4917 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4918 		.write = mem_cgroup_reset,
4919 		.read_u64 = mem_cgroup_read_u64,
4920 	},
4921 #if defined(CONFIG_MEMCG_KMEM) && \
4922 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4923 	{
4924 		.name = "kmem.slabinfo",
4925 		.seq_show = mem_cgroup_slab_show,
4926 	},
4927 #endif
4928 	{
4929 		.name = "kmem.tcp.limit_in_bytes",
4930 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4931 		.write = mem_cgroup_write,
4932 		.read_u64 = mem_cgroup_read_u64,
4933 	},
4934 	{
4935 		.name = "kmem.tcp.usage_in_bytes",
4936 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4937 		.read_u64 = mem_cgroup_read_u64,
4938 	},
4939 	{
4940 		.name = "kmem.tcp.failcnt",
4941 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4942 		.write = mem_cgroup_reset,
4943 		.read_u64 = mem_cgroup_read_u64,
4944 	},
4945 	{
4946 		.name = "kmem.tcp.max_usage_in_bytes",
4947 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4948 		.write = mem_cgroup_reset,
4949 		.read_u64 = mem_cgroup_read_u64,
4950 	},
4951 	{ },	/* terminate */
4952 };
4953 
4954 /*
4955  * Private memory cgroup IDR
4956  *
4957  * Swap-out records and page cache shadow entries need to store memcg
4958  * references in constrained space, so we maintain an ID space that is
4959  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4960  * memory-controlled cgroups to 64k.
4961  *
4962  * However, there usually are many references to the offline CSS after
4963  * the cgroup has been destroyed, such as page cache or reclaimable
4964  * slab objects, that don't need to hang on to the ID. We want to keep
4965  * those dead CSS from occupying IDs, or we might quickly exhaust the
4966  * relatively small ID space and prevent the creation of new cgroups
4967  * even when there are much fewer than 64k cgroups - possibly none.
4968  *
4969  * Maintain a private 16-bit ID space for memcg, and allow the ID to
4970  * be freed and recycled when it's no longer needed, which is usually
4971  * when the CSS is offlined.
4972  *
4973  * The only exception to that are records of swapped out tmpfs/shmem
4974  * pages that need to be attributed to live ancestors on swapin. But
4975  * those references are manageable from userspace.
4976  */
4977 
4978 static DEFINE_IDR(mem_cgroup_idr);
4979 
4980 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4981 {
4982 	if (memcg->id.id > 0) {
4983 		idr_remove(&mem_cgroup_idr, memcg->id.id);
4984 		memcg->id.id = 0;
4985 	}
4986 }
4987 
4988 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
4989 						  unsigned int n)
4990 {
4991 	refcount_add(n, &memcg->id.ref);
4992 }
4993 
4994 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4995 {
4996 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
4997 		mem_cgroup_id_remove(memcg);
4998 
4999 		/* Memcg ID pins CSS */
5000 		css_put(&memcg->css);
5001 	}
5002 }
5003 
5004 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5005 {
5006 	mem_cgroup_id_put_many(memcg, 1);
5007 }
5008 
5009 /**
5010  * mem_cgroup_from_id - look up a memcg from a memcg id
5011  * @id: the memcg id to look up
5012  *
5013  * Caller must hold rcu_read_lock().
5014  */
5015 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5016 {
5017 	WARN_ON_ONCE(!rcu_read_lock_held());
5018 	return idr_find(&mem_cgroup_idr, id);
5019 }
5020 
5021 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5022 {
5023 	struct mem_cgroup_per_node *pn;
5024 
5025 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5026 	if (!pn)
5027 		return 1;
5028 
5029 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5030 						   GFP_KERNEL_ACCOUNT);
5031 	if (!pn->lruvec_stats_percpu) {
5032 		kfree(pn);
5033 		return 1;
5034 	}
5035 
5036 	lruvec_init(&pn->lruvec);
5037 	pn->memcg = memcg;
5038 
5039 	memcg->nodeinfo[node] = pn;
5040 	return 0;
5041 }
5042 
5043 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5044 {
5045 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5046 
5047 	if (!pn)
5048 		return;
5049 
5050 	free_percpu(pn->lruvec_stats_percpu);
5051 	kfree(pn);
5052 }
5053 
5054 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5055 {
5056 	int node;
5057 
5058 	for_each_node(node)
5059 		free_mem_cgroup_per_node_info(memcg, node);
5060 	free_percpu(memcg->vmstats_percpu);
5061 	kfree(memcg);
5062 }
5063 
5064 static void mem_cgroup_free(struct mem_cgroup *memcg)
5065 {
5066 	memcg_wb_domain_exit(memcg);
5067 	__mem_cgroup_free(memcg);
5068 }
5069 
5070 static struct mem_cgroup *mem_cgroup_alloc(void)
5071 {
5072 	struct mem_cgroup *memcg;
5073 	int node;
5074 	int __maybe_unused i;
5075 	long error = -ENOMEM;
5076 
5077 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5078 	if (!memcg)
5079 		return ERR_PTR(error);
5080 
5081 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5082 				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5083 	if (memcg->id.id < 0) {
5084 		error = memcg->id.id;
5085 		goto fail;
5086 	}
5087 
5088 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5089 						 GFP_KERNEL_ACCOUNT);
5090 	if (!memcg->vmstats_percpu)
5091 		goto fail;
5092 
5093 	for_each_node(node)
5094 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5095 			goto fail;
5096 
5097 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5098 		goto fail;
5099 
5100 	INIT_WORK(&memcg->high_work, high_work_func);
5101 	INIT_LIST_HEAD(&memcg->oom_notify);
5102 	mutex_init(&memcg->thresholds_lock);
5103 	spin_lock_init(&memcg->move_lock);
5104 	vmpressure_init(&memcg->vmpressure);
5105 	INIT_LIST_HEAD(&memcg->event_list);
5106 	spin_lock_init(&memcg->event_list_lock);
5107 	memcg->socket_pressure = jiffies;
5108 #ifdef CONFIG_MEMCG_KMEM
5109 	memcg->kmemcg_id = -1;
5110 	INIT_LIST_HEAD(&memcg->objcg_list);
5111 #endif
5112 #ifdef CONFIG_CGROUP_WRITEBACK
5113 	INIT_LIST_HEAD(&memcg->cgwb_list);
5114 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5115 		memcg->cgwb_frn[i].done =
5116 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5117 #endif
5118 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5119 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5120 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5121 	memcg->deferred_split_queue.split_queue_len = 0;
5122 #endif
5123 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5124 	return memcg;
5125 fail:
5126 	mem_cgroup_id_remove(memcg);
5127 	__mem_cgroup_free(memcg);
5128 	return ERR_PTR(error);
5129 }
5130 
5131 static struct cgroup_subsys_state * __ref
5132 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5133 {
5134 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5135 	struct mem_cgroup *memcg, *old_memcg;
5136 
5137 	old_memcg = set_active_memcg(parent);
5138 	memcg = mem_cgroup_alloc();
5139 	set_active_memcg(old_memcg);
5140 	if (IS_ERR(memcg))
5141 		return ERR_CAST(memcg);
5142 
5143 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5144 	memcg->soft_limit = PAGE_COUNTER_MAX;
5145 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5146 	if (parent) {
5147 		memcg->swappiness = mem_cgroup_swappiness(parent);
5148 		memcg->oom_kill_disable = parent->oom_kill_disable;
5149 
5150 		page_counter_init(&memcg->memory, &parent->memory);
5151 		page_counter_init(&memcg->swap, &parent->swap);
5152 		page_counter_init(&memcg->kmem, &parent->kmem);
5153 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5154 	} else {
5155 		page_counter_init(&memcg->memory, NULL);
5156 		page_counter_init(&memcg->swap, NULL);
5157 		page_counter_init(&memcg->kmem, NULL);
5158 		page_counter_init(&memcg->tcpmem, NULL);
5159 
5160 		root_mem_cgroup = memcg;
5161 		return &memcg->css;
5162 	}
5163 
5164 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5165 		static_branch_inc(&memcg_sockets_enabled_key);
5166 
5167 	return &memcg->css;
5168 }
5169 
5170 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5171 {
5172 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5173 
5174 	if (memcg_online_kmem(memcg))
5175 		goto remove_id;
5176 
5177 	/*
5178 	 * A memcg must be visible for expand_shrinker_info()
5179 	 * by the time the maps are allocated. So, we allocate maps
5180 	 * here, when for_each_mem_cgroup() can't skip it.
5181 	 */
5182 	if (alloc_shrinker_info(memcg))
5183 		goto offline_kmem;
5184 
5185 	/* Online state pins memcg ID, memcg ID pins CSS */
5186 	refcount_set(&memcg->id.ref, 1);
5187 	css_get(css);
5188 
5189 	if (unlikely(mem_cgroup_is_root(memcg)))
5190 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5191 				   2UL*HZ);
5192 	return 0;
5193 offline_kmem:
5194 	memcg_offline_kmem(memcg);
5195 remove_id:
5196 	mem_cgroup_id_remove(memcg);
5197 	return -ENOMEM;
5198 }
5199 
5200 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5201 {
5202 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5203 	struct mem_cgroup_event *event, *tmp;
5204 
5205 	/*
5206 	 * Unregister events and notify userspace.
5207 	 * Notify userspace about cgroup removing only after rmdir of cgroup
5208 	 * directory to avoid race between userspace and kernelspace.
5209 	 */
5210 	spin_lock_irq(&memcg->event_list_lock);
5211 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5212 		list_del_init(&event->list);
5213 		schedule_work(&event->remove);
5214 	}
5215 	spin_unlock_irq(&memcg->event_list_lock);
5216 
5217 	page_counter_set_min(&memcg->memory, 0);
5218 	page_counter_set_low(&memcg->memory, 0);
5219 
5220 	memcg_offline_kmem(memcg);
5221 	reparent_shrinker_deferred(memcg);
5222 	wb_memcg_offline(memcg);
5223 
5224 	drain_all_stock(memcg);
5225 
5226 	mem_cgroup_id_put(memcg);
5227 }
5228 
5229 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5230 {
5231 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5232 
5233 	invalidate_reclaim_iterators(memcg);
5234 }
5235 
5236 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5237 {
5238 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5239 	int __maybe_unused i;
5240 
5241 #ifdef CONFIG_CGROUP_WRITEBACK
5242 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5243 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5244 #endif
5245 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5246 		static_branch_dec(&memcg_sockets_enabled_key);
5247 
5248 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5249 		static_branch_dec(&memcg_sockets_enabled_key);
5250 
5251 	vmpressure_cleanup(&memcg->vmpressure);
5252 	cancel_work_sync(&memcg->high_work);
5253 	mem_cgroup_remove_from_trees(memcg);
5254 	free_shrinker_info(memcg);
5255 	mem_cgroup_free(memcg);
5256 }
5257 
5258 /**
5259  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5260  * @css: the target css
5261  *
5262  * Reset the states of the mem_cgroup associated with @css.  This is
5263  * invoked when the userland requests disabling on the default hierarchy
5264  * but the memcg is pinned through dependency.  The memcg should stop
5265  * applying policies and should revert to the vanilla state as it may be
5266  * made visible again.
5267  *
5268  * The current implementation only resets the essential configurations.
5269  * This needs to be expanded to cover all the visible parts.
5270  */
5271 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5272 {
5273 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5274 
5275 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5276 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5277 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5278 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5279 	page_counter_set_min(&memcg->memory, 0);
5280 	page_counter_set_low(&memcg->memory, 0);
5281 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5282 	memcg->soft_limit = PAGE_COUNTER_MAX;
5283 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5284 	memcg_wb_domain_size_changed(memcg);
5285 }
5286 
5287 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5288 {
5289 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5290 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5291 	struct memcg_vmstats_percpu *statc;
5292 	long delta, v;
5293 	int i, nid;
5294 
5295 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5296 
5297 	for (i = 0; i < MEMCG_NR_STAT; i++) {
5298 		/*
5299 		 * Collect the aggregated propagation counts of groups
5300 		 * below us. We're in a per-cpu loop here and this is
5301 		 * a global counter, so the first cycle will get them.
5302 		 */
5303 		delta = memcg->vmstats.state_pending[i];
5304 		if (delta)
5305 			memcg->vmstats.state_pending[i] = 0;
5306 
5307 		/* Add CPU changes on this level since the last flush */
5308 		v = READ_ONCE(statc->state[i]);
5309 		if (v != statc->state_prev[i]) {
5310 			delta += v - statc->state_prev[i];
5311 			statc->state_prev[i] = v;
5312 		}
5313 
5314 		if (!delta)
5315 			continue;
5316 
5317 		/* Aggregate counts on this level and propagate upwards */
5318 		memcg->vmstats.state[i] += delta;
5319 		if (parent)
5320 			parent->vmstats.state_pending[i] += delta;
5321 	}
5322 
5323 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5324 		delta = memcg->vmstats.events_pending[i];
5325 		if (delta)
5326 			memcg->vmstats.events_pending[i] = 0;
5327 
5328 		v = READ_ONCE(statc->events[i]);
5329 		if (v != statc->events_prev[i]) {
5330 			delta += v - statc->events_prev[i];
5331 			statc->events_prev[i] = v;
5332 		}
5333 
5334 		if (!delta)
5335 			continue;
5336 
5337 		memcg->vmstats.events[i] += delta;
5338 		if (parent)
5339 			parent->vmstats.events_pending[i] += delta;
5340 	}
5341 
5342 	for_each_node_state(nid, N_MEMORY) {
5343 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5344 		struct mem_cgroup_per_node *ppn = NULL;
5345 		struct lruvec_stats_percpu *lstatc;
5346 
5347 		if (parent)
5348 			ppn = parent->nodeinfo[nid];
5349 
5350 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5351 
5352 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5353 			delta = pn->lruvec_stats.state_pending[i];
5354 			if (delta)
5355 				pn->lruvec_stats.state_pending[i] = 0;
5356 
5357 			v = READ_ONCE(lstatc->state[i]);
5358 			if (v != lstatc->state_prev[i]) {
5359 				delta += v - lstatc->state_prev[i];
5360 				lstatc->state_prev[i] = v;
5361 			}
5362 
5363 			if (!delta)
5364 				continue;
5365 
5366 			pn->lruvec_stats.state[i] += delta;
5367 			if (ppn)
5368 				ppn->lruvec_stats.state_pending[i] += delta;
5369 		}
5370 	}
5371 }
5372 
5373 #ifdef CONFIG_MMU
5374 /* Handlers for move charge at task migration. */
5375 static int mem_cgroup_do_precharge(unsigned long count)
5376 {
5377 	int ret;
5378 
5379 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5380 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5381 	if (!ret) {
5382 		mc.precharge += count;
5383 		return ret;
5384 	}
5385 
5386 	/* Try charges one by one with reclaim, but do not retry */
5387 	while (count--) {
5388 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5389 		if (ret)
5390 			return ret;
5391 		mc.precharge++;
5392 		cond_resched();
5393 	}
5394 	return 0;
5395 }
5396 
5397 union mc_target {
5398 	struct page	*page;
5399 	swp_entry_t	ent;
5400 };
5401 
5402 enum mc_target_type {
5403 	MC_TARGET_NONE = 0,
5404 	MC_TARGET_PAGE,
5405 	MC_TARGET_SWAP,
5406 	MC_TARGET_DEVICE,
5407 };
5408 
5409 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5410 						unsigned long addr, pte_t ptent)
5411 {
5412 	struct page *page = vm_normal_page(vma, addr, ptent);
5413 
5414 	if (!page || !page_mapped(page))
5415 		return NULL;
5416 	if (PageAnon(page)) {
5417 		if (!(mc.flags & MOVE_ANON))
5418 			return NULL;
5419 	} else {
5420 		if (!(mc.flags & MOVE_FILE))
5421 			return NULL;
5422 	}
5423 	if (!get_page_unless_zero(page))
5424 		return NULL;
5425 
5426 	return page;
5427 }
5428 
5429 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5430 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5431 			pte_t ptent, swp_entry_t *entry)
5432 {
5433 	struct page *page = NULL;
5434 	swp_entry_t ent = pte_to_swp_entry(ptent);
5435 
5436 	if (!(mc.flags & MOVE_ANON))
5437 		return NULL;
5438 
5439 	/*
5440 	 * Handle device private pages that are not accessible by the CPU, but
5441 	 * stored as special swap entries in the page table.
5442 	 */
5443 	if (is_device_private_entry(ent)) {
5444 		page = pfn_swap_entry_to_page(ent);
5445 		if (!get_page_unless_zero(page))
5446 			return NULL;
5447 		return page;
5448 	}
5449 
5450 	if (non_swap_entry(ent))
5451 		return NULL;
5452 
5453 	/*
5454 	 * Because lookup_swap_cache() updates some statistics counter,
5455 	 * we call find_get_page() with swapper_space directly.
5456 	 */
5457 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5458 	entry->val = ent.val;
5459 
5460 	return page;
5461 }
5462 #else
5463 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5464 			pte_t ptent, swp_entry_t *entry)
5465 {
5466 	return NULL;
5467 }
5468 #endif
5469 
5470 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5471 			unsigned long addr, pte_t ptent)
5472 {
5473 	if (!vma->vm_file) /* anonymous vma */
5474 		return NULL;
5475 	if (!(mc.flags & MOVE_FILE))
5476 		return NULL;
5477 
5478 	/* page is moved even if it's not RSS of this task(page-faulted). */
5479 	/* shmem/tmpfs may report page out on swap: account for that too. */
5480 	return find_get_incore_page(vma->vm_file->f_mapping,
5481 			linear_page_index(vma, addr));
5482 }
5483 
5484 /**
5485  * mem_cgroup_move_account - move account of the page
5486  * @page: the page
5487  * @compound: charge the page as compound or small page
5488  * @from: mem_cgroup which the page is moved from.
5489  * @to:	mem_cgroup which the page is moved to. @from != @to.
5490  *
5491  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5492  *
5493  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5494  * from old cgroup.
5495  */
5496 static int mem_cgroup_move_account(struct page *page,
5497 				   bool compound,
5498 				   struct mem_cgroup *from,
5499 				   struct mem_cgroup *to)
5500 {
5501 	struct folio *folio = page_folio(page);
5502 	struct lruvec *from_vec, *to_vec;
5503 	struct pglist_data *pgdat;
5504 	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5505 	int nid, ret;
5506 
5507 	VM_BUG_ON(from == to);
5508 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5509 	VM_BUG_ON(compound && !folio_test_large(folio));
5510 
5511 	/*
5512 	 * Prevent mem_cgroup_migrate() from looking at
5513 	 * page's memory cgroup of its source page while we change it.
5514 	 */
5515 	ret = -EBUSY;
5516 	if (!folio_trylock(folio))
5517 		goto out;
5518 
5519 	ret = -EINVAL;
5520 	if (folio_memcg(folio) != from)
5521 		goto out_unlock;
5522 
5523 	pgdat = folio_pgdat(folio);
5524 	from_vec = mem_cgroup_lruvec(from, pgdat);
5525 	to_vec = mem_cgroup_lruvec(to, pgdat);
5526 
5527 	folio_memcg_lock(folio);
5528 
5529 	if (folio_test_anon(folio)) {
5530 		if (folio_mapped(folio)) {
5531 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5532 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5533 			if (folio_test_transhuge(folio)) {
5534 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5535 						   -nr_pages);
5536 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5537 						   nr_pages);
5538 			}
5539 		}
5540 	} else {
5541 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5542 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5543 
5544 		if (folio_test_swapbacked(folio)) {
5545 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5546 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5547 		}
5548 
5549 		if (folio_mapped(folio)) {
5550 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5551 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5552 		}
5553 
5554 		if (folio_test_dirty(folio)) {
5555 			struct address_space *mapping = folio_mapping(folio);
5556 
5557 			if (mapping_can_writeback(mapping)) {
5558 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5559 						   -nr_pages);
5560 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5561 						   nr_pages);
5562 			}
5563 		}
5564 	}
5565 
5566 	if (folio_test_writeback(folio)) {
5567 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5568 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5569 	}
5570 
5571 	/*
5572 	 * All state has been migrated, let's switch to the new memcg.
5573 	 *
5574 	 * It is safe to change page's memcg here because the page
5575 	 * is referenced, charged, isolated, and locked: we can't race
5576 	 * with (un)charging, migration, LRU putback, or anything else
5577 	 * that would rely on a stable page's memory cgroup.
5578 	 *
5579 	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5580 	 * to save space. As soon as we switch page's memory cgroup to a
5581 	 * new memcg that isn't locked, the above state can change
5582 	 * concurrently again. Make sure we're truly done with it.
5583 	 */
5584 	smp_mb();
5585 
5586 	css_get(&to->css);
5587 	css_put(&from->css);
5588 
5589 	folio->memcg_data = (unsigned long)to;
5590 
5591 	__folio_memcg_unlock(from);
5592 
5593 	ret = 0;
5594 	nid = folio_nid(folio);
5595 
5596 	local_irq_disable();
5597 	mem_cgroup_charge_statistics(to, nr_pages);
5598 	memcg_check_events(to, nid);
5599 	mem_cgroup_charge_statistics(from, -nr_pages);
5600 	memcg_check_events(from, nid);
5601 	local_irq_enable();
5602 out_unlock:
5603 	folio_unlock(folio);
5604 out:
5605 	return ret;
5606 }
5607 
5608 /**
5609  * get_mctgt_type - get target type of moving charge
5610  * @vma: the vma the pte to be checked belongs
5611  * @addr: the address corresponding to the pte to be checked
5612  * @ptent: the pte to be checked
5613  * @target: the pointer the target page or swap ent will be stored(can be NULL)
5614  *
5615  * Returns
5616  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5617  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5618  *     move charge. if @target is not NULL, the page is stored in target->page
5619  *     with extra refcnt got(Callers should handle it).
5620  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5621  *     target for charge migration. if @target is not NULL, the entry is stored
5622  *     in target->ent.
5623  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE  but page is MEMORY_DEVICE_PRIVATE
5624  *     (so ZONE_DEVICE page and thus not on the lru).
5625  *     For now we such page is charge like a regular page would be as for all
5626  *     intent and purposes it is just special memory taking the place of a
5627  *     regular page.
5628  *
5629  *     See Documentations/vm/hmm.txt and include/linux/hmm.h
5630  *
5631  * Called with pte lock held.
5632  */
5633 
5634 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5635 		unsigned long addr, pte_t ptent, union mc_target *target)
5636 {
5637 	struct page *page = NULL;
5638 	enum mc_target_type ret = MC_TARGET_NONE;
5639 	swp_entry_t ent = { .val = 0 };
5640 
5641 	if (pte_present(ptent))
5642 		page = mc_handle_present_pte(vma, addr, ptent);
5643 	else if (is_swap_pte(ptent))
5644 		page = mc_handle_swap_pte(vma, ptent, &ent);
5645 	else if (pte_none(ptent))
5646 		page = mc_handle_file_pte(vma, addr, ptent);
5647 
5648 	if (!page && !ent.val)
5649 		return ret;
5650 	if (page) {
5651 		/*
5652 		 * Do only loose check w/o serialization.
5653 		 * mem_cgroup_move_account() checks the page is valid or
5654 		 * not under LRU exclusion.
5655 		 */
5656 		if (page_memcg(page) == mc.from) {
5657 			ret = MC_TARGET_PAGE;
5658 			if (is_device_private_page(page))
5659 				ret = MC_TARGET_DEVICE;
5660 			if (target)
5661 				target->page = page;
5662 		}
5663 		if (!ret || !target)
5664 			put_page(page);
5665 	}
5666 	/*
5667 	 * There is a swap entry and a page doesn't exist or isn't charged.
5668 	 * But we cannot move a tail-page in a THP.
5669 	 */
5670 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5671 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5672 		ret = MC_TARGET_SWAP;
5673 		if (target)
5674 			target->ent = ent;
5675 	}
5676 	return ret;
5677 }
5678 
5679 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5680 /*
5681  * We don't consider PMD mapped swapping or file mapped pages because THP does
5682  * not support them for now.
5683  * Caller should make sure that pmd_trans_huge(pmd) is true.
5684  */
5685 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5686 		unsigned long addr, pmd_t pmd, union mc_target *target)
5687 {
5688 	struct page *page = NULL;
5689 	enum mc_target_type ret = MC_TARGET_NONE;
5690 
5691 	if (unlikely(is_swap_pmd(pmd))) {
5692 		VM_BUG_ON(thp_migration_supported() &&
5693 				  !is_pmd_migration_entry(pmd));
5694 		return ret;
5695 	}
5696 	page = pmd_page(pmd);
5697 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5698 	if (!(mc.flags & MOVE_ANON))
5699 		return ret;
5700 	if (page_memcg(page) == mc.from) {
5701 		ret = MC_TARGET_PAGE;
5702 		if (target) {
5703 			get_page(page);
5704 			target->page = page;
5705 		}
5706 	}
5707 	return ret;
5708 }
5709 #else
5710 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5711 		unsigned long addr, pmd_t pmd, union mc_target *target)
5712 {
5713 	return MC_TARGET_NONE;
5714 }
5715 #endif
5716 
5717 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5718 					unsigned long addr, unsigned long end,
5719 					struct mm_walk *walk)
5720 {
5721 	struct vm_area_struct *vma = walk->vma;
5722 	pte_t *pte;
5723 	spinlock_t *ptl;
5724 
5725 	ptl = pmd_trans_huge_lock(pmd, vma);
5726 	if (ptl) {
5727 		/*
5728 		 * Note their can not be MC_TARGET_DEVICE for now as we do not
5729 		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
5730 		 * this might change.
5731 		 */
5732 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5733 			mc.precharge += HPAGE_PMD_NR;
5734 		spin_unlock(ptl);
5735 		return 0;
5736 	}
5737 
5738 	if (pmd_trans_unstable(pmd))
5739 		return 0;
5740 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5741 	for (; addr != end; pte++, addr += PAGE_SIZE)
5742 		if (get_mctgt_type(vma, addr, *pte, NULL))
5743 			mc.precharge++;	/* increment precharge temporarily */
5744 	pte_unmap_unlock(pte - 1, ptl);
5745 	cond_resched();
5746 
5747 	return 0;
5748 }
5749 
5750 static const struct mm_walk_ops precharge_walk_ops = {
5751 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5752 };
5753 
5754 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5755 {
5756 	unsigned long precharge;
5757 
5758 	mmap_read_lock(mm);
5759 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5760 	mmap_read_unlock(mm);
5761 
5762 	precharge = mc.precharge;
5763 	mc.precharge = 0;
5764 
5765 	return precharge;
5766 }
5767 
5768 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5769 {
5770 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5771 
5772 	VM_BUG_ON(mc.moving_task);
5773 	mc.moving_task = current;
5774 	return mem_cgroup_do_precharge(precharge);
5775 }
5776 
5777 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5778 static void __mem_cgroup_clear_mc(void)
5779 {
5780 	struct mem_cgroup *from = mc.from;
5781 	struct mem_cgroup *to = mc.to;
5782 
5783 	/* we must uncharge all the leftover precharges from mc.to */
5784 	if (mc.precharge) {
5785 		cancel_charge(mc.to, mc.precharge);
5786 		mc.precharge = 0;
5787 	}
5788 	/*
5789 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5790 	 * we must uncharge here.
5791 	 */
5792 	if (mc.moved_charge) {
5793 		cancel_charge(mc.from, mc.moved_charge);
5794 		mc.moved_charge = 0;
5795 	}
5796 	/* we must fixup refcnts and charges */
5797 	if (mc.moved_swap) {
5798 		/* uncharge swap account from the old cgroup */
5799 		if (!mem_cgroup_is_root(mc.from))
5800 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5801 
5802 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5803 
5804 		/*
5805 		 * we charged both to->memory and to->memsw, so we
5806 		 * should uncharge to->memory.
5807 		 */
5808 		if (!mem_cgroup_is_root(mc.to))
5809 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5810 
5811 		mc.moved_swap = 0;
5812 	}
5813 	memcg_oom_recover(from);
5814 	memcg_oom_recover(to);
5815 	wake_up_all(&mc.waitq);
5816 }
5817 
5818 static void mem_cgroup_clear_mc(void)
5819 {
5820 	struct mm_struct *mm = mc.mm;
5821 
5822 	/*
5823 	 * we must clear moving_task before waking up waiters at the end of
5824 	 * task migration.
5825 	 */
5826 	mc.moving_task = NULL;
5827 	__mem_cgroup_clear_mc();
5828 	spin_lock(&mc.lock);
5829 	mc.from = NULL;
5830 	mc.to = NULL;
5831 	mc.mm = NULL;
5832 	spin_unlock(&mc.lock);
5833 
5834 	mmput(mm);
5835 }
5836 
5837 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5838 {
5839 	struct cgroup_subsys_state *css;
5840 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5841 	struct mem_cgroup *from;
5842 	struct task_struct *leader, *p;
5843 	struct mm_struct *mm;
5844 	unsigned long move_flags;
5845 	int ret = 0;
5846 
5847 	/* charge immigration isn't supported on the default hierarchy */
5848 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5849 		return 0;
5850 
5851 	/*
5852 	 * Multi-process migrations only happen on the default hierarchy
5853 	 * where charge immigration is not used.  Perform charge
5854 	 * immigration if @tset contains a leader and whine if there are
5855 	 * multiple.
5856 	 */
5857 	p = NULL;
5858 	cgroup_taskset_for_each_leader(leader, css, tset) {
5859 		WARN_ON_ONCE(p);
5860 		p = leader;
5861 		memcg = mem_cgroup_from_css(css);
5862 	}
5863 	if (!p)
5864 		return 0;
5865 
5866 	/*
5867 	 * We are now committed to this value whatever it is. Changes in this
5868 	 * tunable will only affect upcoming migrations, not the current one.
5869 	 * So we need to save it, and keep it going.
5870 	 */
5871 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5872 	if (!move_flags)
5873 		return 0;
5874 
5875 	from = mem_cgroup_from_task(p);
5876 
5877 	VM_BUG_ON(from == memcg);
5878 
5879 	mm = get_task_mm(p);
5880 	if (!mm)
5881 		return 0;
5882 	/* We move charges only when we move a owner of the mm */
5883 	if (mm->owner == p) {
5884 		VM_BUG_ON(mc.from);
5885 		VM_BUG_ON(mc.to);
5886 		VM_BUG_ON(mc.precharge);
5887 		VM_BUG_ON(mc.moved_charge);
5888 		VM_BUG_ON(mc.moved_swap);
5889 
5890 		spin_lock(&mc.lock);
5891 		mc.mm = mm;
5892 		mc.from = from;
5893 		mc.to = memcg;
5894 		mc.flags = move_flags;
5895 		spin_unlock(&mc.lock);
5896 		/* We set mc.moving_task later */
5897 
5898 		ret = mem_cgroup_precharge_mc(mm);
5899 		if (ret)
5900 			mem_cgroup_clear_mc();
5901 	} else {
5902 		mmput(mm);
5903 	}
5904 	return ret;
5905 }
5906 
5907 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5908 {
5909 	if (mc.to)
5910 		mem_cgroup_clear_mc();
5911 }
5912 
5913 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5914 				unsigned long addr, unsigned long end,
5915 				struct mm_walk *walk)
5916 {
5917 	int ret = 0;
5918 	struct vm_area_struct *vma = walk->vma;
5919 	pte_t *pte;
5920 	spinlock_t *ptl;
5921 	enum mc_target_type target_type;
5922 	union mc_target target;
5923 	struct page *page;
5924 
5925 	ptl = pmd_trans_huge_lock(pmd, vma);
5926 	if (ptl) {
5927 		if (mc.precharge < HPAGE_PMD_NR) {
5928 			spin_unlock(ptl);
5929 			return 0;
5930 		}
5931 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5932 		if (target_type == MC_TARGET_PAGE) {
5933 			page = target.page;
5934 			if (!isolate_lru_page(page)) {
5935 				if (!mem_cgroup_move_account(page, true,
5936 							     mc.from, mc.to)) {
5937 					mc.precharge -= HPAGE_PMD_NR;
5938 					mc.moved_charge += HPAGE_PMD_NR;
5939 				}
5940 				putback_lru_page(page);
5941 			}
5942 			put_page(page);
5943 		} else if (target_type == MC_TARGET_DEVICE) {
5944 			page = target.page;
5945 			if (!mem_cgroup_move_account(page, true,
5946 						     mc.from, mc.to)) {
5947 				mc.precharge -= HPAGE_PMD_NR;
5948 				mc.moved_charge += HPAGE_PMD_NR;
5949 			}
5950 			put_page(page);
5951 		}
5952 		spin_unlock(ptl);
5953 		return 0;
5954 	}
5955 
5956 	if (pmd_trans_unstable(pmd))
5957 		return 0;
5958 retry:
5959 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5960 	for (; addr != end; addr += PAGE_SIZE) {
5961 		pte_t ptent = *(pte++);
5962 		bool device = false;
5963 		swp_entry_t ent;
5964 
5965 		if (!mc.precharge)
5966 			break;
5967 
5968 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5969 		case MC_TARGET_DEVICE:
5970 			device = true;
5971 			fallthrough;
5972 		case MC_TARGET_PAGE:
5973 			page = target.page;
5974 			/*
5975 			 * We can have a part of the split pmd here. Moving it
5976 			 * can be done but it would be too convoluted so simply
5977 			 * ignore such a partial THP and keep it in original
5978 			 * memcg. There should be somebody mapping the head.
5979 			 */
5980 			if (PageTransCompound(page))
5981 				goto put;
5982 			if (!device && isolate_lru_page(page))
5983 				goto put;
5984 			if (!mem_cgroup_move_account(page, false,
5985 						mc.from, mc.to)) {
5986 				mc.precharge--;
5987 				/* we uncharge from mc.from later. */
5988 				mc.moved_charge++;
5989 			}
5990 			if (!device)
5991 				putback_lru_page(page);
5992 put:			/* get_mctgt_type() gets the page */
5993 			put_page(page);
5994 			break;
5995 		case MC_TARGET_SWAP:
5996 			ent = target.ent;
5997 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5998 				mc.precharge--;
5999 				mem_cgroup_id_get_many(mc.to, 1);
6000 				/* we fixup other refcnts and charges later. */
6001 				mc.moved_swap++;
6002 			}
6003 			break;
6004 		default:
6005 			break;
6006 		}
6007 	}
6008 	pte_unmap_unlock(pte - 1, ptl);
6009 	cond_resched();
6010 
6011 	if (addr != end) {
6012 		/*
6013 		 * We have consumed all precharges we got in can_attach().
6014 		 * We try charge one by one, but don't do any additional
6015 		 * charges to mc.to if we have failed in charge once in attach()
6016 		 * phase.
6017 		 */
6018 		ret = mem_cgroup_do_precharge(1);
6019 		if (!ret)
6020 			goto retry;
6021 	}
6022 
6023 	return ret;
6024 }
6025 
6026 static const struct mm_walk_ops charge_walk_ops = {
6027 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6028 };
6029 
6030 static void mem_cgroup_move_charge(void)
6031 {
6032 	lru_add_drain_all();
6033 	/*
6034 	 * Signal lock_page_memcg() to take the memcg's move_lock
6035 	 * while we're moving its pages to another memcg. Then wait
6036 	 * for already started RCU-only updates to finish.
6037 	 */
6038 	atomic_inc(&mc.from->moving_account);
6039 	synchronize_rcu();
6040 retry:
6041 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6042 		/*
6043 		 * Someone who are holding the mmap_lock might be waiting in
6044 		 * waitq. So we cancel all extra charges, wake up all waiters,
6045 		 * and retry. Because we cancel precharges, we might not be able
6046 		 * to move enough charges, but moving charge is a best-effort
6047 		 * feature anyway, so it wouldn't be a big problem.
6048 		 */
6049 		__mem_cgroup_clear_mc();
6050 		cond_resched();
6051 		goto retry;
6052 	}
6053 	/*
6054 	 * When we have consumed all precharges and failed in doing
6055 	 * additional charge, the page walk just aborts.
6056 	 */
6057 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6058 			NULL);
6059 
6060 	mmap_read_unlock(mc.mm);
6061 	atomic_dec(&mc.from->moving_account);
6062 }
6063 
6064 static void mem_cgroup_move_task(void)
6065 {
6066 	if (mc.to) {
6067 		mem_cgroup_move_charge();
6068 		mem_cgroup_clear_mc();
6069 	}
6070 }
6071 #else	/* !CONFIG_MMU */
6072 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6073 {
6074 	return 0;
6075 }
6076 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6077 {
6078 }
6079 static void mem_cgroup_move_task(void)
6080 {
6081 }
6082 #endif
6083 
6084 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6085 {
6086 	if (value == PAGE_COUNTER_MAX)
6087 		seq_puts(m, "max\n");
6088 	else
6089 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6090 
6091 	return 0;
6092 }
6093 
6094 static u64 memory_current_read(struct cgroup_subsys_state *css,
6095 			       struct cftype *cft)
6096 {
6097 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6098 
6099 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6100 }
6101 
6102 static int memory_min_show(struct seq_file *m, void *v)
6103 {
6104 	return seq_puts_memcg_tunable(m,
6105 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6106 }
6107 
6108 static ssize_t memory_min_write(struct kernfs_open_file *of,
6109 				char *buf, size_t nbytes, loff_t off)
6110 {
6111 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6112 	unsigned long min;
6113 	int err;
6114 
6115 	buf = strstrip(buf);
6116 	err = page_counter_memparse(buf, "max", &min);
6117 	if (err)
6118 		return err;
6119 
6120 	page_counter_set_min(&memcg->memory, min);
6121 
6122 	return nbytes;
6123 }
6124 
6125 static int memory_low_show(struct seq_file *m, void *v)
6126 {
6127 	return seq_puts_memcg_tunable(m,
6128 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6129 }
6130 
6131 static ssize_t memory_low_write(struct kernfs_open_file *of,
6132 				char *buf, size_t nbytes, loff_t off)
6133 {
6134 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6135 	unsigned long low;
6136 	int err;
6137 
6138 	buf = strstrip(buf);
6139 	err = page_counter_memparse(buf, "max", &low);
6140 	if (err)
6141 		return err;
6142 
6143 	page_counter_set_low(&memcg->memory, low);
6144 
6145 	return nbytes;
6146 }
6147 
6148 static int memory_high_show(struct seq_file *m, void *v)
6149 {
6150 	return seq_puts_memcg_tunable(m,
6151 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6152 }
6153 
6154 static ssize_t memory_high_write(struct kernfs_open_file *of,
6155 				 char *buf, size_t nbytes, loff_t off)
6156 {
6157 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6158 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6159 	bool drained = false;
6160 	unsigned long high;
6161 	int err;
6162 
6163 	buf = strstrip(buf);
6164 	err = page_counter_memparse(buf, "max", &high);
6165 	if (err)
6166 		return err;
6167 
6168 	page_counter_set_high(&memcg->memory, high);
6169 
6170 	for (;;) {
6171 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6172 		unsigned long reclaimed;
6173 
6174 		if (nr_pages <= high)
6175 			break;
6176 
6177 		if (signal_pending(current))
6178 			break;
6179 
6180 		if (!drained) {
6181 			drain_all_stock(memcg);
6182 			drained = true;
6183 			continue;
6184 		}
6185 
6186 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6187 							 GFP_KERNEL, true);
6188 
6189 		if (!reclaimed && !nr_retries--)
6190 			break;
6191 	}
6192 
6193 	memcg_wb_domain_size_changed(memcg);
6194 	return nbytes;
6195 }
6196 
6197 static int memory_max_show(struct seq_file *m, void *v)
6198 {
6199 	return seq_puts_memcg_tunable(m,
6200 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6201 }
6202 
6203 static ssize_t memory_max_write(struct kernfs_open_file *of,
6204 				char *buf, size_t nbytes, loff_t off)
6205 {
6206 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6207 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6208 	bool drained = false;
6209 	unsigned long max;
6210 	int err;
6211 
6212 	buf = strstrip(buf);
6213 	err = page_counter_memparse(buf, "max", &max);
6214 	if (err)
6215 		return err;
6216 
6217 	xchg(&memcg->memory.max, max);
6218 
6219 	for (;;) {
6220 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6221 
6222 		if (nr_pages <= max)
6223 			break;
6224 
6225 		if (signal_pending(current))
6226 			break;
6227 
6228 		if (!drained) {
6229 			drain_all_stock(memcg);
6230 			drained = true;
6231 			continue;
6232 		}
6233 
6234 		if (nr_reclaims) {
6235 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6236 							  GFP_KERNEL, true))
6237 				nr_reclaims--;
6238 			continue;
6239 		}
6240 
6241 		memcg_memory_event(memcg, MEMCG_OOM);
6242 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6243 			break;
6244 	}
6245 
6246 	memcg_wb_domain_size_changed(memcg);
6247 	return nbytes;
6248 }
6249 
6250 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6251 {
6252 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6253 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6254 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6255 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6256 	seq_printf(m, "oom_kill %lu\n",
6257 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6258 	seq_printf(m, "oom_group_kill %lu\n",
6259 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6260 }
6261 
6262 static int memory_events_show(struct seq_file *m, void *v)
6263 {
6264 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6265 
6266 	__memory_events_show(m, memcg->memory_events);
6267 	return 0;
6268 }
6269 
6270 static int memory_events_local_show(struct seq_file *m, void *v)
6271 {
6272 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6273 
6274 	__memory_events_show(m, memcg->memory_events_local);
6275 	return 0;
6276 }
6277 
6278 static int memory_stat_show(struct seq_file *m, void *v)
6279 {
6280 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6281 	char *buf;
6282 
6283 	buf = memory_stat_format(memcg);
6284 	if (!buf)
6285 		return -ENOMEM;
6286 	seq_puts(m, buf);
6287 	kfree(buf);
6288 	return 0;
6289 }
6290 
6291 #ifdef CONFIG_NUMA
6292 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6293 						     int item)
6294 {
6295 	return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6296 }
6297 
6298 static int memory_numa_stat_show(struct seq_file *m, void *v)
6299 {
6300 	int i;
6301 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6302 
6303 	mem_cgroup_flush_stats();
6304 
6305 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6306 		int nid;
6307 
6308 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6309 			continue;
6310 
6311 		seq_printf(m, "%s", memory_stats[i].name);
6312 		for_each_node_state(nid, N_MEMORY) {
6313 			u64 size;
6314 			struct lruvec *lruvec;
6315 
6316 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6317 			size = lruvec_page_state_output(lruvec,
6318 							memory_stats[i].idx);
6319 			seq_printf(m, " N%d=%llu", nid, size);
6320 		}
6321 		seq_putc(m, '\n');
6322 	}
6323 
6324 	return 0;
6325 }
6326 #endif
6327 
6328 static int memory_oom_group_show(struct seq_file *m, void *v)
6329 {
6330 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6331 
6332 	seq_printf(m, "%d\n", memcg->oom_group);
6333 
6334 	return 0;
6335 }
6336 
6337 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6338 				      char *buf, size_t nbytes, loff_t off)
6339 {
6340 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6341 	int ret, oom_group;
6342 
6343 	buf = strstrip(buf);
6344 	if (!buf)
6345 		return -EINVAL;
6346 
6347 	ret = kstrtoint(buf, 0, &oom_group);
6348 	if (ret)
6349 		return ret;
6350 
6351 	if (oom_group != 0 && oom_group != 1)
6352 		return -EINVAL;
6353 
6354 	memcg->oom_group = oom_group;
6355 
6356 	return nbytes;
6357 }
6358 
6359 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6360 			      size_t nbytes, loff_t off)
6361 {
6362 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6363 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6364 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6365 	int err;
6366 
6367 	buf = strstrip(buf);
6368 	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6369 	if (err)
6370 		return err;
6371 
6372 	while (nr_reclaimed < nr_to_reclaim) {
6373 		unsigned long reclaimed;
6374 
6375 		if (signal_pending(current))
6376 			return -EINTR;
6377 
6378 		/*
6379 		 * This is the final attempt, drain percpu lru caches in the
6380 		 * hope of introducing more evictable pages for
6381 		 * try_to_free_mem_cgroup_pages().
6382 		 */
6383 		if (!nr_retries)
6384 			lru_add_drain_all();
6385 
6386 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6387 						nr_to_reclaim - nr_reclaimed,
6388 						GFP_KERNEL, true);
6389 
6390 		if (!reclaimed && !nr_retries--)
6391 			return -EAGAIN;
6392 
6393 		nr_reclaimed += reclaimed;
6394 	}
6395 
6396 	return nbytes;
6397 }
6398 
6399 static struct cftype memory_files[] = {
6400 	{
6401 		.name = "current",
6402 		.flags = CFTYPE_NOT_ON_ROOT,
6403 		.read_u64 = memory_current_read,
6404 	},
6405 	{
6406 		.name = "min",
6407 		.flags = CFTYPE_NOT_ON_ROOT,
6408 		.seq_show = memory_min_show,
6409 		.write = memory_min_write,
6410 	},
6411 	{
6412 		.name = "low",
6413 		.flags = CFTYPE_NOT_ON_ROOT,
6414 		.seq_show = memory_low_show,
6415 		.write = memory_low_write,
6416 	},
6417 	{
6418 		.name = "high",
6419 		.flags = CFTYPE_NOT_ON_ROOT,
6420 		.seq_show = memory_high_show,
6421 		.write = memory_high_write,
6422 	},
6423 	{
6424 		.name = "max",
6425 		.flags = CFTYPE_NOT_ON_ROOT,
6426 		.seq_show = memory_max_show,
6427 		.write = memory_max_write,
6428 	},
6429 	{
6430 		.name = "events",
6431 		.flags = CFTYPE_NOT_ON_ROOT,
6432 		.file_offset = offsetof(struct mem_cgroup, events_file),
6433 		.seq_show = memory_events_show,
6434 	},
6435 	{
6436 		.name = "events.local",
6437 		.flags = CFTYPE_NOT_ON_ROOT,
6438 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6439 		.seq_show = memory_events_local_show,
6440 	},
6441 	{
6442 		.name = "stat",
6443 		.seq_show = memory_stat_show,
6444 	},
6445 #ifdef CONFIG_NUMA
6446 	{
6447 		.name = "numa_stat",
6448 		.seq_show = memory_numa_stat_show,
6449 	},
6450 #endif
6451 	{
6452 		.name = "oom.group",
6453 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6454 		.seq_show = memory_oom_group_show,
6455 		.write = memory_oom_group_write,
6456 	},
6457 	{
6458 		.name = "reclaim",
6459 		.flags = CFTYPE_NS_DELEGATABLE,
6460 		.write = memory_reclaim,
6461 	},
6462 	{ }	/* terminate */
6463 };
6464 
6465 struct cgroup_subsys memory_cgrp_subsys = {
6466 	.css_alloc = mem_cgroup_css_alloc,
6467 	.css_online = mem_cgroup_css_online,
6468 	.css_offline = mem_cgroup_css_offline,
6469 	.css_released = mem_cgroup_css_released,
6470 	.css_free = mem_cgroup_css_free,
6471 	.css_reset = mem_cgroup_css_reset,
6472 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
6473 	.can_attach = mem_cgroup_can_attach,
6474 	.cancel_attach = mem_cgroup_cancel_attach,
6475 	.post_attach = mem_cgroup_move_task,
6476 	.dfl_cftypes = memory_files,
6477 	.legacy_cftypes = mem_cgroup_legacy_files,
6478 	.early_init = 0,
6479 };
6480 
6481 /*
6482  * This function calculates an individual cgroup's effective
6483  * protection which is derived from its own memory.min/low, its
6484  * parent's and siblings' settings, as well as the actual memory
6485  * distribution in the tree.
6486  *
6487  * The following rules apply to the effective protection values:
6488  *
6489  * 1. At the first level of reclaim, effective protection is equal to
6490  *    the declared protection in memory.min and memory.low.
6491  *
6492  * 2. To enable safe delegation of the protection configuration, at
6493  *    subsequent levels the effective protection is capped to the
6494  *    parent's effective protection.
6495  *
6496  * 3. To make complex and dynamic subtrees easier to configure, the
6497  *    user is allowed to overcommit the declared protection at a given
6498  *    level. If that is the case, the parent's effective protection is
6499  *    distributed to the children in proportion to how much protection
6500  *    they have declared and how much of it they are utilizing.
6501  *
6502  *    This makes distribution proportional, but also work-conserving:
6503  *    if one cgroup claims much more protection than it uses memory,
6504  *    the unused remainder is available to its siblings.
6505  *
6506  * 4. Conversely, when the declared protection is undercommitted at a
6507  *    given level, the distribution of the larger parental protection
6508  *    budget is NOT proportional. A cgroup's protection from a sibling
6509  *    is capped to its own memory.min/low setting.
6510  *
6511  * 5. However, to allow protecting recursive subtrees from each other
6512  *    without having to declare each individual cgroup's fixed share
6513  *    of the ancestor's claim to protection, any unutilized -
6514  *    "floating" - protection from up the tree is distributed in
6515  *    proportion to each cgroup's *usage*. This makes the protection
6516  *    neutral wrt sibling cgroups and lets them compete freely over
6517  *    the shared parental protection budget, but it protects the
6518  *    subtree as a whole from neighboring subtrees.
6519  *
6520  * Note that 4. and 5. are not in conflict: 4. is about protecting
6521  * against immediate siblings whereas 5. is about protecting against
6522  * neighboring subtrees.
6523  */
6524 static unsigned long effective_protection(unsigned long usage,
6525 					  unsigned long parent_usage,
6526 					  unsigned long setting,
6527 					  unsigned long parent_effective,
6528 					  unsigned long siblings_protected)
6529 {
6530 	unsigned long protected;
6531 	unsigned long ep;
6532 
6533 	protected = min(usage, setting);
6534 	/*
6535 	 * If all cgroups at this level combined claim and use more
6536 	 * protection then what the parent affords them, distribute
6537 	 * shares in proportion to utilization.
6538 	 *
6539 	 * We are using actual utilization rather than the statically
6540 	 * claimed protection in order to be work-conserving: claimed
6541 	 * but unused protection is available to siblings that would
6542 	 * otherwise get a smaller chunk than what they claimed.
6543 	 */
6544 	if (siblings_protected > parent_effective)
6545 		return protected * parent_effective / siblings_protected;
6546 
6547 	/*
6548 	 * Ok, utilized protection of all children is within what the
6549 	 * parent affords them, so we know whatever this child claims
6550 	 * and utilizes is effectively protected.
6551 	 *
6552 	 * If there is unprotected usage beyond this value, reclaim
6553 	 * will apply pressure in proportion to that amount.
6554 	 *
6555 	 * If there is unutilized protection, the cgroup will be fully
6556 	 * shielded from reclaim, but we do return a smaller value for
6557 	 * protection than what the group could enjoy in theory. This
6558 	 * is okay. With the overcommit distribution above, effective
6559 	 * protection is always dependent on how memory is actually
6560 	 * consumed among the siblings anyway.
6561 	 */
6562 	ep = protected;
6563 
6564 	/*
6565 	 * If the children aren't claiming (all of) the protection
6566 	 * afforded to them by the parent, distribute the remainder in
6567 	 * proportion to the (unprotected) memory of each cgroup. That
6568 	 * way, cgroups that aren't explicitly prioritized wrt each
6569 	 * other compete freely over the allowance, but they are
6570 	 * collectively protected from neighboring trees.
6571 	 *
6572 	 * We're using unprotected memory for the weight so that if
6573 	 * some cgroups DO claim explicit protection, we don't protect
6574 	 * the same bytes twice.
6575 	 *
6576 	 * Check both usage and parent_usage against the respective
6577 	 * protected values. One should imply the other, but they
6578 	 * aren't read atomically - make sure the division is sane.
6579 	 */
6580 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6581 		return ep;
6582 	if (parent_effective > siblings_protected &&
6583 	    parent_usage > siblings_protected &&
6584 	    usage > protected) {
6585 		unsigned long unclaimed;
6586 
6587 		unclaimed = parent_effective - siblings_protected;
6588 		unclaimed *= usage - protected;
6589 		unclaimed /= parent_usage - siblings_protected;
6590 
6591 		ep += unclaimed;
6592 	}
6593 
6594 	return ep;
6595 }
6596 
6597 /**
6598  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6599  * @root: the top ancestor of the sub-tree being checked
6600  * @memcg: the memory cgroup to check
6601  *
6602  * WARNING: This function is not stateless! It can only be used as part
6603  *          of a top-down tree iteration, not for isolated queries.
6604  */
6605 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6606 				     struct mem_cgroup *memcg)
6607 {
6608 	unsigned long usage, parent_usage;
6609 	struct mem_cgroup *parent;
6610 
6611 	if (mem_cgroup_disabled())
6612 		return;
6613 
6614 	if (!root)
6615 		root = root_mem_cgroup;
6616 
6617 	/*
6618 	 * Effective values of the reclaim targets are ignored so they
6619 	 * can be stale. Have a look at mem_cgroup_protection for more
6620 	 * details.
6621 	 * TODO: calculation should be more robust so that we do not need
6622 	 * that special casing.
6623 	 */
6624 	if (memcg == root)
6625 		return;
6626 
6627 	usage = page_counter_read(&memcg->memory);
6628 	if (!usage)
6629 		return;
6630 
6631 	parent = parent_mem_cgroup(memcg);
6632 
6633 	if (parent == root) {
6634 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6635 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6636 		return;
6637 	}
6638 
6639 	parent_usage = page_counter_read(&parent->memory);
6640 
6641 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6642 			READ_ONCE(memcg->memory.min),
6643 			READ_ONCE(parent->memory.emin),
6644 			atomic_long_read(&parent->memory.children_min_usage)));
6645 
6646 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6647 			READ_ONCE(memcg->memory.low),
6648 			READ_ONCE(parent->memory.elow),
6649 			atomic_long_read(&parent->memory.children_low_usage)));
6650 }
6651 
6652 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6653 			gfp_t gfp)
6654 {
6655 	long nr_pages = folio_nr_pages(folio);
6656 	int ret;
6657 
6658 	ret = try_charge(memcg, gfp, nr_pages);
6659 	if (ret)
6660 		goto out;
6661 
6662 	css_get(&memcg->css);
6663 	commit_charge(folio, memcg);
6664 
6665 	local_irq_disable();
6666 	mem_cgroup_charge_statistics(memcg, nr_pages);
6667 	memcg_check_events(memcg, folio_nid(folio));
6668 	local_irq_enable();
6669 out:
6670 	return ret;
6671 }
6672 
6673 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6674 {
6675 	struct mem_cgroup *memcg;
6676 	int ret;
6677 
6678 	memcg = get_mem_cgroup_from_mm(mm);
6679 	ret = charge_memcg(folio, memcg, gfp);
6680 	css_put(&memcg->css);
6681 
6682 	return ret;
6683 }
6684 
6685 /**
6686  * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6687  * @page: page to charge
6688  * @mm: mm context of the victim
6689  * @gfp: reclaim mode
6690  * @entry: swap entry for which the page is allocated
6691  *
6692  * This function charges a page allocated for swapin. Please call this before
6693  * adding the page to the swapcache.
6694  *
6695  * Returns 0 on success. Otherwise, an error code is returned.
6696  */
6697 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6698 				  gfp_t gfp, swp_entry_t entry)
6699 {
6700 	struct folio *folio = page_folio(page);
6701 	struct mem_cgroup *memcg;
6702 	unsigned short id;
6703 	int ret;
6704 
6705 	if (mem_cgroup_disabled())
6706 		return 0;
6707 
6708 	id = lookup_swap_cgroup_id(entry);
6709 	rcu_read_lock();
6710 	memcg = mem_cgroup_from_id(id);
6711 	if (!memcg || !css_tryget_online(&memcg->css))
6712 		memcg = get_mem_cgroup_from_mm(mm);
6713 	rcu_read_unlock();
6714 
6715 	ret = charge_memcg(folio, memcg, gfp);
6716 
6717 	css_put(&memcg->css);
6718 	return ret;
6719 }
6720 
6721 /*
6722  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6723  * @entry: swap entry for which the page is charged
6724  *
6725  * Call this function after successfully adding the charged page to swapcache.
6726  *
6727  * Note: This function assumes the page for which swap slot is being uncharged
6728  * is order 0 page.
6729  */
6730 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6731 {
6732 	/*
6733 	 * Cgroup1's unified memory+swap counter has been charged with the
6734 	 * new swapcache page, finish the transfer by uncharging the swap
6735 	 * slot. The swap slot would also get uncharged when it dies, but
6736 	 * it can stick around indefinitely and we'd count the page twice
6737 	 * the entire time.
6738 	 *
6739 	 * Cgroup2 has separate resource counters for memory and swap,
6740 	 * so this is a non-issue here. Memory and swap charge lifetimes
6741 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
6742 	 * page to memory here, and uncharge swap when the slot is freed.
6743 	 */
6744 	if (!mem_cgroup_disabled() && do_memsw_account()) {
6745 		/*
6746 		 * The swap entry might not get freed for a long time,
6747 		 * let's not wait for it.  The page already received a
6748 		 * memory+swap charge, drop the swap entry duplicate.
6749 		 */
6750 		mem_cgroup_uncharge_swap(entry, 1);
6751 	}
6752 }
6753 
6754 struct uncharge_gather {
6755 	struct mem_cgroup *memcg;
6756 	unsigned long nr_memory;
6757 	unsigned long pgpgout;
6758 	unsigned long nr_kmem;
6759 	int nid;
6760 };
6761 
6762 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6763 {
6764 	memset(ug, 0, sizeof(*ug));
6765 }
6766 
6767 static void uncharge_batch(const struct uncharge_gather *ug)
6768 {
6769 	unsigned long flags;
6770 
6771 	if (ug->nr_memory) {
6772 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6773 		if (do_memsw_account())
6774 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6775 		if (ug->nr_kmem)
6776 			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
6777 		memcg_oom_recover(ug->memcg);
6778 	}
6779 
6780 	local_irq_save(flags);
6781 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6782 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6783 	memcg_check_events(ug->memcg, ug->nid);
6784 	local_irq_restore(flags);
6785 
6786 	/* drop reference from uncharge_folio */
6787 	css_put(&ug->memcg->css);
6788 }
6789 
6790 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
6791 {
6792 	long nr_pages;
6793 	struct mem_cgroup *memcg;
6794 	struct obj_cgroup *objcg;
6795 
6796 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
6797 
6798 	/*
6799 	 * Nobody should be changing or seriously looking at
6800 	 * folio memcg or objcg at this point, we have fully
6801 	 * exclusive access to the folio.
6802 	 */
6803 	if (folio_memcg_kmem(folio)) {
6804 		objcg = __folio_objcg(folio);
6805 		/*
6806 		 * This get matches the put at the end of the function and
6807 		 * kmem pages do not hold memcg references anymore.
6808 		 */
6809 		memcg = get_mem_cgroup_from_objcg(objcg);
6810 	} else {
6811 		memcg = __folio_memcg(folio);
6812 	}
6813 
6814 	if (!memcg)
6815 		return;
6816 
6817 	if (ug->memcg != memcg) {
6818 		if (ug->memcg) {
6819 			uncharge_batch(ug);
6820 			uncharge_gather_clear(ug);
6821 		}
6822 		ug->memcg = memcg;
6823 		ug->nid = folio_nid(folio);
6824 
6825 		/* pairs with css_put in uncharge_batch */
6826 		css_get(&memcg->css);
6827 	}
6828 
6829 	nr_pages = folio_nr_pages(folio);
6830 
6831 	if (folio_memcg_kmem(folio)) {
6832 		ug->nr_memory += nr_pages;
6833 		ug->nr_kmem += nr_pages;
6834 
6835 		folio->memcg_data = 0;
6836 		obj_cgroup_put(objcg);
6837 	} else {
6838 		/* LRU pages aren't accounted at the root level */
6839 		if (!mem_cgroup_is_root(memcg))
6840 			ug->nr_memory += nr_pages;
6841 		ug->pgpgout++;
6842 
6843 		folio->memcg_data = 0;
6844 	}
6845 
6846 	css_put(&memcg->css);
6847 }
6848 
6849 void __mem_cgroup_uncharge(struct folio *folio)
6850 {
6851 	struct uncharge_gather ug;
6852 
6853 	/* Don't touch folio->lru of any random page, pre-check: */
6854 	if (!folio_memcg(folio))
6855 		return;
6856 
6857 	uncharge_gather_clear(&ug);
6858 	uncharge_folio(folio, &ug);
6859 	uncharge_batch(&ug);
6860 }
6861 
6862 /**
6863  * __mem_cgroup_uncharge_list - uncharge a list of page
6864  * @page_list: list of pages to uncharge
6865  *
6866  * Uncharge a list of pages previously charged with
6867  * __mem_cgroup_charge().
6868  */
6869 void __mem_cgroup_uncharge_list(struct list_head *page_list)
6870 {
6871 	struct uncharge_gather ug;
6872 	struct folio *folio;
6873 
6874 	uncharge_gather_clear(&ug);
6875 	list_for_each_entry(folio, page_list, lru)
6876 		uncharge_folio(folio, &ug);
6877 	if (ug.memcg)
6878 		uncharge_batch(&ug);
6879 }
6880 
6881 /**
6882  * mem_cgroup_migrate - Charge a folio's replacement.
6883  * @old: Currently circulating folio.
6884  * @new: Replacement folio.
6885  *
6886  * Charge @new as a replacement folio for @old. @old will
6887  * be uncharged upon free.
6888  *
6889  * Both folios must be locked, @new->mapping must be set up.
6890  */
6891 void mem_cgroup_migrate(struct folio *old, struct folio *new)
6892 {
6893 	struct mem_cgroup *memcg;
6894 	long nr_pages = folio_nr_pages(new);
6895 	unsigned long flags;
6896 
6897 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
6898 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
6899 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
6900 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
6901 
6902 	if (mem_cgroup_disabled())
6903 		return;
6904 
6905 	/* Page cache replacement: new folio already charged? */
6906 	if (folio_memcg(new))
6907 		return;
6908 
6909 	memcg = folio_memcg(old);
6910 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
6911 	if (!memcg)
6912 		return;
6913 
6914 	/* Force-charge the new page. The old one will be freed soon */
6915 	if (!mem_cgroup_is_root(memcg)) {
6916 		page_counter_charge(&memcg->memory, nr_pages);
6917 		if (do_memsw_account())
6918 			page_counter_charge(&memcg->memsw, nr_pages);
6919 	}
6920 
6921 	css_get(&memcg->css);
6922 	commit_charge(new, memcg);
6923 
6924 	local_irq_save(flags);
6925 	mem_cgroup_charge_statistics(memcg, nr_pages);
6926 	memcg_check_events(memcg, folio_nid(new));
6927 	local_irq_restore(flags);
6928 }
6929 
6930 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6931 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6932 
6933 void mem_cgroup_sk_alloc(struct sock *sk)
6934 {
6935 	struct mem_cgroup *memcg;
6936 
6937 	if (!mem_cgroup_sockets_enabled)
6938 		return;
6939 
6940 	/* Do not associate the sock with unrelated interrupted task's memcg. */
6941 	if (!in_task())
6942 		return;
6943 
6944 	rcu_read_lock();
6945 	memcg = mem_cgroup_from_task(current);
6946 	if (memcg == root_mem_cgroup)
6947 		goto out;
6948 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6949 		goto out;
6950 	if (css_tryget(&memcg->css))
6951 		sk->sk_memcg = memcg;
6952 out:
6953 	rcu_read_unlock();
6954 }
6955 
6956 void mem_cgroup_sk_free(struct sock *sk)
6957 {
6958 	if (sk->sk_memcg)
6959 		css_put(&sk->sk_memcg->css);
6960 }
6961 
6962 /**
6963  * mem_cgroup_charge_skmem - charge socket memory
6964  * @memcg: memcg to charge
6965  * @nr_pages: number of pages to charge
6966  * @gfp_mask: reclaim mode
6967  *
6968  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6969  * @memcg's configured limit, %false if it doesn't.
6970  */
6971 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
6972 			     gfp_t gfp_mask)
6973 {
6974 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6975 		struct page_counter *fail;
6976 
6977 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6978 			memcg->tcpmem_pressure = 0;
6979 			return true;
6980 		}
6981 		memcg->tcpmem_pressure = 1;
6982 		if (gfp_mask & __GFP_NOFAIL) {
6983 			page_counter_charge(&memcg->tcpmem, nr_pages);
6984 			return true;
6985 		}
6986 		return false;
6987 	}
6988 
6989 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
6990 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6991 		return true;
6992 	}
6993 
6994 	return false;
6995 }
6996 
6997 /**
6998  * mem_cgroup_uncharge_skmem - uncharge socket memory
6999  * @memcg: memcg to uncharge
7000  * @nr_pages: number of pages to uncharge
7001  */
7002 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7003 {
7004 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7005 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7006 		return;
7007 	}
7008 
7009 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7010 
7011 	refill_stock(memcg, nr_pages);
7012 }
7013 
7014 static int __init cgroup_memory(char *s)
7015 {
7016 	char *token;
7017 
7018 	while ((token = strsep(&s, ",")) != NULL) {
7019 		if (!*token)
7020 			continue;
7021 		if (!strcmp(token, "nosocket"))
7022 			cgroup_memory_nosocket = true;
7023 		if (!strcmp(token, "nokmem"))
7024 			cgroup_memory_nokmem = true;
7025 	}
7026 	return 1;
7027 }
7028 __setup("cgroup.memory=", cgroup_memory);
7029 
7030 /*
7031  * subsys_initcall() for memory controller.
7032  *
7033  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7034  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7035  * basically everything that doesn't depend on a specific mem_cgroup structure
7036  * should be initialized from here.
7037  */
7038 static int __init mem_cgroup_init(void)
7039 {
7040 	int cpu, node;
7041 
7042 	/*
7043 	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7044 	 * used for per-memcg-per-cpu caching of per-node statistics. In order
7045 	 * to work fine, we should make sure that the overfill threshold can't
7046 	 * exceed S32_MAX / PAGE_SIZE.
7047 	 */
7048 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7049 
7050 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7051 				  memcg_hotplug_cpu_dead);
7052 
7053 	for_each_possible_cpu(cpu)
7054 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7055 			  drain_local_stock);
7056 
7057 	for_each_node(node) {
7058 		struct mem_cgroup_tree_per_node *rtpn;
7059 
7060 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7061 				    node_online(node) ? node : NUMA_NO_NODE);
7062 
7063 		rtpn->rb_root = RB_ROOT;
7064 		rtpn->rb_rightmost = NULL;
7065 		spin_lock_init(&rtpn->lock);
7066 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7067 	}
7068 
7069 	return 0;
7070 }
7071 subsys_initcall(mem_cgroup_init);
7072 
7073 #ifdef CONFIG_MEMCG_SWAP
7074 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7075 {
7076 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7077 		/*
7078 		 * The root cgroup cannot be destroyed, so it's refcount must
7079 		 * always be >= 1.
7080 		 */
7081 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7082 			VM_BUG_ON(1);
7083 			break;
7084 		}
7085 		memcg = parent_mem_cgroup(memcg);
7086 		if (!memcg)
7087 			memcg = root_mem_cgroup;
7088 	}
7089 	return memcg;
7090 }
7091 
7092 /**
7093  * mem_cgroup_swapout - transfer a memsw charge to swap
7094  * @folio: folio whose memsw charge to transfer
7095  * @entry: swap entry to move the charge to
7096  *
7097  * Transfer the memsw charge of @folio to @entry.
7098  */
7099 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7100 {
7101 	struct mem_cgroup *memcg, *swap_memcg;
7102 	unsigned int nr_entries;
7103 	unsigned short oldid;
7104 
7105 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7106 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7107 
7108 	if (mem_cgroup_disabled())
7109 		return;
7110 
7111 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7112 		return;
7113 
7114 	memcg = folio_memcg(folio);
7115 
7116 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7117 	if (!memcg)
7118 		return;
7119 
7120 	/*
7121 	 * In case the memcg owning these pages has been offlined and doesn't
7122 	 * have an ID allocated to it anymore, charge the closest online
7123 	 * ancestor for the swap instead and transfer the memory+swap charge.
7124 	 */
7125 	swap_memcg = mem_cgroup_id_get_online(memcg);
7126 	nr_entries = folio_nr_pages(folio);
7127 	/* Get references for the tail pages, too */
7128 	if (nr_entries > 1)
7129 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7130 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7131 				   nr_entries);
7132 	VM_BUG_ON_FOLIO(oldid, folio);
7133 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7134 
7135 	folio->memcg_data = 0;
7136 
7137 	if (!mem_cgroup_is_root(memcg))
7138 		page_counter_uncharge(&memcg->memory, nr_entries);
7139 
7140 	if (!cgroup_memory_noswap && memcg != swap_memcg) {
7141 		if (!mem_cgroup_is_root(swap_memcg))
7142 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7143 		page_counter_uncharge(&memcg->memsw, nr_entries);
7144 	}
7145 
7146 	/*
7147 	 * Interrupts should be disabled here because the caller holds the
7148 	 * i_pages lock which is taken with interrupts-off. It is
7149 	 * important here to have the interrupts disabled because it is the
7150 	 * only synchronisation we have for updating the per-CPU variables.
7151 	 */
7152 	memcg_stats_lock();
7153 	mem_cgroup_charge_statistics(memcg, -nr_entries);
7154 	memcg_stats_unlock();
7155 	memcg_check_events(memcg, folio_nid(folio));
7156 
7157 	css_put(&memcg->css);
7158 }
7159 
7160 /**
7161  * __mem_cgroup_try_charge_swap - try charging swap space for a page
7162  * @page: page being added to swap
7163  * @entry: swap entry to charge
7164  *
7165  * Try to charge @page's memcg for the swap space at @entry.
7166  *
7167  * Returns 0 on success, -ENOMEM on failure.
7168  */
7169 int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7170 {
7171 	unsigned int nr_pages = thp_nr_pages(page);
7172 	struct page_counter *counter;
7173 	struct mem_cgroup *memcg;
7174 	unsigned short oldid;
7175 
7176 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7177 		return 0;
7178 
7179 	memcg = page_memcg(page);
7180 
7181 	VM_WARN_ON_ONCE_PAGE(!memcg, page);
7182 	if (!memcg)
7183 		return 0;
7184 
7185 	if (!entry.val) {
7186 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7187 		return 0;
7188 	}
7189 
7190 	memcg = mem_cgroup_id_get_online(memcg);
7191 
7192 	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7193 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7194 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7195 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7196 		mem_cgroup_id_put(memcg);
7197 		return -ENOMEM;
7198 	}
7199 
7200 	/* Get references for the tail pages, too */
7201 	if (nr_pages > 1)
7202 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7203 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7204 	VM_BUG_ON_PAGE(oldid, page);
7205 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7206 
7207 	return 0;
7208 }
7209 
7210 /**
7211  * __mem_cgroup_uncharge_swap - uncharge swap space
7212  * @entry: swap entry to uncharge
7213  * @nr_pages: the amount of swap space to uncharge
7214  */
7215 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7216 {
7217 	struct mem_cgroup *memcg;
7218 	unsigned short id;
7219 
7220 	id = swap_cgroup_record(entry, 0, nr_pages);
7221 	rcu_read_lock();
7222 	memcg = mem_cgroup_from_id(id);
7223 	if (memcg) {
7224 		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7225 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7226 				page_counter_uncharge(&memcg->swap, nr_pages);
7227 			else
7228 				page_counter_uncharge(&memcg->memsw, nr_pages);
7229 		}
7230 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7231 		mem_cgroup_id_put_many(memcg, nr_pages);
7232 	}
7233 	rcu_read_unlock();
7234 }
7235 
7236 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7237 {
7238 	long nr_swap_pages = get_nr_swap_pages();
7239 
7240 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7241 		return nr_swap_pages;
7242 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7243 		nr_swap_pages = min_t(long, nr_swap_pages,
7244 				      READ_ONCE(memcg->swap.max) -
7245 				      page_counter_read(&memcg->swap));
7246 	return nr_swap_pages;
7247 }
7248 
7249 bool mem_cgroup_swap_full(struct page *page)
7250 {
7251 	struct mem_cgroup *memcg;
7252 
7253 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7254 
7255 	if (vm_swap_full())
7256 		return true;
7257 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7258 		return false;
7259 
7260 	memcg = page_memcg(page);
7261 	if (!memcg)
7262 		return false;
7263 
7264 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7265 		unsigned long usage = page_counter_read(&memcg->swap);
7266 
7267 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7268 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7269 			return true;
7270 	}
7271 
7272 	return false;
7273 }
7274 
7275 static int __init setup_swap_account(char *s)
7276 {
7277 	if (!strcmp(s, "1"))
7278 		cgroup_memory_noswap = false;
7279 	else if (!strcmp(s, "0"))
7280 		cgroup_memory_noswap = true;
7281 	return 1;
7282 }
7283 __setup("swapaccount=", setup_swap_account);
7284 
7285 static u64 swap_current_read(struct cgroup_subsys_state *css,
7286 			     struct cftype *cft)
7287 {
7288 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7289 
7290 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7291 }
7292 
7293 static int swap_high_show(struct seq_file *m, void *v)
7294 {
7295 	return seq_puts_memcg_tunable(m,
7296 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7297 }
7298 
7299 static ssize_t swap_high_write(struct kernfs_open_file *of,
7300 			       char *buf, size_t nbytes, loff_t off)
7301 {
7302 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7303 	unsigned long high;
7304 	int err;
7305 
7306 	buf = strstrip(buf);
7307 	err = page_counter_memparse(buf, "max", &high);
7308 	if (err)
7309 		return err;
7310 
7311 	page_counter_set_high(&memcg->swap, high);
7312 
7313 	return nbytes;
7314 }
7315 
7316 static int swap_max_show(struct seq_file *m, void *v)
7317 {
7318 	return seq_puts_memcg_tunable(m,
7319 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7320 }
7321 
7322 static ssize_t swap_max_write(struct kernfs_open_file *of,
7323 			      char *buf, size_t nbytes, loff_t off)
7324 {
7325 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7326 	unsigned long max;
7327 	int err;
7328 
7329 	buf = strstrip(buf);
7330 	err = page_counter_memparse(buf, "max", &max);
7331 	if (err)
7332 		return err;
7333 
7334 	xchg(&memcg->swap.max, max);
7335 
7336 	return nbytes;
7337 }
7338 
7339 static int swap_events_show(struct seq_file *m, void *v)
7340 {
7341 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7342 
7343 	seq_printf(m, "high %lu\n",
7344 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7345 	seq_printf(m, "max %lu\n",
7346 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7347 	seq_printf(m, "fail %lu\n",
7348 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7349 
7350 	return 0;
7351 }
7352 
7353 static struct cftype swap_files[] = {
7354 	{
7355 		.name = "swap.current",
7356 		.flags = CFTYPE_NOT_ON_ROOT,
7357 		.read_u64 = swap_current_read,
7358 	},
7359 	{
7360 		.name = "swap.high",
7361 		.flags = CFTYPE_NOT_ON_ROOT,
7362 		.seq_show = swap_high_show,
7363 		.write = swap_high_write,
7364 	},
7365 	{
7366 		.name = "swap.max",
7367 		.flags = CFTYPE_NOT_ON_ROOT,
7368 		.seq_show = swap_max_show,
7369 		.write = swap_max_write,
7370 	},
7371 	{
7372 		.name = "swap.events",
7373 		.flags = CFTYPE_NOT_ON_ROOT,
7374 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7375 		.seq_show = swap_events_show,
7376 	},
7377 	{ }	/* terminate */
7378 };
7379 
7380 static struct cftype memsw_files[] = {
7381 	{
7382 		.name = "memsw.usage_in_bytes",
7383 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7384 		.read_u64 = mem_cgroup_read_u64,
7385 	},
7386 	{
7387 		.name = "memsw.max_usage_in_bytes",
7388 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7389 		.write = mem_cgroup_reset,
7390 		.read_u64 = mem_cgroup_read_u64,
7391 	},
7392 	{
7393 		.name = "memsw.limit_in_bytes",
7394 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7395 		.write = mem_cgroup_write,
7396 		.read_u64 = mem_cgroup_read_u64,
7397 	},
7398 	{
7399 		.name = "memsw.failcnt",
7400 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7401 		.write = mem_cgroup_reset,
7402 		.read_u64 = mem_cgroup_read_u64,
7403 	},
7404 	{ },	/* terminate */
7405 };
7406 
7407 /*
7408  * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
7409  * instead of a core_initcall(), this could mean cgroup_memory_noswap still
7410  * remains set to false even when memcg is disabled via "cgroup_disable=memory"
7411  * boot parameter. This may result in premature OOPS inside
7412  * mem_cgroup_get_nr_swap_pages() function in corner cases.
7413  */
7414 static int __init mem_cgroup_swap_init(void)
7415 {
7416 	/* No memory control -> no swap control */
7417 	if (mem_cgroup_disabled())
7418 		cgroup_memory_noswap = true;
7419 
7420 	if (cgroup_memory_noswap)
7421 		return 0;
7422 
7423 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7424 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7425 
7426 	return 0;
7427 }
7428 core_initcall(mem_cgroup_swap_init);
7429 
7430 #endif /* CONFIG_MEMCG_SWAP */
7431