xref: /openbmc/linux/mm/memcontrol.c (revision ed84ef1c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27 
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/file.h>
62 #include <linux/tracehook.h>
63 #include <linux/psi.h>
64 #include <linux/seq_buf.h>
65 #include "internal.h"
66 #include <net/sock.h>
67 #include <net/ip.h>
68 #include "slab.h"
69 
70 #include <linux/uaccess.h>
71 
72 #include <trace/events/vmscan.h>
73 
74 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
75 EXPORT_SYMBOL(memory_cgrp_subsys);
76 
77 struct mem_cgroup *root_mem_cgroup __read_mostly;
78 
79 /* Active memory cgroup to use from an interrupt context */
80 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
81 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
82 
83 /* Socket memory accounting disabled? */
84 static bool cgroup_memory_nosocket __ro_after_init;
85 
86 /* Kernel memory accounting disabled? */
87 bool cgroup_memory_nokmem __ro_after_init;
88 
89 /* Whether the swap controller is active */
90 #ifdef CONFIG_MEMCG_SWAP
91 bool cgroup_memory_noswap __ro_after_init;
92 #else
93 #define cgroup_memory_noswap		1
94 #endif
95 
96 #ifdef CONFIG_CGROUP_WRITEBACK
97 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
98 #endif
99 
100 /* Whether legacy memory+swap accounting is active */
101 static bool do_memsw_account(void)
102 {
103 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
104 }
105 
106 /* memcg and lruvec stats flushing */
107 static void flush_memcg_stats_dwork(struct work_struct *w);
108 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
109 static void flush_memcg_stats_work(struct work_struct *w);
110 static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
111 static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
112 static DEFINE_SPINLOCK(stats_flush_lock);
113 
114 #define THRESHOLDS_EVENTS_TARGET 128
115 #define SOFTLIMIT_EVENTS_TARGET 1024
116 
117 /*
118  * Cgroups above their limits are maintained in a RB-Tree, independent of
119  * their hierarchy representation
120  */
121 
122 struct mem_cgroup_tree_per_node {
123 	struct rb_root rb_root;
124 	struct rb_node *rb_rightmost;
125 	spinlock_t lock;
126 };
127 
128 struct mem_cgroup_tree {
129 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
130 };
131 
132 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
133 
134 /* for OOM */
135 struct mem_cgroup_eventfd_list {
136 	struct list_head list;
137 	struct eventfd_ctx *eventfd;
138 };
139 
140 /*
141  * cgroup_event represents events which userspace want to receive.
142  */
143 struct mem_cgroup_event {
144 	/*
145 	 * memcg which the event belongs to.
146 	 */
147 	struct mem_cgroup *memcg;
148 	/*
149 	 * eventfd to signal userspace about the event.
150 	 */
151 	struct eventfd_ctx *eventfd;
152 	/*
153 	 * Each of these stored in a list by the cgroup.
154 	 */
155 	struct list_head list;
156 	/*
157 	 * register_event() callback will be used to add new userspace
158 	 * waiter for changes related to this event.  Use eventfd_signal()
159 	 * on eventfd to send notification to userspace.
160 	 */
161 	int (*register_event)(struct mem_cgroup *memcg,
162 			      struct eventfd_ctx *eventfd, const char *args);
163 	/*
164 	 * unregister_event() callback will be called when userspace closes
165 	 * the eventfd or on cgroup removing.  This callback must be set,
166 	 * if you want provide notification functionality.
167 	 */
168 	void (*unregister_event)(struct mem_cgroup *memcg,
169 				 struct eventfd_ctx *eventfd);
170 	/*
171 	 * All fields below needed to unregister event when
172 	 * userspace closes eventfd.
173 	 */
174 	poll_table pt;
175 	wait_queue_head_t *wqh;
176 	wait_queue_entry_t wait;
177 	struct work_struct remove;
178 };
179 
180 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
181 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
182 
183 /* Stuffs for move charges at task migration. */
184 /*
185  * Types of charges to be moved.
186  */
187 #define MOVE_ANON	0x1U
188 #define MOVE_FILE	0x2U
189 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
190 
191 /* "mc" and its members are protected by cgroup_mutex */
192 static struct move_charge_struct {
193 	spinlock_t	  lock; /* for from, to */
194 	struct mm_struct  *mm;
195 	struct mem_cgroup *from;
196 	struct mem_cgroup *to;
197 	unsigned long flags;
198 	unsigned long precharge;
199 	unsigned long moved_charge;
200 	unsigned long moved_swap;
201 	struct task_struct *moving_task;	/* a task moving charges */
202 	wait_queue_head_t waitq;		/* a waitq for other context */
203 } mc = {
204 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
205 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
206 };
207 
208 /*
209  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
210  * limit reclaim to prevent infinite loops, if they ever occur.
211  */
212 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
213 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
214 
215 /* for encoding cft->private value on file */
216 enum res_type {
217 	_MEM,
218 	_MEMSWAP,
219 	_OOM_TYPE,
220 	_KMEM,
221 	_TCP,
222 };
223 
224 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
225 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
226 #define MEMFILE_ATTR(val)	((val) & 0xffff)
227 /* Used for OOM notifier */
228 #define OOM_CONTROL		(0)
229 
230 /*
231  * Iteration constructs for visiting all cgroups (under a tree).  If
232  * loops are exited prematurely (break), mem_cgroup_iter_break() must
233  * be used for reference counting.
234  */
235 #define for_each_mem_cgroup_tree(iter, root)		\
236 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
237 	     iter != NULL;				\
238 	     iter = mem_cgroup_iter(root, iter, NULL))
239 
240 #define for_each_mem_cgroup(iter)			\
241 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
242 	     iter != NULL;				\
243 	     iter = mem_cgroup_iter(NULL, iter, NULL))
244 
245 static inline bool should_force_charge(void)
246 {
247 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
248 		(current->flags & PF_EXITING);
249 }
250 
251 /* Some nice accessors for the vmpressure. */
252 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
253 {
254 	if (!memcg)
255 		memcg = root_mem_cgroup;
256 	return &memcg->vmpressure;
257 }
258 
259 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
260 {
261 	return container_of(vmpr, struct mem_cgroup, vmpressure);
262 }
263 
264 #ifdef CONFIG_MEMCG_KMEM
265 extern spinlock_t css_set_lock;
266 
267 bool mem_cgroup_kmem_disabled(void)
268 {
269 	return cgroup_memory_nokmem;
270 }
271 
272 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
273 				      unsigned int nr_pages);
274 
275 static void obj_cgroup_release(struct percpu_ref *ref)
276 {
277 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
278 	unsigned int nr_bytes;
279 	unsigned int nr_pages;
280 	unsigned long flags;
281 
282 	/*
283 	 * At this point all allocated objects are freed, and
284 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
285 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
286 	 *
287 	 * The following sequence can lead to it:
288 	 * 1) CPU0: objcg == stock->cached_objcg
289 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
290 	 *          PAGE_SIZE bytes are charged
291 	 * 3) CPU1: a process from another memcg is allocating something,
292 	 *          the stock if flushed,
293 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
294 	 * 5) CPU0: we do release this object,
295 	 *          92 bytes are added to stock->nr_bytes
296 	 * 6) CPU0: stock is flushed,
297 	 *          92 bytes are added to objcg->nr_charged_bytes
298 	 *
299 	 * In the result, nr_charged_bytes == PAGE_SIZE.
300 	 * This page will be uncharged in obj_cgroup_release().
301 	 */
302 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
303 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
304 	nr_pages = nr_bytes >> PAGE_SHIFT;
305 
306 	if (nr_pages)
307 		obj_cgroup_uncharge_pages(objcg, nr_pages);
308 
309 	spin_lock_irqsave(&css_set_lock, flags);
310 	list_del(&objcg->list);
311 	spin_unlock_irqrestore(&css_set_lock, flags);
312 
313 	percpu_ref_exit(ref);
314 	kfree_rcu(objcg, rcu);
315 }
316 
317 static struct obj_cgroup *obj_cgroup_alloc(void)
318 {
319 	struct obj_cgroup *objcg;
320 	int ret;
321 
322 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
323 	if (!objcg)
324 		return NULL;
325 
326 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
327 			      GFP_KERNEL);
328 	if (ret) {
329 		kfree(objcg);
330 		return NULL;
331 	}
332 	INIT_LIST_HEAD(&objcg->list);
333 	return objcg;
334 }
335 
336 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
337 				  struct mem_cgroup *parent)
338 {
339 	struct obj_cgroup *objcg, *iter;
340 
341 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
342 
343 	spin_lock_irq(&css_set_lock);
344 
345 	/* 1) Ready to reparent active objcg. */
346 	list_add(&objcg->list, &memcg->objcg_list);
347 	/* 2) Reparent active objcg and already reparented objcgs to parent. */
348 	list_for_each_entry(iter, &memcg->objcg_list, list)
349 		WRITE_ONCE(iter->memcg, parent);
350 	/* 3) Move already reparented objcgs to the parent's list */
351 	list_splice(&memcg->objcg_list, &parent->objcg_list);
352 
353 	spin_unlock_irq(&css_set_lock);
354 
355 	percpu_ref_kill(&objcg->refcnt);
356 }
357 
358 /*
359  * This will be used as a shrinker list's index.
360  * The main reason for not using cgroup id for this:
361  *  this works better in sparse environments, where we have a lot of memcgs,
362  *  but only a few kmem-limited. Or also, if we have, for instance, 200
363  *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
364  *  200 entry array for that.
365  *
366  * The current size of the caches array is stored in memcg_nr_cache_ids. It
367  * will double each time we have to increase it.
368  */
369 static DEFINE_IDA(memcg_cache_ida);
370 int memcg_nr_cache_ids;
371 
372 /* Protects memcg_nr_cache_ids */
373 static DECLARE_RWSEM(memcg_cache_ids_sem);
374 
375 void memcg_get_cache_ids(void)
376 {
377 	down_read(&memcg_cache_ids_sem);
378 }
379 
380 void memcg_put_cache_ids(void)
381 {
382 	up_read(&memcg_cache_ids_sem);
383 }
384 
385 /*
386  * MIN_SIZE is different than 1, because we would like to avoid going through
387  * the alloc/free process all the time. In a small machine, 4 kmem-limited
388  * cgroups is a reasonable guess. In the future, it could be a parameter or
389  * tunable, but that is strictly not necessary.
390  *
391  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
392  * this constant directly from cgroup, but it is understandable that this is
393  * better kept as an internal representation in cgroup.c. In any case, the
394  * cgrp_id space is not getting any smaller, and we don't have to necessarily
395  * increase ours as well if it increases.
396  */
397 #define MEMCG_CACHES_MIN_SIZE 4
398 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
399 
400 /*
401  * A lot of the calls to the cache allocation functions are expected to be
402  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
403  * conditional to this static branch, we'll have to allow modules that does
404  * kmem_cache_alloc and the such to see this symbol as well
405  */
406 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
407 EXPORT_SYMBOL(memcg_kmem_enabled_key);
408 #endif
409 
410 /**
411  * mem_cgroup_css_from_page - css of the memcg associated with a page
412  * @page: page of interest
413  *
414  * If memcg is bound to the default hierarchy, css of the memcg associated
415  * with @page is returned.  The returned css remains associated with @page
416  * until it is released.
417  *
418  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
419  * is returned.
420  */
421 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
422 {
423 	struct mem_cgroup *memcg;
424 
425 	memcg = page_memcg(page);
426 
427 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
428 		memcg = root_mem_cgroup;
429 
430 	return &memcg->css;
431 }
432 
433 /**
434  * page_cgroup_ino - return inode number of the memcg a page is charged to
435  * @page: the page
436  *
437  * Look up the closest online ancestor of the memory cgroup @page is charged to
438  * and return its inode number or 0 if @page is not charged to any cgroup. It
439  * is safe to call this function without holding a reference to @page.
440  *
441  * Note, this function is inherently racy, because there is nothing to prevent
442  * the cgroup inode from getting torn down and potentially reallocated a moment
443  * after page_cgroup_ino() returns, so it only should be used by callers that
444  * do not care (such as procfs interfaces).
445  */
446 ino_t page_cgroup_ino(struct page *page)
447 {
448 	struct mem_cgroup *memcg;
449 	unsigned long ino = 0;
450 
451 	rcu_read_lock();
452 	memcg = page_memcg_check(page);
453 
454 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
455 		memcg = parent_mem_cgroup(memcg);
456 	if (memcg)
457 		ino = cgroup_ino(memcg->css.cgroup);
458 	rcu_read_unlock();
459 	return ino;
460 }
461 
462 static struct mem_cgroup_per_node *
463 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
464 {
465 	int nid = page_to_nid(page);
466 
467 	return memcg->nodeinfo[nid];
468 }
469 
470 static struct mem_cgroup_tree_per_node *
471 soft_limit_tree_node(int nid)
472 {
473 	return soft_limit_tree.rb_tree_per_node[nid];
474 }
475 
476 static struct mem_cgroup_tree_per_node *
477 soft_limit_tree_from_page(struct page *page)
478 {
479 	int nid = page_to_nid(page);
480 
481 	return soft_limit_tree.rb_tree_per_node[nid];
482 }
483 
484 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
485 					 struct mem_cgroup_tree_per_node *mctz,
486 					 unsigned long new_usage_in_excess)
487 {
488 	struct rb_node **p = &mctz->rb_root.rb_node;
489 	struct rb_node *parent = NULL;
490 	struct mem_cgroup_per_node *mz_node;
491 	bool rightmost = true;
492 
493 	if (mz->on_tree)
494 		return;
495 
496 	mz->usage_in_excess = new_usage_in_excess;
497 	if (!mz->usage_in_excess)
498 		return;
499 	while (*p) {
500 		parent = *p;
501 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
502 					tree_node);
503 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
504 			p = &(*p)->rb_left;
505 			rightmost = false;
506 		} else {
507 			p = &(*p)->rb_right;
508 		}
509 	}
510 
511 	if (rightmost)
512 		mctz->rb_rightmost = &mz->tree_node;
513 
514 	rb_link_node(&mz->tree_node, parent, p);
515 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
516 	mz->on_tree = true;
517 }
518 
519 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
520 					 struct mem_cgroup_tree_per_node *mctz)
521 {
522 	if (!mz->on_tree)
523 		return;
524 
525 	if (&mz->tree_node == mctz->rb_rightmost)
526 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
527 
528 	rb_erase(&mz->tree_node, &mctz->rb_root);
529 	mz->on_tree = false;
530 }
531 
532 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
533 				       struct mem_cgroup_tree_per_node *mctz)
534 {
535 	unsigned long flags;
536 
537 	spin_lock_irqsave(&mctz->lock, flags);
538 	__mem_cgroup_remove_exceeded(mz, mctz);
539 	spin_unlock_irqrestore(&mctz->lock, flags);
540 }
541 
542 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
543 {
544 	unsigned long nr_pages = page_counter_read(&memcg->memory);
545 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
546 	unsigned long excess = 0;
547 
548 	if (nr_pages > soft_limit)
549 		excess = nr_pages - soft_limit;
550 
551 	return excess;
552 }
553 
554 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
555 {
556 	unsigned long excess;
557 	struct mem_cgroup_per_node *mz;
558 	struct mem_cgroup_tree_per_node *mctz;
559 
560 	mctz = soft_limit_tree_from_page(page);
561 	if (!mctz)
562 		return;
563 	/*
564 	 * Necessary to update all ancestors when hierarchy is used.
565 	 * because their event counter is not touched.
566 	 */
567 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
568 		mz = mem_cgroup_page_nodeinfo(memcg, page);
569 		excess = soft_limit_excess(memcg);
570 		/*
571 		 * We have to update the tree if mz is on RB-tree or
572 		 * mem is over its softlimit.
573 		 */
574 		if (excess || mz->on_tree) {
575 			unsigned long flags;
576 
577 			spin_lock_irqsave(&mctz->lock, flags);
578 			/* if on-tree, remove it */
579 			if (mz->on_tree)
580 				__mem_cgroup_remove_exceeded(mz, mctz);
581 			/*
582 			 * Insert again. mz->usage_in_excess will be updated.
583 			 * If excess is 0, no tree ops.
584 			 */
585 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
586 			spin_unlock_irqrestore(&mctz->lock, flags);
587 		}
588 	}
589 }
590 
591 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
592 {
593 	struct mem_cgroup_tree_per_node *mctz;
594 	struct mem_cgroup_per_node *mz;
595 	int nid;
596 
597 	for_each_node(nid) {
598 		mz = memcg->nodeinfo[nid];
599 		mctz = soft_limit_tree_node(nid);
600 		if (mctz)
601 			mem_cgroup_remove_exceeded(mz, mctz);
602 	}
603 }
604 
605 static struct mem_cgroup_per_node *
606 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
607 {
608 	struct mem_cgroup_per_node *mz;
609 
610 retry:
611 	mz = NULL;
612 	if (!mctz->rb_rightmost)
613 		goto done;		/* Nothing to reclaim from */
614 
615 	mz = rb_entry(mctz->rb_rightmost,
616 		      struct mem_cgroup_per_node, tree_node);
617 	/*
618 	 * Remove the node now but someone else can add it back,
619 	 * we will to add it back at the end of reclaim to its correct
620 	 * position in the tree.
621 	 */
622 	__mem_cgroup_remove_exceeded(mz, mctz);
623 	if (!soft_limit_excess(mz->memcg) ||
624 	    !css_tryget(&mz->memcg->css))
625 		goto retry;
626 done:
627 	return mz;
628 }
629 
630 static struct mem_cgroup_per_node *
631 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
632 {
633 	struct mem_cgroup_per_node *mz;
634 
635 	spin_lock_irq(&mctz->lock);
636 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
637 	spin_unlock_irq(&mctz->lock);
638 	return mz;
639 }
640 
641 /**
642  * __mod_memcg_state - update cgroup memory statistics
643  * @memcg: the memory cgroup
644  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
645  * @val: delta to add to the counter, can be negative
646  */
647 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
648 {
649 	if (mem_cgroup_disabled())
650 		return;
651 
652 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
653 	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
654 }
655 
656 /* idx can be of type enum memcg_stat_item or node_stat_item. */
657 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
658 {
659 	long x = 0;
660 	int cpu;
661 
662 	for_each_possible_cpu(cpu)
663 		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
664 #ifdef CONFIG_SMP
665 	if (x < 0)
666 		x = 0;
667 #endif
668 	return x;
669 }
670 
671 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
672 			      int val)
673 {
674 	struct mem_cgroup_per_node *pn;
675 	struct mem_cgroup *memcg;
676 
677 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
678 	memcg = pn->memcg;
679 
680 	/* Update memcg */
681 	__mod_memcg_state(memcg, idx, val);
682 
683 	/* Update lruvec */
684 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
685 	if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
686 		queue_work(system_unbound_wq, &stats_flush_work);
687 }
688 
689 /**
690  * __mod_lruvec_state - update lruvec memory statistics
691  * @lruvec: the lruvec
692  * @idx: the stat item
693  * @val: delta to add to the counter, can be negative
694  *
695  * The lruvec is the intersection of the NUMA node and a cgroup. This
696  * function updates the all three counters that are affected by a
697  * change of state at this level: per-node, per-cgroup, per-lruvec.
698  */
699 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
700 			int val)
701 {
702 	/* Update node */
703 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
704 
705 	/* Update memcg and lruvec */
706 	if (!mem_cgroup_disabled())
707 		__mod_memcg_lruvec_state(lruvec, idx, val);
708 }
709 
710 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
711 			     int val)
712 {
713 	struct page *head = compound_head(page); /* rmap on tail pages */
714 	struct mem_cgroup *memcg;
715 	pg_data_t *pgdat = page_pgdat(page);
716 	struct lruvec *lruvec;
717 
718 	rcu_read_lock();
719 	memcg = page_memcg(head);
720 	/* Untracked pages have no memcg, no lruvec. Update only the node */
721 	if (!memcg) {
722 		rcu_read_unlock();
723 		__mod_node_page_state(pgdat, idx, val);
724 		return;
725 	}
726 
727 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
728 	__mod_lruvec_state(lruvec, idx, val);
729 	rcu_read_unlock();
730 }
731 EXPORT_SYMBOL(__mod_lruvec_page_state);
732 
733 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
734 {
735 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
736 	struct mem_cgroup *memcg;
737 	struct lruvec *lruvec;
738 
739 	rcu_read_lock();
740 	memcg = mem_cgroup_from_obj(p);
741 
742 	/*
743 	 * Untracked pages have no memcg, no lruvec. Update only the
744 	 * node. If we reparent the slab objects to the root memcg,
745 	 * when we free the slab object, we need to update the per-memcg
746 	 * vmstats to keep it correct for the root memcg.
747 	 */
748 	if (!memcg) {
749 		__mod_node_page_state(pgdat, idx, val);
750 	} else {
751 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
752 		__mod_lruvec_state(lruvec, idx, val);
753 	}
754 	rcu_read_unlock();
755 }
756 
757 /*
758  * mod_objcg_mlstate() may be called with irq enabled, so
759  * mod_memcg_lruvec_state() should be used.
760  */
761 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
762 				     struct pglist_data *pgdat,
763 				     enum node_stat_item idx, int nr)
764 {
765 	struct mem_cgroup *memcg;
766 	struct lruvec *lruvec;
767 
768 	rcu_read_lock();
769 	memcg = obj_cgroup_memcg(objcg);
770 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
771 	mod_memcg_lruvec_state(lruvec, idx, nr);
772 	rcu_read_unlock();
773 }
774 
775 /**
776  * __count_memcg_events - account VM events in a cgroup
777  * @memcg: the memory cgroup
778  * @idx: the event item
779  * @count: the number of events that occurred
780  */
781 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
782 			  unsigned long count)
783 {
784 	if (mem_cgroup_disabled())
785 		return;
786 
787 	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
788 	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
789 }
790 
791 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
792 {
793 	return READ_ONCE(memcg->vmstats.events[event]);
794 }
795 
796 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
797 {
798 	long x = 0;
799 	int cpu;
800 
801 	for_each_possible_cpu(cpu)
802 		x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
803 	return x;
804 }
805 
806 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
807 					 struct page *page,
808 					 int nr_pages)
809 {
810 	/* pagein of a big page is an event. So, ignore page size */
811 	if (nr_pages > 0)
812 		__count_memcg_events(memcg, PGPGIN, 1);
813 	else {
814 		__count_memcg_events(memcg, PGPGOUT, 1);
815 		nr_pages = -nr_pages; /* for event */
816 	}
817 
818 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
819 }
820 
821 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
822 				       enum mem_cgroup_events_target target)
823 {
824 	unsigned long val, next;
825 
826 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
827 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
828 	/* from time_after() in jiffies.h */
829 	if ((long)(next - val) < 0) {
830 		switch (target) {
831 		case MEM_CGROUP_TARGET_THRESH:
832 			next = val + THRESHOLDS_EVENTS_TARGET;
833 			break;
834 		case MEM_CGROUP_TARGET_SOFTLIMIT:
835 			next = val + SOFTLIMIT_EVENTS_TARGET;
836 			break;
837 		default:
838 			break;
839 		}
840 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
841 		return true;
842 	}
843 	return false;
844 }
845 
846 /*
847  * Check events in order.
848  *
849  */
850 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
851 {
852 	/* threshold event is triggered in finer grain than soft limit */
853 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
854 						MEM_CGROUP_TARGET_THRESH))) {
855 		bool do_softlimit;
856 
857 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
858 						MEM_CGROUP_TARGET_SOFTLIMIT);
859 		mem_cgroup_threshold(memcg);
860 		if (unlikely(do_softlimit))
861 			mem_cgroup_update_tree(memcg, page);
862 	}
863 }
864 
865 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
866 {
867 	/*
868 	 * mm_update_next_owner() may clear mm->owner to NULL
869 	 * if it races with swapoff, page migration, etc.
870 	 * So this can be called with p == NULL.
871 	 */
872 	if (unlikely(!p))
873 		return NULL;
874 
875 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
876 }
877 EXPORT_SYMBOL(mem_cgroup_from_task);
878 
879 static __always_inline struct mem_cgroup *active_memcg(void)
880 {
881 	if (!in_task())
882 		return this_cpu_read(int_active_memcg);
883 	else
884 		return current->active_memcg;
885 }
886 
887 /**
888  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
889  * @mm: mm from which memcg should be extracted. It can be NULL.
890  *
891  * Obtain a reference on mm->memcg and returns it if successful. If mm
892  * is NULL, then the memcg is chosen as follows:
893  * 1) The active memcg, if set.
894  * 2) current->mm->memcg, if available
895  * 3) root memcg
896  * If mem_cgroup is disabled, NULL is returned.
897  */
898 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
899 {
900 	struct mem_cgroup *memcg;
901 
902 	if (mem_cgroup_disabled())
903 		return NULL;
904 
905 	/*
906 	 * Page cache insertions can happen without an
907 	 * actual mm context, e.g. during disk probing
908 	 * on boot, loopback IO, acct() writes etc.
909 	 *
910 	 * No need to css_get on root memcg as the reference
911 	 * counting is disabled on the root level in the
912 	 * cgroup core. See CSS_NO_REF.
913 	 */
914 	if (unlikely(!mm)) {
915 		memcg = active_memcg();
916 		if (unlikely(memcg)) {
917 			/* remote memcg must hold a ref */
918 			css_get(&memcg->css);
919 			return memcg;
920 		}
921 		mm = current->mm;
922 		if (unlikely(!mm))
923 			return root_mem_cgroup;
924 	}
925 
926 	rcu_read_lock();
927 	do {
928 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
929 		if (unlikely(!memcg))
930 			memcg = root_mem_cgroup;
931 	} while (!css_tryget(&memcg->css));
932 	rcu_read_unlock();
933 	return memcg;
934 }
935 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
936 
937 static __always_inline bool memcg_kmem_bypass(void)
938 {
939 	/* Allow remote memcg charging from any context. */
940 	if (unlikely(active_memcg()))
941 		return false;
942 
943 	/* Memcg to charge can't be determined. */
944 	if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
945 		return true;
946 
947 	return false;
948 }
949 
950 /**
951  * mem_cgroup_iter - iterate over memory cgroup hierarchy
952  * @root: hierarchy root
953  * @prev: previously returned memcg, NULL on first invocation
954  * @reclaim: cookie for shared reclaim walks, NULL for full walks
955  *
956  * Returns references to children of the hierarchy below @root, or
957  * @root itself, or %NULL after a full round-trip.
958  *
959  * Caller must pass the return value in @prev on subsequent
960  * invocations for reference counting, or use mem_cgroup_iter_break()
961  * to cancel a hierarchy walk before the round-trip is complete.
962  *
963  * Reclaimers can specify a node in @reclaim to divide up the memcgs
964  * in the hierarchy among all concurrent reclaimers operating on the
965  * same node.
966  */
967 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
968 				   struct mem_cgroup *prev,
969 				   struct mem_cgroup_reclaim_cookie *reclaim)
970 {
971 	struct mem_cgroup_reclaim_iter *iter;
972 	struct cgroup_subsys_state *css = NULL;
973 	struct mem_cgroup *memcg = NULL;
974 	struct mem_cgroup *pos = NULL;
975 
976 	if (mem_cgroup_disabled())
977 		return NULL;
978 
979 	if (!root)
980 		root = root_mem_cgroup;
981 
982 	if (prev && !reclaim)
983 		pos = prev;
984 
985 	rcu_read_lock();
986 
987 	if (reclaim) {
988 		struct mem_cgroup_per_node *mz;
989 
990 		mz = root->nodeinfo[reclaim->pgdat->node_id];
991 		iter = &mz->iter;
992 
993 		if (prev && reclaim->generation != iter->generation)
994 			goto out_unlock;
995 
996 		while (1) {
997 			pos = READ_ONCE(iter->position);
998 			if (!pos || css_tryget(&pos->css))
999 				break;
1000 			/*
1001 			 * css reference reached zero, so iter->position will
1002 			 * be cleared by ->css_released. However, we should not
1003 			 * rely on this happening soon, because ->css_released
1004 			 * is called from a work queue, and by busy-waiting we
1005 			 * might block it. So we clear iter->position right
1006 			 * away.
1007 			 */
1008 			(void)cmpxchg(&iter->position, pos, NULL);
1009 		}
1010 	}
1011 
1012 	if (pos)
1013 		css = &pos->css;
1014 
1015 	for (;;) {
1016 		css = css_next_descendant_pre(css, &root->css);
1017 		if (!css) {
1018 			/*
1019 			 * Reclaimers share the hierarchy walk, and a
1020 			 * new one might jump in right at the end of
1021 			 * the hierarchy - make sure they see at least
1022 			 * one group and restart from the beginning.
1023 			 */
1024 			if (!prev)
1025 				continue;
1026 			break;
1027 		}
1028 
1029 		/*
1030 		 * Verify the css and acquire a reference.  The root
1031 		 * is provided by the caller, so we know it's alive
1032 		 * and kicking, and don't take an extra reference.
1033 		 */
1034 		memcg = mem_cgroup_from_css(css);
1035 
1036 		if (css == &root->css)
1037 			break;
1038 
1039 		if (css_tryget(css))
1040 			break;
1041 
1042 		memcg = NULL;
1043 	}
1044 
1045 	if (reclaim) {
1046 		/*
1047 		 * The position could have already been updated by a competing
1048 		 * thread, so check that the value hasn't changed since we read
1049 		 * it to avoid reclaiming from the same cgroup twice.
1050 		 */
1051 		(void)cmpxchg(&iter->position, pos, memcg);
1052 
1053 		if (pos)
1054 			css_put(&pos->css);
1055 
1056 		if (!memcg)
1057 			iter->generation++;
1058 		else if (!prev)
1059 			reclaim->generation = iter->generation;
1060 	}
1061 
1062 out_unlock:
1063 	rcu_read_unlock();
1064 	if (prev && prev != root)
1065 		css_put(&prev->css);
1066 
1067 	return memcg;
1068 }
1069 
1070 /**
1071  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1072  * @root: hierarchy root
1073  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1074  */
1075 void mem_cgroup_iter_break(struct mem_cgroup *root,
1076 			   struct mem_cgroup *prev)
1077 {
1078 	if (!root)
1079 		root = root_mem_cgroup;
1080 	if (prev && prev != root)
1081 		css_put(&prev->css);
1082 }
1083 
1084 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1085 					struct mem_cgroup *dead_memcg)
1086 {
1087 	struct mem_cgroup_reclaim_iter *iter;
1088 	struct mem_cgroup_per_node *mz;
1089 	int nid;
1090 
1091 	for_each_node(nid) {
1092 		mz = from->nodeinfo[nid];
1093 		iter = &mz->iter;
1094 		cmpxchg(&iter->position, dead_memcg, NULL);
1095 	}
1096 }
1097 
1098 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1099 {
1100 	struct mem_cgroup *memcg = dead_memcg;
1101 	struct mem_cgroup *last;
1102 
1103 	do {
1104 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1105 		last = memcg;
1106 	} while ((memcg = parent_mem_cgroup(memcg)));
1107 
1108 	/*
1109 	 * When cgruop1 non-hierarchy mode is used,
1110 	 * parent_mem_cgroup() does not walk all the way up to the
1111 	 * cgroup root (root_mem_cgroup). So we have to handle
1112 	 * dead_memcg from cgroup root separately.
1113 	 */
1114 	if (last != root_mem_cgroup)
1115 		__invalidate_reclaim_iterators(root_mem_cgroup,
1116 						dead_memcg);
1117 }
1118 
1119 /**
1120  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1121  * @memcg: hierarchy root
1122  * @fn: function to call for each task
1123  * @arg: argument passed to @fn
1124  *
1125  * This function iterates over tasks attached to @memcg or to any of its
1126  * descendants and calls @fn for each task. If @fn returns a non-zero
1127  * value, the function breaks the iteration loop and returns the value.
1128  * Otherwise, it will iterate over all tasks and return 0.
1129  *
1130  * This function must not be called for the root memory cgroup.
1131  */
1132 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1133 			  int (*fn)(struct task_struct *, void *), void *arg)
1134 {
1135 	struct mem_cgroup *iter;
1136 	int ret = 0;
1137 
1138 	BUG_ON(memcg == root_mem_cgroup);
1139 
1140 	for_each_mem_cgroup_tree(iter, memcg) {
1141 		struct css_task_iter it;
1142 		struct task_struct *task;
1143 
1144 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1145 		while (!ret && (task = css_task_iter_next(&it)))
1146 			ret = fn(task, arg);
1147 		css_task_iter_end(&it);
1148 		if (ret) {
1149 			mem_cgroup_iter_break(memcg, iter);
1150 			break;
1151 		}
1152 	}
1153 	return ret;
1154 }
1155 
1156 #ifdef CONFIG_DEBUG_VM
1157 void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
1158 {
1159 	struct mem_cgroup *memcg;
1160 
1161 	if (mem_cgroup_disabled())
1162 		return;
1163 
1164 	memcg = page_memcg(page);
1165 
1166 	if (!memcg)
1167 		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
1168 	else
1169 		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
1170 }
1171 #endif
1172 
1173 /**
1174  * lock_page_lruvec - lock and return lruvec for a given page.
1175  * @page: the page
1176  *
1177  * These functions are safe to use under any of the following conditions:
1178  * - page locked
1179  * - PageLRU cleared
1180  * - lock_page_memcg()
1181  * - page->_refcount is zero
1182  */
1183 struct lruvec *lock_page_lruvec(struct page *page)
1184 {
1185 	struct lruvec *lruvec;
1186 
1187 	lruvec = mem_cgroup_page_lruvec(page);
1188 	spin_lock(&lruvec->lru_lock);
1189 
1190 	lruvec_memcg_debug(lruvec, page);
1191 
1192 	return lruvec;
1193 }
1194 
1195 struct lruvec *lock_page_lruvec_irq(struct page *page)
1196 {
1197 	struct lruvec *lruvec;
1198 
1199 	lruvec = mem_cgroup_page_lruvec(page);
1200 	spin_lock_irq(&lruvec->lru_lock);
1201 
1202 	lruvec_memcg_debug(lruvec, page);
1203 
1204 	return lruvec;
1205 }
1206 
1207 struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
1208 {
1209 	struct lruvec *lruvec;
1210 
1211 	lruvec = mem_cgroup_page_lruvec(page);
1212 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1213 
1214 	lruvec_memcg_debug(lruvec, page);
1215 
1216 	return lruvec;
1217 }
1218 
1219 /**
1220  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1221  * @lruvec: mem_cgroup per zone lru vector
1222  * @lru: index of lru list the page is sitting on
1223  * @zid: zone id of the accounted pages
1224  * @nr_pages: positive when adding or negative when removing
1225  *
1226  * This function must be called under lru_lock, just before a page is added
1227  * to or just after a page is removed from an lru list (that ordering being
1228  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1229  */
1230 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1231 				int zid, int nr_pages)
1232 {
1233 	struct mem_cgroup_per_node *mz;
1234 	unsigned long *lru_size;
1235 	long size;
1236 
1237 	if (mem_cgroup_disabled())
1238 		return;
1239 
1240 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1241 	lru_size = &mz->lru_zone_size[zid][lru];
1242 
1243 	if (nr_pages < 0)
1244 		*lru_size += nr_pages;
1245 
1246 	size = *lru_size;
1247 	if (WARN_ONCE(size < 0,
1248 		"%s(%p, %d, %d): lru_size %ld\n",
1249 		__func__, lruvec, lru, nr_pages, size)) {
1250 		VM_BUG_ON(1);
1251 		*lru_size = 0;
1252 	}
1253 
1254 	if (nr_pages > 0)
1255 		*lru_size += nr_pages;
1256 }
1257 
1258 /**
1259  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1260  * @memcg: the memory cgroup
1261  *
1262  * Returns the maximum amount of memory @mem can be charged with, in
1263  * pages.
1264  */
1265 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1266 {
1267 	unsigned long margin = 0;
1268 	unsigned long count;
1269 	unsigned long limit;
1270 
1271 	count = page_counter_read(&memcg->memory);
1272 	limit = READ_ONCE(memcg->memory.max);
1273 	if (count < limit)
1274 		margin = limit - count;
1275 
1276 	if (do_memsw_account()) {
1277 		count = page_counter_read(&memcg->memsw);
1278 		limit = READ_ONCE(memcg->memsw.max);
1279 		if (count < limit)
1280 			margin = min(margin, limit - count);
1281 		else
1282 			margin = 0;
1283 	}
1284 
1285 	return margin;
1286 }
1287 
1288 /*
1289  * A routine for checking "mem" is under move_account() or not.
1290  *
1291  * Checking a cgroup is mc.from or mc.to or under hierarchy of
1292  * moving cgroups. This is for waiting at high-memory pressure
1293  * caused by "move".
1294  */
1295 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1296 {
1297 	struct mem_cgroup *from;
1298 	struct mem_cgroup *to;
1299 	bool ret = false;
1300 	/*
1301 	 * Unlike task_move routines, we access mc.to, mc.from not under
1302 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1303 	 */
1304 	spin_lock(&mc.lock);
1305 	from = mc.from;
1306 	to = mc.to;
1307 	if (!from)
1308 		goto unlock;
1309 
1310 	ret = mem_cgroup_is_descendant(from, memcg) ||
1311 		mem_cgroup_is_descendant(to, memcg);
1312 unlock:
1313 	spin_unlock(&mc.lock);
1314 	return ret;
1315 }
1316 
1317 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1318 {
1319 	if (mc.moving_task && current != mc.moving_task) {
1320 		if (mem_cgroup_under_move(memcg)) {
1321 			DEFINE_WAIT(wait);
1322 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1323 			/* moving charge context might have finished. */
1324 			if (mc.moving_task)
1325 				schedule();
1326 			finish_wait(&mc.waitq, &wait);
1327 			return true;
1328 		}
1329 	}
1330 	return false;
1331 }
1332 
1333 struct memory_stat {
1334 	const char *name;
1335 	unsigned int idx;
1336 };
1337 
1338 static const struct memory_stat memory_stats[] = {
1339 	{ "anon",			NR_ANON_MAPPED			},
1340 	{ "file",			NR_FILE_PAGES			},
1341 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1342 	{ "pagetables",			NR_PAGETABLE			},
1343 	{ "percpu",			MEMCG_PERCPU_B			},
1344 	{ "sock",			MEMCG_SOCK			},
1345 	{ "shmem",			NR_SHMEM			},
1346 	{ "file_mapped",		NR_FILE_MAPPED			},
1347 	{ "file_dirty",			NR_FILE_DIRTY			},
1348 	{ "file_writeback",		NR_WRITEBACK			},
1349 #ifdef CONFIG_SWAP
1350 	{ "swapcached",			NR_SWAPCACHE			},
1351 #endif
1352 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1353 	{ "anon_thp",			NR_ANON_THPS			},
1354 	{ "file_thp",			NR_FILE_THPS			},
1355 	{ "shmem_thp",			NR_SHMEM_THPS			},
1356 #endif
1357 	{ "inactive_anon",		NR_INACTIVE_ANON		},
1358 	{ "active_anon",		NR_ACTIVE_ANON			},
1359 	{ "inactive_file",		NR_INACTIVE_FILE		},
1360 	{ "active_file",		NR_ACTIVE_FILE			},
1361 	{ "unevictable",		NR_UNEVICTABLE			},
1362 	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1363 	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1364 
1365 	/* The memory events */
1366 	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1367 	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1368 	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1369 	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1370 	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1371 	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1372 	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1373 };
1374 
1375 /* Translate stat items to the correct unit for memory.stat output */
1376 static int memcg_page_state_unit(int item)
1377 {
1378 	switch (item) {
1379 	case MEMCG_PERCPU_B:
1380 	case NR_SLAB_RECLAIMABLE_B:
1381 	case NR_SLAB_UNRECLAIMABLE_B:
1382 	case WORKINGSET_REFAULT_ANON:
1383 	case WORKINGSET_REFAULT_FILE:
1384 	case WORKINGSET_ACTIVATE_ANON:
1385 	case WORKINGSET_ACTIVATE_FILE:
1386 	case WORKINGSET_RESTORE_ANON:
1387 	case WORKINGSET_RESTORE_FILE:
1388 	case WORKINGSET_NODERECLAIM:
1389 		return 1;
1390 	case NR_KERNEL_STACK_KB:
1391 		return SZ_1K;
1392 	default:
1393 		return PAGE_SIZE;
1394 	}
1395 }
1396 
1397 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1398 						    int item)
1399 {
1400 	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1401 }
1402 
1403 static char *memory_stat_format(struct mem_cgroup *memcg)
1404 {
1405 	struct seq_buf s;
1406 	int i;
1407 
1408 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1409 	if (!s.buffer)
1410 		return NULL;
1411 
1412 	/*
1413 	 * Provide statistics on the state of the memory subsystem as
1414 	 * well as cumulative event counters that show past behavior.
1415 	 *
1416 	 * This list is ordered following a combination of these gradients:
1417 	 * 1) generic big picture -> specifics and details
1418 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1419 	 *
1420 	 * Current memory state:
1421 	 */
1422 	cgroup_rstat_flush(memcg->css.cgroup);
1423 
1424 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1425 		u64 size;
1426 
1427 		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1428 		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1429 
1430 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1431 			size += memcg_page_state_output(memcg,
1432 							NR_SLAB_RECLAIMABLE_B);
1433 			seq_buf_printf(&s, "slab %llu\n", size);
1434 		}
1435 	}
1436 
1437 	/* Accumulated memory events */
1438 
1439 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1440 		       memcg_events(memcg, PGFAULT));
1441 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1442 		       memcg_events(memcg, PGMAJFAULT));
1443 	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1444 		       memcg_events(memcg, PGREFILL));
1445 	seq_buf_printf(&s, "pgscan %lu\n",
1446 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1447 		       memcg_events(memcg, PGSCAN_DIRECT));
1448 	seq_buf_printf(&s, "pgsteal %lu\n",
1449 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1450 		       memcg_events(memcg, PGSTEAL_DIRECT));
1451 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1452 		       memcg_events(memcg, PGACTIVATE));
1453 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1454 		       memcg_events(memcg, PGDEACTIVATE));
1455 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1456 		       memcg_events(memcg, PGLAZYFREE));
1457 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1458 		       memcg_events(memcg, PGLAZYFREED));
1459 
1460 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1461 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1462 		       memcg_events(memcg, THP_FAULT_ALLOC));
1463 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1464 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1465 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1466 
1467 	/* The above should easily fit into one page */
1468 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1469 
1470 	return s.buffer;
1471 }
1472 
1473 #define K(x) ((x) << (PAGE_SHIFT-10))
1474 /**
1475  * mem_cgroup_print_oom_context: Print OOM information relevant to
1476  * memory controller.
1477  * @memcg: The memory cgroup that went over limit
1478  * @p: Task that is going to be killed
1479  *
1480  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1481  * enabled
1482  */
1483 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1484 {
1485 	rcu_read_lock();
1486 
1487 	if (memcg) {
1488 		pr_cont(",oom_memcg=");
1489 		pr_cont_cgroup_path(memcg->css.cgroup);
1490 	} else
1491 		pr_cont(",global_oom");
1492 	if (p) {
1493 		pr_cont(",task_memcg=");
1494 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1495 	}
1496 	rcu_read_unlock();
1497 }
1498 
1499 /**
1500  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1501  * memory controller.
1502  * @memcg: The memory cgroup that went over limit
1503  */
1504 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1505 {
1506 	char *buf;
1507 
1508 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1509 		K((u64)page_counter_read(&memcg->memory)),
1510 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1511 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1512 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1513 			K((u64)page_counter_read(&memcg->swap)),
1514 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1515 	else {
1516 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1517 			K((u64)page_counter_read(&memcg->memsw)),
1518 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1519 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1520 			K((u64)page_counter_read(&memcg->kmem)),
1521 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1522 	}
1523 
1524 	pr_info("Memory cgroup stats for ");
1525 	pr_cont_cgroup_path(memcg->css.cgroup);
1526 	pr_cont(":");
1527 	buf = memory_stat_format(memcg);
1528 	if (!buf)
1529 		return;
1530 	pr_info("%s", buf);
1531 	kfree(buf);
1532 }
1533 
1534 /*
1535  * Return the memory (and swap, if configured) limit for a memcg.
1536  */
1537 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1538 {
1539 	unsigned long max = READ_ONCE(memcg->memory.max);
1540 
1541 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1542 		if (mem_cgroup_swappiness(memcg))
1543 			max += min(READ_ONCE(memcg->swap.max),
1544 				   (unsigned long)total_swap_pages);
1545 	} else { /* v1 */
1546 		if (mem_cgroup_swappiness(memcg)) {
1547 			/* Calculate swap excess capacity from memsw limit */
1548 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1549 
1550 			max += min(swap, (unsigned long)total_swap_pages);
1551 		}
1552 	}
1553 	return max;
1554 }
1555 
1556 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1557 {
1558 	return page_counter_read(&memcg->memory);
1559 }
1560 
1561 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1562 				     int order)
1563 {
1564 	struct oom_control oc = {
1565 		.zonelist = NULL,
1566 		.nodemask = NULL,
1567 		.memcg = memcg,
1568 		.gfp_mask = gfp_mask,
1569 		.order = order,
1570 	};
1571 	bool ret = true;
1572 
1573 	if (mutex_lock_killable(&oom_lock))
1574 		return true;
1575 
1576 	if (mem_cgroup_margin(memcg) >= (1 << order))
1577 		goto unlock;
1578 
1579 	/*
1580 	 * A few threads which were not waiting at mutex_lock_killable() can
1581 	 * fail to bail out. Therefore, check again after holding oom_lock.
1582 	 */
1583 	ret = should_force_charge() || out_of_memory(&oc);
1584 
1585 unlock:
1586 	mutex_unlock(&oom_lock);
1587 	return ret;
1588 }
1589 
1590 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1591 				   pg_data_t *pgdat,
1592 				   gfp_t gfp_mask,
1593 				   unsigned long *total_scanned)
1594 {
1595 	struct mem_cgroup *victim = NULL;
1596 	int total = 0;
1597 	int loop = 0;
1598 	unsigned long excess;
1599 	unsigned long nr_scanned;
1600 	struct mem_cgroup_reclaim_cookie reclaim = {
1601 		.pgdat = pgdat,
1602 	};
1603 
1604 	excess = soft_limit_excess(root_memcg);
1605 
1606 	while (1) {
1607 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1608 		if (!victim) {
1609 			loop++;
1610 			if (loop >= 2) {
1611 				/*
1612 				 * If we have not been able to reclaim
1613 				 * anything, it might because there are
1614 				 * no reclaimable pages under this hierarchy
1615 				 */
1616 				if (!total)
1617 					break;
1618 				/*
1619 				 * We want to do more targeted reclaim.
1620 				 * excess >> 2 is not to excessive so as to
1621 				 * reclaim too much, nor too less that we keep
1622 				 * coming back to reclaim from this cgroup
1623 				 */
1624 				if (total >= (excess >> 2) ||
1625 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1626 					break;
1627 			}
1628 			continue;
1629 		}
1630 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1631 					pgdat, &nr_scanned);
1632 		*total_scanned += nr_scanned;
1633 		if (!soft_limit_excess(root_memcg))
1634 			break;
1635 	}
1636 	mem_cgroup_iter_break(root_memcg, victim);
1637 	return total;
1638 }
1639 
1640 #ifdef CONFIG_LOCKDEP
1641 static struct lockdep_map memcg_oom_lock_dep_map = {
1642 	.name = "memcg_oom_lock",
1643 };
1644 #endif
1645 
1646 static DEFINE_SPINLOCK(memcg_oom_lock);
1647 
1648 /*
1649  * Check OOM-Killer is already running under our hierarchy.
1650  * If someone is running, return false.
1651  */
1652 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1653 {
1654 	struct mem_cgroup *iter, *failed = NULL;
1655 
1656 	spin_lock(&memcg_oom_lock);
1657 
1658 	for_each_mem_cgroup_tree(iter, memcg) {
1659 		if (iter->oom_lock) {
1660 			/*
1661 			 * this subtree of our hierarchy is already locked
1662 			 * so we cannot give a lock.
1663 			 */
1664 			failed = iter;
1665 			mem_cgroup_iter_break(memcg, iter);
1666 			break;
1667 		} else
1668 			iter->oom_lock = true;
1669 	}
1670 
1671 	if (failed) {
1672 		/*
1673 		 * OK, we failed to lock the whole subtree so we have
1674 		 * to clean up what we set up to the failing subtree
1675 		 */
1676 		for_each_mem_cgroup_tree(iter, memcg) {
1677 			if (iter == failed) {
1678 				mem_cgroup_iter_break(memcg, iter);
1679 				break;
1680 			}
1681 			iter->oom_lock = false;
1682 		}
1683 	} else
1684 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1685 
1686 	spin_unlock(&memcg_oom_lock);
1687 
1688 	return !failed;
1689 }
1690 
1691 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1692 {
1693 	struct mem_cgroup *iter;
1694 
1695 	spin_lock(&memcg_oom_lock);
1696 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1697 	for_each_mem_cgroup_tree(iter, memcg)
1698 		iter->oom_lock = false;
1699 	spin_unlock(&memcg_oom_lock);
1700 }
1701 
1702 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1703 {
1704 	struct mem_cgroup *iter;
1705 
1706 	spin_lock(&memcg_oom_lock);
1707 	for_each_mem_cgroup_tree(iter, memcg)
1708 		iter->under_oom++;
1709 	spin_unlock(&memcg_oom_lock);
1710 }
1711 
1712 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1713 {
1714 	struct mem_cgroup *iter;
1715 
1716 	/*
1717 	 * Be careful about under_oom underflows because a child memcg
1718 	 * could have been added after mem_cgroup_mark_under_oom.
1719 	 */
1720 	spin_lock(&memcg_oom_lock);
1721 	for_each_mem_cgroup_tree(iter, memcg)
1722 		if (iter->under_oom > 0)
1723 			iter->under_oom--;
1724 	spin_unlock(&memcg_oom_lock);
1725 }
1726 
1727 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1728 
1729 struct oom_wait_info {
1730 	struct mem_cgroup *memcg;
1731 	wait_queue_entry_t	wait;
1732 };
1733 
1734 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1735 	unsigned mode, int sync, void *arg)
1736 {
1737 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1738 	struct mem_cgroup *oom_wait_memcg;
1739 	struct oom_wait_info *oom_wait_info;
1740 
1741 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1742 	oom_wait_memcg = oom_wait_info->memcg;
1743 
1744 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1745 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1746 		return 0;
1747 	return autoremove_wake_function(wait, mode, sync, arg);
1748 }
1749 
1750 static void memcg_oom_recover(struct mem_cgroup *memcg)
1751 {
1752 	/*
1753 	 * For the following lockless ->under_oom test, the only required
1754 	 * guarantee is that it must see the state asserted by an OOM when
1755 	 * this function is called as a result of userland actions
1756 	 * triggered by the notification of the OOM.  This is trivially
1757 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1758 	 * triggering notification.
1759 	 */
1760 	if (memcg && memcg->under_oom)
1761 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1762 }
1763 
1764 enum oom_status {
1765 	OOM_SUCCESS,
1766 	OOM_FAILED,
1767 	OOM_ASYNC,
1768 	OOM_SKIPPED
1769 };
1770 
1771 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1772 {
1773 	enum oom_status ret;
1774 	bool locked;
1775 
1776 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1777 		return OOM_SKIPPED;
1778 
1779 	memcg_memory_event(memcg, MEMCG_OOM);
1780 
1781 	/*
1782 	 * We are in the middle of the charge context here, so we
1783 	 * don't want to block when potentially sitting on a callstack
1784 	 * that holds all kinds of filesystem and mm locks.
1785 	 *
1786 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1787 	 * handling until the charge can succeed; remember the context and put
1788 	 * the task to sleep at the end of the page fault when all locks are
1789 	 * released.
1790 	 *
1791 	 * On the other hand, in-kernel OOM killer allows for an async victim
1792 	 * memory reclaim (oom_reaper) and that means that we are not solely
1793 	 * relying on the oom victim to make a forward progress and we can
1794 	 * invoke the oom killer here.
1795 	 *
1796 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1797 	 * victim and then we have to bail out from the charge path.
1798 	 */
1799 	if (memcg->oom_kill_disable) {
1800 		if (!current->in_user_fault)
1801 			return OOM_SKIPPED;
1802 		css_get(&memcg->css);
1803 		current->memcg_in_oom = memcg;
1804 		current->memcg_oom_gfp_mask = mask;
1805 		current->memcg_oom_order = order;
1806 
1807 		return OOM_ASYNC;
1808 	}
1809 
1810 	mem_cgroup_mark_under_oom(memcg);
1811 
1812 	locked = mem_cgroup_oom_trylock(memcg);
1813 
1814 	if (locked)
1815 		mem_cgroup_oom_notify(memcg);
1816 
1817 	mem_cgroup_unmark_under_oom(memcg);
1818 	if (mem_cgroup_out_of_memory(memcg, mask, order))
1819 		ret = OOM_SUCCESS;
1820 	else
1821 		ret = OOM_FAILED;
1822 
1823 	if (locked)
1824 		mem_cgroup_oom_unlock(memcg);
1825 
1826 	return ret;
1827 }
1828 
1829 /**
1830  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1831  * @handle: actually kill/wait or just clean up the OOM state
1832  *
1833  * This has to be called at the end of a page fault if the memcg OOM
1834  * handler was enabled.
1835  *
1836  * Memcg supports userspace OOM handling where failed allocations must
1837  * sleep on a waitqueue until the userspace task resolves the
1838  * situation.  Sleeping directly in the charge context with all kinds
1839  * of locks held is not a good idea, instead we remember an OOM state
1840  * in the task and mem_cgroup_oom_synchronize() has to be called at
1841  * the end of the page fault to complete the OOM handling.
1842  *
1843  * Returns %true if an ongoing memcg OOM situation was detected and
1844  * completed, %false otherwise.
1845  */
1846 bool mem_cgroup_oom_synchronize(bool handle)
1847 {
1848 	struct mem_cgroup *memcg = current->memcg_in_oom;
1849 	struct oom_wait_info owait;
1850 	bool locked;
1851 
1852 	/* OOM is global, do not handle */
1853 	if (!memcg)
1854 		return false;
1855 
1856 	if (!handle)
1857 		goto cleanup;
1858 
1859 	owait.memcg = memcg;
1860 	owait.wait.flags = 0;
1861 	owait.wait.func = memcg_oom_wake_function;
1862 	owait.wait.private = current;
1863 	INIT_LIST_HEAD(&owait.wait.entry);
1864 
1865 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1866 	mem_cgroup_mark_under_oom(memcg);
1867 
1868 	locked = mem_cgroup_oom_trylock(memcg);
1869 
1870 	if (locked)
1871 		mem_cgroup_oom_notify(memcg);
1872 
1873 	if (locked && !memcg->oom_kill_disable) {
1874 		mem_cgroup_unmark_under_oom(memcg);
1875 		finish_wait(&memcg_oom_waitq, &owait.wait);
1876 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1877 					 current->memcg_oom_order);
1878 	} else {
1879 		schedule();
1880 		mem_cgroup_unmark_under_oom(memcg);
1881 		finish_wait(&memcg_oom_waitq, &owait.wait);
1882 	}
1883 
1884 	if (locked) {
1885 		mem_cgroup_oom_unlock(memcg);
1886 		/*
1887 		 * There is no guarantee that an OOM-lock contender
1888 		 * sees the wakeups triggered by the OOM kill
1889 		 * uncharges.  Wake any sleepers explicitly.
1890 		 */
1891 		memcg_oom_recover(memcg);
1892 	}
1893 cleanup:
1894 	current->memcg_in_oom = NULL;
1895 	css_put(&memcg->css);
1896 	return true;
1897 }
1898 
1899 /**
1900  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1901  * @victim: task to be killed by the OOM killer
1902  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1903  *
1904  * Returns a pointer to a memory cgroup, which has to be cleaned up
1905  * by killing all belonging OOM-killable tasks.
1906  *
1907  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1908  */
1909 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1910 					    struct mem_cgroup *oom_domain)
1911 {
1912 	struct mem_cgroup *oom_group = NULL;
1913 	struct mem_cgroup *memcg;
1914 
1915 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1916 		return NULL;
1917 
1918 	if (!oom_domain)
1919 		oom_domain = root_mem_cgroup;
1920 
1921 	rcu_read_lock();
1922 
1923 	memcg = mem_cgroup_from_task(victim);
1924 	if (memcg == root_mem_cgroup)
1925 		goto out;
1926 
1927 	/*
1928 	 * If the victim task has been asynchronously moved to a different
1929 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1930 	 * In this case it's better to ignore memory.group.oom.
1931 	 */
1932 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1933 		goto out;
1934 
1935 	/*
1936 	 * Traverse the memory cgroup hierarchy from the victim task's
1937 	 * cgroup up to the OOMing cgroup (or root) to find the
1938 	 * highest-level memory cgroup with oom.group set.
1939 	 */
1940 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1941 		if (memcg->oom_group)
1942 			oom_group = memcg;
1943 
1944 		if (memcg == oom_domain)
1945 			break;
1946 	}
1947 
1948 	if (oom_group)
1949 		css_get(&oom_group->css);
1950 out:
1951 	rcu_read_unlock();
1952 
1953 	return oom_group;
1954 }
1955 
1956 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1957 {
1958 	pr_info("Tasks in ");
1959 	pr_cont_cgroup_path(memcg->css.cgroup);
1960 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1961 }
1962 
1963 /**
1964  * lock_page_memcg - lock a page and memcg binding
1965  * @page: the page
1966  *
1967  * This function protects unlocked LRU pages from being moved to
1968  * another cgroup.
1969  *
1970  * It ensures lifetime of the locked memcg. Caller is responsible
1971  * for the lifetime of the page.
1972  */
1973 void lock_page_memcg(struct page *page)
1974 {
1975 	struct page *head = compound_head(page); /* rmap on tail pages */
1976 	struct mem_cgroup *memcg;
1977 	unsigned long flags;
1978 
1979 	/*
1980 	 * The RCU lock is held throughout the transaction.  The fast
1981 	 * path can get away without acquiring the memcg->move_lock
1982 	 * because page moving starts with an RCU grace period.
1983          */
1984 	rcu_read_lock();
1985 
1986 	if (mem_cgroup_disabled())
1987 		return;
1988 again:
1989 	memcg = page_memcg(head);
1990 	if (unlikely(!memcg))
1991 		return;
1992 
1993 #ifdef CONFIG_PROVE_LOCKING
1994 	local_irq_save(flags);
1995 	might_lock(&memcg->move_lock);
1996 	local_irq_restore(flags);
1997 #endif
1998 
1999 	if (atomic_read(&memcg->moving_account) <= 0)
2000 		return;
2001 
2002 	spin_lock_irqsave(&memcg->move_lock, flags);
2003 	if (memcg != page_memcg(head)) {
2004 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2005 		goto again;
2006 	}
2007 
2008 	/*
2009 	 * When charge migration first begins, we can have multiple
2010 	 * critical sections holding the fast-path RCU lock and one
2011 	 * holding the slowpath move_lock. Track the task who has the
2012 	 * move_lock for unlock_page_memcg().
2013 	 */
2014 	memcg->move_lock_task = current;
2015 	memcg->move_lock_flags = flags;
2016 }
2017 EXPORT_SYMBOL(lock_page_memcg);
2018 
2019 static void __unlock_page_memcg(struct mem_cgroup *memcg)
2020 {
2021 	if (memcg && memcg->move_lock_task == current) {
2022 		unsigned long flags = memcg->move_lock_flags;
2023 
2024 		memcg->move_lock_task = NULL;
2025 		memcg->move_lock_flags = 0;
2026 
2027 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2028 	}
2029 
2030 	rcu_read_unlock();
2031 }
2032 
2033 /**
2034  * unlock_page_memcg - unlock a page and memcg binding
2035  * @page: the page
2036  */
2037 void unlock_page_memcg(struct page *page)
2038 {
2039 	struct page *head = compound_head(page);
2040 
2041 	__unlock_page_memcg(page_memcg(head));
2042 }
2043 EXPORT_SYMBOL(unlock_page_memcg);
2044 
2045 struct obj_stock {
2046 #ifdef CONFIG_MEMCG_KMEM
2047 	struct obj_cgroup *cached_objcg;
2048 	struct pglist_data *cached_pgdat;
2049 	unsigned int nr_bytes;
2050 	int nr_slab_reclaimable_b;
2051 	int nr_slab_unreclaimable_b;
2052 #else
2053 	int dummy[0];
2054 #endif
2055 };
2056 
2057 struct memcg_stock_pcp {
2058 	struct mem_cgroup *cached; /* this never be root cgroup */
2059 	unsigned int nr_pages;
2060 	struct obj_stock task_obj;
2061 	struct obj_stock irq_obj;
2062 
2063 	struct work_struct work;
2064 	unsigned long flags;
2065 #define FLUSHING_CACHED_CHARGE	0
2066 };
2067 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2068 static DEFINE_MUTEX(percpu_charge_mutex);
2069 
2070 #ifdef CONFIG_MEMCG_KMEM
2071 static void drain_obj_stock(struct obj_stock *stock);
2072 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2073 				     struct mem_cgroup *root_memcg);
2074 
2075 #else
2076 static inline void drain_obj_stock(struct obj_stock *stock)
2077 {
2078 }
2079 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2080 				     struct mem_cgroup *root_memcg)
2081 {
2082 	return false;
2083 }
2084 #endif
2085 
2086 /*
2087  * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
2088  * sequence used in this case to access content from object stock is slow.
2089  * To optimize for user context access, there are now two object stocks for
2090  * task context and interrupt context access respectively.
2091  *
2092  * The task context object stock can be accessed by disabling preemption only
2093  * which is cheap in non-preempt kernel. The interrupt context object stock
2094  * can only be accessed after disabling interrupt. User context code can
2095  * access interrupt object stock, but not vice versa.
2096  */
2097 static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
2098 {
2099 	struct memcg_stock_pcp *stock;
2100 
2101 	if (likely(in_task())) {
2102 		*pflags = 0UL;
2103 		preempt_disable();
2104 		stock = this_cpu_ptr(&memcg_stock);
2105 		return &stock->task_obj;
2106 	}
2107 
2108 	local_irq_save(*pflags);
2109 	stock = this_cpu_ptr(&memcg_stock);
2110 	return &stock->irq_obj;
2111 }
2112 
2113 static inline void put_obj_stock(unsigned long flags)
2114 {
2115 	if (likely(in_task()))
2116 		preempt_enable();
2117 	else
2118 		local_irq_restore(flags);
2119 }
2120 
2121 /**
2122  * consume_stock: Try to consume stocked charge on this cpu.
2123  * @memcg: memcg to consume from.
2124  * @nr_pages: how many pages to charge.
2125  *
2126  * The charges will only happen if @memcg matches the current cpu's memcg
2127  * stock, and at least @nr_pages are available in that stock.  Failure to
2128  * service an allocation will refill the stock.
2129  *
2130  * returns true if successful, false otherwise.
2131  */
2132 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2133 {
2134 	struct memcg_stock_pcp *stock;
2135 	unsigned long flags;
2136 	bool ret = false;
2137 
2138 	if (nr_pages > MEMCG_CHARGE_BATCH)
2139 		return ret;
2140 
2141 	local_irq_save(flags);
2142 
2143 	stock = this_cpu_ptr(&memcg_stock);
2144 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2145 		stock->nr_pages -= nr_pages;
2146 		ret = true;
2147 	}
2148 
2149 	local_irq_restore(flags);
2150 
2151 	return ret;
2152 }
2153 
2154 /*
2155  * Returns stocks cached in percpu and reset cached information.
2156  */
2157 static void drain_stock(struct memcg_stock_pcp *stock)
2158 {
2159 	struct mem_cgroup *old = stock->cached;
2160 
2161 	if (!old)
2162 		return;
2163 
2164 	if (stock->nr_pages) {
2165 		page_counter_uncharge(&old->memory, stock->nr_pages);
2166 		if (do_memsw_account())
2167 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2168 		stock->nr_pages = 0;
2169 	}
2170 
2171 	css_put(&old->css);
2172 	stock->cached = NULL;
2173 }
2174 
2175 static void drain_local_stock(struct work_struct *dummy)
2176 {
2177 	struct memcg_stock_pcp *stock;
2178 	unsigned long flags;
2179 
2180 	/*
2181 	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2182 	 * drain_stock races is that we always operate on local CPU stock
2183 	 * here with IRQ disabled
2184 	 */
2185 	local_irq_save(flags);
2186 
2187 	stock = this_cpu_ptr(&memcg_stock);
2188 	drain_obj_stock(&stock->irq_obj);
2189 	if (in_task())
2190 		drain_obj_stock(&stock->task_obj);
2191 	drain_stock(stock);
2192 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2193 
2194 	local_irq_restore(flags);
2195 }
2196 
2197 /*
2198  * Cache charges(val) to local per_cpu area.
2199  * This will be consumed by consume_stock() function, later.
2200  */
2201 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2202 {
2203 	struct memcg_stock_pcp *stock;
2204 	unsigned long flags;
2205 
2206 	local_irq_save(flags);
2207 
2208 	stock = this_cpu_ptr(&memcg_stock);
2209 	if (stock->cached != memcg) { /* reset if necessary */
2210 		drain_stock(stock);
2211 		css_get(&memcg->css);
2212 		stock->cached = memcg;
2213 	}
2214 	stock->nr_pages += nr_pages;
2215 
2216 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2217 		drain_stock(stock);
2218 
2219 	local_irq_restore(flags);
2220 }
2221 
2222 /*
2223  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2224  * of the hierarchy under it.
2225  */
2226 static void drain_all_stock(struct mem_cgroup *root_memcg)
2227 {
2228 	int cpu, curcpu;
2229 
2230 	/* If someone's already draining, avoid adding running more workers. */
2231 	if (!mutex_trylock(&percpu_charge_mutex))
2232 		return;
2233 	/*
2234 	 * Notify other cpus that system-wide "drain" is running
2235 	 * We do not care about races with the cpu hotplug because cpu down
2236 	 * as well as workers from this path always operate on the local
2237 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2238 	 */
2239 	curcpu = get_cpu();
2240 	for_each_online_cpu(cpu) {
2241 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2242 		struct mem_cgroup *memcg;
2243 		bool flush = false;
2244 
2245 		rcu_read_lock();
2246 		memcg = stock->cached;
2247 		if (memcg && stock->nr_pages &&
2248 		    mem_cgroup_is_descendant(memcg, root_memcg))
2249 			flush = true;
2250 		else if (obj_stock_flush_required(stock, root_memcg))
2251 			flush = true;
2252 		rcu_read_unlock();
2253 
2254 		if (flush &&
2255 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2256 			if (cpu == curcpu)
2257 				drain_local_stock(&stock->work);
2258 			else
2259 				schedule_work_on(cpu, &stock->work);
2260 		}
2261 	}
2262 	put_cpu();
2263 	mutex_unlock(&percpu_charge_mutex);
2264 }
2265 
2266 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2267 {
2268 	struct memcg_stock_pcp *stock;
2269 
2270 	stock = &per_cpu(memcg_stock, cpu);
2271 	drain_stock(stock);
2272 
2273 	return 0;
2274 }
2275 
2276 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2277 				  unsigned int nr_pages,
2278 				  gfp_t gfp_mask)
2279 {
2280 	unsigned long nr_reclaimed = 0;
2281 
2282 	do {
2283 		unsigned long pflags;
2284 
2285 		if (page_counter_read(&memcg->memory) <=
2286 		    READ_ONCE(memcg->memory.high))
2287 			continue;
2288 
2289 		memcg_memory_event(memcg, MEMCG_HIGH);
2290 
2291 		psi_memstall_enter(&pflags);
2292 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2293 							     gfp_mask, true);
2294 		psi_memstall_leave(&pflags);
2295 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2296 		 !mem_cgroup_is_root(memcg));
2297 
2298 	return nr_reclaimed;
2299 }
2300 
2301 static void high_work_func(struct work_struct *work)
2302 {
2303 	struct mem_cgroup *memcg;
2304 
2305 	memcg = container_of(work, struct mem_cgroup, high_work);
2306 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2307 }
2308 
2309 /*
2310  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2311  * enough to still cause a significant slowdown in most cases, while still
2312  * allowing diagnostics and tracing to proceed without becoming stuck.
2313  */
2314 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2315 
2316 /*
2317  * When calculating the delay, we use these either side of the exponentiation to
2318  * maintain precision and scale to a reasonable number of jiffies (see the table
2319  * below.
2320  *
2321  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2322  *   overage ratio to a delay.
2323  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2324  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2325  *   to produce a reasonable delay curve.
2326  *
2327  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2328  * reasonable delay curve compared to precision-adjusted overage, not
2329  * penalising heavily at first, but still making sure that growth beyond the
2330  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2331  * example, with a high of 100 megabytes:
2332  *
2333  *  +-------+------------------------+
2334  *  | usage | time to allocate in ms |
2335  *  +-------+------------------------+
2336  *  | 100M  |                      0 |
2337  *  | 101M  |                      6 |
2338  *  | 102M  |                     25 |
2339  *  | 103M  |                     57 |
2340  *  | 104M  |                    102 |
2341  *  | 105M  |                    159 |
2342  *  | 106M  |                    230 |
2343  *  | 107M  |                    313 |
2344  *  | 108M  |                    409 |
2345  *  | 109M  |                    518 |
2346  *  | 110M  |                    639 |
2347  *  | 111M  |                    774 |
2348  *  | 112M  |                    921 |
2349  *  | 113M  |                   1081 |
2350  *  | 114M  |                   1254 |
2351  *  | 115M  |                   1439 |
2352  *  | 116M  |                   1638 |
2353  *  | 117M  |                   1849 |
2354  *  | 118M  |                   2000 |
2355  *  | 119M  |                   2000 |
2356  *  | 120M  |                   2000 |
2357  *  +-------+------------------------+
2358  */
2359  #define MEMCG_DELAY_PRECISION_SHIFT 20
2360  #define MEMCG_DELAY_SCALING_SHIFT 14
2361 
2362 static u64 calculate_overage(unsigned long usage, unsigned long high)
2363 {
2364 	u64 overage;
2365 
2366 	if (usage <= high)
2367 		return 0;
2368 
2369 	/*
2370 	 * Prevent division by 0 in overage calculation by acting as if
2371 	 * it was a threshold of 1 page
2372 	 */
2373 	high = max(high, 1UL);
2374 
2375 	overage = usage - high;
2376 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2377 	return div64_u64(overage, high);
2378 }
2379 
2380 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2381 {
2382 	u64 overage, max_overage = 0;
2383 
2384 	do {
2385 		overage = calculate_overage(page_counter_read(&memcg->memory),
2386 					    READ_ONCE(memcg->memory.high));
2387 		max_overage = max(overage, max_overage);
2388 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2389 		 !mem_cgroup_is_root(memcg));
2390 
2391 	return max_overage;
2392 }
2393 
2394 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2395 {
2396 	u64 overage, max_overage = 0;
2397 
2398 	do {
2399 		overage = calculate_overage(page_counter_read(&memcg->swap),
2400 					    READ_ONCE(memcg->swap.high));
2401 		if (overage)
2402 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2403 		max_overage = max(overage, max_overage);
2404 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2405 		 !mem_cgroup_is_root(memcg));
2406 
2407 	return max_overage;
2408 }
2409 
2410 /*
2411  * Get the number of jiffies that we should penalise a mischievous cgroup which
2412  * is exceeding its memory.high by checking both it and its ancestors.
2413  */
2414 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2415 					  unsigned int nr_pages,
2416 					  u64 max_overage)
2417 {
2418 	unsigned long penalty_jiffies;
2419 
2420 	if (!max_overage)
2421 		return 0;
2422 
2423 	/*
2424 	 * We use overage compared to memory.high to calculate the number of
2425 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2426 	 * fairly lenient on small overages, and increasingly harsh when the
2427 	 * memcg in question makes it clear that it has no intention of stopping
2428 	 * its crazy behaviour, so we exponentially increase the delay based on
2429 	 * overage amount.
2430 	 */
2431 	penalty_jiffies = max_overage * max_overage * HZ;
2432 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2433 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2434 
2435 	/*
2436 	 * Factor in the task's own contribution to the overage, such that four
2437 	 * N-sized allocations are throttled approximately the same as one
2438 	 * 4N-sized allocation.
2439 	 *
2440 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2441 	 * larger the current charge patch is than that.
2442 	 */
2443 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2444 }
2445 
2446 /*
2447  * Scheduled by try_charge() to be executed from the userland return path
2448  * and reclaims memory over the high limit.
2449  */
2450 void mem_cgroup_handle_over_high(void)
2451 {
2452 	unsigned long penalty_jiffies;
2453 	unsigned long pflags;
2454 	unsigned long nr_reclaimed;
2455 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2456 	int nr_retries = MAX_RECLAIM_RETRIES;
2457 	struct mem_cgroup *memcg;
2458 	bool in_retry = false;
2459 
2460 	if (likely(!nr_pages))
2461 		return;
2462 
2463 	memcg = get_mem_cgroup_from_mm(current->mm);
2464 	current->memcg_nr_pages_over_high = 0;
2465 
2466 retry_reclaim:
2467 	/*
2468 	 * The allocating task should reclaim at least the batch size, but for
2469 	 * subsequent retries we only want to do what's necessary to prevent oom
2470 	 * or breaching resource isolation.
2471 	 *
2472 	 * This is distinct from memory.max or page allocator behaviour because
2473 	 * memory.high is currently batched, whereas memory.max and the page
2474 	 * allocator run every time an allocation is made.
2475 	 */
2476 	nr_reclaimed = reclaim_high(memcg,
2477 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2478 				    GFP_KERNEL);
2479 
2480 	/*
2481 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2482 	 * allocators proactively to slow down excessive growth.
2483 	 */
2484 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2485 					       mem_find_max_overage(memcg));
2486 
2487 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2488 						swap_find_max_overage(memcg));
2489 
2490 	/*
2491 	 * Clamp the max delay per usermode return so as to still keep the
2492 	 * application moving forwards and also permit diagnostics, albeit
2493 	 * extremely slowly.
2494 	 */
2495 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2496 
2497 	/*
2498 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2499 	 * that it's not even worth doing, in an attempt to be nice to those who
2500 	 * go only a small amount over their memory.high value and maybe haven't
2501 	 * been aggressively reclaimed enough yet.
2502 	 */
2503 	if (penalty_jiffies <= HZ / 100)
2504 		goto out;
2505 
2506 	/*
2507 	 * If reclaim is making forward progress but we're still over
2508 	 * memory.high, we want to encourage that rather than doing allocator
2509 	 * throttling.
2510 	 */
2511 	if (nr_reclaimed || nr_retries--) {
2512 		in_retry = true;
2513 		goto retry_reclaim;
2514 	}
2515 
2516 	/*
2517 	 * If we exit early, we're guaranteed to die (since
2518 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2519 	 * need to account for any ill-begotten jiffies to pay them off later.
2520 	 */
2521 	psi_memstall_enter(&pflags);
2522 	schedule_timeout_killable(penalty_jiffies);
2523 	psi_memstall_leave(&pflags);
2524 
2525 out:
2526 	css_put(&memcg->css);
2527 }
2528 
2529 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2530 			unsigned int nr_pages)
2531 {
2532 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2533 	int nr_retries = MAX_RECLAIM_RETRIES;
2534 	struct mem_cgroup *mem_over_limit;
2535 	struct page_counter *counter;
2536 	enum oom_status oom_status;
2537 	unsigned long nr_reclaimed;
2538 	bool may_swap = true;
2539 	bool drained = false;
2540 	unsigned long pflags;
2541 
2542 retry:
2543 	if (consume_stock(memcg, nr_pages))
2544 		return 0;
2545 
2546 	if (!do_memsw_account() ||
2547 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2548 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2549 			goto done_restock;
2550 		if (do_memsw_account())
2551 			page_counter_uncharge(&memcg->memsw, batch);
2552 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2553 	} else {
2554 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2555 		may_swap = false;
2556 	}
2557 
2558 	if (batch > nr_pages) {
2559 		batch = nr_pages;
2560 		goto retry;
2561 	}
2562 
2563 	/*
2564 	 * Memcg doesn't have a dedicated reserve for atomic
2565 	 * allocations. But like the global atomic pool, we need to
2566 	 * put the burden of reclaim on regular allocation requests
2567 	 * and let these go through as privileged allocations.
2568 	 */
2569 	if (gfp_mask & __GFP_ATOMIC)
2570 		goto force;
2571 
2572 	/*
2573 	 * Unlike in global OOM situations, memcg is not in a physical
2574 	 * memory shortage.  Allow dying and OOM-killed tasks to
2575 	 * bypass the last charges so that they can exit quickly and
2576 	 * free their memory.
2577 	 */
2578 	if (unlikely(should_force_charge()))
2579 		goto force;
2580 
2581 	/*
2582 	 * Prevent unbounded recursion when reclaim operations need to
2583 	 * allocate memory. This might exceed the limits temporarily,
2584 	 * but we prefer facilitating memory reclaim and getting back
2585 	 * under the limit over triggering OOM kills in these cases.
2586 	 */
2587 	if (unlikely(current->flags & PF_MEMALLOC))
2588 		goto force;
2589 
2590 	if (unlikely(task_in_memcg_oom(current)))
2591 		goto nomem;
2592 
2593 	if (!gfpflags_allow_blocking(gfp_mask))
2594 		goto nomem;
2595 
2596 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2597 
2598 	psi_memstall_enter(&pflags);
2599 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2600 						    gfp_mask, may_swap);
2601 	psi_memstall_leave(&pflags);
2602 
2603 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2604 		goto retry;
2605 
2606 	if (!drained) {
2607 		drain_all_stock(mem_over_limit);
2608 		drained = true;
2609 		goto retry;
2610 	}
2611 
2612 	if (gfp_mask & __GFP_NORETRY)
2613 		goto nomem;
2614 	/*
2615 	 * Even though the limit is exceeded at this point, reclaim
2616 	 * may have been able to free some pages.  Retry the charge
2617 	 * before killing the task.
2618 	 *
2619 	 * Only for regular pages, though: huge pages are rather
2620 	 * unlikely to succeed so close to the limit, and we fall back
2621 	 * to regular pages anyway in case of failure.
2622 	 */
2623 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2624 		goto retry;
2625 	/*
2626 	 * At task move, charge accounts can be doubly counted. So, it's
2627 	 * better to wait until the end of task_move if something is going on.
2628 	 */
2629 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2630 		goto retry;
2631 
2632 	if (nr_retries--)
2633 		goto retry;
2634 
2635 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2636 		goto nomem;
2637 
2638 	if (fatal_signal_pending(current))
2639 		goto force;
2640 
2641 	/*
2642 	 * keep retrying as long as the memcg oom killer is able to make
2643 	 * a forward progress or bypass the charge if the oom killer
2644 	 * couldn't make any progress.
2645 	 */
2646 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2647 		       get_order(nr_pages * PAGE_SIZE));
2648 	switch (oom_status) {
2649 	case OOM_SUCCESS:
2650 		nr_retries = MAX_RECLAIM_RETRIES;
2651 		goto retry;
2652 	case OOM_FAILED:
2653 		goto force;
2654 	default:
2655 		goto nomem;
2656 	}
2657 nomem:
2658 	if (!(gfp_mask & __GFP_NOFAIL))
2659 		return -ENOMEM;
2660 force:
2661 	/*
2662 	 * The allocation either can't fail or will lead to more memory
2663 	 * being freed very soon.  Allow memory usage go over the limit
2664 	 * temporarily by force charging it.
2665 	 */
2666 	page_counter_charge(&memcg->memory, nr_pages);
2667 	if (do_memsw_account())
2668 		page_counter_charge(&memcg->memsw, nr_pages);
2669 
2670 	return 0;
2671 
2672 done_restock:
2673 	if (batch > nr_pages)
2674 		refill_stock(memcg, batch - nr_pages);
2675 
2676 	/*
2677 	 * If the hierarchy is above the normal consumption range, schedule
2678 	 * reclaim on returning to userland.  We can perform reclaim here
2679 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2680 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2681 	 * not recorded as it most likely matches current's and won't
2682 	 * change in the meantime.  As high limit is checked again before
2683 	 * reclaim, the cost of mismatch is negligible.
2684 	 */
2685 	do {
2686 		bool mem_high, swap_high;
2687 
2688 		mem_high = page_counter_read(&memcg->memory) >
2689 			READ_ONCE(memcg->memory.high);
2690 		swap_high = page_counter_read(&memcg->swap) >
2691 			READ_ONCE(memcg->swap.high);
2692 
2693 		/* Don't bother a random interrupted task */
2694 		if (in_interrupt()) {
2695 			if (mem_high) {
2696 				schedule_work(&memcg->high_work);
2697 				break;
2698 			}
2699 			continue;
2700 		}
2701 
2702 		if (mem_high || swap_high) {
2703 			/*
2704 			 * The allocating tasks in this cgroup will need to do
2705 			 * reclaim or be throttled to prevent further growth
2706 			 * of the memory or swap footprints.
2707 			 *
2708 			 * Target some best-effort fairness between the tasks,
2709 			 * and distribute reclaim work and delay penalties
2710 			 * based on how much each task is actually allocating.
2711 			 */
2712 			current->memcg_nr_pages_over_high += batch;
2713 			set_notify_resume(current);
2714 			break;
2715 		}
2716 	} while ((memcg = parent_mem_cgroup(memcg)));
2717 
2718 	return 0;
2719 }
2720 
2721 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2722 			     unsigned int nr_pages)
2723 {
2724 	if (mem_cgroup_is_root(memcg))
2725 		return 0;
2726 
2727 	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2728 }
2729 
2730 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2731 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2732 {
2733 	if (mem_cgroup_is_root(memcg))
2734 		return;
2735 
2736 	page_counter_uncharge(&memcg->memory, nr_pages);
2737 	if (do_memsw_account())
2738 		page_counter_uncharge(&memcg->memsw, nr_pages);
2739 }
2740 #endif
2741 
2742 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2743 {
2744 	VM_BUG_ON_PAGE(page_memcg(page), page);
2745 	/*
2746 	 * Any of the following ensures page's memcg stability:
2747 	 *
2748 	 * - the page lock
2749 	 * - LRU isolation
2750 	 * - lock_page_memcg()
2751 	 * - exclusive reference
2752 	 */
2753 	page->memcg_data = (unsigned long)memcg;
2754 }
2755 
2756 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
2757 {
2758 	struct mem_cgroup *memcg;
2759 
2760 	rcu_read_lock();
2761 retry:
2762 	memcg = obj_cgroup_memcg(objcg);
2763 	if (unlikely(!css_tryget(&memcg->css)))
2764 		goto retry;
2765 	rcu_read_unlock();
2766 
2767 	return memcg;
2768 }
2769 
2770 #ifdef CONFIG_MEMCG_KMEM
2771 /*
2772  * The allocated objcg pointers array is not accounted directly.
2773  * Moreover, it should not come from DMA buffer and is not readily
2774  * reclaimable. So those GFP bits should be masked off.
2775  */
2776 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2777 
2778 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2779 				 gfp_t gfp, bool new_page)
2780 {
2781 	unsigned int objects = objs_per_slab_page(s, page);
2782 	unsigned long memcg_data;
2783 	void *vec;
2784 
2785 	gfp &= ~OBJCGS_CLEAR_MASK;
2786 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2787 			   page_to_nid(page));
2788 	if (!vec)
2789 		return -ENOMEM;
2790 
2791 	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2792 	if (new_page) {
2793 		/*
2794 		 * If the slab page is brand new and nobody can yet access
2795 		 * it's memcg_data, no synchronization is required and
2796 		 * memcg_data can be simply assigned.
2797 		 */
2798 		page->memcg_data = memcg_data;
2799 	} else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
2800 		/*
2801 		 * If the slab page is already in use, somebody can allocate
2802 		 * and assign obj_cgroups in parallel. In this case the existing
2803 		 * objcg vector should be reused.
2804 		 */
2805 		kfree(vec);
2806 		return 0;
2807 	}
2808 
2809 	kmemleak_not_leak(vec);
2810 	return 0;
2811 }
2812 
2813 /*
2814  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2815  *
2816  * A passed kernel object can be a slab object or a generic kernel page, so
2817  * different mechanisms for getting the memory cgroup pointer should be used.
2818  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2819  * can not know for sure how the kernel object is implemented.
2820  * mem_cgroup_from_obj() can be safely used in such cases.
2821  *
2822  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2823  * cgroup_mutex, etc.
2824  */
2825 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2826 {
2827 	struct page *page;
2828 
2829 	if (mem_cgroup_disabled())
2830 		return NULL;
2831 
2832 	page = virt_to_head_page(p);
2833 
2834 	/*
2835 	 * Slab objects are accounted individually, not per-page.
2836 	 * Memcg membership data for each individual object is saved in
2837 	 * the page->obj_cgroups.
2838 	 */
2839 	if (page_objcgs_check(page)) {
2840 		struct obj_cgroup *objcg;
2841 		unsigned int off;
2842 
2843 		off = obj_to_index(page->slab_cache, page, p);
2844 		objcg = page_objcgs(page)[off];
2845 		if (objcg)
2846 			return obj_cgroup_memcg(objcg);
2847 
2848 		return NULL;
2849 	}
2850 
2851 	/*
2852 	 * page_memcg_check() is used here, because page_has_obj_cgroups()
2853 	 * check above could fail because the object cgroups vector wasn't set
2854 	 * at that moment, but it can be set concurrently.
2855 	 * page_memcg_check(page) will guarantee that a proper memory
2856 	 * cgroup pointer or NULL will be returned.
2857 	 */
2858 	return page_memcg_check(page);
2859 }
2860 
2861 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2862 {
2863 	struct obj_cgroup *objcg = NULL;
2864 	struct mem_cgroup *memcg;
2865 
2866 	if (memcg_kmem_bypass())
2867 		return NULL;
2868 
2869 	rcu_read_lock();
2870 	if (unlikely(active_memcg()))
2871 		memcg = active_memcg();
2872 	else
2873 		memcg = mem_cgroup_from_task(current);
2874 
2875 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2876 		objcg = rcu_dereference(memcg->objcg);
2877 		if (objcg && obj_cgroup_tryget(objcg))
2878 			break;
2879 		objcg = NULL;
2880 	}
2881 	rcu_read_unlock();
2882 
2883 	return objcg;
2884 }
2885 
2886 static int memcg_alloc_cache_id(void)
2887 {
2888 	int id, size;
2889 	int err;
2890 
2891 	id = ida_simple_get(&memcg_cache_ida,
2892 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2893 	if (id < 0)
2894 		return id;
2895 
2896 	if (id < memcg_nr_cache_ids)
2897 		return id;
2898 
2899 	/*
2900 	 * There's no space for the new id in memcg_caches arrays,
2901 	 * so we have to grow them.
2902 	 */
2903 	down_write(&memcg_cache_ids_sem);
2904 
2905 	size = 2 * (id + 1);
2906 	if (size < MEMCG_CACHES_MIN_SIZE)
2907 		size = MEMCG_CACHES_MIN_SIZE;
2908 	else if (size > MEMCG_CACHES_MAX_SIZE)
2909 		size = MEMCG_CACHES_MAX_SIZE;
2910 
2911 	err = memcg_update_all_list_lrus(size);
2912 	if (!err)
2913 		memcg_nr_cache_ids = size;
2914 
2915 	up_write(&memcg_cache_ids_sem);
2916 
2917 	if (err) {
2918 		ida_simple_remove(&memcg_cache_ida, id);
2919 		return err;
2920 	}
2921 	return id;
2922 }
2923 
2924 static void memcg_free_cache_id(int id)
2925 {
2926 	ida_simple_remove(&memcg_cache_ida, id);
2927 }
2928 
2929 /*
2930  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
2931  * @objcg: object cgroup to uncharge
2932  * @nr_pages: number of pages to uncharge
2933  */
2934 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2935 				      unsigned int nr_pages)
2936 {
2937 	struct mem_cgroup *memcg;
2938 
2939 	memcg = get_mem_cgroup_from_objcg(objcg);
2940 
2941 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2942 		page_counter_uncharge(&memcg->kmem, nr_pages);
2943 	refill_stock(memcg, nr_pages);
2944 
2945 	css_put(&memcg->css);
2946 }
2947 
2948 /*
2949  * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
2950  * @objcg: object cgroup to charge
2951  * @gfp: reclaim mode
2952  * @nr_pages: number of pages to charge
2953  *
2954  * Returns 0 on success, an error code on failure.
2955  */
2956 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2957 				   unsigned int nr_pages)
2958 {
2959 	struct page_counter *counter;
2960 	struct mem_cgroup *memcg;
2961 	int ret;
2962 
2963 	memcg = get_mem_cgroup_from_objcg(objcg);
2964 
2965 	ret = try_charge_memcg(memcg, gfp, nr_pages);
2966 	if (ret)
2967 		goto out;
2968 
2969 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2970 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2971 
2972 		/*
2973 		 * Enforce __GFP_NOFAIL allocation because callers are not
2974 		 * prepared to see failures and likely do not have any failure
2975 		 * handling code.
2976 		 */
2977 		if (gfp & __GFP_NOFAIL) {
2978 			page_counter_charge(&memcg->kmem, nr_pages);
2979 			goto out;
2980 		}
2981 		cancel_charge(memcg, nr_pages);
2982 		ret = -ENOMEM;
2983 	}
2984 out:
2985 	css_put(&memcg->css);
2986 
2987 	return ret;
2988 }
2989 
2990 /**
2991  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2992  * @page: page to charge
2993  * @gfp: reclaim mode
2994  * @order: allocation order
2995  *
2996  * Returns 0 on success, an error code on failure.
2997  */
2998 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2999 {
3000 	struct obj_cgroup *objcg;
3001 	int ret = 0;
3002 
3003 	objcg = get_obj_cgroup_from_current();
3004 	if (objcg) {
3005 		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3006 		if (!ret) {
3007 			page->memcg_data = (unsigned long)objcg |
3008 				MEMCG_DATA_KMEM;
3009 			return 0;
3010 		}
3011 		obj_cgroup_put(objcg);
3012 	}
3013 	return ret;
3014 }
3015 
3016 /**
3017  * __memcg_kmem_uncharge_page: uncharge a kmem page
3018  * @page: page to uncharge
3019  * @order: allocation order
3020  */
3021 void __memcg_kmem_uncharge_page(struct page *page, int order)
3022 {
3023 	struct obj_cgroup *objcg;
3024 	unsigned int nr_pages = 1 << order;
3025 
3026 	if (!PageMemcgKmem(page))
3027 		return;
3028 
3029 	objcg = __page_objcg(page);
3030 	obj_cgroup_uncharge_pages(objcg, nr_pages);
3031 	page->memcg_data = 0;
3032 	obj_cgroup_put(objcg);
3033 }
3034 
3035 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3036 		     enum node_stat_item idx, int nr)
3037 {
3038 	unsigned long flags;
3039 	struct obj_stock *stock = get_obj_stock(&flags);
3040 	int *bytes;
3041 
3042 	/*
3043 	 * Save vmstat data in stock and skip vmstat array update unless
3044 	 * accumulating over a page of vmstat data or when pgdat or idx
3045 	 * changes.
3046 	 */
3047 	if (stock->cached_objcg != objcg) {
3048 		drain_obj_stock(stock);
3049 		obj_cgroup_get(objcg);
3050 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3051 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3052 		stock->cached_objcg = objcg;
3053 		stock->cached_pgdat = pgdat;
3054 	} else if (stock->cached_pgdat != pgdat) {
3055 		/* Flush the existing cached vmstat data */
3056 		struct pglist_data *oldpg = stock->cached_pgdat;
3057 
3058 		if (stock->nr_slab_reclaimable_b) {
3059 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3060 					  stock->nr_slab_reclaimable_b);
3061 			stock->nr_slab_reclaimable_b = 0;
3062 		}
3063 		if (stock->nr_slab_unreclaimable_b) {
3064 			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3065 					  stock->nr_slab_unreclaimable_b);
3066 			stock->nr_slab_unreclaimable_b = 0;
3067 		}
3068 		stock->cached_pgdat = pgdat;
3069 	}
3070 
3071 	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3072 					       : &stock->nr_slab_unreclaimable_b;
3073 	/*
3074 	 * Even for large object >= PAGE_SIZE, the vmstat data will still be
3075 	 * cached locally at least once before pushing it out.
3076 	 */
3077 	if (!*bytes) {
3078 		*bytes = nr;
3079 		nr = 0;
3080 	} else {
3081 		*bytes += nr;
3082 		if (abs(*bytes) > PAGE_SIZE) {
3083 			nr = *bytes;
3084 			*bytes = 0;
3085 		} else {
3086 			nr = 0;
3087 		}
3088 	}
3089 	if (nr)
3090 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3091 
3092 	put_obj_stock(flags);
3093 }
3094 
3095 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3096 {
3097 	unsigned long flags;
3098 	struct obj_stock *stock = get_obj_stock(&flags);
3099 	bool ret = false;
3100 
3101 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3102 		stock->nr_bytes -= nr_bytes;
3103 		ret = true;
3104 	}
3105 
3106 	put_obj_stock(flags);
3107 
3108 	return ret;
3109 }
3110 
3111 static void drain_obj_stock(struct obj_stock *stock)
3112 {
3113 	struct obj_cgroup *old = stock->cached_objcg;
3114 
3115 	if (!old)
3116 		return;
3117 
3118 	if (stock->nr_bytes) {
3119 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3120 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3121 
3122 		if (nr_pages)
3123 			obj_cgroup_uncharge_pages(old, nr_pages);
3124 
3125 		/*
3126 		 * The leftover is flushed to the centralized per-memcg value.
3127 		 * On the next attempt to refill obj stock it will be moved
3128 		 * to a per-cpu stock (probably, on an other CPU), see
3129 		 * refill_obj_stock().
3130 		 *
3131 		 * How often it's flushed is a trade-off between the memory
3132 		 * limit enforcement accuracy and potential CPU contention,
3133 		 * so it might be changed in the future.
3134 		 */
3135 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3136 		stock->nr_bytes = 0;
3137 	}
3138 
3139 	/*
3140 	 * Flush the vmstat data in current stock
3141 	 */
3142 	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3143 		if (stock->nr_slab_reclaimable_b) {
3144 			mod_objcg_mlstate(old, stock->cached_pgdat,
3145 					  NR_SLAB_RECLAIMABLE_B,
3146 					  stock->nr_slab_reclaimable_b);
3147 			stock->nr_slab_reclaimable_b = 0;
3148 		}
3149 		if (stock->nr_slab_unreclaimable_b) {
3150 			mod_objcg_mlstate(old, stock->cached_pgdat,
3151 					  NR_SLAB_UNRECLAIMABLE_B,
3152 					  stock->nr_slab_unreclaimable_b);
3153 			stock->nr_slab_unreclaimable_b = 0;
3154 		}
3155 		stock->cached_pgdat = NULL;
3156 	}
3157 
3158 	obj_cgroup_put(old);
3159 	stock->cached_objcg = NULL;
3160 }
3161 
3162 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3163 				     struct mem_cgroup *root_memcg)
3164 {
3165 	struct mem_cgroup *memcg;
3166 
3167 	if (in_task() && stock->task_obj.cached_objcg) {
3168 		memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg);
3169 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3170 			return true;
3171 	}
3172 	if (stock->irq_obj.cached_objcg) {
3173 		memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
3174 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3175 			return true;
3176 	}
3177 
3178 	return false;
3179 }
3180 
3181 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3182 			     bool allow_uncharge)
3183 {
3184 	unsigned long flags;
3185 	struct obj_stock *stock = get_obj_stock(&flags);
3186 	unsigned int nr_pages = 0;
3187 
3188 	if (stock->cached_objcg != objcg) { /* reset if necessary */
3189 		drain_obj_stock(stock);
3190 		obj_cgroup_get(objcg);
3191 		stock->cached_objcg = objcg;
3192 		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3193 				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3194 		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3195 	}
3196 	stock->nr_bytes += nr_bytes;
3197 
3198 	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3199 		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3200 		stock->nr_bytes &= (PAGE_SIZE - 1);
3201 	}
3202 
3203 	put_obj_stock(flags);
3204 
3205 	if (nr_pages)
3206 		obj_cgroup_uncharge_pages(objcg, nr_pages);
3207 }
3208 
3209 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3210 {
3211 	unsigned int nr_pages, nr_bytes;
3212 	int ret;
3213 
3214 	if (consume_obj_stock(objcg, size))
3215 		return 0;
3216 
3217 	/*
3218 	 * In theory, objcg->nr_charged_bytes can have enough
3219 	 * pre-charged bytes to satisfy the allocation. However,
3220 	 * flushing objcg->nr_charged_bytes requires two atomic
3221 	 * operations, and objcg->nr_charged_bytes can't be big.
3222 	 * The shared objcg->nr_charged_bytes can also become a
3223 	 * performance bottleneck if all tasks of the same memcg are
3224 	 * trying to update it. So it's better to ignore it and try
3225 	 * grab some new pages. The stock's nr_bytes will be flushed to
3226 	 * objcg->nr_charged_bytes later on when objcg changes.
3227 	 *
3228 	 * The stock's nr_bytes may contain enough pre-charged bytes
3229 	 * to allow one less page from being charged, but we can't rely
3230 	 * on the pre-charged bytes not being changed outside of
3231 	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3232 	 * pre-charged bytes as well when charging pages. To avoid a
3233 	 * page uncharge right after a page charge, we set the
3234 	 * allow_uncharge flag to false when calling refill_obj_stock()
3235 	 * to temporarily allow the pre-charged bytes to exceed the page
3236 	 * size limit. The maximum reachable value of the pre-charged
3237 	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3238 	 * race.
3239 	 */
3240 	nr_pages = size >> PAGE_SHIFT;
3241 	nr_bytes = size & (PAGE_SIZE - 1);
3242 
3243 	if (nr_bytes)
3244 		nr_pages += 1;
3245 
3246 	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3247 	if (!ret && nr_bytes)
3248 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3249 
3250 	return ret;
3251 }
3252 
3253 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3254 {
3255 	refill_obj_stock(objcg, size, true);
3256 }
3257 
3258 #endif /* CONFIG_MEMCG_KMEM */
3259 
3260 /*
3261  * Because page_memcg(head) is not set on tails, set it now.
3262  */
3263 void split_page_memcg(struct page *head, unsigned int nr)
3264 {
3265 	struct mem_cgroup *memcg = page_memcg(head);
3266 	int i;
3267 
3268 	if (mem_cgroup_disabled() || !memcg)
3269 		return;
3270 
3271 	for (i = 1; i < nr; i++)
3272 		head[i].memcg_data = head->memcg_data;
3273 
3274 	if (PageMemcgKmem(head))
3275 		obj_cgroup_get_many(__page_objcg(head), nr - 1);
3276 	else
3277 		css_get_many(&memcg->css, nr - 1);
3278 }
3279 
3280 #ifdef CONFIG_MEMCG_SWAP
3281 /**
3282  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3283  * @entry: swap entry to be moved
3284  * @from:  mem_cgroup which the entry is moved from
3285  * @to:  mem_cgroup which the entry is moved to
3286  *
3287  * It succeeds only when the swap_cgroup's record for this entry is the same
3288  * as the mem_cgroup's id of @from.
3289  *
3290  * Returns 0 on success, -EINVAL on failure.
3291  *
3292  * The caller must have charged to @to, IOW, called page_counter_charge() about
3293  * both res and memsw, and called css_get().
3294  */
3295 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3296 				struct mem_cgroup *from, struct mem_cgroup *to)
3297 {
3298 	unsigned short old_id, new_id;
3299 
3300 	old_id = mem_cgroup_id(from);
3301 	new_id = mem_cgroup_id(to);
3302 
3303 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3304 		mod_memcg_state(from, MEMCG_SWAP, -1);
3305 		mod_memcg_state(to, MEMCG_SWAP, 1);
3306 		return 0;
3307 	}
3308 	return -EINVAL;
3309 }
3310 #else
3311 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3312 				struct mem_cgroup *from, struct mem_cgroup *to)
3313 {
3314 	return -EINVAL;
3315 }
3316 #endif
3317 
3318 static DEFINE_MUTEX(memcg_max_mutex);
3319 
3320 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3321 				 unsigned long max, bool memsw)
3322 {
3323 	bool enlarge = false;
3324 	bool drained = false;
3325 	int ret;
3326 	bool limits_invariant;
3327 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3328 
3329 	do {
3330 		if (signal_pending(current)) {
3331 			ret = -EINTR;
3332 			break;
3333 		}
3334 
3335 		mutex_lock(&memcg_max_mutex);
3336 		/*
3337 		 * Make sure that the new limit (memsw or memory limit) doesn't
3338 		 * break our basic invariant rule memory.max <= memsw.max.
3339 		 */
3340 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3341 					   max <= memcg->memsw.max;
3342 		if (!limits_invariant) {
3343 			mutex_unlock(&memcg_max_mutex);
3344 			ret = -EINVAL;
3345 			break;
3346 		}
3347 		if (max > counter->max)
3348 			enlarge = true;
3349 		ret = page_counter_set_max(counter, max);
3350 		mutex_unlock(&memcg_max_mutex);
3351 
3352 		if (!ret)
3353 			break;
3354 
3355 		if (!drained) {
3356 			drain_all_stock(memcg);
3357 			drained = true;
3358 			continue;
3359 		}
3360 
3361 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3362 					GFP_KERNEL, !memsw)) {
3363 			ret = -EBUSY;
3364 			break;
3365 		}
3366 	} while (true);
3367 
3368 	if (!ret && enlarge)
3369 		memcg_oom_recover(memcg);
3370 
3371 	return ret;
3372 }
3373 
3374 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3375 					    gfp_t gfp_mask,
3376 					    unsigned long *total_scanned)
3377 {
3378 	unsigned long nr_reclaimed = 0;
3379 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3380 	unsigned long reclaimed;
3381 	int loop = 0;
3382 	struct mem_cgroup_tree_per_node *mctz;
3383 	unsigned long excess;
3384 	unsigned long nr_scanned;
3385 
3386 	if (order > 0)
3387 		return 0;
3388 
3389 	mctz = soft_limit_tree_node(pgdat->node_id);
3390 
3391 	/*
3392 	 * Do not even bother to check the largest node if the root
3393 	 * is empty. Do it lockless to prevent lock bouncing. Races
3394 	 * are acceptable as soft limit is best effort anyway.
3395 	 */
3396 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3397 		return 0;
3398 
3399 	/*
3400 	 * This loop can run a while, specially if mem_cgroup's continuously
3401 	 * keep exceeding their soft limit and putting the system under
3402 	 * pressure
3403 	 */
3404 	do {
3405 		if (next_mz)
3406 			mz = next_mz;
3407 		else
3408 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3409 		if (!mz)
3410 			break;
3411 
3412 		nr_scanned = 0;
3413 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3414 						    gfp_mask, &nr_scanned);
3415 		nr_reclaimed += reclaimed;
3416 		*total_scanned += nr_scanned;
3417 		spin_lock_irq(&mctz->lock);
3418 		__mem_cgroup_remove_exceeded(mz, mctz);
3419 
3420 		/*
3421 		 * If we failed to reclaim anything from this memory cgroup
3422 		 * it is time to move on to the next cgroup
3423 		 */
3424 		next_mz = NULL;
3425 		if (!reclaimed)
3426 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3427 
3428 		excess = soft_limit_excess(mz->memcg);
3429 		/*
3430 		 * One school of thought says that we should not add
3431 		 * back the node to the tree if reclaim returns 0.
3432 		 * But our reclaim could return 0, simply because due
3433 		 * to priority we are exposing a smaller subset of
3434 		 * memory to reclaim from. Consider this as a longer
3435 		 * term TODO.
3436 		 */
3437 		/* If excess == 0, no tree ops */
3438 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3439 		spin_unlock_irq(&mctz->lock);
3440 		css_put(&mz->memcg->css);
3441 		loop++;
3442 		/*
3443 		 * Could not reclaim anything and there are no more
3444 		 * mem cgroups to try or we seem to be looping without
3445 		 * reclaiming anything.
3446 		 */
3447 		if (!nr_reclaimed &&
3448 			(next_mz == NULL ||
3449 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3450 			break;
3451 	} while (!nr_reclaimed);
3452 	if (next_mz)
3453 		css_put(&next_mz->memcg->css);
3454 	return nr_reclaimed;
3455 }
3456 
3457 /*
3458  * Reclaims as many pages from the given memcg as possible.
3459  *
3460  * Caller is responsible for holding css reference for memcg.
3461  */
3462 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3463 {
3464 	int nr_retries = MAX_RECLAIM_RETRIES;
3465 
3466 	/* we call try-to-free pages for make this cgroup empty */
3467 	lru_add_drain_all();
3468 
3469 	drain_all_stock(memcg);
3470 
3471 	/* try to free all pages in this cgroup */
3472 	while (nr_retries && page_counter_read(&memcg->memory)) {
3473 		int progress;
3474 
3475 		if (signal_pending(current))
3476 			return -EINTR;
3477 
3478 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3479 							GFP_KERNEL, true);
3480 		if (!progress) {
3481 			nr_retries--;
3482 			/* maybe some writeback is necessary */
3483 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3484 		}
3485 
3486 	}
3487 
3488 	return 0;
3489 }
3490 
3491 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3492 					    char *buf, size_t nbytes,
3493 					    loff_t off)
3494 {
3495 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3496 
3497 	if (mem_cgroup_is_root(memcg))
3498 		return -EINVAL;
3499 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3500 }
3501 
3502 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3503 				     struct cftype *cft)
3504 {
3505 	return 1;
3506 }
3507 
3508 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3509 				      struct cftype *cft, u64 val)
3510 {
3511 	if (val == 1)
3512 		return 0;
3513 
3514 	pr_warn_once("Non-hierarchical mode is deprecated. "
3515 		     "Please report your usecase to linux-mm@kvack.org if you "
3516 		     "depend on this functionality.\n");
3517 
3518 	return -EINVAL;
3519 }
3520 
3521 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3522 {
3523 	unsigned long val;
3524 
3525 	if (mem_cgroup_is_root(memcg)) {
3526 		/* mem_cgroup_threshold() calls here from irqsafe context */
3527 		cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
3528 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3529 			memcg_page_state(memcg, NR_ANON_MAPPED);
3530 		if (swap)
3531 			val += memcg_page_state(memcg, MEMCG_SWAP);
3532 	} else {
3533 		if (!swap)
3534 			val = page_counter_read(&memcg->memory);
3535 		else
3536 			val = page_counter_read(&memcg->memsw);
3537 	}
3538 	return val;
3539 }
3540 
3541 enum {
3542 	RES_USAGE,
3543 	RES_LIMIT,
3544 	RES_MAX_USAGE,
3545 	RES_FAILCNT,
3546 	RES_SOFT_LIMIT,
3547 };
3548 
3549 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3550 			       struct cftype *cft)
3551 {
3552 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3553 	struct page_counter *counter;
3554 
3555 	switch (MEMFILE_TYPE(cft->private)) {
3556 	case _MEM:
3557 		counter = &memcg->memory;
3558 		break;
3559 	case _MEMSWAP:
3560 		counter = &memcg->memsw;
3561 		break;
3562 	case _KMEM:
3563 		counter = &memcg->kmem;
3564 		break;
3565 	case _TCP:
3566 		counter = &memcg->tcpmem;
3567 		break;
3568 	default:
3569 		BUG();
3570 	}
3571 
3572 	switch (MEMFILE_ATTR(cft->private)) {
3573 	case RES_USAGE:
3574 		if (counter == &memcg->memory)
3575 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3576 		if (counter == &memcg->memsw)
3577 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3578 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3579 	case RES_LIMIT:
3580 		return (u64)counter->max * PAGE_SIZE;
3581 	case RES_MAX_USAGE:
3582 		return (u64)counter->watermark * PAGE_SIZE;
3583 	case RES_FAILCNT:
3584 		return counter->failcnt;
3585 	case RES_SOFT_LIMIT:
3586 		return (u64)memcg->soft_limit * PAGE_SIZE;
3587 	default:
3588 		BUG();
3589 	}
3590 }
3591 
3592 #ifdef CONFIG_MEMCG_KMEM
3593 static int memcg_online_kmem(struct mem_cgroup *memcg)
3594 {
3595 	struct obj_cgroup *objcg;
3596 	int memcg_id;
3597 
3598 	if (cgroup_memory_nokmem)
3599 		return 0;
3600 
3601 	BUG_ON(memcg->kmemcg_id >= 0);
3602 	BUG_ON(memcg->kmem_state);
3603 
3604 	memcg_id = memcg_alloc_cache_id();
3605 	if (memcg_id < 0)
3606 		return memcg_id;
3607 
3608 	objcg = obj_cgroup_alloc();
3609 	if (!objcg) {
3610 		memcg_free_cache_id(memcg_id);
3611 		return -ENOMEM;
3612 	}
3613 	objcg->memcg = memcg;
3614 	rcu_assign_pointer(memcg->objcg, objcg);
3615 
3616 	static_branch_enable(&memcg_kmem_enabled_key);
3617 
3618 	memcg->kmemcg_id = memcg_id;
3619 	memcg->kmem_state = KMEM_ONLINE;
3620 
3621 	return 0;
3622 }
3623 
3624 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3625 {
3626 	struct cgroup_subsys_state *css;
3627 	struct mem_cgroup *parent, *child;
3628 	int kmemcg_id;
3629 
3630 	if (memcg->kmem_state != KMEM_ONLINE)
3631 		return;
3632 
3633 	memcg->kmem_state = KMEM_ALLOCATED;
3634 
3635 	parent = parent_mem_cgroup(memcg);
3636 	if (!parent)
3637 		parent = root_mem_cgroup;
3638 
3639 	memcg_reparent_objcgs(memcg, parent);
3640 
3641 	kmemcg_id = memcg->kmemcg_id;
3642 	BUG_ON(kmemcg_id < 0);
3643 
3644 	/*
3645 	 * Change kmemcg_id of this cgroup and all its descendants to the
3646 	 * parent's id, and then move all entries from this cgroup's list_lrus
3647 	 * to ones of the parent. After we have finished, all list_lrus
3648 	 * corresponding to this cgroup are guaranteed to remain empty. The
3649 	 * ordering is imposed by list_lru_node->lock taken by
3650 	 * memcg_drain_all_list_lrus().
3651 	 */
3652 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3653 	css_for_each_descendant_pre(css, &memcg->css) {
3654 		child = mem_cgroup_from_css(css);
3655 		BUG_ON(child->kmemcg_id != kmemcg_id);
3656 		child->kmemcg_id = parent->kmemcg_id;
3657 	}
3658 	rcu_read_unlock();
3659 
3660 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3661 
3662 	memcg_free_cache_id(kmemcg_id);
3663 }
3664 
3665 static void memcg_free_kmem(struct mem_cgroup *memcg)
3666 {
3667 	/* css_alloc() failed, offlining didn't happen */
3668 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3669 		memcg_offline_kmem(memcg);
3670 }
3671 #else
3672 static int memcg_online_kmem(struct mem_cgroup *memcg)
3673 {
3674 	return 0;
3675 }
3676 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3677 {
3678 }
3679 static void memcg_free_kmem(struct mem_cgroup *memcg)
3680 {
3681 }
3682 #endif /* CONFIG_MEMCG_KMEM */
3683 
3684 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3685 				 unsigned long max)
3686 {
3687 	int ret;
3688 
3689 	mutex_lock(&memcg_max_mutex);
3690 	ret = page_counter_set_max(&memcg->kmem, max);
3691 	mutex_unlock(&memcg_max_mutex);
3692 	return ret;
3693 }
3694 
3695 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3696 {
3697 	int ret;
3698 
3699 	mutex_lock(&memcg_max_mutex);
3700 
3701 	ret = page_counter_set_max(&memcg->tcpmem, max);
3702 	if (ret)
3703 		goto out;
3704 
3705 	if (!memcg->tcpmem_active) {
3706 		/*
3707 		 * The active flag needs to be written after the static_key
3708 		 * update. This is what guarantees that the socket activation
3709 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3710 		 * for details, and note that we don't mark any socket as
3711 		 * belonging to this memcg until that flag is up.
3712 		 *
3713 		 * We need to do this, because static_keys will span multiple
3714 		 * sites, but we can't control their order. If we mark a socket
3715 		 * as accounted, but the accounting functions are not patched in
3716 		 * yet, we'll lose accounting.
3717 		 *
3718 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3719 		 * because when this value change, the code to process it is not
3720 		 * patched in yet.
3721 		 */
3722 		static_branch_inc(&memcg_sockets_enabled_key);
3723 		memcg->tcpmem_active = true;
3724 	}
3725 out:
3726 	mutex_unlock(&memcg_max_mutex);
3727 	return ret;
3728 }
3729 
3730 /*
3731  * The user of this function is...
3732  * RES_LIMIT.
3733  */
3734 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3735 				char *buf, size_t nbytes, loff_t off)
3736 {
3737 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3738 	unsigned long nr_pages;
3739 	int ret;
3740 
3741 	buf = strstrip(buf);
3742 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3743 	if (ret)
3744 		return ret;
3745 
3746 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3747 	case RES_LIMIT:
3748 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3749 			ret = -EINVAL;
3750 			break;
3751 		}
3752 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3753 		case _MEM:
3754 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3755 			break;
3756 		case _MEMSWAP:
3757 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3758 			break;
3759 		case _KMEM:
3760 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3761 				     "Please report your usecase to linux-mm@kvack.org if you "
3762 				     "depend on this functionality.\n");
3763 			ret = memcg_update_kmem_max(memcg, nr_pages);
3764 			break;
3765 		case _TCP:
3766 			ret = memcg_update_tcp_max(memcg, nr_pages);
3767 			break;
3768 		}
3769 		break;
3770 	case RES_SOFT_LIMIT:
3771 		memcg->soft_limit = nr_pages;
3772 		ret = 0;
3773 		break;
3774 	}
3775 	return ret ?: nbytes;
3776 }
3777 
3778 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3779 				size_t nbytes, loff_t off)
3780 {
3781 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3782 	struct page_counter *counter;
3783 
3784 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3785 	case _MEM:
3786 		counter = &memcg->memory;
3787 		break;
3788 	case _MEMSWAP:
3789 		counter = &memcg->memsw;
3790 		break;
3791 	case _KMEM:
3792 		counter = &memcg->kmem;
3793 		break;
3794 	case _TCP:
3795 		counter = &memcg->tcpmem;
3796 		break;
3797 	default:
3798 		BUG();
3799 	}
3800 
3801 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3802 	case RES_MAX_USAGE:
3803 		page_counter_reset_watermark(counter);
3804 		break;
3805 	case RES_FAILCNT:
3806 		counter->failcnt = 0;
3807 		break;
3808 	default:
3809 		BUG();
3810 	}
3811 
3812 	return nbytes;
3813 }
3814 
3815 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3816 					struct cftype *cft)
3817 {
3818 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3819 }
3820 
3821 #ifdef CONFIG_MMU
3822 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3823 					struct cftype *cft, u64 val)
3824 {
3825 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3826 
3827 	if (val & ~MOVE_MASK)
3828 		return -EINVAL;
3829 
3830 	/*
3831 	 * No kind of locking is needed in here, because ->can_attach() will
3832 	 * check this value once in the beginning of the process, and then carry
3833 	 * on with stale data. This means that changes to this value will only
3834 	 * affect task migrations starting after the change.
3835 	 */
3836 	memcg->move_charge_at_immigrate = val;
3837 	return 0;
3838 }
3839 #else
3840 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3841 					struct cftype *cft, u64 val)
3842 {
3843 	return -ENOSYS;
3844 }
3845 #endif
3846 
3847 #ifdef CONFIG_NUMA
3848 
3849 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3850 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3851 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3852 
3853 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3854 				int nid, unsigned int lru_mask, bool tree)
3855 {
3856 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3857 	unsigned long nr = 0;
3858 	enum lru_list lru;
3859 
3860 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3861 
3862 	for_each_lru(lru) {
3863 		if (!(BIT(lru) & lru_mask))
3864 			continue;
3865 		if (tree)
3866 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3867 		else
3868 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3869 	}
3870 	return nr;
3871 }
3872 
3873 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3874 					     unsigned int lru_mask,
3875 					     bool tree)
3876 {
3877 	unsigned long nr = 0;
3878 	enum lru_list lru;
3879 
3880 	for_each_lru(lru) {
3881 		if (!(BIT(lru) & lru_mask))
3882 			continue;
3883 		if (tree)
3884 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3885 		else
3886 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3887 	}
3888 	return nr;
3889 }
3890 
3891 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3892 {
3893 	struct numa_stat {
3894 		const char *name;
3895 		unsigned int lru_mask;
3896 	};
3897 
3898 	static const struct numa_stat stats[] = {
3899 		{ "total", LRU_ALL },
3900 		{ "file", LRU_ALL_FILE },
3901 		{ "anon", LRU_ALL_ANON },
3902 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3903 	};
3904 	const struct numa_stat *stat;
3905 	int nid;
3906 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3907 
3908 	cgroup_rstat_flush(memcg->css.cgroup);
3909 
3910 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3911 		seq_printf(m, "%s=%lu", stat->name,
3912 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3913 						   false));
3914 		for_each_node_state(nid, N_MEMORY)
3915 			seq_printf(m, " N%d=%lu", nid,
3916 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3917 							stat->lru_mask, false));
3918 		seq_putc(m, '\n');
3919 	}
3920 
3921 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3922 
3923 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
3924 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3925 						   true));
3926 		for_each_node_state(nid, N_MEMORY)
3927 			seq_printf(m, " N%d=%lu", nid,
3928 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3929 							stat->lru_mask, true));
3930 		seq_putc(m, '\n');
3931 	}
3932 
3933 	return 0;
3934 }
3935 #endif /* CONFIG_NUMA */
3936 
3937 static const unsigned int memcg1_stats[] = {
3938 	NR_FILE_PAGES,
3939 	NR_ANON_MAPPED,
3940 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3941 	NR_ANON_THPS,
3942 #endif
3943 	NR_SHMEM,
3944 	NR_FILE_MAPPED,
3945 	NR_FILE_DIRTY,
3946 	NR_WRITEBACK,
3947 	MEMCG_SWAP,
3948 };
3949 
3950 static const char *const memcg1_stat_names[] = {
3951 	"cache",
3952 	"rss",
3953 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3954 	"rss_huge",
3955 #endif
3956 	"shmem",
3957 	"mapped_file",
3958 	"dirty",
3959 	"writeback",
3960 	"swap",
3961 };
3962 
3963 /* Universal VM events cgroup1 shows, original sort order */
3964 static const unsigned int memcg1_events[] = {
3965 	PGPGIN,
3966 	PGPGOUT,
3967 	PGFAULT,
3968 	PGMAJFAULT,
3969 };
3970 
3971 static int memcg_stat_show(struct seq_file *m, void *v)
3972 {
3973 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3974 	unsigned long memory, memsw;
3975 	struct mem_cgroup *mi;
3976 	unsigned int i;
3977 
3978 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3979 
3980 	cgroup_rstat_flush(memcg->css.cgroup);
3981 
3982 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3983 		unsigned long nr;
3984 
3985 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3986 			continue;
3987 		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
3988 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
3989 	}
3990 
3991 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3992 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
3993 			   memcg_events_local(memcg, memcg1_events[i]));
3994 
3995 	for (i = 0; i < NR_LRU_LISTS; i++)
3996 		seq_printf(m, "%s %lu\n", lru_list_name(i),
3997 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3998 			   PAGE_SIZE);
3999 
4000 	/* Hierarchical information */
4001 	memory = memsw = PAGE_COUNTER_MAX;
4002 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4003 		memory = min(memory, READ_ONCE(mi->memory.max));
4004 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4005 	}
4006 	seq_printf(m, "hierarchical_memory_limit %llu\n",
4007 		   (u64)memory * PAGE_SIZE);
4008 	if (do_memsw_account())
4009 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4010 			   (u64)memsw * PAGE_SIZE);
4011 
4012 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4013 		unsigned long nr;
4014 
4015 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4016 			continue;
4017 		nr = memcg_page_state(memcg, memcg1_stats[i]);
4018 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4019 						(u64)nr * PAGE_SIZE);
4020 	}
4021 
4022 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4023 		seq_printf(m, "total_%s %llu\n",
4024 			   vm_event_name(memcg1_events[i]),
4025 			   (u64)memcg_events(memcg, memcg1_events[i]));
4026 
4027 	for (i = 0; i < NR_LRU_LISTS; i++)
4028 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4029 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4030 			   PAGE_SIZE);
4031 
4032 #ifdef CONFIG_DEBUG_VM
4033 	{
4034 		pg_data_t *pgdat;
4035 		struct mem_cgroup_per_node *mz;
4036 		unsigned long anon_cost = 0;
4037 		unsigned long file_cost = 0;
4038 
4039 		for_each_online_pgdat(pgdat) {
4040 			mz = memcg->nodeinfo[pgdat->node_id];
4041 
4042 			anon_cost += mz->lruvec.anon_cost;
4043 			file_cost += mz->lruvec.file_cost;
4044 		}
4045 		seq_printf(m, "anon_cost %lu\n", anon_cost);
4046 		seq_printf(m, "file_cost %lu\n", file_cost);
4047 	}
4048 #endif
4049 
4050 	return 0;
4051 }
4052 
4053 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4054 				      struct cftype *cft)
4055 {
4056 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4057 
4058 	return mem_cgroup_swappiness(memcg);
4059 }
4060 
4061 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4062 				       struct cftype *cft, u64 val)
4063 {
4064 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4065 
4066 	if (val > 200)
4067 		return -EINVAL;
4068 
4069 	if (!mem_cgroup_is_root(memcg))
4070 		memcg->swappiness = val;
4071 	else
4072 		vm_swappiness = val;
4073 
4074 	return 0;
4075 }
4076 
4077 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4078 {
4079 	struct mem_cgroup_threshold_ary *t;
4080 	unsigned long usage;
4081 	int i;
4082 
4083 	rcu_read_lock();
4084 	if (!swap)
4085 		t = rcu_dereference(memcg->thresholds.primary);
4086 	else
4087 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4088 
4089 	if (!t)
4090 		goto unlock;
4091 
4092 	usage = mem_cgroup_usage(memcg, swap);
4093 
4094 	/*
4095 	 * current_threshold points to threshold just below or equal to usage.
4096 	 * If it's not true, a threshold was crossed after last
4097 	 * call of __mem_cgroup_threshold().
4098 	 */
4099 	i = t->current_threshold;
4100 
4101 	/*
4102 	 * Iterate backward over array of thresholds starting from
4103 	 * current_threshold and check if a threshold is crossed.
4104 	 * If none of thresholds below usage is crossed, we read
4105 	 * only one element of the array here.
4106 	 */
4107 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4108 		eventfd_signal(t->entries[i].eventfd, 1);
4109 
4110 	/* i = current_threshold + 1 */
4111 	i++;
4112 
4113 	/*
4114 	 * Iterate forward over array of thresholds starting from
4115 	 * current_threshold+1 and check if a threshold is crossed.
4116 	 * If none of thresholds above usage is crossed, we read
4117 	 * only one element of the array here.
4118 	 */
4119 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4120 		eventfd_signal(t->entries[i].eventfd, 1);
4121 
4122 	/* Update current_threshold */
4123 	t->current_threshold = i - 1;
4124 unlock:
4125 	rcu_read_unlock();
4126 }
4127 
4128 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4129 {
4130 	while (memcg) {
4131 		__mem_cgroup_threshold(memcg, false);
4132 		if (do_memsw_account())
4133 			__mem_cgroup_threshold(memcg, true);
4134 
4135 		memcg = parent_mem_cgroup(memcg);
4136 	}
4137 }
4138 
4139 static int compare_thresholds(const void *a, const void *b)
4140 {
4141 	const struct mem_cgroup_threshold *_a = a;
4142 	const struct mem_cgroup_threshold *_b = b;
4143 
4144 	if (_a->threshold > _b->threshold)
4145 		return 1;
4146 
4147 	if (_a->threshold < _b->threshold)
4148 		return -1;
4149 
4150 	return 0;
4151 }
4152 
4153 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4154 {
4155 	struct mem_cgroup_eventfd_list *ev;
4156 
4157 	spin_lock(&memcg_oom_lock);
4158 
4159 	list_for_each_entry(ev, &memcg->oom_notify, list)
4160 		eventfd_signal(ev->eventfd, 1);
4161 
4162 	spin_unlock(&memcg_oom_lock);
4163 	return 0;
4164 }
4165 
4166 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4167 {
4168 	struct mem_cgroup *iter;
4169 
4170 	for_each_mem_cgroup_tree(iter, memcg)
4171 		mem_cgroup_oom_notify_cb(iter);
4172 }
4173 
4174 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4175 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4176 {
4177 	struct mem_cgroup_thresholds *thresholds;
4178 	struct mem_cgroup_threshold_ary *new;
4179 	unsigned long threshold;
4180 	unsigned long usage;
4181 	int i, size, ret;
4182 
4183 	ret = page_counter_memparse(args, "-1", &threshold);
4184 	if (ret)
4185 		return ret;
4186 
4187 	mutex_lock(&memcg->thresholds_lock);
4188 
4189 	if (type == _MEM) {
4190 		thresholds = &memcg->thresholds;
4191 		usage = mem_cgroup_usage(memcg, false);
4192 	} else if (type == _MEMSWAP) {
4193 		thresholds = &memcg->memsw_thresholds;
4194 		usage = mem_cgroup_usage(memcg, true);
4195 	} else
4196 		BUG();
4197 
4198 	/* Check if a threshold crossed before adding a new one */
4199 	if (thresholds->primary)
4200 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4201 
4202 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4203 
4204 	/* Allocate memory for new array of thresholds */
4205 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4206 	if (!new) {
4207 		ret = -ENOMEM;
4208 		goto unlock;
4209 	}
4210 	new->size = size;
4211 
4212 	/* Copy thresholds (if any) to new array */
4213 	if (thresholds->primary)
4214 		memcpy(new->entries, thresholds->primary->entries,
4215 		       flex_array_size(new, entries, size - 1));
4216 
4217 	/* Add new threshold */
4218 	new->entries[size - 1].eventfd = eventfd;
4219 	new->entries[size - 1].threshold = threshold;
4220 
4221 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4222 	sort(new->entries, size, sizeof(*new->entries),
4223 			compare_thresholds, NULL);
4224 
4225 	/* Find current threshold */
4226 	new->current_threshold = -1;
4227 	for (i = 0; i < size; i++) {
4228 		if (new->entries[i].threshold <= usage) {
4229 			/*
4230 			 * new->current_threshold will not be used until
4231 			 * rcu_assign_pointer(), so it's safe to increment
4232 			 * it here.
4233 			 */
4234 			++new->current_threshold;
4235 		} else
4236 			break;
4237 	}
4238 
4239 	/* Free old spare buffer and save old primary buffer as spare */
4240 	kfree(thresholds->spare);
4241 	thresholds->spare = thresholds->primary;
4242 
4243 	rcu_assign_pointer(thresholds->primary, new);
4244 
4245 	/* To be sure that nobody uses thresholds */
4246 	synchronize_rcu();
4247 
4248 unlock:
4249 	mutex_unlock(&memcg->thresholds_lock);
4250 
4251 	return ret;
4252 }
4253 
4254 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4255 	struct eventfd_ctx *eventfd, const char *args)
4256 {
4257 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4258 }
4259 
4260 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4261 	struct eventfd_ctx *eventfd, const char *args)
4262 {
4263 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4264 }
4265 
4266 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4267 	struct eventfd_ctx *eventfd, enum res_type type)
4268 {
4269 	struct mem_cgroup_thresholds *thresholds;
4270 	struct mem_cgroup_threshold_ary *new;
4271 	unsigned long usage;
4272 	int i, j, size, entries;
4273 
4274 	mutex_lock(&memcg->thresholds_lock);
4275 
4276 	if (type == _MEM) {
4277 		thresholds = &memcg->thresholds;
4278 		usage = mem_cgroup_usage(memcg, false);
4279 	} else if (type == _MEMSWAP) {
4280 		thresholds = &memcg->memsw_thresholds;
4281 		usage = mem_cgroup_usage(memcg, true);
4282 	} else
4283 		BUG();
4284 
4285 	if (!thresholds->primary)
4286 		goto unlock;
4287 
4288 	/* Check if a threshold crossed before removing */
4289 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4290 
4291 	/* Calculate new number of threshold */
4292 	size = entries = 0;
4293 	for (i = 0; i < thresholds->primary->size; i++) {
4294 		if (thresholds->primary->entries[i].eventfd != eventfd)
4295 			size++;
4296 		else
4297 			entries++;
4298 	}
4299 
4300 	new = thresholds->spare;
4301 
4302 	/* If no items related to eventfd have been cleared, nothing to do */
4303 	if (!entries)
4304 		goto unlock;
4305 
4306 	/* Set thresholds array to NULL if we don't have thresholds */
4307 	if (!size) {
4308 		kfree(new);
4309 		new = NULL;
4310 		goto swap_buffers;
4311 	}
4312 
4313 	new->size = size;
4314 
4315 	/* Copy thresholds and find current threshold */
4316 	new->current_threshold = -1;
4317 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4318 		if (thresholds->primary->entries[i].eventfd == eventfd)
4319 			continue;
4320 
4321 		new->entries[j] = thresholds->primary->entries[i];
4322 		if (new->entries[j].threshold <= usage) {
4323 			/*
4324 			 * new->current_threshold will not be used
4325 			 * until rcu_assign_pointer(), so it's safe to increment
4326 			 * it here.
4327 			 */
4328 			++new->current_threshold;
4329 		}
4330 		j++;
4331 	}
4332 
4333 swap_buffers:
4334 	/* Swap primary and spare array */
4335 	thresholds->spare = thresholds->primary;
4336 
4337 	rcu_assign_pointer(thresholds->primary, new);
4338 
4339 	/* To be sure that nobody uses thresholds */
4340 	synchronize_rcu();
4341 
4342 	/* If all events are unregistered, free the spare array */
4343 	if (!new) {
4344 		kfree(thresholds->spare);
4345 		thresholds->spare = NULL;
4346 	}
4347 unlock:
4348 	mutex_unlock(&memcg->thresholds_lock);
4349 }
4350 
4351 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4352 	struct eventfd_ctx *eventfd)
4353 {
4354 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4355 }
4356 
4357 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4358 	struct eventfd_ctx *eventfd)
4359 {
4360 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4361 }
4362 
4363 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4364 	struct eventfd_ctx *eventfd, const char *args)
4365 {
4366 	struct mem_cgroup_eventfd_list *event;
4367 
4368 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4369 	if (!event)
4370 		return -ENOMEM;
4371 
4372 	spin_lock(&memcg_oom_lock);
4373 
4374 	event->eventfd = eventfd;
4375 	list_add(&event->list, &memcg->oom_notify);
4376 
4377 	/* already in OOM ? */
4378 	if (memcg->under_oom)
4379 		eventfd_signal(eventfd, 1);
4380 	spin_unlock(&memcg_oom_lock);
4381 
4382 	return 0;
4383 }
4384 
4385 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4386 	struct eventfd_ctx *eventfd)
4387 {
4388 	struct mem_cgroup_eventfd_list *ev, *tmp;
4389 
4390 	spin_lock(&memcg_oom_lock);
4391 
4392 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4393 		if (ev->eventfd == eventfd) {
4394 			list_del(&ev->list);
4395 			kfree(ev);
4396 		}
4397 	}
4398 
4399 	spin_unlock(&memcg_oom_lock);
4400 }
4401 
4402 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4403 {
4404 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4405 
4406 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4407 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4408 	seq_printf(sf, "oom_kill %lu\n",
4409 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4410 	return 0;
4411 }
4412 
4413 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4414 	struct cftype *cft, u64 val)
4415 {
4416 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4417 
4418 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4419 	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4420 		return -EINVAL;
4421 
4422 	memcg->oom_kill_disable = val;
4423 	if (!val)
4424 		memcg_oom_recover(memcg);
4425 
4426 	return 0;
4427 }
4428 
4429 #ifdef CONFIG_CGROUP_WRITEBACK
4430 
4431 #include <trace/events/writeback.h>
4432 
4433 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4434 {
4435 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4436 }
4437 
4438 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4439 {
4440 	wb_domain_exit(&memcg->cgwb_domain);
4441 }
4442 
4443 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4444 {
4445 	wb_domain_size_changed(&memcg->cgwb_domain);
4446 }
4447 
4448 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4449 {
4450 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4451 
4452 	if (!memcg->css.parent)
4453 		return NULL;
4454 
4455 	return &memcg->cgwb_domain;
4456 }
4457 
4458 /**
4459  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4460  * @wb: bdi_writeback in question
4461  * @pfilepages: out parameter for number of file pages
4462  * @pheadroom: out parameter for number of allocatable pages according to memcg
4463  * @pdirty: out parameter for number of dirty pages
4464  * @pwriteback: out parameter for number of pages under writeback
4465  *
4466  * Determine the numbers of file, headroom, dirty, and writeback pages in
4467  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4468  * is a bit more involved.
4469  *
4470  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4471  * headroom is calculated as the lowest headroom of itself and the
4472  * ancestors.  Note that this doesn't consider the actual amount of
4473  * available memory in the system.  The caller should further cap
4474  * *@pheadroom accordingly.
4475  */
4476 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4477 			 unsigned long *pheadroom, unsigned long *pdirty,
4478 			 unsigned long *pwriteback)
4479 {
4480 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4481 	struct mem_cgroup *parent;
4482 
4483 	cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
4484 
4485 	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4486 	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4487 	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4488 			memcg_page_state(memcg, NR_ACTIVE_FILE);
4489 
4490 	*pheadroom = PAGE_COUNTER_MAX;
4491 	while ((parent = parent_mem_cgroup(memcg))) {
4492 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4493 					    READ_ONCE(memcg->memory.high));
4494 		unsigned long used = page_counter_read(&memcg->memory);
4495 
4496 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4497 		memcg = parent;
4498 	}
4499 }
4500 
4501 /*
4502  * Foreign dirty flushing
4503  *
4504  * There's an inherent mismatch between memcg and writeback.  The former
4505  * tracks ownership per-page while the latter per-inode.  This was a
4506  * deliberate design decision because honoring per-page ownership in the
4507  * writeback path is complicated, may lead to higher CPU and IO overheads
4508  * and deemed unnecessary given that write-sharing an inode across
4509  * different cgroups isn't a common use-case.
4510  *
4511  * Combined with inode majority-writer ownership switching, this works well
4512  * enough in most cases but there are some pathological cases.  For
4513  * example, let's say there are two cgroups A and B which keep writing to
4514  * different but confined parts of the same inode.  B owns the inode and
4515  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4516  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4517  * triggering background writeback.  A will be slowed down without a way to
4518  * make writeback of the dirty pages happen.
4519  *
4520  * Conditions like the above can lead to a cgroup getting repeatedly and
4521  * severely throttled after making some progress after each
4522  * dirty_expire_interval while the underlying IO device is almost
4523  * completely idle.
4524  *
4525  * Solving this problem completely requires matching the ownership tracking
4526  * granularities between memcg and writeback in either direction.  However,
4527  * the more egregious behaviors can be avoided by simply remembering the
4528  * most recent foreign dirtying events and initiating remote flushes on
4529  * them when local writeback isn't enough to keep the memory clean enough.
4530  *
4531  * The following two functions implement such mechanism.  When a foreign
4532  * page - a page whose memcg and writeback ownerships don't match - is
4533  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4534  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4535  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4536  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4537  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4538  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4539  * limited to MEMCG_CGWB_FRN_CNT.
4540  *
4541  * The mechanism only remembers IDs and doesn't hold any object references.
4542  * As being wrong occasionally doesn't matter, updates and accesses to the
4543  * records are lockless and racy.
4544  */
4545 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4546 					     struct bdi_writeback *wb)
4547 {
4548 	struct mem_cgroup *memcg = page_memcg(page);
4549 	struct memcg_cgwb_frn *frn;
4550 	u64 now = get_jiffies_64();
4551 	u64 oldest_at = now;
4552 	int oldest = -1;
4553 	int i;
4554 
4555 	trace_track_foreign_dirty(page, wb);
4556 
4557 	/*
4558 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4559 	 * using it.  If not replace the oldest one which isn't being
4560 	 * written out.
4561 	 */
4562 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4563 		frn = &memcg->cgwb_frn[i];
4564 		if (frn->bdi_id == wb->bdi->id &&
4565 		    frn->memcg_id == wb->memcg_css->id)
4566 			break;
4567 		if (time_before64(frn->at, oldest_at) &&
4568 		    atomic_read(&frn->done.cnt) == 1) {
4569 			oldest = i;
4570 			oldest_at = frn->at;
4571 		}
4572 	}
4573 
4574 	if (i < MEMCG_CGWB_FRN_CNT) {
4575 		/*
4576 		 * Re-using an existing one.  Update timestamp lazily to
4577 		 * avoid making the cacheline hot.  We want them to be
4578 		 * reasonably up-to-date and significantly shorter than
4579 		 * dirty_expire_interval as that's what expires the record.
4580 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4581 		 */
4582 		unsigned long update_intv =
4583 			min_t(unsigned long, HZ,
4584 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4585 
4586 		if (time_before64(frn->at, now - update_intv))
4587 			frn->at = now;
4588 	} else if (oldest >= 0) {
4589 		/* replace the oldest free one */
4590 		frn = &memcg->cgwb_frn[oldest];
4591 		frn->bdi_id = wb->bdi->id;
4592 		frn->memcg_id = wb->memcg_css->id;
4593 		frn->at = now;
4594 	}
4595 }
4596 
4597 /* issue foreign writeback flushes for recorded foreign dirtying events */
4598 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4599 {
4600 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4601 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4602 	u64 now = jiffies_64;
4603 	int i;
4604 
4605 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4606 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4607 
4608 		/*
4609 		 * If the record is older than dirty_expire_interval,
4610 		 * writeback on it has already started.  No need to kick it
4611 		 * off again.  Also, don't start a new one if there's
4612 		 * already one in flight.
4613 		 */
4614 		if (time_after64(frn->at, now - intv) &&
4615 		    atomic_read(&frn->done.cnt) == 1) {
4616 			frn->at = 0;
4617 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4618 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4619 					       WB_REASON_FOREIGN_FLUSH,
4620 					       &frn->done);
4621 		}
4622 	}
4623 }
4624 
4625 #else	/* CONFIG_CGROUP_WRITEBACK */
4626 
4627 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4628 {
4629 	return 0;
4630 }
4631 
4632 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4633 {
4634 }
4635 
4636 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4637 {
4638 }
4639 
4640 #endif	/* CONFIG_CGROUP_WRITEBACK */
4641 
4642 /*
4643  * DO NOT USE IN NEW FILES.
4644  *
4645  * "cgroup.event_control" implementation.
4646  *
4647  * This is way over-engineered.  It tries to support fully configurable
4648  * events for each user.  Such level of flexibility is completely
4649  * unnecessary especially in the light of the planned unified hierarchy.
4650  *
4651  * Please deprecate this and replace with something simpler if at all
4652  * possible.
4653  */
4654 
4655 /*
4656  * Unregister event and free resources.
4657  *
4658  * Gets called from workqueue.
4659  */
4660 static void memcg_event_remove(struct work_struct *work)
4661 {
4662 	struct mem_cgroup_event *event =
4663 		container_of(work, struct mem_cgroup_event, remove);
4664 	struct mem_cgroup *memcg = event->memcg;
4665 
4666 	remove_wait_queue(event->wqh, &event->wait);
4667 
4668 	event->unregister_event(memcg, event->eventfd);
4669 
4670 	/* Notify userspace the event is going away. */
4671 	eventfd_signal(event->eventfd, 1);
4672 
4673 	eventfd_ctx_put(event->eventfd);
4674 	kfree(event);
4675 	css_put(&memcg->css);
4676 }
4677 
4678 /*
4679  * Gets called on EPOLLHUP on eventfd when user closes it.
4680  *
4681  * Called with wqh->lock held and interrupts disabled.
4682  */
4683 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4684 			    int sync, void *key)
4685 {
4686 	struct mem_cgroup_event *event =
4687 		container_of(wait, struct mem_cgroup_event, wait);
4688 	struct mem_cgroup *memcg = event->memcg;
4689 	__poll_t flags = key_to_poll(key);
4690 
4691 	if (flags & EPOLLHUP) {
4692 		/*
4693 		 * If the event has been detached at cgroup removal, we
4694 		 * can simply return knowing the other side will cleanup
4695 		 * for us.
4696 		 *
4697 		 * We can't race against event freeing since the other
4698 		 * side will require wqh->lock via remove_wait_queue(),
4699 		 * which we hold.
4700 		 */
4701 		spin_lock(&memcg->event_list_lock);
4702 		if (!list_empty(&event->list)) {
4703 			list_del_init(&event->list);
4704 			/*
4705 			 * We are in atomic context, but cgroup_event_remove()
4706 			 * may sleep, so we have to call it in workqueue.
4707 			 */
4708 			schedule_work(&event->remove);
4709 		}
4710 		spin_unlock(&memcg->event_list_lock);
4711 	}
4712 
4713 	return 0;
4714 }
4715 
4716 static void memcg_event_ptable_queue_proc(struct file *file,
4717 		wait_queue_head_t *wqh, poll_table *pt)
4718 {
4719 	struct mem_cgroup_event *event =
4720 		container_of(pt, struct mem_cgroup_event, pt);
4721 
4722 	event->wqh = wqh;
4723 	add_wait_queue(wqh, &event->wait);
4724 }
4725 
4726 /*
4727  * DO NOT USE IN NEW FILES.
4728  *
4729  * Parse input and register new cgroup event handler.
4730  *
4731  * Input must be in format '<event_fd> <control_fd> <args>'.
4732  * Interpretation of args is defined by control file implementation.
4733  */
4734 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4735 					 char *buf, size_t nbytes, loff_t off)
4736 {
4737 	struct cgroup_subsys_state *css = of_css(of);
4738 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4739 	struct mem_cgroup_event *event;
4740 	struct cgroup_subsys_state *cfile_css;
4741 	unsigned int efd, cfd;
4742 	struct fd efile;
4743 	struct fd cfile;
4744 	const char *name;
4745 	char *endp;
4746 	int ret;
4747 
4748 	buf = strstrip(buf);
4749 
4750 	efd = simple_strtoul(buf, &endp, 10);
4751 	if (*endp != ' ')
4752 		return -EINVAL;
4753 	buf = endp + 1;
4754 
4755 	cfd = simple_strtoul(buf, &endp, 10);
4756 	if ((*endp != ' ') && (*endp != '\0'))
4757 		return -EINVAL;
4758 	buf = endp + 1;
4759 
4760 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4761 	if (!event)
4762 		return -ENOMEM;
4763 
4764 	event->memcg = memcg;
4765 	INIT_LIST_HEAD(&event->list);
4766 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4767 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4768 	INIT_WORK(&event->remove, memcg_event_remove);
4769 
4770 	efile = fdget(efd);
4771 	if (!efile.file) {
4772 		ret = -EBADF;
4773 		goto out_kfree;
4774 	}
4775 
4776 	event->eventfd = eventfd_ctx_fileget(efile.file);
4777 	if (IS_ERR(event->eventfd)) {
4778 		ret = PTR_ERR(event->eventfd);
4779 		goto out_put_efile;
4780 	}
4781 
4782 	cfile = fdget(cfd);
4783 	if (!cfile.file) {
4784 		ret = -EBADF;
4785 		goto out_put_eventfd;
4786 	}
4787 
4788 	/* the process need read permission on control file */
4789 	/* AV: shouldn't we check that it's been opened for read instead? */
4790 	ret = file_permission(cfile.file, MAY_READ);
4791 	if (ret < 0)
4792 		goto out_put_cfile;
4793 
4794 	/*
4795 	 * Determine the event callbacks and set them in @event.  This used
4796 	 * to be done via struct cftype but cgroup core no longer knows
4797 	 * about these events.  The following is crude but the whole thing
4798 	 * is for compatibility anyway.
4799 	 *
4800 	 * DO NOT ADD NEW FILES.
4801 	 */
4802 	name = cfile.file->f_path.dentry->d_name.name;
4803 
4804 	if (!strcmp(name, "memory.usage_in_bytes")) {
4805 		event->register_event = mem_cgroup_usage_register_event;
4806 		event->unregister_event = mem_cgroup_usage_unregister_event;
4807 	} else if (!strcmp(name, "memory.oom_control")) {
4808 		event->register_event = mem_cgroup_oom_register_event;
4809 		event->unregister_event = mem_cgroup_oom_unregister_event;
4810 	} else if (!strcmp(name, "memory.pressure_level")) {
4811 		event->register_event = vmpressure_register_event;
4812 		event->unregister_event = vmpressure_unregister_event;
4813 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4814 		event->register_event = memsw_cgroup_usage_register_event;
4815 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4816 	} else {
4817 		ret = -EINVAL;
4818 		goto out_put_cfile;
4819 	}
4820 
4821 	/*
4822 	 * Verify @cfile should belong to @css.  Also, remaining events are
4823 	 * automatically removed on cgroup destruction but the removal is
4824 	 * asynchronous, so take an extra ref on @css.
4825 	 */
4826 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4827 					       &memory_cgrp_subsys);
4828 	ret = -EINVAL;
4829 	if (IS_ERR(cfile_css))
4830 		goto out_put_cfile;
4831 	if (cfile_css != css) {
4832 		css_put(cfile_css);
4833 		goto out_put_cfile;
4834 	}
4835 
4836 	ret = event->register_event(memcg, event->eventfd, buf);
4837 	if (ret)
4838 		goto out_put_css;
4839 
4840 	vfs_poll(efile.file, &event->pt);
4841 
4842 	spin_lock_irq(&memcg->event_list_lock);
4843 	list_add(&event->list, &memcg->event_list);
4844 	spin_unlock_irq(&memcg->event_list_lock);
4845 
4846 	fdput(cfile);
4847 	fdput(efile);
4848 
4849 	return nbytes;
4850 
4851 out_put_css:
4852 	css_put(css);
4853 out_put_cfile:
4854 	fdput(cfile);
4855 out_put_eventfd:
4856 	eventfd_ctx_put(event->eventfd);
4857 out_put_efile:
4858 	fdput(efile);
4859 out_kfree:
4860 	kfree(event);
4861 
4862 	return ret;
4863 }
4864 
4865 static struct cftype mem_cgroup_legacy_files[] = {
4866 	{
4867 		.name = "usage_in_bytes",
4868 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4869 		.read_u64 = mem_cgroup_read_u64,
4870 	},
4871 	{
4872 		.name = "max_usage_in_bytes",
4873 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4874 		.write = mem_cgroup_reset,
4875 		.read_u64 = mem_cgroup_read_u64,
4876 	},
4877 	{
4878 		.name = "limit_in_bytes",
4879 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4880 		.write = mem_cgroup_write,
4881 		.read_u64 = mem_cgroup_read_u64,
4882 	},
4883 	{
4884 		.name = "soft_limit_in_bytes",
4885 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4886 		.write = mem_cgroup_write,
4887 		.read_u64 = mem_cgroup_read_u64,
4888 	},
4889 	{
4890 		.name = "failcnt",
4891 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4892 		.write = mem_cgroup_reset,
4893 		.read_u64 = mem_cgroup_read_u64,
4894 	},
4895 	{
4896 		.name = "stat",
4897 		.seq_show = memcg_stat_show,
4898 	},
4899 	{
4900 		.name = "force_empty",
4901 		.write = mem_cgroup_force_empty_write,
4902 	},
4903 	{
4904 		.name = "use_hierarchy",
4905 		.write_u64 = mem_cgroup_hierarchy_write,
4906 		.read_u64 = mem_cgroup_hierarchy_read,
4907 	},
4908 	{
4909 		.name = "cgroup.event_control",		/* XXX: for compat */
4910 		.write = memcg_write_event_control,
4911 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4912 	},
4913 	{
4914 		.name = "swappiness",
4915 		.read_u64 = mem_cgroup_swappiness_read,
4916 		.write_u64 = mem_cgroup_swappiness_write,
4917 	},
4918 	{
4919 		.name = "move_charge_at_immigrate",
4920 		.read_u64 = mem_cgroup_move_charge_read,
4921 		.write_u64 = mem_cgroup_move_charge_write,
4922 	},
4923 	{
4924 		.name = "oom_control",
4925 		.seq_show = mem_cgroup_oom_control_read,
4926 		.write_u64 = mem_cgroup_oom_control_write,
4927 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4928 	},
4929 	{
4930 		.name = "pressure_level",
4931 	},
4932 #ifdef CONFIG_NUMA
4933 	{
4934 		.name = "numa_stat",
4935 		.seq_show = memcg_numa_stat_show,
4936 	},
4937 #endif
4938 	{
4939 		.name = "kmem.limit_in_bytes",
4940 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4941 		.write = mem_cgroup_write,
4942 		.read_u64 = mem_cgroup_read_u64,
4943 	},
4944 	{
4945 		.name = "kmem.usage_in_bytes",
4946 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4947 		.read_u64 = mem_cgroup_read_u64,
4948 	},
4949 	{
4950 		.name = "kmem.failcnt",
4951 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4952 		.write = mem_cgroup_reset,
4953 		.read_u64 = mem_cgroup_read_u64,
4954 	},
4955 	{
4956 		.name = "kmem.max_usage_in_bytes",
4957 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4958 		.write = mem_cgroup_reset,
4959 		.read_u64 = mem_cgroup_read_u64,
4960 	},
4961 #if defined(CONFIG_MEMCG_KMEM) && \
4962 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4963 	{
4964 		.name = "kmem.slabinfo",
4965 		.seq_show = memcg_slab_show,
4966 	},
4967 #endif
4968 	{
4969 		.name = "kmem.tcp.limit_in_bytes",
4970 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4971 		.write = mem_cgroup_write,
4972 		.read_u64 = mem_cgroup_read_u64,
4973 	},
4974 	{
4975 		.name = "kmem.tcp.usage_in_bytes",
4976 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4977 		.read_u64 = mem_cgroup_read_u64,
4978 	},
4979 	{
4980 		.name = "kmem.tcp.failcnt",
4981 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4982 		.write = mem_cgroup_reset,
4983 		.read_u64 = mem_cgroup_read_u64,
4984 	},
4985 	{
4986 		.name = "kmem.tcp.max_usage_in_bytes",
4987 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4988 		.write = mem_cgroup_reset,
4989 		.read_u64 = mem_cgroup_read_u64,
4990 	},
4991 	{ },	/* terminate */
4992 };
4993 
4994 /*
4995  * Private memory cgroup IDR
4996  *
4997  * Swap-out records and page cache shadow entries need to store memcg
4998  * references in constrained space, so we maintain an ID space that is
4999  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5000  * memory-controlled cgroups to 64k.
5001  *
5002  * However, there usually are many references to the offline CSS after
5003  * the cgroup has been destroyed, such as page cache or reclaimable
5004  * slab objects, that don't need to hang on to the ID. We want to keep
5005  * those dead CSS from occupying IDs, or we might quickly exhaust the
5006  * relatively small ID space and prevent the creation of new cgroups
5007  * even when there are much fewer than 64k cgroups - possibly none.
5008  *
5009  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5010  * be freed and recycled when it's no longer needed, which is usually
5011  * when the CSS is offlined.
5012  *
5013  * The only exception to that are records of swapped out tmpfs/shmem
5014  * pages that need to be attributed to live ancestors on swapin. But
5015  * those references are manageable from userspace.
5016  */
5017 
5018 static DEFINE_IDR(mem_cgroup_idr);
5019 
5020 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5021 {
5022 	if (memcg->id.id > 0) {
5023 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5024 		memcg->id.id = 0;
5025 	}
5026 }
5027 
5028 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5029 						  unsigned int n)
5030 {
5031 	refcount_add(n, &memcg->id.ref);
5032 }
5033 
5034 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5035 {
5036 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5037 		mem_cgroup_id_remove(memcg);
5038 
5039 		/* Memcg ID pins CSS */
5040 		css_put(&memcg->css);
5041 	}
5042 }
5043 
5044 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5045 {
5046 	mem_cgroup_id_put_many(memcg, 1);
5047 }
5048 
5049 /**
5050  * mem_cgroup_from_id - look up a memcg from a memcg id
5051  * @id: the memcg id to look up
5052  *
5053  * Caller must hold rcu_read_lock().
5054  */
5055 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5056 {
5057 	WARN_ON_ONCE(!rcu_read_lock_held());
5058 	return idr_find(&mem_cgroup_idr, id);
5059 }
5060 
5061 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5062 {
5063 	struct mem_cgroup_per_node *pn;
5064 	int tmp = node;
5065 	/*
5066 	 * This routine is called against possible nodes.
5067 	 * But it's BUG to call kmalloc() against offline node.
5068 	 *
5069 	 * TODO: this routine can waste much memory for nodes which will
5070 	 *       never be onlined. It's better to use memory hotplug callback
5071 	 *       function.
5072 	 */
5073 	if (!node_state(node, N_NORMAL_MEMORY))
5074 		tmp = -1;
5075 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5076 	if (!pn)
5077 		return 1;
5078 
5079 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5080 						   GFP_KERNEL_ACCOUNT);
5081 	if (!pn->lruvec_stats_percpu) {
5082 		kfree(pn);
5083 		return 1;
5084 	}
5085 
5086 	lruvec_init(&pn->lruvec);
5087 	pn->usage_in_excess = 0;
5088 	pn->on_tree = false;
5089 	pn->memcg = memcg;
5090 
5091 	memcg->nodeinfo[node] = pn;
5092 	return 0;
5093 }
5094 
5095 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5096 {
5097 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5098 
5099 	if (!pn)
5100 		return;
5101 
5102 	free_percpu(pn->lruvec_stats_percpu);
5103 	kfree(pn);
5104 }
5105 
5106 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5107 {
5108 	int node;
5109 
5110 	for_each_node(node)
5111 		free_mem_cgroup_per_node_info(memcg, node);
5112 	free_percpu(memcg->vmstats_percpu);
5113 	kfree(memcg);
5114 }
5115 
5116 static void mem_cgroup_free(struct mem_cgroup *memcg)
5117 {
5118 	memcg_wb_domain_exit(memcg);
5119 	__mem_cgroup_free(memcg);
5120 }
5121 
5122 static struct mem_cgroup *mem_cgroup_alloc(void)
5123 {
5124 	struct mem_cgroup *memcg;
5125 	unsigned int size;
5126 	int node;
5127 	int __maybe_unused i;
5128 	long error = -ENOMEM;
5129 
5130 	size = sizeof(struct mem_cgroup);
5131 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5132 
5133 	memcg = kzalloc(size, GFP_KERNEL);
5134 	if (!memcg)
5135 		return ERR_PTR(error);
5136 
5137 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5138 				 1, MEM_CGROUP_ID_MAX,
5139 				 GFP_KERNEL);
5140 	if (memcg->id.id < 0) {
5141 		error = memcg->id.id;
5142 		goto fail;
5143 	}
5144 
5145 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5146 						 GFP_KERNEL_ACCOUNT);
5147 	if (!memcg->vmstats_percpu)
5148 		goto fail;
5149 
5150 	for_each_node(node)
5151 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5152 			goto fail;
5153 
5154 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5155 		goto fail;
5156 
5157 	INIT_WORK(&memcg->high_work, high_work_func);
5158 	INIT_LIST_HEAD(&memcg->oom_notify);
5159 	mutex_init(&memcg->thresholds_lock);
5160 	spin_lock_init(&memcg->move_lock);
5161 	vmpressure_init(&memcg->vmpressure);
5162 	INIT_LIST_HEAD(&memcg->event_list);
5163 	spin_lock_init(&memcg->event_list_lock);
5164 	memcg->socket_pressure = jiffies;
5165 #ifdef CONFIG_MEMCG_KMEM
5166 	memcg->kmemcg_id = -1;
5167 	INIT_LIST_HEAD(&memcg->objcg_list);
5168 #endif
5169 #ifdef CONFIG_CGROUP_WRITEBACK
5170 	INIT_LIST_HEAD(&memcg->cgwb_list);
5171 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5172 		memcg->cgwb_frn[i].done =
5173 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5174 #endif
5175 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5176 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5177 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5178 	memcg->deferred_split_queue.split_queue_len = 0;
5179 #endif
5180 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5181 	return memcg;
5182 fail:
5183 	mem_cgroup_id_remove(memcg);
5184 	__mem_cgroup_free(memcg);
5185 	return ERR_PTR(error);
5186 }
5187 
5188 static struct cgroup_subsys_state * __ref
5189 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5190 {
5191 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5192 	struct mem_cgroup *memcg, *old_memcg;
5193 	long error = -ENOMEM;
5194 
5195 	old_memcg = set_active_memcg(parent);
5196 	memcg = mem_cgroup_alloc();
5197 	set_active_memcg(old_memcg);
5198 	if (IS_ERR(memcg))
5199 		return ERR_CAST(memcg);
5200 
5201 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5202 	memcg->soft_limit = PAGE_COUNTER_MAX;
5203 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5204 	if (parent) {
5205 		memcg->swappiness = mem_cgroup_swappiness(parent);
5206 		memcg->oom_kill_disable = parent->oom_kill_disable;
5207 
5208 		page_counter_init(&memcg->memory, &parent->memory);
5209 		page_counter_init(&memcg->swap, &parent->swap);
5210 		page_counter_init(&memcg->kmem, &parent->kmem);
5211 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5212 	} else {
5213 		page_counter_init(&memcg->memory, NULL);
5214 		page_counter_init(&memcg->swap, NULL);
5215 		page_counter_init(&memcg->kmem, NULL);
5216 		page_counter_init(&memcg->tcpmem, NULL);
5217 
5218 		root_mem_cgroup = memcg;
5219 		return &memcg->css;
5220 	}
5221 
5222 	/* The following stuff does not apply to the root */
5223 	error = memcg_online_kmem(memcg);
5224 	if (error)
5225 		goto fail;
5226 
5227 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5228 		static_branch_inc(&memcg_sockets_enabled_key);
5229 
5230 	return &memcg->css;
5231 fail:
5232 	mem_cgroup_id_remove(memcg);
5233 	mem_cgroup_free(memcg);
5234 	return ERR_PTR(error);
5235 }
5236 
5237 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5238 {
5239 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5240 
5241 	/*
5242 	 * A memcg must be visible for expand_shrinker_info()
5243 	 * by the time the maps are allocated. So, we allocate maps
5244 	 * here, when for_each_mem_cgroup() can't skip it.
5245 	 */
5246 	if (alloc_shrinker_info(memcg)) {
5247 		mem_cgroup_id_remove(memcg);
5248 		return -ENOMEM;
5249 	}
5250 
5251 	/* Online state pins memcg ID, memcg ID pins CSS */
5252 	refcount_set(&memcg->id.ref, 1);
5253 	css_get(css);
5254 
5255 	if (unlikely(mem_cgroup_is_root(memcg)))
5256 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5257 				   2UL*HZ);
5258 	return 0;
5259 }
5260 
5261 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5262 {
5263 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5264 	struct mem_cgroup_event *event, *tmp;
5265 
5266 	/*
5267 	 * Unregister events and notify userspace.
5268 	 * Notify userspace about cgroup removing only after rmdir of cgroup
5269 	 * directory to avoid race between userspace and kernelspace.
5270 	 */
5271 	spin_lock_irq(&memcg->event_list_lock);
5272 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5273 		list_del_init(&event->list);
5274 		schedule_work(&event->remove);
5275 	}
5276 	spin_unlock_irq(&memcg->event_list_lock);
5277 
5278 	page_counter_set_min(&memcg->memory, 0);
5279 	page_counter_set_low(&memcg->memory, 0);
5280 
5281 	memcg_offline_kmem(memcg);
5282 	reparent_shrinker_deferred(memcg);
5283 	wb_memcg_offline(memcg);
5284 
5285 	drain_all_stock(memcg);
5286 
5287 	mem_cgroup_id_put(memcg);
5288 }
5289 
5290 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5291 {
5292 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5293 
5294 	invalidate_reclaim_iterators(memcg);
5295 }
5296 
5297 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5298 {
5299 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5300 	int __maybe_unused i;
5301 
5302 #ifdef CONFIG_CGROUP_WRITEBACK
5303 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5304 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5305 #endif
5306 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5307 		static_branch_dec(&memcg_sockets_enabled_key);
5308 
5309 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5310 		static_branch_dec(&memcg_sockets_enabled_key);
5311 
5312 	vmpressure_cleanup(&memcg->vmpressure);
5313 	cancel_work_sync(&memcg->high_work);
5314 	mem_cgroup_remove_from_trees(memcg);
5315 	free_shrinker_info(memcg);
5316 	memcg_free_kmem(memcg);
5317 	mem_cgroup_free(memcg);
5318 }
5319 
5320 /**
5321  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5322  * @css: the target css
5323  *
5324  * Reset the states of the mem_cgroup associated with @css.  This is
5325  * invoked when the userland requests disabling on the default hierarchy
5326  * but the memcg is pinned through dependency.  The memcg should stop
5327  * applying policies and should revert to the vanilla state as it may be
5328  * made visible again.
5329  *
5330  * The current implementation only resets the essential configurations.
5331  * This needs to be expanded to cover all the visible parts.
5332  */
5333 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5334 {
5335 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5336 
5337 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5338 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5339 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5340 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5341 	page_counter_set_min(&memcg->memory, 0);
5342 	page_counter_set_low(&memcg->memory, 0);
5343 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5344 	memcg->soft_limit = PAGE_COUNTER_MAX;
5345 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5346 	memcg_wb_domain_size_changed(memcg);
5347 }
5348 
5349 void mem_cgroup_flush_stats(void)
5350 {
5351 	if (!spin_trylock(&stats_flush_lock))
5352 		return;
5353 
5354 	cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
5355 	spin_unlock(&stats_flush_lock);
5356 }
5357 
5358 static void flush_memcg_stats_dwork(struct work_struct *w)
5359 {
5360 	mem_cgroup_flush_stats();
5361 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
5362 }
5363 
5364 static void flush_memcg_stats_work(struct work_struct *w)
5365 {
5366 	mem_cgroup_flush_stats();
5367 }
5368 
5369 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5370 {
5371 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5372 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5373 	struct memcg_vmstats_percpu *statc;
5374 	long delta, v;
5375 	int i, nid;
5376 
5377 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5378 
5379 	for (i = 0; i < MEMCG_NR_STAT; i++) {
5380 		/*
5381 		 * Collect the aggregated propagation counts of groups
5382 		 * below us. We're in a per-cpu loop here and this is
5383 		 * a global counter, so the first cycle will get them.
5384 		 */
5385 		delta = memcg->vmstats.state_pending[i];
5386 		if (delta)
5387 			memcg->vmstats.state_pending[i] = 0;
5388 
5389 		/* Add CPU changes on this level since the last flush */
5390 		v = READ_ONCE(statc->state[i]);
5391 		if (v != statc->state_prev[i]) {
5392 			delta += v - statc->state_prev[i];
5393 			statc->state_prev[i] = v;
5394 		}
5395 
5396 		if (!delta)
5397 			continue;
5398 
5399 		/* Aggregate counts on this level and propagate upwards */
5400 		memcg->vmstats.state[i] += delta;
5401 		if (parent)
5402 			parent->vmstats.state_pending[i] += delta;
5403 	}
5404 
5405 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5406 		delta = memcg->vmstats.events_pending[i];
5407 		if (delta)
5408 			memcg->vmstats.events_pending[i] = 0;
5409 
5410 		v = READ_ONCE(statc->events[i]);
5411 		if (v != statc->events_prev[i]) {
5412 			delta += v - statc->events_prev[i];
5413 			statc->events_prev[i] = v;
5414 		}
5415 
5416 		if (!delta)
5417 			continue;
5418 
5419 		memcg->vmstats.events[i] += delta;
5420 		if (parent)
5421 			parent->vmstats.events_pending[i] += delta;
5422 	}
5423 
5424 	for_each_node_state(nid, N_MEMORY) {
5425 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5426 		struct mem_cgroup_per_node *ppn = NULL;
5427 		struct lruvec_stats_percpu *lstatc;
5428 
5429 		if (parent)
5430 			ppn = parent->nodeinfo[nid];
5431 
5432 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5433 
5434 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5435 			delta = pn->lruvec_stats.state_pending[i];
5436 			if (delta)
5437 				pn->lruvec_stats.state_pending[i] = 0;
5438 
5439 			v = READ_ONCE(lstatc->state[i]);
5440 			if (v != lstatc->state_prev[i]) {
5441 				delta += v - lstatc->state_prev[i];
5442 				lstatc->state_prev[i] = v;
5443 			}
5444 
5445 			if (!delta)
5446 				continue;
5447 
5448 			pn->lruvec_stats.state[i] += delta;
5449 			if (ppn)
5450 				ppn->lruvec_stats.state_pending[i] += delta;
5451 		}
5452 	}
5453 }
5454 
5455 #ifdef CONFIG_MMU
5456 /* Handlers for move charge at task migration. */
5457 static int mem_cgroup_do_precharge(unsigned long count)
5458 {
5459 	int ret;
5460 
5461 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5462 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5463 	if (!ret) {
5464 		mc.precharge += count;
5465 		return ret;
5466 	}
5467 
5468 	/* Try charges one by one with reclaim, but do not retry */
5469 	while (count--) {
5470 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5471 		if (ret)
5472 			return ret;
5473 		mc.precharge++;
5474 		cond_resched();
5475 	}
5476 	return 0;
5477 }
5478 
5479 union mc_target {
5480 	struct page	*page;
5481 	swp_entry_t	ent;
5482 };
5483 
5484 enum mc_target_type {
5485 	MC_TARGET_NONE = 0,
5486 	MC_TARGET_PAGE,
5487 	MC_TARGET_SWAP,
5488 	MC_TARGET_DEVICE,
5489 };
5490 
5491 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5492 						unsigned long addr, pte_t ptent)
5493 {
5494 	struct page *page = vm_normal_page(vma, addr, ptent);
5495 
5496 	if (!page || !page_mapped(page))
5497 		return NULL;
5498 	if (PageAnon(page)) {
5499 		if (!(mc.flags & MOVE_ANON))
5500 			return NULL;
5501 	} else {
5502 		if (!(mc.flags & MOVE_FILE))
5503 			return NULL;
5504 	}
5505 	if (!get_page_unless_zero(page))
5506 		return NULL;
5507 
5508 	return page;
5509 }
5510 
5511 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5512 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5513 			pte_t ptent, swp_entry_t *entry)
5514 {
5515 	struct page *page = NULL;
5516 	swp_entry_t ent = pte_to_swp_entry(ptent);
5517 
5518 	if (!(mc.flags & MOVE_ANON))
5519 		return NULL;
5520 
5521 	/*
5522 	 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to
5523 	 * a device and because they are not accessible by CPU they are store
5524 	 * as special swap entry in the CPU page table.
5525 	 */
5526 	if (is_device_private_entry(ent)) {
5527 		page = pfn_swap_entry_to_page(ent);
5528 		/*
5529 		 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have
5530 		 * a refcount of 1 when free (unlike normal page)
5531 		 */
5532 		if (!page_ref_add_unless(page, 1, 1))
5533 			return NULL;
5534 		return page;
5535 	}
5536 
5537 	if (non_swap_entry(ent))
5538 		return NULL;
5539 
5540 	/*
5541 	 * Because lookup_swap_cache() updates some statistics counter,
5542 	 * we call find_get_page() with swapper_space directly.
5543 	 */
5544 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5545 	entry->val = ent.val;
5546 
5547 	return page;
5548 }
5549 #else
5550 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5551 			pte_t ptent, swp_entry_t *entry)
5552 {
5553 	return NULL;
5554 }
5555 #endif
5556 
5557 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5558 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5559 {
5560 	if (!vma->vm_file) /* anonymous vma */
5561 		return NULL;
5562 	if (!(mc.flags & MOVE_FILE))
5563 		return NULL;
5564 
5565 	/* page is moved even if it's not RSS of this task(page-faulted). */
5566 	/* shmem/tmpfs may report page out on swap: account for that too. */
5567 	return find_get_incore_page(vma->vm_file->f_mapping,
5568 			linear_page_index(vma, addr));
5569 }
5570 
5571 /**
5572  * mem_cgroup_move_account - move account of the page
5573  * @page: the page
5574  * @compound: charge the page as compound or small page
5575  * @from: mem_cgroup which the page is moved from.
5576  * @to:	mem_cgroup which the page is moved to. @from != @to.
5577  *
5578  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5579  *
5580  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5581  * from old cgroup.
5582  */
5583 static int mem_cgroup_move_account(struct page *page,
5584 				   bool compound,
5585 				   struct mem_cgroup *from,
5586 				   struct mem_cgroup *to)
5587 {
5588 	struct lruvec *from_vec, *to_vec;
5589 	struct pglist_data *pgdat;
5590 	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5591 	int ret;
5592 
5593 	VM_BUG_ON(from == to);
5594 	VM_BUG_ON_PAGE(PageLRU(page), page);
5595 	VM_BUG_ON(compound && !PageTransHuge(page));
5596 
5597 	/*
5598 	 * Prevent mem_cgroup_migrate() from looking at
5599 	 * page's memory cgroup of its source page while we change it.
5600 	 */
5601 	ret = -EBUSY;
5602 	if (!trylock_page(page))
5603 		goto out;
5604 
5605 	ret = -EINVAL;
5606 	if (page_memcg(page) != from)
5607 		goto out_unlock;
5608 
5609 	pgdat = page_pgdat(page);
5610 	from_vec = mem_cgroup_lruvec(from, pgdat);
5611 	to_vec = mem_cgroup_lruvec(to, pgdat);
5612 
5613 	lock_page_memcg(page);
5614 
5615 	if (PageAnon(page)) {
5616 		if (page_mapped(page)) {
5617 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5618 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5619 			if (PageTransHuge(page)) {
5620 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5621 						   -nr_pages);
5622 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5623 						   nr_pages);
5624 			}
5625 		}
5626 	} else {
5627 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5628 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5629 
5630 		if (PageSwapBacked(page)) {
5631 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5632 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5633 		}
5634 
5635 		if (page_mapped(page)) {
5636 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5637 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5638 		}
5639 
5640 		if (PageDirty(page)) {
5641 			struct address_space *mapping = page_mapping(page);
5642 
5643 			if (mapping_can_writeback(mapping)) {
5644 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5645 						   -nr_pages);
5646 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5647 						   nr_pages);
5648 			}
5649 		}
5650 	}
5651 
5652 	if (PageWriteback(page)) {
5653 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5654 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5655 	}
5656 
5657 	/*
5658 	 * All state has been migrated, let's switch to the new memcg.
5659 	 *
5660 	 * It is safe to change page's memcg here because the page
5661 	 * is referenced, charged, isolated, and locked: we can't race
5662 	 * with (un)charging, migration, LRU putback, or anything else
5663 	 * that would rely on a stable page's memory cgroup.
5664 	 *
5665 	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5666 	 * to save space. As soon as we switch page's memory cgroup to a
5667 	 * new memcg that isn't locked, the above state can change
5668 	 * concurrently again. Make sure we're truly done with it.
5669 	 */
5670 	smp_mb();
5671 
5672 	css_get(&to->css);
5673 	css_put(&from->css);
5674 
5675 	page->memcg_data = (unsigned long)to;
5676 
5677 	__unlock_page_memcg(from);
5678 
5679 	ret = 0;
5680 
5681 	local_irq_disable();
5682 	mem_cgroup_charge_statistics(to, page, nr_pages);
5683 	memcg_check_events(to, page);
5684 	mem_cgroup_charge_statistics(from, page, -nr_pages);
5685 	memcg_check_events(from, page);
5686 	local_irq_enable();
5687 out_unlock:
5688 	unlock_page(page);
5689 out:
5690 	return ret;
5691 }
5692 
5693 /**
5694  * get_mctgt_type - get target type of moving charge
5695  * @vma: the vma the pte to be checked belongs
5696  * @addr: the address corresponding to the pte to be checked
5697  * @ptent: the pte to be checked
5698  * @target: the pointer the target page or swap ent will be stored(can be NULL)
5699  *
5700  * Returns
5701  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5702  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5703  *     move charge. if @target is not NULL, the page is stored in target->page
5704  *     with extra refcnt got(Callers should handle it).
5705  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5706  *     target for charge migration. if @target is not NULL, the entry is stored
5707  *     in target->ent.
5708  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE  but page is MEMORY_DEVICE_PRIVATE
5709  *     (so ZONE_DEVICE page and thus not on the lru).
5710  *     For now we such page is charge like a regular page would be as for all
5711  *     intent and purposes it is just special memory taking the place of a
5712  *     regular page.
5713  *
5714  *     See Documentations/vm/hmm.txt and include/linux/hmm.h
5715  *
5716  * Called with pte lock held.
5717  */
5718 
5719 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5720 		unsigned long addr, pte_t ptent, union mc_target *target)
5721 {
5722 	struct page *page = NULL;
5723 	enum mc_target_type ret = MC_TARGET_NONE;
5724 	swp_entry_t ent = { .val = 0 };
5725 
5726 	if (pte_present(ptent))
5727 		page = mc_handle_present_pte(vma, addr, ptent);
5728 	else if (is_swap_pte(ptent))
5729 		page = mc_handle_swap_pte(vma, ptent, &ent);
5730 	else if (pte_none(ptent))
5731 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5732 
5733 	if (!page && !ent.val)
5734 		return ret;
5735 	if (page) {
5736 		/*
5737 		 * Do only loose check w/o serialization.
5738 		 * mem_cgroup_move_account() checks the page is valid or
5739 		 * not under LRU exclusion.
5740 		 */
5741 		if (page_memcg(page) == mc.from) {
5742 			ret = MC_TARGET_PAGE;
5743 			if (is_device_private_page(page))
5744 				ret = MC_TARGET_DEVICE;
5745 			if (target)
5746 				target->page = page;
5747 		}
5748 		if (!ret || !target)
5749 			put_page(page);
5750 	}
5751 	/*
5752 	 * There is a swap entry and a page doesn't exist or isn't charged.
5753 	 * But we cannot move a tail-page in a THP.
5754 	 */
5755 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5756 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5757 		ret = MC_TARGET_SWAP;
5758 		if (target)
5759 			target->ent = ent;
5760 	}
5761 	return ret;
5762 }
5763 
5764 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5765 /*
5766  * We don't consider PMD mapped swapping or file mapped pages because THP does
5767  * not support them for now.
5768  * Caller should make sure that pmd_trans_huge(pmd) is true.
5769  */
5770 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5771 		unsigned long addr, pmd_t pmd, union mc_target *target)
5772 {
5773 	struct page *page = NULL;
5774 	enum mc_target_type ret = MC_TARGET_NONE;
5775 
5776 	if (unlikely(is_swap_pmd(pmd))) {
5777 		VM_BUG_ON(thp_migration_supported() &&
5778 				  !is_pmd_migration_entry(pmd));
5779 		return ret;
5780 	}
5781 	page = pmd_page(pmd);
5782 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5783 	if (!(mc.flags & MOVE_ANON))
5784 		return ret;
5785 	if (page_memcg(page) == mc.from) {
5786 		ret = MC_TARGET_PAGE;
5787 		if (target) {
5788 			get_page(page);
5789 			target->page = page;
5790 		}
5791 	}
5792 	return ret;
5793 }
5794 #else
5795 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5796 		unsigned long addr, pmd_t pmd, union mc_target *target)
5797 {
5798 	return MC_TARGET_NONE;
5799 }
5800 #endif
5801 
5802 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5803 					unsigned long addr, unsigned long end,
5804 					struct mm_walk *walk)
5805 {
5806 	struct vm_area_struct *vma = walk->vma;
5807 	pte_t *pte;
5808 	spinlock_t *ptl;
5809 
5810 	ptl = pmd_trans_huge_lock(pmd, vma);
5811 	if (ptl) {
5812 		/*
5813 		 * Note their can not be MC_TARGET_DEVICE for now as we do not
5814 		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
5815 		 * this might change.
5816 		 */
5817 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5818 			mc.precharge += HPAGE_PMD_NR;
5819 		spin_unlock(ptl);
5820 		return 0;
5821 	}
5822 
5823 	if (pmd_trans_unstable(pmd))
5824 		return 0;
5825 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5826 	for (; addr != end; pte++, addr += PAGE_SIZE)
5827 		if (get_mctgt_type(vma, addr, *pte, NULL))
5828 			mc.precharge++;	/* increment precharge temporarily */
5829 	pte_unmap_unlock(pte - 1, ptl);
5830 	cond_resched();
5831 
5832 	return 0;
5833 }
5834 
5835 static const struct mm_walk_ops precharge_walk_ops = {
5836 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5837 };
5838 
5839 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5840 {
5841 	unsigned long precharge;
5842 
5843 	mmap_read_lock(mm);
5844 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5845 	mmap_read_unlock(mm);
5846 
5847 	precharge = mc.precharge;
5848 	mc.precharge = 0;
5849 
5850 	return precharge;
5851 }
5852 
5853 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5854 {
5855 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5856 
5857 	VM_BUG_ON(mc.moving_task);
5858 	mc.moving_task = current;
5859 	return mem_cgroup_do_precharge(precharge);
5860 }
5861 
5862 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5863 static void __mem_cgroup_clear_mc(void)
5864 {
5865 	struct mem_cgroup *from = mc.from;
5866 	struct mem_cgroup *to = mc.to;
5867 
5868 	/* we must uncharge all the leftover precharges from mc.to */
5869 	if (mc.precharge) {
5870 		cancel_charge(mc.to, mc.precharge);
5871 		mc.precharge = 0;
5872 	}
5873 	/*
5874 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5875 	 * we must uncharge here.
5876 	 */
5877 	if (mc.moved_charge) {
5878 		cancel_charge(mc.from, mc.moved_charge);
5879 		mc.moved_charge = 0;
5880 	}
5881 	/* we must fixup refcnts and charges */
5882 	if (mc.moved_swap) {
5883 		/* uncharge swap account from the old cgroup */
5884 		if (!mem_cgroup_is_root(mc.from))
5885 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5886 
5887 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5888 
5889 		/*
5890 		 * we charged both to->memory and to->memsw, so we
5891 		 * should uncharge to->memory.
5892 		 */
5893 		if (!mem_cgroup_is_root(mc.to))
5894 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5895 
5896 		mc.moved_swap = 0;
5897 	}
5898 	memcg_oom_recover(from);
5899 	memcg_oom_recover(to);
5900 	wake_up_all(&mc.waitq);
5901 }
5902 
5903 static void mem_cgroup_clear_mc(void)
5904 {
5905 	struct mm_struct *mm = mc.mm;
5906 
5907 	/*
5908 	 * we must clear moving_task before waking up waiters at the end of
5909 	 * task migration.
5910 	 */
5911 	mc.moving_task = NULL;
5912 	__mem_cgroup_clear_mc();
5913 	spin_lock(&mc.lock);
5914 	mc.from = NULL;
5915 	mc.to = NULL;
5916 	mc.mm = NULL;
5917 	spin_unlock(&mc.lock);
5918 
5919 	mmput(mm);
5920 }
5921 
5922 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5923 {
5924 	struct cgroup_subsys_state *css;
5925 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5926 	struct mem_cgroup *from;
5927 	struct task_struct *leader, *p;
5928 	struct mm_struct *mm;
5929 	unsigned long move_flags;
5930 	int ret = 0;
5931 
5932 	/* charge immigration isn't supported on the default hierarchy */
5933 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5934 		return 0;
5935 
5936 	/*
5937 	 * Multi-process migrations only happen on the default hierarchy
5938 	 * where charge immigration is not used.  Perform charge
5939 	 * immigration if @tset contains a leader and whine if there are
5940 	 * multiple.
5941 	 */
5942 	p = NULL;
5943 	cgroup_taskset_for_each_leader(leader, css, tset) {
5944 		WARN_ON_ONCE(p);
5945 		p = leader;
5946 		memcg = mem_cgroup_from_css(css);
5947 	}
5948 	if (!p)
5949 		return 0;
5950 
5951 	/*
5952 	 * We are now committed to this value whatever it is. Changes in this
5953 	 * tunable will only affect upcoming migrations, not the current one.
5954 	 * So we need to save it, and keep it going.
5955 	 */
5956 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5957 	if (!move_flags)
5958 		return 0;
5959 
5960 	from = mem_cgroup_from_task(p);
5961 
5962 	VM_BUG_ON(from == memcg);
5963 
5964 	mm = get_task_mm(p);
5965 	if (!mm)
5966 		return 0;
5967 	/* We move charges only when we move a owner of the mm */
5968 	if (mm->owner == p) {
5969 		VM_BUG_ON(mc.from);
5970 		VM_BUG_ON(mc.to);
5971 		VM_BUG_ON(mc.precharge);
5972 		VM_BUG_ON(mc.moved_charge);
5973 		VM_BUG_ON(mc.moved_swap);
5974 
5975 		spin_lock(&mc.lock);
5976 		mc.mm = mm;
5977 		mc.from = from;
5978 		mc.to = memcg;
5979 		mc.flags = move_flags;
5980 		spin_unlock(&mc.lock);
5981 		/* We set mc.moving_task later */
5982 
5983 		ret = mem_cgroup_precharge_mc(mm);
5984 		if (ret)
5985 			mem_cgroup_clear_mc();
5986 	} else {
5987 		mmput(mm);
5988 	}
5989 	return ret;
5990 }
5991 
5992 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5993 {
5994 	if (mc.to)
5995 		mem_cgroup_clear_mc();
5996 }
5997 
5998 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5999 				unsigned long addr, unsigned long end,
6000 				struct mm_walk *walk)
6001 {
6002 	int ret = 0;
6003 	struct vm_area_struct *vma = walk->vma;
6004 	pte_t *pte;
6005 	spinlock_t *ptl;
6006 	enum mc_target_type target_type;
6007 	union mc_target target;
6008 	struct page *page;
6009 
6010 	ptl = pmd_trans_huge_lock(pmd, vma);
6011 	if (ptl) {
6012 		if (mc.precharge < HPAGE_PMD_NR) {
6013 			spin_unlock(ptl);
6014 			return 0;
6015 		}
6016 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6017 		if (target_type == MC_TARGET_PAGE) {
6018 			page = target.page;
6019 			if (!isolate_lru_page(page)) {
6020 				if (!mem_cgroup_move_account(page, true,
6021 							     mc.from, mc.to)) {
6022 					mc.precharge -= HPAGE_PMD_NR;
6023 					mc.moved_charge += HPAGE_PMD_NR;
6024 				}
6025 				putback_lru_page(page);
6026 			}
6027 			put_page(page);
6028 		} else if (target_type == MC_TARGET_DEVICE) {
6029 			page = target.page;
6030 			if (!mem_cgroup_move_account(page, true,
6031 						     mc.from, mc.to)) {
6032 				mc.precharge -= HPAGE_PMD_NR;
6033 				mc.moved_charge += HPAGE_PMD_NR;
6034 			}
6035 			put_page(page);
6036 		}
6037 		spin_unlock(ptl);
6038 		return 0;
6039 	}
6040 
6041 	if (pmd_trans_unstable(pmd))
6042 		return 0;
6043 retry:
6044 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6045 	for (; addr != end; addr += PAGE_SIZE) {
6046 		pte_t ptent = *(pte++);
6047 		bool device = false;
6048 		swp_entry_t ent;
6049 
6050 		if (!mc.precharge)
6051 			break;
6052 
6053 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6054 		case MC_TARGET_DEVICE:
6055 			device = true;
6056 			fallthrough;
6057 		case MC_TARGET_PAGE:
6058 			page = target.page;
6059 			/*
6060 			 * We can have a part of the split pmd here. Moving it
6061 			 * can be done but it would be too convoluted so simply
6062 			 * ignore such a partial THP and keep it in original
6063 			 * memcg. There should be somebody mapping the head.
6064 			 */
6065 			if (PageTransCompound(page))
6066 				goto put;
6067 			if (!device && isolate_lru_page(page))
6068 				goto put;
6069 			if (!mem_cgroup_move_account(page, false,
6070 						mc.from, mc.to)) {
6071 				mc.precharge--;
6072 				/* we uncharge from mc.from later. */
6073 				mc.moved_charge++;
6074 			}
6075 			if (!device)
6076 				putback_lru_page(page);
6077 put:			/* get_mctgt_type() gets the page */
6078 			put_page(page);
6079 			break;
6080 		case MC_TARGET_SWAP:
6081 			ent = target.ent;
6082 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6083 				mc.precharge--;
6084 				mem_cgroup_id_get_many(mc.to, 1);
6085 				/* we fixup other refcnts and charges later. */
6086 				mc.moved_swap++;
6087 			}
6088 			break;
6089 		default:
6090 			break;
6091 		}
6092 	}
6093 	pte_unmap_unlock(pte - 1, ptl);
6094 	cond_resched();
6095 
6096 	if (addr != end) {
6097 		/*
6098 		 * We have consumed all precharges we got in can_attach().
6099 		 * We try charge one by one, but don't do any additional
6100 		 * charges to mc.to if we have failed in charge once in attach()
6101 		 * phase.
6102 		 */
6103 		ret = mem_cgroup_do_precharge(1);
6104 		if (!ret)
6105 			goto retry;
6106 	}
6107 
6108 	return ret;
6109 }
6110 
6111 static const struct mm_walk_ops charge_walk_ops = {
6112 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6113 };
6114 
6115 static void mem_cgroup_move_charge(void)
6116 {
6117 	lru_add_drain_all();
6118 	/*
6119 	 * Signal lock_page_memcg() to take the memcg's move_lock
6120 	 * while we're moving its pages to another memcg. Then wait
6121 	 * for already started RCU-only updates to finish.
6122 	 */
6123 	atomic_inc(&mc.from->moving_account);
6124 	synchronize_rcu();
6125 retry:
6126 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6127 		/*
6128 		 * Someone who are holding the mmap_lock might be waiting in
6129 		 * waitq. So we cancel all extra charges, wake up all waiters,
6130 		 * and retry. Because we cancel precharges, we might not be able
6131 		 * to move enough charges, but moving charge is a best-effort
6132 		 * feature anyway, so it wouldn't be a big problem.
6133 		 */
6134 		__mem_cgroup_clear_mc();
6135 		cond_resched();
6136 		goto retry;
6137 	}
6138 	/*
6139 	 * When we have consumed all precharges and failed in doing
6140 	 * additional charge, the page walk just aborts.
6141 	 */
6142 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6143 			NULL);
6144 
6145 	mmap_read_unlock(mc.mm);
6146 	atomic_dec(&mc.from->moving_account);
6147 }
6148 
6149 static void mem_cgroup_move_task(void)
6150 {
6151 	if (mc.to) {
6152 		mem_cgroup_move_charge();
6153 		mem_cgroup_clear_mc();
6154 	}
6155 }
6156 #else	/* !CONFIG_MMU */
6157 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6158 {
6159 	return 0;
6160 }
6161 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6162 {
6163 }
6164 static void mem_cgroup_move_task(void)
6165 {
6166 }
6167 #endif
6168 
6169 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6170 {
6171 	if (value == PAGE_COUNTER_MAX)
6172 		seq_puts(m, "max\n");
6173 	else
6174 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6175 
6176 	return 0;
6177 }
6178 
6179 static u64 memory_current_read(struct cgroup_subsys_state *css,
6180 			       struct cftype *cft)
6181 {
6182 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6183 
6184 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6185 }
6186 
6187 static int memory_min_show(struct seq_file *m, void *v)
6188 {
6189 	return seq_puts_memcg_tunable(m,
6190 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6191 }
6192 
6193 static ssize_t memory_min_write(struct kernfs_open_file *of,
6194 				char *buf, size_t nbytes, loff_t off)
6195 {
6196 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6197 	unsigned long min;
6198 	int err;
6199 
6200 	buf = strstrip(buf);
6201 	err = page_counter_memparse(buf, "max", &min);
6202 	if (err)
6203 		return err;
6204 
6205 	page_counter_set_min(&memcg->memory, min);
6206 
6207 	return nbytes;
6208 }
6209 
6210 static int memory_low_show(struct seq_file *m, void *v)
6211 {
6212 	return seq_puts_memcg_tunable(m,
6213 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6214 }
6215 
6216 static ssize_t memory_low_write(struct kernfs_open_file *of,
6217 				char *buf, size_t nbytes, loff_t off)
6218 {
6219 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6220 	unsigned long low;
6221 	int err;
6222 
6223 	buf = strstrip(buf);
6224 	err = page_counter_memparse(buf, "max", &low);
6225 	if (err)
6226 		return err;
6227 
6228 	page_counter_set_low(&memcg->memory, low);
6229 
6230 	return nbytes;
6231 }
6232 
6233 static int memory_high_show(struct seq_file *m, void *v)
6234 {
6235 	return seq_puts_memcg_tunable(m,
6236 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6237 }
6238 
6239 static ssize_t memory_high_write(struct kernfs_open_file *of,
6240 				 char *buf, size_t nbytes, loff_t off)
6241 {
6242 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6243 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6244 	bool drained = false;
6245 	unsigned long high;
6246 	int err;
6247 
6248 	buf = strstrip(buf);
6249 	err = page_counter_memparse(buf, "max", &high);
6250 	if (err)
6251 		return err;
6252 
6253 	page_counter_set_high(&memcg->memory, high);
6254 
6255 	for (;;) {
6256 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6257 		unsigned long reclaimed;
6258 
6259 		if (nr_pages <= high)
6260 			break;
6261 
6262 		if (signal_pending(current))
6263 			break;
6264 
6265 		if (!drained) {
6266 			drain_all_stock(memcg);
6267 			drained = true;
6268 			continue;
6269 		}
6270 
6271 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6272 							 GFP_KERNEL, true);
6273 
6274 		if (!reclaimed && !nr_retries--)
6275 			break;
6276 	}
6277 
6278 	memcg_wb_domain_size_changed(memcg);
6279 	return nbytes;
6280 }
6281 
6282 static int memory_max_show(struct seq_file *m, void *v)
6283 {
6284 	return seq_puts_memcg_tunable(m,
6285 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6286 }
6287 
6288 static ssize_t memory_max_write(struct kernfs_open_file *of,
6289 				char *buf, size_t nbytes, loff_t off)
6290 {
6291 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6292 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6293 	bool drained = false;
6294 	unsigned long max;
6295 	int err;
6296 
6297 	buf = strstrip(buf);
6298 	err = page_counter_memparse(buf, "max", &max);
6299 	if (err)
6300 		return err;
6301 
6302 	xchg(&memcg->memory.max, max);
6303 
6304 	for (;;) {
6305 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6306 
6307 		if (nr_pages <= max)
6308 			break;
6309 
6310 		if (signal_pending(current))
6311 			break;
6312 
6313 		if (!drained) {
6314 			drain_all_stock(memcg);
6315 			drained = true;
6316 			continue;
6317 		}
6318 
6319 		if (nr_reclaims) {
6320 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6321 							  GFP_KERNEL, true))
6322 				nr_reclaims--;
6323 			continue;
6324 		}
6325 
6326 		memcg_memory_event(memcg, MEMCG_OOM);
6327 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6328 			break;
6329 	}
6330 
6331 	memcg_wb_domain_size_changed(memcg);
6332 	return nbytes;
6333 }
6334 
6335 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6336 {
6337 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6338 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6339 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6340 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6341 	seq_printf(m, "oom_kill %lu\n",
6342 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6343 }
6344 
6345 static int memory_events_show(struct seq_file *m, void *v)
6346 {
6347 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6348 
6349 	__memory_events_show(m, memcg->memory_events);
6350 	return 0;
6351 }
6352 
6353 static int memory_events_local_show(struct seq_file *m, void *v)
6354 {
6355 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6356 
6357 	__memory_events_show(m, memcg->memory_events_local);
6358 	return 0;
6359 }
6360 
6361 static int memory_stat_show(struct seq_file *m, void *v)
6362 {
6363 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6364 	char *buf;
6365 
6366 	buf = memory_stat_format(memcg);
6367 	if (!buf)
6368 		return -ENOMEM;
6369 	seq_puts(m, buf);
6370 	kfree(buf);
6371 	return 0;
6372 }
6373 
6374 #ifdef CONFIG_NUMA
6375 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6376 						     int item)
6377 {
6378 	return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6379 }
6380 
6381 static int memory_numa_stat_show(struct seq_file *m, void *v)
6382 {
6383 	int i;
6384 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6385 
6386 	cgroup_rstat_flush(memcg->css.cgroup);
6387 
6388 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6389 		int nid;
6390 
6391 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6392 			continue;
6393 
6394 		seq_printf(m, "%s", memory_stats[i].name);
6395 		for_each_node_state(nid, N_MEMORY) {
6396 			u64 size;
6397 			struct lruvec *lruvec;
6398 
6399 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6400 			size = lruvec_page_state_output(lruvec,
6401 							memory_stats[i].idx);
6402 			seq_printf(m, " N%d=%llu", nid, size);
6403 		}
6404 		seq_putc(m, '\n');
6405 	}
6406 
6407 	return 0;
6408 }
6409 #endif
6410 
6411 static int memory_oom_group_show(struct seq_file *m, void *v)
6412 {
6413 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6414 
6415 	seq_printf(m, "%d\n", memcg->oom_group);
6416 
6417 	return 0;
6418 }
6419 
6420 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6421 				      char *buf, size_t nbytes, loff_t off)
6422 {
6423 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6424 	int ret, oom_group;
6425 
6426 	buf = strstrip(buf);
6427 	if (!buf)
6428 		return -EINVAL;
6429 
6430 	ret = kstrtoint(buf, 0, &oom_group);
6431 	if (ret)
6432 		return ret;
6433 
6434 	if (oom_group != 0 && oom_group != 1)
6435 		return -EINVAL;
6436 
6437 	memcg->oom_group = oom_group;
6438 
6439 	return nbytes;
6440 }
6441 
6442 static struct cftype memory_files[] = {
6443 	{
6444 		.name = "current",
6445 		.flags = CFTYPE_NOT_ON_ROOT,
6446 		.read_u64 = memory_current_read,
6447 	},
6448 	{
6449 		.name = "min",
6450 		.flags = CFTYPE_NOT_ON_ROOT,
6451 		.seq_show = memory_min_show,
6452 		.write = memory_min_write,
6453 	},
6454 	{
6455 		.name = "low",
6456 		.flags = CFTYPE_NOT_ON_ROOT,
6457 		.seq_show = memory_low_show,
6458 		.write = memory_low_write,
6459 	},
6460 	{
6461 		.name = "high",
6462 		.flags = CFTYPE_NOT_ON_ROOT,
6463 		.seq_show = memory_high_show,
6464 		.write = memory_high_write,
6465 	},
6466 	{
6467 		.name = "max",
6468 		.flags = CFTYPE_NOT_ON_ROOT,
6469 		.seq_show = memory_max_show,
6470 		.write = memory_max_write,
6471 	},
6472 	{
6473 		.name = "events",
6474 		.flags = CFTYPE_NOT_ON_ROOT,
6475 		.file_offset = offsetof(struct mem_cgroup, events_file),
6476 		.seq_show = memory_events_show,
6477 	},
6478 	{
6479 		.name = "events.local",
6480 		.flags = CFTYPE_NOT_ON_ROOT,
6481 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6482 		.seq_show = memory_events_local_show,
6483 	},
6484 	{
6485 		.name = "stat",
6486 		.seq_show = memory_stat_show,
6487 	},
6488 #ifdef CONFIG_NUMA
6489 	{
6490 		.name = "numa_stat",
6491 		.seq_show = memory_numa_stat_show,
6492 	},
6493 #endif
6494 	{
6495 		.name = "oom.group",
6496 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6497 		.seq_show = memory_oom_group_show,
6498 		.write = memory_oom_group_write,
6499 	},
6500 	{ }	/* terminate */
6501 };
6502 
6503 struct cgroup_subsys memory_cgrp_subsys = {
6504 	.css_alloc = mem_cgroup_css_alloc,
6505 	.css_online = mem_cgroup_css_online,
6506 	.css_offline = mem_cgroup_css_offline,
6507 	.css_released = mem_cgroup_css_released,
6508 	.css_free = mem_cgroup_css_free,
6509 	.css_reset = mem_cgroup_css_reset,
6510 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
6511 	.can_attach = mem_cgroup_can_attach,
6512 	.cancel_attach = mem_cgroup_cancel_attach,
6513 	.post_attach = mem_cgroup_move_task,
6514 	.dfl_cftypes = memory_files,
6515 	.legacy_cftypes = mem_cgroup_legacy_files,
6516 	.early_init = 0,
6517 };
6518 
6519 /*
6520  * This function calculates an individual cgroup's effective
6521  * protection which is derived from its own memory.min/low, its
6522  * parent's and siblings' settings, as well as the actual memory
6523  * distribution in the tree.
6524  *
6525  * The following rules apply to the effective protection values:
6526  *
6527  * 1. At the first level of reclaim, effective protection is equal to
6528  *    the declared protection in memory.min and memory.low.
6529  *
6530  * 2. To enable safe delegation of the protection configuration, at
6531  *    subsequent levels the effective protection is capped to the
6532  *    parent's effective protection.
6533  *
6534  * 3. To make complex and dynamic subtrees easier to configure, the
6535  *    user is allowed to overcommit the declared protection at a given
6536  *    level. If that is the case, the parent's effective protection is
6537  *    distributed to the children in proportion to how much protection
6538  *    they have declared and how much of it they are utilizing.
6539  *
6540  *    This makes distribution proportional, but also work-conserving:
6541  *    if one cgroup claims much more protection than it uses memory,
6542  *    the unused remainder is available to its siblings.
6543  *
6544  * 4. Conversely, when the declared protection is undercommitted at a
6545  *    given level, the distribution of the larger parental protection
6546  *    budget is NOT proportional. A cgroup's protection from a sibling
6547  *    is capped to its own memory.min/low setting.
6548  *
6549  * 5. However, to allow protecting recursive subtrees from each other
6550  *    without having to declare each individual cgroup's fixed share
6551  *    of the ancestor's claim to protection, any unutilized -
6552  *    "floating" - protection from up the tree is distributed in
6553  *    proportion to each cgroup's *usage*. This makes the protection
6554  *    neutral wrt sibling cgroups and lets them compete freely over
6555  *    the shared parental protection budget, but it protects the
6556  *    subtree as a whole from neighboring subtrees.
6557  *
6558  * Note that 4. and 5. are not in conflict: 4. is about protecting
6559  * against immediate siblings whereas 5. is about protecting against
6560  * neighboring subtrees.
6561  */
6562 static unsigned long effective_protection(unsigned long usage,
6563 					  unsigned long parent_usage,
6564 					  unsigned long setting,
6565 					  unsigned long parent_effective,
6566 					  unsigned long siblings_protected)
6567 {
6568 	unsigned long protected;
6569 	unsigned long ep;
6570 
6571 	protected = min(usage, setting);
6572 	/*
6573 	 * If all cgroups at this level combined claim and use more
6574 	 * protection then what the parent affords them, distribute
6575 	 * shares in proportion to utilization.
6576 	 *
6577 	 * We are using actual utilization rather than the statically
6578 	 * claimed protection in order to be work-conserving: claimed
6579 	 * but unused protection is available to siblings that would
6580 	 * otherwise get a smaller chunk than what they claimed.
6581 	 */
6582 	if (siblings_protected > parent_effective)
6583 		return protected * parent_effective / siblings_protected;
6584 
6585 	/*
6586 	 * Ok, utilized protection of all children is within what the
6587 	 * parent affords them, so we know whatever this child claims
6588 	 * and utilizes is effectively protected.
6589 	 *
6590 	 * If there is unprotected usage beyond this value, reclaim
6591 	 * will apply pressure in proportion to that amount.
6592 	 *
6593 	 * If there is unutilized protection, the cgroup will be fully
6594 	 * shielded from reclaim, but we do return a smaller value for
6595 	 * protection than what the group could enjoy in theory. This
6596 	 * is okay. With the overcommit distribution above, effective
6597 	 * protection is always dependent on how memory is actually
6598 	 * consumed among the siblings anyway.
6599 	 */
6600 	ep = protected;
6601 
6602 	/*
6603 	 * If the children aren't claiming (all of) the protection
6604 	 * afforded to them by the parent, distribute the remainder in
6605 	 * proportion to the (unprotected) memory of each cgroup. That
6606 	 * way, cgroups that aren't explicitly prioritized wrt each
6607 	 * other compete freely over the allowance, but they are
6608 	 * collectively protected from neighboring trees.
6609 	 *
6610 	 * We're using unprotected memory for the weight so that if
6611 	 * some cgroups DO claim explicit protection, we don't protect
6612 	 * the same bytes twice.
6613 	 *
6614 	 * Check both usage and parent_usage against the respective
6615 	 * protected values. One should imply the other, but they
6616 	 * aren't read atomically - make sure the division is sane.
6617 	 */
6618 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6619 		return ep;
6620 	if (parent_effective > siblings_protected &&
6621 	    parent_usage > siblings_protected &&
6622 	    usage > protected) {
6623 		unsigned long unclaimed;
6624 
6625 		unclaimed = parent_effective - siblings_protected;
6626 		unclaimed *= usage - protected;
6627 		unclaimed /= parent_usage - siblings_protected;
6628 
6629 		ep += unclaimed;
6630 	}
6631 
6632 	return ep;
6633 }
6634 
6635 /**
6636  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6637  * @root: the top ancestor of the sub-tree being checked
6638  * @memcg: the memory cgroup to check
6639  *
6640  * WARNING: This function is not stateless! It can only be used as part
6641  *          of a top-down tree iteration, not for isolated queries.
6642  */
6643 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6644 				     struct mem_cgroup *memcg)
6645 {
6646 	unsigned long usage, parent_usage;
6647 	struct mem_cgroup *parent;
6648 
6649 	if (mem_cgroup_disabled())
6650 		return;
6651 
6652 	if (!root)
6653 		root = root_mem_cgroup;
6654 
6655 	/*
6656 	 * Effective values of the reclaim targets are ignored so they
6657 	 * can be stale. Have a look at mem_cgroup_protection for more
6658 	 * details.
6659 	 * TODO: calculation should be more robust so that we do not need
6660 	 * that special casing.
6661 	 */
6662 	if (memcg == root)
6663 		return;
6664 
6665 	usage = page_counter_read(&memcg->memory);
6666 	if (!usage)
6667 		return;
6668 
6669 	parent = parent_mem_cgroup(memcg);
6670 	/* No parent means a non-hierarchical mode on v1 memcg */
6671 	if (!parent)
6672 		return;
6673 
6674 	if (parent == root) {
6675 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6676 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6677 		return;
6678 	}
6679 
6680 	parent_usage = page_counter_read(&parent->memory);
6681 
6682 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6683 			READ_ONCE(memcg->memory.min),
6684 			READ_ONCE(parent->memory.emin),
6685 			atomic_long_read(&parent->memory.children_min_usage)));
6686 
6687 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6688 			READ_ONCE(memcg->memory.low),
6689 			READ_ONCE(parent->memory.elow),
6690 			atomic_long_read(&parent->memory.children_low_usage)));
6691 }
6692 
6693 static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
6694 {
6695 	unsigned int nr_pages = thp_nr_pages(page);
6696 	int ret;
6697 
6698 	ret = try_charge(memcg, gfp, nr_pages);
6699 	if (ret)
6700 		goto out;
6701 
6702 	css_get(&memcg->css);
6703 	commit_charge(page, memcg);
6704 
6705 	local_irq_disable();
6706 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
6707 	memcg_check_events(memcg, page);
6708 	local_irq_enable();
6709 out:
6710 	return ret;
6711 }
6712 
6713 /**
6714  * __mem_cgroup_charge - charge a newly allocated page to a cgroup
6715  * @page: page to charge
6716  * @mm: mm context of the victim
6717  * @gfp_mask: reclaim mode
6718  *
6719  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6720  * pages according to @gfp_mask if necessary. if @mm is NULL, try to
6721  * charge to the active memcg.
6722  *
6723  * Do not use this for pages allocated for swapin.
6724  *
6725  * Returns 0 on success. Otherwise, an error code is returned.
6726  */
6727 int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
6728 			gfp_t gfp_mask)
6729 {
6730 	struct mem_cgroup *memcg;
6731 	int ret;
6732 
6733 	memcg = get_mem_cgroup_from_mm(mm);
6734 	ret = charge_memcg(page, memcg, gfp_mask);
6735 	css_put(&memcg->css);
6736 
6737 	return ret;
6738 }
6739 
6740 /**
6741  * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6742  * @page: page to charge
6743  * @mm: mm context of the victim
6744  * @gfp: reclaim mode
6745  * @entry: swap entry for which the page is allocated
6746  *
6747  * This function charges a page allocated for swapin. Please call this before
6748  * adding the page to the swapcache.
6749  *
6750  * Returns 0 on success. Otherwise, an error code is returned.
6751  */
6752 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6753 				  gfp_t gfp, swp_entry_t entry)
6754 {
6755 	struct mem_cgroup *memcg;
6756 	unsigned short id;
6757 	int ret;
6758 
6759 	if (mem_cgroup_disabled())
6760 		return 0;
6761 
6762 	id = lookup_swap_cgroup_id(entry);
6763 	rcu_read_lock();
6764 	memcg = mem_cgroup_from_id(id);
6765 	if (!memcg || !css_tryget_online(&memcg->css))
6766 		memcg = get_mem_cgroup_from_mm(mm);
6767 	rcu_read_unlock();
6768 
6769 	ret = charge_memcg(page, memcg, gfp);
6770 
6771 	css_put(&memcg->css);
6772 	return ret;
6773 }
6774 
6775 /*
6776  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6777  * @entry: swap entry for which the page is charged
6778  *
6779  * Call this function after successfully adding the charged page to swapcache.
6780  *
6781  * Note: This function assumes the page for which swap slot is being uncharged
6782  * is order 0 page.
6783  */
6784 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6785 {
6786 	/*
6787 	 * Cgroup1's unified memory+swap counter has been charged with the
6788 	 * new swapcache page, finish the transfer by uncharging the swap
6789 	 * slot. The swap slot would also get uncharged when it dies, but
6790 	 * it can stick around indefinitely and we'd count the page twice
6791 	 * the entire time.
6792 	 *
6793 	 * Cgroup2 has separate resource counters for memory and swap,
6794 	 * so this is a non-issue here. Memory and swap charge lifetimes
6795 	 * correspond 1:1 to page and swap slot lifetimes: we charge the
6796 	 * page to memory here, and uncharge swap when the slot is freed.
6797 	 */
6798 	if (!mem_cgroup_disabled() && do_memsw_account()) {
6799 		/*
6800 		 * The swap entry might not get freed for a long time,
6801 		 * let's not wait for it.  The page already received a
6802 		 * memory+swap charge, drop the swap entry duplicate.
6803 		 */
6804 		mem_cgroup_uncharge_swap(entry, 1);
6805 	}
6806 }
6807 
6808 struct uncharge_gather {
6809 	struct mem_cgroup *memcg;
6810 	unsigned long nr_memory;
6811 	unsigned long pgpgout;
6812 	unsigned long nr_kmem;
6813 	struct page *dummy_page;
6814 };
6815 
6816 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6817 {
6818 	memset(ug, 0, sizeof(*ug));
6819 }
6820 
6821 static void uncharge_batch(const struct uncharge_gather *ug)
6822 {
6823 	unsigned long flags;
6824 
6825 	if (ug->nr_memory) {
6826 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6827 		if (do_memsw_account())
6828 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6829 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6830 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6831 		memcg_oom_recover(ug->memcg);
6832 	}
6833 
6834 	local_irq_save(flags);
6835 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6836 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6837 	memcg_check_events(ug->memcg, ug->dummy_page);
6838 	local_irq_restore(flags);
6839 
6840 	/* drop reference from uncharge_page */
6841 	css_put(&ug->memcg->css);
6842 }
6843 
6844 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6845 {
6846 	unsigned long nr_pages;
6847 	struct mem_cgroup *memcg;
6848 	struct obj_cgroup *objcg;
6849 	bool use_objcg = PageMemcgKmem(page);
6850 
6851 	VM_BUG_ON_PAGE(PageLRU(page), page);
6852 
6853 	/*
6854 	 * Nobody should be changing or seriously looking at
6855 	 * page memcg or objcg at this point, we have fully
6856 	 * exclusive access to the page.
6857 	 */
6858 	if (use_objcg) {
6859 		objcg = __page_objcg(page);
6860 		/*
6861 		 * This get matches the put at the end of the function and
6862 		 * kmem pages do not hold memcg references anymore.
6863 		 */
6864 		memcg = get_mem_cgroup_from_objcg(objcg);
6865 	} else {
6866 		memcg = __page_memcg(page);
6867 	}
6868 
6869 	if (!memcg)
6870 		return;
6871 
6872 	if (ug->memcg != memcg) {
6873 		if (ug->memcg) {
6874 			uncharge_batch(ug);
6875 			uncharge_gather_clear(ug);
6876 		}
6877 		ug->memcg = memcg;
6878 		ug->dummy_page = page;
6879 
6880 		/* pairs with css_put in uncharge_batch */
6881 		css_get(&memcg->css);
6882 	}
6883 
6884 	nr_pages = compound_nr(page);
6885 
6886 	if (use_objcg) {
6887 		ug->nr_memory += nr_pages;
6888 		ug->nr_kmem += nr_pages;
6889 
6890 		page->memcg_data = 0;
6891 		obj_cgroup_put(objcg);
6892 	} else {
6893 		/* LRU pages aren't accounted at the root level */
6894 		if (!mem_cgroup_is_root(memcg))
6895 			ug->nr_memory += nr_pages;
6896 		ug->pgpgout++;
6897 
6898 		page->memcg_data = 0;
6899 	}
6900 
6901 	css_put(&memcg->css);
6902 }
6903 
6904 /**
6905  * __mem_cgroup_uncharge - uncharge a page
6906  * @page: page to uncharge
6907  *
6908  * Uncharge a page previously charged with __mem_cgroup_charge().
6909  */
6910 void __mem_cgroup_uncharge(struct page *page)
6911 {
6912 	struct uncharge_gather ug;
6913 
6914 	/* Don't touch page->lru of any random page, pre-check: */
6915 	if (!page_memcg(page))
6916 		return;
6917 
6918 	uncharge_gather_clear(&ug);
6919 	uncharge_page(page, &ug);
6920 	uncharge_batch(&ug);
6921 }
6922 
6923 /**
6924  * __mem_cgroup_uncharge_list - uncharge a list of page
6925  * @page_list: list of pages to uncharge
6926  *
6927  * Uncharge a list of pages previously charged with
6928  * __mem_cgroup_charge().
6929  */
6930 void __mem_cgroup_uncharge_list(struct list_head *page_list)
6931 {
6932 	struct uncharge_gather ug;
6933 	struct page *page;
6934 
6935 	uncharge_gather_clear(&ug);
6936 	list_for_each_entry(page, page_list, lru)
6937 		uncharge_page(page, &ug);
6938 	if (ug.memcg)
6939 		uncharge_batch(&ug);
6940 }
6941 
6942 /**
6943  * mem_cgroup_migrate - charge a page's replacement
6944  * @oldpage: currently circulating page
6945  * @newpage: replacement page
6946  *
6947  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6948  * be uncharged upon free.
6949  *
6950  * Both pages must be locked, @newpage->mapping must be set up.
6951  */
6952 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6953 {
6954 	struct mem_cgroup *memcg;
6955 	unsigned int nr_pages;
6956 	unsigned long flags;
6957 
6958 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6959 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6960 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6961 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6962 		       newpage);
6963 
6964 	if (mem_cgroup_disabled())
6965 		return;
6966 
6967 	/* Page cache replacement: new page already charged? */
6968 	if (page_memcg(newpage))
6969 		return;
6970 
6971 	memcg = page_memcg(oldpage);
6972 	VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
6973 	if (!memcg)
6974 		return;
6975 
6976 	/* Force-charge the new page. The old one will be freed soon */
6977 	nr_pages = thp_nr_pages(newpage);
6978 
6979 	if (!mem_cgroup_is_root(memcg)) {
6980 		page_counter_charge(&memcg->memory, nr_pages);
6981 		if (do_memsw_account())
6982 			page_counter_charge(&memcg->memsw, nr_pages);
6983 	}
6984 
6985 	css_get(&memcg->css);
6986 	commit_charge(newpage, memcg);
6987 
6988 	local_irq_save(flags);
6989 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
6990 	memcg_check_events(memcg, newpage);
6991 	local_irq_restore(flags);
6992 }
6993 
6994 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6995 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6996 
6997 void mem_cgroup_sk_alloc(struct sock *sk)
6998 {
6999 	struct mem_cgroup *memcg;
7000 
7001 	if (!mem_cgroup_sockets_enabled)
7002 		return;
7003 
7004 	/* Do not associate the sock with unrelated interrupted task's memcg. */
7005 	if (in_interrupt())
7006 		return;
7007 
7008 	rcu_read_lock();
7009 	memcg = mem_cgroup_from_task(current);
7010 	if (memcg == root_mem_cgroup)
7011 		goto out;
7012 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7013 		goto out;
7014 	if (css_tryget(&memcg->css))
7015 		sk->sk_memcg = memcg;
7016 out:
7017 	rcu_read_unlock();
7018 }
7019 
7020 void mem_cgroup_sk_free(struct sock *sk)
7021 {
7022 	if (sk->sk_memcg)
7023 		css_put(&sk->sk_memcg->css);
7024 }
7025 
7026 /**
7027  * mem_cgroup_charge_skmem - charge socket memory
7028  * @memcg: memcg to charge
7029  * @nr_pages: number of pages to charge
7030  * @gfp_mask: reclaim mode
7031  *
7032  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7033  * @memcg's configured limit, %false if it doesn't.
7034  */
7035 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7036 			     gfp_t gfp_mask)
7037 {
7038 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7039 		struct page_counter *fail;
7040 
7041 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7042 			memcg->tcpmem_pressure = 0;
7043 			return true;
7044 		}
7045 		memcg->tcpmem_pressure = 1;
7046 		if (gfp_mask & __GFP_NOFAIL) {
7047 			page_counter_charge(&memcg->tcpmem, nr_pages);
7048 			return true;
7049 		}
7050 		return false;
7051 	}
7052 
7053 	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7054 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7055 		return true;
7056 	}
7057 
7058 	return false;
7059 }
7060 
7061 /**
7062  * mem_cgroup_uncharge_skmem - uncharge socket memory
7063  * @memcg: memcg to uncharge
7064  * @nr_pages: number of pages to uncharge
7065  */
7066 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7067 {
7068 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7069 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7070 		return;
7071 	}
7072 
7073 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7074 
7075 	refill_stock(memcg, nr_pages);
7076 }
7077 
7078 static int __init cgroup_memory(char *s)
7079 {
7080 	char *token;
7081 
7082 	while ((token = strsep(&s, ",")) != NULL) {
7083 		if (!*token)
7084 			continue;
7085 		if (!strcmp(token, "nosocket"))
7086 			cgroup_memory_nosocket = true;
7087 		if (!strcmp(token, "nokmem"))
7088 			cgroup_memory_nokmem = true;
7089 	}
7090 	return 0;
7091 }
7092 __setup("cgroup.memory=", cgroup_memory);
7093 
7094 /*
7095  * subsys_initcall() for memory controller.
7096  *
7097  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7098  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7099  * basically everything that doesn't depend on a specific mem_cgroup structure
7100  * should be initialized from here.
7101  */
7102 static int __init mem_cgroup_init(void)
7103 {
7104 	int cpu, node;
7105 
7106 	/*
7107 	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7108 	 * used for per-memcg-per-cpu caching of per-node statistics. In order
7109 	 * to work fine, we should make sure that the overfill threshold can't
7110 	 * exceed S32_MAX / PAGE_SIZE.
7111 	 */
7112 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7113 
7114 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7115 				  memcg_hotplug_cpu_dead);
7116 
7117 	for_each_possible_cpu(cpu)
7118 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7119 			  drain_local_stock);
7120 
7121 	for_each_node(node) {
7122 		struct mem_cgroup_tree_per_node *rtpn;
7123 
7124 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7125 				    node_online(node) ? node : NUMA_NO_NODE);
7126 
7127 		rtpn->rb_root = RB_ROOT;
7128 		rtpn->rb_rightmost = NULL;
7129 		spin_lock_init(&rtpn->lock);
7130 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7131 	}
7132 
7133 	return 0;
7134 }
7135 subsys_initcall(mem_cgroup_init);
7136 
7137 #ifdef CONFIG_MEMCG_SWAP
7138 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7139 {
7140 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7141 		/*
7142 		 * The root cgroup cannot be destroyed, so it's refcount must
7143 		 * always be >= 1.
7144 		 */
7145 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7146 			VM_BUG_ON(1);
7147 			break;
7148 		}
7149 		memcg = parent_mem_cgroup(memcg);
7150 		if (!memcg)
7151 			memcg = root_mem_cgroup;
7152 	}
7153 	return memcg;
7154 }
7155 
7156 /**
7157  * mem_cgroup_swapout - transfer a memsw charge to swap
7158  * @page: page whose memsw charge to transfer
7159  * @entry: swap entry to move the charge to
7160  *
7161  * Transfer the memsw charge of @page to @entry.
7162  */
7163 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7164 {
7165 	struct mem_cgroup *memcg, *swap_memcg;
7166 	unsigned int nr_entries;
7167 	unsigned short oldid;
7168 
7169 	VM_BUG_ON_PAGE(PageLRU(page), page);
7170 	VM_BUG_ON_PAGE(page_count(page), page);
7171 
7172 	if (mem_cgroup_disabled())
7173 		return;
7174 
7175 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7176 		return;
7177 
7178 	memcg = page_memcg(page);
7179 
7180 	VM_WARN_ON_ONCE_PAGE(!memcg, page);
7181 	if (!memcg)
7182 		return;
7183 
7184 	/*
7185 	 * In case the memcg owning these pages has been offlined and doesn't
7186 	 * have an ID allocated to it anymore, charge the closest online
7187 	 * ancestor for the swap instead and transfer the memory+swap charge.
7188 	 */
7189 	swap_memcg = mem_cgroup_id_get_online(memcg);
7190 	nr_entries = thp_nr_pages(page);
7191 	/* Get references for the tail pages, too */
7192 	if (nr_entries > 1)
7193 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7194 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7195 				   nr_entries);
7196 	VM_BUG_ON_PAGE(oldid, page);
7197 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7198 
7199 	page->memcg_data = 0;
7200 
7201 	if (!mem_cgroup_is_root(memcg))
7202 		page_counter_uncharge(&memcg->memory, nr_entries);
7203 
7204 	if (!cgroup_memory_noswap && memcg != swap_memcg) {
7205 		if (!mem_cgroup_is_root(swap_memcg))
7206 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7207 		page_counter_uncharge(&memcg->memsw, nr_entries);
7208 	}
7209 
7210 	/*
7211 	 * Interrupts should be disabled here because the caller holds the
7212 	 * i_pages lock which is taken with interrupts-off. It is
7213 	 * important here to have the interrupts disabled because it is the
7214 	 * only synchronisation we have for updating the per-CPU variables.
7215 	 */
7216 	VM_BUG_ON(!irqs_disabled());
7217 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7218 	memcg_check_events(memcg, page);
7219 
7220 	css_put(&memcg->css);
7221 }
7222 
7223 /**
7224  * __mem_cgroup_try_charge_swap - try charging swap space for a page
7225  * @page: page being added to swap
7226  * @entry: swap entry to charge
7227  *
7228  * Try to charge @page's memcg for the swap space at @entry.
7229  *
7230  * Returns 0 on success, -ENOMEM on failure.
7231  */
7232 int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7233 {
7234 	unsigned int nr_pages = thp_nr_pages(page);
7235 	struct page_counter *counter;
7236 	struct mem_cgroup *memcg;
7237 	unsigned short oldid;
7238 
7239 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7240 		return 0;
7241 
7242 	memcg = page_memcg(page);
7243 
7244 	VM_WARN_ON_ONCE_PAGE(!memcg, page);
7245 	if (!memcg)
7246 		return 0;
7247 
7248 	if (!entry.val) {
7249 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7250 		return 0;
7251 	}
7252 
7253 	memcg = mem_cgroup_id_get_online(memcg);
7254 
7255 	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7256 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7257 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7258 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7259 		mem_cgroup_id_put(memcg);
7260 		return -ENOMEM;
7261 	}
7262 
7263 	/* Get references for the tail pages, too */
7264 	if (nr_pages > 1)
7265 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7266 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7267 	VM_BUG_ON_PAGE(oldid, page);
7268 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7269 
7270 	return 0;
7271 }
7272 
7273 /**
7274  * __mem_cgroup_uncharge_swap - uncharge swap space
7275  * @entry: swap entry to uncharge
7276  * @nr_pages: the amount of swap space to uncharge
7277  */
7278 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7279 {
7280 	struct mem_cgroup *memcg;
7281 	unsigned short id;
7282 
7283 	id = swap_cgroup_record(entry, 0, nr_pages);
7284 	rcu_read_lock();
7285 	memcg = mem_cgroup_from_id(id);
7286 	if (memcg) {
7287 		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7288 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7289 				page_counter_uncharge(&memcg->swap, nr_pages);
7290 			else
7291 				page_counter_uncharge(&memcg->memsw, nr_pages);
7292 		}
7293 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7294 		mem_cgroup_id_put_many(memcg, nr_pages);
7295 	}
7296 	rcu_read_unlock();
7297 }
7298 
7299 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7300 {
7301 	long nr_swap_pages = get_nr_swap_pages();
7302 
7303 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7304 		return nr_swap_pages;
7305 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7306 		nr_swap_pages = min_t(long, nr_swap_pages,
7307 				      READ_ONCE(memcg->swap.max) -
7308 				      page_counter_read(&memcg->swap));
7309 	return nr_swap_pages;
7310 }
7311 
7312 bool mem_cgroup_swap_full(struct page *page)
7313 {
7314 	struct mem_cgroup *memcg;
7315 
7316 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7317 
7318 	if (vm_swap_full())
7319 		return true;
7320 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7321 		return false;
7322 
7323 	memcg = page_memcg(page);
7324 	if (!memcg)
7325 		return false;
7326 
7327 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7328 		unsigned long usage = page_counter_read(&memcg->swap);
7329 
7330 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7331 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7332 			return true;
7333 	}
7334 
7335 	return false;
7336 }
7337 
7338 static int __init setup_swap_account(char *s)
7339 {
7340 	if (!strcmp(s, "1"))
7341 		cgroup_memory_noswap = false;
7342 	else if (!strcmp(s, "0"))
7343 		cgroup_memory_noswap = true;
7344 	return 1;
7345 }
7346 __setup("swapaccount=", setup_swap_account);
7347 
7348 static u64 swap_current_read(struct cgroup_subsys_state *css,
7349 			     struct cftype *cft)
7350 {
7351 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7352 
7353 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7354 }
7355 
7356 static int swap_high_show(struct seq_file *m, void *v)
7357 {
7358 	return seq_puts_memcg_tunable(m,
7359 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7360 }
7361 
7362 static ssize_t swap_high_write(struct kernfs_open_file *of,
7363 			       char *buf, size_t nbytes, loff_t off)
7364 {
7365 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7366 	unsigned long high;
7367 	int err;
7368 
7369 	buf = strstrip(buf);
7370 	err = page_counter_memparse(buf, "max", &high);
7371 	if (err)
7372 		return err;
7373 
7374 	page_counter_set_high(&memcg->swap, high);
7375 
7376 	return nbytes;
7377 }
7378 
7379 static int swap_max_show(struct seq_file *m, void *v)
7380 {
7381 	return seq_puts_memcg_tunable(m,
7382 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7383 }
7384 
7385 static ssize_t swap_max_write(struct kernfs_open_file *of,
7386 			      char *buf, size_t nbytes, loff_t off)
7387 {
7388 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7389 	unsigned long max;
7390 	int err;
7391 
7392 	buf = strstrip(buf);
7393 	err = page_counter_memparse(buf, "max", &max);
7394 	if (err)
7395 		return err;
7396 
7397 	xchg(&memcg->swap.max, max);
7398 
7399 	return nbytes;
7400 }
7401 
7402 static int swap_events_show(struct seq_file *m, void *v)
7403 {
7404 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7405 
7406 	seq_printf(m, "high %lu\n",
7407 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7408 	seq_printf(m, "max %lu\n",
7409 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7410 	seq_printf(m, "fail %lu\n",
7411 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7412 
7413 	return 0;
7414 }
7415 
7416 static struct cftype swap_files[] = {
7417 	{
7418 		.name = "swap.current",
7419 		.flags = CFTYPE_NOT_ON_ROOT,
7420 		.read_u64 = swap_current_read,
7421 	},
7422 	{
7423 		.name = "swap.high",
7424 		.flags = CFTYPE_NOT_ON_ROOT,
7425 		.seq_show = swap_high_show,
7426 		.write = swap_high_write,
7427 	},
7428 	{
7429 		.name = "swap.max",
7430 		.flags = CFTYPE_NOT_ON_ROOT,
7431 		.seq_show = swap_max_show,
7432 		.write = swap_max_write,
7433 	},
7434 	{
7435 		.name = "swap.events",
7436 		.flags = CFTYPE_NOT_ON_ROOT,
7437 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7438 		.seq_show = swap_events_show,
7439 	},
7440 	{ }	/* terminate */
7441 };
7442 
7443 static struct cftype memsw_files[] = {
7444 	{
7445 		.name = "memsw.usage_in_bytes",
7446 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7447 		.read_u64 = mem_cgroup_read_u64,
7448 	},
7449 	{
7450 		.name = "memsw.max_usage_in_bytes",
7451 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7452 		.write = mem_cgroup_reset,
7453 		.read_u64 = mem_cgroup_read_u64,
7454 	},
7455 	{
7456 		.name = "memsw.limit_in_bytes",
7457 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7458 		.write = mem_cgroup_write,
7459 		.read_u64 = mem_cgroup_read_u64,
7460 	},
7461 	{
7462 		.name = "memsw.failcnt",
7463 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7464 		.write = mem_cgroup_reset,
7465 		.read_u64 = mem_cgroup_read_u64,
7466 	},
7467 	{ },	/* terminate */
7468 };
7469 
7470 /*
7471  * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
7472  * instead of a core_initcall(), this could mean cgroup_memory_noswap still
7473  * remains set to false even when memcg is disabled via "cgroup_disable=memory"
7474  * boot parameter. This may result in premature OOPS inside
7475  * mem_cgroup_get_nr_swap_pages() function in corner cases.
7476  */
7477 static int __init mem_cgroup_swap_init(void)
7478 {
7479 	/* No memory control -> no swap control */
7480 	if (mem_cgroup_disabled())
7481 		cgroup_memory_noswap = true;
7482 
7483 	if (cgroup_memory_noswap)
7484 		return 0;
7485 
7486 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7487 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7488 
7489 	return 0;
7490 }
7491 core_initcall(mem_cgroup_swap_init);
7492 
7493 #endif /* CONFIG_MEMCG_SWAP */
7494