xref: /openbmc/linux/mm/memcontrol.c (revision 78bb17f7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  */
24 
25 #include <linux/page_counter.h>
26 #include <linux/memcontrol.h>
27 #include <linux/cgroup.h>
28 #include <linux/pagewalk.h>
29 #include <linux/sched/mm.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/hugetlb.h>
32 #include <linux/pagemap.h>
33 #include <linux/vm_event_item.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/swap_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include <linux/tracehook.h>
60 #include <linux/psi.h>
61 #include <linux/seq_buf.h>
62 #include "internal.h"
63 #include <net/sock.h>
64 #include <net/ip.h>
65 #include "slab.h"
66 
67 #include <linux/uaccess.h>
68 
69 #include <trace/events/vmscan.h>
70 
71 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
72 EXPORT_SYMBOL(memory_cgrp_subsys);
73 
74 struct mem_cgroup *root_mem_cgroup __read_mostly;
75 
76 #define MEM_CGROUP_RECLAIM_RETRIES	5
77 
78 /* Socket memory accounting disabled? */
79 static bool cgroup_memory_nosocket;
80 
81 /* Kernel memory accounting disabled? */
82 static bool cgroup_memory_nokmem;
83 
84 /* Whether the swap controller is active */
85 #ifdef CONFIG_MEMCG_SWAP
86 int do_swap_account __read_mostly;
87 #else
88 #define do_swap_account		0
89 #endif
90 
91 #ifdef CONFIG_CGROUP_WRITEBACK
92 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
93 #endif
94 
95 /* Whether legacy memory+swap accounting is active */
96 static bool do_memsw_account(void)
97 {
98 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99 }
100 
101 #define THRESHOLDS_EVENTS_TARGET 128
102 #define SOFTLIMIT_EVENTS_TARGET 1024
103 
104 /*
105  * Cgroups above their limits are maintained in a RB-Tree, independent of
106  * their hierarchy representation
107  */
108 
109 struct mem_cgroup_tree_per_node {
110 	struct rb_root rb_root;
111 	struct rb_node *rb_rightmost;
112 	spinlock_t lock;
113 };
114 
115 struct mem_cgroup_tree {
116 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
117 };
118 
119 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
120 
121 /* for OOM */
122 struct mem_cgroup_eventfd_list {
123 	struct list_head list;
124 	struct eventfd_ctx *eventfd;
125 };
126 
127 /*
128  * cgroup_event represents events which userspace want to receive.
129  */
130 struct mem_cgroup_event {
131 	/*
132 	 * memcg which the event belongs to.
133 	 */
134 	struct mem_cgroup *memcg;
135 	/*
136 	 * eventfd to signal userspace about the event.
137 	 */
138 	struct eventfd_ctx *eventfd;
139 	/*
140 	 * Each of these stored in a list by the cgroup.
141 	 */
142 	struct list_head list;
143 	/*
144 	 * register_event() callback will be used to add new userspace
145 	 * waiter for changes related to this event.  Use eventfd_signal()
146 	 * on eventfd to send notification to userspace.
147 	 */
148 	int (*register_event)(struct mem_cgroup *memcg,
149 			      struct eventfd_ctx *eventfd, const char *args);
150 	/*
151 	 * unregister_event() callback will be called when userspace closes
152 	 * the eventfd or on cgroup removing.  This callback must be set,
153 	 * if you want provide notification functionality.
154 	 */
155 	void (*unregister_event)(struct mem_cgroup *memcg,
156 				 struct eventfd_ctx *eventfd);
157 	/*
158 	 * All fields below needed to unregister event when
159 	 * userspace closes eventfd.
160 	 */
161 	poll_table pt;
162 	wait_queue_head_t *wqh;
163 	wait_queue_entry_t wait;
164 	struct work_struct remove;
165 };
166 
167 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
168 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
169 
170 /* Stuffs for move charges at task migration. */
171 /*
172  * Types of charges to be moved.
173  */
174 #define MOVE_ANON	0x1U
175 #define MOVE_FILE	0x2U
176 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
177 
178 /* "mc" and its members are protected by cgroup_mutex */
179 static struct move_charge_struct {
180 	spinlock_t	  lock; /* for from, to */
181 	struct mm_struct  *mm;
182 	struct mem_cgroup *from;
183 	struct mem_cgroup *to;
184 	unsigned long flags;
185 	unsigned long precharge;
186 	unsigned long moved_charge;
187 	unsigned long moved_swap;
188 	struct task_struct *moving_task;	/* a task moving charges */
189 	wait_queue_head_t waitq;		/* a waitq for other context */
190 } mc = {
191 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
192 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
193 };
194 
195 /*
196  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
197  * limit reclaim to prevent infinite loops, if they ever occur.
198  */
199 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
200 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
201 
202 enum charge_type {
203 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
204 	MEM_CGROUP_CHARGE_TYPE_ANON,
205 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
206 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
207 	NR_CHARGE_TYPE,
208 };
209 
210 /* for encoding cft->private value on file */
211 enum res_type {
212 	_MEM,
213 	_MEMSWAP,
214 	_OOM_TYPE,
215 	_KMEM,
216 	_TCP,
217 };
218 
219 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
220 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
221 #define MEMFILE_ATTR(val)	((val) & 0xffff)
222 /* Used for OOM nofiier */
223 #define OOM_CONTROL		(0)
224 
225 /*
226  * Iteration constructs for visiting all cgroups (under a tree).  If
227  * loops are exited prematurely (break), mem_cgroup_iter_break() must
228  * be used for reference counting.
229  */
230 #define for_each_mem_cgroup_tree(iter, root)		\
231 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
232 	     iter != NULL;				\
233 	     iter = mem_cgroup_iter(root, iter, NULL))
234 
235 #define for_each_mem_cgroup(iter)			\
236 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
237 	     iter != NULL;				\
238 	     iter = mem_cgroup_iter(NULL, iter, NULL))
239 
240 static inline bool should_force_charge(void)
241 {
242 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
243 		(current->flags & PF_EXITING);
244 }
245 
246 /* Some nice accessors for the vmpressure. */
247 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
248 {
249 	if (!memcg)
250 		memcg = root_mem_cgroup;
251 	return &memcg->vmpressure;
252 }
253 
254 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
255 {
256 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
257 }
258 
259 #ifdef CONFIG_MEMCG_KMEM
260 /*
261  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
262  * The main reason for not using cgroup id for this:
263  *  this works better in sparse environments, where we have a lot of memcgs,
264  *  but only a few kmem-limited. Or also, if we have, for instance, 200
265  *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
266  *  200 entry array for that.
267  *
268  * The current size of the caches array is stored in memcg_nr_cache_ids. It
269  * will double each time we have to increase it.
270  */
271 static DEFINE_IDA(memcg_cache_ida);
272 int memcg_nr_cache_ids;
273 
274 /* Protects memcg_nr_cache_ids */
275 static DECLARE_RWSEM(memcg_cache_ids_sem);
276 
277 void memcg_get_cache_ids(void)
278 {
279 	down_read(&memcg_cache_ids_sem);
280 }
281 
282 void memcg_put_cache_ids(void)
283 {
284 	up_read(&memcg_cache_ids_sem);
285 }
286 
287 /*
288  * MIN_SIZE is different than 1, because we would like to avoid going through
289  * the alloc/free process all the time. In a small machine, 4 kmem-limited
290  * cgroups is a reasonable guess. In the future, it could be a parameter or
291  * tunable, but that is strictly not necessary.
292  *
293  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
294  * this constant directly from cgroup, but it is understandable that this is
295  * better kept as an internal representation in cgroup.c. In any case, the
296  * cgrp_id space is not getting any smaller, and we don't have to necessarily
297  * increase ours as well if it increases.
298  */
299 #define MEMCG_CACHES_MIN_SIZE 4
300 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
301 
302 /*
303  * A lot of the calls to the cache allocation functions are expected to be
304  * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
305  * conditional to this static branch, we'll have to allow modules that does
306  * kmem_cache_alloc and the such to see this symbol as well
307  */
308 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
309 EXPORT_SYMBOL(memcg_kmem_enabled_key);
310 
311 struct workqueue_struct *memcg_kmem_cache_wq;
312 #endif
313 
314 static int memcg_shrinker_map_size;
315 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
316 
317 static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
318 {
319 	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
320 }
321 
322 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
323 					 int size, int old_size)
324 {
325 	struct memcg_shrinker_map *new, *old;
326 	int nid;
327 
328 	lockdep_assert_held(&memcg_shrinker_map_mutex);
329 
330 	for_each_node(nid) {
331 		old = rcu_dereference_protected(
332 			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
333 		/* Not yet online memcg */
334 		if (!old)
335 			return 0;
336 
337 		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
338 		if (!new)
339 			return -ENOMEM;
340 
341 		/* Set all old bits, clear all new bits */
342 		memset(new->map, (int)0xff, old_size);
343 		memset((void *)new->map + old_size, 0, size - old_size);
344 
345 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
346 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
347 	}
348 
349 	return 0;
350 }
351 
352 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
353 {
354 	struct mem_cgroup_per_node *pn;
355 	struct memcg_shrinker_map *map;
356 	int nid;
357 
358 	if (mem_cgroup_is_root(memcg))
359 		return;
360 
361 	for_each_node(nid) {
362 		pn = mem_cgroup_nodeinfo(memcg, nid);
363 		map = rcu_dereference_protected(pn->shrinker_map, true);
364 		if (map)
365 			kvfree(map);
366 		rcu_assign_pointer(pn->shrinker_map, NULL);
367 	}
368 }
369 
370 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
371 {
372 	struct memcg_shrinker_map *map;
373 	int nid, size, ret = 0;
374 
375 	if (mem_cgroup_is_root(memcg))
376 		return 0;
377 
378 	mutex_lock(&memcg_shrinker_map_mutex);
379 	size = memcg_shrinker_map_size;
380 	for_each_node(nid) {
381 		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
382 		if (!map) {
383 			memcg_free_shrinker_maps(memcg);
384 			ret = -ENOMEM;
385 			break;
386 		}
387 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
388 	}
389 	mutex_unlock(&memcg_shrinker_map_mutex);
390 
391 	return ret;
392 }
393 
394 int memcg_expand_shrinker_maps(int new_id)
395 {
396 	int size, old_size, ret = 0;
397 	struct mem_cgroup *memcg;
398 
399 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
400 	old_size = memcg_shrinker_map_size;
401 	if (size <= old_size)
402 		return 0;
403 
404 	mutex_lock(&memcg_shrinker_map_mutex);
405 	if (!root_mem_cgroup)
406 		goto unlock;
407 
408 	for_each_mem_cgroup(memcg) {
409 		if (mem_cgroup_is_root(memcg))
410 			continue;
411 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
412 		if (ret) {
413 			mem_cgroup_iter_break(NULL, memcg);
414 			goto unlock;
415 		}
416 	}
417 unlock:
418 	if (!ret)
419 		memcg_shrinker_map_size = size;
420 	mutex_unlock(&memcg_shrinker_map_mutex);
421 	return ret;
422 }
423 
424 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
425 {
426 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
427 		struct memcg_shrinker_map *map;
428 
429 		rcu_read_lock();
430 		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
431 		/* Pairs with smp mb in shrink_slab() */
432 		smp_mb__before_atomic();
433 		set_bit(shrinker_id, map->map);
434 		rcu_read_unlock();
435 	}
436 }
437 
438 /**
439  * mem_cgroup_css_from_page - css of the memcg associated with a page
440  * @page: page of interest
441  *
442  * If memcg is bound to the default hierarchy, css of the memcg associated
443  * with @page is returned.  The returned css remains associated with @page
444  * until it is released.
445  *
446  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
447  * is returned.
448  */
449 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
450 {
451 	struct mem_cgroup *memcg;
452 
453 	memcg = page->mem_cgroup;
454 
455 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
456 		memcg = root_mem_cgroup;
457 
458 	return &memcg->css;
459 }
460 
461 /**
462  * page_cgroup_ino - return inode number of the memcg a page is charged to
463  * @page: the page
464  *
465  * Look up the closest online ancestor of the memory cgroup @page is charged to
466  * and return its inode number or 0 if @page is not charged to any cgroup. It
467  * is safe to call this function without holding a reference to @page.
468  *
469  * Note, this function is inherently racy, because there is nothing to prevent
470  * the cgroup inode from getting torn down and potentially reallocated a moment
471  * after page_cgroup_ino() returns, so it only should be used by callers that
472  * do not care (such as procfs interfaces).
473  */
474 ino_t page_cgroup_ino(struct page *page)
475 {
476 	struct mem_cgroup *memcg;
477 	unsigned long ino = 0;
478 
479 	rcu_read_lock();
480 	if (PageSlab(page) && !PageTail(page))
481 		memcg = memcg_from_slab_page(page);
482 	else
483 		memcg = READ_ONCE(page->mem_cgroup);
484 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
485 		memcg = parent_mem_cgroup(memcg);
486 	if (memcg)
487 		ino = cgroup_ino(memcg->css.cgroup);
488 	rcu_read_unlock();
489 	return ino;
490 }
491 
492 static struct mem_cgroup_per_node *
493 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
494 {
495 	int nid = page_to_nid(page);
496 
497 	return memcg->nodeinfo[nid];
498 }
499 
500 static struct mem_cgroup_tree_per_node *
501 soft_limit_tree_node(int nid)
502 {
503 	return soft_limit_tree.rb_tree_per_node[nid];
504 }
505 
506 static struct mem_cgroup_tree_per_node *
507 soft_limit_tree_from_page(struct page *page)
508 {
509 	int nid = page_to_nid(page);
510 
511 	return soft_limit_tree.rb_tree_per_node[nid];
512 }
513 
514 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
515 					 struct mem_cgroup_tree_per_node *mctz,
516 					 unsigned long new_usage_in_excess)
517 {
518 	struct rb_node **p = &mctz->rb_root.rb_node;
519 	struct rb_node *parent = NULL;
520 	struct mem_cgroup_per_node *mz_node;
521 	bool rightmost = true;
522 
523 	if (mz->on_tree)
524 		return;
525 
526 	mz->usage_in_excess = new_usage_in_excess;
527 	if (!mz->usage_in_excess)
528 		return;
529 	while (*p) {
530 		parent = *p;
531 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
532 					tree_node);
533 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
534 			p = &(*p)->rb_left;
535 			rightmost = false;
536 		}
537 
538 		/*
539 		 * We can't avoid mem cgroups that are over their soft
540 		 * limit by the same amount
541 		 */
542 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
543 			p = &(*p)->rb_right;
544 	}
545 
546 	if (rightmost)
547 		mctz->rb_rightmost = &mz->tree_node;
548 
549 	rb_link_node(&mz->tree_node, parent, p);
550 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
551 	mz->on_tree = true;
552 }
553 
554 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
555 					 struct mem_cgroup_tree_per_node *mctz)
556 {
557 	if (!mz->on_tree)
558 		return;
559 
560 	if (&mz->tree_node == mctz->rb_rightmost)
561 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
562 
563 	rb_erase(&mz->tree_node, &mctz->rb_root);
564 	mz->on_tree = false;
565 }
566 
567 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
568 				       struct mem_cgroup_tree_per_node *mctz)
569 {
570 	unsigned long flags;
571 
572 	spin_lock_irqsave(&mctz->lock, flags);
573 	__mem_cgroup_remove_exceeded(mz, mctz);
574 	spin_unlock_irqrestore(&mctz->lock, flags);
575 }
576 
577 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
578 {
579 	unsigned long nr_pages = page_counter_read(&memcg->memory);
580 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
581 	unsigned long excess = 0;
582 
583 	if (nr_pages > soft_limit)
584 		excess = nr_pages - soft_limit;
585 
586 	return excess;
587 }
588 
589 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
590 {
591 	unsigned long excess;
592 	struct mem_cgroup_per_node *mz;
593 	struct mem_cgroup_tree_per_node *mctz;
594 
595 	mctz = soft_limit_tree_from_page(page);
596 	if (!mctz)
597 		return;
598 	/*
599 	 * Necessary to update all ancestors when hierarchy is used.
600 	 * because their event counter is not touched.
601 	 */
602 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
603 		mz = mem_cgroup_page_nodeinfo(memcg, page);
604 		excess = soft_limit_excess(memcg);
605 		/*
606 		 * We have to update the tree if mz is on RB-tree or
607 		 * mem is over its softlimit.
608 		 */
609 		if (excess || mz->on_tree) {
610 			unsigned long flags;
611 
612 			spin_lock_irqsave(&mctz->lock, flags);
613 			/* if on-tree, remove it */
614 			if (mz->on_tree)
615 				__mem_cgroup_remove_exceeded(mz, mctz);
616 			/*
617 			 * Insert again. mz->usage_in_excess will be updated.
618 			 * If excess is 0, no tree ops.
619 			 */
620 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
621 			spin_unlock_irqrestore(&mctz->lock, flags);
622 		}
623 	}
624 }
625 
626 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
627 {
628 	struct mem_cgroup_tree_per_node *mctz;
629 	struct mem_cgroup_per_node *mz;
630 	int nid;
631 
632 	for_each_node(nid) {
633 		mz = mem_cgroup_nodeinfo(memcg, nid);
634 		mctz = soft_limit_tree_node(nid);
635 		if (mctz)
636 			mem_cgroup_remove_exceeded(mz, mctz);
637 	}
638 }
639 
640 static struct mem_cgroup_per_node *
641 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
642 {
643 	struct mem_cgroup_per_node *mz;
644 
645 retry:
646 	mz = NULL;
647 	if (!mctz->rb_rightmost)
648 		goto done;		/* Nothing to reclaim from */
649 
650 	mz = rb_entry(mctz->rb_rightmost,
651 		      struct mem_cgroup_per_node, tree_node);
652 	/*
653 	 * Remove the node now but someone else can add it back,
654 	 * we will to add it back at the end of reclaim to its correct
655 	 * position in the tree.
656 	 */
657 	__mem_cgroup_remove_exceeded(mz, mctz);
658 	if (!soft_limit_excess(mz->memcg) ||
659 	    !css_tryget(&mz->memcg->css))
660 		goto retry;
661 done:
662 	return mz;
663 }
664 
665 static struct mem_cgroup_per_node *
666 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
667 {
668 	struct mem_cgroup_per_node *mz;
669 
670 	spin_lock_irq(&mctz->lock);
671 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
672 	spin_unlock_irq(&mctz->lock);
673 	return mz;
674 }
675 
676 /**
677  * __mod_memcg_state - update cgroup memory statistics
678  * @memcg: the memory cgroup
679  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
680  * @val: delta to add to the counter, can be negative
681  */
682 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
683 {
684 	long x;
685 
686 	if (mem_cgroup_disabled())
687 		return;
688 
689 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
690 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
691 		struct mem_cgroup *mi;
692 
693 		/*
694 		 * Batch local counters to keep them in sync with
695 		 * the hierarchical ones.
696 		 */
697 		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
698 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
699 			atomic_long_add(x, &mi->vmstats[idx]);
700 		x = 0;
701 	}
702 	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
703 }
704 
705 static struct mem_cgroup_per_node *
706 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
707 {
708 	struct mem_cgroup *parent;
709 
710 	parent = parent_mem_cgroup(pn->memcg);
711 	if (!parent)
712 		return NULL;
713 	return mem_cgroup_nodeinfo(parent, nid);
714 }
715 
716 /**
717  * __mod_lruvec_state - update lruvec memory statistics
718  * @lruvec: the lruvec
719  * @idx: the stat item
720  * @val: delta to add to the counter, can be negative
721  *
722  * The lruvec is the intersection of the NUMA node and a cgroup. This
723  * function updates the all three counters that are affected by a
724  * change of state at this level: per-node, per-cgroup, per-lruvec.
725  */
726 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
727 			int val)
728 {
729 	pg_data_t *pgdat = lruvec_pgdat(lruvec);
730 	struct mem_cgroup_per_node *pn;
731 	struct mem_cgroup *memcg;
732 	long x;
733 
734 	/* Update node */
735 	__mod_node_page_state(pgdat, idx, val);
736 
737 	if (mem_cgroup_disabled())
738 		return;
739 
740 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
741 	memcg = pn->memcg;
742 
743 	/* Update memcg */
744 	__mod_memcg_state(memcg, idx, val);
745 
746 	/* Update lruvec */
747 	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
748 
749 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
750 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
751 		struct mem_cgroup_per_node *pi;
752 
753 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
754 			atomic_long_add(x, &pi->lruvec_stat[idx]);
755 		x = 0;
756 	}
757 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
758 }
759 
760 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
761 {
762 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
763 	struct mem_cgroup *memcg;
764 	struct lruvec *lruvec;
765 
766 	rcu_read_lock();
767 	memcg = mem_cgroup_from_obj(p);
768 
769 	/* Untracked pages have no memcg, no lruvec. Update only the node */
770 	if (!memcg || memcg == root_mem_cgroup) {
771 		__mod_node_page_state(pgdat, idx, val);
772 	} else {
773 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
774 		__mod_lruvec_state(lruvec, idx, val);
775 	}
776 	rcu_read_unlock();
777 }
778 
779 void mod_memcg_obj_state(void *p, int idx, int val)
780 {
781 	struct mem_cgroup *memcg;
782 
783 	rcu_read_lock();
784 	memcg = mem_cgroup_from_obj(p);
785 	if (memcg)
786 		mod_memcg_state(memcg, idx, val);
787 	rcu_read_unlock();
788 }
789 
790 /**
791  * __count_memcg_events - account VM events in a cgroup
792  * @memcg: the memory cgroup
793  * @idx: the event item
794  * @count: the number of events that occured
795  */
796 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
797 			  unsigned long count)
798 {
799 	unsigned long x;
800 
801 	if (mem_cgroup_disabled())
802 		return;
803 
804 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
805 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
806 		struct mem_cgroup *mi;
807 
808 		/*
809 		 * Batch local counters to keep them in sync with
810 		 * the hierarchical ones.
811 		 */
812 		__this_cpu_add(memcg->vmstats_local->events[idx], x);
813 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
814 			atomic_long_add(x, &mi->vmevents[idx]);
815 		x = 0;
816 	}
817 	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
818 }
819 
820 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
821 {
822 	return atomic_long_read(&memcg->vmevents[event]);
823 }
824 
825 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
826 {
827 	long x = 0;
828 	int cpu;
829 
830 	for_each_possible_cpu(cpu)
831 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
832 	return x;
833 }
834 
835 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
836 					 struct page *page,
837 					 bool compound, int nr_pages)
838 {
839 	/*
840 	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
841 	 * counted as CACHE even if it's on ANON LRU.
842 	 */
843 	if (PageAnon(page))
844 		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
845 	else {
846 		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
847 		if (PageSwapBacked(page))
848 			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
849 	}
850 
851 	if (compound) {
852 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
853 		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
854 	}
855 
856 	/* pagein of a big page is an event. So, ignore page size */
857 	if (nr_pages > 0)
858 		__count_memcg_events(memcg, PGPGIN, 1);
859 	else {
860 		__count_memcg_events(memcg, PGPGOUT, 1);
861 		nr_pages = -nr_pages; /* for event */
862 	}
863 
864 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
865 }
866 
867 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
868 				       enum mem_cgroup_events_target target)
869 {
870 	unsigned long val, next;
871 
872 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
873 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
874 	/* from time_after() in jiffies.h */
875 	if ((long)(next - val) < 0) {
876 		switch (target) {
877 		case MEM_CGROUP_TARGET_THRESH:
878 			next = val + THRESHOLDS_EVENTS_TARGET;
879 			break;
880 		case MEM_CGROUP_TARGET_SOFTLIMIT:
881 			next = val + SOFTLIMIT_EVENTS_TARGET;
882 			break;
883 		default:
884 			break;
885 		}
886 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
887 		return true;
888 	}
889 	return false;
890 }
891 
892 /*
893  * Check events in order.
894  *
895  */
896 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
897 {
898 	/* threshold event is triggered in finer grain than soft limit */
899 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
900 						MEM_CGROUP_TARGET_THRESH))) {
901 		bool do_softlimit;
902 
903 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
904 						MEM_CGROUP_TARGET_SOFTLIMIT);
905 		mem_cgroup_threshold(memcg);
906 		if (unlikely(do_softlimit))
907 			mem_cgroup_update_tree(memcg, page);
908 	}
909 }
910 
911 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
912 {
913 	/*
914 	 * mm_update_next_owner() may clear mm->owner to NULL
915 	 * if it races with swapoff, page migration, etc.
916 	 * So this can be called with p == NULL.
917 	 */
918 	if (unlikely(!p))
919 		return NULL;
920 
921 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
922 }
923 EXPORT_SYMBOL(mem_cgroup_from_task);
924 
925 /**
926  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
927  * @mm: mm from which memcg should be extracted. It can be NULL.
928  *
929  * Obtain a reference on mm->memcg and returns it if successful. Otherwise
930  * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
931  * returned.
932  */
933 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
934 {
935 	struct mem_cgroup *memcg;
936 
937 	if (mem_cgroup_disabled())
938 		return NULL;
939 
940 	rcu_read_lock();
941 	do {
942 		/*
943 		 * Page cache insertions can happen withou an
944 		 * actual mm context, e.g. during disk probing
945 		 * on boot, loopback IO, acct() writes etc.
946 		 */
947 		if (unlikely(!mm))
948 			memcg = root_mem_cgroup;
949 		else {
950 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
951 			if (unlikely(!memcg))
952 				memcg = root_mem_cgroup;
953 		}
954 	} while (!css_tryget(&memcg->css));
955 	rcu_read_unlock();
956 	return memcg;
957 }
958 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
959 
960 /**
961  * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
962  * @page: page from which memcg should be extracted.
963  *
964  * Obtain a reference on page->memcg and returns it if successful. Otherwise
965  * root_mem_cgroup is returned.
966  */
967 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
968 {
969 	struct mem_cgroup *memcg = page->mem_cgroup;
970 
971 	if (mem_cgroup_disabled())
972 		return NULL;
973 
974 	rcu_read_lock();
975 	/* Page should not get uncharged and freed memcg under us. */
976 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
977 		memcg = root_mem_cgroup;
978 	rcu_read_unlock();
979 	return memcg;
980 }
981 EXPORT_SYMBOL(get_mem_cgroup_from_page);
982 
983 /**
984  * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
985  */
986 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
987 {
988 	if (unlikely(current->active_memcg)) {
989 		struct mem_cgroup *memcg;
990 
991 		rcu_read_lock();
992 		/* current->active_memcg must hold a ref. */
993 		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
994 			memcg = root_mem_cgroup;
995 		else
996 			memcg = current->active_memcg;
997 		rcu_read_unlock();
998 		return memcg;
999 	}
1000 	return get_mem_cgroup_from_mm(current->mm);
1001 }
1002 
1003 /**
1004  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1005  * @root: hierarchy root
1006  * @prev: previously returned memcg, NULL on first invocation
1007  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1008  *
1009  * Returns references to children of the hierarchy below @root, or
1010  * @root itself, or %NULL after a full round-trip.
1011  *
1012  * Caller must pass the return value in @prev on subsequent
1013  * invocations for reference counting, or use mem_cgroup_iter_break()
1014  * to cancel a hierarchy walk before the round-trip is complete.
1015  *
1016  * Reclaimers can specify a node and a priority level in @reclaim to
1017  * divide up the memcgs in the hierarchy among all concurrent
1018  * reclaimers operating on the same node and priority.
1019  */
1020 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1021 				   struct mem_cgroup *prev,
1022 				   struct mem_cgroup_reclaim_cookie *reclaim)
1023 {
1024 	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1025 	struct cgroup_subsys_state *css = NULL;
1026 	struct mem_cgroup *memcg = NULL;
1027 	struct mem_cgroup *pos = NULL;
1028 
1029 	if (mem_cgroup_disabled())
1030 		return NULL;
1031 
1032 	if (!root)
1033 		root = root_mem_cgroup;
1034 
1035 	if (prev && !reclaim)
1036 		pos = prev;
1037 
1038 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1039 		if (prev)
1040 			goto out;
1041 		return root;
1042 	}
1043 
1044 	rcu_read_lock();
1045 
1046 	if (reclaim) {
1047 		struct mem_cgroup_per_node *mz;
1048 
1049 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1050 		iter = &mz->iter;
1051 
1052 		if (prev && reclaim->generation != iter->generation)
1053 			goto out_unlock;
1054 
1055 		while (1) {
1056 			pos = READ_ONCE(iter->position);
1057 			if (!pos || css_tryget(&pos->css))
1058 				break;
1059 			/*
1060 			 * css reference reached zero, so iter->position will
1061 			 * be cleared by ->css_released. However, we should not
1062 			 * rely on this happening soon, because ->css_released
1063 			 * is called from a work queue, and by busy-waiting we
1064 			 * might block it. So we clear iter->position right
1065 			 * away.
1066 			 */
1067 			(void)cmpxchg(&iter->position, pos, NULL);
1068 		}
1069 	}
1070 
1071 	if (pos)
1072 		css = &pos->css;
1073 
1074 	for (;;) {
1075 		css = css_next_descendant_pre(css, &root->css);
1076 		if (!css) {
1077 			/*
1078 			 * Reclaimers share the hierarchy walk, and a
1079 			 * new one might jump in right at the end of
1080 			 * the hierarchy - make sure they see at least
1081 			 * one group and restart from the beginning.
1082 			 */
1083 			if (!prev)
1084 				continue;
1085 			break;
1086 		}
1087 
1088 		/*
1089 		 * Verify the css and acquire a reference.  The root
1090 		 * is provided by the caller, so we know it's alive
1091 		 * and kicking, and don't take an extra reference.
1092 		 */
1093 		memcg = mem_cgroup_from_css(css);
1094 
1095 		if (css == &root->css)
1096 			break;
1097 
1098 		if (css_tryget(css))
1099 			break;
1100 
1101 		memcg = NULL;
1102 	}
1103 
1104 	if (reclaim) {
1105 		/*
1106 		 * The position could have already been updated by a competing
1107 		 * thread, so check that the value hasn't changed since we read
1108 		 * it to avoid reclaiming from the same cgroup twice.
1109 		 */
1110 		(void)cmpxchg(&iter->position, pos, memcg);
1111 
1112 		if (pos)
1113 			css_put(&pos->css);
1114 
1115 		if (!memcg)
1116 			iter->generation++;
1117 		else if (!prev)
1118 			reclaim->generation = iter->generation;
1119 	}
1120 
1121 out_unlock:
1122 	rcu_read_unlock();
1123 out:
1124 	if (prev && prev != root)
1125 		css_put(&prev->css);
1126 
1127 	return memcg;
1128 }
1129 
1130 /**
1131  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1132  * @root: hierarchy root
1133  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1134  */
1135 void mem_cgroup_iter_break(struct mem_cgroup *root,
1136 			   struct mem_cgroup *prev)
1137 {
1138 	if (!root)
1139 		root = root_mem_cgroup;
1140 	if (prev && prev != root)
1141 		css_put(&prev->css);
1142 }
1143 
1144 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1145 					struct mem_cgroup *dead_memcg)
1146 {
1147 	struct mem_cgroup_reclaim_iter *iter;
1148 	struct mem_cgroup_per_node *mz;
1149 	int nid;
1150 
1151 	for_each_node(nid) {
1152 		mz = mem_cgroup_nodeinfo(from, nid);
1153 		iter = &mz->iter;
1154 		cmpxchg(&iter->position, dead_memcg, NULL);
1155 	}
1156 }
1157 
1158 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1159 {
1160 	struct mem_cgroup *memcg = dead_memcg;
1161 	struct mem_cgroup *last;
1162 
1163 	do {
1164 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1165 		last = memcg;
1166 	} while ((memcg = parent_mem_cgroup(memcg)));
1167 
1168 	/*
1169 	 * When cgruop1 non-hierarchy mode is used,
1170 	 * parent_mem_cgroup() does not walk all the way up to the
1171 	 * cgroup root (root_mem_cgroup). So we have to handle
1172 	 * dead_memcg from cgroup root separately.
1173 	 */
1174 	if (last != root_mem_cgroup)
1175 		__invalidate_reclaim_iterators(root_mem_cgroup,
1176 						dead_memcg);
1177 }
1178 
1179 /**
1180  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1181  * @memcg: hierarchy root
1182  * @fn: function to call for each task
1183  * @arg: argument passed to @fn
1184  *
1185  * This function iterates over tasks attached to @memcg or to any of its
1186  * descendants and calls @fn for each task. If @fn returns a non-zero
1187  * value, the function breaks the iteration loop and returns the value.
1188  * Otherwise, it will iterate over all tasks and return 0.
1189  *
1190  * This function must not be called for the root memory cgroup.
1191  */
1192 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1193 			  int (*fn)(struct task_struct *, void *), void *arg)
1194 {
1195 	struct mem_cgroup *iter;
1196 	int ret = 0;
1197 
1198 	BUG_ON(memcg == root_mem_cgroup);
1199 
1200 	for_each_mem_cgroup_tree(iter, memcg) {
1201 		struct css_task_iter it;
1202 		struct task_struct *task;
1203 
1204 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1205 		while (!ret && (task = css_task_iter_next(&it)))
1206 			ret = fn(task, arg);
1207 		css_task_iter_end(&it);
1208 		if (ret) {
1209 			mem_cgroup_iter_break(memcg, iter);
1210 			break;
1211 		}
1212 	}
1213 	return ret;
1214 }
1215 
1216 /**
1217  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1218  * @page: the page
1219  * @pgdat: pgdat of the page
1220  *
1221  * This function is only safe when following the LRU page isolation
1222  * and putback protocol: the LRU lock must be held, and the page must
1223  * either be PageLRU() or the caller must have isolated/allocated it.
1224  */
1225 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1226 {
1227 	struct mem_cgroup_per_node *mz;
1228 	struct mem_cgroup *memcg;
1229 	struct lruvec *lruvec;
1230 
1231 	if (mem_cgroup_disabled()) {
1232 		lruvec = &pgdat->__lruvec;
1233 		goto out;
1234 	}
1235 
1236 	memcg = page->mem_cgroup;
1237 	/*
1238 	 * Swapcache readahead pages are added to the LRU - and
1239 	 * possibly migrated - before they are charged.
1240 	 */
1241 	if (!memcg)
1242 		memcg = root_mem_cgroup;
1243 
1244 	mz = mem_cgroup_page_nodeinfo(memcg, page);
1245 	lruvec = &mz->lruvec;
1246 out:
1247 	/*
1248 	 * Since a node can be onlined after the mem_cgroup was created,
1249 	 * we have to be prepared to initialize lruvec->zone here;
1250 	 * and if offlined then reonlined, we need to reinitialize it.
1251 	 */
1252 	if (unlikely(lruvec->pgdat != pgdat))
1253 		lruvec->pgdat = pgdat;
1254 	return lruvec;
1255 }
1256 
1257 /**
1258  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1259  * @lruvec: mem_cgroup per zone lru vector
1260  * @lru: index of lru list the page is sitting on
1261  * @zid: zone id of the accounted pages
1262  * @nr_pages: positive when adding or negative when removing
1263  *
1264  * This function must be called under lru_lock, just before a page is added
1265  * to or just after a page is removed from an lru list (that ordering being
1266  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1267  */
1268 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1269 				int zid, int nr_pages)
1270 {
1271 	struct mem_cgroup_per_node *mz;
1272 	unsigned long *lru_size;
1273 	long size;
1274 
1275 	if (mem_cgroup_disabled())
1276 		return;
1277 
1278 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1279 	lru_size = &mz->lru_zone_size[zid][lru];
1280 
1281 	if (nr_pages < 0)
1282 		*lru_size += nr_pages;
1283 
1284 	size = *lru_size;
1285 	if (WARN_ONCE(size < 0,
1286 		"%s(%p, %d, %d): lru_size %ld\n",
1287 		__func__, lruvec, lru, nr_pages, size)) {
1288 		VM_BUG_ON(1);
1289 		*lru_size = 0;
1290 	}
1291 
1292 	if (nr_pages > 0)
1293 		*lru_size += nr_pages;
1294 }
1295 
1296 /**
1297  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1298  * @memcg: the memory cgroup
1299  *
1300  * Returns the maximum amount of memory @mem can be charged with, in
1301  * pages.
1302  */
1303 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1304 {
1305 	unsigned long margin = 0;
1306 	unsigned long count;
1307 	unsigned long limit;
1308 
1309 	count = page_counter_read(&memcg->memory);
1310 	limit = READ_ONCE(memcg->memory.max);
1311 	if (count < limit)
1312 		margin = limit - count;
1313 
1314 	if (do_memsw_account()) {
1315 		count = page_counter_read(&memcg->memsw);
1316 		limit = READ_ONCE(memcg->memsw.max);
1317 		if (count < limit)
1318 			margin = min(margin, limit - count);
1319 		else
1320 			margin = 0;
1321 	}
1322 
1323 	return margin;
1324 }
1325 
1326 /*
1327  * A routine for checking "mem" is under move_account() or not.
1328  *
1329  * Checking a cgroup is mc.from or mc.to or under hierarchy of
1330  * moving cgroups. This is for waiting at high-memory pressure
1331  * caused by "move".
1332  */
1333 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1334 {
1335 	struct mem_cgroup *from;
1336 	struct mem_cgroup *to;
1337 	bool ret = false;
1338 	/*
1339 	 * Unlike task_move routines, we access mc.to, mc.from not under
1340 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1341 	 */
1342 	spin_lock(&mc.lock);
1343 	from = mc.from;
1344 	to = mc.to;
1345 	if (!from)
1346 		goto unlock;
1347 
1348 	ret = mem_cgroup_is_descendant(from, memcg) ||
1349 		mem_cgroup_is_descendant(to, memcg);
1350 unlock:
1351 	spin_unlock(&mc.lock);
1352 	return ret;
1353 }
1354 
1355 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1356 {
1357 	if (mc.moving_task && current != mc.moving_task) {
1358 		if (mem_cgroup_under_move(memcg)) {
1359 			DEFINE_WAIT(wait);
1360 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1361 			/* moving charge context might have finished. */
1362 			if (mc.moving_task)
1363 				schedule();
1364 			finish_wait(&mc.waitq, &wait);
1365 			return true;
1366 		}
1367 	}
1368 	return false;
1369 }
1370 
1371 static char *memory_stat_format(struct mem_cgroup *memcg)
1372 {
1373 	struct seq_buf s;
1374 	int i;
1375 
1376 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1377 	if (!s.buffer)
1378 		return NULL;
1379 
1380 	/*
1381 	 * Provide statistics on the state of the memory subsystem as
1382 	 * well as cumulative event counters that show past behavior.
1383 	 *
1384 	 * This list is ordered following a combination of these gradients:
1385 	 * 1) generic big picture -> specifics and details
1386 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1387 	 *
1388 	 * Current memory state:
1389 	 */
1390 
1391 	seq_buf_printf(&s, "anon %llu\n",
1392 		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
1393 		       PAGE_SIZE);
1394 	seq_buf_printf(&s, "file %llu\n",
1395 		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
1396 		       PAGE_SIZE);
1397 	seq_buf_printf(&s, "kernel_stack %llu\n",
1398 		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
1399 		       1024);
1400 	seq_buf_printf(&s, "slab %llu\n",
1401 		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
1402 			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
1403 		       PAGE_SIZE);
1404 	seq_buf_printf(&s, "sock %llu\n",
1405 		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
1406 		       PAGE_SIZE);
1407 
1408 	seq_buf_printf(&s, "shmem %llu\n",
1409 		       (u64)memcg_page_state(memcg, NR_SHMEM) *
1410 		       PAGE_SIZE);
1411 	seq_buf_printf(&s, "file_mapped %llu\n",
1412 		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
1413 		       PAGE_SIZE);
1414 	seq_buf_printf(&s, "file_dirty %llu\n",
1415 		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
1416 		       PAGE_SIZE);
1417 	seq_buf_printf(&s, "file_writeback %llu\n",
1418 		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
1419 		       PAGE_SIZE);
1420 
1421 	/*
1422 	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
1423 	 * with the NR_ANON_THP vm counter, but right now it's a pain in the
1424 	 * arse because it requires migrating the work out of rmap to a place
1425 	 * where the page->mem_cgroup is set up and stable.
1426 	 */
1427 	seq_buf_printf(&s, "anon_thp %llu\n",
1428 		       (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
1429 		       PAGE_SIZE);
1430 
1431 	for (i = 0; i < NR_LRU_LISTS; i++)
1432 		seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
1433 			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
1434 			       PAGE_SIZE);
1435 
1436 	seq_buf_printf(&s, "slab_reclaimable %llu\n",
1437 		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
1438 		       PAGE_SIZE);
1439 	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
1440 		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
1441 		       PAGE_SIZE);
1442 
1443 	/* Accumulated memory events */
1444 
1445 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1446 		       memcg_events(memcg, PGFAULT));
1447 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1448 		       memcg_events(memcg, PGMAJFAULT));
1449 
1450 	seq_buf_printf(&s, "workingset_refault %lu\n",
1451 		       memcg_page_state(memcg, WORKINGSET_REFAULT));
1452 	seq_buf_printf(&s, "workingset_activate %lu\n",
1453 		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
1454 	seq_buf_printf(&s, "workingset_restore %lu\n",
1455 		       memcg_page_state(memcg, WORKINGSET_RESTORE));
1456 	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
1457 		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
1458 
1459 	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1460 		       memcg_events(memcg, PGREFILL));
1461 	seq_buf_printf(&s, "pgscan %lu\n",
1462 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1463 		       memcg_events(memcg, PGSCAN_DIRECT));
1464 	seq_buf_printf(&s, "pgsteal %lu\n",
1465 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1466 		       memcg_events(memcg, PGSTEAL_DIRECT));
1467 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1468 		       memcg_events(memcg, PGACTIVATE));
1469 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1470 		       memcg_events(memcg, PGDEACTIVATE));
1471 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1472 		       memcg_events(memcg, PGLAZYFREE));
1473 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1474 		       memcg_events(memcg, PGLAZYFREED));
1475 
1476 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1477 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1478 		       memcg_events(memcg, THP_FAULT_ALLOC));
1479 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1480 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1481 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1482 
1483 	/* The above should easily fit into one page */
1484 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1485 
1486 	return s.buffer;
1487 }
1488 
1489 #define K(x) ((x) << (PAGE_SHIFT-10))
1490 /**
1491  * mem_cgroup_print_oom_context: Print OOM information relevant to
1492  * memory controller.
1493  * @memcg: The memory cgroup that went over limit
1494  * @p: Task that is going to be killed
1495  *
1496  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1497  * enabled
1498  */
1499 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1500 {
1501 	rcu_read_lock();
1502 
1503 	if (memcg) {
1504 		pr_cont(",oom_memcg=");
1505 		pr_cont_cgroup_path(memcg->css.cgroup);
1506 	} else
1507 		pr_cont(",global_oom");
1508 	if (p) {
1509 		pr_cont(",task_memcg=");
1510 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1511 	}
1512 	rcu_read_unlock();
1513 }
1514 
1515 /**
1516  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1517  * memory controller.
1518  * @memcg: The memory cgroup that went over limit
1519  */
1520 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1521 {
1522 	char *buf;
1523 
1524 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1525 		K((u64)page_counter_read(&memcg->memory)),
1526 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1527 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1528 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1529 			K((u64)page_counter_read(&memcg->swap)),
1530 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1531 	else {
1532 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1533 			K((u64)page_counter_read(&memcg->memsw)),
1534 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1535 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1536 			K((u64)page_counter_read(&memcg->kmem)),
1537 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1538 	}
1539 
1540 	pr_info("Memory cgroup stats for ");
1541 	pr_cont_cgroup_path(memcg->css.cgroup);
1542 	pr_cont(":");
1543 	buf = memory_stat_format(memcg);
1544 	if (!buf)
1545 		return;
1546 	pr_info("%s", buf);
1547 	kfree(buf);
1548 }
1549 
1550 /*
1551  * Return the memory (and swap, if configured) limit for a memcg.
1552  */
1553 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1554 {
1555 	unsigned long max;
1556 
1557 	max = READ_ONCE(memcg->memory.max);
1558 	if (mem_cgroup_swappiness(memcg)) {
1559 		unsigned long memsw_max;
1560 		unsigned long swap_max;
1561 
1562 		memsw_max = memcg->memsw.max;
1563 		swap_max = READ_ONCE(memcg->swap.max);
1564 		swap_max = min(swap_max, (unsigned long)total_swap_pages);
1565 		max = min(max + swap_max, memsw_max);
1566 	}
1567 	return max;
1568 }
1569 
1570 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1571 {
1572 	return page_counter_read(&memcg->memory);
1573 }
1574 
1575 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1576 				     int order)
1577 {
1578 	struct oom_control oc = {
1579 		.zonelist = NULL,
1580 		.nodemask = NULL,
1581 		.memcg = memcg,
1582 		.gfp_mask = gfp_mask,
1583 		.order = order,
1584 	};
1585 	bool ret;
1586 
1587 	if (mutex_lock_killable(&oom_lock))
1588 		return true;
1589 	/*
1590 	 * A few threads which were not waiting at mutex_lock_killable() can
1591 	 * fail to bail out. Therefore, check again after holding oom_lock.
1592 	 */
1593 	ret = should_force_charge() || out_of_memory(&oc);
1594 	mutex_unlock(&oom_lock);
1595 	return ret;
1596 }
1597 
1598 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1599 				   pg_data_t *pgdat,
1600 				   gfp_t gfp_mask,
1601 				   unsigned long *total_scanned)
1602 {
1603 	struct mem_cgroup *victim = NULL;
1604 	int total = 0;
1605 	int loop = 0;
1606 	unsigned long excess;
1607 	unsigned long nr_scanned;
1608 	struct mem_cgroup_reclaim_cookie reclaim = {
1609 		.pgdat = pgdat,
1610 	};
1611 
1612 	excess = soft_limit_excess(root_memcg);
1613 
1614 	while (1) {
1615 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1616 		if (!victim) {
1617 			loop++;
1618 			if (loop >= 2) {
1619 				/*
1620 				 * If we have not been able to reclaim
1621 				 * anything, it might because there are
1622 				 * no reclaimable pages under this hierarchy
1623 				 */
1624 				if (!total)
1625 					break;
1626 				/*
1627 				 * We want to do more targeted reclaim.
1628 				 * excess >> 2 is not to excessive so as to
1629 				 * reclaim too much, nor too less that we keep
1630 				 * coming back to reclaim from this cgroup
1631 				 */
1632 				if (total >= (excess >> 2) ||
1633 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1634 					break;
1635 			}
1636 			continue;
1637 		}
1638 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1639 					pgdat, &nr_scanned);
1640 		*total_scanned += nr_scanned;
1641 		if (!soft_limit_excess(root_memcg))
1642 			break;
1643 	}
1644 	mem_cgroup_iter_break(root_memcg, victim);
1645 	return total;
1646 }
1647 
1648 #ifdef CONFIG_LOCKDEP
1649 static struct lockdep_map memcg_oom_lock_dep_map = {
1650 	.name = "memcg_oom_lock",
1651 };
1652 #endif
1653 
1654 static DEFINE_SPINLOCK(memcg_oom_lock);
1655 
1656 /*
1657  * Check OOM-Killer is already running under our hierarchy.
1658  * If someone is running, return false.
1659  */
1660 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1661 {
1662 	struct mem_cgroup *iter, *failed = NULL;
1663 
1664 	spin_lock(&memcg_oom_lock);
1665 
1666 	for_each_mem_cgroup_tree(iter, memcg) {
1667 		if (iter->oom_lock) {
1668 			/*
1669 			 * this subtree of our hierarchy is already locked
1670 			 * so we cannot give a lock.
1671 			 */
1672 			failed = iter;
1673 			mem_cgroup_iter_break(memcg, iter);
1674 			break;
1675 		} else
1676 			iter->oom_lock = true;
1677 	}
1678 
1679 	if (failed) {
1680 		/*
1681 		 * OK, we failed to lock the whole subtree so we have
1682 		 * to clean up what we set up to the failing subtree
1683 		 */
1684 		for_each_mem_cgroup_tree(iter, memcg) {
1685 			if (iter == failed) {
1686 				mem_cgroup_iter_break(memcg, iter);
1687 				break;
1688 			}
1689 			iter->oom_lock = false;
1690 		}
1691 	} else
1692 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1693 
1694 	spin_unlock(&memcg_oom_lock);
1695 
1696 	return !failed;
1697 }
1698 
1699 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1700 {
1701 	struct mem_cgroup *iter;
1702 
1703 	spin_lock(&memcg_oom_lock);
1704 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1705 	for_each_mem_cgroup_tree(iter, memcg)
1706 		iter->oom_lock = false;
1707 	spin_unlock(&memcg_oom_lock);
1708 }
1709 
1710 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1711 {
1712 	struct mem_cgroup *iter;
1713 
1714 	spin_lock(&memcg_oom_lock);
1715 	for_each_mem_cgroup_tree(iter, memcg)
1716 		iter->under_oom++;
1717 	spin_unlock(&memcg_oom_lock);
1718 }
1719 
1720 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1721 {
1722 	struct mem_cgroup *iter;
1723 
1724 	/*
1725 	 * When a new child is created while the hierarchy is under oom,
1726 	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1727 	 */
1728 	spin_lock(&memcg_oom_lock);
1729 	for_each_mem_cgroup_tree(iter, memcg)
1730 		if (iter->under_oom > 0)
1731 			iter->under_oom--;
1732 	spin_unlock(&memcg_oom_lock);
1733 }
1734 
1735 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1736 
1737 struct oom_wait_info {
1738 	struct mem_cgroup *memcg;
1739 	wait_queue_entry_t	wait;
1740 };
1741 
1742 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1743 	unsigned mode, int sync, void *arg)
1744 {
1745 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1746 	struct mem_cgroup *oom_wait_memcg;
1747 	struct oom_wait_info *oom_wait_info;
1748 
1749 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1750 	oom_wait_memcg = oom_wait_info->memcg;
1751 
1752 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1753 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1754 		return 0;
1755 	return autoremove_wake_function(wait, mode, sync, arg);
1756 }
1757 
1758 static void memcg_oom_recover(struct mem_cgroup *memcg)
1759 {
1760 	/*
1761 	 * For the following lockless ->under_oom test, the only required
1762 	 * guarantee is that it must see the state asserted by an OOM when
1763 	 * this function is called as a result of userland actions
1764 	 * triggered by the notification of the OOM.  This is trivially
1765 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1766 	 * triggering notification.
1767 	 */
1768 	if (memcg && memcg->under_oom)
1769 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1770 }
1771 
1772 enum oom_status {
1773 	OOM_SUCCESS,
1774 	OOM_FAILED,
1775 	OOM_ASYNC,
1776 	OOM_SKIPPED
1777 };
1778 
1779 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1780 {
1781 	enum oom_status ret;
1782 	bool locked;
1783 
1784 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1785 		return OOM_SKIPPED;
1786 
1787 	memcg_memory_event(memcg, MEMCG_OOM);
1788 
1789 	/*
1790 	 * We are in the middle of the charge context here, so we
1791 	 * don't want to block when potentially sitting on a callstack
1792 	 * that holds all kinds of filesystem and mm locks.
1793 	 *
1794 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1795 	 * handling until the charge can succeed; remember the context and put
1796 	 * the task to sleep at the end of the page fault when all locks are
1797 	 * released.
1798 	 *
1799 	 * On the other hand, in-kernel OOM killer allows for an async victim
1800 	 * memory reclaim (oom_reaper) and that means that we are not solely
1801 	 * relying on the oom victim to make a forward progress and we can
1802 	 * invoke the oom killer here.
1803 	 *
1804 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1805 	 * victim and then we have to bail out from the charge path.
1806 	 */
1807 	if (memcg->oom_kill_disable) {
1808 		if (!current->in_user_fault)
1809 			return OOM_SKIPPED;
1810 		css_get(&memcg->css);
1811 		current->memcg_in_oom = memcg;
1812 		current->memcg_oom_gfp_mask = mask;
1813 		current->memcg_oom_order = order;
1814 
1815 		return OOM_ASYNC;
1816 	}
1817 
1818 	mem_cgroup_mark_under_oom(memcg);
1819 
1820 	locked = mem_cgroup_oom_trylock(memcg);
1821 
1822 	if (locked)
1823 		mem_cgroup_oom_notify(memcg);
1824 
1825 	mem_cgroup_unmark_under_oom(memcg);
1826 	if (mem_cgroup_out_of_memory(memcg, mask, order))
1827 		ret = OOM_SUCCESS;
1828 	else
1829 		ret = OOM_FAILED;
1830 
1831 	if (locked)
1832 		mem_cgroup_oom_unlock(memcg);
1833 
1834 	return ret;
1835 }
1836 
1837 /**
1838  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1839  * @handle: actually kill/wait or just clean up the OOM state
1840  *
1841  * This has to be called at the end of a page fault if the memcg OOM
1842  * handler was enabled.
1843  *
1844  * Memcg supports userspace OOM handling where failed allocations must
1845  * sleep on a waitqueue until the userspace task resolves the
1846  * situation.  Sleeping directly in the charge context with all kinds
1847  * of locks held is not a good idea, instead we remember an OOM state
1848  * in the task and mem_cgroup_oom_synchronize() has to be called at
1849  * the end of the page fault to complete the OOM handling.
1850  *
1851  * Returns %true if an ongoing memcg OOM situation was detected and
1852  * completed, %false otherwise.
1853  */
1854 bool mem_cgroup_oom_synchronize(bool handle)
1855 {
1856 	struct mem_cgroup *memcg = current->memcg_in_oom;
1857 	struct oom_wait_info owait;
1858 	bool locked;
1859 
1860 	/* OOM is global, do not handle */
1861 	if (!memcg)
1862 		return false;
1863 
1864 	if (!handle)
1865 		goto cleanup;
1866 
1867 	owait.memcg = memcg;
1868 	owait.wait.flags = 0;
1869 	owait.wait.func = memcg_oom_wake_function;
1870 	owait.wait.private = current;
1871 	INIT_LIST_HEAD(&owait.wait.entry);
1872 
1873 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1874 	mem_cgroup_mark_under_oom(memcg);
1875 
1876 	locked = mem_cgroup_oom_trylock(memcg);
1877 
1878 	if (locked)
1879 		mem_cgroup_oom_notify(memcg);
1880 
1881 	if (locked && !memcg->oom_kill_disable) {
1882 		mem_cgroup_unmark_under_oom(memcg);
1883 		finish_wait(&memcg_oom_waitq, &owait.wait);
1884 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1885 					 current->memcg_oom_order);
1886 	} else {
1887 		schedule();
1888 		mem_cgroup_unmark_under_oom(memcg);
1889 		finish_wait(&memcg_oom_waitq, &owait.wait);
1890 	}
1891 
1892 	if (locked) {
1893 		mem_cgroup_oom_unlock(memcg);
1894 		/*
1895 		 * There is no guarantee that an OOM-lock contender
1896 		 * sees the wakeups triggered by the OOM kill
1897 		 * uncharges.  Wake any sleepers explicitely.
1898 		 */
1899 		memcg_oom_recover(memcg);
1900 	}
1901 cleanup:
1902 	current->memcg_in_oom = NULL;
1903 	css_put(&memcg->css);
1904 	return true;
1905 }
1906 
1907 /**
1908  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1909  * @victim: task to be killed by the OOM killer
1910  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1911  *
1912  * Returns a pointer to a memory cgroup, which has to be cleaned up
1913  * by killing all belonging OOM-killable tasks.
1914  *
1915  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1916  */
1917 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1918 					    struct mem_cgroup *oom_domain)
1919 {
1920 	struct mem_cgroup *oom_group = NULL;
1921 	struct mem_cgroup *memcg;
1922 
1923 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1924 		return NULL;
1925 
1926 	if (!oom_domain)
1927 		oom_domain = root_mem_cgroup;
1928 
1929 	rcu_read_lock();
1930 
1931 	memcg = mem_cgroup_from_task(victim);
1932 	if (memcg == root_mem_cgroup)
1933 		goto out;
1934 
1935 	/*
1936 	 * If the victim task has been asynchronously moved to a different
1937 	 * memory cgroup, we might end up killing tasks outside oom_domain.
1938 	 * In this case it's better to ignore memory.group.oom.
1939 	 */
1940 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1941 		goto out;
1942 
1943 	/*
1944 	 * Traverse the memory cgroup hierarchy from the victim task's
1945 	 * cgroup up to the OOMing cgroup (or root) to find the
1946 	 * highest-level memory cgroup with oom.group set.
1947 	 */
1948 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1949 		if (memcg->oom_group)
1950 			oom_group = memcg;
1951 
1952 		if (memcg == oom_domain)
1953 			break;
1954 	}
1955 
1956 	if (oom_group)
1957 		css_get(&oom_group->css);
1958 out:
1959 	rcu_read_unlock();
1960 
1961 	return oom_group;
1962 }
1963 
1964 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1965 {
1966 	pr_info("Tasks in ");
1967 	pr_cont_cgroup_path(memcg->css.cgroup);
1968 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1969 }
1970 
1971 /**
1972  * lock_page_memcg - lock a page->mem_cgroup binding
1973  * @page: the page
1974  *
1975  * This function protects unlocked LRU pages from being moved to
1976  * another cgroup.
1977  *
1978  * It ensures lifetime of the returned memcg. Caller is responsible
1979  * for the lifetime of the page; __unlock_page_memcg() is available
1980  * when @page might get freed inside the locked section.
1981  */
1982 struct mem_cgroup *lock_page_memcg(struct page *page)
1983 {
1984 	struct mem_cgroup *memcg;
1985 	unsigned long flags;
1986 
1987 	/*
1988 	 * The RCU lock is held throughout the transaction.  The fast
1989 	 * path can get away without acquiring the memcg->move_lock
1990 	 * because page moving starts with an RCU grace period.
1991 	 *
1992 	 * The RCU lock also protects the memcg from being freed when
1993 	 * the page state that is going to change is the only thing
1994 	 * preventing the page itself from being freed. E.g. writeback
1995 	 * doesn't hold a page reference and relies on PG_writeback to
1996 	 * keep off truncation, migration and so forth.
1997          */
1998 	rcu_read_lock();
1999 
2000 	if (mem_cgroup_disabled())
2001 		return NULL;
2002 again:
2003 	memcg = page->mem_cgroup;
2004 	if (unlikely(!memcg))
2005 		return NULL;
2006 
2007 	if (atomic_read(&memcg->moving_account) <= 0)
2008 		return memcg;
2009 
2010 	spin_lock_irqsave(&memcg->move_lock, flags);
2011 	if (memcg != page->mem_cgroup) {
2012 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2013 		goto again;
2014 	}
2015 
2016 	/*
2017 	 * When charge migration first begins, we can have locked and
2018 	 * unlocked page stat updates happening concurrently.  Track
2019 	 * the task who has the lock for unlock_page_memcg().
2020 	 */
2021 	memcg->move_lock_task = current;
2022 	memcg->move_lock_flags = flags;
2023 
2024 	return memcg;
2025 }
2026 EXPORT_SYMBOL(lock_page_memcg);
2027 
2028 /**
2029  * __unlock_page_memcg - unlock and unpin a memcg
2030  * @memcg: the memcg
2031  *
2032  * Unlock and unpin a memcg returned by lock_page_memcg().
2033  */
2034 void __unlock_page_memcg(struct mem_cgroup *memcg)
2035 {
2036 	if (memcg && memcg->move_lock_task == current) {
2037 		unsigned long flags = memcg->move_lock_flags;
2038 
2039 		memcg->move_lock_task = NULL;
2040 		memcg->move_lock_flags = 0;
2041 
2042 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2043 	}
2044 
2045 	rcu_read_unlock();
2046 }
2047 
2048 /**
2049  * unlock_page_memcg - unlock a page->mem_cgroup binding
2050  * @page: the page
2051  */
2052 void unlock_page_memcg(struct page *page)
2053 {
2054 	__unlock_page_memcg(page->mem_cgroup);
2055 }
2056 EXPORT_SYMBOL(unlock_page_memcg);
2057 
2058 struct memcg_stock_pcp {
2059 	struct mem_cgroup *cached; /* this never be root cgroup */
2060 	unsigned int nr_pages;
2061 	struct work_struct work;
2062 	unsigned long flags;
2063 #define FLUSHING_CACHED_CHARGE	0
2064 };
2065 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2066 static DEFINE_MUTEX(percpu_charge_mutex);
2067 
2068 /**
2069  * consume_stock: Try to consume stocked charge on this cpu.
2070  * @memcg: memcg to consume from.
2071  * @nr_pages: how many pages to charge.
2072  *
2073  * The charges will only happen if @memcg matches the current cpu's memcg
2074  * stock, and at least @nr_pages are available in that stock.  Failure to
2075  * service an allocation will refill the stock.
2076  *
2077  * returns true if successful, false otherwise.
2078  */
2079 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2080 {
2081 	struct memcg_stock_pcp *stock;
2082 	unsigned long flags;
2083 	bool ret = false;
2084 
2085 	if (nr_pages > MEMCG_CHARGE_BATCH)
2086 		return ret;
2087 
2088 	local_irq_save(flags);
2089 
2090 	stock = this_cpu_ptr(&memcg_stock);
2091 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2092 		stock->nr_pages -= nr_pages;
2093 		ret = true;
2094 	}
2095 
2096 	local_irq_restore(flags);
2097 
2098 	return ret;
2099 }
2100 
2101 /*
2102  * Returns stocks cached in percpu and reset cached information.
2103  */
2104 static void drain_stock(struct memcg_stock_pcp *stock)
2105 {
2106 	struct mem_cgroup *old = stock->cached;
2107 
2108 	if (stock->nr_pages) {
2109 		page_counter_uncharge(&old->memory, stock->nr_pages);
2110 		if (do_memsw_account())
2111 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2112 		css_put_many(&old->css, stock->nr_pages);
2113 		stock->nr_pages = 0;
2114 	}
2115 	stock->cached = NULL;
2116 }
2117 
2118 static void drain_local_stock(struct work_struct *dummy)
2119 {
2120 	struct memcg_stock_pcp *stock;
2121 	unsigned long flags;
2122 
2123 	/*
2124 	 * The only protection from memory hotplug vs. drain_stock races is
2125 	 * that we always operate on local CPU stock here with IRQ disabled
2126 	 */
2127 	local_irq_save(flags);
2128 
2129 	stock = this_cpu_ptr(&memcg_stock);
2130 	drain_stock(stock);
2131 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2132 
2133 	local_irq_restore(flags);
2134 }
2135 
2136 /*
2137  * Cache charges(val) to local per_cpu area.
2138  * This will be consumed by consume_stock() function, later.
2139  */
2140 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2141 {
2142 	struct memcg_stock_pcp *stock;
2143 	unsigned long flags;
2144 
2145 	local_irq_save(flags);
2146 
2147 	stock = this_cpu_ptr(&memcg_stock);
2148 	if (stock->cached != memcg) { /* reset if necessary */
2149 		drain_stock(stock);
2150 		stock->cached = memcg;
2151 	}
2152 	stock->nr_pages += nr_pages;
2153 
2154 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2155 		drain_stock(stock);
2156 
2157 	local_irq_restore(flags);
2158 }
2159 
2160 /*
2161  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2162  * of the hierarchy under it.
2163  */
2164 static void drain_all_stock(struct mem_cgroup *root_memcg)
2165 {
2166 	int cpu, curcpu;
2167 
2168 	/* If someone's already draining, avoid adding running more workers. */
2169 	if (!mutex_trylock(&percpu_charge_mutex))
2170 		return;
2171 	/*
2172 	 * Notify other cpus that system-wide "drain" is running
2173 	 * We do not care about races with the cpu hotplug because cpu down
2174 	 * as well as workers from this path always operate on the local
2175 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2176 	 */
2177 	curcpu = get_cpu();
2178 	for_each_online_cpu(cpu) {
2179 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2180 		struct mem_cgroup *memcg;
2181 		bool flush = false;
2182 
2183 		rcu_read_lock();
2184 		memcg = stock->cached;
2185 		if (memcg && stock->nr_pages &&
2186 		    mem_cgroup_is_descendant(memcg, root_memcg))
2187 			flush = true;
2188 		rcu_read_unlock();
2189 
2190 		if (flush &&
2191 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2192 			if (cpu == curcpu)
2193 				drain_local_stock(&stock->work);
2194 			else
2195 				schedule_work_on(cpu, &stock->work);
2196 		}
2197 	}
2198 	put_cpu();
2199 	mutex_unlock(&percpu_charge_mutex);
2200 }
2201 
2202 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2203 {
2204 	struct memcg_stock_pcp *stock;
2205 	struct mem_cgroup *memcg, *mi;
2206 
2207 	stock = &per_cpu(memcg_stock, cpu);
2208 	drain_stock(stock);
2209 
2210 	for_each_mem_cgroup(memcg) {
2211 		int i;
2212 
2213 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2214 			int nid;
2215 			long x;
2216 
2217 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2218 			if (x)
2219 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2220 					atomic_long_add(x, &memcg->vmstats[i]);
2221 
2222 			if (i >= NR_VM_NODE_STAT_ITEMS)
2223 				continue;
2224 
2225 			for_each_node(nid) {
2226 				struct mem_cgroup_per_node *pn;
2227 
2228 				pn = mem_cgroup_nodeinfo(memcg, nid);
2229 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2230 				if (x)
2231 					do {
2232 						atomic_long_add(x, &pn->lruvec_stat[i]);
2233 					} while ((pn = parent_nodeinfo(pn, nid)));
2234 			}
2235 		}
2236 
2237 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2238 			long x;
2239 
2240 			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2241 			if (x)
2242 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2243 					atomic_long_add(x, &memcg->vmevents[i]);
2244 		}
2245 	}
2246 
2247 	return 0;
2248 }
2249 
2250 static void reclaim_high(struct mem_cgroup *memcg,
2251 			 unsigned int nr_pages,
2252 			 gfp_t gfp_mask)
2253 {
2254 	do {
2255 		if (page_counter_read(&memcg->memory) <=
2256 		    READ_ONCE(memcg->memory.high))
2257 			continue;
2258 		memcg_memory_event(memcg, MEMCG_HIGH);
2259 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2260 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2261 		 !mem_cgroup_is_root(memcg));
2262 }
2263 
2264 static void high_work_func(struct work_struct *work)
2265 {
2266 	struct mem_cgroup *memcg;
2267 
2268 	memcg = container_of(work, struct mem_cgroup, high_work);
2269 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2270 }
2271 
2272 /*
2273  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2274  * enough to still cause a significant slowdown in most cases, while still
2275  * allowing diagnostics and tracing to proceed without becoming stuck.
2276  */
2277 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2278 
2279 /*
2280  * When calculating the delay, we use these either side of the exponentiation to
2281  * maintain precision and scale to a reasonable number of jiffies (see the table
2282  * below.
2283  *
2284  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2285  *   overage ratio to a delay.
2286  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down down the
2287  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2288  *   to produce a reasonable delay curve.
2289  *
2290  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2291  * reasonable delay curve compared to precision-adjusted overage, not
2292  * penalising heavily at first, but still making sure that growth beyond the
2293  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2294  * example, with a high of 100 megabytes:
2295  *
2296  *  +-------+------------------------+
2297  *  | usage | time to allocate in ms |
2298  *  +-------+------------------------+
2299  *  | 100M  |                      0 |
2300  *  | 101M  |                      6 |
2301  *  | 102M  |                     25 |
2302  *  | 103M  |                     57 |
2303  *  | 104M  |                    102 |
2304  *  | 105M  |                    159 |
2305  *  | 106M  |                    230 |
2306  *  | 107M  |                    313 |
2307  *  | 108M  |                    409 |
2308  *  | 109M  |                    518 |
2309  *  | 110M  |                    639 |
2310  *  | 111M  |                    774 |
2311  *  | 112M  |                    921 |
2312  *  | 113M  |                   1081 |
2313  *  | 114M  |                   1254 |
2314  *  | 115M  |                   1439 |
2315  *  | 116M  |                   1638 |
2316  *  | 117M  |                   1849 |
2317  *  | 118M  |                   2000 |
2318  *  | 119M  |                   2000 |
2319  *  | 120M  |                   2000 |
2320  *  +-------+------------------------+
2321  */
2322  #define MEMCG_DELAY_PRECISION_SHIFT 20
2323  #define MEMCG_DELAY_SCALING_SHIFT 14
2324 
2325 static u64 calculate_overage(unsigned long usage, unsigned long high)
2326 {
2327 	u64 overage;
2328 
2329 	if (usage <= high)
2330 		return 0;
2331 
2332 	/*
2333 	 * Prevent division by 0 in overage calculation by acting as if
2334 	 * it was a threshold of 1 page
2335 	 */
2336 	high = max(high, 1UL);
2337 
2338 	overage = usage - high;
2339 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2340 	return div64_u64(overage, high);
2341 }
2342 
2343 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2344 {
2345 	u64 overage, max_overage = 0;
2346 
2347 	do {
2348 		overage = calculate_overage(page_counter_read(&memcg->memory),
2349 					    READ_ONCE(memcg->memory.high));
2350 		max_overage = max(overage, max_overage);
2351 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2352 		 !mem_cgroup_is_root(memcg));
2353 
2354 	return max_overage;
2355 }
2356 
2357 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2358 {
2359 	u64 overage, max_overage = 0;
2360 
2361 	do {
2362 		overage = calculate_overage(page_counter_read(&memcg->swap),
2363 					    READ_ONCE(memcg->swap.high));
2364 		if (overage)
2365 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2366 		max_overage = max(overage, max_overage);
2367 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2368 		 !mem_cgroup_is_root(memcg));
2369 
2370 	return max_overage;
2371 }
2372 
2373 /*
2374  * Get the number of jiffies that we should penalise a mischievous cgroup which
2375  * is exceeding its memory.high by checking both it and its ancestors.
2376  */
2377 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2378 					  unsigned int nr_pages,
2379 					  u64 max_overage)
2380 {
2381 	unsigned long penalty_jiffies;
2382 
2383 	if (!max_overage)
2384 		return 0;
2385 
2386 	/*
2387 	 * We use overage compared to memory.high to calculate the number of
2388 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2389 	 * fairly lenient on small overages, and increasingly harsh when the
2390 	 * memcg in question makes it clear that it has no intention of stopping
2391 	 * its crazy behaviour, so we exponentially increase the delay based on
2392 	 * overage amount.
2393 	 */
2394 	penalty_jiffies = max_overage * max_overage * HZ;
2395 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2396 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2397 
2398 	/*
2399 	 * Factor in the task's own contribution to the overage, such that four
2400 	 * N-sized allocations are throttled approximately the same as one
2401 	 * 4N-sized allocation.
2402 	 *
2403 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2404 	 * larger the current charge patch is than that.
2405 	 */
2406 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2407 }
2408 
2409 /*
2410  * Scheduled by try_charge() to be executed from the userland return path
2411  * and reclaims memory over the high limit.
2412  */
2413 void mem_cgroup_handle_over_high(void)
2414 {
2415 	unsigned long penalty_jiffies;
2416 	unsigned long pflags;
2417 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2418 	struct mem_cgroup *memcg;
2419 
2420 	if (likely(!nr_pages))
2421 		return;
2422 
2423 	memcg = get_mem_cgroup_from_mm(current->mm);
2424 	reclaim_high(memcg, nr_pages, GFP_KERNEL);
2425 	current->memcg_nr_pages_over_high = 0;
2426 
2427 	/*
2428 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2429 	 * allocators proactively to slow down excessive growth.
2430 	 */
2431 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2432 					       mem_find_max_overage(memcg));
2433 
2434 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2435 						swap_find_max_overage(memcg));
2436 
2437 	/*
2438 	 * Clamp the max delay per usermode return so as to still keep the
2439 	 * application moving forwards and also permit diagnostics, albeit
2440 	 * extremely slowly.
2441 	 */
2442 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2443 
2444 	/*
2445 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2446 	 * that it's not even worth doing, in an attempt to be nice to those who
2447 	 * go only a small amount over their memory.high value and maybe haven't
2448 	 * been aggressively reclaimed enough yet.
2449 	 */
2450 	if (penalty_jiffies <= HZ / 100)
2451 		goto out;
2452 
2453 	/*
2454 	 * If we exit early, we're guaranteed to die (since
2455 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2456 	 * need to account for any ill-begotten jiffies to pay them off later.
2457 	 */
2458 	psi_memstall_enter(&pflags);
2459 	schedule_timeout_killable(penalty_jiffies);
2460 	psi_memstall_leave(&pflags);
2461 
2462 out:
2463 	css_put(&memcg->css);
2464 }
2465 
2466 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2467 		      unsigned int nr_pages)
2468 {
2469 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2470 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2471 	struct mem_cgroup *mem_over_limit;
2472 	struct page_counter *counter;
2473 	unsigned long nr_reclaimed;
2474 	bool may_swap = true;
2475 	bool drained = false;
2476 	enum oom_status oom_status;
2477 
2478 	if (mem_cgroup_is_root(memcg))
2479 		return 0;
2480 retry:
2481 	if (consume_stock(memcg, nr_pages))
2482 		return 0;
2483 
2484 	if (!do_memsw_account() ||
2485 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2486 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2487 			goto done_restock;
2488 		if (do_memsw_account())
2489 			page_counter_uncharge(&memcg->memsw, batch);
2490 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2491 	} else {
2492 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2493 		may_swap = false;
2494 	}
2495 
2496 	if (batch > nr_pages) {
2497 		batch = nr_pages;
2498 		goto retry;
2499 	}
2500 
2501 	/*
2502 	 * Memcg doesn't have a dedicated reserve for atomic
2503 	 * allocations. But like the global atomic pool, we need to
2504 	 * put the burden of reclaim on regular allocation requests
2505 	 * and let these go through as privileged allocations.
2506 	 */
2507 	if (gfp_mask & __GFP_ATOMIC)
2508 		goto force;
2509 
2510 	/*
2511 	 * Unlike in global OOM situations, memcg is not in a physical
2512 	 * memory shortage.  Allow dying and OOM-killed tasks to
2513 	 * bypass the last charges so that they can exit quickly and
2514 	 * free their memory.
2515 	 */
2516 	if (unlikely(should_force_charge()))
2517 		goto force;
2518 
2519 	/*
2520 	 * Prevent unbounded recursion when reclaim operations need to
2521 	 * allocate memory. This might exceed the limits temporarily,
2522 	 * but we prefer facilitating memory reclaim and getting back
2523 	 * under the limit over triggering OOM kills in these cases.
2524 	 */
2525 	if (unlikely(current->flags & PF_MEMALLOC))
2526 		goto force;
2527 
2528 	if (unlikely(task_in_memcg_oom(current)))
2529 		goto nomem;
2530 
2531 	if (!gfpflags_allow_blocking(gfp_mask))
2532 		goto nomem;
2533 
2534 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2535 
2536 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2537 						    gfp_mask, may_swap);
2538 
2539 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2540 		goto retry;
2541 
2542 	if (!drained) {
2543 		drain_all_stock(mem_over_limit);
2544 		drained = true;
2545 		goto retry;
2546 	}
2547 
2548 	if (gfp_mask & __GFP_NORETRY)
2549 		goto nomem;
2550 	/*
2551 	 * Even though the limit is exceeded at this point, reclaim
2552 	 * may have been able to free some pages.  Retry the charge
2553 	 * before killing the task.
2554 	 *
2555 	 * Only for regular pages, though: huge pages are rather
2556 	 * unlikely to succeed so close to the limit, and we fall back
2557 	 * to regular pages anyway in case of failure.
2558 	 */
2559 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2560 		goto retry;
2561 	/*
2562 	 * At task move, charge accounts can be doubly counted. So, it's
2563 	 * better to wait until the end of task_move if something is going on.
2564 	 */
2565 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2566 		goto retry;
2567 
2568 	if (nr_retries--)
2569 		goto retry;
2570 
2571 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2572 		goto nomem;
2573 
2574 	if (gfp_mask & __GFP_NOFAIL)
2575 		goto force;
2576 
2577 	if (fatal_signal_pending(current))
2578 		goto force;
2579 
2580 	/*
2581 	 * keep retrying as long as the memcg oom killer is able to make
2582 	 * a forward progress or bypass the charge if the oom killer
2583 	 * couldn't make any progress.
2584 	 */
2585 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2586 		       get_order(nr_pages * PAGE_SIZE));
2587 	switch (oom_status) {
2588 	case OOM_SUCCESS:
2589 		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2590 		goto retry;
2591 	case OOM_FAILED:
2592 		goto force;
2593 	default:
2594 		goto nomem;
2595 	}
2596 nomem:
2597 	if (!(gfp_mask & __GFP_NOFAIL))
2598 		return -ENOMEM;
2599 force:
2600 	/*
2601 	 * The allocation either can't fail or will lead to more memory
2602 	 * being freed very soon.  Allow memory usage go over the limit
2603 	 * temporarily by force charging it.
2604 	 */
2605 	page_counter_charge(&memcg->memory, nr_pages);
2606 	if (do_memsw_account())
2607 		page_counter_charge(&memcg->memsw, nr_pages);
2608 	css_get_many(&memcg->css, nr_pages);
2609 
2610 	return 0;
2611 
2612 done_restock:
2613 	css_get_many(&memcg->css, batch);
2614 	if (batch > nr_pages)
2615 		refill_stock(memcg, batch - nr_pages);
2616 
2617 	/*
2618 	 * If the hierarchy is above the normal consumption range, schedule
2619 	 * reclaim on returning to userland.  We can perform reclaim here
2620 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2621 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2622 	 * not recorded as it most likely matches current's and won't
2623 	 * change in the meantime.  As high limit is checked again before
2624 	 * reclaim, the cost of mismatch is negligible.
2625 	 */
2626 	do {
2627 		bool mem_high, swap_high;
2628 
2629 		mem_high = page_counter_read(&memcg->memory) >
2630 			READ_ONCE(memcg->memory.high);
2631 		swap_high = page_counter_read(&memcg->swap) >
2632 			READ_ONCE(memcg->swap.high);
2633 
2634 		/* Don't bother a random interrupted task */
2635 		if (in_interrupt()) {
2636 			if (mem_high) {
2637 				schedule_work(&memcg->high_work);
2638 				break;
2639 			}
2640 			continue;
2641 		}
2642 
2643 		if (mem_high || swap_high) {
2644 			/*
2645 			 * The allocating tasks in this cgroup will need to do
2646 			 * reclaim or be throttled to prevent further growth
2647 			 * of the memory or swap footprints.
2648 			 *
2649 			 * Target some best-effort fairness between the tasks,
2650 			 * and distribute reclaim work and delay penalties
2651 			 * based on how much each task is actually allocating.
2652 			 */
2653 			current->memcg_nr_pages_over_high += batch;
2654 			set_notify_resume(current);
2655 			break;
2656 		}
2657 	} while ((memcg = parent_mem_cgroup(memcg)));
2658 
2659 	return 0;
2660 }
2661 
2662 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2663 {
2664 	if (mem_cgroup_is_root(memcg))
2665 		return;
2666 
2667 	page_counter_uncharge(&memcg->memory, nr_pages);
2668 	if (do_memsw_account())
2669 		page_counter_uncharge(&memcg->memsw, nr_pages);
2670 
2671 	css_put_many(&memcg->css, nr_pages);
2672 }
2673 
2674 static void lock_page_lru(struct page *page, int *isolated)
2675 {
2676 	pg_data_t *pgdat = page_pgdat(page);
2677 
2678 	spin_lock_irq(&pgdat->lru_lock);
2679 	if (PageLRU(page)) {
2680 		struct lruvec *lruvec;
2681 
2682 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2683 		ClearPageLRU(page);
2684 		del_page_from_lru_list(page, lruvec, page_lru(page));
2685 		*isolated = 1;
2686 	} else
2687 		*isolated = 0;
2688 }
2689 
2690 static void unlock_page_lru(struct page *page, int isolated)
2691 {
2692 	pg_data_t *pgdat = page_pgdat(page);
2693 
2694 	if (isolated) {
2695 		struct lruvec *lruvec;
2696 
2697 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2698 		VM_BUG_ON_PAGE(PageLRU(page), page);
2699 		SetPageLRU(page);
2700 		add_page_to_lru_list(page, lruvec, page_lru(page));
2701 	}
2702 	spin_unlock_irq(&pgdat->lru_lock);
2703 }
2704 
2705 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2706 			  bool lrucare)
2707 {
2708 	int isolated;
2709 
2710 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2711 
2712 	/*
2713 	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2714 	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2715 	 */
2716 	if (lrucare)
2717 		lock_page_lru(page, &isolated);
2718 
2719 	/*
2720 	 * Nobody should be changing or seriously looking at
2721 	 * page->mem_cgroup at this point:
2722 	 *
2723 	 * - the page is uncharged
2724 	 *
2725 	 * - the page is off-LRU
2726 	 *
2727 	 * - an anonymous fault has exclusive page access, except for
2728 	 *   a locked page table
2729 	 *
2730 	 * - a page cache insertion, a swapin fault, or a migration
2731 	 *   have the page locked
2732 	 */
2733 	page->mem_cgroup = memcg;
2734 
2735 	if (lrucare)
2736 		unlock_page_lru(page, isolated);
2737 }
2738 
2739 #ifdef CONFIG_MEMCG_KMEM
2740 /*
2741  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2742  *
2743  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2744  * cgroup_mutex, etc.
2745  */
2746 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2747 {
2748 	struct page *page;
2749 
2750 	if (mem_cgroup_disabled())
2751 		return NULL;
2752 
2753 	page = virt_to_head_page(p);
2754 
2755 	/*
2756 	 * Slab pages don't have page->mem_cgroup set because corresponding
2757 	 * kmem caches can be reparented during the lifetime. That's why
2758 	 * memcg_from_slab_page() should be used instead.
2759 	 */
2760 	if (PageSlab(page))
2761 		return memcg_from_slab_page(page);
2762 
2763 	/* All other pages use page->mem_cgroup */
2764 	return page->mem_cgroup;
2765 }
2766 
2767 static int memcg_alloc_cache_id(void)
2768 {
2769 	int id, size;
2770 	int err;
2771 
2772 	id = ida_simple_get(&memcg_cache_ida,
2773 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2774 	if (id < 0)
2775 		return id;
2776 
2777 	if (id < memcg_nr_cache_ids)
2778 		return id;
2779 
2780 	/*
2781 	 * There's no space for the new id in memcg_caches arrays,
2782 	 * so we have to grow them.
2783 	 */
2784 	down_write(&memcg_cache_ids_sem);
2785 
2786 	size = 2 * (id + 1);
2787 	if (size < MEMCG_CACHES_MIN_SIZE)
2788 		size = MEMCG_CACHES_MIN_SIZE;
2789 	else if (size > MEMCG_CACHES_MAX_SIZE)
2790 		size = MEMCG_CACHES_MAX_SIZE;
2791 
2792 	err = memcg_update_all_caches(size);
2793 	if (!err)
2794 		err = memcg_update_all_list_lrus(size);
2795 	if (!err)
2796 		memcg_nr_cache_ids = size;
2797 
2798 	up_write(&memcg_cache_ids_sem);
2799 
2800 	if (err) {
2801 		ida_simple_remove(&memcg_cache_ida, id);
2802 		return err;
2803 	}
2804 	return id;
2805 }
2806 
2807 static void memcg_free_cache_id(int id)
2808 {
2809 	ida_simple_remove(&memcg_cache_ida, id);
2810 }
2811 
2812 struct memcg_kmem_cache_create_work {
2813 	struct mem_cgroup *memcg;
2814 	struct kmem_cache *cachep;
2815 	struct work_struct work;
2816 };
2817 
2818 static void memcg_kmem_cache_create_func(struct work_struct *w)
2819 {
2820 	struct memcg_kmem_cache_create_work *cw =
2821 		container_of(w, struct memcg_kmem_cache_create_work, work);
2822 	struct mem_cgroup *memcg = cw->memcg;
2823 	struct kmem_cache *cachep = cw->cachep;
2824 
2825 	memcg_create_kmem_cache(memcg, cachep);
2826 
2827 	css_put(&memcg->css);
2828 	kfree(cw);
2829 }
2830 
2831 /*
2832  * Enqueue the creation of a per-memcg kmem_cache.
2833  */
2834 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2835 					       struct kmem_cache *cachep)
2836 {
2837 	struct memcg_kmem_cache_create_work *cw;
2838 
2839 	if (!css_tryget_online(&memcg->css))
2840 		return;
2841 
2842 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2843 	if (!cw)
2844 		return;
2845 
2846 	cw->memcg = memcg;
2847 	cw->cachep = cachep;
2848 	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2849 
2850 	queue_work(memcg_kmem_cache_wq, &cw->work);
2851 }
2852 
2853 static inline bool memcg_kmem_bypass(void)
2854 {
2855 	if (in_interrupt())
2856 		return true;
2857 
2858 	/* Allow remote memcg charging in kthread contexts. */
2859 	if ((!current->mm || (current->flags & PF_KTHREAD)) &&
2860 	     !current->active_memcg)
2861 		return true;
2862 	return false;
2863 }
2864 
2865 /**
2866  * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2867  * @cachep: the original global kmem cache
2868  *
2869  * Return the kmem_cache we're supposed to use for a slab allocation.
2870  * We try to use the current memcg's version of the cache.
2871  *
2872  * If the cache does not exist yet, if we are the first user of it, we
2873  * create it asynchronously in a workqueue and let the current allocation
2874  * go through with the original cache.
2875  *
2876  * This function takes a reference to the cache it returns to assure it
2877  * won't get destroyed while we are working with it. Once the caller is
2878  * done with it, memcg_kmem_put_cache() must be called to release the
2879  * reference.
2880  */
2881 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2882 {
2883 	struct mem_cgroup *memcg;
2884 	struct kmem_cache *memcg_cachep;
2885 	struct memcg_cache_array *arr;
2886 	int kmemcg_id;
2887 
2888 	VM_BUG_ON(!is_root_cache(cachep));
2889 
2890 	if (memcg_kmem_bypass())
2891 		return cachep;
2892 
2893 	rcu_read_lock();
2894 
2895 	if (unlikely(current->active_memcg))
2896 		memcg = current->active_memcg;
2897 	else
2898 		memcg = mem_cgroup_from_task(current);
2899 
2900 	if (!memcg || memcg == root_mem_cgroup)
2901 		goto out_unlock;
2902 
2903 	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2904 	if (kmemcg_id < 0)
2905 		goto out_unlock;
2906 
2907 	arr = rcu_dereference(cachep->memcg_params.memcg_caches);
2908 
2909 	/*
2910 	 * Make sure we will access the up-to-date value. The code updating
2911 	 * memcg_caches issues a write barrier to match the data dependency
2912 	 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
2913 	 */
2914 	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
2915 
2916 	/*
2917 	 * If we are in a safe context (can wait, and not in interrupt
2918 	 * context), we could be be predictable and return right away.
2919 	 * This would guarantee that the allocation being performed
2920 	 * already belongs in the new cache.
2921 	 *
2922 	 * However, there are some clashes that can arrive from locking.
2923 	 * For instance, because we acquire the slab_mutex while doing
2924 	 * memcg_create_kmem_cache, this means no further allocation
2925 	 * could happen with the slab_mutex held. So it's better to
2926 	 * defer everything.
2927 	 *
2928 	 * If the memcg is dying or memcg_cache is about to be released,
2929 	 * don't bother creating new kmem_caches. Because memcg_cachep
2930 	 * is ZEROed as the fist step of kmem offlining, we don't need
2931 	 * percpu_ref_tryget_live() here. css_tryget_online() check in
2932 	 * memcg_schedule_kmem_cache_create() will prevent us from
2933 	 * creation of a new kmem_cache.
2934 	 */
2935 	if (unlikely(!memcg_cachep))
2936 		memcg_schedule_kmem_cache_create(memcg, cachep);
2937 	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
2938 		cachep = memcg_cachep;
2939 out_unlock:
2940 	rcu_read_unlock();
2941 	return cachep;
2942 }
2943 
2944 /**
2945  * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2946  * @cachep: the cache returned by memcg_kmem_get_cache
2947  */
2948 void memcg_kmem_put_cache(struct kmem_cache *cachep)
2949 {
2950 	if (!is_root_cache(cachep))
2951 		percpu_ref_put(&cachep->memcg_params.refcnt);
2952 }
2953 
2954 /**
2955  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
2956  * @memcg: memory cgroup to charge
2957  * @gfp: reclaim mode
2958  * @nr_pages: number of pages to charge
2959  *
2960  * Returns 0 on success, an error code on failure.
2961  */
2962 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
2963 			unsigned int nr_pages)
2964 {
2965 	struct page_counter *counter;
2966 	int ret;
2967 
2968 	ret = try_charge(memcg, gfp, nr_pages);
2969 	if (ret)
2970 		return ret;
2971 
2972 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2973 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2974 
2975 		/*
2976 		 * Enforce __GFP_NOFAIL allocation because callers are not
2977 		 * prepared to see failures and likely do not have any failure
2978 		 * handling code.
2979 		 */
2980 		if (gfp & __GFP_NOFAIL) {
2981 			page_counter_charge(&memcg->kmem, nr_pages);
2982 			return 0;
2983 		}
2984 		cancel_charge(memcg, nr_pages);
2985 		return -ENOMEM;
2986 	}
2987 	return 0;
2988 }
2989 
2990 /**
2991  * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
2992  * @memcg: memcg to uncharge
2993  * @nr_pages: number of pages to uncharge
2994  */
2995 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
2996 {
2997 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2998 		page_counter_uncharge(&memcg->kmem, nr_pages);
2999 
3000 	page_counter_uncharge(&memcg->memory, nr_pages);
3001 	if (do_memsw_account())
3002 		page_counter_uncharge(&memcg->memsw, nr_pages);
3003 }
3004 
3005 /**
3006  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3007  * @page: page to charge
3008  * @gfp: reclaim mode
3009  * @order: allocation order
3010  *
3011  * Returns 0 on success, an error code on failure.
3012  */
3013 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3014 {
3015 	struct mem_cgroup *memcg;
3016 	int ret = 0;
3017 
3018 	if (memcg_kmem_bypass())
3019 		return 0;
3020 
3021 	memcg = get_mem_cgroup_from_current();
3022 	if (!mem_cgroup_is_root(memcg)) {
3023 		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3024 		if (!ret) {
3025 			page->mem_cgroup = memcg;
3026 			__SetPageKmemcg(page);
3027 		}
3028 	}
3029 	css_put(&memcg->css);
3030 	return ret;
3031 }
3032 
3033 /**
3034  * __memcg_kmem_uncharge_page: uncharge a kmem page
3035  * @page: page to uncharge
3036  * @order: allocation order
3037  */
3038 void __memcg_kmem_uncharge_page(struct page *page, int order)
3039 {
3040 	struct mem_cgroup *memcg = page->mem_cgroup;
3041 	unsigned int nr_pages = 1 << order;
3042 
3043 	if (!memcg)
3044 		return;
3045 
3046 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3047 	__memcg_kmem_uncharge(memcg, nr_pages);
3048 	page->mem_cgroup = NULL;
3049 
3050 	/* slab pages do not have PageKmemcg flag set */
3051 	if (PageKmemcg(page))
3052 		__ClearPageKmemcg(page);
3053 
3054 	css_put_many(&memcg->css, nr_pages);
3055 }
3056 #endif /* CONFIG_MEMCG_KMEM */
3057 
3058 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3059 
3060 /*
3061  * Because tail pages are not marked as "used", set it. We're under
3062  * pgdat->lru_lock and migration entries setup in all page mappings.
3063  */
3064 void mem_cgroup_split_huge_fixup(struct page *head)
3065 {
3066 	int i;
3067 
3068 	if (mem_cgroup_disabled())
3069 		return;
3070 
3071 	for (i = 1; i < HPAGE_PMD_NR; i++)
3072 		head[i].mem_cgroup = head->mem_cgroup;
3073 
3074 	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
3075 }
3076 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3077 
3078 #ifdef CONFIG_MEMCG_SWAP
3079 /**
3080  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3081  * @entry: swap entry to be moved
3082  * @from:  mem_cgroup which the entry is moved from
3083  * @to:  mem_cgroup which the entry is moved to
3084  *
3085  * It succeeds only when the swap_cgroup's record for this entry is the same
3086  * as the mem_cgroup's id of @from.
3087  *
3088  * Returns 0 on success, -EINVAL on failure.
3089  *
3090  * The caller must have charged to @to, IOW, called page_counter_charge() about
3091  * both res and memsw, and called css_get().
3092  */
3093 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3094 				struct mem_cgroup *from, struct mem_cgroup *to)
3095 {
3096 	unsigned short old_id, new_id;
3097 
3098 	old_id = mem_cgroup_id(from);
3099 	new_id = mem_cgroup_id(to);
3100 
3101 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3102 		mod_memcg_state(from, MEMCG_SWAP, -1);
3103 		mod_memcg_state(to, MEMCG_SWAP, 1);
3104 		return 0;
3105 	}
3106 	return -EINVAL;
3107 }
3108 #else
3109 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3110 				struct mem_cgroup *from, struct mem_cgroup *to)
3111 {
3112 	return -EINVAL;
3113 }
3114 #endif
3115 
3116 static DEFINE_MUTEX(memcg_max_mutex);
3117 
3118 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3119 				 unsigned long max, bool memsw)
3120 {
3121 	bool enlarge = false;
3122 	bool drained = false;
3123 	int ret;
3124 	bool limits_invariant;
3125 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3126 
3127 	do {
3128 		if (signal_pending(current)) {
3129 			ret = -EINTR;
3130 			break;
3131 		}
3132 
3133 		mutex_lock(&memcg_max_mutex);
3134 		/*
3135 		 * Make sure that the new limit (memsw or memory limit) doesn't
3136 		 * break our basic invariant rule memory.max <= memsw.max.
3137 		 */
3138 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3139 					   max <= memcg->memsw.max;
3140 		if (!limits_invariant) {
3141 			mutex_unlock(&memcg_max_mutex);
3142 			ret = -EINVAL;
3143 			break;
3144 		}
3145 		if (max > counter->max)
3146 			enlarge = true;
3147 		ret = page_counter_set_max(counter, max);
3148 		mutex_unlock(&memcg_max_mutex);
3149 
3150 		if (!ret)
3151 			break;
3152 
3153 		if (!drained) {
3154 			drain_all_stock(memcg);
3155 			drained = true;
3156 			continue;
3157 		}
3158 
3159 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3160 					GFP_KERNEL, !memsw)) {
3161 			ret = -EBUSY;
3162 			break;
3163 		}
3164 	} while (true);
3165 
3166 	if (!ret && enlarge)
3167 		memcg_oom_recover(memcg);
3168 
3169 	return ret;
3170 }
3171 
3172 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3173 					    gfp_t gfp_mask,
3174 					    unsigned long *total_scanned)
3175 {
3176 	unsigned long nr_reclaimed = 0;
3177 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3178 	unsigned long reclaimed;
3179 	int loop = 0;
3180 	struct mem_cgroup_tree_per_node *mctz;
3181 	unsigned long excess;
3182 	unsigned long nr_scanned;
3183 
3184 	if (order > 0)
3185 		return 0;
3186 
3187 	mctz = soft_limit_tree_node(pgdat->node_id);
3188 
3189 	/*
3190 	 * Do not even bother to check the largest node if the root
3191 	 * is empty. Do it lockless to prevent lock bouncing. Races
3192 	 * are acceptable as soft limit is best effort anyway.
3193 	 */
3194 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3195 		return 0;
3196 
3197 	/*
3198 	 * This loop can run a while, specially if mem_cgroup's continuously
3199 	 * keep exceeding their soft limit and putting the system under
3200 	 * pressure
3201 	 */
3202 	do {
3203 		if (next_mz)
3204 			mz = next_mz;
3205 		else
3206 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3207 		if (!mz)
3208 			break;
3209 
3210 		nr_scanned = 0;
3211 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3212 						    gfp_mask, &nr_scanned);
3213 		nr_reclaimed += reclaimed;
3214 		*total_scanned += nr_scanned;
3215 		spin_lock_irq(&mctz->lock);
3216 		__mem_cgroup_remove_exceeded(mz, mctz);
3217 
3218 		/*
3219 		 * If we failed to reclaim anything from this memory cgroup
3220 		 * it is time to move on to the next cgroup
3221 		 */
3222 		next_mz = NULL;
3223 		if (!reclaimed)
3224 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3225 
3226 		excess = soft_limit_excess(mz->memcg);
3227 		/*
3228 		 * One school of thought says that we should not add
3229 		 * back the node to the tree if reclaim returns 0.
3230 		 * But our reclaim could return 0, simply because due
3231 		 * to priority we are exposing a smaller subset of
3232 		 * memory to reclaim from. Consider this as a longer
3233 		 * term TODO.
3234 		 */
3235 		/* If excess == 0, no tree ops */
3236 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3237 		spin_unlock_irq(&mctz->lock);
3238 		css_put(&mz->memcg->css);
3239 		loop++;
3240 		/*
3241 		 * Could not reclaim anything and there are no more
3242 		 * mem cgroups to try or we seem to be looping without
3243 		 * reclaiming anything.
3244 		 */
3245 		if (!nr_reclaimed &&
3246 			(next_mz == NULL ||
3247 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3248 			break;
3249 	} while (!nr_reclaimed);
3250 	if (next_mz)
3251 		css_put(&next_mz->memcg->css);
3252 	return nr_reclaimed;
3253 }
3254 
3255 /*
3256  * Test whether @memcg has children, dead or alive.  Note that this
3257  * function doesn't care whether @memcg has use_hierarchy enabled and
3258  * returns %true if there are child csses according to the cgroup
3259  * hierarchy.  Testing use_hierarchy is the caller's responsiblity.
3260  */
3261 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3262 {
3263 	bool ret;
3264 
3265 	rcu_read_lock();
3266 	ret = css_next_child(NULL, &memcg->css);
3267 	rcu_read_unlock();
3268 	return ret;
3269 }
3270 
3271 /*
3272  * Reclaims as many pages from the given memcg as possible.
3273  *
3274  * Caller is responsible for holding css reference for memcg.
3275  */
3276 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3277 {
3278 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3279 
3280 	/* we call try-to-free pages for make this cgroup empty */
3281 	lru_add_drain_all();
3282 
3283 	drain_all_stock(memcg);
3284 
3285 	/* try to free all pages in this cgroup */
3286 	while (nr_retries && page_counter_read(&memcg->memory)) {
3287 		int progress;
3288 
3289 		if (signal_pending(current))
3290 			return -EINTR;
3291 
3292 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3293 							GFP_KERNEL, true);
3294 		if (!progress) {
3295 			nr_retries--;
3296 			/* maybe some writeback is necessary */
3297 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3298 		}
3299 
3300 	}
3301 
3302 	return 0;
3303 }
3304 
3305 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3306 					    char *buf, size_t nbytes,
3307 					    loff_t off)
3308 {
3309 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3310 
3311 	if (mem_cgroup_is_root(memcg))
3312 		return -EINVAL;
3313 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3314 }
3315 
3316 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3317 				     struct cftype *cft)
3318 {
3319 	return mem_cgroup_from_css(css)->use_hierarchy;
3320 }
3321 
3322 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3323 				      struct cftype *cft, u64 val)
3324 {
3325 	int retval = 0;
3326 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3327 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3328 
3329 	if (memcg->use_hierarchy == val)
3330 		return 0;
3331 
3332 	/*
3333 	 * If parent's use_hierarchy is set, we can't make any modifications
3334 	 * in the child subtrees. If it is unset, then the change can
3335 	 * occur, provided the current cgroup has no children.
3336 	 *
3337 	 * For the root cgroup, parent_mem is NULL, we allow value to be
3338 	 * set if there are no children.
3339 	 */
3340 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3341 				(val == 1 || val == 0)) {
3342 		if (!memcg_has_children(memcg))
3343 			memcg->use_hierarchy = val;
3344 		else
3345 			retval = -EBUSY;
3346 	} else
3347 		retval = -EINVAL;
3348 
3349 	return retval;
3350 }
3351 
3352 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3353 {
3354 	unsigned long val;
3355 
3356 	if (mem_cgroup_is_root(memcg)) {
3357 		val = memcg_page_state(memcg, MEMCG_CACHE) +
3358 			memcg_page_state(memcg, MEMCG_RSS);
3359 		if (swap)
3360 			val += memcg_page_state(memcg, MEMCG_SWAP);
3361 	} else {
3362 		if (!swap)
3363 			val = page_counter_read(&memcg->memory);
3364 		else
3365 			val = page_counter_read(&memcg->memsw);
3366 	}
3367 	return val;
3368 }
3369 
3370 enum {
3371 	RES_USAGE,
3372 	RES_LIMIT,
3373 	RES_MAX_USAGE,
3374 	RES_FAILCNT,
3375 	RES_SOFT_LIMIT,
3376 };
3377 
3378 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3379 			       struct cftype *cft)
3380 {
3381 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3382 	struct page_counter *counter;
3383 
3384 	switch (MEMFILE_TYPE(cft->private)) {
3385 	case _MEM:
3386 		counter = &memcg->memory;
3387 		break;
3388 	case _MEMSWAP:
3389 		counter = &memcg->memsw;
3390 		break;
3391 	case _KMEM:
3392 		counter = &memcg->kmem;
3393 		break;
3394 	case _TCP:
3395 		counter = &memcg->tcpmem;
3396 		break;
3397 	default:
3398 		BUG();
3399 	}
3400 
3401 	switch (MEMFILE_ATTR(cft->private)) {
3402 	case RES_USAGE:
3403 		if (counter == &memcg->memory)
3404 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3405 		if (counter == &memcg->memsw)
3406 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3407 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3408 	case RES_LIMIT:
3409 		return (u64)counter->max * PAGE_SIZE;
3410 	case RES_MAX_USAGE:
3411 		return (u64)counter->watermark * PAGE_SIZE;
3412 	case RES_FAILCNT:
3413 		return counter->failcnt;
3414 	case RES_SOFT_LIMIT:
3415 		return (u64)memcg->soft_limit * PAGE_SIZE;
3416 	default:
3417 		BUG();
3418 	}
3419 }
3420 
3421 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3422 {
3423 	unsigned long stat[MEMCG_NR_STAT] = {0};
3424 	struct mem_cgroup *mi;
3425 	int node, cpu, i;
3426 
3427 	for_each_online_cpu(cpu)
3428 		for (i = 0; i < MEMCG_NR_STAT; i++)
3429 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3430 
3431 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3432 		for (i = 0; i < MEMCG_NR_STAT; i++)
3433 			atomic_long_add(stat[i], &mi->vmstats[i]);
3434 
3435 	for_each_node(node) {
3436 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3437 		struct mem_cgroup_per_node *pi;
3438 
3439 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3440 			stat[i] = 0;
3441 
3442 		for_each_online_cpu(cpu)
3443 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3444 				stat[i] += per_cpu(
3445 					pn->lruvec_stat_cpu->count[i], cpu);
3446 
3447 		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3448 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3449 				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3450 	}
3451 }
3452 
3453 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3454 {
3455 	unsigned long events[NR_VM_EVENT_ITEMS];
3456 	struct mem_cgroup *mi;
3457 	int cpu, i;
3458 
3459 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3460 		events[i] = 0;
3461 
3462 	for_each_online_cpu(cpu)
3463 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3464 			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3465 					     cpu);
3466 
3467 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3468 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3469 			atomic_long_add(events[i], &mi->vmevents[i]);
3470 }
3471 
3472 #ifdef CONFIG_MEMCG_KMEM
3473 static int memcg_online_kmem(struct mem_cgroup *memcg)
3474 {
3475 	int memcg_id;
3476 
3477 	if (cgroup_memory_nokmem)
3478 		return 0;
3479 
3480 	BUG_ON(memcg->kmemcg_id >= 0);
3481 	BUG_ON(memcg->kmem_state);
3482 
3483 	memcg_id = memcg_alloc_cache_id();
3484 	if (memcg_id < 0)
3485 		return memcg_id;
3486 
3487 	static_branch_inc(&memcg_kmem_enabled_key);
3488 	/*
3489 	 * A memory cgroup is considered kmem-online as soon as it gets
3490 	 * kmemcg_id. Setting the id after enabling static branching will
3491 	 * guarantee no one starts accounting before all call sites are
3492 	 * patched.
3493 	 */
3494 	memcg->kmemcg_id = memcg_id;
3495 	memcg->kmem_state = KMEM_ONLINE;
3496 	INIT_LIST_HEAD(&memcg->kmem_caches);
3497 
3498 	return 0;
3499 }
3500 
3501 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3502 {
3503 	struct cgroup_subsys_state *css;
3504 	struct mem_cgroup *parent, *child;
3505 	int kmemcg_id;
3506 
3507 	if (memcg->kmem_state != KMEM_ONLINE)
3508 		return;
3509 	/*
3510 	 * Clear the online state before clearing memcg_caches array
3511 	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
3512 	 * guarantees that no cache will be created for this cgroup
3513 	 * after we are done (see memcg_create_kmem_cache()).
3514 	 */
3515 	memcg->kmem_state = KMEM_ALLOCATED;
3516 
3517 	parent = parent_mem_cgroup(memcg);
3518 	if (!parent)
3519 		parent = root_mem_cgroup;
3520 
3521 	/*
3522 	 * Deactivate and reparent kmem_caches.
3523 	 */
3524 	memcg_deactivate_kmem_caches(memcg, parent);
3525 
3526 	kmemcg_id = memcg->kmemcg_id;
3527 	BUG_ON(kmemcg_id < 0);
3528 
3529 	/*
3530 	 * Change kmemcg_id of this cgroup and all its descendants to the
3531 	 * parent's id, and then move all entries from this cgroup's list_lrus
3532 	 * to ones of the parent. After we have finished, all list_lrus
3533 	 * corresponding to this cgroup are guaranteed to remain empty. The
3534 	 * ordering is imposed by list_lru_node->lock taken by
3535 	 * memcg_drain_all_list_lrus().
3536 	 */
3537 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3538 	css_for_each_descendant_pre(css, &memcg->css) {
3539 		child = mem_cgroup_from_css(css);
3540 		BUG_ON(child->kmemcg_id != kmemcg_id);
3541 		child->kmemcg_id = parent->kmemcg_id;
3542 		if (!memcg->use_hierarchy)
3543 			break;
3544 	}
3545 	rcu_read_unlock();
3546 
3547 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3548 
3549 	memcg_free_cache_id(kmemcg_id);
3550 }
3551 
3552 static void memcg_free_kmem(struct mem_cgroup *memcg)
3553 {
3554 	/* css_alloc() failed, offlining didn't happen */
3555 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3556 		memcg_offline_kmem(memcg);
3557 
3558 	if (memcg->kmem_state == KMEM_ALLOCATED) {
3559 		WARN_ON(!list_empty(&memcg->kmem_caches));
3560 		static_branch_dec(&memcg_kmem_enabled_key);
3561 	}
3562 }
3563 #else
3564 static int memcg_online_kmem(struct mem_cgroup *memcg)
3565 {
3566 	return 0;
3567 }
3568 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3569 {
3570 }
3571 static void memcg_free_kmem(struct mem_cgroup *memcg)
3572 {
3573 }
3574 #endif /* CONFIG_MEMCG_KMEM */
3575 
3576 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3577 				 unsigned long max)
3578 {
3579 	int ret;
3580 
3581 	mutex_lock(&memcg_max_mutex);
3582 	ret = page_counter_set_max(&memcg->kmem, max);
3583 	mutex_unlock(&memcg_max_mutex);
3584 	return ret;
3585 }
3586 
3587 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3588 {
3589 	int ret;
3590 
3591 	mutex_lock(&memcg_max_mutex);
3592 
3593 	ret = page_counter_set_max(&memcg->tcpmem, max);
3594 	if (ret)
3595 		goto out;
3596 
3597 	if (!memcg->tcpmem_active) {
3598 		/*
3599 		 * The active flag needs to be written after the static_key
3600 		 * update. This is what guarantees that the socket activation
3601 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3602 		 * for details, and note that we don't mark any socket as
3603 		 * belonging to this memcg until that flag is up.
3604 		 *
3605 		 * We need to do this, because static_keys will span multiple
3606 		 * sites, but we can't control their order. If we mark a socket
3607 		 * as accounted, but the accounting functions are not patched in
3608 		 * yet, we'll lose accounting.
3609 		 *
3610 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3611 		 * because when this value change, the code to process it is not
3612 		 * patched in yet.
3613 		 */
3614 		static_branch_inc(&memcg_sockets_enabled_key);
3615 		memcg->tcpmem_active = true;
3616 	}
3617 out:
3618 	mutex_unlock(&memcg_max_mutex);
3619 	return ret;
3620 }
3621 
3622 /*
3623  * The user of this function is...
3624  * RES_LIMIT.
3625  */
3626 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3627 				char *buf, size_t nbytes, loff_t off)
3628 {
3629 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3630 	unsigned long nr_pages;
3631 	int ret;
3632 
3633 	buf = strstrip(buf);
3634 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3635 	if (ret)
3636 		return ret;
3637 
3638 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3639 	case RES_LIMIT:
3640 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3641 			ret = -EINVAL;
3642 			break;
3643 		}
3644 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3645 		case _MEM:
3646 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3647 			break;
3648 		case _MEMSWAP:
3649 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3650 			break;
3651 		case _KMEM:
3652 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3653 				     "Please report your usecase to linux-mm@kvack.org if you "
3654 				     "depend on this functionality.\n");
3655 			ret = memcg_update_kmem_max(memcg, nr_pages);
3656 			break;
3657 		case _TCP:
3658 			ret = memcg_update_tcp_max(memcg, nr_pages);
3659 			break;
3660 		}
3661 		break;
3662 	case RES_SOFT_LIMIT:
3663 		memcg->soft_limit = nr_pages;
3664 		ret = 0;
3665 		break;
3666 	}
3667 	return ret ?: nbytes;
3668 }
3669 
3670 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3671 				size_t nbytes, loff_t off)
3672 {
3673 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3674 	struct page_counter *counter;
3675 
3676 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3677 	case _MEM:
3678 		counter = &memcg->memory;
3679 		break;
3680 	case _MEMSWAP:
3681 		counter = &memcg->memsw;
3682 		break;
3683 	case _KMEM:
3684 		counter = &memcg->kmem;
3685 		break;
3686 	case _TCP:
3687 		counter = &memcg->tcpmem;
3688 		break;
3689 	default:
3690 		BUG();
3691 	}
3692 
3693 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3694 	case RES_MAX_USAGE:
3695 		page_counter_reset_watermark(counter);
3696 		break;
3697 	case RES_FAILCNT:
3698 		counter->failcnt = 0;
3699 		break;
3700 	default:
3701 		BUG();
3702 	}
3703 
3704 	return nbytes;
3705 }
3706 
3707 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3708 					struct cftype *cft)
3709 {
3710 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3711 }
3712 
3713 #ifdef CONFIG_MMU
3714 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3715 					struct cftype *cft, u64 val)
3716 {
3717 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3718 
3719 	if (val & ~MOVE_MASK)
3720 		return -EINVAL;
3721 
3722 	/*
3723 	 * No kind of locking is needed in here, because ->can_attach() will
3724 	 * check this value once in the beginning of the process, and then carry
3725 	 * on with stale data. This means that changes to this value will only
3726 	 * affect task migrations starting after the change.
3727 	 */
3728 	memcg->move_charge_at_immigrate = val;
3729 	return 0;
3730 }
3731 #else
3732 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3733 					struct cftype *cft, u64 val)
3734 {
3735 	return -ENOSYS;
3736 }
3737 #endif
3738 
3739 #ifdef CONFIG_NUMA
3740 
3741 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3742 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3743 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3744 
3745 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3746 					   int nid, unsigned int lru_mask)
3747 {
3748 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3749 	unsigned long nr = 0;
3750 	enum lru_list lru;
3751 
3752 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3753 
3754 	for_each_lru(lru) {
3755 		if (!(BIT(lru) & lru_mask))
3756 			continue;
3757 		nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3758 	}
3759 	return nr;
3760 }
3761 
3762 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3763 					     unsigned int lru_mask)
3764 {
3765 	unsigned long nr = 0;
3766 	enum lru_list lru;
3767 
3768 	for_each_lru(lru) {
3769 		if (!(BIT(lru) & lru_mask))
3770 			continue;
3771 		nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3772 	}
3773 	return nr;
3774 }
3775 
3776 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3777 {
3778 	struct numa_stat {
3779 		const char *name;
3780 		unsigned int lru_mask;
3781 	};
3782 
3783 	static const struct numa_stat stats[] = {
3784 		{ "total", LRU_ALL },
3785 		{ "file", LRU_ALL_FILE },
3786 		{ "anon", LRU_ALL_ANON },
3787 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3788 	};
3789 	const struct numa_stat *stat;
3790 	int nid;
3791 	unsigned long nr;
3792 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3793 
3794 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3795 		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3796 		seq_printf(m, "%s=%lu", stat->name, nr);
3797 		for_each_node_state(nid, N_MEMORY) {
3798 			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3799 							  stat->lru_mask);
3800 			seq_printf(m, " N%d=%lu", nid, nr);
3801 		}
3802 		seq_putc(m, '\n');
3803 	}
3804 
3805 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3806 		struct mem_cgroup *iter;
3807 
3808 		nr = 0;
3809 		for_each_mem_cgroup_tree(iter, memcg)
3810 			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3811 		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3812 		for_each_node_state(nid, N_MEMORY) {
3813 			nr = 0;
3814 			for_each_mem_cgroup_tree(iter, memcg)
3815 				nr += mem_cgroup_node_nr_lru_pages(
3816 					iter, nid, stat->lru_mask);
3817 			seq_printf(m, " N%d=%lu", nid, nr);
3818 		}
3819 		seq_putc(m, '\n');
3820 	}
3821 
3822 	return 0;
3823 }
3824 #endif /* CONFIG_NUMA */
3825 
3826 static const unsigned int memcg1_stats[] = {
3827 	MEMCG_CACHE,
3828 	MEMCG_RSS,
3829 	MEMCG_RSS_HUGE,
3830 	NR_SHMEM,
3831 	NR_FILE_MAPPED,
3832 	NR_FILE_DIRTY,
3833 	NR_WRITEBACK,
3834 	MEMCG_SWAP,
3835 };
3836 
3837 static const char *const memcg1_stat_names[] = {
3838 	"cache",
3839 	"rss",
3840 	"rss_huge",
3841 	"shmem",
3842 	"mapped_file",
3843 	"dirty",
3844 	"writeback",
3845 	"swap",
3846 };
3847 
3848 /* Universal VM events cgroup1 shows, original sort order */
3849 static const unsigned int memcg1_events[] = {
3850 	PGPGIN,
3851 	PGPGOUT,
3852 	PGFAULT,
3853 	PGMAJFAULT,
3854 };
3855 
3856 static int memcg_stat_show(struct seq_file *m, void *v)
3857 {
3858 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3859 	unsigned long memory, memsw;
3860 	struct mem_cgroup *mi;
3861 	unsigned int i;
3862 
3863 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3864 
3865 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3866 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3867 			continue;
3868 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3869 			   memcg_page_state_local(memcg, memcg1_stats[i]) *
3870 			   PAGE_SIZE);
3871 	}
3872 
3873 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3874 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
3875 			   memcg_events_local(memcg, memcg1_events[i]));
3876 
3877 	for (i = 0; i < NR_LRU_LISTS; i++)
3878 		seq_printf(m, "%s %lu\n", lru_list_name(i),
3879 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3880 			   PAGE_SIZE);
3881 
3882 	/* Hierarchical information */
3883 	memory = memsw = PAGE_COUNTER_MAX;
3884 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3885 		memory = min(memory, READ_ONCE(mi->memory.max));
3886 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
3887 	}
3888 	seq_printf(m, "hierarchical_memory_limit %llu\n",
3889 		   (u64)memory * PAGE_SIZE);
3890 	if (do_memsw_account())
3891 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3892 			   (u64)memsw * PAGE_SIZE);
3893 
3894 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3895 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3896 			continue;
3897 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3898 			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
3899 			   PAGE_SIZE);
3900 	}
3901 
3902 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3903 		seq_printf(m, "total_%s %llu\n",
3904 			   vm_event_name(memcg1_events[i]),
3905 			   (u64)memcg_events(memcg, memcg1_events[i]));
3906 
3907 	for (i = 0; i < NR_LRU_LISTS; i++)
3908 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
3909 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3910 			   PAGE_SIZE);
3911 
3912 #ifdef CONFIG_DEBUG_VM
3913 	{
3914 		pg_data_t *pgdat;
3915 		struct mem_cgroup_per_node *mz;
3916 		struct zone_reclaim_stat *rstat;
3917 		unsigned long recent_rotated[2] = {0, 0};
3918 		unsigned long recent_scanned[2] = {0, 0};
3919 
3920 		for_each_online_pgdat(pgdat) {
3921 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3922 			rstat = &mz->lruvec.reclaim_stat;
3923 
3924 			recent_rotated[0] += rstat->recent_rotated[0];
3925 			recent_rotated[1] += rstat->recent_rotated[1];
3926 			recent_scanned[0] += rstat->recent_scanned[0];
3927 			recent_scanned[1] += rstat->recent_scanned[1];
3928 		}
3929 		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3930 		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3931 		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3932 		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3933 	}
3934 #endif
3935 
3936 	return 0;
3937 }
3938 
3939 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3940 				      struct cftype *cft)
3941 {
3942 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3943 
3944 	return mem_cgroup_swappiness(memcg);
3945 }
3946 
3947 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3948 				       struct cftype *cft, u64 val)
3949 {
3950 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3951 
3952 	if (val > 100)
3953 		return -EINVAL;
3954 
3955 	if (css->parent)
3956 		memcg->swappiness = val;
3957 	else
3958 		vm_swappiness = val;
3959 
3960 	return 0;
3961 }
3962 
3963 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3964 {
3965 	struct mem_cgroup_threshold_ary *t;
3966 	unsigned long usage;
3967 	int i;
3968 
3969 	rcu_read_lock();
3970 	if (!swap)
3971 		t = rcu_dereference(memcg->thresholds.primary);
3972 	else
3973 		t = rcu_dereference(memcg->memsw_thresholds.primary);
3974 
3975 	if (!t)
3976 		goto unlock;
3977 
3978 	usage = mem_cgroup_usage(memcg, swap);
3979 
3980 	/*
3981 	 * current_threshold points to threshold just below or equal to usage.
3982 	 * If it's not true, a threshold was crossed after last
3983 	 * call of __mem_cgroup_threshold().
3984 	 */
3985 	i = t->current_threshold;
3986 
3987 	/*
3988 	 * Iterate backward over array of thresholds starting from
3989 	 * current_threshold and check if a threshold is crossed.
3990 	 * If none of thresholds below usage is crossed, we read
3991 	 * only one element of the array here.
3992 	 */
3993 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3994 		eventfd_signal(t->entries[i].eventfd, 1);
3995 
3996 	/* i = current_threshold + 1 */
3997 	i++;
3998 
3999 	/*
4000 	 * Iterate forward over array of thresholds starting from
4001 	 * current_threshold+1 and check if a threshold is crossed.
4002 	 * If none of thresholds above usage is crossed, we read
4003 	 * only one element of the array here.
4004 	 */
4005 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4006 		eventfd_signal(t->entries[i].eventfd, 1);
4007 
4008 	/* Update current_threshold */
4009 	t->current_threshold = i - 1;
4010 unlock:
4011 	rcu_read_unlock();
4012 }
4013 
4014 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4015 {
4016 	while (memcg) {
4017 		__mem_cgroup_threshold(memcg, false);
4018 		if (do_memsw_account())
4019 			__mem_cgroup_threshold(memcg, true);
4020 
4021 		memcg = parent_mem_cgroup(memcg);
4022 	}
4023 }
4024 
4025 static int compare_thresholds(const void *a, const void *b)
4026 {
4027 	const struct mem_cgroup_threshold *_a = a;
4028 	const struct mem_cgroup_threshold *_b = b;
4029 
4030 	if (_a->threshold > _b->threshold)
4031 		return 1;
4032 
4033 	if (_a->threshold < _b->threshold)
4034 		return -1;
4035 
4036 	return 0;
4037 }
4038 
4039 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4040 {
4041 	struct mem_cgroup_eventfd_list *ev;
4042 
4043 	spin_lock(&memcg_oom_lock);
4044 
4045 	list_for_each_entry(ev, &memcg->oom_notify, list)
4046 		eventfd_signal(ev->eventfd, 1);
4047 
4048 	spin_unlock(&memcg_oom_lock);
4049 	return 0;
4050 }
4051 
4052 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4053 {
4054 	struct mem_cgroup *iter;
4055 
4056 	for_each_mem_cgroup_tree(iter, memcg)
4057 		mem_cgroup_oom_notify_cb(iter);
4058 }
4059 
4060 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4061 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4062 {
4063 	struct mem_cgroup_thresholds *thresholds;
4064 	struct mem_cgroup_threshold_ary *new;
4065 	unsigned long threshold;
4066 	unsigned long usage;
4067 	int i, size, ret;
4068 
4069 	ret = page_counter_memparse(args, "-1", &threshold);
4070 	if (ret)
4071 		return ret;
4072 
4073 	mutex_lock(&memcg->thresholds_lock);
4074 
4075 	if (type == _MEM) {
4076 		thresholds = &memcg->thresholds;
4077 		usage = mem_cgroup_usage(memcg, false);
4078 	} else if (type == _MEMSWAP) {
4079 		thresholds = &memcg->memsw_thresholds;
4080 		usage = mem_cgroup_usage(memcg, true);
4081 	} else
4082 		BUG();
4083 
4084 	/* Check if a threshold crossed before adding a new one */
4085 	if (thresholds->primary)
4086 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4087 
4088 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4089 
4090 	/* Allocate memory for new array of thresholds */
4091 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4092 	if (!new) {
4093 		ret = -ENOMEM;
4094 		goto unlock;
4095 	}
4096 	new->size = size;
4097 
4098 	/* Copy thresholds (if any) to new array */
4099 	if (thresholds->primary) {
4100 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4101 				sizeof(struct mem_cgroup_threshold));
4102 	}
4103 
4104 	/* Add new threshold */
4105 	new->entries[size - 1].eventfd = eventfd;
4106 	new->entries[size - 1].threshold = threshold;
4107 
4108 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4109 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4110 			compare_thresholds, NULL);
4111 
4112 	/* Find current threshold */
4113 	new->current_threshold = -1;
4114 	for (i = 0; i < size; i++) {
4115 		if (new->entries[i].threshold <= usage) {
4116 			/*
4117 			 * new->current_threshold will not be used until
4118 			 * rcu_assign_pointer(), so it's safe to increment
4119 			 * it here.
4120 			 */
4121 			++new->current_threshold;
4122 		} else
4123 			break;
4124 	}
4125 
4126 	/* Free old spare buffer and save old primary buffer as spare */
4127 	kfree(thresholds->spare);
4128 	thresholds->spare = thresholds->primary;
4129 
4130 	rcu_assign_pointer(thresholds->primary, new);
4131 
4132 	/* To be sure that nobody uses thresholds */
4133 	synchronize_rcu();
4134 
4135 unlock:
4136 	mutex_unlock(&memcg->thresholds_lock);
4137 
4138 	return ret;
4139 }
4140 
4141 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4142 	struct eventfd_ctx *eventfd, const char *args)
4143 {
4144 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4145 }
4146 
4147 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4148 	struct eventfd_ctx *eventfd, const char *args)
4149 {
4150 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4151 }
4152 
4153 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4154 	struct eventfd_ctx *eventfd, enum res_type type)
4155 {
4156 	struct mem_cgroup_thresholds *thresholds;
4157 	struct mem_cgroup_threshold_ary *new;
4158 	unsigned long usage;
4159 	int i, j, size, entries;
4160 
4161 	mutex_lock(&memcg->thresholds_lock);
4162 
4163 	if (type == _MEM) {
4164 		thresholds = &memcg->thresholds;
4165 		usage = mem_cgroup_usage(memcg, false);
4166 	} else if (type == _MEMSWAP) {
4167 		thresholds = &memcg->memsw_thresholds;
4168 		usage = mem_cgroup_usage(memcg, true);
4169 	} else
4170 		BUG();
4171 
4172 	if (!thresholds->primary)
4173 		goto unlock;
4174 
4175 	/* Check if a threshold crossed before removing */
4176 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4177 
4178 	/* Calculate new number of threshold */
4179 	size = entries = 0;
4180 	for (i = 0; i < thresholds->primary->size; i++) {
4181 		if (thresholds->primary->entries[i].eventfd != eventfd)
4182 			size++;
4183 		else
4184 			entries++;
4185 	}
4186 
4187 	new = thresholds->spare;
4188 
4189 	/* If no items related to eventfd have been cleared, nothing to do */
4190 	if (!entries)
4191 		goto unlock;
4192 
4193 	/* Set thresholds array to NULL if we don't have thresholds */
4194 	if (!size) {
4195 		kfree(new);
4196 		new = NULL;
4197 		goto swap_buffers;
4198 	}
4199 
4200 	new->size = size;
4201 
4202 	/* Copy thresholds and find current threshold */
4203 	new->current_threshold = -1;
4204 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4205 		if (thresholds->primary->entries[i].eventfd == eventfd)
4206 			continue;
4207 
4208 		new->entries[j] = thresholds->primary->entries[i];
4209 		if (new->entries[j].threshold <= usage) {
4210 			/*
4211 			 * new->current_threshold will not be used
4212 			 * until rcu_assign_pointer(), so it's safe to increment
4213 			 * it here.
4214 			 */
4215 			++new->current_threshold;
4216 		}
4217 		j++;
4218 	}
4219 
4220 swap_buffers:
4221 	/* Swap primary and spare array */
4222 	thresholds->spare = thresholds->primary;
4223 
4224 	rcu_assign_pointer(thresholds->primary, new);
4225 
4226 	/* To be sure that nobody uses thresholds */
4227 	synchronize_rcu();
4228 
4229 	/* If all events are unregistered, free the spare array */
4230 	if (!new) {
4231 		kfree(thresholds->spare);
4232 		thresholds->spare = NULL;
4233 	}
4234 unlock:
4235 	mutex_unlock(&memcg->thresholds_lock);
4236 }
4237 
4238 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4239 	struct eventfd_ctx *eventfd)
4240 {
4241 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4242 }
4243 
4244 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4245 	struct eventfd_ctx *eventfd)
4246 {
4247 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4248 }
4249 
4250 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4251 	struct eventfd_ctx *eventfd, const char *args)
4252 {
4253 	struct mem_cgroup_eventfd_list *event;
4254 
4255 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4256 	if (!event)
4257 		return -ENOMEM;
4258 
4259 	spin_lock(&memcg_oom_lock);
4260 
4261 	event->eventfd = eventfd;
4262 	list_add(&event->list, &memcg->oom_notify);
4263 
4264 	/* already in OOM ? */
4265 	if (memcg->under_oom)
4266 		eventfd_signal(eventfd, 1);
4267 	spin_unlock(&memcg_oom_lock);
4268 
4269 	return 0;
4270 }
4271 
4272 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4273 	struct eventfd_ctx *eventfd)
4274 {
4275 	struct mem_cgroup_eventfd_list *ev, *tmp;
4276 
4277 	spin_lock(&memcg_oom_lock);
4278 
4279 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4280 		if (ev->eventfd == eventfd) {
4281 			list_del(&ev->list);
4282 			kfree(ev);
4283 		}
4284 	}
4285 
4286 	spin_unlock(&memcg_oom_lock);
4287 }
4288 
4289 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4290 {
4291 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4292 
4293 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4294 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4295 	seq_printf(sf, "oom_kill %lu\n",
4296 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4297 	return 0;
4298 }
4299 
4300 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4301 	struct cftype *cft, u64 val)
4302 {
4303 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4304 
4305 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4306 	if (!css->parent || !((val == 0) || (val == 1)))
4307 		return -EINVAL;
4308 
4309 	memcg->oom_kill_disable = val;
4310 	if (!val)
4311 		memcg_oom_recover(memcg);
4312 
4313 	return 0;
4314 }
4315 
4316 #ifdef CONFIG_CGROUP_WRITEBACK
4317 
4318 #include <trace/events/writeback.h>
4319 
4320 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4321 {
4322 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4323 }
4324 
4325 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4326 {
4327 	wb_domain_exit(&memcg->cgwb_domain);
4328 }
4329 
4330 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4331 {
4332 	wb_domain_size_changed(&memcg->cgwb_domain);
4333 }
4334 
4335 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4336 {
4337 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4338 
4339 	if (!memcg->css.parent)
4340 		return NULL;
4341 
4342 	return &memcg->cgwb_domain;
4343 }
4344 
4345 /*
4346  * idx can be of type enum memcg_stat_item or node_stat_item.
4347  * Keep in sync with memcg_exact_page().
4348  */
4349 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4350 {
4351 	long x = atomic_long_read(&memcg->vmstats[idx]);
4352 	int cpu;
4353 
4354 	for_each_online_cpu(cpu)
4355 		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4356 	if (x < 0)
4357 		x = 0;
4358 	return x;
4359 }
4360 
4361 /**
4362  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4363  * @wb: bdi_writeback in question
4364  * @pfilepages: out parameter for number of file pages
4365  * @pheadroom: out parameter for number of allocatable pages according to memcg
4366  * @pdirty: out parameter for number of dirty pages
4367  * @pwriteback: out parameter for number of pages under writeback
4368  *
4369  * Determine the numbers of file, headroom, dirty, and writeback pages in
4370  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4371  * is a bit more involved.
4372  *
4373  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4374  * headroom is calculated as the lowest headroom of itself and the
4375  * ancestors.  Note that this doesn't consider the actual amount of
4376  * available memory in the system.  The caller should further cap
4377  * *@pheadroom accordingly.
4378  */
4379 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4380 			 unsigned long *pheadroom, unsigned long *pdirty,
4381 			 unsigned long *pwriteback)
4382 {
4383 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4384 	struct mem_cgroup *parent;
4385 
4386 	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4387 
4388 	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4389 	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4390 			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4391 	*pheadroom = PAGE_COUNTER_MAX;
4392 
4393 	while ((parent = parent_mem_cgroup(memcg))) {
4394 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4395 					    READ_ONCE(memcg->memory.high));
4396 		unsigned long used = page_counter_read(&memcg->memory);
4397 
4398 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4399 		memcg = parent;
4400 	}
4401 }
4402 
4403 /*
4404  * Foreign dirty flushing
4405  *
4406  * There's an inherent mismatch between memcg and writeback.  The former
4407  * trackes ownership per-page while the latter per-inode.  This was a
4408  * deliberate design decision because honoring per-page ownership in the
4409  * writeback path is complicated, may lead to higher CPU and IO overheads
4410  * and deemed unnecessary given that write-sharing an inode across
4411  * different cgroups isn't a common use-case.
4412  *
4413  * Combined with inode majority-writer ownership switching, this works well
4414  * enough in most cases but there are some pathological cases.  For
4415  * example, let's say there are two cgroups A and B which keep writing to
4416  * different but confined parts of the same inode.  B owns the inode and
4417  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4418  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4419  * triggering background writeback.  A will be slowed down without a way to
4420  * make writeback of the dirty pages happen.
4421  *
4422  * Conditions like the above can lead to a cgroup getting repatedly and
4423  * severely throttled after making some progress after each
4424  * dirty_expire_interval while the underyling IO device is almost
4425  * completely idle.
4426  *
4427  * Solving this problem completely requires matching the ownership tracking
4428  * granularities between memcg and writeback in either direction.  However,
4429  * the more egregious behaviors can be avoided by simply remembering the
4430  * most recent foreign dirtying events and initiating remote flushes on
4431  * them when local writeback isn't enough to keep the memory clean enough.
4432  *
4433  * The following two functions implement such mechanism.  When a foreign
4434  * page - a page whose memcg and writeback ownerships don't match - is
4435  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4436  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4437  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4438  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4439  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4440  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4441  * limited to MEMCG_CGWB_FRN_CNT.
4442  *
4443  * The mechanism only remembers IDs and doesn't hold any object references.
4444  * As being wrong occasionally doesn't matter, updates and accesses to the
4445  * records are lockless and racy.
4446  */
4447 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4448 					     struct bdi_writeback *wb)
4449 {
4450 	struct mem_cgroup *memcg = page->mem_cgroup;
4451 	struct memcg_cgwb_frn *frn;
4452 	u64 now = get_jiffies_64();
4453 	u64 oldest_at = now;
4454 	int oldest = -1;
4455 	int i;
4456 
4457 	trace_track_foreign_dirty(page, wb);
4458 
4459 	/*
4460 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4461 	 * using it.  If not replace the oldest one which isn't being
4462 	 * written out.
4463 	 */
4464 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4465 		frn = &memcg->cgwb_frn[i];
4466 		if (frn->bdi_id == wb->bdi->id &&
4467 		    frn->memcg_id == wb->memcg_css->id)
4468 			break;
4469 		if (time_before64(frn->at, oldest_at) &&
4470 		    atomic_read(&frn->done.cnt) == 1) {
4471 			oldest = i;
4472 			oldest_at = frn->at;
4473 		}
4474 	}
4475 
4476 	if (i < MEMCG_CGWB_FRN_CNT) {
4477 		/*
4478 		 * Re-using an existing one.  Update timestamp lazily to
4479 		 * avoid making the cacheline hot.  We want them to be
4480 		 * reasonably up-to-date and significantly shorter than
4481 		 * dirty_expire_interval as that's what expires the record.
4482 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4483 		 */
4484 		unsigned long update_intv =
4485 			min_t(unsigned long, HZ,
4486 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4487 
4488 		if (time_before64(frn->at, now - update_intv))
4489 			frn->at = now;
4490 	} else if (oldest >= 0) {
4491 		/* replace the oldest free one */
4492 		frn = &memcg->cgwb_frn[oldest];
4493 		frn->bdi_id = wb->bdi->id;
4494 		frn->memcg_id = wb->memcg_css->id;
4495 		frn->at = now;
4496 	}
4497 }
4498 
4499 /* issue foreign writeback flushes for recorded foreign dirtying events */
4500 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4501 {
4502 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4503 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4504 	u64 now = jiffies_64;
4505 	int i;
4506 
4507 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4508 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4509 
4510 		/*
4511 		 * If the record is older than dirty_expire_interval,
4512 		 * writeback on it has already started.  No need to kick it
4513 		 * off again.  Also, don't start a new one if there's
4514 		 * already one in flight.
4515 		 */
4516 		if (time_after64(frn->at, now - intv) &&
4517 		    atomic_read(&frn->done.cnt) == 1) {
4518 			frn->at = 0;
4519 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4520 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4521 					       WB_REASON_FOREIGN_FLUSH,
4522 					       &frn->done);
4523 		}
4524 	}
4525 }
4526 
4527 #else	/* CONFIG_CGROUP_WRITEBACK */
4528 
4529 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4530 {
4531 	return 0;
4532 }
4533 
4534 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4535 {
4536 }
4537 
4538 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4539 {
4540 }
4541 
4542 #endif	/* CONFIG_CGROUP_WRITEBACK */
4543 
4544 /*
4545  * DO NOT USE IN NEW FILES.
4546  *
4547  * "cgroup.event_control" implementation.
4548  *
4549  * This is way over-engineered.  It tries to support fully configurable
4550  * events for each user.  Such level of flexibility is completely
4551  * unnecessary especially in the light of the planned unified hierarchy.
4552  *
4553  * Please deprecate this and replace with something simpler if at all
4554  * possible.
4555  */
4556 
4557 /*
4558  * Unregister event and free resources.
4559  *
4560  * Gets called from workqueue.
4561  */
4562 static void memcg_event_remove(struct work_struct *work)
4563 {
4564 	struct mem_cgroup_event *event =
4565 		container_of(work, struct mem_cgroup_event, remove);
4566 	struct mem_cgroup *memcg = event->memcg;
4567 
4568 	remove_wait_queue(event->wqh, &event->wait);
4569 
4570 	event->unregister_event(memcg, event->eventfd);
4571 
4572 	/* Notify userspace the event is going away. */
4573 	eventfd_signal(event->eventfd, 1);
4574 
4575 	eventfd_ctx_put(event->eventfd);
4576 	kfree(event);
4577 	css_put(&memcg->css);
4578 }
4579 
4580 /*
4581  * Gets called on EPOLLHUP on eventfd when user closes it.
4582  *
4583  * Called with wqh->lock held and interrupts disabled.
4584  */
4585 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4586 			    int sync, void *key)
4587 {
4588 	struct mem_cgroup_event *event =
4589 		container_of(wait, struct mem_cgroup_event, wait);
4590 	struct mem_cgroup *memcg = event->memcg;
4591 	__poll_t flags = key_to_poll(key);
4592 
4593 	if (flags & EPOLLHUP) {
4594 		/*
4595 		 * If the event has been detached at cgroup removal, we
4596 		 * can simply return knowing the other side will cleanup
4597 		 * for us.
4598 		 *
4599 		 * We can't race against event freeing since the other
4600 		 * side will require wqh->lock via remove_wait_queue(),
4601 		 * which we hold.
4602 		 */
4603 		spin_lock(&memcg->event_list_lock);
4604 		if (!list_empty(&event->list)) {
4605 			list_del_init(&event->list);
4606 			/*
4607 			 * We are in atomic context, but cgroup_event_remove()
4608 			 * may sleep, so we have to call it in workqueue.
4609 			 */
4610 			schedule_work(&event->remove);
4611 		}
4612 		spin_unlock(&memcg->event_list_lock);
4613 	}
4614 
4615 	return 0;
4616 }
4617 
4618 static void memcg_event_ptable_queue_proc(struct file *file,
4619 		wait_queue_head_t *wqh, poll_table *pt)
4620 {
4621 	struct mem_cgroup_event *event =
4622 		container_of(pt, struct mem_cgroup_event, pt);
4623 
4624 	event->wqh = wqh;
4625 	add_wait_queue(wqh, &event->wait);
4626 }
4627 
4628 /*
4629  * DO NOT USE IN NEW FILES.
4630  *
4631  * Parse input and register new cgroup event handler.
4632  *
4633  * Input must be in format '<event_fd> <control_fd> <args>'.
4634  * Interpretation of args is defined by control file implementation.
4635  */
4636 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4637 					 char *buf, size_t nbytes, loff_t off)
4638 {
4639 	struct cgroup_subsys_state *css = of_css(of);
4640 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4641 	struct mem_cgroup_event *event;
4642 	struct cgroup_subsys_state *cfile_css;
4643 	unsigned int efd, cfd;
4644 	struct fd efile;
4645 	struct fd cfile;
4646 	const char *name;
4647 	char *endp;
4648 	int ret;
4649 
4650 	buf = strstrip(buf);
4651 
4652 	efd = simple_strtoul(buf, &endp, 10);
4653 	if (*endp != ' ')
4654 		return -EINVAL;
4655 	buf = endp + 1;
4656 
4657 	cfd = simple_strtoul(buf, &endp, 10);
4658 	if ((*endp != ' ') && (*endp != '\0'))
4659 		return -EINVAL;
4660 	buf = endp + 1;
4661 
4662 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4663 	if (!event)
4664 		return -ENOMEM;
4665 
4666 	event->memcg = memcg;
4667 	INIT_LIST_HEAD(&event->list);
4668 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4669 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4670 	INIT_WORK(&event->remove, memcg_event_remove);
4671 
4672 	efile = fdget(efd);
4673 	if (!efile.file) {
4674 		ret = -EBADF;
4675 		goto out_kfree;
4676 	}
4677 
4678 	event->eventfd = eventfd_ctx_fileget(efile.file);
4679 	if (IS_ERR(event->eventfd)) {
4680 		ret = PTR_ERR(event->eventfd);
4681 		goto out_put_efile;
4682 	}
4683 
4684 	cfile = fdget(cfd);
4685 	if (!cfile.file) {
4686 		ret = -EBADF;
4687 		goto out_put_eventfd;
4688 	}
4689 
4690 	/* the process need read permission on control file */
4691 	/* AV: shouldn't we check that it's been opened for read instead? */
4692 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4693 	if (ret < 0)
4694 		goto out_put_cfile;
4695 
4696 	/*
4697 	 * Determine the event callbacks and set them in @event.  This used
4698 	 * to be done via struct cftype but cgroup core no longer knows
4699 	 * about these events.  The following is crude but the whole thing
4700 	 * is for compatibility anyway.
4701 	 *
4702 	 * DO NOT ADD NEW FILES.
4703 	 */
4704 	name = cfile.file->f_path.dentry->d_name.name;
4705 
4706 	if (!strcmp(name, "memory.usage_in_bytes")) {
4707 		event->register_event = mem_cgroup_usage_register_event;
4708 		event->unregister_event = mem_cgroup_usage_unregister_event;
4709 	} else if (!strcmp(name, "memory.oom_control")) {
4710 		event->register_event = mem_cgroup_oom_register_event;
4711 		event->unregister_event = mem_cgroup_oom_unregister_event;
4712 	} else if (!strcmp(name, "memory.pressure_level")) {
4713 		event->register_event = vmpressure_register_event;
4714 		event->unregister_event = vmpressure_unregister_event;
4715 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4716 		event->register_event = memsw_cgroup_usage_register_event;
4717 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4718 	} else {
4719 		ret = -EINVAL;
4720 		goto out_put_cfile;
4721 	}
4722 
4723 	/*
4724 	 * Verify @cfile should belong to @css.  Also, remaining events are
4725 	 * automatically removed on cgroup destruction but the removal is
4726 	 * asynchronous, so take an extra ref on @css.
4727 	 */
4728 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4729 					       &memory_cgrp_subsys);
4730 	ret = -EINVAL;
4731 	if (IS_ERR(cfile_css))
4732 		goto out_put_cfile;
4733 	if (cfile_css != css) {
4734 		css_put(cfile_css);
4735 		goto out_put_cfile;
4736 	}
4737 
4738 	ret = event->register_event(memcg, event->eventfd, buf);
4739 	if (ret)
4740 		goto out_put_css;
4741 
4742 	vfs_poll(efile.file, &event->pt);
4743 
4744 	spin_lock(&memcg->event_list_lock);
4745 	list_add(&event->list, &memcg->event_list);
4746 	spin_unlock(&memcg->event_list_lock);
4747 
4748 	fdput(cfile);
4749 	fdput(efile);
4750 
4751 	return nbytes;
4752 
4753 out_put_css:
4754 	css_put(css);
4755 out_put_cfile:
4756 	fdput(cfile);
4757 out_put_eventfd:
4758 	eventfd_ctx_put(event->eventfd);
4759 out_put_efile:
4760 	fdput(efile);
4761 out_kfree:
4762 	kfree(event);
4763 
4764 	return ret;
4765 }
4766 
4767 static struct cftype mem_cgroup_legacy_files[] = {
4768 	{
4769 		.name = "usage_in_bytes",
4770 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4771 		.read_u64 = mem_cgroup_read_u64,
4772 	},
4773 	{
4774 		.name = "max_usage_in_bytes",
4775 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4776 		.write = mem_cgroup_reset,
4777 		.read_u64 = mem_cgroup_read_u64,
4778 	},
4779 	{
4780 		.name = "limit_in_bytes",
4781 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4782 		.write = mem_cgroup_write,
4783 		.read_u64 = mem_cgroup_read_u64,
4784 	},
4785 	{
4786 		.name = "soft_limit_in_bytes",
4787 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4788 		.write = mem_cgroup_write,
4789 		.read_u64 = mem_cgroup_read_u64,
4790 	},
4791 	{
4792 		.name = "failcnt",
4793 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4794 		.write = mem_cgroup_reset,
4795 		.read_u64 = mem_cgroup_read_u64,
4796 	},
4797 	{
4798 		.name = "stat",
4799 		.seq_show = memcg_stat_show,
4800 	},
4801 	{
4802 		.name = "force_empty",
4803 		.write = mem_cgroup_force_empty_write,
4804 	},
4805 	{
4806 		.name = "use_hierarchy",
4807 		.write_u64 = mem_cgroup_hierarchy_write,
4808 		.read_u64 = mem_cgroup_hierarchy_read,
4809 	},
4810 	{
4811 		.name = "cgroup.event_control",		/* XXX: for compat */
4812 		.write = memcg_write_event_control,
4813 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4814 	},
4815 	{
4816 		.name = "swappiness",
4817 		.read_u64 = mem_cgroup_swappiness_read,
4818 		.write_u64 = mem_cgroup_swappiness_write,
4819 	},
4820 	{
4821 		.name = "move_charge_at_immigrate",
4822 		.read_u64 = mem_cgroup_move_charge_read,
4823 		.write_u64 = mem_cgroup_move_charge_write,
4824 	},
4825 	{
4826 		.name = "oom_control",
4827 		.seq_show = mem_cgroup_oom_control_read,
4828 		.write_u64 = mem_cgroup_oom_control_write,
4829 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4830 	},
4831 	{
4832 		.name = "pressure_level",
4833 	},
4834 #ifdef CONFIG_NUMA
4835 	{
4836 		.name = "numa_stat",
4837 		.seq_show = memcg_numa_stat_show,
4838 	},
4839 #endif
4840 	{
4841 		.name = "kmem.limit_in_bytes",
4842 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4843 		.write = mem_cgroup_write,
4844 		.read_u64 = mem_cgroup_read_u64,
4845 	},
4846 	{
4847 		.name = "kmem.usage_in_bytes",
4848 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4849 		.read_u64 = mem_cgroup_read_u64,
4850 	},
4851 	{
4852 		.name = "kmem.failcnt",
4853 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4854 		.write = mem_cgroup_reset,
4855 		.read_u64 = mem_cgroup_read_u64,
4856 	},
4857 	{
4858 		.name = "kmem.max_usage_in_bytes",
4859 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4860 		.write = mem_cgroup_reset,
4861 		.read_u64 = mem_cgroup_read_u64,
4862 	},
4863 #if defined(CONFIG_MEMCG_KMEM) && \
4864 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4865 	{
4866 		.name = "kmem.slabinfo",
4867 		.seq_start = memcg_slab_start,
4868 		.seq_next = memcg_slab_next,
4869 		.seq_stop = memcg_slab_stop,
4870 		.seq_show = memcg_slab_show,
4871 	},
4872 #endif
4873 	{
4874 		.name = "kmem.tcp.limit_in_bytes",
4875 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4876 		.write = mem_cgroup_write,
4877 		.read_u64 = mem_cgroup_read_u64,
4878 	},
4879 	{
4880 		.name = "kmem.tcp.usage_in_bytes",
4881 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4882 		.read_u64 = mem_cgroup_read_u64,
4883 	},
4884 	{
4885 		.name = "kmem.tcp.failcnt",
4886 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4887 		.write = mem_cgroup_reset,
4888 		.read_u64 = mem_cgroup_read_u64,
4889 	},
4890 	{
4891 		.name = "kmem.tcp.max_usage_in_bytes",
4892 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4893 		.write = mem_cgroup_reset,
4894 		.read_u64 = mem_cgroup_read_u64,
4895 	},
4896 	{ },	/* terminate */
4897 };
4898 
4899 /*
4900  * Private memory cgroup IDR
4901  *
4902  * Swap-out records and page cache shadow entries need to store memcg
4903  * references in constrained space, so we maintain an ID space that is
4904  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4905  * memory-controlled cgroups to 64k.
4906  *
4907  * However, there usually are many references to the oflline CSS after
4908  * the cgroup has been destroyed, such as page cache or reclaimable
4909  * slab objects, that don't need to hang on to the ID. We want to keep
4910  * those dead CSS from occupying IDs, or we might quickly exhaust the
4911  * relatively small ID space and prevent the creation of new cgroups
4912  * even when there are much fewer than 64k cgroups - possibly none.
4913  *
4914  * Maintain a private 16-bit ID space for memcg, and allow the ID to
4915  * be freed and recycled when it's no longer needed, which is usually
4916  * when the CSS is offlined.
4917  *
4918  * The only exception to that are records of swapped out tmpfs/shmem
4919  * pages that need to be attributed to live ancestors on swapin. But
4920  * those references are manageable from userspace.
4921  */
4922 
4923 static DEFINE_IDR(mem_cgroup_idr);
4924 
4925 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4926 {
4927 	if (memcg->id.id > 0) {
4928 		idr_remove(&mem_cgroup_idr, memcg->id.id);
4929 		memcg->id.id = 0;
4930 	}
4931 }
4932 
4933 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
4934 						  unsigned int n)
4935 {
4936 	refcount_add(n, &memcg->id.ref);
4937 }
4938 
4939 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4940 {
4941 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
4942 		mem_cgroup_id_remove(memcg);
4943 
4944 		/* Memcg ID pins CSS */
4945 		css_put(&memcg->css);
4946 	}
4947 }
4948 
4949 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4950 {
4951 	mem_cgroup_id_put_many(memcg, 1);
4952 }
4953 
4954 /**
4955  * mem_cgroup_from_id - look up a memcg from a memcg id
4956  * @id: the memcg id to look up
4957  *
4958  * Caller must hold rcu_read_lock().
4959  */
4960 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4961 {
4962 	WARN_ON_ONCE(!rcu_read_lock_held());
4963 	return idr_find(&mem_cgroup_idr, id);
4964 }
4965 
4966 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4967 {
4968 	struct mem_cgroup_per_node *pn;
4969 	int tmp = node;
4970 	/*
4971 	 * This routine is called against possible nodes.
4972 	 * But it's BUG to call kmalloc() against offline node.
4973 	 *
4974 	 * TODO: this routine can waste much memory for nodes which will
4975 	 *       never be onlined. It's better to use memory hotplug callback
4976 	 *       function.
4977 	 */
4978 	if (!node_state(node, N_NORMAL_MEMORY))
4979 		tmp = -1;
4980 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4981 	if (!pn)
4982 		return 1;
4983 
4984 	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
4985 	if (!pn->lruvec_stat_local) {
4986 		kfree(pn);
4987 		return 1;
4988 	}
4989 
4990 	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4991 	if (!pn->lruvec_stat_cpu) {
4992 		free_percpu(pn->lruvec_stat_local);
4993 		kfree(pn);
4994 		return 1;
4995 	}
4996 
4997 	lruvec_init(&pn->lruvec);
4998 	pn->usage_in_excess = 0;
4999 	pn->on_tree = false;
5000 	pn->memcg = memcg;
5001 
5002 	memcg->nodeinfo[node] = pn;
5003 	return 0;
5004 }
5005 
5006 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5007 {
5008 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5009 
5010 	if (!pn)
5011 		return;
5012 
5013 	free_percpu(pn->lruvec_stat_cpu);
5014 	free_percpu(pn->lruvec_stat_local);
5015 	kfree(pn);
5016 }
5017 
5018 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5019 {
5020 	int node;
5021 
5022 	for_each_node(node)
5023 		free_mem_cgroup_per_node_info(memcg, node);
5024 	free_percpu(memcg->vmstats_percpu);
5025 	free_percpu(memcg->vmstats_local);
5026 	kfree(memcg);
5027 }
5028 
5029 static void mem_cgroup_free(struct mem_cgroup *memcg)
5030 {
5031 	memcg_wb_domain_exit(memcg);
5032 	/*
5033 	 * Flush percpu vmstats and vmevents to guarantee the value correctness
5034 	 * on parent's and all ancestor levels.
5035 	 */
5036 	memcg_flush_percpu_vmstats(memcg);
5037 	memcg_flush_percpu_vmevents(memcg);
5038 	__mem_cgroup_free(memcg);
5039 }
5040 
5041 static struct mem_cgroup *mem_cgroup_alloc(void)
5042 {
5043 	struct mem_cgroup *memcg;
5044 	unsigned int size;
5045 	int node;
5046 	int __maybe_unused i;
5047 	long error = -ENOMEM;
5048 
5049 	size = sizeof(struct mem_cgroup);
5050 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5051 
5052 	memcg = kzalloc(size, GFP_KERNEL);
5053 	if (!memcg)
5054 		return ERR_PTR(error);
5055 
5056 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5057 				 1, MEM_CGROUP_ID_MAX,
5058 				 GFP_KERNEL);
5059 	if (memcg->id.id < 0) {
5060 		error = memcg->id.id;
5061 		goto fail;
5062 	}
5063 
5064 	memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
5065 	if (!memcg->vmstats_local)
5066 		goto fail;
5067 
5068 	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
5069 	if (!memcg->vmstats_percpu)
5070 		goto fail;
5071 
5072 	for_each_node(node)
5073 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5074 			goto fail;
5075 
5076 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5077 		goto fail;
5078 
5079 	INIT_WORK(&memcg->high_work, high_work_func);
5080 	INIT_LIST_HEAD(&memcg->oom_notify);
5081 	mutex_init(&memcg->thresholds_lock);
5082 	spin_lock_init(&memcg->move_lock);
5083 	vmpressure_init(&memcg->vmpressure);
5084 	INIT_LIST_HEAD(&memcg->event_list);
5085 	spin_lock_init(&memcg->event_list_lock);
5086 	memcg->socket_pressure = jiffies;
5087 #ifdef CONFIG_MEMCG_KMEM
5088 	memcg->kmemcg_id = -1;
5089 #endif
5090 #ifdef CONFIG_CGROUP_WRITEBACK
5091 	INIT_LIST_HEAD(&memcg->cgwb_list);
5092 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5093 		memcg->cgwb_frn[i].done =
5094 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5095 #endif
5096 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5097 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5098 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5099 	memcg->deferred_split_queue.split_queue_len = 0;
5100 #endif
5101 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5102 	return memcg;
5103 fail:
5104 	mem_cgroup_id_remove(memcg);
5105 	__mem_cgroup_free(memcg);
5106 	return ERR_PTR(error);
5107 }
5108 
5109 static struct cgroup_subsys_state * __ref
5110 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5111 {
5112 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5113 	struct mem_cgroup *memcg;
5114 	long error = -ENOMEM;
5115 
5116 	memcg = mem_cgroup_alloc();
5117 	if (IS_ERR(memcg))
5118 		return ERR_CAST(memcg);
5119 
5120 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5121 	memcg->soft_limit = PAGE_COUNTER_MAX;
5122 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5123 	if (parent) {
5124 		memcg->swappiness = mem_cgroup_swappiness(parent);
5125 		memcg->oom_kill_disable = parent->oom_kill_disable;
5126 	}
5127 	if (parent && parent->use_hierarchy) {
5128 		memcg->use_hierarchy = true;
5129 		page_counter_init(&memcg->memory, &parent->memory);
5130 		page_counter_init(&memcg->swap, &parent->swap);
5131 		page_counter_init(&memcg->memsw, &parent->memsw);
5132 		page_counter_init(&memcg->kmem, &parent->kmem);
5133 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5134 	} else {
5135 		page_counter_init(&memcg->memory, NULL);
5136 		page_counter_init(&memcg->swap, NULL);
5137 		page_counter_init(&memcg->memsw, NULL);
5138 		page_counter_init(&memcg->kmem, NULL);
5139 		page_counter_init(&memcg->tcpmem, NULL);
5140 		/*
5141 		 * Deeper hierachy with use_hierarchy == false doesn't make
5142 		 * much sense so let cgroup subsystem know about this
5143 		 * unfortunate state in our controller.
5144 		 */
5145 		if (parent != root_mem_cgroup)
5146 			memory_cgrp_subsys.broken_hierarchy = true;
5147 	}
5148 
5149 	/* The following stuff does not apply to the root */
5150 	if (!parent) {
5151 #ifdef CONFIG_MEMCG_KMEM
5152 		INIT_LIST_HEAD(&memcg->kmem_caches);
5153 #endif
5154 		root_mem_cgroup = memcg;
5155 		return &memcg->css;
5156 	}
5157 
5158 	error = memcg_online_kmem(memcg);
5159 	if (error)
5160 		goto fail;
5161 
5162 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5163 		static_branch_inc(&memcg_sockets_enabled_key);
5164 
5165 	return &memcg->css;
5166 fail:
5167 	mem_cgroup_id_remove(memcg);
5168 	mem_cgroup_free(memcg);
5169 	return ERR_PTR(error);
5170 }
5171 
5172 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5173 {
5174 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5175 
5176 	/*
5177 	 * A memcg must be visible for memcg_expand_shrinker_maps()
5178 	 * by the time the maps are allocated. So, we allocate maps
5179 	 * here, when for_each_mem_cgroup() can't skip it.
5180 	 */
5181 	if (memcg_alloc_shrinker_maps(memcg)) {
5182 		mem_cgroup_id_remove(memcg);
5183 		return -ENOMEM;
5184 	}
5185 
5186 	/* Online state pins memcg ID, memcg ID pins CSS */
5187 	refcount_set(&memcg->id.ref, 1);
5188 	css_get(css);
5189 	return 0;
5190 }
5191 
5192 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5193 {
5194 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5195 	struct mem_cgroup_event *event, *tmp;
5196 
5197 	/*
5198 	 * Unregister events and notify userspace.
5199 	 * Notify userspace about cgroup removing only after rmdir of cgroup
5200 	 * directory to avoid race between userspace and kernelspace.
5201 	 */
5202 	spin_lock(&memcg->event_list_lock);
5203 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5204 		list_del_init(&event->list);
5205 		schedule_work(&event->remove);
5206 	}
5207 	spin_unlock(&memcg->event_list_lock);
5208 
5209 	page_counter_set_min(&memcg->memory, 0);
5210 	page_counter_set_low(&memcg->memory, 0);
5211 
5212 	memcg_offline_kmem(memcg);
5213 	wb_memcg_offline(memcg);
5214 
5215 	drain_all_stock(memcg);
5216 
5217 	mem_cgroup_id_put(memcg);
5218 }
5219 
5220 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5221 {
5222 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5223 
5224 	invalidate_reclaim_iterators(memcg);
5225 }
5226 
5227 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5228 {
5229 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5230 	int __maybe_unused i;
5231 
5232 #ifdef CONFIG_CGROUP_WRITEBACK
5233 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5234 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5235 #endif
5236 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5237 		static_branch_dec(&memcg_sockets_enabled_key);
5238 
5239 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5240 		static_branch_dec(&memcg_sockets_enabled_key);
5241 
5242 	vmpressure_cleanup(&memcg->vmpressure);
5243 	cancel_work_sync(&memcg->high_work);
5244 	mem_cgroup_remove_from_trees(memcg);
5245 	memcg_free_shrinker_maps(memcg);
5246 	memcg_free_kmem(memcg);
5247 	mem_cgroup_free(memcg);
5248 }
5249 
5250 /**
5251  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5252  * @css: the target css
5253  *
5254  * Reset the states of the mem_cgroup associated with @css.  This is
5255  * invoked when the userland requests disabling on the default hierarchy
5256  * but the memcg is pinned through dependency.  The memcg should stop
5257  * applying policies and should revert to the vanilla state as it may be
5258  * made visible again.
5259  *
5260  * The current implementation only resets the essential configurations.
5261  * This needs to be expanded to cover all the visible parts.
5262  */
5263 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5264 {
5265 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5266 
5267 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5268 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5269 	page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
5270 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5271 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5272 	page_counter_set_min(&memcg->memory, 0);
5273 	page_counter_set_low(&memcg->memory, 0);
5274 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5275 	memcg->soft_limit = PAGE_COUNTER_MAX;
5276 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5277 	memcg_wb_domain_size_changed(memcg);
5278 }
5279 
5280 #ifdef CONFIG_MMU
5281 /* Handlers for move charge at task migration. */
5282 static int mem_cgroup_do_precharge(unsigned long count)
5283 {
5284 	int ret;
5285 
5286 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5287 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5288 	if (!ret) {
5289 		mc.precharge += count;
5290 		return ret;
5291 	}
5292 
5293 	/* Try charges one by one with reclaim, but do not retry */
5294 	while (count--) {
5295 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5296 		if (ret)
5297 			return ret;
5298 		mc.precharge++;
5299 		cond_resched();
5300 	}
5301 	return 0;
5302 }
5303 
5304 union mc_target {
5305 	struct page	*page;
5306 	swp_entry_t	ent;
5307 };
5308 
5309 enum mc_target_type {
5310 	MC_TARGET_NONE = 0,
5311 	MC_TARGET_PAGE,
5312 	MC_TARGET_SWAP,
5313 	MC_TARGET_DEVICE,
5314 };
5315 
5316 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5317 						unsigned long addr, pte_t ptent)
5318 {
5319 	struct page *page = vm_normal_page(vma, addr, ptent);
5320 
5321 	if (!page || !page_mapped(page))
5322 		return NULL;
5323 	if (PageAnon(page)) {
5324 		if (!(mc.flags & MOVE_ANON))
5325 			return NULL;
5326 	} else {
5327 		if (!(mc.flags & MOVE_FILE))
5328 			return NULL;
5329 	}
5330 	if (!get_page_unless_zero(page))
5331 		return NULL;
5332 
5333 	return page;
5334 }
5335 
5336 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5337 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5338 			pte_t ptent, swp_entry_t *entry)
5339 {
5340 	struct page *page = NULL;
5341 	swp_entry_t ent = pte_to_swp_entry(ptent);
5342 
5343 	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
5344 		return NULL;
5345 
5346 	/*
5347 	 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to
5348 	 * a device and because they are not accessible by CPU they are store
5349 	 * as special swap entry in the CPU page table.
5350 	 */
5351 	if (is_device_private_entry(ent)) {
5352 		page = device_private_entry_to_page(ent);
5353 		/*
5354 		 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have
5355 		 * a refcount of 1 when free (unlike normal page)
5356 		 */
5357 		if (!page_ref_add_unless(page, 1, 1))
5358 			return NULL;
5359 		return page;
5360 	}
5361 
5362 	/*
5363 	 * Because lookup_swap_cache() updates some statistics counter,
5364 	 * we call find_get_page() with swapper_space directly.
5365 	 */
5366 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5367 	if (do_memsw_account())
5368 		entry->val = ent.val;
5369 
5370 	return page;
5371 }
5372 #else
5373 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5374 			pte_t ptent, swp_entry_t *entry)
5375 {
5376 	return NULL;
5377 }
5378 #endif
5379 
5380 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5381 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5382 {
5383 	struct page *page = NULL;
5384 	struct address_space *mapping;
5385 	pgoff_t pgoff;
5386 
5387 	if (!vma->vm_file) /* anonymous vma */
5388 		return NULL;
5389 	if (!(mc.flags & MOVE_FILE))
5390 		return NULL;
5391 
5392 	mapping = vma->vm_file->f_mapping;
5393 	pgoff = linear_page_index(vma, addr);
5394 
5395 	/* page is moved even if it's not RSS of this task(page-faulted). */
5396 #ifdef CONFIG_SWAP
5397 	/* shmem/tmpfs may report page out on swap: account for that too. */
5398 	if (shmem_mapping(mapping)) {
5399 		page = find_get_entry(mapping, pgoff);
5400 		if (xa_is_value(page)) {
5401 			swp_entry_t swp = radix_to_swp_entry(page);
5402 			if (do_memsw_account())
5403 				*entry = swp;
5404 			page = find_get_page(swap_address_space(swp),
5405 					     swp_offset(swp));
5406 		}
5407 	} else
5408 		page = find_get_page(mapping, pgoff);
5409 #else
5410 	page = find_get_page(mapping, pgoff);
5411 #endif
5412 	return page;
5413 }
5414 
5415 /**
5416  * mem_cgroup_move_account - move account of the page
5417  * @page: the page
5418  * @compound: charge the page as compound or small page
5419  * @from: mem_cgroup which the page is moved from.
5420  * @to:	mem_cgroup which the page is moved to. @from != @to.
5421  *
5422  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5423  *
5424  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5425  * from old cgroup.
5426  */
5427 static int mem_cgroup_move_account(struct page *page,
5428 				   bool compound,
5429 				   struct mem_cgroup *from,
5430 				   struct mem_cgroup *to)
5431 {
5432 	struct lruvec *from_vec, *to_vec;
5433 	struct pglist_data *pgdat;
5434 	unsigned long flags;
5435 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5436 	int ret;
5437 	bool anon;
5438 
5439 	VM_BUG_ON(from == to);
5440 	VM_BUG_ON_PAGE(PageLRU(page), page);
5441 	VM_BUG_ON(compound && !PageTransHuge(page));
5442 
5443 	/*
5444 	 * Prevent mem_cgroup_migrate() from looking at
5445 	 * page->mem_cgroup of its source page while we change it.
5446 	 */
5447 	ret = -EBUSY;
5448 	if (!trylock_page(page))
5449 		goto out;
5450 
5451 	ret = -EINVAL;
5452 	if (page->mem_cgroup != from)
5453 		goto out_unlock;
5454 
5455 	anon = PageAnon(page);
5456 
5457 	pgdat = page_pgdat(page);
5458 	from_vec = mem_cgroup_lruvec(from, pgdat);
5459 	to_vec = mem_cgroup_lruvec(to, pgdat);
5460 
5461 	spin_lock_irqsave(&from->move_lock, flags);
5462 
5463 	if (!anon && page_mapped(page)) {
5464 		__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5465 		__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5466 	}
5467 
5468 	/*
5469 	 * move_lock grabbed above and caller set from->moving_account, so
5470 	 * mod_memcg_page_state will serialize updates to PageDirty.
5471 	 * So mapping should be stable for dirty pages.
5472 	 */
5473 	if (!anon && PageDirty(page)) {
5474 		struct address_space *mapping = page_mapping(page);
5475 
5476 		if (mapping_cap_account_dirty(mapping)) {
5477 			__mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
5478 			__mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
5479 		}
5480 	}
5481 
5482 	if (PageWriteback(page)) {
5483 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5484 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5485 	}
5486 
5487 	/*
5488 	 * It is safe to change page->mem_cgroup here because the page
5489 	 * is referenced, charged, and isolated - we can't race with
5490 	 * uncharging, charging, migration, or LRU putback.
5491 	 */
5492 
5493 	/* caller should have done css_get */
5494 	page->mem_cgroup = to;
5495 
5496 	spin_unlock_irqrestore(&from->move_lock, flags);
5497 
5498 	ret = 0;
5499 
5500 	local_irq_disable();
5501 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
5502 	memcg_check_events(to, page);
5503 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
5504 	memcg_check_events(from, page);
5505 	local_irq_enable();
5506 out_unlock:
5507 	unlock_page(page);
5508 out:
5509 	return ret;
5510 }
5511 
5512 /**
5513  * get_mctgt_type - get target type of moving charge
5514  * @vma: the vma the pte to be checked belongs
5515  * @addr: the address corresponding to the pte to be checked
5516  * @ptent: the pte to be checked
5517  * @target: the pointer the target page or swap ent will be stored(can be NULL)
5518  *
5519  * Returns
5520  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5521  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5522  *     move charge. if @target is not NULL, the page is stored in target->page
5523  *     with extra refcnt got(Callers should handle it).
5524  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5525  *     target for charge migration. if @target is not NULL, the entry is stored
5526  *     in target->ent.
5527  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE  but page is MEMORY_DEVICE_PRIVATE
5528  *     (so ZONE_DEVICE page and thus not on the lru).
5529  *     For now we such page is charge like a regular page would be as for all
5530  *     intent and purposes it is just special memory taking the place of a
5531  *     regular page.
5532  *
5533  *     See Documentations/vm/hmm.txt and include/linux/hmm.h
5534  *
5535  * Called with pte lock held.
5536  */
5537 
5538 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5539 		unsigned long addr, pte_t ptent, union mc_target *target)
5540 {
5541 	struct page *page = NULL;
5542 	enum mc_target_type ret = MC_TARGET_NONE;
5543 	swp_entry_t ent = { .val = 0 };
5544 
5545 	if (pte_present(ptent))
5546 		page = mc_handle_present_pte(vma, addr, ptent);
5547 	else if (is_swap_pte(ptent))
5548 		page = mc_handle_swap_pte(vma, ptent, &ent);
5549 	else if (pte_none(ptent))
5550 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5551 
5552 	if (!page && !ent.val)
5553 		return ret;
5554 	if (page) {
5555 		/*
5556 		 * Do only loose check w/o serialization.
5557 		 * mem_cgroup_move_account() checks the page is valid or
5558 		 * not under LRU exclusion.
5559 		 */
5560 		if (page->mem_cgroup == mc.from) {
5561 			ret = MC_TARGET_PAGE;
5562 			if (is_device_private_page(page))
5563 				ret = MC_TARGET_DEVICE;
5564 			if (target)
5565 				target->page = page;
5566 		}
5567 		if (!ret || !target)
5568 			put_page(page);
5569 	}
5570 	/*
5571 	 * There is a swap entry and a page doesn't exist or isn't charged.
5572 	 * But we cannot move a tail-page in a THP.
5573 	 */
5574 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5575 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5576 		ret = MC_TARGET_SWAP;
5577 		if (target)
5578 			target->ent = ent;
5579 	}
5580 	return ret;
5581 }
5582 
5583 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5584 /*
5585  * We don't consider PMD mapped swapping or file mapped pages because THP does
5586  * not support them for now.
5587  * Caller should make sure that pmd_trans_huge(pmd) is true.
5588  */
5589 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5590 		unsigned long addr, pmd_t pmd, union mc_target *target)
5591 {
5592 	struct page *page = NULL;
5593 	enum mc_target_type ret = MC_TARGET_NONE;
5594 
5595 	if (unlikely(is_swap_pmd(pmd))) {
5596 		VM_BUG_ON(thp_migration_supported() &&
5597 				  !is_pmd_migration_entry(pmd));
5598 		return ret;
5599 	}
5600 	page = pmd_page(pmd);
5601 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5602 	if (!(mc.flags & MOVE_ANON))
5603 		return ret;
5604 	if (page->mem_cgroup == mc.from) {
5605 		ret = MC_TARGET_PAGE;
5606 		if (target) {
5607 			get_page(page);
5608 			target->page = page;
5609 		}
5610 	}
5611 	return ret;
5612 }
5613 #else
5614 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5615 		unsigned long addr, pmd_t pmd, union mc_target *target)
5616 {
5617 	return MC_TARGET_NONE;
5618 }
5619 #endif
5620 
5621 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5622 					unsigned long addr, unsigned long end,
5623 					struct mm_walk *walk)
5624 {
5625 	struct vm_area_struct *vma = walk->vma;
5626 	pte_t *pte;
5627 	spinlock_t *ptl;
5628 
5629 	ptl = pmd_trans_huge_lock(pmd, vma);
5630 	if (ptl) {
5631 		/*
5632 		 * Note their can not be MC_TARGET_DEVICE for now as we do not
5633 		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
5634 		 * this might change.
5635 		 */
5636 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5637 			mc.precharge += HPAGE_PMD_NR;
5638 		spin_unlock(ptl);
5639 		return 0;
5640 	}
5641 
5642 	if (pmd_trans_unstable(pmd))
5643 		return 0;
5644 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5645 	for (; addr != end; pte++, addr += PAGE_SIZE)
5646 		if (get_mctgt_type(vma, addr, *pte, NULL))
5647 			mc.precharge++;	/* increment precharge temporarily */
5648 	pte_unmap_unlock(pte - 1, ptl);
5649 	cond_resched();
5650 
5651 	return 0;
5652 }
5653 
5654 static const struct mm_walk_ops precharge_walk_ops = {
5655 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5656 };
5657 
5658 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5659 {
5660 	unsigned long precharge;
5661 
5662 	down_read(&mm->mmap_sem);
5663 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5664 	up_read(&mm->mmap_sem);
5665 
5666 	precharge = mc.precharge;
5667 	mc.precharge = 0;
5668 
5669 	return precharge;
5670 }
5671 
5672 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5673 {
5674 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5675 
5676 	VM_BUG_ON(mc.moving_task);
5677 	mc.moving_task = current;
5678 	return mem_cgroup_do_precharge(precharge);
5679 }
5680 
5681 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5682 static void __mem_cgroup_clear_mc(void)
5683 {
5684 	struct mem_cgroup *from = mc.from;
5685 	struct mem_cgroup *to = mc.to;
5686 
5687 	/* we must uncharge all the leftover precharges from mc.to */
5688 	if (mc.precharge) {
5689 		cancel_charge(mc.to, mc.precharge);
5690 		mc.precharge = 0;
5691 	}
5692 	/*
5693 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5694 	 * we must uncharge here.
5695 	 */
5696 	if (mc.moved_charge) {
5697 		cancel_charge(mc.from, mc.moved_charge);
5698 		mc.moved_charge = 0;
5699 	}
5700 	/* we must fixup refcnts and charges */
5701 	if (mc.moved_swap) {
5702 		/* uncharge swap account from the old cgroup */
5703 		if (!mem_cgroup_is_root(mc.from))
5704 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5705 
5706 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5707 
5708 		/*
5709 		 * we charged both to->memory and to->memsw, so we
5710 		 * should uncharge to->memory.
5711 		 */
5712 		if (!mem_cgroup_is_root(mc.to))
5713 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5714 
5715 		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
5716 		css_put_many(&mc.to->css, mc.moved_swap);
5717 
5718 		mc.moved_swap = 0;
5719 	}
5720 	memcg_oom_recover(from);
5721 	memcg_oom_recover(to);
5722 	wake_up_all(&mc.waitq);
5723 }
5724 
5725 static void mem_cgroup_clear_mc(void)
5726 {
5727 	struct mm_struct *mm = mc.mm;
5728 
5729 	/*
5730 	 * we must clear moving_task before waking up waiters at the end of
5731 	 * task migration.
5732 	 */
5733 	mc.moving_task = NULL;
5734 	__mem_cgroup_clear_mc();
5735 	spin_lock(&mc.lock);
5736 	mc.from = NULL;
5737 	mc.to = NULL;
5738 	mc.mm = NULL;
5739 	spin_unlock(&mc.lock);
5740 
5741 	mmput(mm);
5742 }
5743 
5744 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5745 {
5746 	struct cgroup_subsys_state *css;
5747 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5748 	struct mem_cgroup *from;
5749 	struct task_struct *leader, *p;
5750 	struct mm_struct *mm;
5751 	unsigned long move_flags;
5752 	int ret = 0;
5753 
5754 	/* charge immigration isn't supported on the default hierarchy */
5755 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5756 		return 0;
5757 
5758 	/*
5759 	 * Multi-process migrations only happen on the default hierarchy
5760 	 * where charge immigration is not used.  Perform charge
5761 	 * immigration if @tset contains a leader and whine if there are
5762 	 * multiple.
5763 	 */
5764 	p = NULL;
5765 	cgroup_taskset_for_each_leader(leader, css, tset) {
5766 		WARN_ON_ONCE(p);
5767 		p = leader;
5768 		memcg = mem_cgroup_from_css(css);
5769 	}
5770 	if (!p)
5771 		return 0;
5772 
5773 	/*
5774 	 * We are now commited to this value whatever it is. Changes in this
5775 	 * tunable will only affect upcoming migrations, not the current one.
5776 	 * So we need to save it, and keep it going.
5777 	 */
5778 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5779 	if (!move_flags)
5780 		return 0;
5781 
5782 	from = mem_cgroup_from_task(p);
5783 
5784 	VM_BUG_ON(from == memcg);
5785 
5786 	mm = get_task_mm(p);
5787 	if (!mm)
5788 		return 0;
5789 	/* We move charges only when we move a owner of the mm */
5790 	if (mm->owner == p) {
5791 		VM_BUG_ON(mc.from);
5792 		VM_BUG_ON(mc.to);
5793 		VM_BUG_ON(mc.precharge);
5794 		VM_BUG_ON(mc.moved_charge);
5795 		VM_BUG_ON(mc.moved_swap);
5796 
5797 		spin_lock(&mc.lock);
5798 		mc.mm = mm;
5799 		mc.from = from;
5800 		mc.to = memcg;
5801 		mc.flags = move_flags;
5802 		spin_unlock(&mc.lock);
5803 		/* We set mc.moving_task later */
5804 
5805 		ret = mem_cgroup_precharge_mc(mm);
5806 		if (ret)
5807 			mem_cgroup_clear_mc();
5808 	} else {
5809 		mmput(mm);
5810 	}
5811 	return ret;
5812 }
5813 
5814 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5815 {
5816 	if (mc.to)
5817 		mem_cgroup_clear_mc();
5818 }
5819 
5820 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5821 				unsigned long addr, unsigned long end,
5822 				struct mm_walk *walk)
5823 {
5824 	int ret = 0;
5825 	struct vm_area_struct *vma = walk->vma;
5826 	pte_t *pte;
5827 	spinlock_t *ptl;
5828 	enum mc_target_type target_type;
5829 	union mc_target target;
5830 	struct page *page;
5831 
5832 	ptl = pmd_trans_huge_lock(pmd, vma);
5833 	if (ptl) {
5834 		if (mc.precharge < HPAGE_PMD_NR) {
5835 			spin_unlock(ptl);
5836 			return 0;
5837 		}
5838 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5839 		if (target_type == MC_TARGET_PAGE) {
5840 			page = target.page;
5841 			if (!isolate_lru_page(page)) {
5842 				if (!mem_cgroup_move_account(page, true,
5843 							     mc.from, mc.to)) {
5844 					mc.precharge -= HPAGE_PMD_NR;
5845 					mc.moved_charge += HPAGE_PMD_NR;
5846 				}
5847 				putback_lru_page(page);
5848 			}
5849 			put_page(page);
5850 		} else if (target_type == MC_TARGET_DEVICE) {
5851 			page = target.page;
5852 			if (!mem_cgroup_move_account(page, true,
5853 						     mc.from, mc.to)) {
5854 				mc.precharge -= HPAGE_PMD_NR;
5855 				mc.moved_charge += HPAGE_PMD_NR;
5856 			}
5857 			put_page(page);
5858 		}
5859 		spin_unlock(ptl);
5860 		return 0;
5861 	}
5862 
5863 	if (pmd_trans_unstable(pmd))
5864 		return 0;
5865 retry:
5866 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5867 	for (; addr != end; addr += PAGE_SIZE) {
5868 		pte_t ptent = *(pte++);
5869 		bool device = false;
5870 		swp_entry_t ent;
5871 
5872 		if (!mc.precharge)
5873 			break;
5874 
5875 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5876 		case MC_TARGET_DEVICE:
5877 			device = true;
5878 			fallthrough;
5879 		case MC_TARGET_PAGE:
5880 			page = target.page;
5881 			/*
5882 			 * We can have a part of the split pmd here. Moving it
5883 			 * can be done but it would be too convoluted so simply
5884 			 * ignore such a partial THP and keep it in original
5885 			 * memcg. There should be somebody mapping the head.
5886 			 */
5887 			if (PageTransCompound(page))
5888 				goto put;
5889 			if (!device && isolate_lru_page(page))
5890 				goto put;
5891 			if (!mem_cgroup_move_account(page, false,
5892 						mc.from, mc.to)) {
5893 				mc.precharge--;
5894 				/* we uncharge from mc.from later. */
5895 				mc.moved_charge++;
5896 			}
5897 			if (!device)
5898 				putback_lru_page(page);
5899 put:			/* get_mctgt_type() gets the page */
5900 			put_page(page);
5901 			break;
5902 		case MC_TARGET_SWAP:
5903 			ent = target.ent;
5904 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5905 				mc.precharge--;
5906 				/* we fixup refcnts and charges later. */
5907 				mc.moved_swap++;
5908 			}
5909 			break;
5910 		default:
5911 			break;
5912 		}
5913 	}
5914 	pte_unmap_unlock(pte - 1, ptl);
5915 	cond_resched();
5916 
5917 	if (addr != end) {
5918 		/*
5919 		 * We have consumed all precharges we got in can_attach().
5920 		 * We try charge one by one, but don't do any additional
5921 		 * charges to mc.to if we have failed in charge once in attach()
5922 		 * phase.
5923 		 */
5924 		ret = mem_cgroup_do_precharge(1);
5925 		if (!ret)
5926 			goto retry;
5927 	}
5928 
5929 	return ret;
5930 }
5931 
5932 static const struct mm_walk_ops charge_walk_ops = {
5933 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
5934 };
5935 
5936 static void mem_cgroup_move_charge(void)
5937 {
5938 	lru_add_drain_all();
5939 	/*
5940 	 * Signal lock_page_memcg() to take the memcg's move_lock
5941 	 * while we're moving its pages to another memcg. Then wait
5942 	 * for already started RCU-only updates to finish.
5943 	 */
5944 	atomic_inc(&mc.from->moving_account);
5945 	synchronize_rcu();
5946 retry:
5947 	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5948 		/*
5949 		 * Someone who are holding the mmap_sem might be waiting in
5950 		 * waitq. So we cancel all extra charges, wake up all waiters,
5951 		 * and retry. Because we cancel precharges, we might not be able
5952 		 * to move enough charges, but moving charge is a best-effort
5953 		 * feature anyway, so it wouldn't be a big problem.
5954 		 */
5955 		__mem_cgroup_clear_mc();
5956 		cond_resched();
5957 		goto retry;
5958 	}
5959 	/*
5960 	 * When we have consumed all precharges and failed in doing
5961 	 * additional charge, the page walk just aborts.
5962 	 */
5963 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
5964 			NULL);
5965 
5966 	up_read(&mc.mm->mmap_sem);
5967 	atomic_dec(&mc.from->moving_account);
5968 }
5969 
5970 static void mem_cgroup_move_task(void)
5971 {
5972 	if (mc.to) {
5973 		mem_cgroup_move_charge();
5974 		mem_cgroup_clear_mc();
5975 	}
5976 }
5977 #else	/* !CONFIG_MMU */
5978 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5979 {
5980 	return 0;
5981 }
5982 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5983 {
5984 }
5985 static void mem_cgroup_move_task(void)
5986 {
5987 }
5988 #endif
5989 
5990 /*
5991  * Cgroup retains root cgroups across [un]mount cycles making it necessary
5992  * to verify whether we're attached to the default hierarchy on each mount
5993  * attempt.
5994  */
5995 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5996 {
5997 	/*
5998 	 * use_hierarchy is forced on the default hierarchy.  cgroup core
5999 	 * guarantees that @root doesn't have any children, so turning it
6000 	 * on for the root memcg is enough.
6001 	 */
6002 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6003 		root_mem_cgroup->use_hierarchy = true;
6004 	else
6005 		root_mem_cgroup->use_hierarchy = false;
6006 }
6007 
6008 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6009 {
6010 	if (value == PAGE_COUNTER_MAX)
6011 		seq_puts(m, "max\n");
6012 	else
6013 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6014 
6015 	return 0;
6016 }
6017 
6018 static u64 memory_current_read(struct cgroup_subsys_state *css,
6019 			       struct cftype *cft)
6020 {
6021 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6022 
6023 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6024 }
6025 
6026 static int memory_min_show(struct seq_file *m, void *v)
6027 {
6028 	return seq_puts_memcg_tunable(m,
6029 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6030 }
6031 
6032 static ssize_t memory_min_write(struct kernfs_open_file *of,
6033 				char *buf, size_t nbytes, loff_t off)
6034 {
6035 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6036 	unsigned long min;
6037 	int err;
6038 
6039 	buf = strstrip(buf);
6040 	err = page_counter_memparse(buf, "max", &min);
6041 	if (err)
6042 		return err;
6043 
6044 	page_counter_set_min(&memcg->memory, min);
6045 
6046 	return nbytes;
6047 }
6048 
6049 static int memory_low_show(struct seq_file *m, void *v)
6050 {
6051 	return seq_puts_memcg_tunable(m,
6052 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6053 }
6054 
6055 static ssize_t memory_low_write(struct kernfs_open_file *of,
6056 				char *buf, size_t nbytes, loff_t off)
6057 {
6058 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6059 	unsigned long low;
6060 	int err;
6061 
6062 	buf = strstrip(buf);
6063 	err = page_counter_memparse(buf, "max", &low);
6064 	if (err)
6065 		return err;
6066 
6067 	page_counter_set_low(&memcg->memory, low);
6068 
6069 	return nbytes;
6070 }
6071 
6072 static int memory_high_show(struct seq_file *m, void *v)
6073 {
6074 	return seq_puts_memcg_tunable(m,
6075 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6076 }
6077 
6078 static ssize_t memory_high_write(struct kernfs_open_file *of,
6079 				 char *buf, size_t nbytes, loff_t off)
6080 {
6081 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6082 	unsigned int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
6083 	bool drained = false;
6084 	unsigned long high;
6085 	int err;
6086 
6087 	buf = strstrip(buf);
6088 	err = page_counter_memparse(buf, "max", &high);
6089 	if (err)
6090 		return err;
6091 
6092 	page_counter_set_high(&memcg->memory, high);
6093 
6094 	for (;;) {
6095 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6096 		unsigned long reclaimed;
6097 
6098 		if (nr_pages <= high)
6099 			break;
6100 
6101 		if (signal_pending(current))
6102 			break;
6103 
6104 		if (!drained) {
6105 			drain_all_stock(memcg);
6106 			drained = true;
6107 			continue;
6108 		}
6109 
6110 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6111 							 GFP_KERNEL, true);
6112 
6113 		if (!reclaimed && !nr_retries--)
6114 			break;
6115 	}
6116 
6117 	return nbytes;
6118 }
6119 
6120 static int memory_max_show(struct seq_file *m, void *v)
6121 {
6122 	return seq_puts_memcg_tunable(m,
6123 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6124 }
6125 
6126 static ssize_t memory_max_write(struct kernfs_open_file *of,
6127 				char *buf, size_t nbytes, loff_t off)
6128 {
6129 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6130 	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
6131 	bool drained = false;
6132 	unsigned long max;
6133 	int err;
6134 
6135 	buf = strstrip(buf);
6136 	err = page_counter_memparse(buf, "max", &max);
6137 	if (err)
6138 		return err;
6139 
6140 	xchg(&memcg->memory.max, max);
6141 
6142 	for (;;) {
6143 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6144 
6145 		if (nr_pages <= max)
6146 			break;
6147 
6148 		if (signal_pending(current))
6149 			break;
6150 
6151 		if (!drained) {
6152 			drain_all_stock(memcg);
6153 			drained = true;
6154 			continue;
6155 		}
6156 
6157 		if (nr_reclaims) {
6158 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6159 							  GFP_KERNEL, true))
6160 				nr_reclaims--;
6161 			continue;
6162 		}
6163 
6164 		memcg_memory_event(memcg, MEMCG_OOM);
6165 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6166 			break;
6167 	}
6168 
6169 	memcg_wb_domain_size_changed(memcg);
6170 	return nbytes;
6171 }
6172 
6173 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6174 {
6175 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6176 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6177 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6178 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6179 	seq_printf(m, "oom_kill %lu\n",
6180 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6181 }
6182 
6183 static int memory_events_show(struct seq_file *m, void *v)
6184 {
6185 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6186 
6187 	__memory_events_show(m, memcg->memory_events);
6188 	return 0;
6189 }
6190 
6191 static int memory_events_local_show(struct seq_file *m, void *v)
6192 {
6193 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6194 
6195 	__memory_events_show(m, memcg->memory_events_local);
6196 	return 0;
6197 }
6198 
6199 static int memory_stat_show(struct seq_file *m, void *v)
6200 {
6201 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6202 	char *buf;
6203 
6204 	buf = memory_stat_format(memcg);
6205 	if (!buf)
6206 		return -ENOMEM;
6207 	seq_puts(m, buf);
6208 	kfree(buf);
6209 	return 0;
6210 }
6211 
6212 static int memory_oom_group_show(struct seq_file *m, void *v)
6213 {
6214 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6215 
6216 	seq_printf(m, "%d\n", memcg->oom_group);
6217 
6218 	return 0;
6219 }
6220 
6221 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6222 				      char *buf, size_t nbytes, loff_t off)
6223 {
6224 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6225 	int ret, oom_group;
6226 
6227 	buf = strstrip(buf);
6228 	if (!buf)
6229 		return -EINVAL;
6230 
6231 	ret = kstrtoint(buf, 0, &oom_group);
6232 	if (ret)
6233 		return ret;
6234 
6235 	if (oom_group != 0 && oom_group != 1)
6236 		return -EINVAL;
6237 
6238 	memcg->oom_group = oom_group;
6239 
6240 	return nbytes;
6241 }
6242 
6243 static struct cftype memory_files[] = {
6244 	{
6245 		.name = "current",
6246 		.flags = CFTYPE_NOT_ON_ROOT,
6247 		.read_u64 = memory_current_read,
6248 	},
6249 	{
6250 		.name = "min",
6251 		.flags = CFTYPE_NOT_ON_ROOT,
6252 		.seq_show = memory_min_show,
6253 		.write = memory_min_write,
6254 	},
6255 	{
6256 		.name = "low",
6257 		.flags = CFTYPE_NOT_ON_ROOT,
6258 		.seq_show = memory_low_show,
6259 		.write = memory_low_write,
6260 	},
6261 	{
6262 		.name = "high",
6263 		.flags = CFTYPE_NOT_ON_ROOT,
6264 		.seq_show = memory_high_show,
6265 		.write = memory_high_write,
6266 	},
6267 	{
6268 		.name = "max",
6269 		.flags = CFTYPE_NOT_ON_ROOT,
6270 		.seq_show = memory_max_show,
6271 		.write = memory_max_write,
6272 	},
6273 	{
6274 		.name = "events",
6275 		.flags = CFTYPE_NOT_ON_ROOT,
6276 		.file_offset = offsetof(struct mem_cgroup, events_file),
6277 		.seq_show = memory_events_show,
6278 	},
6279 	{
6280 		.name = "events.local",
6281 		.flags = CFTYPE_NOT_ON_ROOT,
6282 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6283 		.seq_show = memory_events_local_show,
6284 	},
6285 	{
6286 		.name = "stat",
6287 		.seq_show = memory_stat_show,
6288 	},
6289 	{
6290 		.name = "oom.group",
6291 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6292 		.seq_show = memory_oom_group_show,
6293 		.write = memory_oom_group_write,
6294 	},
6295 	{ }	/* terminate */
6296 };
6297 
6298 struct cgroup_subsys memory_cgrp_subsys = {
6299 	.css_alloc = mem_cgroup_css_alloc,
6300 	.css_online = mem_cgroup_css_online,
6301 	.css_offline = mem_cgroup_css_offline,
6302 	.css_released = mem_cgroup_css_released,
6303 	.css_free = mem_cgroup_css_free,
6304 	.css_reset = mem_cgroup_css_reset,
6305 	.can_attach = mem_cgroup_can_attach,
6306 	.cancel_attach = mem_cgroup_cancel_attach,
6307 	.post_attach = mem_cgroup_move_task,
6308 	.bind = mem_cgroup_bind,
6309 	.dfl_cftypes = memory_files,
6310 	.legacy_cftypes = mem_cgroup_legacy_files,
6311 	.early_init = 0,
6312 };
6313 
6314 /*
6315  * This function calculates an individual cgroup's effective
6316  * protection which is derived from its own memory.min/low, its
6317  * parent's and siblings' settings, as well as the actual memory
6318  * distribution in the tree.
6319  *
6320  * The following rules apply to the effective protection values:
6321  *
6322  * 1. At the first level of reclaim, effective protection is equal to
6323  *    the declared protection in memory.min and memory.low.
6324  *
6325  * 2. To enable safe delegation of the protection configuration, at
6326  *    subsequent levels the effective protection is capped to the
6327  *    parent's effective protection.
6328  *
6329  * 3. To make complex and dynamic subtrees easier to configure, the
6330  *    user is allowed to overcommit the declared protection at a given
6331  *    level. If that is the case, the parent's effective protection is
6332  *    distributed to the children in proportion to how much protection
6333  *    they have declared and how much of it they are utilizing.
6334  *
6335  *    This makes distribution proportional, but also work-conserving:
6336  *    if one cgroup claims much more protection than it uses memory,
6337  *    the unused remainder is available to its siblings.
6338  *
6339  * 4. Conversely, when the declared protection is undercommitted at a
6340  *    given level, the distribution of the larger parental protection
6341  *    budget is NOT proportional. A cgroup's protection from a sibling
6342  *    is capped to its own memory.min/low setting.
6343  *
6344  * 5. However, to allow protecting recursive subtrees from each other
6345  *    without having to declare each individual cgroup's fixed share
6346  *    of the ancestor's claim to protection, any unutilized -
6347  *    "floating" - protection from up the tree is distributed in
6348  *    proportion to each cgroup's *usage*. This makes the protection
6349  *    neutral wrt sibling cgroups and lets them compete freely over
6350  *    the shared parental protection budget, but it protects the
6351  *    subtree as a whole from neighboring subtrees.
6352  *
6353  * Note that 4. and 5. are not in conflict: 4. is about protecting
6354  * against immediate siblings whereas 5. is about protecting against
6355  * neighboring subtrees.
6356  */
6357 static unsigned long effective_protection(unsigned long usage,
6358 					  unsigned long parent_usage,
6359 					  unsigned long setting,
6360 					  unsigned long parent_effective,
6361 					  unsigned long siblings_protected)
6362 {
6363 	unsigned long protected;
6364 	unsigned long ep;
6365 
6366 	protected = min(usage, setting);
6367 	/*
6368 	 * If all cgroups at this level combined claim and use more
6369 	 * protection then what the parent affords them, distribute
6370 	 * shares in proportion to utilization.
6371 	 *
6372 	 * We are using actual utilization rather than the statically
6373 	 * claimed protection in order to be work-conserving: claimed
6374 	 * but unused protection is available to siblings that would
6375 	 * otherwise get a smaller chunk than what they claimed.
6376 	 */
6377 	if (siblings_protected > parent_effective)
6378 		return protected * parent_effective / siblings_protected;
6379 
6380 	/*
6381 	 * Ok, utilized protection of all children is within what the
6382 	 * parent affords them, so we know whatever this child claims
6383 	 * and utilizes is effectively protected.
6384 	 *
6385 	 * If there is unprotected usage beyond this value, reclaim
6386 	 * will apply pressure in proportion to that amount.
6387 	 *
6388 	 * If there is unutilized protection, the cgroup will be fully
6389 	 * shielded from reclaim, but we do return a smaller value for
6390 	 * protection than what the group could enjoy in theory. This
6391 	 * is okay. With the overcommit distribution above, effective
6392 	 * protection is always dependent on how memory is actually
6393 	 * consumed among the siblings anyway.
6394 	 */
6395 	ep = protected;
6396 
6397 	/*
6398 	 * If the children aren't claiming (all of) the protection
6399 	 * afforded to them by the parent, distribute the remainder in
6400 	 * proportion to the (unprotected) memory of each cgroup. That
6401 	 * way, cgroups that aren't explicitly prioritized wrt each
6402 	 * other compete freely over the allowance, but they are
6403 	 * collectively protected from neighboring trees.
6404 	 *
6405 	 * We're using unprotected memory for the weight so that if
6406 	 * some cgroups DO claim explicit protection, we don't protect
6407 	 * the same bytes twice.
6408 	 */
6409 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6410 		return ep;
6411 
6412 	if (parent_effective > siblings_protected && usage > protected) {
6413 		unsigned long unclaimed;
6414 
6415 		unclaimed = parent_effective - siblings_protected;
6416 		unclaimed *= usage - protected;
6417 		unclaimed /= parent_usage - siblings_protected;
6418 
6419 		ep += unclaimed;
6420 	}
6421 
6422 	return ep;
6423 }
6424 
6425 /**
6426  * mem_cgroup_protected - check if memory consumption is in the normal range
6427  * @root: the top ancestor of the sub-tree being checked
6428  * @memcg: the memory cgroup to check
6429  *
6430  * WARNING: This function is not stateless! It can only be used as part
6431  *          of a top-down tree iteration, not for isolated queries.
6432  *
6433  * Returns one of the following:
6434  *   MEMCG_PROT_NONE: cgroup memory is not protected
6435  *   MEMCG_PROT_LOW: cgroup memory is protected as long there is
6436  *     an unprotected supply of reclaimable memory from other cgroups.
6437  *   MEMCG_PROT_MIN: cgroup memory is protected
6438  */
6439 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
6440 						struct mem_cgroup *memcg)
6441 {
6442 	unsigned long usage, parent_usage;
6443 	struct mem_cgroup *parent;
6444 
6445 	if (mem_cgroup_disabled())
6446 		return MEMCG_PROT_NONE;
6447 
6448 	if (!root)
6449 		root = root_mem_cgroup;
6450 	if (memcg == root)
6451 		return MEMCG_PROT_NONE;
6452 
6453 	usage = page_counter_read(&memcg->memory);
6454 	if (!usage)
6455 		return MEMCG_PROT_NONE;
6456 
6457 	parent = parent_mem_cgroup(memcg);
6458 	/* No parent means a non-hierarchical mode on v1 memcg */
6459 	if (!parent)
6460 		return MEMCG_PROT_NONE;
6461 
6462 	if (parent == root) {
6463 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6464 		memcg->memory.elow = memcg->memory.low;
6465 		goto out;
6466 	}
6467 
6468 	parent_usage = page_counter_read(&parent->memory);
6469 
6470 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6471 			READ_ONCE(memcg->memory.min),
6472 			READ_ONCE(parent->memory.emin),
6473 			atomic_long_read(&parent->memory.children_min_usage)));
6474 
6475 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6476 			memcg->memory.low, READ_ONCE(parent->memory.elow),
6477 			atomic_long_read(&parent->memory.children_low_usage)));
6478 
6479 out:
6480 	if (usage <= memcg->memory.emin)
6481 		return MEMCG_PROT_MIN;
6482 	else if (usage <= memcg->memory.elow)
6483 		return MEMCG_PROT_LOW;
6484 	else
6485 		return MEMCG_PROT_NONE;
6486 }
6487 
6488 /**
6489  * mem_cgroup_try_charge - try charging a page
6490  * @page: page to charge
6491  * @mm: mm context of the victim
6492  * @gfp_mask: reclaim mode
6493  * @memcgp: charged memcg return
6494  * @compound: charge the page as compound or small page
6495  *
6496  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6497  * pages according to @gfp_mask if necessary.
6498  *
6499  * Returns 0 on success, with *@memcgp pointing to the charged memcg.
6500  * Otherwise, an error code is returned.
6501  *
6502  * After page->mapping has been set up, the caller must finalize the
6503  * charge with mem_cgroup_commit_charge().  Or abort the transaction
6504  * with mem_cgroup_cancel_charge() in case page instantiation fails.
6505  */
6506 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
6507 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6508 			  bool compound)
6509 {
6510 	struct mem_cgroup *memcg = NULL;
6511 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6512 	int ret = 0;
6513 
6514 	if (mem_cgroup_disabled())
6515 		goto out;
6516 
6517 	if (PageSwapCache(page)) {
6518 		/*
6519 		 * Every swap fault against a single page tries to charge the
6520 		 * page, bail as early as possible.  shmem_unuse() encounters
6521 		 * already charged pages, too.  The USED bit is protected by
6522 		 * the page lock, which serializes swap cache removal, which
6523 		 * in turn serializes uncharging.
6524 		 */
6525 		VM_BUG_ON_PAGE(!PageLocked(page), page);
6526 		if (compound_head(page)->mem_cgroup)
6527 			goto out;
6528 
6529 		if (do_swap_account) {
6530 			swp_entry_t ent = { .val = page_private(page), };
6531 			unsigned short id = lookup_swap_cgroup_id(ent);
6532 
6533 			rcu_read_lock();
6534 			memcg = mem_cgroup_from_id(id);
6535 			if (memcg && !css_tryget_online(&memcg->css))
6536 				memcg = NULL;
6537 			rcu_read_unlock();
6538 		}
6539 	}
6540 
6541 	if (!memcg)
6542 		memcg = get_mem_cgroup_from_mm(mm);
6543 
6544 	ret = try_charge(memcg, gfp_mask, nr_pages);
6545 
6546 	css_put(&memcg->css);
6547 out:
6548 	*memcgp = memcg;
6549 	return ret;
6550 }
6551 
6552 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
6553 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6554 			  bool compound)
6555 {
6556 	struct mem_cgroup *memcg;
6557 	int ret;
6558 
6559 	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
6560 	memcg = *memcgp;
6561 	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
6562 	return ret;
6563 }
6564 
6565 /**
6566  * mem_cgroup_commit_charge - commit a page charge
6567  * @page: page to charge
6568  * @memcg: memcg to charge the page to
6569  * @lrucare: page might be on LRU already
6570  * @compound: charge the page as compound or small page
6571  *
6572  * Finalize a charge transaction started by mem_cgroup_try_charge(),
6573  * after page->mapping has been set up.  This must happen atomically
6574  * as part of the page instantiation, i.e. under the page table lock
6575  * for anonymous pages, under the page lock for page and swap cache.
6576  *
6577  * In addition, the page must not be on the LRU during the commit, to
6578  * prevent racing with task migration.  If it might be, use @lrucare.
6579  *
6580  * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
6581  */
6582 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
6583 			      bool lrucare, bool compound)
6584 {
6585 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6586 
6587 	VM_BUG_ON_PAGE(!page->mapping, page);
6588 	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
6589 
6590 	if (mem_cgroup_disabled())
6591 		return;
6592 	/*
6593 	 * Swap faults will attempt to charge the same page multiple
6594 	 * times.  But reuse_swap_page() might have removed the page
6595 	 * from swapcache already, so we can't check PageSwapCache().
6596 	 */
6597 	if (!memcg)
6598 		return;
6599 
6600 	commit_charge(page, memcg, lrucare);
6601 
6602 	local_irq_disable();
6603 	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
6604 	memcg_check_events(memcg, page);
6605 	local_irq_enable();
6606 
6607 	if (do_memsw_account() && PageSwapCache(page)) {
6608 		swp_entry_t entry = { .val = page_private(page) };
6609 		/*
6610 		 * The swap entry might not get freed for a long time,
6611 		 * let's not wait for it.  The page already received a
6612 		 * memory+swap charge, drop the swap entry duplicate.
6613 		 */
6614 		mem_cgroup_uncharge_swap(entry, nr_pages);
6615 	}
6616 }
6617 
6618 /**
6619  * mem_cgroup_cancel_charge - cancel a page charge
6620  * @page: page to charge
6621  * @memcg: memcg to charge the page to
6622  * @compound: charge the page as compound or small page
6623  *
6624  * Cancel a charge transaction started by mem_cgroup_try_charge().
6625  */
6626 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
6627 		bool compound)
6628 {
6629 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6630 
6631 	if (mem_cgroup_disabled())
6632 		return;
6633 	/*
6634 	 * Swap faults will attempt to charge the same page multiple
6635 	 * times.  But reuse_swap_page() might have removed the page
6636 	 * from swapcache already, so we can't check PageSwapCache().
6637 	 */
6638 	if (!memcg)
6639 		return;
6640 
6641 	cancel_charge(memcg, nr_pages);
6642 }
6643 
6644 struct uncharge_gather {
6645 	struct mem_cgroup *memcg;
6646 	unsigned long pgpgout;
6647 	unsigned long nr_anon;
6648 	unsigned long nr_file;
6649 	unsigned long nr_kmem;
6650 	unsigned long nr_huge;
6651 	unsigned long nr_shmem;
6652 	struct page *dummy_page;
6653 };
6654 
6655 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6656 {
6657 	memset(ug, 0, sizeof(*ug));
6658 }
6659 
6660 static void uncharge_batch(const struct uncharge_gather *ug)
6661 {
6662 	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
6663 	unsigned long flags;
6664 
6665 	if (!mem_cgroup_is_root(ug->memcg)) {
6666 		page_counter_uncharge(&ug->memcg->memory, nr_pages);
6667 		if (do_memsw_account())
6668 			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
6669 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6670 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6671 		memcg_oom_recover(ug->memcg);
6672 	}
6673 
6674 	local_irq_save(flags);
6675 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
6676 	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
6677 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
6678 	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
6679 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6680 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
6681 	memcg_check_events(ug->memcg, ug->dummy_page);
6682 	local_irq_restore(flags);
6683 
6684 	if (!mem_cgroup_is_root(ug->memcg))
6685 		css_put_many(&ug->memcg->css, nr_pages);
6686 }
6687 
6688 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6689 {
6690 	VM_BUG_ON_PAGE(PageLRU(page), page);
6691 	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
6692 			!PageHWPoison(page) , page);
6693 
6694 	if (!page->mem_cgroup)
6695 		return;
6696 
6697 	/*
6698 	 * Nobody should be changing or seriously looking at
6699 	 * page->mem_cgroup at this point, we have fully
6700 	 * exclusive access to the page.
6701 	 */
6702 
6703 	if (ug->memcg != page->mem_cgroup) {
6704 		if (ug->memcg) {
6705 			uncharge_batch(ug);
6706 			uncharge_gather_clear(ug);
6707 		}
6708 		ug->memcg = page->mem_cgroup;
6709 	}
6710 
6711 	if (!PageKmemcg(page)) {
6712 		unsigned int nr_pages = 1;
6713 
6714 		if (PageTransHuge(page)) {
6715 			nr_pages = compound_nr(page);
6716 			ug->nr_huge += nr_pages;
6717 		}
6718 		if (PageAnon(page))
6719 			ug->nr_anon += nr_pages;
6720 		else {
6721 			ug->nr_file += nr_pages;
6722 			if (PageSwapBacked(page))
6723 				ug->nr_shmem += nr_pages;
6724 		}
6725 		ug->pgpgout++;
6726 	} else {
6727 		ug->nr_kmem += compound_nr(page);
6728 		__ClearPageKmemcg(page);
6729 	}
6730 
6731 	ug->dummy_page = page;
6732 	page->mem_cgroup = NULL;
6733 }
6734 
6735 static void uncharge_list(struct list_head *page_list)
6736 {
6737 	struct uncharge_gather ug;
6738 	struct list_head *next;
6739 
6740 	uncharge_gather_clear(&ug);
6741 
6742 	/*
6743 	 * Note that the list can be a single page->lru; hence the
6744 	 * do-while loop instead of a simple list_for_each_entry().
6745 	 */
6746 	next = page_list->next;
6747 	do {
6748 		struct page *page;
6749 
6750 		page = list_entry(next, struct page, lru);
6751 		next = page->lru.next;
6752 
6753 		uncharge_page(page, &ug);
6754 	} while (next != page_list);
6755 
6756 	if (ug.memcg)
6757 		uncharge_batch(&ug);
6758 }
6759 
6760 /**
6761  * mem_cgroup_uncharge - uncharge a page
6762  * @page: page to uncharge
6763  *
6764  * Uncharge a page previously charged with mem_cgroup_try_charge() and
6765  * mem_cgroup_commit_charge().
6766  */
6767 void mem_cgroup_uncharge(struct page *page)
6768 {
6769 	struct uncharge_gather ug;
6770 
6771 	if (mem_cgroup_disabled())
6772 		return;
6773 
6774 	/* Don't touch page->lru of any random page, pre-check: */
6775 	if (!page->mem_cgroup)
6776 		return;
6777 
6778 	uncharge_gather_clear(&ug);
6779 	uncharge_page(page, &ug);
6780 	uncharge_batch(&ug);
6781 }
6782 
6783 /**
6784  * mem_cgroup_uncharge_list - uncharge a list of page
6785  * @page_list: list of pages to uncharge
6786  *
6787  * Uncharge a list of pages previously charged with
6788  * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6789  */
6790 void mem_cgroup_uncharge_list(struct list_head *page_list)
6791 {
6792 	if (mem_cgroup_disabled())
6793 		return;
6794 
6795 	if (!list_empty(page_list))
6796 		uncharge_list(page_list);
6797 }
6798 
6799 /**
6800  * mem_cgroup_migrate - charge a page's replacement
6801  * @oldpage: currently circulating page
6802  * @newpage: replacement page
6803  *
6804  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6805  * be uncharged upon free.
6806  *
6807  * Both pages must be locked, @newpage->mapping must be set up.
6808  */
6809 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6810 {
6811 	struct mem_cgroup *memcg;
6812 	unsigned int nr_pages;
6813 	unsigned long flags;
6814 
6815 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6816 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6817 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6818 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6819 		       newpage);
6820 
6821 	if (mem_cgroup_disabled())
6822 		return;
6823 
6824 	/* Page cache replacement: new page already charged? */
6825 	if (newpage->mem_cgroup)
6826 		return;
6827 
6828 	/* Swapcache readahead pages can get replaced before being charged */
6829 	memcg = oldpage->mem_cgroup;
6830 	if (!memcg)
6831 		return;
6832 
6833 	/* Force-charge the new page. The old one will be freed soon */
6834 	nr_pages = hpage_nr_pages(newpage);
6835 
6836 	page_counter_charge(&memcg->memory, nr_pages);
6837 	if (do_memsw_account())
6838 		page_counter_charge(&memcg->memsw, nr_pages);
6839 	css_get_many(&memcg->css, nr_pages);
6840 
6841 	commit_charge(newpage, memcg, false);
6842 
6843 	local_irq_save(flags);
6844 	mem_cgroup_charge_statistics(memcg, newpage, PageTransHuge(newpage),
6845 			nr_pages);
6846 	memcg_check_events(memcg, newpage);
6847 	local_irq_restore(flags);
6848 }
6849 
6850 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6851 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6852 
6853 void mem_cgroup_sk_alloc(struct sock *sk)
6854 {
6855 	struct mem_cgroup *memcg;
6856 
6857 	if (!mem_cgroup_sockets_enabled)
6858 		return;
6859 
6860 	/* Do not associate the sock with unrelated interrupted task's memcg. */
6861 	if (in_interrupt())
6862 		return;
6863 
6864 	rcu_read_lock();
6865 	memcg = mem_cgroup_from_task(current);
6866 	if (memcg == root_mem_cgroup)
6867 		goto out;
6868 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6869 		goto out;
6870 	if (css_tryget(&memcg->css))
6871 		sk->sk_memcg = memcg;
6872 out:
6873 	rcu_read_unlock();
6874 }
6875 
6876 void mem_cgroup_sk_free(struct sock *sk)
6877 {
6878 	if (sk->sk_memcg)
6879 		css_put(&sk->sk_memcg->css);
6880 }
6881 
6882 /**
6883  * mem_cgroup_charge_skmem - charge socket memory
6884  * @memcg: memcg to charge
6885  * @nr_pages: number of pages to charge
6886  *
6887  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6888  * @memcg's configured limit, %false if the charge had to be forced.
6889  */
6890 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6891 {
6892 	gfp_t gfp_mask = GFP_KERNEL;
6893 
6894 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6895 		struct page_counter *fail;
6896 
6897 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6898 			memcg->tcpmem_pressure = 0;
6899 			return true;
6900 		}
6901 		page_counter_charge(&memcg->tcpmem, nr_pages);
6902 		memcg->tcpmem_pressure = 1;
6903 		return false;
6904 	}
6905 
6906 	/* Don't block in the packet receive path */
6907 	if (in_softirq())
6908 		gfp_mask = GFP_NOWAIT;
6909 
6910 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6911 
6912 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6913 		return true;
6914 
6915 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
6916 	return false;
6917 }
6918 
6919 /**
6920  * mem_cgroup_uncharge_skmem - uncharge socket memory
6921  * @memcg: memcg to uncharge
6922  * @nr_pages: number of pages to uncharge
6923  */
6924 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6925 {
6926 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6927 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
6928 		return;
6929 	}
6930 
6931 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6932 
6933 	refill_stock(memcg, nr_pages);
6934 }
6935 
6936 static int __init cgroup_memory(char *s)
6937 {
6938 	char *token;
6939 
6940 	while ((token = strsep(&s, ",")) != NULL) {
6941 		if (!*token)
6942 			continue;
6943 		if (!strcmp(token, "nosocket"))
6944 			cgroup_memory_nosocket = true;
6945 		if (!strcmp(token, "nokmem"))
6946 			cgroup_memory_nokmem = true;
6947 	}
6948 	return 0;
6949 }
6950 __setup("cgroup.memory=", cgroup_memory);
6951 
6952 /*
6953  * subsys_initcall() for memory controller.
6954  *
6955  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
6956  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
6957  * basically everything that doesn't depend on a specific mem_cgroup structure
6958  * should be initialized from here.
6959  */
6960 static int __init mem_cgroup_init(void)
6961 {
6962 	int cpu, node;
6963 
6964 #ifdef CONFIG_MEMCG_KMEM
6965 	/*
6966 	 * Kmem cache creation is mostly done with the slab_mutex held,
6967 	 * so use a workqueue with limited concurrency to avoid stalling
6968 	 * all worker threads in case lots of cgroups are created and
6969 	 * destroyed simultaneously.
6970 	 */
6971 	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
6972 	BUG_ON(!memcg_kmem_cache_wq);
6973 #endif
6974 
6975 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
6976 				  memcg_hotplug_cpu_dead);
6977 
6978 	for_each_possible_cpu(cpu)
6979 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
6980 			  drain_local_stock);
6981 
6982 	for_each_node(node) {
6983 		struct mem_cgroup_tree_per_node *rtpn;
6984 
6985 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
6986 				    node_online(node) ? node : NUMA_NO_NODE);
6987 
6988 		rtpn->rb_root = RB_ROOT;
6989 		rtpn->rb_rightmost = NULL;
6990 		spin_lock_init(&rtpn->lock);
6991 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6992 	}
6993 
6994 	return 0;
6995 }
6996 subsys_initcall(mem_cgroup_init);
6997 
6998 #ifdef CONFIG_MEMCG_SWAP
6999 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7000 {
7001 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7002 		/*
7003 		 * The root cgroup cannot be destroyed, so it's refcount must
7004 		 * always be >= 1.
7005 		 */
7006 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7007 			VM_BUG_ON(1);
7008 			break;
7009 		}
7010 		memcg = parent_mem_cgroup(memcg);
7011 		if (!memcg)
7012 			memcg = root_mem_cgroup;
7013 	}
7014 	return memcg;
7015 }
7016 
7017 /**
7018  * mem_cgroup_swapout - transfer a memsw charge to swap
7019  * @page: page whose memsw charge to transfer
7020  * @entry: swap entry to move the charge to
7021  *
7022  * Transfer the memsw charge of @page to @entry.
7023  */
7024 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7025 {
7026 	struct mem_cgroup *memcg, *swap_memcg;
7027 	unsigned int nr_entries;
7028 	unsigned short oldid;
7029 
7030 	VM_BUG_ON_PAGE(PageLRU(page), page);
7031 	VM_BUG_ON_PAGE(page_count(page), page);
7032 
7033 	if (!do_memsw_account())
7034 		return;
7035 
7036 	memcg = page->mem_cgroup;
7037 
7038 	/* Readahead page, never charged */
7039 	if (!memcg)
7040 		return;
7041 
7042 	/*
7043 	 * In case the memcg owning these pages has been offlined and doesn't
7044 	 * have an ID allocated to it anymore, charge the closest online
7045 	 * ancestor for the swap instead and transfer the memory+swap charge.
7046 	 */
7047 	swap_memcg = mem_cgroup_id_get_online(memcg);
7048 	nr_entries = hpage_nr_pages(page);
7049 	/* Get references for the tail pages, too */
7050 	if (nr_entries > 1)
7051 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7052 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7053 				   nr_entries);
7054 	VM_BUG_ON_PAGE(oldid, page);
7055 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7056 
7057 	page->mem_cgroup = NULL;
7058 
7059 	if (!mem_cgroup_is_root(memcg))
7060 		page_counter_uncharge(&memcg->memory, nr_entries);
7061 
7062 	if (memcg != swap_memcg) {
7063 		if (!mem_cgroup_is_root(swap_memcg))
7064 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7065 		page_counter_uncharge(&memcg->memsw, nr_entries);
7066 	}
7067 
7068 	/*
7069 	 * Interrupts should be disabled here because the caller holds the
7070 	 * i_pages lock which is taken with interrupts-off. It is
7071 	 * important here to have the interrupts disabled because it is the
7072 	 * only synchronisation we have for updating the per-CPU variables.
7073 	 */
7074 	VM_BUG_ON(!irqs_disabled());
7075 	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
7076 				     -nr_entries);
7077 	memcg_check_events(memcg, page);
7078 
7079 	if (!mem_cgroup_is_root(memcg))
7080 		css_put_many(&memcg->css, nr_entries);
7081 }
7082 
7083 /**
7084  * mem_cgroup_try_charge_swap - try charging swap space for a page
7085  * @page: page being added to swap
7086  * @entry: swap entry to charge
7087  *
7088  * Try to charge @page's memcg for the swap space at @entry.
7089  *
7090  * Returns 0 on success, -ENOMEM on failure.
7091  */
7092 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7093 {
7094 	unsigned int nr_pages = hpage_nr_pages(page);
7095 	struct page_counter *counter;
7096 	struct mem_cgroup *memcg;
7097 	unsigned short oldid;
7098 
7099 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
7100 		return 0;
7101 
7102 	memcg = page->mem_cgroup;
7103 
7104 	/* Readahead page, never charged */
7105 	if (!memcg)
7106 		return 0;
7107 
7108 	if (!entry.val) {
7109 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7110 		return 0;
7111 	}
7112 
7113 	memcg = mem_cgroup_id_get_online(memcg);
7114 
7115 	if (!mem_cgroup_is_root(memcg) &&
7116 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7117 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7118 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7119 		mem_cgroup_id_put(memcg);
7120 		return -ENOMEM;
7121 	}
7122 
7123 	/* Get references for the tail pages, too */
7124 	if (nr_pages > 1)
7125 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7126 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7127 	VM_BUG_ON_PAGE(oldid, page);
7128 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7129 
7130 	return 0;
7131 }
7132 
7133 /**
7134  * mem_cgroup_uncharge_swap - uncharge swap space
7135  * @entry: swap entry to uncharge
7136  * @nr_pages: the amount of swap space to uncharge
7137  */
7138 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7139 {
7140 	struct mem_cgroup *memcg;
7141 	unsigned short id;
7142 
7143 	if (!do_swap_account)
7144 		return;
7145 
7146 	id = swap_cgroup_record(entry, 0, nr_pages);
7147 	rcu_read_lock();
7148 	memcg = mem_cgroup_from_id(id);
7149 	if (memcg) {
7150 		if (!mem_cgroup_is_root(memcg)) {
7151 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7152 				page_counter_uncharge(&memcg->swap, nr_pages);
7153 			else
7154 				page_counter_uncharge(&memcg->memsw, nr_pages);
7155 		}
7156 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7157 		mem_cgroup_id_put_many(memcg, nr_pages);
7158 	}
7159 	rcu_read_unlock();
7160 }
7161 
7162 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7163 {
7164 	long nr_swap_pages = get_nr_swap_pages();
7165 
7166 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7167 		return nr_swap_pages;
7168 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7169 		nr_swap_pages = min_t(long, nr_swap_pages,
7170 				      READ_ONCE(memcg->swap.max) -
7171 				      page_counter_read(&memcg->swap));
7172 	return nr_swap_pages;
7173 }
7174 
7175 bool mem_cgroup_swap_full(struct page *page)
7176 {
7177 	struct mem_cgroup *memcg;
7178 
7179 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7180 
7181 	if (vm_swap_full())
7182 		return true;
7183 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7184 		return false;
7185 
7186 	memcg = page->mem_cgroup;
7187 	if (!memcg)
7188 		return false;
7189 
7190 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7191 		unsigned long usage = page_counter_read(&memcg->swap);
7192 
7193 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7194 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7195 			return true;
7196 	}
7197 
7198 	return false;
7199 }
7200 
7201 /* for remember boot option*/
7202 #ifdef CONFIG_MEMCG_SWAP_ENABLED
7203 static int really_do_swap_account __initdata = 1;
7204 #else
7205 static int really_do_swap_account __initdata;
7206 #endif
7207 
7208 static int __init enable_swap_account(char *s)
7209 {
7210 	if (!strcmp(s, "1"))
7211 		really_do_swap_account = 1;
7212 	else if (!strcmp(s, "0"))
7213 		really_do_swap_account = 0;
7214 	return 1;
7215 }
7216 __setup("swapaccount=", enable_swap_account);
7217 
7218 static u64 swap_current_read(struct cgroup_subsys_state *css,
7219 			     struct cftype *cft)
7220 {
7221 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7222 
7223 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7224 }
7225 
7226 static int swap_high_show(struct seq_file *m, void *v)
7227 {
7228 	return seq_puts_memcg_tunable(m,
7229 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7230 }
7231 
7232 static ssize_t swap_high_write(struct kernfs_open_file *of,
7233 			       char *buf, size_t nbytes, loff_t off)
7234 {
7235 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7236 	unsigned long high;
7237 	int err;
7238 
7239 	buf = strstrip(buf);
7240 	err = page_counter_memparse(buf, "max", &high);
7241 	if (err)
7242 		return err;
7243 
7244 	page_counter_set_high(&memcg->swap, high);
7245 
7246 	return nbytes;
7247 }
7248 
7249 static int swap_max_show(struct seq_file *m, void *v)
7250 {
7251 	return seq_puts_memcg_tunable(m,
7252 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7253 }
7254 
7255 static ssize_t swap_max_write(struct kernfs_open_file *of,
7256 			      char *buf, size_t nbytes, loff_t off)
7257 {
7258 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7259 	unsigned long max;
7260 	int err;
7261 
7262 	buf = strstrip(buf);
7263 	err = page_counter_memparse(buf, "max", &max);
7264 	if (err)
7265 		return err;
7266 
7267 	xchg(&memcg->swap.max, max);
7268 
7269 	return nbytes;
7270 }
7271 
7272 static int swap_events_show(struct seq_file *m, void *v)
7273 {
7274 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7275 
7276 	seq_printf(m, "high %lu\n",
7277 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7278 	seq_printf(m, "max %lu\n",
7279 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7280 	seq_printf(m, "fail %lu\n",
7281 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7282 
7283 	return 0;
7284 }
7285 
7286 static struct cftype swap_files[] = {
7287 	{
7288 		.name = "swap.current",
7289 		.flags = CFTYPE_NOT_ON_ROOT,
7290 		.read_u64 = swap_current_read,
7291 	},
7292 	{
7293 		.name = "swap.high",
7294 		.flags = CFTYPE_NOT_ON_ROOT,
7295 		.seq_show = swap_high_show,
7296 		.write = swap_high_write,
7297 	},
7298 	{
7299 		.name = "swap.max",
7300 		.flags = CFTYPE_NOT_ON_ROOT,
7301 		.seq_show = swap_max_show,
7302 		.write = swap_max_write,
7303 	},
7304 	{
7305 		.name = "swap.events",
7306 		.flags = CFTYPE_NOT_ON_ROOT,
7307 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7308 		.seq_show = swap_events_show,
7309 	},
7310 	{ }	/* terminate */
7311 };
7312 
7313 static struct cftype memsw_cgroup_files[] = {
7314 	{
7315 		.name = "memsw.usage_in_bytes",
7316 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7317 		.read_u64 = mem_cgroup_read_u64,
7318 	},
7319 	{
7320 		.name = "memsw.max_usage_in_bytes",
7321 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7322 		.write = mem_cgroup_reset,
7323 		.read_u64 = mem_cgroup_read_u64,
7324 	},
7325 	{
7326 		.name = "memsw.limit_in_bytes",
7327 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7328 		.write = mem_cgroup_write,
7329 		.read_u64 = mem_cgroup_read_u64,
7330 	},
7331 	{
7332 		.name = "memsw.failcnt",
7333 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7334 		.write = mem_cgroup_reset,
7335 		.read_u64 = mem_cgroup_read_u64,
7336 	},
7337 	{ },	/* terminate */
7338 };
7339 
7340 static int __init mem_cgroup_swap_init(void)
7341 {
7342 	if (!mem_cgroup_disabled() && really_do_swap_account) {
7343 		do_swap_account = 1;
7344 		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
7345 					       swap_files));
7346 		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
7347 						  memsw_cgroup_files));
7348 	}
7349 	return 0;
7350 }
7351 subsys_initcall(mem_cgroup_swap_init);
7352 
7353 #endif /* CONFIG_MEMCG_SWAP */
7354