xref: /openbmc/linux/mm/memcontrol.c (revision 2c64e9cb)
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * Kernel Memory Controller
14  * Copyright (C) 2012 Parallels Inc. and Google Inc.
15  * Authors: Glauber Costa and Suleiman Souhlal
16  *
17  * Native page reclaim
18  * Charge lifetime sanitation
19  * Lockless page tracking & accounting
20  * Unified hierarchy configuration model
21  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
22  *
23  * This program is free software; you can redistribute it and/or modify
24  * it under the terms of the GNU General Public License as published by
25  * the Free Software Foundation; either version 2 of the License, or
26  * (at your option) any later version.
27  *
28  * This program is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
31  * GNU General Public License for more details.
32  */
33 
34 #include <linux/page_counter.h>
35 #include <linux/memcontrol.h>
36 #include <linux/cgroup.h>
37 #include <linux/mm.h>
38 #include <linux/sched/mm.h>
39 #include <linux/shmem_fs.h>
40 #include <linux/hugetlb.h>
41 #include <linux/pagemap.h>
42 #include <linux/vm_event_item.h>
43 #include <linux/smp.h>
44 #include <linux/page-flags.h>
45 #include <linux/backing-dev.h>
46 #include <linux/bit_spinlock.h>
47 #include <linux/rcupdate.h>
48 #include <linux/limits.h>
49 #include <linux/export.h>
50 #include <linux/mutex.h>
51 #include <linux/rbtree.h>
52 #include <linux/slab.h>
53 #include <linux/swap.h>
54 #include <linux/swapops.h>
55 #include <linux/spinlock.h>
56 #include <linux/eventfd.h>
57 #include <linux/poll.h>
58 #include <linux/sort.h>
59 #include <linux/fs.h>
60 #include <linux/seq_file.h>
61 #include <linux/vmpressure.h>
62 #include <linux/mm_inline.h>
63 #include <linux/swap_cgroup.h>
64 #include <linux/cpu.h>
65 #include <linux/oom.h>
66 #include <linux/lockdep.h>
67 #include <linux/file.h>
68 #include <linux/tracehook.h>
69 #include "internal.h"
70 #include <net/sock.h>
71 #include <net/ip.h>
72 #include "slab.h"
73 
74 #include <linux/uaccess.h>
75 
76 #include <trace/events/vmscan.h>
77 
78 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
79 EXPORT_SYMBOL(memory_cgrp_subsys);
80 
81 struct mem_cgroup *root_mem_cgroup __read_mostly;
82 
83 #define MEM_CGROUP_RECLAIM_RETRIES	5
84 
85 /* Socket memory accounting disabled? */
86 static bool cgroup_memory_nosocket;
87 
88 /* Kernel memory accounting disabled? */
89 static bool cgroup_memory_nokmem;
90 
91 /* Whether the swap controller is active */
92 #ifdef CONFIG_MEMCG_SWAP
93 int do_swap_account __read_mostly;
94 #else
95 #define do_swap_account		0
96 #endif
97 
98 /* Whether legacy memory+swap accounting is active */
99 static bool do_memsw_account(void)
100 {
101 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
102 }
103 
104 static const char *const mem_cgroup_lru_names[] = {
105 	"inactive_anon",
106 	"active_anon",
107 	"inactive_file",
108 	"active_file",
109 	"unevictable",
110 };
111 
112 #define THRESHOLDS_EVENTS_TARGET 128
113 #define SOFTLIMIT_EVENTS_TARGET 1024
114 #define NUMAINFO_EVENTS_TARGET	1024
115 
116 /*
117  * Cgroups above their limits are maintained in a RB-Tree, independent of
118  * their hierarchy representation
119  */
120 
121 struct mem_cgroup_tree_per_node {
122 	struct rb_root rb_root;
123 	struct rb_node *rb_rightmost;
124 	spinlock_t lock;
125 };
126 
127 struct mem_cgroup_tree {
128 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
129 };
130 
131 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
132 
133 /* for OOM */
134 struct mem_cgroup_eventfd_list {
135 	struct list_head list;
136 	struct eventfd_ctx *eventfd;
137 };
138 
139 /*
140  * cgroup_event represents events which userspace want to receive.
141  */
142 struct mem_cgroup_event {
143 	/*
144 	 * memcg which the event belongs to.
145 	 */
146 	struct mem_cgroup *memcg;
147 	/*
148 	 * eventfd to signal userspace about the event.
149 	 */
150 	struct eventfd_ctx *eventfd;
151 	/*
152 	 * Each of these stored in a list by the cgroup.
153 	 */
154 	struct list_head list;
155 	/*
156 	 * register_event() callback will be used to add new userspace
157 	 * waiter for changes related to this event.  Use eventfd_signal()
158 	 * on eventfd to send notification to userspace.
159 	 */
160 	int (*register_event)(struct mem_cgroup *memcg,
161 			      struct eventfd_ctx *eventfd, const char *args);
162 	/*
163 	 * unregister_event() callback will be called when userspace closes
164 	 * the eventfd or on cgroup removing.  This callback must be set,
165 	 * if you want provide notification functionality.
166 	 */
167 	void (*unregister_event)(struct mem_cgroup *memcg,
168 				 struct eventfd_ctx *eventfd);
169 	/*
170 	 * All fields below needed to unregister event when
171 	 * userspace closes eventfd.
172 	 */
173 	poll_table pt;
174 	wait_queue_head_t *wqh;
175 	wait_queue_entry_t wait;
176 	struct work_struct remove;
177 };
178 
179 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
180 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
181 
182 /* Stuffs for move charges at task migration. */
183 /*
184  * Types of charges to be moved.
185  */
186 #define MOVE_ANON	0x1U
187 #define MOVE_FILE	0x2U
188 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
189 
190 /* "mc" and its members are protected by cgroup_mutex */
191 static struct move_charge_struct {
192 	spinlock_t	  lock; /* for from, to */
193 	struct mm_struct  *mm;
194 	struct mem_cgroup *from;
195 	struct mem_cgroup *to;
196 	unsigned long flags;
197 	unsigned long precharge;
198 	unsigned long moved_charge;
199 	unsigned long moved_swap;
200 	struct task_struct *moving_task;	/* a task moving charges */
201 	wait_queue_head_t waitq;		/* a waitq for other context */
202 } mc = {
203 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
204 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
205 };
206 
207 /*
208  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
209  * limit reclaim to prevent infinite loops, if they ever occur.
210  */
211 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
212 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
213 
214 enum charge_type {
215 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
216 	MEM_CGROUP_CHARGE_TYPE_ANON,
217 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
218 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
219 	NR_CHARGE_TYPE,
220 };
221 
222 /* for encoding cft->private value on file */
223 enum res_type {
224 	_MEM,
225 	_MEMSWAP,
226 	_OOM_TYPE,
227 	_KMEM,
228 	_TCP,
229 };
230 
231 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
232 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
233 #define MEMFILE_ATTR(val)	((val) & 0xffff)
234 /* Used for OOM nofiier */
235 #define OOM_CONTROL		(0)
236 
237 /*
238  * Iteration constructs for visiting all cgroups (under a tree).  If
239  * loops are exited prematurely (break), mem_cgroup_iter_break() must
240  * be used for reference counting.
241  */
242 #define for_each_mem_cgroup_tree(iter, root)		\
243 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
244 	     iter != NULL;				\
245 	     iter = mem_cgroup_iter(root, iter, NULL))
246 
247 #define for_each_mem_cgroup(iter)			\
248 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
249 	     iter != NULL;				\
250 	     iter = mem_cgroup_iter(NULL, iter, NULL))
251 
252 static inline bool should_force_charge(void)
253 {
254 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
255 		(current->flags & PF_EXITING);
256 }
257 
258 /* Some nice accessors for the vmpressure. */
259 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
260 {
261 	if (!memcg)
262 		memcg = root_mem_cgroup;
263 	return &memcg->vmpressure;
264 }
265 
266 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
267 {
268 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
269 }
270 
271 #ifdef CONFIG_MEMCG_KMEM
272 /*
273  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
274  * The main reason for not using cgroup id for this:
275  *  this works better in sparse environments, where we have a lot of memcgs,
276  *  but only a few kmem-limited. Or also, if we have, for instance, 200
277  *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
278  *  200 entry array for that.
279  *
280  * The current size of the caches array is stored in memcg_nr_cache_ids. It
281  * will double each time we have to increase it.
282  */
283 static DEFINE_IDA(memcg_cache_ida);
284 int memcg_nr_cache_ids;
285 
286 /* Protects memcg_nr_cache_ids */
287 static DECLARE_RWSEM(memcg_cache_ids_sem);
288 
289 void memcg_get_cache_ids(void)
290 {
291 	down_read(&memcg_cache_ids_sem);
292 }
293 
294 void memcg_put_cache_ids(void)
295 {
296 	up_read(&memcg_cache_ids_sem);
297 }
298 
299 /*
300  * MIN_SIZE is different than 1, because we would like to avoid going through
301  * the alloc/free process all the time. In a small machine, 4 kmem-limited
302  * cgroups is a reasonable guess. In the future, it could be a parameter or
303  * tunable, but that is strictly not necessary.
304  *
305  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
306  * this constant directly from cgroup, but it is understandable that this is
307  * better kept as an internal representation in cgroup.c. In any case, the
308  * cgrp_id space is not getting any smaller, and we don't have to necessarily
309  * increase ours as well if it increases.
310  */
311 #define MEMCG_CACHES_MIN_SIZE 4
312 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
313 
314 /*
315  * A lot of the calls to the cache allocation functions are expected to be
316  * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
317  * conditional to this static branch, we'll have to allow modules that does
318  * kmem_cache_alloc and the such to see this symbol as well
319  */
320 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
321 EXPORT_SYMBOL(memcg_kmem_enabled_key);
322 
323 struct workqueue_struct *memcg_kmem_cache_wq;
324 
325 static int memcg_shrinker_map_size;
326 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
327 
328 static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
329 {
330 	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
331 }
332 
333 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
334 					 int size, int old_size)
335 {
336 	struct memcg_shrinker_map *new, *old;
337 	int nid;
338 
339 	lockdep_assert_held(&memcg_shrinker_map_mutex);
340 
341 	for_each_node(nid) {
342 		old = rcu_dereference_protected(
343 			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
344 		/* Not yet online memcg */
345 		if (!old)
346 			return 0;
347 
348 		new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
349 		if (!new)
350 			return -ENOMEM;
351 
352 		/* Set all old bits, clear all new bits */
353 		memset(new->map, (int)0xff, old_size);
354 		memset((void *)new->map + old_size, 0, size - old_size);
355 
356 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
357 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
358 	}
359 
360 	return 0;
361 }
362 
363 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
364 {
365 	struct mem_cgroup_per_node *pn;
366 	struct memcg_shrinker_map *map;
367 	int nid;
368 
369 	if (mem_cgroup_is_root(memcg))
370 		return;
371 
372 	for_each_node(nid) {
373 		pn = mem_cgroup_nodeinfo(memcg, nid);
374 		map = rcu_dereference_protected(pn->shrinker_map, true);
375 		if (map)
376 			kvfree(map);
377 		rcu_assign_pointer(pn->shrinker_map, NULL);
378 	}
379 }
380 
381 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
382 {
383 	struct memcg_shrinker_map *map;
384 	int nid, size, ret = 0;
385 
386 	if (mem_cgroup_is_root(memcg))
387 		return 0;
388 
389 	mutex_lock(&memcg_shrinker_map_mutex);
390 	size = memcg_shrinker_map_size;
391 	for_each_node(nid) {
392 		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
393 		if (!map) {
394 			memcg_free_shrinker_maps(memcg);
395 			ret = -ENOMEM;
396 			break;
397 		}
398 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
399 	}
400 	mutex_unlock(&memcg_shrinker_map_mutex);
401 
402 	return ret;
403 }
404 
405 int memcg_expand_shrinker_maps(int new_id)
406 {
407 	int size, old_size, ret = 0;
408 	struct mem_cgroup *memcg;
409 
410 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
411 	old_size = memcg_shrinker_map_size;
412 	if (size <= old_size)
413 		return 0;
414 
415 	mutex_lock(&memcg_shrinker_map_mutex);
416 	if (!root_mem_cgroup)
417 		goto unlock;
418 
419 	for_each_mem_cgroup(memcg) {
420 		if (mem_cgroup_is_root(memcg))
421 			continue;
422 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
423 		if (ret)
424 			goto unlock;
425 	}
426 unlock:
427 	if (!ret)
428 		memcg_shrinker_map_size = size;
429 	mutex_unlock(&memcg_shrinker_map_mutex);
430 	return ret;
431 }
432 
433 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
434 {
435 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
436 		struct memcg_shrinker_map *map;
437 
438 		rcu_read_lock();
439 		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
440 		/* Pairs with smp mb in shrink_slab() */
441 		smp_mb__before_atomic();
442 		set_bit(shrinker_id, map->map);
443 		rcu_read_unlock();
444 	}
445 }
446 
447 #else /* CONFIG_MEMCG_KMEM */
448 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
449 {
450 	return 0;
451 }
452 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
453 #endif /* CONFIG_MEMCG_KMEM */
454 
455 /**
456  * mem_cgroup_css_from_page - css of the memcg associated with a page
457  * @page: page of interest
458  *
459  * If memcg is bound to the default hierarchy, css of the memcg associated
460  * with @page is returned.  The returned css remains associated with @page
461  * until it is released.
462  *
463  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
464  * is returned.
465  */
466 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
467 {
468 	struct mem_cgroup *memcg;
469 
470 	memcg = page->mem_cgroup;
471 
472 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
473 		memcg = root_mem_cgroup;
474 
475 	return &memcg->css;
476 }
477 
478 /**
479  * page_cgroup_ino - return inode number of the memcg a page is charged to
480  * @page: the page
481  *
482  * Look up the closest online ancestor of the memory cgroup @page is charged to
483  * and return its inode number or 0 if @page is not charged to any cgroup. It
484  * is safe to call this function without holding a reference to @page.
485  *
486  * Note, this function is inherently racy, because there is nothing to prevent
487  * the cgroup inode from getting torn down and potentially reallocated a moment
488  * after page_cgroup_ino() returns, so it only should be used by callers that
489  * do not care (such as procfs interfaces).
490  */
491 ino_t page_cgroup_ino(struct page *page)
492 {
493 	struct mem_cgroup *memcg;
494 	unsigned long ino = 0;
495 
496 	rcu_read_lock();
497 	memcg = READ_ONCE(page->mem_cgroup);
498 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
499 		memcg = parent_mem_cgroup(memcg);
500 	if (memcg)
501 		ino = cgroup_ino(memcg->css.cgroup);
502 	rcu_read_unlock();
503 	return ino;
504 }
505 
506 static struct mem_cgroup_per_node *
507 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
508 {
509 	int nid = page_to_nid(page);
510 
511 	return memcg->nodeinfo[nid];
512 }
513 
514 static struct mem_cgroup_tree_per_node *
515 soft_limit_tree_node(int nid)
516 {
517 	return soft_limit_tree.rb_tree_per_node[nid];
518 }
519 
520 static struct mem_cgroup_tree_per_node *
521 soft_limit_tree_from_page(struct page *page)
522 {
523 	int nid = page_to_nid(page);
524 
525 	return soft_limit_tree.rb_tree_per_node[nid];
526 }
527 
528 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
529 					 struct mem_cgroup_tree_per_node *mctz,
530 					 unsigned long new_usage_in_excess)
531 {
532 	struct rb_node **p = &mctz->rb_root.rb_node;
533 	struct rb_node *parent = NULL;
534 	struct mem_cgroup_per_node *mz_node;
535 	bool rightmost = true;
536 
537 	if (mz->on_tree)
538 		return;
539 
540 	mz->usage_in_excess = new_usage_in_excess;
541 	if (!mz->usage_in_excess)
542 		return;
543 	while (*p) {
544 		parent = *p;
545 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
546 					tree_node);
547 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
548 			p = &(*p)->rb_left;
549 			rightmost = false;
550 		}
551 
552 		/*
553 		 * We can't avoid mem cgroups that are over their soft
554 		 * limit by the same amount
555 		 */
556 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
557 			p = &(*p)->rb_right;
558 	}
559 
560 	if (rightmost)
561 		mctz->rb_rightmost = &mz->tree_node;
562 
563 	rb_link_node(&mz->tree_node, parent, p);
564 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
565 	mz->on_tree = true;
566 }
567 
568 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
569 					 struct mem_cgroup_tree_per_node *mctz)
570 {
571 	if (!mz->on_tree)
572 		return;
573 
574 	if (&mz->tree_node == mctz->rb_rightmost)
575 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
576 
577 	rb_erase(&mz->tree_node, &mctz->rb_root);
578 	mz->on_tree = false;
579 }
580 
581 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
582 				       struct mem_cgroup_tree_per_node *mctz)
583 {
584 	unsigned long flags;
585 
586 	spin_lock_irqsave(&mctz->lock, flags);
587 	__mem_cgroup_remove_exceeded(mz, mctz);
588 	spin_unlock_irqrestore(&mctz->lock, flags);
589 }
590 
591 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
592 {
593 	unsigned long nr_pages = page_counter_read(&memcg->memory);
594 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
595 	unsigned long excess = 0;
596 
597 	if (nr_pages > soft_limit)
598 		excess = nr_pages - soft_limit;
599 
600 	return excess;
601 }
602 
603 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
604 {
605 	unsigned long excess;
606 	struct mem_cgroup_per_node *mz;
607 	struct mem_cgroup_tree_per_node *mctz;
608 
609 	mctz = soft_limit_tree_from_page(page);
610 	if (!mctz)
611 		return;
612 	/*
613 	 * Necessary to update all ancestors when hierarchy is used.
614 	 * because their event counter is not touched.
615 	 */
616 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
617 		mz = mem_cgroup_page_nodeinfo(memcg, page);
618 		excess = soft_limit_excess(memcg);
619 		/*
620 		 * We have to update the tree if mz is on RB-tree or
621 		 * mem is over its softlimit.
622 		 */
623 		if (excess || mz->on_tree) {
624 			unsigned long flags;
625 
626 			spin_lock_irqsave(&mctz->lock, flags);
627 			/* if on-tree, remove it */
628 			if (mz->on_tree)
629 				__mem_cgroup_remove_exceeded(mz, mctz);
630 			/*
631 			 * Insert again. mz->usage_in_excess will be updated.
632 			 * If excess is 0, no tree ops.
633 			 */
634 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
635 			spin_unlock_irqrestore(&mctz->lock, flags);
636 		}
637 	}
638 }
639 
640 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
641 {
642 	struct mem_cgroup_tree_per_node *mctz;
643 	struct mem_cgroup_per_node *mz;
644 	int nid;
645 
646 	for_each_node(nid) {
647 		mz = mem_cgroup_nodeinfo(memcg, nid);
648 		mctz = soft_limit_tree_node(nid);
649 		if (mctz)
650 			mem_cgroup_remove_exceeded(mz, mctz);
651 	}
652 }
653 
654 static struct mem_cgroup_per_node *
655 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
656 {
657 	struct mem_cgroup_per_node *mz;
658 
659 retry:
660 	mz = NULL;
661 	if (!mctz->rb_rightmost)
662 		goto done;		/* Nothing to reclaim from */
663 
664 	mz = rb_entry(mctz->rb_rightmost,
665 		      struct mem_cgroup_per_node, tree_node);
666 	/*
667 	 * Remove the node now but someone else can add it back,
668 	 * we will to add it back at the end of reclaim to its correct
669 	 * position in the tree.
670 	 */
671 	__mem_cgroup_remove_exceeded(mz, mctz);
672 	if (!soft_limit_excess(mz->memcg) ||
673 	    !css_tryget_online(&mz->memcg->css))
674 		goto retry;
675 done:
676 	return mz;
677 }
678 
679 static struct mem_cgroup_per_node *
680 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
681 {
682 	struct mem_cgroup_per_node *mz;
683 
684 	spin_lock_irq(&mctz->lock);
685 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
686 	spin_unlock_irq(&mctz->lock);
687 	return mz;
688 }
689 
690 static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
691 				      int event)
692 {
693 	return atomic_long_read(&memcg->events[event]);
694 }
695 
696 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
697 					 struct page *page,
698 					 bool compound, int nr_pages)
699 {
700 	/*
701 	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
702 	 * counted as CACHE even if it's on ANON LRU.
703 	 */
704 	if (PageAnon(page))
705 		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
706 	else {
707 		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
708 		if (PageSwapBacked(page))
709 			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
710 	}
711 
712 	if (compound) {
713 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
714 		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
715 	}
716 
717 	/* pagein of a big page is an event. So, ignore page size */
718 	if (nr_pages > 0)
719 		__count_memcg_events(memcg, PGPGIN, 1);
720 	else {
721 		__count_memcg_events(memcg, PGPGOUT, 1);
722 		nr_pages = -nr_pages; /* for event */
723 	}
724 
725 	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
726 }
727 
728 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
729 				       enum mem_cgroup_events_target target)
730 {
731 	unsigned long val, next;
732 
733 	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
734 	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
735 	/* from time_after() in jiffies.h */
736 	if ((long)(next - val) < 0) {
737 		switch (target) {
738 		case MEM_CGROUP_TARGET_THRESH:
739 			next = val + THRESHOLDS_EVENTS_TARGET;
740 			break;
741 		case MEM_CGROUP_TARGET_SOFTLIMIT:
742 			next = val + SOFTLIMIT_EVENTS_TARGET;
743 			break;
744 		case MEM_CGROUP_TARGET_NUMAINFO:
745 			next = val + NUMAINFO_EVENTS_TARGET;
746 			break;
747 		default:
748 			break;
749 		}
750 		__this_cpu_write(memcg->stat_cpu->targets[target], next);
751 		return true;
752 	}
753 	return false;
754 }
755 
756 /*
757  * Check events in order.
758  *
759  */
760 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
761 {
762 	/* threshold event is triggered in finer grain than soft limit */
763 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
764 						MEM_CGROUP_TARGET_THRESH))) {
765 		bool do_softlimit;
766 		bool do_numainfo __maybe_unused;
767 
768 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
769 						MEM_CGROUP_TARGET_SOFTLIMIT);
770 #if MAX_NUMNODES > 1
771 		do_numainfo = mem_cgroup_event_ratelimit(memcg,
772 						MEM_CGROUP_TARGET_NUMAINFO);
773 #endif
774 		mem_cgroup_threshold(memcg);
775 		if (unlikely(do_softlimit))
776 			mem_cgroup_update_tree(memcg, page);
777 #if MAX_NUMNODES > 1
778 		if (unlikely(do_numainfo))
779 			atomic_inc(&memcg->numainfo_events);
780 #endif
781 	}
782 }
783 
784 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
785 {
786 	/*
787 	 * mm_update_next_owner() may clear mm->owner to NULL
788 	 * if it races with swapoff, page migration, etc.
789 	 * So this can be called with p == NULL.
790 	 */
791 	if (unlikely(!p))
792 		return NULL;
793 
794 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
795 }
796 EXPORT_SYMBOL(mem_cgroup_from_task);
797 
798 /**
799  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
800  * @mm: mm from which memcg should be extracted. It can be NULL.
801  *
802  * Obtain a reference on mm->memcg and returns it if successful. Otherwise
803  * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
804  * returned.
805  */
806 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
807 {
808 	struct mem_cgroup *memcg;
809 
810 	if (mem_cgroup_disabled())
811 		return NULL;
812 
813 	rcu_read_lock();
814 	do {
815 		/*
816 		 * Page cache insertions can happen withou an
817 		 * actual mm context, e.g. during disk probing
818 		 * on boot, loopback IO, acct() writes etc.
819 		 */
820 		if (unlikely(!mm))
821 			memcg = root_mem_cgroup;
822 		else {
823 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
824 			if (unlikely(!memcg))
825 				memcg = root_mem_cgroup;
826 		}
827 	} while (!css_tryget_online(&memcg->css));
828 	rcu_read_unlock();
829 	return memcg;
830 }
831 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
832 
833 /**
834  * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
835  * @page: page from which memcg should be extracted.
836  *
837  * Obtain a reference on page->memcg and returns it if successful. Otherwise
838  * root_mem_cgroup is returned.
839  */
840 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
841 {
842 	struct mem_cgroup *memcg = page->mem_cgroup;
843 
844 	if (mem_cgroup_disabled())
845 		return NULL;
846 
847 	rcu_read_lock();
848 	if (!memcg || !css_tryget_online(&memcg->css))
849 		memcg = root_mem_cgroup;
850 	rcu_read_unlock();
851 	return memcg;
852 }
853 EXPORT_SYMBOL(get_mem_cgroup_from_page);
854 
855 /**
856  * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
857  */
858 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
859 {
860 	if (unlikely(current->active_memcg)) {
861 		struct mem_cgroup *memcg = root_mem_cgroup;
862 
863 		rcu_read_lock();
864 		if (css_tryget_online(&current->active_memcg->css))
865 			memcg = current->active_memcg;
866 		rcu_read_unlock();
867 		return memcg;
868 	}
869 	return get_mem_cgroup_from_mm(current->mm);
870 }
871 
872 /**
873  * mem_cgroup_iter - iterate over memory cgroup hierarchy
874  * @root: hierarchy root
875  * @prev: previously returned memcg, NULL on first invocation
876  * @reclaim: cookie for shared reclaim walks, NULL for full walks
877  *
878  * Returns references to children of the hierarchy below @root, or
879  * @root itself, or %NULL after a full round-trip.
880  *
881  * Caller must pass the return value in @prev on subsequent
882  * invocations for reference counting, or use mem_cgroup_iter_break()
883  * to cancel a hierarchy walk before the round-trip is complete.
884  *
885  * Reclaimers can specify a node and a priority level in @reclaim to
886  * divide up the memcgs in the hierarchy among all concurrent
887  * reclaimers operating on the same node and priority.
888  */
889 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
890 				   struct mem_cgroup *prev,
891 				   struct mem_cgroup_reclaim_cookie *reclaim)
892 {
893 	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
894 	struct cgroup_subsys_state *css = NULL;
895 	struct mem_cgroup *memcg = NULL;
896 	struct mem_cgroup *pos = NULL;
897 
898 	if (mem_cgroup_disabled())
899 		return NULL;
900 
901 	if (!root)
902 		root = root_mem_cgroup;
903 
904 	if (prev && !reclaim)
905 		pos = prev;
906 
907 	if (!root->use_hierarchy && root != root_mem_cgroup) {
908 		if (prev)
909 			goto out;
910 		return root;
911 	}
912 
913 	rcu_read_lock();
914 
915 	if (reclaim) {
916 		struct mem_cgroup_per_node *mz;
917 
918 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
919 		iter = &mz->iter[reclaim->priority];
920 
921 		if (prev && reclaim->generation != iter->generation)
922 			goto out_unlock;
923 
924 		while (1) {
925 			pos = READ_ONCE(iter->position);
926 			if (!pos || css_tryget(&pos->css))
927 				break;
928 			/*
929 			 * css reference reached zero, so iter->position will
930 			 * be cleared by ->css_released. However, we should not
931 			 * rely on this happening soon, because ->css_released
932 			 * is called from a work queue, and by busy-waiting we
933 			 * might block it. So we clear iter->position right
934 			 * away.
935 			 */
936 			(void)cmpxchg(&iter->position, pos, NULL);
937 		}
938 	}
939 
940 	if (pos)
941 		css = &pos->css;
942 
943 	for (;;) {
944 		css = css_next_descendant_pre(css, &root->css);
945 		if (!css) {
946 			/*
947 			 * Reclaimers share the hierarchy walk, and a
948 			 * new one might jump in right at the end of
949 			 * the hierarchy - make sure they see at least
950 			 * one group and restart from the beginning.
951 			 */
952 			if (!prev)
953 				continue;
954 			break;
955 		}
956 
957 		/*
958 		 * Verify the css and acquire a reference.  The root
959 		 * is provided by the caller, so we know it's alive
960 		 * and kicking, and don't take an extra reference.
961 		 */
962 		memcg = mem_cgroup_from_css(css);
963 
964 		if (css == &root->css)
965 			break;
966 
967 		if (css_tryget(css))
968 			break;
969 
970 		memcg = NULL;
971 	}
972 
973 	if (reclaim) {
974 		/*
975 		 * The position could have already been updated by a competing
976 		 * thread, so check that the value hasn't changed since we read
977 		 * it to avoid reclaiming from the same cgroup twice.
978 		 */
979 		(void)cmpxchg(&iter->position, pos, memcg);
980 
981 		if (pos)
982 			css_put(&pos->css);
983 
984 		if (!memcg)
985 			iter->generation++;
986 		else if (!prev)
987 			reclaim->generation = iter->generation;
988 	}
989 
990 out_unlock:
991 	rcu_read_unlock();
992 out:
993 	if (prev && prev != root)
994 		css_put(&prev->css);
995 
996 	return memcg;
997 }
998 
999 /**
1000  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1001  * @root: hierarchy root
1002  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1003  */
1004 void mem_cgroup_iter_break(struct mem_cgroup *root,
1005 			   struct mem_cgroup *prev)
1006 {
1007 	if (!root)
1008 		root = root_mem_cgroup;
1009 	if (prev && prev != root)
1010 		css_put(&prev->css);
1011 }
1012 
1013 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1014 {
1015 	struct mem_cgroup *memcg = dead_memcg;
1016 	struct mem_cgroup_reclaim_iter *iter;
1017 	struct mem_cgroup_per_node *mz;
1018 	int nid;
1019 	int i;
1020 
1021 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1022 		for_each_node(nid) {
1023 			mz = mem_cgroup_nodeinfo(memcg, nid);
1024 			for (i = 0; i <= DEF_PRIORITY; i++) {
1025 				iter = &mz->iter[i];
1026 				cmpxchg(&iter->position,
1027 					dead_memcg, NULL);
1028 			}
1029 		}
1030 	}
1031 }
1032 
1033 /**
1034  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1035  * @memcg: hierarchy root
1036  * @fn: function to call for each task
1037  * @arg: argument passed to @fn
1038  *
1039  * This function iterates over tasks attached to @memcg or to any of its
1040  * descendants and calls @fn for each task. If @fn returns a non-zero
1041  * value, the function breaks the iteration loop and returns the value.
1042  * Otherwise, it will iterate over all tasks and return 0.
1043  *
1044  * This function must not be called for the root memory cgroup.
1045  */
1046 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1047 			  int (*fn)(struct task_struct *, void *), void *arg)
1048 {
1049 	struct mem_cgroup *iter;
1050 	int ret = 0;
1051 
1052 	BUG_ON(memcg == root_mem_cgroup);
1053 
1054 	for_each_mem_cgroup_tree(iter, memcg) {
1055 		struct css_task_iter it;
1056 		struct task_struct *task;
1057 
1058 		css_task_iter_start(&iter->css, 0, &it);
1059 		while (!ret && (task = css_task_iter_next(&it)))
1060 			ret = fn(task, arg);
1061 		css_task_iter_end(&it);
1062 		if (ret) {
1063 			mem_cgroup_iter_break(memcg, iter);
1064 			break;
1065 		}
1066 	}
1067 	return ret;
1068 }
1069 
1070 /**
1071  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1072  * @page: the page
1073  * @pgdat: pgdat of the page
1074  *
1075  * This function is only safe when following the LRU page isolation
1076  * and putback protocol: the LRU lock must be held, and the page must
1077  * either be PageLRU() or the caller must have isolated/allocated it.
1078  */
1079 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1080 {
1081 	struct mem_cgroup_per_node *mz;
1082 	struct mem_cgroup *memcg;
1083 	struct lruvec *lruvec;
1084 
1085 	if (mem_cgroup_disabled()) {
1086 		lruvec = &pgdat->lruvec;
1087 		goto out;
1088 	}
1089 
1090 	memcg = page->mem_cgroup;
1091 	/*
1092 	 * Swapcache readahead pages are added to the LRU - and
1093 	 * possibly migrated - before they are charged.
1094 	 */
1095 	if (!memcg)
1096 		memcg = root_mem_cgroup;
1097 
1098 	mz = mem_cgroup_page_nodeinfo(memcg, page);
1099 	lruvec = &mz->lruvec;
1100 out:
1101 	/*
1102 	 * Since a node can be onlined after the mem_cgroup was created,
1103 	 * we have to be prepared to initialize lruvec->zone here;
1104 	 * and if offlined then reonlined, we need to reinitialize it.
1105 	 */
1106 	if (unlikely(lruvec->pgdat != pgdat))
1107 		lruvec->pgdat = pgdat;
1108 	return lruvec;
1109 }
1110 
1111 /**
1112  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1113  * @lruvec: mem_cgroup per zone lru vector
1114  * @lru: index of lru list the page is sitting on
1115  * @zid: zone id of the accounted pages
1116  * @nr_pages: positive when adding or negative when removing
1117  *
1118  * This function must be called under lru_lock, just before a page is added
1119  * to or just after a page is removed from an lru list (that ordering being
1120  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1121  */
1122 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1123 				int zid, int nr_pages)
1124 {
1125 	struct mem_cgroup_per_node *mz;
1126 	unsigned long *lru_size;
1127 	long size;
1128 
1129 	if (mem_cgroup_disabled())
1130 		return;
1131 
1132 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1133 	lru_size = &mz->lru_zone_size[zid][lru];
1134 
1135 	if (nr_pages < 0)
1136 		*lru_size += nr_pages;
1137 
1138 	size = *lru_size;
1139 	if (WARN_ONCE(size < 0,
1140 		"%s(%p, %d, %d): lru_size %ld\n",
1141 		__func__, lruvec, lru, nr_pages, size)) {
1142 		VM_BUG_ON(1);
1143 		*lru_size = 0;
1144 	}
1145 
1146 	if (nr_pages > 0)
1147 		*lru_size += nr_pages;
1148 }
1149 
1150 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1151 {
1152 	struct mem_cgroup *task_memcg;
1153 	struct task_struct *p;
1154 	bool ret;
1155 
1156 	p = find_lock_task_mm(task);
1157 	if (p) {
1158 		task_memcg = get_mem_cgroup_from_mm(p->mm);
1159 		task_unlock(p);
1160 	} else {
1161 		/*
1162 		 * All threads may have already detached their mm's, but the oom
1163 		 * killer still needs to detect if they have already been oom
1164 		 * killed to prevent needlessly killing additional tasks.
1165 		 */
1166 		rcu_read_lock();
1167 		task_memcg = mem_cgroup_from_task(task);
1168 		css_get(&task_memcg->css);
1169 		rcu_read_unlock();
1170 	}
1171 	ret = mem_cgroup_is_descendant(task_memcg, memcg);
1172 	css_put(&task_memcg->css);
1173 	return ret;
1174 }
1175 
1176 /**
1177  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1178  * @memcg: the memory cgroup
1179  *
1180  * Returns the maximum amount of memory @mem can be charged with, in
1181  * pages.
1182  */
1183 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1184 {
1185 	unsigned long margin = 0;
1186 	unsigned long count;
1187 	unsigned long limit;
1188 
1189 	count = page_counter_read(&memcg->memory);
1190 	limit = READ_ONCE(memcg->memory.max);
1191 	if (count < limit)
1192 		margin = limit - count;
1193 
1194 	if (do_memsw_account()) {
1195 		count = page_counter_read(&memcg->memsw);
1196 		limit = READ_ONCE(memcg->memsw.max);
1197 		if (count <= limit)
1198 			margin = min(margin, limit - count);
1199 		else
1200 			margin = 0;
1201 	}
1202 
1203 	return margin;
1204 }
1205 
1206 /*
1207  * A routine for checking "mem" is under move_account() or not.
1208  *
1209  * Checking a cgroup is mc.from or mc.to or under hierarchy of
1210  * moving cgroups. This is for waiting at high-memory pressure
1211  * caused by "move".
1212  */
1213 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1214 {
1215 	struct mem_cgroup *from;
1216 	struct mem_cgroup *to;
1217 	bool ret = false;
1218 	/*
1219 	 * Unlike task_move routines, we access mc.to, mc.from not under
1220 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1221 	 */
1222 	spin_lock(&mc.lock);
1223 	from = mc.from;
1224 	to = mc.to;
1225 	if (!from)
1226 		goto unlock;
1227 
1228 	ret = mem_cgroup_is_descendant(from, memcg) ||
1229 		mem_cgroup_is_descendant(to, memcg);
1230 unlock:
1231 	spin_unlock(&mc.lock);
1232 	return ret;
1233 }
1234 
1235 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1236 {
1237 	if (mc.moving_task && current != mc.moving_task) {
1238 		if (mem_cgroup_under_move(memcg)) {
1239 			DEFINE_WAIT(wait);
1240 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1241 			/* moving charge context might have finished. */
1242 			if (mc.moving_task)
1243 				schedule();
1244 			finish_wait(&mc.waitq, &wait);
1245 			return true;
1246 		}
1247 	}
1248 	return false;
1249 }
1250 
1251 static const unsigned int memcg1_stats[] = {
1252 	MEMCG_CACHE,
1253 	MEMCG_RSS,
1254 	MEMCG_RSS_HUGE,
1255 	NR_SHMEM,
1256 	NR_FILE_MAPPED,
1257 	NR_FILE_DIRTY,
1258 	NR_WRITEBACK,
1259 	MEMCG_SWAP,
1260 };
1261 
1262 static const char *const memcg1_stat_names[] = {
1263 	"cache",
1264 	"rss",
1265 	"rss_huge",
1266 	"shmem",
1267 	"mapped_file",
1268 	"dirty",
1269 	"writeback",
1270 	"swap",
1271 };
1272 
1273 #define K(x) ((x) << (PAGE_SHIFT-10))
1274 /**
1275  * mem_cgroup_print_oom_context: Print OOM information relevant to
1276  * memory controller.
1277  * @memcg: The memory cgroup that went over limit
1278  * @p: Task that is going to be killed
1279  *
1280  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1281  * enabled
1282  */
1283 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1284 {
1285 	rcu_read_lock();
1286 
1287 	if (memcg) {
1288 		pr_cont(",oom_memcg=");
1289 		pr_cont_cgroup_path(memcg->css.cgroup);
1290 	} else
1291 		pr_cont(",global_oom");
1292 	if (p) {
1293 		pr_cont(",task_memcg=");
1294 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1295 	}
1296 	rcu_read_unlock();
1297 }
1298 
1299 /**
1300  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1301  * memory controller.
1302  * @memcg: The memory cgroup that went over limit
1303  */
1304 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1305 {
1306 	struct mem_cgroup *iter;
1307 	unsigned int i;
1308 
1309 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1310 		K((u64)page_counter_read(&memcg->memory)),
1311 		K((u64)memcg->memory.max), memcg->memory.failcnt);
1312 	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1313 		K((u64)page_counter_read(&memcg->memsw)),
1314 		K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1315 	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1316 		K((u64)page_counter_read(&memcg->kmem)),
1317 		K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1318 
1319 	for_each_mem_cgroup_tree(iter, memcg) {
1320 		pr_info("Memory cgroup stats for ");
1321 		pr_cont_cgroup_path(iter->css.cgroup);
1322 		pr_cont(":");
1323 
1324 		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
1325 			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
1326 				continue;
1327 			pr_cont(" %s:%luKB", memcg1_stat_names[i],
1328 				K(memcg_page_state(iter, memcg1_stats[i])));
1329 		}
1330 
1331 		for (i = 0; i < NR_LRU_LISTS; i++)
1332 			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1333 				K(memcg_page_state(iter, NR_LRU_BASE + i)));
1334 
1335 		pr_cont("\n");
1336 	}
1337 }
1338 
1339 /*
1340  * Return the memory (and swap, if configured) limit for a memcg.
1341  */
1342 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1343 {
1344 	unsigned long max;
1345 
1346 	max = memcg->memory.max;
1347 	if (mem_cgroup_swappiness(memcg)) {
1348 		unsigned long memsw_max;
1349 		unsigned long swap_max;
1350 
1351 		memsw_max = memcg->memsw.max;
1352 		swap_max = memcg->swap.max;
1353 		swap_max = min(swap_max, (unsigned long)total_swap_pages);
1354 		max = min(max + swap_max, memsw_max);
1355 	}
1356 	return max;
1357 }
1358 
1359 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1360 				     int order)
1361 {
1362 	struct oom_control oc = {
1363 		.zonelist = NULL,
1364 		.nodemask = NULL,
1365 		.memcg = memcg,
1366 		.gfp_mask = gfp_mask,
1367 		.order = order,
1368 	};
1369 	bool ret;
1370 
1371 	if (mutex_lock_killable(&oom_lock))
1372 		return true;
1373 	/*
1374 	 * A few threads which were not waiting at mutex_lock_killable() can
1375 	 * fail to bail out. Therefore, check again after holding oom_lock.
1376 	 */
1377 	ret = should_force_charge() || out_of_memory(&oc);
1378 	mutex_unlock(&oom_lock);
1379 	return ret;
1380 }
1381 
1382 #if MAX_NUMNODES > 1
1383 
1384 /**
1385  * test_mem_cgroup_node_reclaimable
1386  * @memcg: the target memcg
1387  * @nid: the node ID to be checked.
1388  * @noswap : specify true here if the user wants flle only information.
1389  *
1390  * This function returns whether the specified memcg contains any
1391  * reclaimable pages on a node. Returns true if there are any reclaimable
1392  * pages in the node.
1393  */
1394 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1395 		int nid, bool noswap)
1396 {
1397 	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
1398 
1399 	if (lruvec_page_state(lruvec, NR_INACTIVE_FILE) ||
1400 	    lruvec_page_state(lruvec, NR_ACTIVE_FILE))
1401 		return true;
1402 	if (noswap || !total_swap_pages)
1403 		return false;
1404 	if (lruvec_page_state(lruvec, NR_INACTIVE_ANON) ||
1405 	    lruvec_page_state(lruvec, NR_ACTIVE_ANON))
1406 		return true;
1407 	return false;
1408 
1409 }
1410 
1411 /*
1412  * Always updating the nodemask is not very good - even if we have an empty
1413  * list or the wrong list here, we can start from some node and traverse all
1414  * nodes based on the zonelist. So update the list loosely once per 10 secs.
1415  *
1416  */
1417 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1418 {
1419 	int nid;
1420 	/*
1421 	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1422 	 * pagein/pageout changes since the last update.
1423 	 */
1424 	if (!atomic_read(&memcg->numainfo_events))
1425 		return;
1426 	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1427 		return;
1428 
1429 	/* make a nodemask where this memcg uses memory from */
1430 	memcg->scan_nodes = node_states[N_MEMORY];
1431 
1432 	for_each_node_mask(nid, node_states[N_MEMORY]) {
1433 
1434 		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1435 			node_clear(nid, memcg->scan_nodes);
1436 	}
1437 
1438 	atomic_set(&memcg->numainfo_events, 0);
1439 	atomic_set(&memcg->numainfo_updating, 0);
1440 }
1441 
1442 /*
1443  * Selecting a node where we start reclaim from. Because what we need is just
1444  * reducing usage counter, start from anywhere is O,K. Considering
1445  * memory reclaim from current node, there are pros. and cons.
1446  *
1447  * Freeing memory from current node means freeing memory from a node which
1448  * we'll use or we've used. So, it may make LRU bad. And if several threads
1449  * hit limits, it will see a contention on a node. But freeing from remote
1450  * node means more costs for memory reclaim because of memory latency.
1451  *
1452  * Now, we use round-robin. Better algorithm is welcomed.
1453  */
1454 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1455 {
1456 	int node;
1457 
1458 	mem_cgroup_may_update_nodemask(memcg);
1459 	node = memcg->last_scanned_node;
1460 
1461 	node = next_node_in(node, memcg->scan_nodes);
1462 	/*
1463 	 * mem_cgroup_may_update_nodemask might have seen no reclaimmable pages
1464 	 * last time it really checked all the LRUs due to rate limiting.
1465 	 * Fallback to the current node in that case for simplicity.
1466 	 */
1467 	if (unlikely(node == MAX_NUMNODES))
1468 		node = numa_node_id();
1469 
1470 	memcg->last_scanned_node = node;
1471 	return node;
1472 }
1473 #else
1474 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1475 {
1476 	return 0;
1477 }
1478 #endif
1479 
1480 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1481 				   pg_data_t *pgdat,
1482 				   gfp_t gfp_mask,
1483 				   unsigned long *total_scanned)
1484 {
1485 	struct mem_cgroup *victim = NULL;
1486 	int total = 0;
1487 	int loop = 0;
1488 	unsigned long excess;
1489 	unsigned long nr_scanned;
1490 	struct mem_cgroup_reclaim_cookie reclaim = {
1491 		.pgdat = pgdat,
1492 		.priority = 0,
1493 	};
1494 
1495 	excess = soft_limit_excess(root_memcg);
1496 
1497 	while (1) {
1498 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1499 		if (!victim) {
1500 			loop++;
1501 			if (loop >= 2) {
1502 				/*
1503 				 * If we have not been able to reclaim
1504 				 * anything, it might because there are
1505 				 * no reclaimable pages under this hierarchy
1506 				 */
1507 				if (!total)
1508 					break;
1509 				/*
1510 				 * We want to do more targeted reclaim.
1511 				 * excess >> 2 is not to excessive so as to
1512 				 * reclaim too much, nor too less that we keep
1513 				 * coming back to reclaim from this cgroup
1514 				 */
1515 				if (total >= (excess >> 2) ||
1516 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1517 					break;
1518 			}
1519 			continue;
1520 		}
1521 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1522 					pgdat, &nr_scanned);
1523 		*total_scanned += nr_scanned;
1524 		if (!soft_limit_excess(root_memcg))
1525 			break;
1526 	}
1527 	mem_cgroup_iter_break(root_memcg, victim);
1528 	return total;
1529 }
1530 
1531 #ifdef CONFIG_LOCKDEP
1532 static struct lockdep_map memcg_oom_lock_dep_map = {
1533 	.name = "memcg_oom_lock",
1534 };
1535 #endif
1536 
1537 static DEFINE_SPINLOCK(memcg_oom_lock);
1538 
1539 /*
1540  * Check OOM-Killer is already running under our hierarchy.
1541  * If someone is running, return false.
1542  */
1543 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1544 {
1545 	struct mem_cgroup *iter, *failed = NULL;
1546 
1547 	spin_lock(&memcg_oom_lock);
1548 
1549 	for_each_mem_cgroup_tree(iter, memcg) {
1550 		if (iter->oom_lock) {
1551 			/*
1552 			 * this subtree of our hierarchy is already locked
1553 			 * so we cannot give a lock.
1554 			 */
1555 			failed = iter;
1556 			mem_cgroup_iter_break(memcg, iter);
1557 			break;
1558 		} else
1559 			iter->oom_lock = true;
1560 	}
1561 
1562 	if (failed) {
1563 		/*
1564 		 * OK, we failed to lock the whole subtree so we have
1565 		 * to clean up what we set up to the failing subtree
1566 		 */
1567 		for_each_mem_cgroup_tree(iter, memcg) {
1568 			if (iter == failed) {
1569 				mem_cgroup_iter_break(memcg, iter);
1570 				break;
1571 			}
1572 			iter->oom_lock = false;
1573 		}
1574 	} else
1575 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1576 
1577 	spin_unlock(&memcg_oom_lock);
1578 
1579 	return !failed;
1580 }
1581 
1582 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1583 {
1584 	struct mem_cgroup *iter;
1585 
1586 	spin_lock(&memcg_oom_lock);
1587 	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1588 	for_each_mem_cgroup_tree(iter, memcg)
1589 		iter->oom_lock = false;
1590 	spin_unlock(&memcg_oom_lock);
1591 }
1592 
1593 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1594 {
1595 	struct mem_cgroup *iter;
1596 
1597 	spin_lock(&memcg_oom_lock);
1598 	for_each_mem_cgroup_tree(iter, memcg)
1599 		iter->under_oom++;
1600 	spin_unlock(&memcg_oom_lock);
1601 }
1602 
1603 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1604 {
1605 	struct mem_cgroup *iter;
1606 
1607 	/*
1608 	 * When a new child is created while the hierarchy is under oom,
1609 	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1610 	 */
1611 	spin_lock(&memcg_oom_lock);
1612 	for_each_mem_cgroup_tree(iter, memcg)
1613 		if (iter->under_oom > 0)
1614 			iter->under_oom--;
1615 	spin_unlock(&memcg_oom_lock);
1616 }
1617 
1618 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1619 
1620 struct oom_wait_info {
1621 	struct mem_cgroup *memcg;
1622 	wait_queue_entry_t	wait;
1623 };
1624 
1625 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1626 	unsigned mode, int sync, void *arg)
1627 {
1628 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1629 	struct mem_cgroup *oom_wait_memcg;
1630 	struct oom_wait_info *oom_wait_info;
1631 
1632 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1633 	oom_wait_memcg = oom_wait_info->memcg;
1634 
1635 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1636 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1637 		return 0;
1638 	return autoremove_wake_function(wait, mode, sync, arg);
1639 }
1640 
1641 static void memcg_oom_recover(struct mem_cgroup *memcg)
1642 {
1643 	/*
1644 	 * For the following lockless ->under_oom test, the only required
1645 	 * guarantee is that it must see the state asserted by an OOM when
1646 	 * this function is called as a result of userland actions
1647 	 * triggered by the notification of the OOM.  This is trivially
1648 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1649 	 * triggering notification.
1650 	 */
1651 	if (memcg && memcg->under_oom)
1652 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1653 }
1654 
1655 enum oom_status {
1656 	OOM_SUCCESS,
1657 	OOM_FAILED,
1658 	OOM_ASYNC,
1659 	OOM_SKIPPED
1660 };
1661 
1662 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1663 {
1664 	enum oom_status ret;
1665 	bool locked;
1666 
1667 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1668 		return OOM_SKIPPED;
1669 
1670 	memcg_memory_event(memcg, MEMCG_OOM);
1671 
1672 	/*
1673 	 * We are in the middle of the charge context here, so we
1674 	 * don't want to block when potentially sitting on a callstack
1675 	 * that holds all kinds of filesystem and mm locks.
1676 	 *
1677 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1678 	 * handling until the charge can succeed; remember the context and put
1679 	 * the task to sleep at the end of the page fault when all locks are
1680 	 * released.
1681 	 *
1682 	 * On the other hand, in-kernel OOM killer allows for an async victim
1683 	 * memory reclaim (oom_reaper) and that means that we are not solely
1684 	 * relying on the oom victim to make a forward progress and we can
1685 	 * invoke the oom killer here.
1686 	 *
1687 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1688 	 * victim and then we have to bail out from the charge path.
1689 	 */
1690 	if (memcg->oom_kill_disable) {
1691 		if (!current->in_user_fault)
1692 			return OOM_SKIPPED;
1693 		css_get(&memcg->css);
1694 		current->memcg_in_oom = memcg;
1695 		current->memcg_oom_gfp_mask = mask;
1696 		current->memcg_oom_order = order;
1697 
1698 		return OOM_ASYNC;
1699 	}
1700 
1701 	mem_cgroup_mark_under_oom(memcg);
1702 
1703 	locked = mem_cgroup_oom_trylock(memcg);
1704 
1705 	if (locked)
1706 		mem_cgroup_oom_notify(memcg);
1707 
1708 	mem_cgroup_unmark_under_oom(memcg);
1709 	if (mem_cgroup_out_of_memory(memcg, mask, order))
1710 		ret = OOM_SUCCESS;
1711 	else
1712 		ret = OOM_FAILED;
1713 
1714 	if (locked)
1715 		mem_cgroup_oom_unlock(memcg);
1716 
1717 	return ret;
1718 }
1719 
1720 /**
1721  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1722  * @handle: actually kill/wait or just clean up the OOM state
1723  *
1724  * This has to be called at the end of a page fault if the memcg OOM
1725  * handler was enabled.
1726  *
1727  * Memcg supports userspace OOM handling where failed allocations must
1728  * sleep on a waitqueue until the userspace task resolves the
1729  * situation.  Sleeping directly in the charge context with all kinds
1730  * of locks held is not a good idea, instead we remember an OOM state
1731  * in the task and mem_cgroup_oom_synchronize() has to be called at
1732  * the end of the page fault to complete the OOM handling.
1733  *
1734  * Returns %true if an ongoing memcg OOM situation was detected and
1735  * completed, %false otherwise.
1736  */
1737 bool mem_cgroup_oom_synchronize(bool handle)
1738 {
1739 	struct mem_cgroup *memcg = current->memcg_in_oom;
1740 	struct oom_wait_info owait;
1741 	bool locked;
1742 
1743 	/* OOM is global, do not handle */
1744 	if (!memcg)
1745 		return false;
1746 
1747 	if (!handle)
1748 		goto cleanup;
1749 
1750 	owait.memcg = memcg;
1751 	owait.wait.flags = 0;
1752 	owait.wait.func = memcg_oom_wake_function;
1753 	owait.wait.private = current;
1754 	INIT_LIST_HEAD(&owait.wait.entry);
1755 
1756 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1757 	mem_cgroup_mark_under_oom(memcg);
1758 
1759 	locked = mem_cgroup_oom_trylock(memcg);
1760 
1761 	if (locked)
1762 		mem_cgroup_oom_notify(memcg);
1763 
1764 	if (locked && !memcg->oom_kill_disable) {
1765 		mem_cgroup_unmark_under_oom(memcg);
1766 		finish_wait(&memcg_oom_waitq, &owait.wait);
1767 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1768 					 current->memcg_oom_order);
1769 	} else {
1770 		schedule();
1771 		mem_cgroup_unmark_under_oom(memcg);
1772 		finish_wait(&memcg_oom_waitq, &owait.wait);
1773 	}
1774 
1775 	if (locked) {
1776 		mem_cgroup_oom_unlock(memcg);
1777 		/*
1778 		 * There is no guarantee that an OOM-lock contender
1779 		 * sees the wakeups triggered by the OOM kill
1780 		 * uncharges.  Wake any sleepers explicitely.
1781 		 */
1782 		memcg_oom_recover(memcg);
1783 	}
1784 cleanup:
1785 	current->memcg_in_oom = NULL;
1786 	css_put(&memcg->css);
1787 	return true;
1788 }
1789 
1790 /**
1791  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1792  * @victim: task to be killed by the OOM killer
1793  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1794  *
1795  * Returns a pointer to a memory cgroup, which has to be cleaned up
1796  * by killing all belonging OOM-killable tasks.
1797  *
1798  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1799  */
1800 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1801 					    struct mem_cgroup *oom_domain)
1802 {
1803 	struct mem_cgroup *oom_group = NULL;
1804 	struct mem_cgroup *memcg;
1805 
1806 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1807 		return NULL;
1808 
1809 	if (!oom_domain)
1810 		oom_domain = root_mem_cgroup;
1811 
1812 	rcu_read_lock();
1813 
1814 	memcg = mem_cgroup_from_task(victim);
1815 	if (memcg == root_mem_cgroup)
1816 		goto out;
1817 
1818 	/*
1819 	 * Traverse the memory cgroup hierarchy from the victim task's
1820 	 * cgroup up to the OOMing cgroup (or root) to find the
1821 	 * highest-level memory cgroup with oom.group set.
1822 	 */
1823 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1824 		if (memcg->oom_group)
1825 			oom_group = memcg;
1826 
1827 		if (memcg == oom_domain)
1828 			break;
1829 	}
1830 
1831 	if (oom_group)
1832 		css_get(&oom_group->css);
1833 out:
1834 	rcu_read_unlock();
1835 
1836 	return oom_group;
1837 }
1838 
1839 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1840 {
1841 	pr_info("Tasks in ");
1842 	pr_cont_cgroup_path(memcg->css.cgroup);
1843 	pr_cont(" are going to be killed due to memory.oom.group set\n");
1844 }
1845 
1846 /**
1847  * lock_page_memcg - lock a page->mem_cgroup binding
1848  * @page: the page
1849  *
1850  * This function protects unlocked LRU pages from being moved to
1851  * another cgroup.
1852  *
1853  * It ensures lifetime of the returned memcg. Caller is responsible
1854  * for the lifetime of the page; __unlock_page_memcg() is available
1855  * when @page might get freed inside the locked section.
1856  */
1857 struct mem_cgroup *lock_page_memcg(struct page *page)
1858 {
1859 	struct mem_cgroup *memcg;
1860 	unsigned long flags;
1861 
1862 	/*
1863 	 * The RCU lock is held throughout the transaction.  The fast
1864 	 * path can get away without acquiring the memcg->move_lock
1865 	 * because page moving starts with an RCU grace period.
1866 	 *
1867 	 * The RCU lock also protects the memcg from being freed when
1868 	 * the page state that is going to change is the only thing
1869 	 * preventing the page itself from being freed. E.g. writeback
1870 	 * doesn't hold a page reference and relies on PG_writeback to
1871 	 * keep off truncation, migration and so forth.
1872          */
1873 	rcu_read_lock();
1874 
1875 	if (mem_cgroup_disabled())
1876 		return NULL;
1877 again:
1878 	memcg = page->mem_cgroup;
1879 	if (unlikely(!memcg))
1880 		return NULL;
1881 
1882 	if (atomic_read(&memcg->moving_account) <= 0)
1883 		return memcg;
1884 
1885 	spin_lock_irqsave(&memcg->move_lock, flags);
1886 	if (memcg != page->mem_cgroup) {
1887 		spin_unlock_irqrestore(&memcg->move_lock, flags);
1888 		goto again;
1889 	}
1890 
1891 	/*
1892 	 * When charge migration first begins, we can have locked and
1893 	 * unlocked page stat updates happening concurrently.  Track
1894 	 * the task who has the lock for unlock_page_memcg().
1895 	 */
1896 	memcg->move_lock_task = current;
1897 	memcg->move_lock_flags = flags;
1898 
1899 	return memcg;
1900 }
1901 EXPORT_SYMBOL(lock_page_memcg);
1902 
1903 /**
1904  * __unlock_page_memcg - unlock and unpin a memcg
1905  * @memcg: the memcg
1906  *
1907  * Unlock and unpin a memcg returned by lock_page_memcg().
1908  */
1909 void __unlock_page_memcg(struct mem_cgroup *memcg)
1910 {
1911 	if (memcg && memcg->move_lock_task == current) {
1912 		unsigned long flags = memcg->move_lock_flags;
1913 
1914 		memcg->move_lock_task = NULL;
1915 		memcg->move_lock_flags = 0;
1916 
1917 		spin_unlock_irqrestore(&memcg->move_lock, flags);
1918 	}
1919 
1920 	rcu_read_unlock();
1921 }
1922 
1923 /**
1924  * unlock_page_memcg - unlock a page->mem_cgroup binding
1925  * @page: the page
1926  */
1927 void unlock_page_memcg(struct page *page)
1928 {
1929 	__unlock_page_memcg(page->mem_cgroup);
1930 }
1931 EXPORT_SYMBOL(unlock_page_memcg);
1932 
1933 struct memcg_stock_pcp {
1934 	struct mem_cgroup *cached; /* this never be root cgroup */
1935 	unsigned int nr_pages;
1936 	struct work_struct work;
1937 	unsigned long flags;
1938 #define FLUSHING_CACHED_CHARGE	0
1939 };
1940 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1941 static DEFINE_MUTEX(percpu_charge_mutex);
1942 
1943 /**
1944  * consume_stock: Try to consume stocked charge on this cpu.
1945  * @memcg: memcg to consume from.
1946  * @nr_pages: how many pages to charge.
1947  *
1948  * The charges will only happen if @memcg matches the current cpu's memcg
1949  * stock, and at least @nr_pages are available in that stock.  Failure to
1950  * service an allocation will refill the stock.
1951  *
1952  * returns true if successful, false otherwise.
1953  */
1954 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1955 {
1956 	struct memcg_stock_pcp *stock;
1957 	unsigned long flags;
1958 	bool ret = false;
1959 
1960 	if (nr_pages > MEMCG_CHARGE_BATCH)
1961 		return ret;
1962 
1963 	local_irq_save(flags);
1964 
1965 	stock = this_cpu_ptr(&memcg_stock);
1966 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1967 		stock->nr_pages -= nr_pages;
1968 		ret = true;
1969 	}
1970 
1971 	local_irq_restore(flags);
1972 
1973 	return ret;
1974 }
1975 
1976 /*
1977  * Returns stocks cached in percpu and reset cached information.
1978  */
1979 static void drain_stock(struct memcg_stock_pcp *stock)
1980 {
1981 	struct mem_cgroup *old = stock->cached;
1982 
1983 	if (stock->nr_pages) {
1984 		page_counter_uncharge(&old->memory, stock->nr_pages);
1985 		if (do_memsw_account())
1986 			page_counter_uncharge(&old->memsw, stock->nr_pages);
1987 		css_put_many(&old->css, stock->nr_pages);
1988 		stock->nr_pages = 0;
1989 	}
1990 	stock->cached = NULL;
1991 }
1992 
1993 static void drain_local_stock(struct work_struct *dummy)
1994 {
1995 	struct memcg_stock_pcp *stock;
1996 	unsigned long flags;
1997 
1998 	/*
1999 	 * The only protection from memory hotplug vs. drain_stock races is
2000 	 * that we always operate on local CPU stock here with IRQ disabled
2001 	 */
2002 	local_irq_save(flags);
2003 
2004 	stock = this_cpu_ptr(&memcg_stock);
2005 	drain_stock(stock);
2006 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2007 
2008 	local_irq_restore(flags);
2009 }
2010 
2011 /*
2012  * Cache charges(val) to local per_cpu area.
2013  * This will be consumed by consume_stock() function, later.
2014  */
2015 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2016 {
2017 	struct memcg_stock_pcp *stock;
2018 	unsigned long flags;
2019 
2020 	local_irq_save(flags);
2021 
2022 	stock = this_cpu_ptr(&memcg_stock);
2023 	if (stock->cached != memcg) { /* reset if necessary */
2024 		drain_stock(stock);
2025 		stock->cached = memcg;
2026 	}
2027 	stock->nr_pages += nr_pages;
2028 
2029 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2030 		drain_stock(stock);
2031 
2032 	local_irq_restore(flags);
2033 }
2034 
2035 /*
2036  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2037  * of the hierarchy under it.
2038  */
2039 static void drain_all_stock(struct mem_cgroup *root_memcg)
2040 {
2041 	int cpu, curcpu;
2042 
2043 	/* If someone's already draining, avoid adding running more workers. */
2044 	if (!mutex_trylock(&percpu_charge_mutex))
2045 		return;
2046 	/*
2047 	 * Notify other cpus that system-wide "drain" is running
2048 	 * We do not care about races with the cpu hotplug because cpu down
2049 	 * as well as workers from this path always operate on the local
2050 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2051 	 */
2052 	curcpu = get_cpu();
2053 	for_each_online_cpu(cpu) {
2054 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2055 		struct mem_cgroup *memcg;
2056 
2057 		memcg = stock->cached;
2058 		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
2059 			continue;
2060 		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
2061 			css_put(&memcg->css);
2062 			continue;
2063 		}
2064 		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2065 			if (cpu == curcpu)
2066 				drain_local_stock(&stock->work);
2067 			else
2068 				schedule_work_on(cpu, &stock->work);
2069 		}
2070 		css_put(&memcg->css);
2071 	}
2072 	put_cpu();
2073 	mutex_unlock(&percpu_charge_mutex);
2074 }
2075 
2076 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2077 {
2078 	struct memcg_stock_pcp *stock;
2079 	struct mem_cgroup *memcg;
2080 
2081 	stock = &per_cpu(memcg_stock, cpu);
2082 	drain_stock(stock);
2083 
2084 	for_each_mem_cgroup(memcg) {
2085 		int i;
2086 
2087 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2088 			int nid;
2089 			long x;
2090 
2091 			x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
2092 			if (x)
2093 				atomic_long_add(x, &memcg->stat[i]);
2094 
2095 			if (i >= NR_VM_NODE_STAT_ITEMS)
2096 				continue;
2097 
2098 			for_each_node(nid) {
2099 				struct mem_cgroup_per_node *pn;
2100 
2101 				pn = mem_cgroup_nodeinfo(memcg, nid);
2102 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2103 				if (x)
2104 					atomic_long_add(x, &pn->lruvec_stat[i]);
2105 			}
2106 		}
2107 
2108 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2109 			long x;
2110 
2111 			x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
2112 			if (x)
2113 				atomic_long_add(x, &memcg->events[i]);
2114 		}
2115 	}
2116 
2117 	return 0;
2118 }
2119 
2120 static void reclaim_high(struct mem_cgroup *memcg,
2121 			 unsigned int nr_pages,
2122 			 gfp_t gfp_mask)
2123 {
2124 	do {
2125 		if (page_counter_read(&memcg->memory) <= memcg->high)
2126 			continue;
2127 		memcg_memory_event(memcg, MEMCG_HIGH);
2128 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2129 	} while ((memcg = parent_mem_cgroup(memcg)));
2130 }
2131 
2132 static void high_work_func(struct work_struct *work)
2133 {
2134 	struct mem_cgroup *memcg;
2135 
2136 	memcg = container_of(work, struct mem_cgroup, high_work);
2137 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2138 }
2139 
2140 /*
2141  * Scheduled by try_charge() to be executed from the userland return path
2142  * and reclaims memory over the high limit.
2143  */
2144 void mem_cgroup_handle_over_high(void)
2145 {
2146 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2147 	struct mem_cgroup *memcg;
2148 
2149 	if (likely(!nr_pages))
2150 		return;
2151 
2152 	memcg = get_mem_cgroup_from_mm(current->mm);
2153 	reclaim_high(memcg, nr_pages, GFP_KERNEL);
2154 	css_put(&memcg->css);
2155 	current->memcg_nr_pages_over_high = 0;
2156 }
2157 
2158 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2159 		      unsigned int nr_pages)
2160 {
2161 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2162 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2163 	struct mem_cgroup *mem_over_limit;
2164 	struct page_counter *counter;
2165 	unsigned long nr_reclaimed;
2166 	bool may_swap = true;
2167 	bool drained = false;
2168 	bool oomed = false;
2169 	enum oom_status oom_status;
2170 
2171 	if (mem_cgroup_is_root(memcg))
2172 		return 0;
2173 retry:
2174 	if (consume_stock(memcg, nr_pages))
2175 		return 0;
2176 
2177 	if (!do_memsw_account() ||
2178 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2179 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2180 			goto done_restock;
2181 		if (do_memsw_account())
2182 			page_counter_uncharge(&memcg->memsw, batch);
2183 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2184 	} else {
2185 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2186 		may_swap = false;
2187 	}
2188 
2189 	if (batch > nr_pages) {
2190 		batch = nr_pages;
2191 		goto retry;
2192 	}
2193 
2194 	/*
2195 	 * Unlike in global OOM situations, memcg is not in a physical
2196 	 * memory shortage.  Allow dying and OOM-killed tasks to
2197 	 * bypass the last charges so that they can exit quickly and
2198 	 * free their memory.
2199 	 */
2200 	if (unlikely(should_force_charge()))
2201 		goto force;
2202 
2203 	/*
2204 	 * Prevent unbounded recursion when reclaim operations need to
2205 	 * allocate memory. This might exceed the limits temporarily,
2206 	 * but we prefer facilitating memory reclaim and getting back
2207 	 * under the limit over triggering OOM kills in these cases.
2208 	 */
2209 	if (unlikely(current->flags & PF_MEMALLOC))
2210 		goto force;
2211 
2212 	if (unlikely(task_in_memcg_oom(current)))
2213 		goto nomem;
2214 
2215 	if (!gfpflags_allow_blocking(gfp_mask))
2216 		goto nomem;
2217 
2218 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2219 
2220 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2221 						    gfp_mask, may_swap);
2222 
2223 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2224 		goto retry;
2225 
2226 	if (!drained) {
2227 		drain_all_stock(mem_over_limit);
2228 		drained = true;
2229 		goto retry;
2230 	}
2231 
2232 	if (gfp_mask & __GFP_NORETRY)
2233 		goto nomem;
2234 	/*
2235 	 * Even though the limit is exceeded at this point, reclaim
2236 	 * may have been able to free some pages.  Retry the charge
2237 	 * before killing the task.
2238 	 *
2239 	 * Only for regular pages, though: huge pages are rather
2240 	 * unlikely to succeed so close to the limit, and we fall back
2241 	 * to regular pages anyway in case of failure.
2242 	 */
2243 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2244 		goto retry;
2245 	/*
2246 	 * At task move, charge accounts can be doubly counted. So, it's
2247 	 * better to wait until the end of task_move if something is going on.
2248 	 */
2249 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2250 		goto retry;
2251 
2252 	if (nr_retries--)
2253 		goto retry;
2254 
2255 	if (gfp_mask & __GFP_RETRY_MAYFAIL && oomed)
2256 		goto nomem;
2257 
2258 	if (gfp_mask & __GFP_NOFAIL)
2259 		goto force;
2260 
2261 	if (fatal_signal_pending(current))
2262 		goto force;
2263 
2264 	/*
2265 	 * keep retrying as long as the memcg oom killer is able to make
2266 	 * a forward progress or bypass the charge if the oom killer
2267 	 * couldn't make any progress.
2268 	 */
2269 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2270 		       get_order(nr_pages * PAGE_SIZE));
2271 	switch (oom_status) {
2272 	case OOM_SUCCESS:
2273 		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2274 		oomed = true;
2275 		goto retry;
2276 	case OOM_FAILED:
2277 		goto force;
2278 	default:
2279 		goto nomem;
2280 	}
2281 nomem:
2282 	if (!(gfp_mask & __GFP_NOFAIL))
2283 		return -ENOMEM;
2284 force:
2285 	/*
2286 	 * The allocation either can't fail or will lead to more memory
2287 	 * being freed very soon.  Allow memory usage go over the limit
2288 	 * temporarily by force charging it.
2289 	 */
2290 	page_counter_charge(&memcg->memory, nr_pages);
2291 	if (do_memsw_account())
2292 		page_counter_charge(&memcg->memsw, nr_pages);
2293 	css_get_many(&memcg->css, nr_pages);
2294 
2295 	return 0;
2296 
2297 done_restock:
2298 	css_get_many(&memcg->css, batch);
2299 	if (batch > nr_pages)
2300 		refill_stock(memcg, batch - nr_pages);
2301 
2302 	/*
2303 	 * If the hierarchy is above the normal consumption range, schedule
2304 	 * reclaim on returning to userland.  We can perform reclaim here
2305 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2306 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2307 	 * not recorded as it most likely matches current's and won't
2308 	 * change in the meantime.  As high limit is checked again before
2309 	 * reclaim, the cost of mismatch is negligible.
2310 	 */
2311 	do {
2312 		if (page_counter_read(&memcg->memory) > memcg->high) {
2313 			/* Don't bother a random interrupted task */
2314 			if (in_interrupt()) {
2315 				schedule_work(&memcg->high_work);
2316 				break;
2317 			}
2318 			current->memcg_nr_pages_over_high += batch;
2319 			set_notify_resume(current);
2320 			break;
2321 		}
2322 	} while ((memcg = parent_mem_cgroup(memcg)));
2323 
2324 	return 0;
2325 }
2326 
2327 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2328 {
2329 	if (mem_cgroup_is_root(memcg))
2330 		return;
2331 
2332 	page_counter_uncharge(&memcg->memory, nr_pages);
2333 	if (do_memsw_account())
2334 		page_counter_uncharge(&memcg->memsw, nr_pages);
2335 
2336 	css_put_many(&memcg->css, nr_pages);
2337 }
2338 
2339 static void lock_page_lru(struct page *page, int *isolated)
2340 {
2341 	pg_data_t *pgdat = page_pgdat(page);
2342 
2343 	spin_lock_irq(&pgdat->lru_lock);
2344 	if (PageLRU(page)) {
2345 		struct lruvec *lruvec;
2346 
2347 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2348 		ClearPageLRU(page);
2349 		del_page_from_lru_list(page, lruvec, page_lru(page));
2350 		*isolated = 1;
2351 	} else
2352 		*isolated = 0;
2353 }
2354 
2355 static void unlock_page_lru(struct page *page, int isolated)
2356 {
2357 	pg_data_t *pgdat = page_pgdat(page);
2358 
2359 	if (isolated) {
2360 		struct lruvec *lruvec;
2361 
2362 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2363 		VM_BUG_ON_PAGE(PageLRU(page), page);
2364 		SetPageLRU(page);
2365 		add_page_to_lru_list(page, lruvec, page_lru(page));
2366 	}
2367 	spin_unlock_irq(&pgdat->lru_lock);
2368 }
2369 
2370 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2371 			  bool lrucare)
2372 {
2373 	int isolated;
2374 
2375 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2376 
2377 	/*
2378 	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2379 	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2380 	 */
2381 	if (lrucare)
2382 		lock_page_lru(page, &isolated);
2383 
2384 	/*
2385 	 * Nobody should be changing or seriously looking at
2386 	 * page->mem_cgroup at this point:
2387 	 *
2388 	 * - the page is uncharged
2389 	 *
2390 	 * - the page is off-LRU
2391 	 *
2392 	 * - an anonymous fault has exclusive page access, except for
2393 	 *   a locked page table
2394 	 *
2395 	 * - a page cache insertion, a swapin fault, or a migration
2396 	 *   have the page locked
2397 	 */
2398 	page->mem_cgroup = memcg;
2399 
2400 	if (lrucare)
2401 		unlock_page_lru(page, isolated);
2402 }
2403 
2404 #ifdef CONFIG_MEMCG_KMEM
2405 static int memcg_alloc_cache_id(void)
2406 {
2407 	int id, size;
2408 	int err;
2409 
2410 	id = ida_simple_get(&memcg_cache_ida,
2411 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2412 	if (id < 0)
2413 		return id;
2414 
2415 	if (id < memcg_nr_cache_ids)
2416 		return id;
2417 
2418 	/*
2419 	 * There's no space for the new id in memcg_caches arrays,
2420 	 * so we have to grow them.
2421 	 */
2422 	down_write(&memcg_cache_ids_sem);
2423 
2424 	size = 2 * (id + 1);
2425 	if (size < MEMCG_CACHES_MIN_SIZE)
2426 		size = MEMCG_CACHES_MIN_SIZE;
2427 	else if (size > MEMCG_CACHES_MAX_SIZE)
2428 		size = MEMCG_CACHES_MAX_SIZE;
2429 
2430 	err = memcg_update_all_caches(size);
2431 	if (!err)
2432 		err = memcg_update_all_list_lrus(size);
2433 	if (!err)
2434 		memcg_nr_cache_ids = size;
2435 
2436 	up_write(&memcg_cache_ids_sem);
2437 
2438 	if (err) {
2439 		ida_simple_remove(&memcg_cache_ida, id);
2440 		return err;
2441 	}
2442 	return id;
2443 }
2444 
2445 static void memcg_free_cache_id(int id)
2446 {
2447 	ida_simple_remove(&memcg_cache_ida, id);
2448 }
2449 
2450 struct memcg_kmem_cache_create_work {
2451 	struct mem_cgroup *memcg;
2452 	struct kmem_cache *cachep;
2453 	struct work_struct work;
2454 };
2455 
2456 static void memcg_kmem_cache_create_func(struct work_struct *w)
2457 {
2458 	struct memcg_kmem_cache_create_work *cw =
2459 		container_of(w, struct memcg_kmem_cache_create_work, work);
2460 	struct mem_cgroup *memcg = cw->memcg;
2461 	struct kmem_cache *cachep = cw->cachep;
2462 
2463 	memcg_create_kmem_cache(memcg, cachep);
2464 
2465 	css_put(&memcg->css);
2466 	kfree(cw);
2467 }
2468 
2469 /*
2470  * Enqueue the creation of a per-memcg kmem_cache.
2471  */
2472 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2473 					       struct kmem_cache *cachep)
2474 {
2475 	struct memcg_kmem_cache_create_work *cw;
2476 
2477 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2478 	if (!cw)
2479 		return;
2480 
2481 	css_get(&memcg->css);
2482 
2483 	cw->memcg = memcg;
2484 	cw->cachep = cachep;
2485 	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2486 
2487 	queue_work(memcg_kmem_cache_wq, &cw->work);
2488 }
2489 
2490 static inline bool memcg_kmem_bypass(void)
2491 {
2492 	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2493 		return true;
2494 	return false;
2495 }
2496 
2497 /**
2498  * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2499  * @cachep: the original global kmem cache
2500  *
2501  * Return the kmem_cache we're supposed to use for a slab allocation.
2502  * We try to use the current memcg's version of the cache.
2503  *
2504  * If the cache does not exist yet, if we are the first user of it, we
2505  * create it asynchronously in a workqueue and let the current allocation
2506  * go through with the original cache.
2507  *
2508  * This function takes a reference to the cache it returns to assure it
2509  * won't get destroyed while we are working with it. Once the caller is
2510  * done with it, memcg_kmem_put_cache() must be called to release the
2511  * reference.
2512  */
2513 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2514 {
2515 	struct mem_cgroup *memcg;
2516 	struct kmem_cache *memcg_cachep;
2517 	int kmemcg_id;
2518 
2519 	VM_BUG_ON(!is_root_cache(cachep));
2520 
2521 	if (memcg_kmem_bypass())
2522 		return cachep;
2523 
2524 	memcg = get_mem_cgroup_from_current();
2525 	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2526 	if (kmemcg_id < 0)
2527 		goto out;
2528 
2529 	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2530 	if (likely(memcg_cachep))
2531 		return memcg_cachep;
2532 
2533 	/*
2534 	 * If we are in a safe context (can wait, and not in interrupt
2535 	 * context), we could be be predictable and return right away.
2536 	 * This would guarantee that the allocation being performed
2537 	 * already belongs in the new cache.
2538 	 *
2539 	 * However, there are some clashes that can arrive from locking.
2540 	 * For instance, because we acquire the slab_mutex while doing
2541 	 * memcg_create_kmem_cache, this means no further allocation
2542 	 * could happen with the slab_mutex held. So it's better to
2543 	 * defer everything.
2544 	 */
2545 	memcg_schedule_kmem_cache_create(memcg, cachep);
2546 out:
2547 	css_put(&memcg->css);
2548 	return cachep;
2549 }
2550 
2551 /**
2552  * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2553  * @cachep: the cache returned by memcg_kmem_get_cache
2554  */
2555 void memcg_kmem_put_cache(struct kmem_cache *cachep)
2556 {
2557 	if (!is_root_cache(cachep))
2558 		css_put(&cachep->memcg_params.memcg->css);
2559 }
2560 
2561 /**
2562  * __memcg_kmem_charge_memcg: charge a kmem page
2563  * @page: page to charge
2564  * @gfp: reclaim mode
2565  * @order: allocation order
2566  * @memcg: memory cgroup to charge
2567  *
2568  * Returns 0 on success, an error code on failure.
2569  */
2570 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2571 			    struct mem_cgroup *memcg)
2572 {
2573 	unsigned int nr_pages = 1 << order;
2574 	struct page_counter *counter;
2575 	int ret;
2576 
2577 	ret = try_charge(memcg, gfp, nr_pages);
2578 	if (ret)
2579 		return ret;
2580 
2581 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2582 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2583 		cancel_charge(memcg, nr_pages);
2584 		return -ENOMEM;
2585 	}
2586 
2587 	page->mem_cgroup = memcg;
2588 
2589 	return 0;
2590 }
2591 
2592 /**
2593  * __memcg_kmem_charge: charge a kmem page to the current memory cgroup
2594  * @page: page to charge
2595  * @gfp: reclaim mode
2596  * @order: allocation order
2597  *
2598  * Returns 0 on success, an error code on failure.
2599  */
2600 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2601 {
2602 	struct mem_cgroup *memcg;
2603 	int ret = 0;
2604 
2605 	if (memcg_kmem_bypass())
2606 		return 0;
2607 
2608 	memcg = get_mem_cgroup_from_current();
2609 	if (!mem_cgroup_is_root(memcg)) {
2610 		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2611 		if (!ret)
2612 			__SetPageKmemcg(page);
2613 	}
2614 	css_put(&memcg->css);
2615 	return ret;
2616 }
2617 /**
2618  * __memcg_kmem_uncharge: uncharge a kmem page
2619  * @page: page to uncharge
2620  * @order: allocation order
2621  */
2622 void __memcg_kmem_uncharge(struct page *page, int order)
2623 {
2624 	struct mem_cgroup *memcg = page->mem_cgroup;
2625 	unsigned int nr_pages = 1 << order;
2626 
2627 	if (!memcg)
2628 		return;
2629 
2630 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2631 
2632 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2633 		page_counter_uncharge(&memcg->kmem, nr_pages);
2634 
2635 	page_counter_uncharge(&memcg->memory, nr_pages);
2636 	if (do_memsw_account())
2637 		page_counter_uncharge(&memcg->memsw, nr_pages);
2638 
2639 	page->mem_cgroup = NULL;
2640 
2641 	/* slab pages do not have PageKmemcg flag set */
2642 	if (PageKmemcg(page))
2643 		__ClearPageKmemcg(page);
2644 
2645 	css_put_many(&memcg->css, nr_pages);
2646 }
2647 #endif /* CONFIG_MEMCG_KMEM */
2648 
2649 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2650 
2651 /*
2652  * Because tail pages are not marked as "used", set it. We're under
2653  * pgdat->lru_lock and migration entries setup in all page mappings.
2654  */
2655 void mem_cgroup_split_huge_fixup(struct page *head)
2656 {
2657 	int i;
2658 
2659 	if (mem_cgroup_disabled())
2660 		return;
2661 
2662 	for (i = 1; i < HPAGE_PMD_NR; i++)
2663 		head[i].mem_cgroup = head->mem_cgroup;
2664 
2665 	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
2666 }
2667 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2668 
2669 #ifdef CONFIG_MEMCG_SWAP
2670 /**
2671  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2672  * @entry: swap entry to be moved
2673  * @from:  mem_cgroup which the entry is moved from
2674  * @to:  mem_cgroup which the entry is moved to
2675  *
2676  * It succeeds only when the swap_cgroup's record for this entry is the same
2677  * as the mem_cgroup's id of @from.
2678  *
2679  * Returns 0 on success, -EINVAL on failure.
2680  *
2681  * The caller must have charged to @to, IOW, called page_counter_charge() about
2682  * both res and memsw, and called css_get().
2683  */
2684 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2685 				struct mem_cgroup *from, struct mem_cgroup *to)
2686 {
2687 	unsigned short old_id, new_id;
2688 
2689 	old_id = mem_cgroup_id(from);
2690 	new_id = mem_cgroup_id(to);
2691 
2692 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2693 		mod_memcg_state(from, MEMCG_SWAP, -1);
2694 		mod_memcg_state(to, MEMCG_SWAP, 1);
2695 		return 0;
2696 	}
2697 	return -EINVAL;
2698 }
2699 #else
2700 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2701 				struct mem_cgroup *from, struct mem_cgroup *to)
2702 {
2703 	return -EINVAL;
2704 }
2705 #endif
2706 
2707 static DEFINE_MUTEX(memcg_max_mutex);
2708 
2709 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
2710 				 unsigned long max, bool memsw)
2711 {
2712 	bool enlarge = false;
2713 	bool drained = false;
2714 	int ret;
2715 	bool limits_invariant;
2716 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
2717 
2718 	do {
2719 		if (signal_pending(current)) {
2720 			ret = -EINTR;
2721 			break;
2722 		}
2723 
2724 		mutex_lock(&memcg_max_mutex);
2725 		/*
2726 		 * Make sure that the new limit (memsw or memory limit) doesn't
2727 		 * break our basic invariant rule memory.max <= memsw.max.
2728 		 */
2729 		limits_invariant = memsw ? max >= memcg->memory.max :
2730 					   max <= memcg->memsw.max;
2731 		if (!limits_invariant) {
2732 			mutex_unlock(&memcg_max_mutex);
2733 			ret = -EINVAL;
2734 			break;
2735 		}
2736 		if (max > counter->max)
2737 			enlarge = true;
2738 		ret = page_counter_set_max(counter, max);
2739 		mutex_unlock(&memcg_max_mutex);
2740 
2741 		if (!ret)
2742 			break;
2743 
2744 		if (!drained) {
2745 			drain_all_stock(memcg);
2746 			drained = true;
2747 			continue;
2748 		}
2749 
2750 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
2751 					GFP_KERNEL, !memsw)) {
2752 			ret = -EBUSY;
2753 			break;
2754 		}
2755 	} while (true);
2756 
2757 	if (!ret && enlarge)
2758 		memcg_oom_recover(memcg);
2759 
2760 	return ret;
2761 }
2762 
2763 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2764 					    gfp_t gfp_mask,
2765 					    unsigned long *total_scanned)
2766 {
2767 	unsigned long nr_reclaimed = 0;
2768 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
2769 	unsigned long reclaimed;
2770 	int loop = 0;
2771 	struct mem_cgroup_tree_per_node *mctz;
2772 	unsigned long excess;
2773 	unsigned long nr_scanned;
2774 
2775 	if (order > 0)
2776 		return 0;
2777 
2778 	mctz = soft_limit_tree_node(pgdat->node_id);
2779 
2780 	/*
2781 	 * Do not even bother to check the largest node if the root
2782 	 * is empty. Do it lockless to prevent lock bouncing. Races
2783 	 * are acceptable as soft limit is best effort anyway.
2784 	 */
2785 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2786 		return 0;
2787 
2788 	/*
2789 	 * This loop can run a while, specially if mem_cgroup's continuously
2790 	 * keep exceeding their soft limit and putting the system under
2791 	 * pressure
2792 	 */
2793 	do {
2794 		if (next_mz)
2795 			mz = next_mz;
2796 		else
2797 			mz = mem_cgroup_largest_soft_limit_node(mctz);
2798 		if (!mz)
2799 			break;
2800 
2801 		nr_scanned = 0;
2802 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2803 						    gfp_mask, &nr_scanned);
2804 		nr_reclaimed += reclaimed;
2805 		*total_scanned += nr_scanned;
2806 		spin_lock_irq(&mctz->lock);
2807 		__mem_cgroup_remove_exceeded(mz, mctz);
2808 
2809 		/*
2810 		 * If we failed to reclaim anything from this memory cgroup
2811 		 * it is time to move on to the next cgroup
2812 		 */
2813 		next_mz = NULL;
2814 		if (!reclaimed)
2815 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2816 
2817 		excess = soft_limit_excess(mz->memcg);
2818 		/*
2819 		 * One school of thought says that we should not add
2820 		 * back the node to the tree if reclaim returns 0.
2821 		 * But our reclaim could return 0, simply because due
2822 		 * to priority we are exposing a smaller subset of
2823 		 * memory to reclaim from. Consider this as a longer
2824 		 * term TODO.
2825 		 */
2826 		/* If excess == 0, no tree ops */
2827 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2828 		spin_unlock_irq(&mctz->lock);
2829 		css_put(&mz->memcg->css);
2830 		loop++;
2831 		/*
2832 		 * Could not reclaim anything and there are no more
2833 		 * mem cgroups to try or we seem to be looping without
2834 		 * reclaiming anything.
2835 		 */
2836 		if (!nr_reclaimed &&
2837 			(next_mz == NULL ||
2838 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2839 			break;
2840 	} while (!nr_reclaimed);
2841 	if (next_mz)
2842 		css_put(&next_mz->memcg->css);
2843 	return nr_reclaimed;
2844 }
2845 
2846 /*
2847  * Test whether @memcg has children, dead or alive.  Note that this
2848  * function doesn't care whether @memcg has use_hierarchy enabled and
2849  * returns %true if there are child csses according to the cgroup
2850  * hierarchy.  Testing use_hierarchy is the caller's responsiblity.
2851  */
2852 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2853 {
2854 	bool ret;
2855 
2856 	rcu_read_lock();
2857 	ret = css_next_child(NULL, &memcg->css);
2858 	rcu_read_unlock();
2859 	return ret;
2860 }
2861 
2862 /*
2863  * Reclaims as many pages from the given memcg as possible.
2864  *
2865  * Caller is responsible for holding css reference for memcg.
2866  */
2867 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2868 {
2869 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2870 
2871 	/* we call try-to-free pages for make this cgroup empty */
2872 	lru_add_drain_all();
2873 
2874 	drain_all_stock(memcg);
2875 
2876 	/* try to free all pages in this cgroup */
2877 	while (nr_retries && page_counter_read(&memcg->memory)) {
2878 		int progress;
2879 
2880 		if (signal_pending(current))
2881 			return -EINTR;
2882 
2883 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
2884 							GFP_KERNEL, true);
2885 		if (!progress) {
2886 			nr_retries--;
2887 			/* maybe some writeback is necessary */
2888 			congestion_wait(BLK_RW_ASYNC, HZ/10);
2889 		}
2890 
2891 	}
2892 
2893 	return 0;
2894 }
2895 
2896 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2897 					    char *buf, size_t nbytes,
2898 					    loff_t off)
2899 {
2900 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2901 
2902 	if (mem_cgroup_is_root(memcg))
2903 		return -EINVAL;
2904 	return mem_cgroup_force_empty(memcg) ?: nbytes;
2905 }
2906 
2907 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2908 				     struct cftype *cft)
2909 {
2910 	return mem_cgroup_from_css(css)->use_hierarchy;
2911 }
2912 
2913 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2914 				      struct cftype *cft, u64 val)
2915 {
2916 	int retval = 0;
2917 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2918 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2919 
2920 	if (memcg->use_hierarchy == val)
2921 		return 0;
2922 
2923 	/*
2924 	 * If parent's use_hierarchy is set, we can't make any modifications
2925 	 * in the child subtrees. If it is unset, then the change can
2926 	 * occur, provided the current cgroup has no children.
2927 	 *
2928 	 * For the root cgroup, parent_mem is NULL, we allow value to be
2929 	 * set if there are no children.
2930 	 */
2931 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2932 				(val == 1 || val == 0)) {
2933 		if (!memcg_has_children(memcg))
2934 			memcg->use_hierarchy = val;
2935 		else
2936 			retval = -EBUSY;
2937 	} else
2938 		retval = -EINVAL;
2939 
2940 	return retval;
2941 }
2942 
2943 struct accumulated_stats {
2944 	unsigned long stat[MEMCG_NR_STAT];
2945 	unsigned long events[NR_VM_EVENT_ITEMS];
2946 	unsigned long lru_pages[NR_LRU_LISTS];
2947 	const unsigned int *stats_array;
2948 	const unsigned int *events_array;
2949 	int stats_size;
2950 	int events_size;
2951 };
2952 
2953 static void accumulate_memcg_tree(struct mem_cgroup *memcg,
2954 				  struct accumulated_stats *acc)
2955 {
2956 	struct mem_cgroup *mi;
2957 	int i;
2958 
2959 	for_each_mem_cgroup_tree(mi, memcg) {
2960 		for (i = 0; i < acc->stats_size; i++)
2961 			acc->stat[i] += memcg_page_state(mi,
2962 				acc->stats_array ? acc->stats_array[i] : i);
2963 
2964 		for (i = 0; i < acc->events_size; i++)
2965 			acc->events[i] += memcg_sum_events(mi,
2966 				acc->events_array ? acc->events_array[i] : i);
2967 
2968 		for (i = 0; i < NR_LRU_LISTS; i++)
2969 			acc->lru_pages[i] += memcg_page_state(mi,
2970 							      NR_LRU_BASE + i);
2971 	}
2972 }
2973 
2974 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2975 {
2976 	unsigned long val = 0;
2977 
2978 	if (mem_cgroup_is_root(memcg)) {
2979 		struct mem_cgroup *iter;
2980 
2981 		for_each_mem_cgroup_tree(iter, memcg) {
2982 			val += memcg_page_state(iter, MEMCG_CACHE);
2983 			val += memcg_page_state(iter, MEMCG_RSS);
2984 			if (swap)
2985 				val += memcg_page_state(iter, MEMCG_SWAP);
2986 		}
2987 	} else {
2988 		if (!swap)
2989 			val = page_counter_read(&memcg->memory);
2990 		else
2991 			val = page_counter_read(&memcg->memsw);
2992 	}
2993 	return val;
2994 }
2995 
2996 enum {
2997 	RES_USAGE,
2998 	RES_LIMIT,
2999 	RES_MAX_USAGE,
3000 	RES_FAILCNT,
3001 	RES_SOFT_LIMIT,
3002 };
3003 
3004 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3005 			       struct cftype *cft)
3006 {
3007 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3008 	struct page_counter *counter;
3009 
3010 	switch (MEMFILE_TYPE(cft->private)) {
3011 	case _MEM:
3012 		counter = &memcg->memory;
3013 		break;
3014 	case _MEMSWAP:
3015 		counter = &memcg->memsw;
3016 		break;
3017 	case _KMEM:
3018 		counter = &memcg->kmem;
3019 		break;
3020 	case _TCP:
3021 		counter = &memcg->tcpmem;
3022 		break;
3023 	default:
3024 		BUG();
3025 	}
3026 
3027 	switch (MEMFILE_ATTR(cft->private)) {
3028 	case RES_USAGE:
3029 		if (counter == &memcg->memory)
3030 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3031 		if (counter == &memcg->memsw)
3032 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3033 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3034 	case RES_LIMIT:
3035 		return (u64)counter->max * PAGE_SIZE;
3036 	case RES_MAX_USAGE:
3037 		return (u64)counter->watermark * PAGE_SIZE;
3038 	case RES_FAILCNT:
3039 		return counter->failcnt;
3040 	case RES_SOFT_LIMIT:
3041 		return (u64)memcg->soft_limit * PAGE_SIZE;
3042 	default:
3043 		BUG();
3044 	}
3045 }
3046 
3047 #ifdef CONFIG_MEMCG_KMEM
3048 static int memcg_online_kmem(struct mem_cgroup *memcg)
3049 {
3050 	int memcg_id;
3051 
3052 	if (cgroup_memory_nokmem)
3053 		return 0;
3054 
3055 	BUG_ON(memcg->kmemcg_id >= 0);
3056 	BUG_ON(memcg->kmem_state);
3057 
3058 	memcg_id = memcg_alloc_cache_id();
3059 	if (memcg_id < 0)
3060 		return memcg_id;
3061 
3062 	static_branch_inc(&memcg_kmem_enabled_key);
3063 	/*
3064 	 * A memory cgroup is considered kmem-online as soon as it gets
3065 	 * kmemcg_id. Setting the id after enabling static branching will
3066 	 * guarantee no one starts accounting before all call sites are
3067 	 * patched.
3068 	 */
3069 	memcg->kmemcg_id = memcg_id;
3070 	memcg->kmem_state = KMEM_ONLINE;
3071 	INIT_LIST_HEAD(&memcg->kmem_caches);
3072 
3073 	return 0;
3074 }
3075 
3076 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3077 {
3078 	struct cgroup_subsys_state *css;
3079 	struct mem_cgroup *parent, *child;
3080 	int kmemcg_id;
3081 
3082 	if (memcg->kmem_state != KMEM_ONLINE)
3083 		return;
3084 	/*
3085 	 * Clear the online state before clearing memcg_caches array
3086 	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
3087 	 * guarantees that no cache will be created for this cgroup
3088 	 * after we are done (see memcg_create_kmem_cache()).
3089 	 */
3090 	memcg->kmem_state = KMEM_ALLOCATED;
3091 
3092 	memcg_deactivate_kmem_caches(memcg);
3093 
3094 	kmemcg_id = memcg->kmemcg_id;
3095 	BUG_ON(kmemcg_id < 0);
3096 
3097 	parent = parent_mem_cgroup(memcg);
3098 	if (!parent)
3099 		parent = root_mem_cgroup;
3100 
3101 	/*
3102 	 * Change kmemcg_id of this cgroup and all its descendants to the
3103 	 * parent's id, and then move all entries from this cgroup's list_lrus
3104 	 * to ones of the parent. After we have finished, all list_lrus
3105 	 * corresponding to this cgroup are guaranteed to remain empty. The
3106 	 * ordering is imposed by list_lru_node->lock taken by
3107 	 * memcg_drain_all_list_lrus().
3108 	 */
3109 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3110 	css_for_each_descendant_pre(css, &memcg->css) {
3111 		child = mem_cgroup_from_css(css);
3112 		BUG_ON(child->kmemcg_id != kmemcg_id);
3113 		child->kmemcg_id = parent->kmemcg_id;
3114 		if (!memcg->use_hierarchy)
3115 			break;
3116 	}
3117 	rcu_read_unlock();
3118 
3119 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3120 
3121 	memcg_free_cache_id(kmemcg_id);
3122 }
3123 
3124 static void memcg_free_kmem(struct mem_cgroup *memcg)
3125 {
3126 	/* css_alloc() failed, offlining didn't happen */
3127 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3128 		memcg_offline_kmem(memcg);
3129 
3130 	if (memcg->kmem_state == KMEM_ALLOCATED) {
3131 		memcg_destroy_kmem_caches(memcg);
3132 		static_branch_dec(&memcg_kmem_enabled_key);
3133 		WARN_ON(page_counter_read(&memcg->kmem));
3134 	}
3135 }
3136 #else
3137 static int memcg_online_kmem(struct mem_cgroup *memcg)
3138 {
3139 	return 0;
3140 }
3141 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3142 {
3143 }
3144 static void memcg_free_kmem(struct mem_cgroup *memcg)
3145 {
3146 }
3147 #endif /* CONFIG_MEMCG_KMEM */
3148 
3149 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3150 				 unsigned long max)
3151 {
3152 	int ret;
3153 
3154 	mutex_lock(&memcg_max_mutex);
3155 	ret = page_counter_set_max(&memcg->kmem, max);
3156 	mutex_unlock(&memcg_max_mutex);
3157 	return ret;
3158 }
3159 
3160 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3161 {
3162 	int ret;
3163 
3164 	mutex_lock(&memcg_max_mutex);
3165 
3166 	ret = page_counter_set_max(&memcg->tcpmem, max);
3167 	if (ret)
3168 		goto out;
3169 
3170 	if (!memcg->tcpmem_active) {
3171 		/*
3172 		 * The active flag needs to be written after the static_key
3173 		 * update. This is what guarantees that the socket activation
3174 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3175 		 * for details, and note that we don't mark any socket as
3176 		 * belonging to this memcg until that flag is up.
3177 		 *
3178 		 * We need to do this, because static_keys will span multiple
3179 		 * sites, but we can't control their order. If we mark a socket
3180 		 * as accounted, but the accounting functions are not patched in
3181 		 * yet, we'll lose accounting.
3182 		 *
3183 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3184 		 * because when this value change, the code to process it is not
3185 		 * patched in yet.
3186 		 */
3187 		static_branch_inc(&memcg_sockets_enabled_key);
3188 		memcg->tcpmem_active = true;
3189 	}
3190 out:
3191 	mutex_unlock(&memcg_max_mutex);
3192 	return ret;
3193 }
3194 
3195 /*
3196  * The user of this function is...
3197  * RES_LIMIT.
3198  */
3199 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3200 				char *buf, size_t nbytes, loff_t off)
3201 {
3202 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3203 	unsigned long nr_pages;
3204 	int ret;
3205 
3206 	buf = strstrip(buf);
3207 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3208 	if (ret)
3209 		return ret;
3210 
3211 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3212 	case RES_LIMIT:
3213 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3214 			ret = -EINVAL;
3215 			break;
3216 		}
3217 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3218 		case _MEM:
3219 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3220 			break;
3221 		case _MEMSWAP:
3222 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3223 			break;
3224 		case _KMEM:
3225 			ret = memcg_update_kmem_max(memcg, nr_pages);
3226 			break;
3227 		case _TCP:
3228 			ret = memcg_update_tcp_max(memcg, nr_pages);
3229 			break;
3230 		}
3231 		break;
3232 	case RES_SOFT_LIMIT:
3233 		memcg->soft_limit = nr_pages;
3234 		ret = 0;
3235 		break;
3236 	}
3237 	return ret ?: nbytes;
3238 }
3239 
3240 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3241 				size_t nbytes, loff_t off)
3242 {
3243 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3244 	struct page_counter *counter;
3245 
3246 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3247 	case _MEM:
3248 		counter = &memcg->memory;
3249 		break;
3250 	case _MEMSWAP:
3251 		counter = &memcg->memsw;
3252 		break;
3253 	case _KMEM:
3254 		counter = &memcg->kmem;
3255 		break;
3256 	case _TCP:
3257 		counter = &memcg->tcpmem;
3258 		break;
3259 	default:
3260 		BUG();
3261 	}
3262 
3263 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3264 	case RES_MAX_USAGE:
3265 		page_counter_reset_watermark(counter);
3266 		break;
3267 	case RES_FAILCNT:
3268 		counter->failcnt = 0;
3269 		break;
3270 	default:
3271 		BUG();
3272 	}
3273 
3274 	return nbytes;
3275 }
3276 
3277 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3278 					struct cftype *cft)
3279 {
3280 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3281 }
3282 
3283 #ifdef CONFIG_MMU
3284 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3285 					struct cftype *cft, u64 val)
3286 {
3287 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3288 
3289 	if (val & ~MOVE_MASK)
3290 		return -EINVAL;
3291 
3292 	/*
3293 	 * No kind of locking is needed in here, because ->can_attach() will
3294 	 * check this value once in the beginning of the process, and then carry
3295 	 * on with stale data. This means that changes to this value will only
3296 	 * affect task migrations starting after the change.
3297 	 */
3298 	memcg->move_charge_at_immigrate = val;
3299 	return 0;
3300 }
3301 #else
3302 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3303 					struct cftype *cft, u64 val)
3304 {
3305 	return -ENOSYS;
3306 }
3307 #endif
3308 
3309 #ifdef CONFIG_NUMA
3310 
3311 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3312 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3313 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3314 
3315 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3316 					   int nid, unsigned int lru_mask)
3317 {
3318 	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
3319 	unsigned long nr = 0;
3320 	enum lru_list lru;
3321 
3322 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3323 
3324 	for_each_lru(lru) {
3325 		if (!(BIT(lru) & lru_mask))
3326 			continue;
3327 		nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3328 	}
3329 	return nr;
3330 }
3331 
3332 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3333 					     unsigned int lru_mask)
3334 {
3335 	unsigned long nr = 0;
3336 	enum lru_list lru;
3337 
3338 	for_each_lru(lru) {
3339 		if (!(BIT(lru) & lru_mask))
3340 			continue;
3341 		nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3342 	}
3343 	return nr;
3344 }
3345 
3346 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3347 {
3348 	struct numa_stat {
3349 		const char *name;
3350 		unsigned int lru_mask;
3351 	};
3352 
3353 	static const struct numa_stat stats[] = {
3354 		{ "total", LRU_ALL },
3355 		{ "file", LRU_ALL_FILE },
3356 		{ "anon", LRU_ALL_ANON },
3357 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3358 	};
3359 	const struct numa_stat *stat;
3360 	int nid;
3361 	unsigned long nr;
3362 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3363 
3364 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3365 		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3366 		seq_printf(m, "%s=%lu", stat->name, nr);
3367 		for_each_node_state(nid, N_MEMORY) {
3368 			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3369 							  stat->lru_mask);
3370 			seq_printf(m, " N%d=%lu", nid, nr);
3371 		}
3372 		seq_putc(m, '\n');
3373 	}
3374 
3375 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3376 		struct mem_cgroup *iter;
3377 
3378 		nr = 0;
3379 		for_each_mem_cgroup_tree(iter, memcg)
3380 			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3381 		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3382 		for_each_node_state(nid, N_MEMORY) {
3383 			nr = 0;
3384 			for_each_mem_cgroup_tree(iter, memcg)
3385 				nr += mem_cgroup_node_nr_lru_pages(
3386 					iter, nid, stat->lru_mask);
3387 			seq_printf(m, " N%d=%lu", nid, nr);
3388 		}
3389 		seq_putc(m, '\n');
3390 	}
3391 
3392 	return 0;
3393 }
3394 #endif /* CONFIG_NUMA */
3395 
3396 /* Universal VM events cgroup1 shows, original sort order */
3397 static const unsigned int memcg1_events[] = {
3398 	PGPGIN,
3399 	PGPGOUT,
3400 	PGFAULT,
3401 	PGMAJFAULT,
3402 };
3403 
3404 static const char *const memcg1_event_names[] = {
3405 	"pgpgin",
3406 	"pgpgout",
3407 	"pgfault",
3408 	"pgmajfault",
3409 };
3410 
3411 static int memcg_stat_show(struct seq_file *m, void *v)
3412 {
3413 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3414 	unsigned long memory, memsw;
3415 	struct mem_cgroup *mi;
3416 	unsigned int i;
3417 	struct accumulated_stats acc;
3418 
3419 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3420 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3421 
3422 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3423 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3424 			continue;
3425 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3426 			   memcg_page_state(memcg, memcg1_stats[i]) *
3427 			   PAGE_SIZE);
3428 	}
3429 
3430 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3431 		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
3432 			   memcg_sum_events(memcg, memcg1_events[i]));
3433 
3434 	for (i = 0; i < NR_LRU_LISTS; i++)
3435 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3436 			   memcg_page_state(memcg, NR_LRU_BASE + i) *
3437 			   PAGE_SIZE);
3438 
3439 	/* Hierarchical information */
3440 	memory = memsw = PAGE_COUNTER_MAX;
3441 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3442 		memory = min(memory, mi->memory.max);
3443 		memsw = min(memsw, mi->memsw.max);
3444 	}
3445 	seq_printf(m, "hierarchical_memory_limit %llu\n",
3446 		   (u64)memory * PAGE_SIZE);
3447 	if (do_memsw_account())
3448 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3449 			   (u64)memsw * PAGE_SIZE);
3450 
3451 	memset(&acc, 0, sizeof(acc));
3452 	acc.stats_size = ARRAY_SIZE(memcg1_stats);
3453 	acc.stats_array = memcg1_stats;
3454 	acc.events_size = ARRAY_SIZE(memcg1_events);
3455 	acc.events_array = memcg1_events;
3456 	accumulate_memcg_tree(memcg, &acc);
3457 
3458 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3459 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3460 			continue;
3461 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3462 			   (u64)acc.stat[i] * PAGE_SIZE);
3463 	}
3464 
3465 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3466 		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
3467 			   (u64)acc.events[i]);
3468 
3469 	for (i = 0; i < NR_LRU_LISTS; i++)
3470 		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
3471 			   (u64)acc.lru_pages[i] * PAGE_SIZE);
3472 
3473 #ifdef CONFIG_DEBUG_VM
3474 	{
3475 		pg_data_t *pgdat;
3476 		struct mem_cgroup_per_node *mz;
3477 		struct zone_reclaim_stat *rstat;
3478 		unsigned long recent_rotated[2] = {0, 0};
3479 		unsigned long recent_scanned[2] = {0, 0};
3480 
3481 		for_each_online_pgdat(pgdat) {
3482 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3483 			rstat = &mz->lruvec.reclaim_stat;
3484 
3485 			recent_rotated[0] += rstat->recent_rotated[0];
3486 			recent_rotated[1] += rstat->recent_rotated[1];
3487 			recent_scanned[0] += rstat->recent_scanned[0];
3488 			recent_scanned[1] += rstat->recent_scanned[1];
3489 		}
3490 		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3491 		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3492 		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3493 		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3494 	}
3495 #endif
3496 
3497 	return 0;
3498 }
3499 
3500 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3501 				      struct cftype *cft)
3502 {
3503 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3504 
3505 	return mem_cgroup_swappiness(memcg);
3506 }
3507 
3508 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3509 				       struct cftype *cft, u64 val)
3510 {
3511 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3512 
3513 	if (val > 100)
3514 		return -EINVAL;
3515 
3516 	if (css->parent)
3517 		memcg->swappiness = val;
3518 	else
3519 		vm_swappiness = val;
3520 
3521 	return 0;
3522 }
3523 
3524 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3525 {
3526 	struct mem_cgroup_threshold_ary *t;
3527 	unsigned long usage;
3528 	int i;
3529 
3530 	rcu_read_lock();
3531 	if (!swap)
3532 		t = rcu_dereference(memcg->thresholds.primary);
3533 	else
3534 		t = rcu_dereference(memcg->memsw_thresholds.primary);
3535 
3536 	if (!t)
3537 		goto unlock;
3538 
3539 	usage = mem_cgroup_usage(memcg, swap);
3540 
3541 	/*
3542 	 * current_threshold points to threshold just below or equal to usage.
3543 	 * If it's not true, a threshold was crossed after last
3544 	 * call of __mem_cgroup_threshold().
3545 	 */
3546 	i = t->current_threshold;
3547 
3548 	/*
3549 	 * Iterate backward over array of thresholds starting from
3550 	 * current_threshold and check if a threshold is crossed.
3551 	 * If none of thresholds below usage is crossed, we read
3552 	 * only one element of the array here.
3553 	 */
3554 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3555 		eventfd_signal(t->entries[i].eventfd, 1);
3556 
3557 	/* i = current_threshold + 1 */
3558 	i++;
3559 
3560 	/*
3561 	 * Iterate forward over array of thresholds starting from
3562 	 * current_threshold+1 and check if a threshold is crossed.
3563 	 * If none of thresholds above usage is crossed, we read
3564 	 * only one element of the array here.
3565 	 */
3566 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3567 		eventfd_signal(t->entries[i].eventfd, 1);
3568 
3569 	/* Update current_threshold */
3570 	t->current_threshold = i - 1;
3571 unlock:
3572 	rcu_read_unlock();
3573 }
3574 
3575 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3576 {
3577 	while (memcg) {
3578 		__mem_cgroup_threshold(memcg, false);
3579 		if (do_memsw_account())
3580 			__mem_cgroup_threshold(memcg, true);
3581 
3582 		memcg = parent_mem_cgroup(memcg);
3583 	}
3584 }
3585 
3586 static int compare_thresholds(const void *a, const void *b)
3587 {
3588 	const struct mem_cgroup_threshold *_a = a;
3589 	const struct mem_cgroup_threshold *_b = b;
3590 
3591 	if (_a->threshold > _b->threshold)
3592 		return 1;
3593 
3594 	if (_a->threshold < _b->threshold)
3595 		return -1;
3596 
3597 	return 0;
3598 }
3599 
3600 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3601 {
3602 	struct mem_cgroup_eventfd_list *ev;
3603 
3604 	spin_lock(&memcg_oom_lock);
3605 
3606 	list_for_each_entry(ev, &memcg->oom_notify, list)
3607 		eventfd_signal(ev->eventfd, 1);
3608 
3609 	spin_unlock(&memcg_oom_lock);
3610 	return 0;
3611 }
3612 
3613 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3614 {
3615 	struct mem_cgroup *iter;
3616 
3617 	for_each_mem_cgroup_tree(iter, memcg)
3618 		mem_cgroup_oom_notify_cb(iter);
3619 }
3620 
3621 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3622 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3623 {
3624 	struct mem_cgroup_thresholds *thresholds;
3625 	struct mem_cgroup_threshold_ary *new;
3626 	unsigned long threshold;
3627 	unsigned long usage;
3628 	int i, size, ret;
3629 
3630 	ret = page_counter_memparse(args, "-1", &threshold);
3631 	if (ret)
3632 		return ret;
3633 
3634 	mutex_lock(&memcg->thresholds_lock);
3635 
3636 	if (type == _MEM) {
3637 		thresholds = &memcg->thresholds;
3638 		usage = mem_cgroup_usage(memcg, false);
3639 	} else if (type == _MEMSWAP) {
3640 		thresholds = &memcg->memsw_thresholds;
3641 		usage = mem_cgroup_usage(memcg, true);
3642 	} else
3643 		BUG();
3644 
3645 	/* Check if a threshold crossed before adding a new one */
3646 	if (thresholds->primary)
3647 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3648 
3649 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3650 
3651 	/* Allocate memory for new array of thresholds */
3652 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
3653 	if (!new) {
3654 		ret = -ENOMEM;
3655 		goto unlock;
3656 	}
3657 	new->size = size;
3658 
3659 	/* Copy thresholds (if any) to new array */
3660 	if (thresholds->primary) {
3661 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3662 				sizeof(struct mem_cgroup_threshold));
3663 	}
3664 
3665 	/* Add new threshold */
3666 	new->entries[size - 1].eventfd = eventfd;
3667 	new->entries[size - 1].threshold = threshold;
3668 
3669 	/* Sort thresholds. Registering of new threshold isn't time-critical */
3670 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3671 			compare_thresholds, NULL);
3672 
3673 	/* Find current threshold */
3674 	new->current_threshold = -1;
3675 	for (i = 0; i < size; i++) {
3676 		if (new->entries[i].threshold <= usage) {
3677 			/*
3678 			 * new->current_threshold will not be used until
3679 			 * rcu_assign_pointer(), so it's safe to increment
3680 			 * it here.
3681 			 */
3682 			++new->current_threshold;
3683 		} else
3684 			break;
3685 	}
3686 
3687 	/* Free old spare buffer and save old primary buffer as spare */
3688 	kfree(thresholds->spare);
3689 	thresholds->spare = thresholds->primary;
3690 
3691 	rcu_assign_pointer(thresholds->primary, new);
3692 
3693 	/* To be sure that nobody uses thresholds */
3694 	synchronize_rcu();
3695 
3696 unlock:
3697 	mutex_unlock(&memcg->thresholds_lock);
3698 
3699 	return ret;
3700 }
3701 
3702 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3703 	struct eventfd_ctx *eventfd, const char *args)
3704 {
3705 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3706 }
3707 
3708 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3709 	struct eventfd_ctx *eventfd, const char *args)
3710 {
3711 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3712 }
3713 
3714 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3715 	struct eventfd_ctx *eventfd, enum res_type type)
3716 {
3717 	struct mem_cgroup_thresholds *thresholds;
3718 	struct mem_cgroup_threshold_ary *new;
3719 	unsigned long usage;
3720 	int i, j, size;
3721 
3722 	mutex_lock(&memcg->thresholds_lock);
3723 
3724 	if (type == _MEM) {
3725 		thresholds = &memcg->thresholds;
3726 		usage = mem_cgroup_usage(memcg, false);
3727 	} else if (type == _MEMSWAP) {
3728 		thresholds = &memcg->memsw_thresholds;
3729 		usage = mem_cgroup_usage(memcg, true);
3730 	} else
3731 		BUG();
3732 
3733 	if (!thresholds->primary)
3734 		goto unlock;
3735 
3736 	/* Check if a threshold crossed before removing */
3737 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3738 
3739 	/* Calculate new number of threshold */
3740 	size = 0;
3741 	for (i = 0; i < thresholds->primary->size; i++) {
3742 		if (thresholds->primary->entries[i].eventfd != eventfd)
3743 			size++;
3744 	}
3745 
3746 	new = thresholds->spare;
3747 
3748 	/* Set thresholds array to NULL if we don't have thresholds */
3749 	if (!size) {
3750 		kfree(new);
3751 		new = NULL;
3752 		goto swap_buffers;
3753 	}
3754 
3755 	new->size = size;
3756 
3757 	/* Copy thresholds and find current threshold */
3758 	new->current_threshold = -1;
3759 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3760 		if (thresholds->primary->entries[i].eventfd == eventfd)
3761 			continue;
3762 
3763 		new->entries[j] = thresholds->primary->entries[i];
3764 		if (new->entries[j].threshold <= usage) {
3765 			/*
3766 			 * new->current_threshold will not be used
3767 			 * until rcu_assign_pointer(), so it's safe to increment
3768 			 * it here.
3769 			 */
3770 			++new->current_threshold;
3771 		}
3772 		j++;
3773 	}
3774 
3775 swap_buffers:
3776 	/* Swap primary and spare array */
3777 	thresholds->spare = thresholds->primary;
3778 
3779 	rcu_assign_pointer(thresholds->primary, new);
3780 
3781 	/* To be sure that nobody uses thresholds */
3782 	synchronize_rcu();
3783 
3784 	/* If all events are unregistered, free the spare array */
3785 	if (!new) {
3786 		kfree(thresholds->spare);
3787 		thresholds->spare = NULL;
3788 	}
3789 unlock:
3790 	mutex_unlock(&memcg->thresholds_lock);
3791 }
3792 
3793 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3794 	struct eventfd_ctx *eventfd)
3795 {
3796 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3797 }
3798 
3799 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3800 	struct eventfd_ctx *eventfd)
3801 {
3802 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3803 }
3804 
3805 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3806 	struct eventfd_ctx *eventfd, const char *args)
3807 {
3808 	struct mem_cgroup_eventfd_list *event;
3809 
3810 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
3811 	if (!event)
3812 		return -ENOMEM;
3813 
3814 	spin_lock(&memcg_oom_lock);
3815 
3816 	event->eventfd = eventfd;
3817 	list_add(&event->list, &memcg->oom_notify);
3818 
3819 	/* already in OOM ? */
3820 	if (memcg->under_oom)
3821 		eventfd_signal(eventfd, 1);
3822 	spin_unlock(&memcg_oom_lock);
3823 
3824 	return 0;
3825 }
3826 
3827 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3828 	struct eventfd_ctx *eventfd)
3829 {
3830 	struct mem_cgroup_eventfd_list *ev, *tmp;
3831 
3832 	spin_lock(&memcg_oom_lock);
3833 
3834 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3835 		if (ev->eventfd == eventfd) {
3836 			list_del(&ev->list);
3837 			kfree(ev);
3838 		}
3839 	}
3840 
3841 	spin_unlock(&memcg_oom_lock);
3842 }
3843 
3844 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3845 {
3846 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
3847 
3848 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3849 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3850 	seq_printf(sf, "oom_kill %lu\n",
3851 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
3852 	return 0;
3853 }
3854 
3855 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3856 	struct cftype *cft, u64 val)
3857 {
3858 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3859 
3860 	/* cannot set to root cgroup and only 0 and 1 are allowed */
3861 	if (!css->parent || !((val == 0) || (val == 1)))
3862 		return -EINVAL;
3863 
3864 	memcg->oom_kill_disable = val;
3865 	if (!val)
3866 		memcg_oom_recover(memcg);
3867 
3868 	return 0;
3869 }
3870 
3871 #ifdef CONFIG_CGROUP_WRITEBACK
3872 
3873 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3874 {
3875 	return wb_domain_init(&memcg->cgwb_domain, gfp);
3876 }
3877 
3878 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3879 {
3880 	wb_domain_exit(&memcg->cgwb_domain);
3881 }
3882 
3883 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3884 {
3885 	wb_domain_size_changed(&memcg->cgwb_domain);
3886 }
3887 
3888 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3889 {
3890 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3891 
3892 	if (!memcg->css.parent)
3893 		return NULL;
3894 
3895 	return &memcg->cgwb_domain;
3896 }
3897 
3898 /*
3899  * idx can be of type enum memcg_stat_item or node_stat_item.
3900  * Keep in sync with memcg_exact_page().
3901  */
3902 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
3903 {
3904 	long x = atomic_long_read(&memcg->stat[idx]);
3905 	int cpu;
3906 
3907 	for_each_online_cpu(cpu)
3908 		x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
3909 	if (x < 0)
3910 		x = 0;
3911 	return x;
3912 }
3913 
3914 /**
3915  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3916  * @wb: bdi_writeback in question
3917  * @pfilepages: out parameter for number of file pages
3918  * @pheadroom: out parameter for number of allocatable pages according to memcg
3919  * @pdirty: out parameter for number of dirty pages
3920  * @pwriteback: out parameter for number of pages under writeback
3921  *
3922  * Determine the numbers of file, headroom, dirty, and writeback pages in
3923  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3924  * is a bit more involved.
3925  *
3926  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3927  * headroom is calculated as the lowest headroom of itself and the
3928  * ancestors.  Note that this doesn't consider the actual amount of
3929  * available memory in the system.  The caller should further cap
3930  * *@pheadroom accordingly.
3931  */
3932 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3933 			 unsigned long *pheadroom, unsigned long *pdirty,
3934 			 unsigned long *pwriteback)
3935 {
3936 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3937 	struct mem_cgroup *parent;
3938 
3939 	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
3940 
3941 	/* this should eventually include NR_UNSTABLE_NFS */
3942 	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
3943 	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
3944 			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
3945 	*pheadroom = PAGE_COUNTER_MAX;
3946 
3947 	while ((parent = parent_mem_cgroup(memcg))) {
3948 		unsigned long ceiling = min(memcg->memory.max, memcg->high);
3949 		unsigned long used = page_counter_read(&memcg->memory);
3950 
3951 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3952 		memcg = parent;
3953 	}
3954 }
3955 
3956 #else	/* CONFIG_CGROUP_WRITEBACK */
3957 
3958 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3959 {
3960 	return 0;
3961 }
3962 
3963 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3964 {
3965 }
3966 
3967 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3968 {
3969 }
3970 
3971 #endif	/* CONFIG_CGROUP_WRITEBACK */
3972 
3973 /*
3974  * DO NOT USE IN NEW FILES.
3975  *
3976  * "cgroup.event_control" implementation.
3977  *
3978  * This is way over-engineered.  It tries to support fully configurable
3979  * events for each user.  Such level of flexibility is completely
3980  * unnecessary especially in the light of the planned unified hierarchy.
3981  *
3982  * Please deprecate this and replace with something simpler if at all
3983  * possible.
3984  */
3985 
3986 /*
3987  * Unregister event and free resources.
3988  *
3989  * Gets called from workqueue.
3990  */
3991 static void memcg_event_remove(struct work_struct *work)
3992 {
3993 	struct mem_cgroup_event *event =
3994 		container_of(work, struct mem_cgroup_event, remove);
3995 	struct mem_cgroup *memcg = event->memcg;
3996 
3997 	remove_wait_queue(event->wqh, &event->wait);
3998 
3999 	event->unregister_event(memcg, event->eventfd);
4000 
4001 	/* Notify userspace the event is going away. */
4002 	eventfd_signal(event->eventfd, 1);
4003 
4004 	eventfd_ctx_put(event->eventfd);
4005 	kfree(event);
4006 	css_put(&memcg->css);
4007 }
4008 
4009 /*
4010  * Gets called on EPOLLHUP on eventfd when user closes it.
4011  *
4012  * Called with wqh->lock held and interrupts disabled.
4013  */
4014 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4015 			    int sync, void *key)
4016 {
4017 	struct mem_cgroup_event *event =
4018 		container_of(wait, struct mem_cgroup_event, wait);
4019 	struct mem_cgroup *memcg = event->memcg;
4020 	__poll_t flags = key_to_poll(key);
4021 
4022 	if (flags & EPOLLHUP) {
4023 		/*
4024 		 * If the event has been detached at cgroup removal, we
4025 		 * can simply return knowing the other side will cleanup
4026 		 * for us.
4027 		 *
4028 		 * We can't race against event freeing since the other
4029 		 * side will require wqh->lock via remove_wait_queue(),
4030 		 * which we hold.
4031 		 */
4032 		spin_lock(&memcg->event_list_lock);
4033 		if (!list_empty(&event->list)) {
4034 			list_del_init(&event->list);
4035 			/*
4036 			 * We are in atomic context, but cgroup_event_remove()
4037 			 * may sleep, so we have to call it in workqueue.
4038 			 */
4039 			schedule_work(&event->remove);
4040 		}
4041 		spin_unlock(&memcg->event_list_lock);
4042 	}
4043 
4044 	return 0;
4045 }
4046 
4047 static void memcg_event_ptable_queue_proc(struct file *file,
4048 		wait_queue_head_t *wqh, poll_table *pt)
4049 {
4050 	struct mem_cgroup_event *event =
4051 		container_of(pt, struct mem_cgroup_event, pt);
4052 
4053 	event->wqh = wqh;
4054 	add_wait_queue(wqh, &event->wait);
4055 }
4056 
4057 /*
4058  * DO NOT USE IN NEW FILES.
4059  *
4060  * Parse input and register new cgroup event handler.
4061  *
4062  * Input must be in format '<event_fd> <control_fd> <args>'.
4063  * Interpretation of args is defined by control file implementation.
4064  */
4065 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4066 					 char *buf, size_t nbytes, loff_t off)
4067 {
4068 	struct cgroup_subsys_state *css = of_css(of);
4069 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4070 	struct mem_cgroup_event *event;
4071 	struct cgroup_subsys_state *cfile_css;
4072 	unsigned int efd, cfd;
4073 	struct fd efile;
4074 	struct fd cfile;
4075 	const char *name;
4076 	char *endp;
4077 	int ret;
4078 
4079 	buf = strstrip(buf);
4080 
4081 	efd = simple_strtoul(buf, &endp, 10);
4082 	if (*endp != ' ')
4083 		return -EINVAL;
4084 	buf = endp + 1;
4085 
4086 	cfd = simple_strtoul(buf, &endp, 10);
4087 	if ((*endp != ' ') && (*endp != '\0'))
4088 		return -EINVAL;
4089 	buf = endp + 1;
4090 
4091 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4092 	if (!event)
4093 		return -ENOMEM;
4094 
4095 	event->memcg = memcg;
4096 	INIT_LIST_HEAD(&event->list);
4097 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4098 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4099 	INIT_WORK(&event->remove, memcg_event_remove);
4100 
4101 	efile = fdget(efd);
4102 	if (!efile.file) {
4103 		ret = -EBADF;
4104 		goto out_kfree;
4105 	}
4106 
4107 	event->eventfd = eventfd_ctx_fileget(efile.file);
4108 	if (IS_ERR(event->eventfd)) {
4109 		ret = PTR_ERR(event->eventfd);
4110 		goto out_put_efile;
4111 	}
4112 
4113 	cfile = fdget(cfd);
4114 	if (!cfile.file) {
4115 		ret = -EBADF;
4116 		goto out_put_eventfd;
4117 	}
4118 
4119 	/* the process need read permission on control file */
4120 	/* AV: shouldn't we check that it's been opened for read instead? */
4121 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4122 	if (ret < 0)
4123 		goto out_put_cfile;
4124 
4125 	/*
4126 	 * Determine the event callbacks and set them in @event.  This used
4127 	 * to be done via struct cftype but cgroup core no longer knows
4128 	 * about these events.  The following is crude but the whole thing
4129 	 * is for compatibility anyway.
4130 	 *
4131 	 * DO NOT ADD NEW FILES.
4132 	 */
4133 	name = cfile.file->f_path.dentry->d_name.name;
4134 
4135 	if (!strcmp(name, "memory.usage_in_bytes")) {
4136 		event->register_event = mem_cgroup_usage_register_event;
4137 		event->unregister_event = mem_cgroup_usage_unregister_event;
4138 	} else if (!strcmp(name, "memory.oom_control")) {
4139 		event->register_event = mem_cgroup_oom_register_event;
4140 		event->unregister_event = mem_cgroup_oom_unregister_event;
4141 	} else if (!strcmp(name, "memory.pressure_level")) {
4142 		event->register_event = vmpressure_register_event;
4143 		event->unregister_event = vmpressure_unregister_event;
4144 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4145 		event->register_event = memsw_cgroup_usage_register_event;
4146 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4147 	} else {
4148 		ret = -EINVAL;
4149 		goto out_put_cfile;
4150 	}
4151 
4152 	/*
4153 	 * Verify @cfile should belong to @css.  Also, remaining events are
4154 	 * automatically removed on cgroup destruction but the removal is
4155 	 * asynchronous, so take an extra ref on @css.
4156 	 */
4157 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4158 					       &memory_cgrp_subsys);
4159 	ret = -EINVAL;
4160 	if (IS_ERR(cfile_css))
4161 		goto out_put_cfile;
4162 	if (cfile_css != css) {
4163 		css_put(cfile_css);
4164 		goto out_put_cfile;
4165 	}
4166 
4167 	ret = event->register_event(memcg, event->eventfd, buf);
4168 	if (ret)
4169 		goto out_put_css;
4170 
4171 	vfs_poll(efile.file, &event->pt);
4172 
4173 	spin_lock(&memcg->event_list_lock);
4174 	list_add(&event->list, &memcg->event_list);
4175 	spin_unlock(&memcg->event_list_lock);
4176 
4177 	fdput(cfile);
4178 	fdput(efile);
4179 
4180 	return nbytes;
4181 
4182 out_put_css:
4183 	css_put(css);
4184 out_put_cfile:
4185 	fdput(cfile);
4186 out_put_eventfd:
4187 	eventfd_ctx_put(event->eventfd);
4188 out_put_efile:
4189 	fdput(efile);
4190 out_kfree:
4191 	kfree(event);
4192 
4193 	return ret;
4194 }
4195 
4196 static struct cftype mem_cgroup_legacy_files[] = {
4197 	{
4198 		.name = "usage_in_bytes",
4199 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4200 		.read_u64 = mem_cgroup_read_u64,
4201 	},
4202 	{
4203 		.name = "max_usage_in_bytes",
4204 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4205 		.write = mem_cgroup_reset,
4206 		.read_u64 = mem_cgroup_read_u64,
4207 	},
4208 	{
4209 		.name = "limit_in_bytes",
4210 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4211 		.write = mem_cgroup_write,
4212 		.read_u64 = mem_cgroup_read_u64,
4213 	},
4214 	{
4215 		.name = "soft_limit_in_bytes",
4216 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4217 		.write = mem_cgroup_write,
4218 		.read_u64 = mem_cgroup_read_u64,
4219 	},
4220 	{
4221 		.name = "failcnt",
4222 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4223 		.write = mem_cgroup_reset,
4224 		.read_u64 = mem_cgroup_read_u64,
4225 	},
4226 	{
4227 		.name = "stat",
4228 		.seq_show = memcg_stat_show,
4229 	},
4230 	{
4231 		.name = "force_empty",
4232 		.write = mem_cgroup_force_empty_write,
4233 	},
4234 	{
4235 		.name = "use_hierarchy",
4236 		.write_u64 = mem_cgroup_hierarchy_write,
4237 		.read_u64 = mem_cgroup_hierarchy_read,
4238 	},
4239 	{
4240 		.name = "cgroup.event_control",		/* XXX: for compat */
4241 		.write = memcg_write_event_control,
4242 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4243 	},
4244 	{
4245 		.name = "swappiness",
4246 		.read_u64 = mem_cgroup_swappiness_read,
4247 		.write_u64 = mem_cgroup_swappiness_write,
4248 	},
4249 	{
4250 		.name = "move_charge_at_immigrate",
4251 		.read_u64 = mem_cgroup_move_charge_read,
4252 		.write_u64 = mem_cgroup_move_charge_write,
4253 	},
4254 	{
4255 		.name = "oom_control",
4256 		.seq_show = mem_cgroup_oom_control_read,
4257 		.write_u64 = mem_cgroup_oom_control_write,
4258 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4259 	},
4260 	{
4261 		.name = "pressure_level",
4262 	},
4263 #ifdef CONFIG_NUMA
4264 	{
4265 		.name = "numa_stat",
4266 		.seq_show = memcg_numa_stat_show,
4267 	},
4268 #endif
4269 	{
4270 		.name = "kmem.limit_in_bytes",
4271 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4272 		.write = mem_cgroup_write,
4273 		.read_u64 = mem_cgroup_read_u64,
4274 	},
4275 	{
4276 		.name = "kmem.usage_in_bytes",
4277 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4278 		.read_u64 = mem_cgroup_read_u64,
4279 	},
4280 	{
4281 		.name = "kmem.failcnt",
4282 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4283 		.write = mem_cgroup_reset,
4284 		.read_u64 = mem_cgroup_read_u64,
4285 	},
4286 	{
4287 		.name = "kmem.max_usage_in_bytes",
4288 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4289 		.write = mem_cgroup_reset,
4290 		.read_u64 = mem_cgroup_read_u64,
4291 	},
4292 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
4293 	{
4294 		.name = "kmem.slabinfo",
4295 		.seq_start = memcg_slab_start,
4296 		.seq_next = memcg_slab_next,
4297 		.seq_stop = memcg_slab_stop,
4298 		.seq_show = memcg_slab_show,
4299 	},
4300 #endif
4301 	{
4302 		.name = "kmem.tcp.limit_in_bytes",
4303 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4304 		.write = mem_cgroup_write,
4305 		.read_u64 = mem_cgroup_read_u64,
4306 	},
4307 	{
4308 		.name = "kmem.tcp.usage_in_bytes",
4309 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4310 		.read_u64 = mem_cgroup_read_u64,
4311 	},
4312 	{
4313 		.name = "kmem.tcp.failcnt",
4314 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4315 		.write = mem_cgroup_reset,
4316 		.read_u64 = mem_cgroup_read_u64,
4317 	},
4318 	{
4319 		.name = "kmem.tcp.max_usage_in_bytes",
4320 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4321 		.write = mem_cgroup_reset,
4322 		.read_u64 = mem_cgroup_read_u64,
4323 	},
4324 	{ },	/* terminate */
4325 };
4326 
4327 /*
4328  * Private memory cgroup IDR
4329  *
4330  * Swap-out records and page cache shadow entries need to store memcg
4331  * references in constrained space, so we maintain an ID space that is
4332  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4333  * memory-controlled cgroups to 64k.
4334  *
4335  * However, there usually are many references to the oflline CSS after
4336  * the cgroup has been destroyed, such as page cache or reclaimable
4337  * slab objects, that don't need to hang on to the ID. We want to keep
4338  * those dead CSS from occupying IDs, or we might quickly exhaust the
4339  * relatively small ID space and prevent the creation of new cgroups
4340  * even when there are much fewer than 64k cgroups - possibly none.
4341  *
4342  * Maintain a private 16-bit ID space for memcg, and allow the ID to
4343  * be freed and recycled when it's no longer needed, which is usually
4344  * when the CSS is offlined.
4345  *
4346  * The only exception to that are records of swapped out tmpfs/shmem
4347  * pages that need to be attributed to live ancestors on swapin. But
4348  * those references are manageable from userspace.
4349  */
4350 
4351 static DEFINE_IDR(mem_cgroup_idr);
4352 
4353 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4354 {
4355 	if (memcg->id.id > 0) {
4356 		idr_remove(&mem_cgroup_idr, memcg->id.id);
4357 		memcg->id.id = 0;
4358 	}
4359 }
4360 
4361 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4362 {
4363 	refcount_add(n, &memcg->id.ref);
4364 }
4365 
4366 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4367 {
4368 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
4369 		mem_cgroup_id_remove(memcg);
4370 
4371 		/* Memcg ID pins CSS */
4372 		css_put(&memcg->css);
4373 	}
4374 }
4375 
4376 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4377 {
4378 	mem_cgroup_id_get_many(memcg, 1);
4379 }
4380 
4381 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4382 {
4383 	mem_cgroup_id_put_many(memcg, 1);
4384 }
4385 
4386 /**
4387  * mem_cgroup_from_id - look up a memcg from a memcg id
4388  * @id: the memcg id to look up
4389  *
4390  * Caller must hold rcu_read_lock().
4391  */
4392 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4393 {
4394 	WARN_ON_ONCE(!rcu_read_lock_held());
4395 	return idr_find(&mem_cgroup_idr, id);
4396 }
4397 
4398 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4399 {
4400 	struct mem_cgroup_per_node *pn;
4401 	int tmp = node;
4402 	/*
4403 	 * This routine is called against possible nodes.
4404 	 * But it's BUG to call kmalloc() against offline node.
4405 	 *
4406 	 * TODO: this routine can waste much memory for nodes which will
4407 	 *       never be onlined. It's better to use memory hotplug callback
4408 	 *       function.
4409 	 */
4410 	if (!node_state(node, N_NORMAL_MEMORY))
4411 		tmp = -1;
4412 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4413 	if (!pn)
4414 		return 1;
4415 
4416 	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4417 	if (!pn->lruvec_stat_cpu) {
4418 		kfree(pn);
4419 		return 1;
4420 	}
4421 
4422 	lruvec_init(&pn->lruvec);
4423 	pn->usage_in_excess = 0;
4424 	pn->on_tree = false;
4425 	pn->memcg = memcg;
4426 
4427 	memcg->nodeinfo[node] = pn;
4428 	return 0;
4429 }
4430 
4431 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4432 {
4433 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4434 
4435 	if (!pn)
4436 		return;
4437 
4438 	free_percpu(pn->lruvec_stat_cpu);
4439 	kfree(pn);
4440 }
4441 
4442 static void __mem_cgroup_free(struct mem_cgroup *memcg)
4443 {
4444 	int node;
4445 
4446 	for_each_node(node)
4447 		free_mem_cgroup_per_node_info(memcg, node);
4448 	free_percpu(memcg->stat_cpu);
4449 	kfree(memcg);
4450 }
4451 
4452 static void mem_cgroup_free(struct mem_cgroup *memcg)
4453 {
4454 	memcg_wb_domain_exit(memcg);
4455 	__mem_cgroup_free(memcg);
4456 }
4457 
4458 static struct mem_cgroup *mem_cgroup_alloc(void)
4459 {
4460 	struct mem_cgroup *memcg;
4461 	unsigned int size;
4462 	int node;
4463 
4464 	size = sizeof(struct mem_cgroup);
4465 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4466 
4467 	memcg = kzalloc(size, GFP_KERNEL);
4468 	if (!memcg)
4469 		return NULL;
4470 
4471 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4472 				 1, MEM_CGROUP_ID_MAX,
4473 				 GFP_KERNEL);
4474 	if (memcg->id.id < 0)
4475 		goto fail;
4476 
4477 	memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
4478 	if (!memcg->stat_cpu)
4479 		goto fail;
4480 
4481 	for_each_node(node)
4482 		if (alloc_mem_cgroup_per_node_info(memcg, node))
4483 			goto fail;
4484 
4485 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4486 		goto fail;
4487 
4488 	INIT_WORK(&memcg->high_work, high_work_func);
4489 	memcg->last_scanned_node = MAX_NUMNODES;
4490 	INIT_LIST_HEAD(&memcg->oom_notify);
4491 	mutex_init(&memcg->thresholds_lock);
4492 	spin_lock_init(&memcg->move_lock);
4493 	vmpressure_init(&memcg->vmpressure);
4494 	INIT_LIST_HEAD(&memcg->event_list);
4495 	spin_lock_init(&memcg->event_list_lock);
4496 	memcg->socket_pressure = jiffies;
4497 #ifdef CONFIG_MEMCG_KMEM
4498 	memcg->kmemcg_id = -1;
4499 #endif
4500 #ifdef CONFIG_CGROUP_WRITEBACK
4501 	INIT_LIST_HEAD(&memcg->cgwb_list);
4502 #endif
4503 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4504 	return memcg;
4505 fail:
4506 	mem_cgroup_id_remove(memcg);
4507 	__mem_cgroup_free(memcg);
4508 	return NULL;
4509 }
4510 
4511 static struct cgroup_subsys_state * __ref
4512 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4513 {
4514 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4515 	struct mem_cgroup *memcg;
4516 	long error = -ENOMEM;
4517 
4518 	memcg = mem_cgroup_alloc();
4519 	if (!memcg)
4520 		return ERR_PTR(error);
4521 
4522 	memcg->high = PAGE_COUNTER_MAX;
4523 	memcg->soft_limit = PAGE_COUNTER_MAX;
4524 	if (parent) {
4525 		memcg->swappiness = mem_cgroup_swappiness(parent);
4526 		memcg->oom_kill_disable = parent->oom_kill_disable;
4527 	}
4528 	if (parent && parent->use_hierarchy) {
4529 		memcg->use_hierarchy = true;
4530 		page_counter_init(&memcg->memory, &parent->memory);
4531 		page_counter_init(&memcg->swap, &parent->swap);
4532 		page_counter_init(&memcg->memsw, &parent->memsw);
4533 		page_counter_init(&memcg->kmem, &parent->kmem);
4534 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4535 	} else {
4536 		page_counter_init(&memcg->memory, NULL);
4537 		page_counter_init(&memcg->swap, NULL);
4538 		page_counter_init(&memcg->memsw, NULL);
4539 		page_counter_init(&memcg->kmem, NULL);
4540 		page_counter_init(&memcg->tcpmem, NULL);
4541 		/*
4542 		 * Deeper hierachy with use_hierarchy == false doesn't make
4543 		 * much sense so let cgroup subsystem know about this
4544 		 * unfortunate state in our controller.
4545 		 */
4546 		if (parent != root_mem_cgroup)
4547 			memory_cgrp_subsys.broken_hierarchy = true;
4548 	}
4549 
4550 	/* The following stuff does not apply to the root */
4551 	if (!parent) {
4552 		root_mem_cgroup = memcg;
4553 		return &memcg->css;
4554 	}
4555 
4556 	error = memcg_online_kmem(memcg);
4557 	if (error)
4558 		goto fail;
4559 
4560 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4561 		static_branch_inc(&memcg_sockets_enabled_key);
4562 
4563 	return &memcg->css;
4564 fail:
4565 	mem_cgroup_id_remove(memcg);
4566 	mem_cgroup_free(memcg);
4567 	return ERR_PTR(-ENOMEM);
4568 }
4569 
4570 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4571 {
4572 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4573 
4574 	/*
4575 	 * A memcg must be visible for memcg_expand_shrinker_maps()
4576 	 * by the time the maps are allocated. So, we allocate maps
4577 	 * here, when for_each_mem_cgroup() can't skip it.
4578 	 */
4579 	if (memcg_alloc_shrinker_maps(memcg)) {
4580 		mem_cgroup_id_remove(memcg);
4581 		return -ENOMEM;
4582 	}
4583 
4584 	/* Online state pins memcg ID, memcg ID pins CSS */
4585 	refcount_set(&memcg->id.ref, 1);
4586 	css_get(css);
4587 	return 0;
4588 }
4589 
4590 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4591 {
4592 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4593 	struct mem_cgroup_event *event, *tmp;
4594 
4595 	/*
4596 	 * Unregister events and notify userspace.
4597 	 * Notify userspace about cgroup removing only after rmdir of cgroup
4598 	 * directory to avoid race between userspace and kernelspace.
4599 	 */
4600 	spin_lock(&memcg->event_list_lock);
4601 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4602 		list_del_init(&event->list);
4603 		schedule_work(&event->remove);
4604 	}
4605 	spin_unlock(&memcg->event_list_lock);
4606 
4607 	page_counter_set_min(&memcg->memory, 0);
4608 	page_counter_set_low(&memcg->memory, 0);
4609 
4610 	memcg_offline_kmem(memcg);
4611 	wb_memcg_offline(memcg);
4612 
4613 	drain_all_stock(memcg);
4614 
4615 	mem_cgroup_id_put(memcg);
4616 }
4617 
4618 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4619 {
4620 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4621 
4622 	invalidate_reclaim_iterators(memcg);
4623 }
4624 
4625 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4626 {
4627 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4628 
4629 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4630 		static_branch_dec(&memcg_sockets_enabled_key);
4631 
4632 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4633 		static_branch_dec(&memcg_sockets_enabled_key);
4634 
4635 	vmpressure_cleanup(&memcg->vmpressure);
4636 	cancel_work_sync(&memcg->high_work);
4637 	mem_cgroup_remove_from_trees(memcg);
4638 	memcg_free_shrinker_maps(memcg);
4639 	memcg_free_kmem(memcg);
4640 	mem_cgroup_free(memcg);
4641 }
4642 
4643 /**
4644  * mem_cgroup_css_reset - reset the states of a mem_cgroup
4645  * @css: the target css
4646  *
4647  * Reset the states of the mem_cgroup associated with @css.  This is
4648  * invoked when the userland requests disabling on the default hierarchy
4649  * but the memcg is pinned through dependency.  The memcg should stop
4650  * applying policies and should revert to the vanilla state as it may be
4651  * made visible again.
4652  *
4653  * The current implementation only resets the essential configurations.
4654  * This needs to be expanded to cover all the visible parts.
4655  */
4656 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4657 {
4658 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4659 
4660 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
4661 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
4662 	page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
4663 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
4664 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
4665 	page_counter_set_min(&memcg->memory, 0);
4666 	page_counter_set_low(&memcg->memory, 0);
4667 	memcg->high = PAGE_COUNTER_MAX;
4668 	memcg->soft_limit = PAGE_COUNTER_MAX;
4669 	memcg_wb_domain_size_changed(memcg);
4670 }
4671 
4672 #ifdef CONFIG_MMU
4673 /* Handlers for move charge at task migration. */
4674 static int mem_cgroup_do_precharge(unsigned long count)
4675 {
4676 	int ret;
4677 
4678 	/* Try a single bulk charge without reclaim first, kswapd may wake */
4679 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4680 	if (!ret) {
4681 		mc.precharge += count;
4682 		return ret;
4683 	}
4684 
4685 	/* Try charges one by one with reclaim, but do not retry */
4686 	while (count--) {
4687 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4688 		if (ret)
4689 			return ret;
4690 		mc.precharge++;
4691 		cond_resched();
4692 	}
4693 	return 0;
4694 }
4695 
4696 union mc_target {
4697 	struct page	*page;
4698 	swp_entry_t	ent;
4699 };
4700 
4701 enum mc_target_type {
4702 	MC_TARGET_NONE = 0,
4703 	MC_TARGET_PAGE,
4704 	MC_TARGET_SWAP,
4705 	MC_TARGET_DEVICE,
4706 };
4707 
4708 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4709 						unsigned long addr, pte_t ptent)
4710 {
4711 	struct page *page = _vm_normal_page(vma, addr, ptent, true);
4712 
4713 	if (!page || !page_mapped(page))
4714 		return NULL;
4715 	if (PageAnon(page)) {
4716 		if (!(mc.flags & MOVE_ANON))
4717 			return NULL;
4718 	} else {
4719 		if (!(mc.flags & MOVE_FILE))
4720 			return NULL;
4721 	}
4722 	if (!get_page_unless_zero(page))
4723 		return NULL;
4724 
4725 	return page;
4726 }
4727 
4728 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
4729 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4730 			pte_t ptent, swp_entry_t *entry)
4731 {
4732 	struct page *page = NULL;
4733 	swp_entry_t ent = pte_to_swp_entry(ptent);
4734 
4735 	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4736 		return NULL;
4737 
4738 	/*
4739 	 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to
4740 	 * a device and because they are not accessible by CPU they are store
4741 	 * as special swap entry in the CPU page table.
4742 	 */
4743 	if (is_device_private_entry(ent)) {
4744 		page = device_private_entry_to_page(ent);
4745 		/*
4746 		 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have
4747 		 * a refcount of 1 when free (unlike normal page)
4748 		 */
4749 		if (!page_ref_add_unless(page, 1, 1))
4750 			return NULL;
4751 		return page;
4752 	}
4753 
4754 	/*
4755 	 * Because lookup_swap_cache() updates some statistics counter,
4756 	 * we call find_get_page() with swapper_space directly.
4757 	 */
4758 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
4759 	if (do_memsw_account())
4760 		entry->val = ent.val;
4761 
4762 	return page;
4763 }
4764 #else
4765 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4766 			pte_t ptent, swp_entry_t *entry)
4767 {
4768 	return NULL;
4769 }
4770 #endif
4771 
4772 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4773 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4774 {
4775 	struct page *page = NULL;
4776 	struct address_space *mapping;
4777 	pgoff_t pgoff;
4778 
4779 	if (!vma->vm_file) /* anonymous vma */
4780 		return NULL;
4781 	if (!(mc.flags & MOVE_FILE))
4782 		return NULL;
4783 
4784 	mapping = vma->vm_file->f_mapping;
4785 	pgoff = linear_page_index(vma, addr);
4786 
4787 	/* page is moved even if it's not RSS of this task(page-faulted). */
4788 #ifdef CONFIG_SWAP
4789 	/* shmem/tmpfs may report page out on swap: account for that too. */
4790 	if (shmem_mapping(mapping)) {
4791 		page = find_get_entry(mapping, pgoff);
4792 		if (xa_is_value(page)) {
4793 			swp_entry_t swp = radix_to_swp_entry(page);
4794 			if (do_memsw_account())
4795 				*entry = swp;
4796 			page = find_get_page(swap_address_space(swp),
4797 					     swp_offset(swp));
4798 		}
4799 	} else
4800 		page = find_get_page(mapping, pgoff);
4801 #else
4802 	page = find_get_page(mapping, pgoff);
4803 #endif
4804 	return page;
4805 }
4806 
4807 /**
4808  * mem_cgroup_move_account - move account of the page
4809  * @page: the page
4810  * @compound: charge the page as compound or small page
4811  * @from: mem_cgroup which the page is moved from.
4812  * @to:	mem_cgroup which the page is moved to. @from != @to.
4813  *
4814  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4815  *
4816  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4817  * from old cgroup.
4818  */
4819 static int mem_cgroup_move_account(struct page *page,
4820 				   bool compound,
4821 				   struct mem_cgroup *from,
4822 				   struct mem_cgroup *to)
4823 {
4824 	unsigned long flags;
4825 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4826 	int ret;
4827 	bool anon;
4828 
4829 	VM_BUG_ON(from == to);
4830 	VM_BUG_ON_PAGE(PageLRU(page), page);
4831 	VM_BUG_ON(compound && !PageTransHuge(page));
4832 
4833 	/*
4834 	 * Prevent mem_cgroup_migrate() from looking at
4835 	 * page->mem_cgroup of its source page while we change it.
4836 	 */
4837 	ret = -EBUSY;
4838 	if (!trylock_page(page))
4839 		goto out;
4840 
4841 	ret = -EINVAL;
4842 	if (page->mem_cgroup != from)
4843 		goto out_unlock;
4844 
4845 	anon = PageAnon(page);
4846 
4847 	spin_lock_irqsave(&from->move_lock, flags);
4848 
4849 	if (!anon && page_mapped(page)) {
4850 		__mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
4851 		__mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
4852 	}
4853 
4854 	/*
4855 	 * move_lock grabbed above and caller set from->moving_account, so
4856 	 * mod_memcg_page_state will serialize updates to PageDirty.
4857 	 * So mapping should be stable for dirty pages.
4858 	 */
4859 	if (!anon && PageDirty(page)) {
4860 		struct address_space *mapping = page_mapping(page);
4861 
4862 		if (mapping_cap_account_dirty(mapping)) {
4863 			__mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
4864 			__mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
4865 		}
4866 	}
4867 
4868 	if (PageWriteback(page)) {
4869 		__mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
4870 		__mod_memcg_state(to, NR_WRITEBACK, nr_pages);
4871 	}
4872 
4873 	/*
4874 	 * It is safe to change page->mem_cgroup here because the page
4875 	 * is referenced, charged, and isolated - we can't race with
4876 	 * uncharging, charging, migration, or LRU putback.
4877 	 */
4878 
4879 	/* caller should have done css_get */
4880 	page->mem_cgroup = to;
4881 	spin_unlock_irqrestore(&from->move_lock, flags);
4882 
4883 	ret = 0;
4884 
4885 	local_irq_disable();
4886 	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4887 	memcg_check_events(to, page);
4888 	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4889 	memcg_check_events(from, page);
4890 	local_irq_enable();
4891 out_unlock:
4892 	unlock_page(page);
4893 out:
4894 	return ret;
4895 }
4896 
4897 /**
4898  * get_mctgt_type - get target type of moving charge
4899  * @vma: the vma the pte to be checked belongs
4900  * @addr: the address corresponding to the pte to be checked
4901  * @ptent: the pte to be checked
4902  * @target: the pointer the target page or swap ent will be stored(can be NULL)
4903  *
4904  * Returns
4905  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4906  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4907  *     move charge. if @target is not NULL, the page is stored in target->page
4908  *     with extra refcnt got(Callers should handle it).
4909  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4910  *     target for charge migration. if @target is not NULL, the entry is stored
4911  *     in target->ent.
4912  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE  but page is MEMORY_DEVICE_PUBLIC
4913  *     or MEMORY_DEVICE_PRIVATE (so ZONE_DEVICE page and thus not on the lru).
4914  *     For now we such page is charge like a regular page would be as for all
4915  *     intent and purposes it is just special memory taking the place of a
4916  *     regular page.
4917  *
4918  *     See Documentations/vm/hmm.txt and include/linux/hmm.h
4919  *
4920  * Called with pte lock held.
4921  */
4922 
4923 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4924 		unsigned long addr, pte_t ptent, union mc_target *target)
4925 {
4926 	struct page *page = NULL;
4927 	enum mc_target_type ret = MC_TARGET_NONE;
4928 	swp_entry_t ent = { .val = 0 };
4929 
4930 	if (pte_present(ptent))
4931 		page = mc_handle_present_pte(vma, addr, ptent);
4932 	else if (is_swap_pte(ptent))
4933 		page = mc_handle_swap_pte(vma, ptent, &ent);
4934 	else if (pte_none(ptent))
4935 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
4936 
4937 	if (!page && !ent.val)
4938 		return ret;
4939 	if (page) {
4940 		/*
4941 		 * Do only loose check w/o serialization.
4942 		 * mem_cgroup_move_account() checks the page is valid or
4943 		 * not under LRU exclusion.
4944 		 */
4945 		if (page->mem_cgroup == mc.from) {
4946 			ret = MC_TARGET_PAGE;
4947 			if (is_device_private_page(page) ||
4948 			    is_device_public_page(page))
4949 				ret = MC_TARGET_DEVICE;
4950 			if (target)
4951 				target->page = page;
4952 		}
4953 		if (!ret || !target)
4954 			put_page(page);
4955 	}
4956 	/*
4957 	 * There is a swap entry and a page doesn't exist or isn't charged.
4958 	 * But we cannot move a tail-page in a THP.
4959 	 */
4960 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
4961 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4962 		ret = MC_TARGET_SWAP;
4963 		if (target)
4964 			target->ent = ent;
4965 	}
4966 	return ret;
4967 }
4968 
4969 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4970 /*
4971  * We don't consider PMD mapped swapping or file mapped pages because THP does
4972  * not support them for now.
4973  * Caller should make sure that pmd_trans_huge(pmd) is true.
4974  */
4975 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4976 		unsigned long addr, pmd_t pmd, union mc_target *target)
4977 {
4978 	struct page *page = NULL;
4979 	enum mc_target_type ret = MC_TARGET_NONE;
4980 
4981 	if (unlikely(is_swap_pmd(pmd))) {
4982 		VM_BUG_ON(thp_migration_supported() &&
4983 				  !is_pmd_migration_entry(pmd));
4984 		return ret;
4985 	}
4986 	page = pmd_page(pmd);
4987 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4988 	if (!(mc.flags & MOVE_ANON))
4989 		return ret;
4990 	if (page->mem_cgroup == mc.from) {
4991 		ret = MC_TARGET_PAGE;
4992 		if (target) {
4993 			get_page(page);
4994 			target->page = page;
4995 		}
4996 	}
4997 	return ret;
4998 }
4999 #else
5000 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5001 		unsigned long addr, pmd_t pmd, union mc_target *target)
5002 {
5003 	return MC_TARGET_NONE;
5004 }
5005 #endif
5006 
5007 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5008 					unsigned long addr, unsigned long end,
5009 					struct mm_walk *walk)
5010 {
5011 	struct vm_area_struct *vma = walk->vma;
5012 	pte_t *pte;
5013 	spinlock_t *ptl;
5014 
5015 	ptl = pmd_trans_huge_lock(pmd, vma);
5016 	if (ptl) {
5017 		/*
5018 		 * Note their can not be MC_TARGET_DEVICE for now as we do not
5019 		 * support transparent huge page with MEMORY_DEVICE_PUBLIC or
5020 		 * MEMORY_DEVICE_PRIVATE but this might change.
5021 		 */
5022 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5023 			mc.precharge += HPAGE_PMD_NR;
5024 		spin_unlock(ptl);
5025 		return 0;
5026 	}
5027 
5028 	if (pmd_trans_unstable(pmd))
5029 		return 0;
5030 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5031 	for (; addr != end; pte++, addr += PAGE_SIZE)
5032 		if (get_mctgt_type(vma, addr, *pte, NULL))
5033 			mc.precharge++;	/* increment precharge temporarily */
5034 	pte_unmap_unlock(pte - 1, ptl);
5035 	cond_resched();
5036 
5037 	return 0;
5038 }
5039 
5040 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5041 {
5042 	unsigned long precharge;
5043 
5044 	struct mm_walk mem_cgroup_count_precharge_walk = {
5045 		.pmd_entry = mem_cgroup_count_precharge_pte_range,
5046 		.mm = mm,
5047 	};
5048 	down_read(&mm->mmap_sem);
5049 	walk_page_range(0, mm->highest_vm_end,
5050 			&mem_cgroup_count_precharge_walk);
5051 	up_read(&mm->mmap_sem);
5052 
5053 	precharge = mc.precharge;
5054 	mc.precharge = 0;
5055 
5056 	return precharge;
5057 }
5058 
5059 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5060 {
5061 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5062 
5063 	VM_BUG_ON(mc.moving_task);
5064 	mc.moving_task = current;
5065 	return mem_cgroup_do_precharge(precharge);
5066 }
5067 
5068 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5069 static void __mem_cgroup_clear_mc(void)
5070 {
5071 	struct mem_cgroup *from = mc.from;
5072 	struct mem_cgroup *to = mc.to;
5073 
5074 	/* we must uncharge all the leftover precharges from mc.to */
5075 	if (mc.precharge) {
5076 		cancel_charge(mc.to, mc.precharge);
5077 		mc.precharge = 0;
5078 	}
5079 	/*
5080 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5081 	 * we must uncharge here.
5082 	 */
5083 	if (mc.moved_charge) {
5084 		cancel_charge(mc.from, mc.moved_charge);
5085 		mc.moved_charge = 0;
5086 	}
5087 	/* we must fixup refcnts and charges */
5088 	if (mc.moved_swap) {
5089 		/* uncharge swap account from the old cgroup */
5090 		if (!mem_cgroup_is_root(mc.from))
5091 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5092 
5093 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5094 
5095 		/*
5096 		 * we charged both to->memory and to->memsw, so we
5097 		 * should uncharge to->memory.
5098 		 */
5099 		if (!mem_cgroup_is_root(mc.to))
5100 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5101 
5102 		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
5103 		css_put_many(&mc.to->css, mc.moved_swap);
5104 
5105 		mc.moved_swap = 0;
5106 	}
5107 	memcg_oom_recover(from);
5108 	memcg_oom_recover(to);
5109 	wake_up_all(&mc.waitq);
5110 }
5111 
5112 static void mem_cgroup_clear_mc(void)
5113 {
5114 	struct mm_struct *mm = mc.mm;
5115 
5116 	/*
5117 	 * we must clear moving_task before waking up waiters at the end of
5118 	 * task migration.
5119 	 */
5120 	mc.moving_task = NULL;
5121 	__mem_cgroup_clear_mc();
5122 	spin_lock(&mc.lock);
5123 	mc.from = NULL;
5124 	mc.to = NULL;
5125 	mc.mm = NULL;
5126 	spin_unlock(&mc.lock);
5127 
5128 	mmput(mm);
5129 }
5130 
5131 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5132 {
5133 	struct cgroup_subsys_state *css;
5134 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5135 	struct mem_cgroup *from;
5136 	struct task_struct *leader, *p;
5137 	struct mm_struct *mm;
5138 	unsigned long move_flags;
5139 	int ret = 0;
5140 
5141 	/* charge immigration isn't supported on the default hierarchy */
5142 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5143 		return 0;
5144 
5145 	/*
5146 	 * Multi-process migrations only happen on the default hierarchy
5147 	 * where charge immigration is not used.  Perform charge
5148 	 * immigration if @tset contains a leader and whine if there are
5149 	 * multiple.
5150 	 */
5151 	p = NULL;
5152 	cgroup_taskset_for_each_leader(leader, css, tset) {
5153 		WARN_ON_ONCE(p);
5154 		p = leader;
5155 		memcg = mem_cgroup_from_css(css);
5156 	}
5157 	if (!p)
5158 		return 0;
5159 
5160 	/*
5161 	 * We are now commited to this value whatever it is. Changes in this
5162 	 * tunable will only affect upcoming migrations, not the current one.
5163 	 * So we need to save it, and keep it going.
5164 	 */
5165 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5166 	if (!move_flags)
5167 		return 0;
5168 
5169 	from = mem_cgroup_from_task(p);
5170 
5171 	VM_BUG_ON(from == memcg);
5172 
5173 	mm = get_task_mm(p);
5174 	if (!mm)
5175 		return 0;
5176 	/* We move charges only when we move a owner of the mm */
5177 	if (mm->owner == p) {
5178 		VM_BUG_ON(mc.from);
5179 		VM_BUG_ON(mc.to);
5180 		VM_BUG_ON(mc.precharge);
5181 		VM_BUG_ON(mc.moved_charge);
5182 		VM_BUG_ON(mc.moved_swap);
5183 
5184 		spin_lock(&mc.lock);
5185 		mc.mm = mm;
5186 		mc.from = from;
5187 		mc.to = memcg;
5188 		mc.flags = move_flags;
5189 		spin_unlock(&mc.lock);
5190 		/* We set mc.moving_task later */
5191 
5192 		ret = mem_cgroup_precharge_mc(mm);
5193 		if (ret)
5194 			mem_cgroup_clear_mc();
5195 	} else {
5196 		mmput(mm);
5197 	}
5198 	return ret;
5199 }
5200 
5201 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5202 {
5203 	if (mc.to)
5204 		mem_cgroup_clear_mc();
5205 }
5206 
5207 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5208 				unsigned long addr, unsigned long end,
5209 				struct mm_walk *walk)
5210 {
5211 	int ret = 0;
5212 	struct vm_area_struct *vma = walk->vma;
5213 	pte_t *pte;
5214 	spinlock_t *ptl;
5215 	enum mc_target_type target_type;
5216 	union mc_target target;
5217 	struct page *page;
5218 
5219 	ptl = pmd_trans_huge_lock(pmd, vma);
5220 	if (ptl) {
5221 		if (mc.precharge < HPAGE_PMD_NR) {
5222 			spin_unlock(ptl);
5223 			return 0;
5224 		}
5225 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5226 		if (target_type == MC_TARGET_PAGE) {
5227 			page = target.page;
5228 			if (!isolate_lru_page(page)) {
5229 				if (!mem_cgroup_move_account(page, true,
5230 							     mc.from, mc.to)) {
5231 					mc.precharge -= HPAGE_PMD_NR;
5232 					mc.moved_charge += HPAGE_PMD_NR;
5233 				}
5234 				putback_lru_page(page);
5235 			}
5236 			put_page(page);
5237 		} else if (target_type == MC_TARGET_DEVICE) {
5238 			page = target.page;
5239 			if (!mem_cgroup_move_account(page, true,
5240 						     mc.from, mc.to)) {
5241 				mc.precharge -= HPAGE_PMD_NR;
5242 				mc.moved_charge += HPAGE_PMD_NR;
5243 			}
5244 			put_page(page);
5245 		}
5246 		spin_unlock(ptl);
5247 		return 0;
5248 	}
5249 
5250 	if (pmd_trans_unstable(pmd))
5251 		return 0;
5252 retry:
5253 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5254 	for (; addr != end; addr += PAGE_SIZE) {
5255 		pte_t ptent = *(pte++);
5256 		bool device = false;
5257 		swp_entry_t ent;
5258 
5259 		if (!mc.precharge)
5260 			break;
5261 
5262 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5263 		case MC_TARGET_DEVICE:
5264 			device = true;
5265 			/* fall through */
5266 		case MC_TARGET_PAGE:
5267 			page = target.page;
5268 			/*
5269 			 * We can have a part of the split pmd here. Moving it
5270 			 * can be done but it would be too convoluted so simply
5271 			 * ignore such a partial THP and keep it in original
5272 			 * memcg. There should be somebody mapping the head.
5273 			 */
5274 			if (PageTransCompound(page))
5275 				goto put;
5276 			if (!device && isolate_lru_page(page))
5277 				goto put;
5278 			if (!mem_cgroup_move_account(page, false,
5279 						mc.from, mc.to)) {
5280 				mc.precharge--;
5281 				/* we uncharge from mc.from later. */
5282 				mc.moved_charge++;
5283 			}
5284 			if (!device)
5285 				putback_lru_page(page);
5286 put:			/* get_mctgt_type() gets the page */
5287 			put_page(page);
5288 			break;
5289 		case MC_TARGET_SWAP:
5290 			ent = target.ent;
5291 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5292 				mc.precharge--;
5293 				/* we fixup refcnts and charges later. */
5294 				mc.moved_swap++;
5295 			}
5296 			break;
5297 		default:
5298 			break;
5299 		}
5300 	}
5301 	pte_unmap_unlock(pte - 1, ptl);
5302 	cond_resched();
5303 
5304 	if (addr != end) {
5305 		/*
5306 		 * We have consumed all precharges we got in can_attach().
5307 		 * We try charge one by one, but don't do any additional
5308 		 * charges to mc.to if we have failed in charge once in attach()
5309 		 * phase.
5310 		 */
5311 		ret = mem_cgroup_do_precharge(1);
5312 		if (!ret)
5313 			goto retry;
5314 	}
5315 
5316 	return ret;
5317 }
5318 
5319 static void mem_cgroup_move_charge(void)
5320 {
5321 	struct mm_walk mem_cgroup_move_charge_walk = {
5322 		.pmd_entry = mem_cgroup_move_charge_pte_range,
5323 		.mm = mc.mm,
5324 	};
5325 
5326 	lru_add_drain_all();
5327 	/*
5328 	 * Signal lock_page_memcg() to take the memcg's move_lock
5329 	 * while we're moving its pages to another memcg. Then wait
5330 	 * for already started RCU-only updates to finish.
5331 	 */
5332 	atomic_inc(&mc.from->moving_account);
5333 	synchronize_rcu();
5334 retry:
5335 	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5336 		/*
5337 		 * Someone who are holding the mmap_sem might be waiting in
5338 		 * waitq. So we cancel all extra charges, wake up all waiters,
5339 		 * and retry. Because we cancel precharges, we might not be able
5340 		 * to move enough charges, but moving charge is a best-effort
5341 		 * feature anyway, so it wouldn't be a big problem.
5342 		 */
5343 		__mem_cgroup_clear_mc();
5344 		cond_resched();
5345 		goto retry;
5346 	}
5347 	/*
5348 	 * When we have consumed all precharges and failed in doing
5349 	 * additional charge, the page walk just aborts.
5350 	 */
5351 	walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
5352 
5353 	up_read(&mc.mm->mmap_sem);
5354 	atomic_dec(&mc.from->moving_account);
5355 }
5356 
5357 static void mem_cgroup_move_task(void)
5358 {
5359 	if (mc.to) {
5360 		mem_cgroup_move_charge();
5361 		mem_cgroup_clear_mc();
5362 	}
5363 }
5364 #else	/* !CONFIG_MMU */
5365 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5366 {
5367 	return 0;
5368 }
5369 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5370 {
5371 }
5372 static void mem_cgroup_move_task(void)
5373 {
5374 }
5375 #endif
5376 
5377 /*
5378  * Cgroup retains root cgroups across [un]mount cycles making it necessary
5379  * to verify whether we're attached to the default hierarchy on each mount
5380  * attempt.
5381  */
5382 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5383 {
5384 	/*
5385 	 * use_hierarchy is forced on the default hierarchy.  cgroup core
5386 	 * guarantees that @root doesn't have any children, so turning it
5387 	 * on for the root memcg is enough.
5388 	 */
5389 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5390 		root_mem_cgroup->use_hierarchy = true;
5391 	else
5392 		root_mem_cgroup->use_hierarchy = false;
5393 }
5394 
5395 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
5396 {
5397 	if (value == PAGE_COUNTER_MAX)
5398 		seq_puts(m, "max\n");
5399 	else
5400 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
5401 
5402 	return 0;
5403 }
5404 
5405 static u64 memory_current_read(struct cgroup_subsys_state *css,
5406 			       struct cftype *cft)
5407 {
5408 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5409 
5410 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5411 }
5412 
5413 static int memory_min_show(struct seq_file *m, void *v)
5414 {
5415 	return seq_puts_memcg_tunable(m,
5416 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
5417 }
5418 
5419 static ssize_t memory_min_write(struct kernfs_open_file *of,
5420 				char *buf, size_t nbytes, loff_t off)
5421 {
5422 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5423 	unsigned long min;
5424 	int err;
5425 
5426 	buf = strstrip(buf);
5427 	err = page_counter_memparse(buf, "max", &min);
5428 	if (err)
5429 		return err;
5430 
5431 	page_counter_set_min(&memcg->memory, min);
5432 
5433 	return nbytes;
5434 }
5435 
5436 static int memory_low_show(struct seq_file *m, void *v)
5437 {
5438 	return seq_puts_memcg_tunable(m,
5439 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
5440 }
5441 
5442 static ssize_t memory_low_write(struct kernfs_open_file *of,
5443 				char *buf, size_t nbytes, loff_t off)
5444 {
5445 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5446 	unsigned long low;
5447 	int err;
5448 
5449 	buf = strstrip(buf);
5450 	err = page_counter_memparse(buf, "max", &low);
5451 	if (err)
5452 		return err;
5453 
5454 	page_counter_set_low(&memcg->memory, low);
5455 
5456 	return nbytes;
5457 }
5458 
5459 static int memory_high_show(struct seq_file *m, void *v)
5460 {
5461 	return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
5462 }
5463 
5464 static ssize_t memory_high_write(struct kernfs_open_file *of,
5465 				 char *buf, size_t nbytes, loff_t off)
5466 {
5467 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5468 	unsigned long nr_pages;
5469 	unsigned long high;
5470 	int err;
5471 
5472 	buf = strstrip(buf);
5473 	err = page_counter_memparse(buf, "max", &high);
5474 	if (err)
5475 		return err;
5476 
5477 	memcg->high = high;
5478 
5479 	nr_pages = page_counter_read(&memcg->memory);
5480 	if (nr_pages > high)
5481 		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5482 					     GFP_KERNEL, true);
5483 
5484 	memcg_wb_domain_size_changed(memcg);
5485 	return nbytes;
5486 }
5487 
5488 static int memory_max_show(struct seq_file *m, void *v)
5489 {
5490 	return seq_puts_memcg_tunable(m,
5491 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
5492 }
5493 
5494 static ssize_t memory_max_write(struct kernfs_open_file *of,
5495 				char *buf, size_t nbytes, loff_t off)
5496 {
5497 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5498 	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5499 	bool drained = false;
5500 	unsigned long max;
5501 	int err;
5502 
5503 	buf = strstrip(buf);
5504 	err = page_counter_memparse(buf, "max", &max);
5505 	if (err)
5506 		return err;
5507 
5508 	xchg(&memcg->memory.max, max);
5509 
5510 	for (;;) {
5511 		unsigned long nr_pages = page_counter_read(&memcg->memory);
5512 
5513 		if (nr_pages <= max)
5514 			break;
5515 
5516 		if (signal_pending(current)) {
5517 			err = -EINTR;
5518 			break;
5519 		}
5520 
5521 		if (!drained) {
5522 			drain_all_stock(memcg);
5523 			drained = true;
5524 			continue;
5525 		}
5526 
5527 		if (nr_reclaims) {
5528 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5529 							  GFP_KERNEL, true))
5530 				nr_reclaims--;
5531 			continue;
5532 		}
5533 
5534 		memcg_memory_event(memcg, MEMCG_OOM);
5535 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5536 			break;
5537 	}
5538 
5539 	memcg_wb_domain_size_changed(memcg);
5540 	return nbytes;
5541 }
5542 
5543 static int memory_events_show(struct seq_file *m, void *v)
5544 {
5545 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5546 
5547 	seq_printf(m, "low %lu\n",
5548 		   atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
5549 	seq_printf(m, "high %lu\n",
5550 		   atomic_long_read(&memcg->memory_events[MEMCG_HIGH]));
5551 	seq_printf(m, "max %lu\n",
5552 		   atomic_long_read(&memcg->memory_events[MEMCG_MAX]));
5553 	seq_printf(m, "oom %lu\n",
5554 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM]));
5555 	seq_printf(m, "oom_kill %lu\n",
5556 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
5557 
5558 	return 0;
5559 }
5560 
5561 static int memory_stat_show(struct seq_file *m, void *v)
5562 {
5563 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5564 	struct accumulated_stats acc;
5565 	int i;
5566 
5567 	/*
5568 	 * Provide statistics on the state of the memory subsystem as
5569 	 * well as cumulative event counters that show past behavior.
5570 	 *
5571 	 * This list is ordered following a combination of these gradients:
5572 	 * 1) generic big picture -> specifics and details
5573 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
5574 	 *
5575 	 * Current memory state:
5576 	 */
5577 
5578 	memset(&acc, 0, sizeof(acc));
5579 	acc.stats_size = MEMCG_NR_STAT;
5580 	acc.events_size = NR_VM_EVENT_ITEMS;
5581 	accumulate_memcg_tree(memcg, &acc);
5582 
5583 	seq_printf(m, "anon %llu\n",
5584 		   (u64)acc.stat[MEMCG_RSS] * PAGE_SIZE);
5585 	seq_printf(m, "file %llu\n",
5586 		   (u64)acc.stat[MEMCG_CACHE] * PAGE_SIZE);
5587 	seq_printf(m, "kernel_stack %llu\n",
5588 		   (u64)acc.stat[MEMCG_KERNEL_STACK_KB] * 1024);
5589 	seq_printf(m, "slab %llu\n",
5590 		   (u64)(acc.stat[NR_SLAB_RECLAIMABLE] +
5591 			 acc.stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5592 	seq_printf(m, "sock %llu\n",
5593 		   (u64)acc.stat[MEMCG_SOCK] * PAGE_SIZE);
5594 
5595 	seq_printf(m, "shmem %llu\n",
5596 		   (u64)acc.stat[NR_SHMEM] * PAGE_SIZE);
5597 	seq_printf(m, "file_mapped %llu\n",
5598 		   (u64)acc.stat[NR_FILE_MAPPED] * PAGE_SIZE);
5599 	seq_printf(m, "file_dirty %llu\n",
5600 		   (u64)acc.stat[NR_FILE_DIRTY] * PAGE_SIZE);
5601 	seq_printf(m, "file_writeback %llu\n",
5602 		   (u64)acc.stat[NR_WRITEBACK] * PAGE_SIZE);
5603 
5604 	/*
5605 	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
5606 	 * with the NR_ANON_THP vm counter, but right now it's a pain in the
5607 	 * arse because it requires migrating the work out of rmap to a place
5608 	 * where the page->mem_cgroup is set up and stable.
5609 	 */
5610 	seq_printf(m, "anon_thp %llu\n",
5611 		   (u64)acc.stat[MEMCG_RSS_HUGE] * PAGE_SIZE);
5612 
5613 	for (i = 0; i < NR_LRU_LISTS; i++)
5614 		seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i],
5615 			   (u64)acc.lru_pages[i] * PAGE_SIZE);
5616 
5617 	seq_printf(m, "slab_reclaimable %llu\n",
5618 		   (u64)acc.stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
5619 	seq_printf(m, "slab_unreclaimable %llu\n",
5620 		   (u64)acc.stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5621 
5622 	/* Accumulated memory events */
5623 
5624 	seq_printf(m, "pgfault %lu\n", acc.events[PGFAULT]);
5625 	seq_printf(m, "pgmajfault %lu\n", acc.events[PGMAJFAULT]);
5626 
5627 	seq_printf(m, "workingset_refault %lu\n",
5628 		   acc.stat[WORKINGSET_REFAULT]);
5629 	seq_printf(m, "workingset_activate %lu\n",
5630 		   acc.stat[WORKINGSET_ACTIVATE]);
5631 	seq_printf(m, "workingset_nodereclaim %lu\n",
5632 		   acc.stat[WORKINGSET_NODERECLAIM]);
5633 
5634 	seq_printf(m, "pgrefill %lu\n", acc.events[PGREFILL]);
5635 	seq_printf(m, "pgscan %lu\n", acc.events[PGSCAN_KSWAPD] +
5636 		   acc.events[PGSCAN_DIRECT]);
5637 	seq_printf(m, "pgsteal %lu\n", acc.events[PGSTEAL_KSWAPD] +
5638 		   acc.events[PGSTEAL_DIRECT]);
5639 	seq_printf(m, "pgactivate %lu\n", acc.events[PGACTIVATE]);
5640 	seq_printf(m, "pgdeactivate %lu\n", acc.events[PGDEACTIVATE]);
5641 	seq_printf(m, "pglazyfree %lu\n", acc.events[PGLAZYFREE]);
5642 	seq_printf(m, "pglazyfreed %lu\n", acc.events[PGLAZYFREED]);
5643 
5644 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5645 	seq_printf(m, "thp_fault_alloc %lu\n", acc.events[THP_FAULT_ALLOC]);
5646 	seq_printf(m, "thp_collapse_alloc %lu\n",
5647 		   acc.events[THP_COLLAPSE_ALLOC]);
5648 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5649 
5650 	return 0;
5651 }
5652 
5653 static int memory_oom_group_show(struct seq_file *m, void *v)
5654 {
5655 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5656 
5657 	seq_printf(m, "%d\n", memcg->oom_group);
5658 
5659 	return 0;
5660 }
5661 
5662 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
5663 				      char *buf, size_t nbytes, loff_t off)
5664 {
5665 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5666 	int ret, oom_group;
5667 
5668 	buf = strstrip(buf);
5669 	if (!buf)
5670 		return -EINVAL;
5671 
5672 	ret = kstrtoint(buf, 0, &oom_group);
5673 	if (ret)
5674 		return ret;
5675 
5676 	if (oom_group != 0 && oom_group != 1)
5677 		return -EINVAL;
5678 
5679 	memcg->oom_group = oom_group;
5680 
5681 	return nbytes;
5682 }
5683 
5684 static struct cftype memory_files[] = {
5685 	{
5686 		.name = "current",
5687 		.flags = CFTYPE_NOT_ON_ROOT,
5688 		.read_u64 = memory_current_read,
5689 	},
5690 	{
5691 		.name = "min",
5692 		.flags = CFTYPE_NOT_ON_ROOT,
5693 		.seq_show = memory_min_show,
5694 		.write = memory_min_write,
5695 	},
5696 	{
5697 		.name = "low",
5698 		.flags = CFTYPE_NOT_ON_ROOT,
5699 		.seq_show = memory_low_show,
5700 		.write = memory_low_write,
5701 	},
5702 	{
5703 		.name = "high",
5704 		.flags = CFTYPE_NOT_ON_ROOT,
5705 		.seq_show = memory_high_show,
5706 		.write = memory_high_write,
5707 	},
5708 	{
5709 		.name = "max",
5710 		.flags = CFTYPE_NOT_ON_ROOT,
5711 		.seq_show = memory_max_show,
5712 		.write = memory_max_write,
5713 	},
5714 	{
5715 		.name = "events",
5716 		.flags = CFTYPE_NOT_ON_ROOT,
5717 		.file_offset = offsetof(struct mem_cgroup, events_file),
5718 		.seq_show = memory_events_show,
5719 	},
5720 	{
5721 		.name = "stat",
5722 		.flags = CFTYPE_NOT_ON_ROOT,
5723 		.seq_show = memory_stat_show,
5724 	},
5725 	{
5726 		.name = "oom.group",
5727 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
5728 		.seq_show = memory_oom_group_show,
5729 		.write = memory_oom_group_write,
5730 	},
5731 	{ }	/* terminate */
5732 };
5733 
5734 struct cgroup_subsys memory_cgrp_subsys = {
5735 	.css_alloc = mem_cgroup_css_alloc,
5736 	.css_online = mem_cgroup_css_online,
5737 	.css_offline = mem_cgroup_css_offline,
5738 	.css_released = mem_cgroup_css_released,
5739 	.css_free = mem_cgroup_css_free,
5740 	.css_reset = mem_cgroup_css_reset,
5741 	.can_attach = mem_cgroup_can_attach,
5742 	.cancel_attach = mem_cgroup_cancel_attach,
5743 	.post_attach = mem_cgroup_move_task,
5744 	.bind = mem_cgroup_bind,
5745 	.dfl_cftypes = memory_files,
5746 	.legacy_cftypes = mem_cgroup_legacy_files,
5747 	.early_init = 0,
5748 };
5749 
5750 /**
5751  * mem_cgroup_protected - check if memory consumption is in the normal range
5752  * @root: the top ancestor of the sub-tree being checked
5753  * @memcg: the memory cgroup to check
5754  *
5755  * WARNING: This function is not stateless! It can only be used as part
5756  *          of a top-down tree iteration, not for isolated queries.
5757  *
5758  * Returns one of the following:
5759  *   MEMCG_PROT_NONE: cgroup memory is not protected
5760  *   MEMCG_PROT_LOW: cgroup memory is protected as long there is
5761  *     an unprotected supply of reclaimable memory from other cgroups.
5762  *   MEMCG_PROT_MIN: cgroup memory is protected
5763  *
5764  * @root is exclusive; it is never protected when looked at directly
5765  *
5766  * To provide a proper hierarchical behavior, effective memory.min/low values
5767  * are used. Below is the description of how effective memory.low is calculated.
5768  * Effective memory.min values is calculated in the same way.
5769  *
5770  * Effective memory.low is always equal or less than the original memory.low.
5771  * If there is no memory.low overcommittment (which is always true for
5772  * top-level memory cgroups), these two values are equal.
5773  * Otherwise, it's a part of parent's effective memory.low,
5774  * calculated as a cgroup's memory.low usage divided by sum of sibling's
5775  * memory.low usages, where memory.low usage is the size of actually
5776  * protected memory.
5777  *
5778  *                                             low_usage
5779  * elow = min( memory.low, parent->elow * ------------------ ),
5780  *                                        siblings_low_usage
5781  *
5782  *             | memory.current, if memory.current < memory.low
5783  * low_usage = |
5784  *	       | 0, otherwise.
5785  *
5786  *
5787  * Such definition of the effective memory.low provides the expected
5788  * hierarchical behavior: parent's memory.low value is limiting
5789  * children, unprotected memory is reclaimed first and cgroups,
5790  * which are not using their guarantee do not affect actual memory
5791  * distribution.
5792  *
5793  * For example, if there are memcgs A, A/B, A/C, A/D and A/E:
5794  *
5795  *     A      A/memory.low = 2G, A/memory.current = 6G
5796  *    //\\
5797  *   BC  DE   B/memory.low = 3G  B/memory.current = 2G
5798  *            C/memory.low = 1G  C/memory.current = 2G
5799  *            D/memory.low = 0   D/memory.current = 2G
5800  *            E/memory.low = 10G E/memory.current = 0
5801  *
5802  * and the memory pressure is applied, the following memory distribution
5803  * is expected (approximately):
5804  *
5805  *     A/memory.current = 2G
5806  *
5807  *     B/memory.current = 1.3G
5808  *     C/memory.current = 0.6G
5809  *     D/memory.current = 0
5810  *     E/memory.current = 0
5811  *
5812  * These calculations require constant tracking of the actual low usages
5813  * (see propagate_protected_usage()), as well as recursive calculation of
5814  * effective memory.low values. But as we do call mem_cgroup_protected()
5815  * path for each memory cgroup top-down from the reclaim,
5816  * it's possible to optimize this part, and save calculated elow
5817  * for next usage. This part is intentionally racy, but it's ok,
5818  * as memory.low is a best-effort mechanism.
5819  */
5820 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
5821 						struct mem_cgroup *memcg)
5822 {
5823 	struct mem_cgroup *parent;
5824 	unsigned long emin, parent_emin;
5825 	unsigned long elow, parent_elow;
5826 	unsigned long usage;
5827 
5828 	if (mem_cgroup_disabled())
5829 		return MEMCG_PROT_NONE;
5830 
5831 	if (!root)
5832 		root = root_mem_cgroup;
5833 	if (memcg == root)
5834 		return MEMCG_PROT_NONE;
5835 
5836 	usage = page_counter_read(&memcg->memory);
5837 	if (!usage)
5838 		return MEMCG_PROT_NONE;
5839 
5840 	emin = memcg->memory.min;
5841 	elow = memcg->memory.low;
5842 
5843 	parent = parent_mem_cgroup(memcg);
5844 	/* No parent means a non-hierarchical mode on v1 memcg */
5845 	if (!parent)
5846 		return MEMCG_PROT_NONE;
5847 
5848 	if (parent == root)
5849 		goto exit;
5850 
5851 	parent_emin = READ_ONCE(parent->memory.emin);
5852 	emin = min(emin, parent_emin);
5853 	if (emin && parent_emin) {
5854 		unsigned long min_usage, siblings_min_usage;
5855 
5856 		min_usage = min(usage, memcg->memory.min);
5857 		siblings_min_usage = atomic_long_read(
5858 			&parent->memory.children_min_usage);
5859 
5860 		if (min_usage && siblings_min_usage)
5861 			emin = min(emin, parent_emin * min_usage /
5862 				   siblings_min_usage);
5863 	}
5864 
5865 	parent_elow = READ_ONCE(parent->memory.elow);
5866 	elow = min(elow, parent_elow);
5867 	if (elow && parent_elow) {
5868 		unsigned long low_usage, siblings_low_usage;
5869 
5870 		low_usage = min(usage, memcg->memory.low);
5871 		siblings_low_usage = atomic_long_read(
5872 			&parent->memory.children_low_usage);
5873 
5874 		if (low_usage && siblings_low_usage)
5875 			elow = min(elow, parent_elow * low_usage /
5876 				   siblings_low_usage);
5877 	}
5878 
5879 exit:
5880 	memcg->memory.emin = emin;
5881 	memcg->memory.elow = elow;
5882 
5883 	if (usage <= emin)
5884 		return MEMCG_PROT_MIN;
5885 	else if (usage <= elow)
5886 		return MEMCG_PROT_LOW;
5887 	else
5888 		return MEMCG_PROT_NONE;
5889 }
5890 
5891 /**
5892  * mem_cgroup_try_charge - try charging a page
5893  * @page: page to charge
5894  * @mm: mm context of the victim
5895  * @gfp_mask: reclaim mode
5896  * @memcgp: charged memcg return
5897  * @compound: charge the page as compound or small page
5898  *
5899  * Try to charge @page to the memcg that @mm belongs to, reclaiming
5900  * pages according to @gfp_mask if necessary.
5901  *
5902  * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5903  * Otherwise, an error code is returned.
5904  *
5905  * After page->mapping has been set up, the caller must finalize the
5906  * charge with mem_cgroup_commit_charge().  Or abort the transaction
5907  * with mem_cgroup_cancel_charge() in case page instantiation fails.
5908  */
5909 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5910 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
5911 			  bool compound)
5912 {
5913 	struct mem_cgroup *memcg = NULL;
5914 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5915 	int ret = 0;
5916 
5917 	if (mem_cgroup_disabled())
5918 		goto out;
5919 
5920 	if (PageSwapCache(page)) {
5921 		/*
5922 		 * Every swap fault against a single page tries to charge the
5923 		 * page, bail as early as possible.  shmem_unuse() encounters
5924 		 * already charged pages, too.  The USED bit is protected by
5925 		 * the page lock, which serializes swap cache removal, which
5926 		 * in turn serializes uncharging.
5927 		 */
5928 		VM_BUG_ON_PAGE(!PageLocked(page), page);
5929 		if (compound_head(page)->mem_cgroup)
5930 			goto out;
5931 
5932 		if (do_swap_account) {
5933 			swp_entry_t ent = { .val = page_private(page), };
5934 			unsigned short id = lookup_swap_cgroup_id(ent);
5935 
5936 			rcu_read_lock();
5937 			memcg = mem_cgroup_from_id(id);
5938 			if (memcg && !css_tryget_online(&memcg->css))
5939 				memcg = NULL;
5940 			rcu_read_unlock();
5941 		}
5942 	}
5943 
5944 	if (!memcg)
5945 		memcg = get_mem_cgroup_from_mm(mm);
5946 
5947 	ret = try_charge(memcg, gfp_mask, nr_pages);
5948 
5949 	css_put(&memcg->css);
5950 out:
5951 	*memcgp = memcg;
5952 	return ret;
5953 }
5954 
5955 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
5956 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
5957 			  bool compound)
5958 {
5959 	struct mem_cgroup *memcg;
5960 	int ret;
5961 
5962 	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
5963 	memcg = *memcgp;
5964 	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
5965 	return ret;
5966 }
5967 
5968 /**
5969  * mem_cgroup_commit_charge - commit a page charge
5970  * @page: page to charge
5971  * @memcg: memcg to charge the page to
5972  * @lrucare: page might be on LRU already
5973  * @compound: charge the page as compound or small page
5974  *
5975  * Finalize a charge transaction started by mem_cgroup_try_charge(),
5976  * after page->mapping has been set up.  This must happen atomically
5977  * as part of the page instantiation, i.e. under the page table lock
5978  * for anonymous pages, under the page lock for page and swap cache.
5979  *
5980  * In addition, the page must not be on the LRU during the commit, to
5981  * prevent racing with task migration.  If it might be, use @lrucare.
5982  *
5983  * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5984  */
5985 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5986 			      bool lrucare, bool compound)
5987 {
5988 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5989 
5990 	VM_BUG_ON_PAGE(!page->mapping, page);
5991 	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5992 
5993 	if (mem_cgroup_disabled())
5994 		return;
5995 	/*
5996 	 * Swap faults will attempt to charge the same page multiple
5997 	 * times.  But reuse_swap_page() might have removed the page
5998 	 * from swapcache already, so we can't check PageSwapCache().
5999 	 */
6000 	if (!memcg)
6001 		return;
6002 
6003 	commit_charge(page, memcg, lrucare);
6004 
6005 	local_irq_disable();
6006 	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
6007 	memcg_check_events(memcg, page);
6008 	local_irq_enable();
6009 
6010 	if (do_memsw_account() && PageSwapCache(page)) {
6011 		swp_entry_t entry = { .val = page_private(page) };
6012 		/*
6013 		 * The swap entry might not get freed for a long time,
6014 		 * let's not wait for it.  The page already received a
6015 		 * memory+swap charge, drop the swap entry duplicate.
6016 		 */
6017 		mem_cgroup_uncharge_swap(entry, nr_pages);
6018 	}
6019 }
6020 
6021 /**
6022  * mem_cgroup_cancel_charge - cancel a page charge
6023  * @page: page to charge
6024  * @memcg: memcg to charge the page to
6025  * @compound: charge the page as compound or small page
6026  *
6027  * Cancel a charge transaction started by mem_cgroup_try_charge().
6028  */
6029 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
6030 		bool compound)
6031 {
6032 	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6033 
6034 	if (mem_cgroup_disabled())
6035 		return;
6036 	/*
6037 	 * Swap faults will attempt to charge the same page multiple
6038 	 * times.  But reuse_swap_page() might have removed the page
6039 	 * from swapcache already, so we can't check PageSwapCache().
6040 	 */
6041 	if (!memcg)
6042 		return;
6043 
6044 	cancel_charge(memcg, nr_pages);
6045 }
6046 
6047 struct uncharge_gather {
6048 	struct mem_cgroup *memcg;
6049 	unsigned long pgpgout;
6050 	unsigned long nr_anon;
6051 	unsigned long nr_file;
6052 	unsigned long nr_kmem;
6053 	unsigned long nr_huge;
6054 	unsigned long nr_shmem;
6055 	struct page *dummy_page;
6056 };
6057 
6058 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6059 {
6060 	memset(ug, 0, sizeof(*ug));
6061 }
6062 
6063 static void uncharge_batch(const struct uncharge_gather *ug)
6064 {
6065 	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
6066 	unsigned long flags;
6067 
6068 	if (!mem_cgroup_is_root(ug->memcg)) {
6069 		page_counter_uncharge(&ug->memcg->memory, nr_pages);
6070 		if (do_memsw_account())
6071 			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
6072 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6073 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6074 		memcg_oom_recover(ug->memcg);
6075 	}
6076 
6077 	local_irq_save(flags);
6078 	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
6079 	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
6080 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
6081 	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
6082 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6083 	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
6084 	memcg_check_events(ug->memcg, ug->dummy_page);
6085 	local_irq_restore(flags);
6086 
6087 	if (!mem_cgroup_is_root(ug->memcg))
6088 		css_put_many(&ug->memcg->css, nr_pages);
6089 }
6090 
6091 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6092 {
6093 	VM_BUG_ON_PAGE(PageLRU(page), page);
6094 	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
6095 			!PageHWPoison(page) , page);
6096 
6097 	if (!page->mem_cgroup)
6098 		return;
6099 
6100 	/*
6101 	 * Nobody should be changing or seriously looking at
6102 	 * page->mem_cgroup at this point, we have fully
6103 	 * exclusive access to the page.
6104 	 */
6105 
6106 	if (ug->memcg != page->mem_cgroup) {
6107 		if (ug->memcg) {
6108 			uncharge_batch(ug);
6109 			uncharge_gather_clear(ug);
6110 		}
6111 		ug->memcg = page->mem_cgroup;
6112 	}
6113 
6114 	if (!PageKmemcg(page)) {
6115 		unsigned int nr_pages = 1;
6116 
6117 		if (PageTransHuge(page)) {
6118 			nr_pages <<= compound_order(page);
6119 			ug->nr_huge += nr_pages;
6120 		}
6121 		if (PageAnon(page))
6122 			ug->nr_anon += nr_pages;
6123 		else {
6124 			ug->nr_file += nr_pages;
6125 			if (PageSwapBacked(page))
6126 				ug->nr_shmem += nr_pages;
6127 		}
6128 		ug->pgpgout++;
6129 	} else {
6130 		ug->nr_kmem += 1 << compound_order(page);
6131 		__ClearPageKmemcg(page);
6132 	}
6133 
6134 	ug->dummy_page = page;
6135 	page->mem_cgroup = NULL;
6136 }
6137 
6138 static void uncharge_list(struct list_head *page_list)
6139 {
6140 	struct uncharge_gather ug;
6141 	struct list_head *next;
6142 
6143 	uncharge_gather_clear(&ug);
6144 
6145 	/*
6146 	 * Note that the list can be a single page->lru; hence the
6147 	 * do-while loop instead of a simple list_for_each_entry().
6148 	 */
6149 	next = page_list->next;
6150 	do {
6151 		struct page *page;
6152 
6153 		page = list_entry(next, struct page, lru);
6154 		next = page->lru.next;
6155 
6156 		uncharge_page(page, &ug);
6157 	} while (next != page_list);
6158 
6159 	if (ug.memcg)
6160 		uncharge_batch(&ug);
6161 }
6162 
6163 /**
6164  * mem_cgroup_uncharge - uncharge a page
6165  * @page: page to uncharge
6166  *
6167  * Uncharge a page previously charged with mem_cgroup_try_charge() and
6168  * mem_cgroup_commit_charge().
6169  */
6170 void mem_cgroup_uncharge(struct page *page)
6171 {
6172 	struct uncharge_gather ug;
6173 
6174 	if (mem_cgroup_disabled())
6175 		return;
6176 
6177 	/* Don't touch page->lru of any random page, pre-check: */
6178 	if (!page->mem_cgroup)
6179 		return;
6180 
6181 	uncharge_gather_clear(&ug);
6182 	uncharge_page(page, &ug);
6183 	uncharge_batch(&ug);
6184 }
6185 
6186 /**
6187  * mem_cgroup_uncharge_list - uncharge a list of page
6188  * @page_list: list of pages to uncharge
6189  *
6190  * Uncharge a list of pages previously charged with
6191  * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6192  */
6193 void mem_cgroup_uncharge_list(struct list_head *page_list)
6194 {
6195 	if (mem_cgroup_disabled())
6196 		return;
6197 
6198 	if (!list_empty(page_list))
6199 		uncharge_list(page_list);
6200 }
6201 
6202 /**
6203  * mem_cgroup_migrate - charge a page's replacement
6204  * @oldpage: currently circulating page
6205  * @newpage: replacement page
6206  *
6207  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6208  * be uncharged upon free.
6209  *
6210  * Both pages must be locked, @newpage->mapping must be set up.
6211  */
6212 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6213 {
6214 	struct mem_cgroup *memcg;
6215 	unsigned int nr_pages;
6216 	bool compound;
6217 	unsigned long flags;
6218 
6219 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6220 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6221 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6222 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6223 		       newpage);
6224 
6225 	if (mem_cgroup_disabled())
6226 		return;
6227 
6228 	/* Page cache replacement: new page already charged? */
6229 	if (newpage->mem_cgroup)
6230 		return;
6231 
6232 	/* Swapcache readahead pages can get replaced before being charged */
6233 	memcg = oldpage->mem_cgroup;
6234 	if (!memcg)
6235 		return;
6236 
6237 	/* Force-charge the new page. The old one will be freed soon */
6238 	compound = PageTransHuge(newpage);
6239 	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
6240 
6241 	page_counter_charge(&memcg->memory, nr_pages);
6242 	if (do_memsw_account())
6243 		page_counter_charge(&memcg->memsw, nr_pages);
6244 	css_get_many(&memcg->css, nr_pages);
6245 
6246 	commit_charge(newpage, memcg, false);
6247 
6248 	local_irq_save(flags);
6249 	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
6250 	memcg_check_events(memcg, newpage);
6251 	local_irq_restore(flags);
6252 }
6253 
6254 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6255 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6256 
6257 void mem_cgroup_sk_alloc(struct sock *sk)
6258 {
6259 	struct mem_cgroup *memcg;
6260 
6261 	if (!mem_cgroup_sockets_enabled)
6262 		return;
6263 
6264 	/*
6265 	 * Socket cloning can throw us here with sk_memcg already
6266 	 * filled. It won't however, necessarily happen from
6267 	 * process context. So the test for root memcg given
6268 	 * the current task's memcg won't help us in this case.
6269 	 *
6270 	 * Respecting the original socket's memcg is a better
6271 	 * decision in this case.
6272 	 */
6273 	if (sk->sk_memcg) {
6274 		css_get(&sk->sk_memcg->css);
6275 		return;
6276 	}
6277 
6278 	rcu_read_lock();
6279 	memcg = mem_cgroup_from_task(current);
6280 	if (memcg == root_mem_cgroup)
6281 		goto out;
6282 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6283 		goto out;
6284 	if (css_tryget_online(&memcg->css))
6285 		sk->sk_memcg = memcg;
6286 out:
6287 	rcu_read_unlock();
6288 }
6289 
6290 void mem_cgroup_sk_free(struct sock *sk)
6291 {
6292 	if (sk->sk_memcg)
6293 		css_put(&sk->sk_memcg->css);
6294 }
6295 
6296 /**
6297  * mem_cgroup_charge_skmem - charge socket memory
6298  * @memcg: memcg to charge
6299  * @nr_pages: number of pages to charge
6300  *
6301  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6302  * @memcg's configured limit, %false if the charge had to be forced.
6303  */
6304 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6305 {
6306 	gfp_t gfp_mask = GFP_KERNEL;
6307 
6308 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6309 		struct page_counter *fail;
6310 
6311 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6312 			memcg->tcpmem_pressure = 0;
6313 			return true;
6314 		}
6315 		page_counter_charge(&memcg->tcpmem, nr_pages);
6316 		memcg->tcpmem_pressure = 1;
6317 		return false;
6318 	}
6319 
6320 	/* Don't block in the packet receive path */
6321 	if (in_softirq())
6322 		gfp_mask = GFP_NOWAIT;
6323 
6324 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6325 
6326 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6327 		return true;
6328 
6329 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
6330 	return false;
6331 }
6332 
6333 /**
6334  * mem_cgroup_uncharge_skmem - uncharge socket memory
6335  * @memcg: memcg to uncharge
6336  * @nr_pages: number of pages to uncharge
6337  */
6338 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6339 {
6340 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6341 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
6342 		return;
6343 	}
6344 
6345 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6346 
6347 	refill_stock(memcg, nr_pages);
6348 }
6349 
6350 static int __init cgroup_memory(char *s)
6351 {
6352 	char *token;
6353 
6354 	while ((token = strsep(&s, ",")) != NULL) {
6355 		if (!*token)
6356 			continue;
6357 		if (!strcmp(token, "nosocket"))
6358 			cgroup_memory_nosocket = true;
6359 		if (!strcmp(token, "nokmem"))
6360 			cgroup_memory_nokmem = true;
6361 	}
6362 	return 0;
6363 }
6364 __setup("cgroup.memory=", cgroup_memory);
6365 
6366 /*
6367  * subsys_initcall() for memory controller.
6368  *
6369  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
6370  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
6371  * basically everything that doesn't depend on a specific mem_cgroup structure
6372  * should be initialized from here.
6373  */
6374 static int __init mem_cgroup_init(void)
6375 {
6376 	int cpu, node;
6377 
6378 #ifdef CONFIG_MEMCG_KMEM
6379 	/*
6380 	 * Kmem cache creation is mostly done with the slab_mutex held,
6381 	 * so use a workqueue with limited concurrency to avoid stalling
6382 	 * all worker threads in case lots of cgroups are created and
6383 	 * destroyed simultaneously.
6384 	 */
6385 	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
6386 	BUG_ON(!memcg_kmem_cache_wq);
6387 #endif
6388 
6389 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
6390 				  memcg_hotplug_cpu_dead);
6391 
6392 	for_each_possible_cpu(cpu)
6393 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
6394 			  drain_local_stock);
6395 
6396 	for_each_node(node) {
6397 		struct mem_cgroup_tree_per_node *rtpn;
6398 
6399 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
6400 				    node_online(node) ? node : NUMA_NO_NODE);
6401 
6402 		rtpn->rb_root = RB_ROOT;
6403 		rtpn->rb_rightmost = NULL;
6404 		spin_lock_init(&rtpn->lock);
6405 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6406 	}
6407 
6408 	return 0;
6409 }
6410 subsys_initcall(mem_cgroup_init);
6411 
6412 #ifdef CONFIG_MEMCG_SWAP
6413 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
6414 {
6415 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
6416 		/*
6417 		 * The root cgroup cannot be destroyed, so it's refcount must
6418 		 * always be >= 1.
6419 		 */
6420 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
6421 			VM_BUG_ON(1);
6422 			break;
6423 		}
6424 		memcg = parent_mem_cgroup(memcg);
6425 		if (!memcg)
6426 			memcg = root_mem_cgroup;
6427 	}
6428 	return memcg;
6429 }
6430 
6431 /**
6432  * mem_cgroup_swapout - transfer a memsw charge to swap
6433  * @page: page whose memsw charge to transfer
6434  * @entry: swap entry to move the charge to
6435  *
6436  * Transfer the memsw charge of @page to @entry.
6437  */
6438 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
6439 {
6440 	struct mem_cgroup *memcg, *swap_memcg;
6441 	unsigned int nr_entries;
6442 	unsigned short oldid;
6443 
6444 	VM_BUG_ON_PAGE(PageLRU(page), page);
6445 	VM_BUG_ON_PAGE(page_count(page), page);
6446 
6447 	if (!do_memsw_account())
6448 		return;
6449 
6450 	memcg = page->mem_cgroup;
6451 
6452 	/* Readahead page, never charged */
6453 	if (!memcg)
6454 		return;
6455 
6456 	/*
6457 	 * In case the memcg owning these pages has been offlined and doesn't
6458 	 * have an ID allocated to it anymore, charge the closest online
6459 	 * ancestor for the swap instead and transfer the memory+swap charge.
6460 	 */
6461 	swap_memcg = mem_cgroup_id_get_online(memcg);
6462 	nr_entries = hpage_nr_pages(page);
6463 	/* Get references for the tail pages, too */
6464 	if (nr_entries > 1)
6465 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
6466 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
6467 				   nr_entries);
6468 	VM_BUG_ON_PAGE(oldid, page);
6469 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
6470 
6471 	page->mem_cgroup = NULL;
6472 
6473 	if (!mem_cgroup_is_root(memcg))
6474 		page_counter_uncharge(&memcg->memory, nr_entries);
6475 
6476 	if (memcg != swap_memcg) {
6477 		if (!mem_cgroup_is_root(swap_memcg))
6478 			page_counter_charge(&swap_memcg->memsw, nr_entries);
6479 		page_counter_uncharge(&memcg->memsw, nr_entries);
6480 	}
6481 
6482 	/*
6483 	 * Interrupts should be disabled here because the caller holds the
6484 	 * i_pages lock which is taken with interrupts-off. It is
6485 	 * important here to have the interrupts disabled because it is the
6486 	 * only synchronisation we have for updating the per-CPU variables.
6487 	 */
6488 	VM_BUG_ON(!irqs_disabled());
6489 	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
6490 				     -nr_entries);
6491 	memcg_check_events(memcg, page);
6492 
6493 	if (!mem_cgroup_is_root(memcg))
6494 		css_put_many(&memcg->css, nr_entries);
6495 }
6496 
6497 /**
6498  * mem_cgroup_try_charge_swap - try charging swap space for a page
6499  * @page: page being added to swap
6500  * @entry: swap entry to charge
6501  *
6502  * Try to charge @page's memcg for the swap space at @entry.
6503  *
6504  * Returns 0 on success, -ENOMEM on failure.
6505  */
6506 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
6507 {
6508 	unsigned int nr_pages = hpage_nr_pages(page);
6509 	struct page_counter *counter;
6510 	struct mem_cgroup *memcg;
6511 	unsigned short oldid;
6512 
6513 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
6514 		return 0;
6515 
6516 	memcg = page->mem_cgroup;
6517 
6518 	/* Readahead page, never charged */
6519 	if (!memcg)
6520 		return 0;
6521 
6522 	if (!entry.val) {
6523 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
6524 		return 0;
6525 	}
6526 
6527 	memcg = mem_cgroup_id_get_online(memcg);
6528 
6529 	if (!mem_cgroup_is_root(memcg) &&
6530 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
6531 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
6532 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
6533 		mem_cgroup_id_put(memcg);
6534 		return -ENOMEM;
6535 	}
6536 
6537 	/* Get references for the tail pages, too */
6538 	if (nr_pages > 1)
6539 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
6540 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
6541 	VM_BUG_ON_PAGE(oldid, page);
6542 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
6543 
6544 	return 0;
6545 }
6546 
6547 /**
6548  * mem_cgroup_uncharge_swap - uncharge swap space
6549  * @entry: swap entry to uncharge
6550  * @nr_pages: the amount of swap space to uncharge
6551  */
6552 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
6553 {
6554 	struct mem_cgroup *memcg;
6555 	unsigned short id;
6556 
6557 	if (!do_swap_account)
6558 		return;
6559 
6560 	id = swap_cgroup_record(entry, 0, nr_pages);
6561 	rcu_read_lock();
6562 	memcg = mem_cgroup_from_id(id);
6563 	if (memcg) {
6564 		if (!mem_cgroup_is_root(memcg)) {
6565 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6566 				page_counter_uncharge(&memcg->swap, nr_pages);
6567 			else
6568 				page_counter_uncharge(&memcg->memsw, nr_pages);
6569 		}
6570 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
6571 		mem_cgroup_id_put_many(memcg, nr_pages);
6572 	}
6573 	rcu_read_unlock();
6574 }
6575 
6576 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
6577 {
6578 	long nr_swap_pages = get_nr_swap_pages();
6579 
6580 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6581 		return nr_swap_pages;
6582 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6583 		nr_swap_pages = min_t(long, nr_swap_pages,
6584 				      READ_ONCE(memcg->swap.max) -
6585 				      page_counter_read(&memcg->swap));
6586 	return nr_swap_pages;
6587 }
6588 
6589 bool mem_cgroup_swap_full(struct page *page)
6590 {
6591 	struct mem_cgroup *memcg;
6592 
6593 	VM_BUG_ON_PAGE(!PageLocked(page), page);
6594 
6595 	if (vm_swap_full())
6596 		return true;
6597 	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6598 		return false;
6599 
6600 	memcg = page->mem_cgroup;
6601 	if (!memcg)
6602 		return false;
6603 
6604 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6605 		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
6606 			return true;
6607 
6608 	return false;
6609 }
6610 
6611 /* for remember boot option*/
6612 #ifdef CONFIG_MEMCG_SWAP_ENABLED
6613 static int really_do_swap_account __initdata = 1;
6614 #else
6615 static int really_do_swap_account __initdata;
6616 #endif
6617 
6618 static int __init enable_swap_account(char *s)
6619 {
6620 	if (!strcmp(s, "1"))
6621 		really_do_swap_account = 1;
6622 	else if (!strcmp(s, "0"))
6623 		really_do_swap_account = 0;
6624 	return 1;
6625 }
6626 __setup("swapaccount=", enable_swap_account);
6627 
6628 static u64 swap_current_read(struct cgroup_subsys_state *css,
6629 			     struct cftype *cft)
6630 {
6631 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6632 
6633 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6634 }
6635 
6636 static int swap_max_show(struct seq_file *m, void *v)
6637 {
6638 	return seq_puts_memcg_tunable(m,
6639 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
6640 }
6641 
6642 static ssize_t swap_max_write(struct kernfs_open_file *of,
6643 			      char *buf, size_t nbytes, loff_t off)
6644 {
6645 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6646 	unsigned long max;
6647 	int err;
6648 
6649 	buf = strstrip(buf);
6650 	err = page_counter_memparse(buf, "max", &max);
6651 	if (err)
6652 		return err;
6653 
6654 	xchg(&memcg->swap.max, max);
6655 
6656 	return nbytes;
6657 }
6658 
6659 static int swap_events_show(struct seq_file *m, void *v)
6660 {
6661 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6662 
6663 	seq_printf(m, "max %lu\n",
6664 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
6665 	seq_printf(m, "fail %lu\n",
6666 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
6667 
6668 	return 0;
6669 }
6670 
6671 static struct cftype swap_files[] = {
6672 	{
6673 		.name = "swap.current",
6674 		.flags = CFTYPE_NOT_ON_ROOT,
6675 		.read_u64 = swap_current_read,
6676 	},
6677 	{
6678 		.name = "swap.max",
6679 		.flags = CFTYPE_NOT_ON_ROOT,
6680 		.seq_show = swap_max_show,
6681 		.write = swap_max_write,
6682 	},
6683 	{
6684 		.name = "swap.events",
6685 		.flags = CFTYPE_NOT_ON_ROOT,
6686 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
6687 		.seq_show = swap_events_show,
6688 	},
6689 	{ }	/* terminate */
6690 };
6691 
6692 static struct cftype memsw_cgroup_files[] = {
6693 	{
6694 		.name = "memsw.usage_in_bytes",
6695 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6696 		.read_u64 = mem_cgroup_read_u64,
6697 	},
6698 	{
6699 		.name = "memsw.max_usage_in_bytes",
6700 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6701 		.write = mem_cgroup_reset,
6702 		.read_u64 = mem_cgroup_read_u64,
6703 	},
6704 	{
6705 		.name = "memsw.limit_in_bytes",
6706 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6707 		.write = mem_cgroup_write,
6708 		.read_u64 = mem_cgroup_read_u64,
6709 	},
6710 	{
6711 		.name = "memsw.failcnt",
6712 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6713 		.write = mem_cgroup_reset,
6714 		.read_u64 = mem_cgroup_read_u64,
6715 	},
6716 	{ },	/* terminate */
6717 };
6718 
6719 static int __init mem_cgroup_swap_init(void)
6720 {
6721 	if (!mem_cgroup_disabled() && really_do_swap_account) {
6722 		do_swap_account = 1;
6723 		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6724 					       swap_files));
6725 		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6726 						  memsw_cgroup_files));
6727 	}
6728 	return 0;
6729 }
6730 subsys_initcall(mem_cgroup_swap_init);
6731 
6732 #endif /* CONFIG_MEMCG_SWAP */
6733