xref: /openbmc/linux/mm/memcontrol.c (revision 1c2f87c2)
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * Kernel Memory Controller
14  * Copyright (C) 2012 Parallels Inc. and Google Inc.
15  * Authors: Glauber Costa and Suleiman Souhlal
16  *
17  * This program is free software; you can redistribute it and/or modify
18  * it under the terms of the GNU General Public License as published by
19  * the Free Software Foundation; either version 2 of the License, or
20  * (at your option) any later version.
21  *
22  * This program is distributed in the hope that it will be useful,
23  * but WITHOUT ANY WARRANTY; without even the implied warranty of
24  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
25  * GNU General Public License for more details.
26  */
27 
28 #include <linux/res_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/mm.h>
32 #include <linux/hugetlb.h>
33 #include <linux/pagemap.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/page_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include "internal.h"
60 #include <net/sock.h>
61 #include <net/ip.h>
62 #include <net/tcp_memcontrol.h>
63 #include "slab.h"
64 
65 #include <asm/uaccess.h>
66 
67 #include <trace/events/vmscan.h>
68 
69 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
70 EXPORT_SYMBOL(memory_cgrp_subsys);
71 
72 #define MEM_CGROUP_RECLAIM_RETRIES	5
73 static struct mem_cgroup *root_mem_cgroup __read_mostly;
74 
75 #ifdef CONFIG_MEMCG_SWAP
76 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
77 int do_swap_account __read_mostly;
78 
79 /* for remembering the boot option */
80 #ifdef CONFIG_MEMCG_SWAP_ENABLED
81 static int really_do_swap_account __initdata = 1;
82 #else
83 static int really_do_swap_account __initdata = 0;
84 #endif
85 
86 #else
87 #define do_swap_account		0
88 #endif
89 
90 
91 static const char * const mem_cgroup_stat_names[] = {
92 	"cache",
93 	"rss",
94 	"rss_huge",
95 	"mapped_file",
96 	"writeback",
97 	"swap",
98 };
99 
100 enum mem_cgroup_events_index {
101 	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
102 	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
103 	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
104 	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
105 	MEM_CGROUP_EVENTS_NSTATS,
106 };
107 
108 static const char * const mem_cgroup_events_names[] = {
109 	"pgpgin",
110 	"pgpgout",
111 	"pgfault",
112 	"pgmajfault",
113 };
114 
115 static const char * const mem_cgroup_lru_names[] = {
116 	"inactive_anon",
117 	"active_anon",
118 	"inactive_file",
119 	"active_file",
120 	"unevictable",
121 };
122 
123 /*
124  * Per memcg event counter is incremented at every pagein/pageout. With THP,
125  * it will be incremented by the number of pages. This counter is used
126  * to trigger some periodic events. This is straightforward and better
127  * than using jiffies etc. to handle periodic memcg events.
128  */
129 enum mem_cgroup_events_target {
130 	MEM_CGROUP_TARGET_THRESH,
131 	MEM_CGROUP_TARGET_SOFTLIMIT,
132 	MEM_CGROUP_TARGET_NUMAINFO,
133 	MEM_CGROUP_NTARGETS,
134 };
135 #define THRESHOLDS_EVENTS_TARGET 128
136 #define SOFTLIMIT_EVENTS_TARGET 1024
137 #define NUMAINFO_EVENTS_TARGET	1024
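/*
 * As a rough illustration of how these targets are consumed (see
 * mem_cgroup_event_ratelimit() and memcg_check_events() below): the usage
 * thresholds are re-checked about once every 128 page events, while the
 * soft limit tree and, on NUMA machines, the per-node scan info are only
 * refreshed about once every 1024 page events.
 */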
138 
139 struct mem_cgroup_stat_cpu {
140 	long count[MEM_CGROUP_STAT_NSTATS];
141 	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
142 	unsigned long nr_page_events;
143 	unsigned long targets[MEM_CGROUP_NTARGETS];
144 };
145 
146 struct mem_cgroup_reclaim_iter {
147 	/*
148 	 * last scanned hierarchy member. Valid only if last_dead_count
149 	 * matches memcg->dead_count of the hierarchy root group.
150 	 */
151 	struct mem_cgroup *last_visited;
152 	int last_dead_count;
153 
154 	/* scan generation, increased every round-trip */
155 	unsigned int generation;
156 };
157 
158 /*
159  * per-zone information in memory controller.
160  */
161 struct mem_cgroup_per_zone {
162 	struct lruvec		lruvec;
163 	unsigned long		lru_size[NR_LRU_LISTS];
164 
165 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
166 
167 	struct rb_node		tree_node;	/* RB tree node */
168 	unsigned long long	usage_in_excess;/* Set to the value by which */
169 						/* the soft limit is exceeded*/
170 	bool			on_tree;
171 	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
172 						/* use container_of	   */
173 };
174 
175 struct mem_cgroup_per_node {
176 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
177 };
178 
179 /*
180  * Cgroups above their limits are maintained in an RB-tree, independent of
181  * their hierarchy representation
182  */
183 
184 struct mem_cgroup_tree_per_zone {
185 	struct rb_root rb_root;
186 	spinlock_t lock;
187 };
188 
189 struct mem_cgroup_tree_per_node {
190 	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
191 };
192 
193 struct mem_cgroup_tree {
194 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
195 };
196 
197 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
198 
199 struct mem_cgroup_threshold {
200 	struct eventfd_ctx *eventfd;
201 	u64 threshold;
202 };
203 
204 /* For threshold */
205 struct mem_cgroup_threshold_ary {
206 	/* An array index points to the threshold just below or equal to usage. */
207 	int current_threshold;
208 	/* Size of entries[] */
209 	unsigned int size;
210 	/* Array of thresholds */
211 	struct mem_cgroup_threshold entries[0];
212 };
213 
214 struct mem_cgroup_thresholds {
215 	/* Primary thresholds array */
216 	struct mem_cgroup_threshold_ary *primary;
217 	/*
218 	 * Spare threshold array.
219 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
220 	 * It must be able to store at least primary->size - 1 entries.
221 	 */
222 	struct mem_cgroup_threshold_ary *spare;
223 };
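/*
 * For example, with a primary array holding thresholds of 4M, 8M and 16M and
 * a current usage of 10M, current_threshold indexes the 8M entry; threshold
 * crossings are then detected roughly by walking outward from that index,
 * signalling the corresponding eventfds and updating current_threshold.
 */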
224 
225 /* for OOM */
226 struct mem_cgroup_eventfd_list {
227 	struct list_head list;
228 	struct eventfd_ctx *eventfd;
229 };
230 
231 /*
232  * cgroup_event represents events which userspace wants to receive.
233  */
234 struct mem_cgroup_event {
235 	/*
236 	 * memcg which the event belongs to.
237 	 */
238 	struct mem_cgroup *memcg;
239 	/*
240 	 * eventfd to signal userspace about the event.
241 	 */
242 	struct eventfd_ctx *eventfd;
243 	/*
244 	 * Each of these stored in a list by the cgroup.
245 	 */
246 	struct list_head list;
247 	/*
248 	 * register_event() callback will be used to add a new userspace
249 	 * waiter for changes related to this event.  Use eventfd_signal()
250 	 * on eventfd to send notification to userspace.
251 	 */
252 	int (*register_event)(struct mem_cgroup *memcg,
253 			      struct eventfd_ctx *eventfd, const char *args);
254 	/*
255 	 * unregister_event() callback will be called when userspace closes
256 	 * the eventfd or when the cgroup is removed.  This callback must be
257 	 * set if you want to provide notification functionality.
258 	 */
259 	void (*unregister_event)(struct mem_cgroup *memcg,
260 				 struct eventfd_ctx *eventfd);
261 	/*
262 	 * All fields below are needed to unregister the event when
263 	 * userspace closes the eventfd.
264 	 */
265 	poll_table pt;
266 	wait_queue_head_t *wqh;
267 	wait_queue_t wait;
268 	struct work_struct remove;
269 };
270 
271 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
272 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
273 
274 /*
275  * The memory controller data structure. The memory controller controls both
276  * page cache and RSS per cgroup. We would eventually like to provide
277  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
278  * to help the administrator determine what knobs to tune.
279  *
280  * TODO: Add a water mark for the memory controller. Reclaim will begin when
281  * we hit the water mark. Maybe even add a low water mark, such that
282  * no reclaim occurs from a cgroup at its low water mark; this is
283  * a feature that will be implemented much later.
284  */
285 struct mem_cgroup {
286 	struct cgroup_subsys_state css;
287 	/*
288 	 * the counter to account for memory usage
289 	 */
290 	struct res_counter res;
291 
292 	/* vmpressure notifications */
293 	struct vmpressure vmpressure;
294 
295 	/*
296 	 * the counter to account for mem+swap usage.
297 	 */
298 	struct res_counter memsw;
299 
300 	/*
301 	 * the counter to account for kernel memory usage.
302 	 */
303 	struct res_counter kmem;
304 	/*
305 	 * Should the accounting and control be hierarchical, per subtree?
306 	 */
307 	bool use_hierarchy;
308 	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
309 
310 	bool		oom_lock;
311 	atomic_t	under_oom;
312 	atomic_t	oom_wakeups;
313 
314 	int	swappiness;
315 	/* OOM-Killer disable */
316 	int		oom_kill_disable;
317 
318 	/* set when res.limit == memsw.limit */
319 	bool		memsw_is_minimum;
320 
321 	/* protect arrays of thresholds */
322 	struct mutex thresholds_lock;
323 
324 	/* thresholds for memory usage. RCU-protected */
325 	struct mem_cgroup_thresholds thresholds;
326 
327 	/* thresholds for mem+swap usage. RCU-protected */
328 	struct mem_cgroup_thresholds memsw_thresholds;
329 
330 	/* For oom notifier event fd */
331 	struct list_head oom_notify;
332 
333 	/*
334 	 * Should we move charges of a task when a task is moved into this
335 	 * mem_cgroup? And what types of charges should we move?
336 	 */
337 	unsigned long move_charge_at_immigrate;
338 	/*
339 	 * set > 0 if pages under this cgroup are moving to another cgroup.
340 	 */
341 	atomic_t	moving_account;
342 	/* taken only while moving_account > 0 */
343 	spinlock_t	move_lock;
344 	/*
345 	 * percpu counter.
346 	 */
347 	struct mem_cgroup_stat_cpu __percpu *stat;
348 	/*
349 	 * used when a cpu is offlined or for other synchronization.
350 	 * See mem_cgroup_read_stat().
351 	 */
352 	struct mem_cgroup_stat_cpu nocpu_base;
353 	spinlock_t pcp_counter_lock;
354 
355 	atomic_t	dead_count;
356 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
357 	struct cg_proto tcp_mem;
358 #endif
359 #if defined(CONFIG_MEMCG_KMEM)
360 	/* analogous to slab_common's slab_caches list. per-memcg */
361 	struct list_head memcg_slab_caches;
362 	/* Not a spinlock, we can take a lot of time walking the list */
363 	struct mutex slab_caches_mutex;
364         /* Index in the kmem_cache->memcg_params->memcg_caches array */
365 	int kmemcg_id;
366 #endif
367 
368 	int last_scanned_node;
369 #if MAX_NUMNODES > 1
370 	nodemask_t	scan_nodes;
371 	atomic_t	numainfo_events;
372 	atomic_t	numainfo_updating;
373 #endif
374 
375 	/* List of events which userspace wants to receive */
376 	struct list_head event_list;
377 	spinlock_t event_list_lock;
378 
379 	struct mem_cgroup_per_node *nodeinfo[0];
380 	/* WARNING: nodeinfo must be the last member here */
381 };
382 
383 /* internal only representation about the status of kmem accounting. */
384 enum {
385 	KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
386 	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
387 };
388 
389 #ifdef CONFIG_MEMCG_KMEM
390 static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
391 {
392 	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
393 }
394 
395 static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
396 {
397 	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
398 }
399 
400 static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
401 {
402 	/*
403 	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
404 	 * will call css_put() if it sees the memcg is dead.
405 	 */
406 	smp_wmb();
407 	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
408 		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
409 }
410 
411 static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
412 {
413 	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
414 				  &memcg->kmem_account_flags);
415 }
416 #endif
417 
418 /* Stuff for moving charges at task migration. */
419 /*
420  * Types of charges to be moved. "move_charge_at_immigrate" and
421  * "immigrate_flags" are treated as a left-shifted bitmap of these types.
422  */
423 enum move_type {
424 	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
425 	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
426 	NR_MOVE_TYPE,
427 };
428 
429 /* "mc" and its members are protected by cgroup_mutex */
430 static struct move_charge_struct {
431 	spinlock_t	  lock; /* for from, to */
432 	struct mem_cgroup *from;
433 	struct mem_cgroup *to;
434 	unsigned long immigrate_flags;
435 	unsigned long precharge;
436 	unsigned long moved_charge;
437 	unsigned long moved_swap;
438 	struct task_struct *moving_task;	/* a task moving charges */
439 	wait_queue_head_t waitq;		/* a waitq for other context */
440 } mc = {
441 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
442 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
443 };
444 
445 static bool move_anon(void)
446 {
447 	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
448 }
449 
450 static bool move_file(void)
451 {
452 	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
453 }
454 
455 /*
456  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
457  * limit reclaim to prevent infinite loops, if they ever occur.
458  */
459 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
460 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
461 
462 enum charge_type {
463 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
464 	MEM_CGROUP_CHARGE_TYPE_ANON,
465 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
466 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
467 	NR_CHARGE_TYPE,
468 };
469 
470 /* for encoding cft->private value on file */
471 enum res_type {
472 	_MEM,
473 	_MEMSWAP,
474 	_OOM_TYPE,
475 	_KMEM,
476 };
477 
478 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
479 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
480 #define MEMFILE_ATTR(val)	((val) & 0xffff)
481 /* Used for OOM notifier */
482 #define OOM_CONTROL		(0)
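/*
 * For example, a cftype carrying MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) in its
 * ->private field packs the resource type into the high 16 bits and the
 * res_counter attribute into the low 16 bits, so handlers can recover them
 * with MEMFILE_TYPE(cft->private) and MEMFILE_ATTR(cft->private).
 */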
483 
484 /*
485  * Reclaim flags for mem_cgroup_hierarchical_reclaim
486  */
487 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
488 #define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
489 #define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
490 #define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
491 
492 /*
493  * The memcg_create_mutex will be held whenever a new cgroup is created.
494  * As a consequence, any change that needs to protect against new child cgroups
495  * appearing has to hold it as well.
496  */
497 static DEFINE_MUTEX(memcg_create_mutex);
498 
499 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
500 {
501 	return s ? container_of(s, struct mem_cgroup, css) : NULL;
502 }
503 
504 /* Some nice accessors for the vmpressure. */
505 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
506 {
507 	if (!memcg)
508 		memcg = root_mem_cgroup;
509 	return &memcg->vmpressure;
510 }
511 
512 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
513 {
514 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
515 }
516 
517 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
518 {
519 	return (memcg == root_mem_cgroup);
520 }
521 
522 /*
523  * We restrict the id to the range [1, 65535], so it can fit into
524  * an unsigned short.
525  */
526 #define MEM_CGROUP_ID_MAX	USHRT_MAX
527 
528 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
529 {
530 	/*
531 	 * The ID of the root cgroup is 0, but memcg treats 0 as an
532 	 * invalid ID, so we return (cgroup_id + 1).
533 	 */
534 	return memcg->css.cgroup->id + 1;
535 }
536 
537 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
538 {
539 	struct cgroup_subsys_state *css;
540 
541 	css = css_from_id(id - 1, &memory_cgrp_subsys);
542 	return mem_cgroup_from_css(css);
543 }
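/*
 * The two helpers above are inverses of each other: mem_cgroup_id() never
 * returns 0 because it reports cgroup->id + 1, and mem_cgroup_from_id()
 * undoes the offset before asking the cgroup core for the css, so
 * mem_cgroup_from_id(mem_cgroup_id(memcg)) == memcg for any live memcg.
 */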
544 
545 /* Writing them here to avoid exposing memcg's inner layout */
546 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
547 
548 void sock_update_memcg(struct sock *sk)
549 {
550 	if (mem_cgroup_sockets_enabled) {
551 		struct mem_cgroup *memcg;
552 		struct cg_proto *cg_proto;
553 
554 		BUG_ON(!sk->sk_prot->proto_cgroup);
555 
556 		/* Socket cloning can throw us here with sk_cgrp already
557 		 * filled. It won't however, necessarily happen from
558 		 * process context. So the test for root memcg given
559 		 * the current task's memcg won't help us in this case.
560 		 *
561 		 * Respecting the original socket's memcg is a better
562 		 * decision in this case.
563 		 */
564 		if (sk->sk_cgrp) {
565 			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
566 			css_get(&sk->sk_cgrp->memcg->css);
567 			return;
568 		}
569 
570 		rcu_read_lock();
571 		memcg = mem_cgroup_from_task(current);
572 		cg_proto = sk->sk_prot->proto_cgroup(memcg);
573 		if (!mem_cgroup_is_root(memcg) &&
574 		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
575 			sk->sk_cgrp = cg_proto;
576 		}
577 		rcu_read_unlock();
578 	}
579 }
580 EXPORT_SYMBOL(sock_update_memcg);
581 
582 void sock_release_memcg(struct sock *sk)
583 {
584 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
585 		struct mem_cgroup *memcg;
586 		WARN_ON(!sk->sk_cgrp->memcg);
587 		memcg = sk->sk_cgrp->memcg;
588 		css_put(&sk->sk_cgrp->memcg->css);
589 	}
590 }
591 
592 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
593 {
594 	if (!memcg || mem_cgroup_is_root(memcg))
595 		return NULL;
596 
597 	return &memcg->tcp_mem;
598 }
599 EXPORT_SYMBOL(tcp_proto_cgroup);
600 
601 static void disarm_sock_keys(struct mem_cgroup *memcg)
602 {
603 	if (!memcg_proto_activated(&memcg->tcp_mem))
604 		return;
605 	static_key_slow_dec(&memcg_socket_limit_enabled);
606 }
607 #else
608 static void disarm_sock_keys(struct mem_cgroup *memcg)
609 {
610 }
611 #endif
612 
613 #ifdef CONFIG_MEMCG_KMEM
614 /*
615  * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
616  * The main reason for not using the cgroup id for this is that it works
617  *  better in sparse environments, where we have a lot of memcgs but only
618  *  a few of them kmem-limited. For instance, if we had 200 memcgs and
619  *  none but the 200th were kmem-limited, we would otherwise need a
620  *  200-entry array for that.
621  *
622  * The current size of the caches array is stored in
623  * memcg_limited_groups_array_size.  It will double each time we have to
624  * increase it.
625  */
626 static DEFINE_IDA(kmem_limited_groups);
627 int memcg_limited_groups_array_size;
628 
629 /*
630  * MIN_SIZE is different from 1, because we would like to avoid going through
631  * the alloc/free process all the time. In a small machine, 4 kmem-limited
632  * cgroups is a reasonable guess. In the future, it could be a parameter or
633  * tunable, but that is strictly not necessary.
634  *
635  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
636  * this constant directly from cgroup, but it is understandable that this is
637  * better kept as an internal representation in cgroup.c. In any case, the
638  * cgrp_id space is not getting any smaller, and we don't necessarily have
639  * to increase ours as well if it increases.
640  */
641 #define MEMCG_CACHES_MIN_SIZE 4
642 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
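/*
 * With the values above, the per-cache memcg_caches arrays start with room
 * for 4 kmem-limited cgroups and double (8, 16, 32, ...) as more cgroups
 * become kmem-limited, but they are never grown beyond MEM_CGROUP_ID_MAX
 * entries.
 */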
643 
644 /*
645  * A lot of the calls to the cache allocation functions are expected to be
646  * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
647  * conditional on this static branch, we have to allow modules that do
648  * kmem_cache_alloc and the like to see this symbol as well.
649  */
650 struct static_key memcg_kmem_enabled_key;
651 EXPORT_SYMBOL(memcg_kmem_enabled_key);
652 
653 static void disarm_kmem_keys(struct mem_cgroup *memcg)
654 {
655 	if (memcg_kmem_is_active(memcg)) {
656 		static_key_slow_dec(&memcg_kmem_enabled_key);
657 		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
658 	}
659 	/*
660 	 * This check can't live in the kmem destruction function,
661 	 * since the charges will outlive the cgroup.
662 	 */
663 	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
664 }
665 #else
666 static void disarm_kmem_keys(struct mem_cgroup *memcg)
667 {
668 }
669 #endif /* CONFIG_MEMCG_KMEM */
670 
671 static void disarm_static_keys(struct mem_cgroup *memcg)
672 {
673 	disarm_sock_keys(memcg);
674 	disarm_kmem_keys(memcg);
675 }
676 
677 static void drain_all_stock_async(struct mem_cgroup *memcg);
678 
679 static struct mem_cgroup_per_zone *
680 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
681 {
682 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
683 	return &memcg->nodeinfo[nid]->zoneinfo[zid];
684 }
685 
686 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
687 {
688 	return &memcg->css;
689 }
690 
691 static struct mem_cgroup_per_zone *
692 page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
693 {
694 	int nid = page_to_nid(page);
695 	int zid = page_zonenum(page);
696 
697 	return mem_cgroup_zoneinfo(memcg, nid, zid);
698 }
699 
700 static struct mem_cgroup_tree_per_zone *
701 soft_limit_tree_node_zone(int nid, int zid)
702 {
703 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
704 }
705 
706 static struct mem_cgroup_tree_per_zone *
707 soft_limit_tree_from_page(struct page *page)
708 {
709 	int nid = page_to_nid(page);
710 	int zid = page_zonenum(page);
711 
712 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
713 }
714 
715 static void
716 __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
717 				struct mem_cgroup_per_zone *mz,
718 				struct mem_cgroup_tree_per_zone *mctz,
719 				unsigned long long new_usage_in_excess)
720 {
721 	struct rb_node **p = &mctz->rb_root.rb_node;
722 	struct rb_node *parent = NULL;
723 	struct mem_cgroup_per_zone *mz_node;
724 
725 	if (mz->on_tree)
726 		return;
727 
728 	mz->usage_in_excess = new_usage_in_excess;
729 	if (!mz->usage_in_excess)
730 		return;
731 	while (*p) {
732 		parent = *p;
733 		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
734 					tree_node);
735 		if (mz->usage_in_excess < mz_node->usage_in_excess)
736 			p = &(*p)->rb_left;
737 		/*
738 		 * We can't avoid mem cgroups that are over their soft
739 		 * limit by the same amount
740 		 */
741 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
742 			p = &(*p)->rb_right;
743 	}
744 	rb_link_node(&mz->tree_node, parent, p);
745 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
746 	mz->on_tree = true;
747 }
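/*
 * Note that ties in usage_in_excess are inserted to the right, so the
 * rightmost node of the tree always belongs to a group with the largest
 * excess; __mem_cgroup_largest_soft_limit_node() below exploits this by
 * simply taking rb_last() of the tree.
 */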
748 
749 static void
750 __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
751 				struct mem_cgroup_per_zone *mz,
752 				struct mem_cgroup_tree_per_zone *mctz)
753 {
754 	if (!mz->on_tree)
755 		return;
756 	rb_erase(&mz->tree_node, &mctz->rb_root);
757 	mz->on_tree = false;
758 }
759 
760 static void
761 mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
762 				struct mem_cgroup_per_zone *mz,
763 				struct mem_cgroup_tree_per_zone *mctz)
764 {
765 	spin_lock(&mctz->lock);
766 	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
767 	spin_unlock(&mctz->lock);
768 }
769 
770 
771 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
772 {
773 	unsigned long long excess;
774 	struct mem_cgroup_per_zone *mz;
775 	struct mem_cgroup_tree_per_zone *mctz;
776 	int nid = page_to_nid(page);
777 	int zid = page_zonenum(page);
778 	mctz = soft_limit_tree_from_page(page);
779 
780 	/*
781 	 * Necessary to update all ancestors when hierarchy is used,
782 	 * because their event counter is not touched.
783 	 */
784 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
785 		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
786 		excess = res_counter_soft_limit_excess(&memcg->res);
787 		/*
788 		 * We have to update the tree if mz is on RB-tree or
789 		 * mem is over its softlimit.
790 		 */
791 		if (excess || mz->on_tree) {
792 			spin_lock(&mctz->lock);
793 			/* if on-tree, remove it */
794 			if (mz->on_tree)
795 				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
796 			/*
797 			 * Insert again. mz->usage_in_excess will be updated.
798 			 * If excess is 0, no tree ops.
799 			 */
800 			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
801 			spin_unlock(&mctz->lock);
802 		}
803 	}
804 }
805 
806 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
807 {
808 	int node, zone;
809 	struct mem_cgroup_per_zone *mz;
810 	struct mem_cgroup_tree_per_zone *mctz;
811 
812 	for_each_node(node) {
813 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
814 			mz = mem_cgroup_zoneinfo(memcg, node, zone);
815 			mctz = soft_limit_tree_node_zone(node, zone);
816 			mem_cgroup_remove_exceeded(memcg, mz, mctz);
817 		}
818 	}
819 }
820 
821 static struct mem_cgroup_per_zone *
822 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
823 {
824 	struct rb_node *rightmost = NULL;
825 	struct mem_cgroup_per_zone *mz;
826 
827 retry:
828 	mz = NULL;
829 	rightmost = rb_last(&mctz->rb_root);
830 	if (!rightmost)
831 		goto done;		/* Nothing to reclaim from */
832 
833 	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
834 	/*
835 	 * Remove the node now, but someone else can add it back;
836 	 * we will add it back at the end of reclaim to its correct
837 	 * position in the tree.
838 	 */
839 	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
840 	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
841 		!css_tryget(&mz->memcg->css))
842 		goto retry;
843 done:
844 	return mz;
845 }
846 
847 static struct mem_cgroup_per_zone *
848 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
849 {
850 	struct mem_cgroup_per_zone *mz;
851 
852 	spin_lock(&mctz->lock);
853 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
854 	spin_unlock(&mctz->lock);
855 	return mz;
856 }
857 
858 /*
859  * Implementation Note: reading percpu statistics for memcg.
860  *
861  * Both vmstat[] and percpu_counter use thresholds and do periodic
862  * synchronization to implement a "quick" read. There is a trade-off between
863  * reading cost and precision of the value, so we may eventually implement
864  * periodic synchronization of the counters in memcg as well.
865  *
866  * But this _read() function is used for the user interface now. The user
867  * accounts memory usage by memory cgroup and _always_ requires an exact
868  * value for that accounting. Even if we provided a quick-and-fuzzy read,
869  * we would still have to visit all online cpus and compute the sum. So,
870  * for now, the extra synchronization is not implemented (it is only
871  * implemented for cpu hotplug).
872  *
873  * If there are kernel-internal users which can make use of a not-exact
874  * value, and reading all cpu values becomes a performance bottleneck in some
875  * common workload, thresholds and synchronization as in vmstat[] should be implemented.
876  */
877 static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
878 				 enum mem_cgroup_stat_index idx)
879 {
880 	long val = 0;
881 	int cpu;
882 
883 	get_online_cpus();
884 	for_each_online_cpu(cpu)
885 		val += per_cpu(memcg->stat->count[idx], cpu);
886 #ifdef CONFIG_HOTPLUG_CPU
887 	spin_lock(&memcg->pcp_counter_lock);
888 	val += memcg->nocpu_base.count[idx];
889 	spin_unlock(&memcg->pcp_counter_lock);
890 #endif
891 	put_online_cpus();
892 	return val;
893 }
894 
895 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
896 					 bool charge)
897 {
898 	int val = (charge) ? 1 : -1;
899 	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
900 }
901 
902 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
903 					    enum mem_cgroup_events_index idx)
904 {
905 	unsigned long val = 0;
906 	int cpu;
907 
908 	get_online_cpus();
909 	for_each_online_cpu(cpu)
910 		val += per_cpu(memcg->stat->events[idx], cpu);
911 #ifdef CONFIG_HOTPLUG_CPU
912 	spin_lock(&memcg->pcp_counter_lock);
913 	val += memcg->nocpu_base.events[idx];
914 	spin_unlock(&memcg->pcp_counter_lock);
915 #endif
916 	put_online_cpus();
917 	return val;
918 }
919 
920 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
921 					 struct page *page,
922 					 bool anon, int nr_pages)
923 {
924 	/*
925 	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
926 	 * counted as CACHE even if it's on ANON LRU.
927 	 */
928 	if (anon)
929 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
930 				nr_pages);
931 	else
932 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
933 				nr_pages);
934 
935 	if (PageTransHuge(page))
936 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
937 				nr_pages);
938 
939 	/* pagein of a big page is an event. So, ignore page size */
940 	if (nr_pages > 0)
941 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
942 	else {
943 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
944 		nr_pages = -nr_pages; /* for event */
945 	}
946 
947 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
948 }
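/*
 * nr_page_events therefore accumulates the absolute number of pages charged
 * or uncharged (the sign flip above only selects PGPGIN vs PGPGOUT), and it
 * is this running count that memcg_check_events() compares against the
 * *_EVENTS_TARGET values to rate-limit threshold and soft-limit updates.
 */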
949 
950 unsigned long
951 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
952 {
953 	struct mem_cgroup_per_zone *mz;
954 
955 	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
956 	return mz->lru_size[lru];
957 }
958 
959 static unsigned long
960 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
961 			unsigned int lru_mask)
962 {
963 	struct mem_cgroup_per_zone *mz;
964 	enum lru_list lru;
965 	unsigned long ret = 0;
966 
967 	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
968 
969 	for_each_lru(lru) {
970 		if (BIT(lru) & lru_mask)
971 			ret += mz->lru_size[lru];
972 	}
973 	return ret;
974 }
975 
976 static unsigned long
977 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
978 			int nid, unsigned int lru_mask)
979 {
980 	u64 total = 0;
981 	int zid;
982 
983 	for (zid = 0; zid < MAX_NR_ZONES; zid++)
984 		total += mem_cgroup_zone_nr_lru_pages(memcg,
985 						nid, zid, lru_mask);
986 
987 	return total;
988 }
989 
990 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
991 			unsigned int lru_mask)
992 {
993 	int nid;
994 	u64 total = 0;
995 
996 	for_each_node_state(nid, N_MEMORY)
997 		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
998 	return total;
999 }
1000 
1001 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1002 				       enum mem_cgroup_events_target target)
1003 {
1004 	unsigned long val, next;
1005 
1006 	val = __this_cpu_read(memcg->stat->nr_page_events);
1007 	next = __this_cpu_read(memcg->stat->targets[target]);
1008 	/* from time_after() in jiffies.h */
1009 	if ((long)next - (long)val < 0) {
1010 		switch (target) {
1011 		case MEM_CGROUP_TARGET_THRESH:
1012 			next = val + THRESHOLDS_EVENTS_TARGET;
1013 			break;
1014 		case MEM_CGROUP_TARGET_SOFTLIMIT:
1015 			next = val + SOFTLIMIT_EVENTS_TARGET;
1016 			break;
1017 		case MEM_CGROUP_TARGET_NUMAINFO:
1018 			next = val + NUMAINFO_EVENTS_TARGET;
1019 			break;
1020 		default:
1021 			break;
1022 		}
1023 		__this_cpu_write(memcg->stat->targets[target], next);
1024 		return true;
1025 	}
1026 	return false;
1027 }
1028 
1029 /*
1030  * Check events in order.
1031  *
1032  */
1033 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
1034 {
1035 	preempt_disable();
1036 	/* threshold event is triggered in finer grain than soft limit */
1037 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1038 						MEM_CGROUP_TARGET_THRESH))) {
1039 		bool do_softlimit;
1040 		bool do_numainfo __maybe_unused;
1041 
1042 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
1043 						MEM_CGROUP_TARGET_SOFTLIMIT);
1044 #if MAX_NUMNODES > 1
1045 		do_numainfo = mem_cgroup_event_ratelimit(memcg,
1046 						MEM_CGROUP_TARGET_NUMAINFO);
1047 #endif
1048 		preempt_enable();
1049 
1050 		mem_cgroup_threshold(memcg);
1051 		if (unlikely(do_softlimit))
1052 			mem_cgroup_update_tree(memcg, page);
1053 #if MAX_NUMNODES > 1
1054 		if (unlikely(do_numainfo))
1055 			atomic_inc(&memcg->numainfo_events);
1056 #endif
1057 	} else
1058 		preempt_enable();
1059 }
1060 
1061 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1062 {
1063 	/*
1064 	 * mm_update_next_owner() may clear mm->owner to NULL
1065 	 * if it races with swapoff, page migration, etc.
1066 	 * So this can be called with p == NULL.
1067 	 */
1068 	if (unlikely(!p))
1069 		return NULL;
1070 
1071 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1072 }
1073 
1074 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1075 {
1076 	struct mem_cgroup *memcg = NULL;
1077 
1078 	rcu_read_lock();
1079 	do {
1080 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1081 		if (unlikely(!memcg))
1082 			memcg = root_mem_cgroup;
1083 	} while (!css_tryget(&memcg->css));
1084 	rcu_read_unlock();
1085 	return memcg;
1086 }
1087 
1088 /*
1089  * Returns a next (in a pre-order walk) alive memcg (with elevated css
1090  * ref. count) or NULL if the whole root's subtree has been visited.
1091  *
1092  * helper function to be used by mem_cgroup_iter
1093  */
1094 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
1095 		struct mem_cgroup *last_visited)
1096 {
1097 	struct cgroup_subsys_state *prev_css, *next_css;
1098 
1099 	prev_css = last_visited ? &last_visited->css : NULL;
1100 skip_node:
1101 	next_css = css_next_descendant_pre(prev_css, &root->css);
1102 
1103 	/*
1104 	 * Even if we found a group we have to make sure it is
1105 	 * alive. css && !memcg means that the groups should be
1106 	 * skipped and we should continue the tree walk.
1107 	 * last_visited css is safe to use because it is
1108 	 * protected by css_get and the tree walk is rcu safe.
1109 	 *
1110 	 * We do not take a reference on the root of the tree walk
1111 	 * because we might race with the root removal when it would
1112 	 * be the only node in the iterated hierarchy and mem_cgroup_iter
1113 	 * would end up in an endless loop because it expects that at
1114 	 * least one valid node will be returned. Root cannot disappear
1115 	 * because the caller of the iterator should already hold it, so
1116 	 * skipping the css reference should be safe.
1117 	 */
1118 	if (next_css) {
1119 		if ((next_css == &root->css) ||
1120 		    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
1121 			return mem_cgroup_from_css(next_css);
1122 
1123 		prev_css = next_css;
1124 		goto skip_node;
1125 	}
1126 
1127 	return NULL;
1128 }
1129 
1130 static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1131 {
1132 	/*
1133 	 * When a group in the hierarchy below root is destroyed, the
1134 	 * hierarchy iterator can no longer be trusted since it might
1135 	 * have pointed to the destroyed group.  Invalidate it.
1136 	 */
1137 	atomic_inc(&root->dead_count);
1138 }
1139 
1140 static struct mem_cgroup *
1141 mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1142 		     struct mem_cgroup *root,
1143 		     int *sequence)
1144 {
1145 	struct mem_cgroup *position = NULL;
1146 	/*
1147 	 * A cgroup destruction happens in two stages: offlining and
1148 	 * release.  They are separated by a RCU grace period.
1149 	 *
1150 	 * If the iterator is valid, we may still race with an
1151 	 * offlining.  The RCU lock ensures the object won't be
1152 	 * released, tryget will fail if we lost the race.
1153 	 */
1154 	*sequence = atomic_read(&root->dead_count);
1155 	if (iter->last_dead_count == *sequence) {
1156 		smp_rmb();
1157 		position = iter->last_visited;
1158 
1159 		/*
1160 		 * We cannot take a reference to root because we might race
1161 		 * with root removal and returning NULL would end up in
1162 		 * an endless loop on the iterator user level when root
1163 		 * would be returned all the time.
1164 		 */
1165 		if (position && position != root &&
1166 				!css_tryget(&position->css))
1167 			position = NULL;
1168 	}
1169 	return position;
1170 }
1171 
1172 static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1173 				   struct mem_cgroup *last_visited,
1174 				   struct mem_cgroup *new_position,
1175 				   struct mem_cgroup *root,
1176 				   int sequence)
1177 {
1178 	/* root reference counting symmetric to mem_cgroup_iter_load */
1179 	if (last_visited && last_visited != root)
1180 		css_put(&last_visited->css);
1181 	/*
1182 	 * We store the sequence count from the time @last_visited was
1183 	 * loaded successfully instead of rereading it here so that we
1184 	 * don't lose destruction events in between.  We could have
1185 	 * raced with the destruction of @new_position after all.
1186 	 */
1187 	iter->last_visited = new_position;
1188 	smp_wmb();
1189 	iter->last_dead_count = sequence;
1190 }
1191 
1192 /**
1193  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1194  * @root: hierarchy root
1195  * @prev: previously returned memcg, NULL on first invocation
1196  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1197  *
1198  * Returns references to children of the hierarchy below @root, or
1199  * @root itself, or %NULL after a full round-trip.
1200  *
1201  * Caller must pass the return value in @prev on subsequent
1202  * invocations for reference counting, or use mem_cgroup_iter_break()
1203  * to cancel a hierarchy walk before the round-trip is complete.
1204  *
1205  * Reclaimers can specify a zone and a priority level in @reclaim to
1206  * divide up the memcgs in the hierarchy among all concurrent
1207  * reclaimers operating on the same zone and priority.
1208  */
1209 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1210 				   struct mem_cgroup *prev,
1211 				   struct mem_cgroup_reclaim_cookie *reclaim)
1212 {
1213 	struct mem_cgroup *memcg = NULL;
1214 	struct mem_cgroup *last_visited = NULL;
1215 
1216 	if (mem_cgroup_disabled())
1217 		return NULL;
1218 
1219 	if (!root)
1220 		root = root_mem_cgroup;
1221 
1222 	if (prev && !reclaim)
1223 		last_visited = prev;
1224 
1225 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1226 		if (prev)
1227 			goto out_css_put;
1228 		return root;
1229 	}
1230 
1231 	rcu_read_lock();
1232 	while (!memcg) {
1233 		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1234 		int uninitialized_var(seq);
1235 
1236 		if (reclaim) {
1237 			int nid = zone_to_nid(reclaim->zone);
1238 			int zid = zone_idx(reclaim->zone);
1239 			struct mem_cgroup_per_zone *mz;
1240 
1241 			mz = mem_cgroup_zoneinfo(root, nid, zid);
1242 			iter = &mz->reclaim_iter[reclaim->priority];
1243 			if (prev && reclaim->generation != iter->generation) {
1244 				iter->last_visited = NULL;
1245 				goto out_unlock;
1246 			}
1247 
1248 			last_visited = mem_cgroup_iter_load(iter, root, &seq);
1249 		}
1250 
1251 		memcg = __mem_cgroup_iter_next(root, last_visited);
1252 
1253 		if (reclaim) {
1254 			mem_cgroup_iter_update(iter, last_visited, memcg, root,
1255 					seq);
1256 
1257 			if (!memcg)
1258 				iter->generation++;
1259 			else if (!prev && memcg)
1260 				reclaim->generation = iter->generation;
1261 		}
1262 
1263 		if (prev && !memcg)
1264 			goto out_unlock;
1265 	}
1266 out_unlock:
1267 	rcu_read_unlock();
1268 out_css_put:
1269 	if (prev && prev != root)
1270 		css_put(&prev->css);
1271 
1272 	return memcg;
1273 }
1274 
1275 /**
1276  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1277  * @root: hierarchy root
1278  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1279  */
1280 void mem_cgroup_iter_break(struct mem_cgroup *root,
1281 			   struct mem_cgroup *prev)
1282 {
1283 	if (!root)
1284 		root = root_mem_cgroup;
1285 	if (prev && prev != root)
1286 		css_put(&prev->css);
1287 }
1288 
1289 /*
1290  * Iteration constructs for visiting all cgroups (under a tree).  If
1291  * loops are exited prematurely (break), mem_cgroup_iter_break() must
1292  * be used for reference counting.
1293  */
1294 #define for_each_mem_cgroup_tree(iter, root)		\
1295 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
1296 	     iter != NULL;				\
1297 	     iter = mem_cgroup_iter(root, iter, NULL))
1298 
1299 #define for_each_mem_cgroup(iter)			\
1300 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
1301 	     iter != NULL;				\
1302 	     iter = mem_cgroup_iter(NULL, iter, NULL))
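/*
 * A typical walk over a subtree therefore looks like the sketch below, where
 * should_stop() merely stands in for whatever early-exit condition the
 * caller has (it is a placeholder, not a real helper):
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, memcg) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(memcg, iter);
 *			break;
 *		}
 *	}
 *
 * mem_cgroup_iter_break() drops the css reference that would otherwise be
 * leaked by leaving the loop before the round-trip completes.
 */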
1303 
1304 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1305 {
1306 	struct mem_cgroup *memcg;
1307 
1308 	rcu_read_lock();
1309 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1310 	if (unlikely(!memcg))
1311 		goto out;
1312 
1313 	switch (idx) {
1314 	case PGFAULT:
1315 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1316 		break;
1317 	case PGMAJFAULT:
1318 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1319 		break;
1320 	default:
1321 		BUG();
1322 	}
1323 out:
1324 	rcu_read_unlock();
1325 }
1326 EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
1327 
1328 /**
1329  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1330  * @zone: zone of the wanted lruvec
1331  * @memcg: memcg of the wanted lruvec
1332  *
1333  * Returns the lru list vector holding pages for the given @zone and
1334  * @mem.  This can be the global zone lruvec, if the memory controller
1335  * is disabled.
1336  */
1337 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1338 				      struct mem_cgroup *memcg)
1339 {
1340 	struct mem_cgroup_per_zone *mz;
1341 	struct lruvec *lruvec;
1342 
1343 	if (mem_cgroup_disabled()) {
1344 		lruvec = &zone->lruvec;
1345 		goto out;
1346 	}
1347 
1348 	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1349 	lruvec = &mz->lruvec;
1350 out:
1351 	/*
1352 	 * Since a node can be onlined after the mem_cgroup was created,
1353 	 * we have to be prepared to initialize lruvec->zone here;
1354 	 * and if offlined then reonlined, we need to reinitialize it.
1355 	 */
1356 	if (unlikely(lruvec->zone != zone))
1357 		lruvec->zone = zone;
1358 	return lruvec;
1359 }
1360 
1361 /*
1362  * The following LRU functions may be used without holding PCG_LOCK.
1363  * Operations are called by the global LRU routines independently of memcg.
1364  * What we have to take care of here is the validity of pc->mem_cgroup.
1365  *
1366  * Changes to pc->mem_cgroup happen when
1367  * 1. charge
1368  * 2. moving account
1369  * In the typical case, "charge" is done before add-to-lru. The exception is
1370  * SwapCache, which is added to the LRU before being charged.
1371  * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
1372  * When moving account, the page is not on the LRU; it is isolated.
1373  */
1374 
1375 /**
1376  * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1377  * @page: the page
1378  * @zone: zone of the page
1379  */
1380 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1381 {
1382 	struct mem_cgroup_per_zone *mz;
1383 	struct mem_cgroup *memcg;
1384 	struct page_cgroup *pc;
1385 	struct lruvec *lruvec;
1386 
1387 	if (mem_cgroup_disabled()) {
1388 		lruvec = &zone->lruvec;
1389 		goto out;
1390 	}
1391 
1392 	pc = lookup_page_cgroup(page);
1393 	memcg = pc->mem_cgroup;
1394 
1395 	/*
1396 	 * Surreptitiously switch any uncharged offlist page to root:
1397 	 * an uncharged page off lru does nothing to secure
1398 	 * its former mem_cgroup from sudden removal.
1399 	 *
1400 	 * Our caller holds lru_lock, and PageCgroupUsed is updated
1401 	 * under page_cgroup lock: between them, they make all uses
1402 	 * of pc->mem_cgroup safe.
1403 	 */
1404 	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1405 		pc->mem_cgroup = memcg = root_mem_cgroup;
1406 
1407 	mz = page_cgroup_zoneinfo(memcg, page);
1408 	lruvec = &mz->lruvec;
1409 out:
1410 	/*
1411 	 * Since a node can be onlined after the mem_cgroup was created,
1412 	 * we have to be prepared to initialize lruvec->zone here;
1413 	 * and if offlined then reonlined, we need to reinitialize it.
1414 	 */
1415 	if (unlikely(lruvec->zone != zone))
1416 		lruvec->zone = zone;
1417 	return lruvec;
1418 }
1419 
1420 /**
1421  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1422  * @lruvec: mem_cgroup per zone lru vector
1423  * @lru: index of lru list the page is sitting on
1424  * @nr_pages: positive when adding or negative when removing
1425  *
1426  * This function must be called when a page is added to or removed from an
1427  * lru list.
1428  */
1429 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1430 				int nr_pages)
1431 {
1432 	struct mem_cgroup_per_zone *mz;
1433 	unsigned long *lru_size;
1434 
1435 	if (mem_cgroup_disabled())
1436 		return;
1437 
1438 	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1439 	lru_size = mz->lru_size + lru;
1440 	*lru_size += nr_pages;
1441 	VM_BUG_ON((long)(*lru_size) < 0);
1442 }
1443 
1444 /*
1445  * Checks whether the given memcg is the same as root_memcg or lies
1446  * within its hierarchy subtree.
1447  */
1448 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1449 				  struct mem_cgroup *memcg)
1450 {
1451 	if (root_memcg == memcg)
1452 		return true;
1453 	if (!root_memcg->use_hierarchy || !memcg)
1454 		return false;
1455 	return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
1456 }
1457 
1458 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1459 				       struct mem_cgroup *memcg)
1460 {
1461 	bool ret;
1462 
1463 	rcu_read_lock();
1464 	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1465 	rcu_read_unlock();
1466 	return ret;
1467 }
1468 
1469 bool task_in_mem_cgroup(struct task_struct *task,
1470 			const struct mem_cgroup *memcg)
1471 {
1472 	struct mem_cgroup *curr = NULL;
1473 	struct task_struct *p;
1474 	bool ret;
1475 
1476 	p = find_lock_task_mm(task);
1477 	if (p) {
1478 		curr = get_mem_cgroup_from_mm(p->mm);
1479 		task_unlock(p);
1480 	} else {
1481 		/*
1482 		 * All threads may have already detached their mm's, but the oom
1483 		 * killer still needs to detect if they have already been oom
1484 		 * killed to prevent needlessly killing additional tasks.
1485 		 */
1486 		rcu_read_lock();
1487 		curr = mem_cgroup_from_task(task);
1488 		if (curr)
1489 			css_get(&curr->css);
1490 		rcu_read_unlock();
1491 	}
1492 	/*
1493 	 * We should check use_hierarchy of "memcg", not "curr", because checking
1494 	 * use_hierarchy of "curr" here would make this function return true if
1495 	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1496 	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
1497 	 */
1498 	ret = mem_cgroup_same_or_subtree(memcg, curr);
1499 	css_put(&curr->css);
1500 	return ret;
1501 }
1502 
1503 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1504 {
1505 	unsigned long inactive_ratio;
1506 	unsigned long inactive;
1507 	unsigned long active;
1508 	unsigned long gb;
1509 
1510 	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1511 	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1512 
1513 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1514 	if (gb)
1515 		inactive_ratio = int_sqrt(10 * gb);
1516 	else
1517 		inactive_ratio = 1;
1518 
1519 	return inactive * inactive_ratio < active;
1520 }
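/*
 * For example, with roughly 10GB of anonymous memory in the lruvec, gb is 10
 * and inactive_ratio becomes int_sqrt(100) = 10, so the inactive anon list
 * is only considered "low" once it holds less than about a tenth of the
 * active anon list.
 */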
1521 
1522 #define mem_cgroup_from_res_counter(counter, member)	\
1523 	container_of(counter, struct mem_cgroup, member)
1524 
1525 /**
1526  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1527  * @memcg: the memory cgroup
1528  *
1529  * Returns the maximum amount of memory @memcg can be charged with, in
1530  * pages.
1531  */
1532 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1533 {
1534 	unsigned long long margin;
1535 
1536 	margin = res_counter_margin(&memcg->res);
1537 	if (do_swap_account)
1538 		margin = min(margin, res_counter_margin(&memcg->memsw));
1539 	return margin >> PAGE_SHIFT;
1540 }
1541 
1542 int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1543 {
1544 	/* root ? */
1545 	if (!css_parent(&memcg->css))
1546 		return vm_swappiness;
1547 
1548 	return memcg->swappiness;
1549 }
1550 
1551 /*
1552  * memcg->moving_account is used for checking possibility that some thread is
1553  * calling move_account(). When a thread on CPU-A starts moving pages under
1554  * a memcg, other threads should check memcg->moving_account under
1555  * rcu_read_lock(), like this:
1556  *
1557  *         CPU-A                                    CPU-B
1558  *                                              rcu_read_lock()
1559  *         memcg->moving_account+1              if (memcg->moving_account)
1560  *                                                   take heavy locks.
1561  *         synchronize_rcu()                    update something.
1562  *                                              rcu_read_unlock()
1563  *         start move here.
1564  */
1565 
1566 /* for quick checking without looking up memcg */
1567 atomic_t memcg_moving __read_mostly;
1568 
1569 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1570 {
1571 	atomic_inc(&memcg_moving);
1572 	atomic_inc(&memcg->moving_account);
1573 	synchronize_rcu();
1574 }
1575 
1576 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1577 {
1578 	/*
1579 	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1580 	 * We check NULL in callee rather than caller.
1581 	 */
1582 	if (memcg) {
1583 		atomic_dec(&memcg_moving);
1584 		atomic_dec(&memcg->moving_account);
1585 	}
1586 }
1587 
1588 /*
1589  * Two routines for checking whether "mem" is under move_account() or not.
1590  *
1591  * mem_cgroup_stolen() -  checks whether a cgroup is mc.from or not. This
1592  *			  is used for avoiding races in accounting.  If true,
1593  *			  pc->mem_cgroup may be overwritten.
1594  *
1595  * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
1596  *			  under the hierarchy of moving cgroups. This is for
1597  *			  waiting at high memory pressure caused by "move".
1598  */
1599 
1600 static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1601 {
1602 	VM_BUG_ON(!rcu_read_lock_held());
1603 	return atomic_read(&memcg->moving_account) > 0;
1604 }
1605 
1606 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1607 {
1608 	struct mem_cgroup *from;
1609 	struct mem_cgroup *to;
1610 	bool ret = false;
1611 	/*
1612 	 * Unlike task_move routines, we access mc.to, mc.from not under
1613 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1614 	 */
1615 	spin_lock(&mc.lock);
1616 	from = mc.from;
1617 	to = mc.to;
1618 	if (!from)
1619 		goto unlock;
1620 
1621 	ret = mem_cgroup_same_or_subtree(memcg, from)
1622 		|| mem_cgroup_same_or_subtree(memcg, to);
1623 unlock:
1624 	spin_unlock(&mc.lock);
1625 	return ret;
1626 }
1627 
1628 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1629 {
1630 	if (mc.moving_task && current != mc.moving_task) {
1631 		if (mem_cgroup_under_move(memcg)) {
1632 			DEFINE_WAIT(wait);
1633 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1634 			/* moving charge context might have finished. */
1635 			if (mc.moving_task)
1636 				schedule();
1637 			finish_wait(&mc.waitq, &wait);
1638 			return true;
1639 		}
1640 	}
1641 	return false;
1642 }
1643 
1644 /*
1645  * Take this lock when
1646  * - code tries to modify a page's memcg while it is USED.
1647  * - code tries to modify page state accounting in a memcg.
1648  * See mem_cgroup_stolen(), too.
1649  */
1650 static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1651 				  unsigned long *flags)
1652 {
1653 	spin_lock_irqsave(&memcg->move_lock, *flags);
1654 }
1655 
1656 static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1657 				unsigned long *flags)
1658 {
1659 	spin_unlock_irqrestore(&memcg->move_lock, *flags);
1660 }
1661 
1662 #define K(x) ((x) << (PAGE_SHIFT-10))
1663 /**
1664  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1665  * @memcg: The memory cgroup that went over limit
1666  * @p: Task that is going to be killed
1667  *
1668  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1669  * enabled
1670  */
1671 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1672 {
1673 	/* oom_info_lock ensures that parallel ooms do not interleave */
1674 	static DEFINE_MUTEX(oom_info_lock);
1675 	struct mem_cgroup *iter;
1676 	unsigned int i;
1677 
1678 	if (!p)
1679 		return;
1680 
1681 	mutex_lock(&oom_info_lock);
1682 	rcu_read_lock();
1683 
1684 	pr_info("Task in ");
1685 	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1686 	pr_info(" killed as a result of limit of ");
1687 	pr_cont_cgroup_path(memcg->css.cgroup);
1688 	pr_info("\n");
1689 
1690 	rcu_read_unlock();
1691 
1692 	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1693 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1694 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1695 		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1696 	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
1697 		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1698 		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1699 		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1700 	pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
1701 		res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1702 		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1703 		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1704 
1705 	for_each_mem_cgroup_tree(iter, memcg) {
1706 		pr_info("Memory cgroup stats for ");
1707 		pr_cont_cgroup_path(iter->css.cgroup);
1708 		pr_cont(":");
1709 
1710 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1711 			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1712 				continue;
1713 			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1714 				K(mem_cgroup_read_stat(iter, i)));
1715 		}
1716 
1717 		for (i = 0; i < NR_LRU_LISTS; i++)
1718 			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1719 				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1720 
1721 		pr_cont("\n");
1722 	}
1723 	mutex_unlock(&oom_info_lock);
1724 }
1725 
1726 /*
1727  * This function returns the number of memcgs under the hierarchy tree.
1728  * Returns 1 (self count) if there are no children.
1729  */
1730 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1731 {
1732 	int num = 0;
1733 	struct mem_cgroup *iter;
1734 
1735 	for_each_mem_cgroup_tree(iter, memcg)
1736 		num++;
1737 	return num;
1738 }
1739 
1740 /*
1741  * Return the memory (and swap, if configured) limit for a memcg.
1742  */
1743 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1744 {
1745 	u64 limit;
1746 
1747 	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1748 
1749 	/*
1750 	 * Do not consider swap space if we cannot swap due to swappiness
1751 	 */
1752 	if (mem_cgroup_swappiness(memcg)) {
1753 		u64 memsw;
1754 
1755 		limit += total_swap_pages << PAGE_SHIFT;
1756 		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1757 
1758 		/*
1759 		 * If memsw is finite and limits the amount of swap space
1760 		 * available to this memcg, return that limit.
1761 		 */
1762 		limit = min(limit, memsw);
1763 	}
1764 
1765 	return limit;
1766 }
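/*
 * For example, a memcg with a 1GB memory limit, an unrestricted memsw limit
 * and 2GB of swap on the system gets a 3GB base here (assuming the default
 * nonzero swappiness), which mem_cgroup_out_of_memory() below converts to
 * pages for oom_badness().
 */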
1767 
1768 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1769 				     int order)
1770 {
1771 	struct mem_cgroup *iter;
1772 	unsigned long chosen_points = 0;
1773 	unsigned long totalpages;
1774 	unsigned int points = 0;
1775 	struct task_struct *chosen = NULL;
1776 
1777 	/*
1778 	 * If current has a pending SIGKILL or is exiting, then automatically
1779 	 * select it.  The goal is to allow it to allocate so that it may
1780 	 * quickly exit and free its memory.
1781 	 */
1782 	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
1783 		set_thread_flag(TIF_MEMDIE);
1784 		return;
1785 	}
1786 
1787 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
1788 	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1789 	for_each_mem_cgroup_tree(iter, memcg) {
1790 		struct css_task_iter it;
1791 		struct task_struct *task;
1792 
1793 		css_task_iter_start(&iter->css, &it);
1794 		while ((task = css_task_iter_next(&it))) {
1795 			switch (oom_scan_process_thread(task, totalpages, NULL,
1796 							false)) {
1797 			case OOM_SCAN_SELECT:
1798 				if (chosen)
1799 					put_task_struct(chosen);
1800 				chosen = task;
1801 				chosen_points = ULONG_MAX;
1802 				get_task_struct(chosen);
1803 				/* fall through */
1804 			case OOM_SCAN_CONTINUE:
1805 				continue;
1806 			case OOM_SCAN_ABORT:
1807 				css_task_iter_end(&it);
1808 				mem_cgroup_iter_break(memcg, iter);
1809 				if (chosen)
1810 					put_task_struct(chosen);
1811 				return;
1812 			case OOM_SCAN_OK:
1813 				break;
1814 			}
1815 			points = oom_badness(task, memcg, NULL, totalpages);
1816 			if (!points || points < chosen_points)
1817 				continue;
1818 			/* Prefer thread group leaders for display purposes */
1819 			if (points == chosen_points &&
1820 			    thread_group_leader(chosen))
1821 				continue;
1822 
1823 			if (chosen)
1824 				put_task_struct(chosen);
1825 			chosen = task;
1826 			chosen_points = points;
1827 			get_task_struct(chosen);
1828 		}
1829 		css_task_iter_end(&it);
1830 	}
1831 
1832 	if (!chosen)
1833 		return;
1834 	points = chosen_points * 1000 / totalpages;
1835 	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1836 			 NULL, "Memory cgroup out of memory");
1837 }
1838 
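/*
 * Reclaim pages from the hierarchy rooted at @memcg until either some
 * margin to the hard limit opens up or MEM_CGROUP_MAX_RECLAIM_LOOPS
 * attempts have been made.  MEM_CGROUP_RECLAIM_NOSWAP selects swapless
 * reclaim and MEM_CGROUP_RECLAIM_SHRINK marks a userspace-triggered limit
 * shrink, which may bail out after minimal progress.  Returns the number
 * of reclaimed pages.
 */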
1839 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1840 					gfp_t gfp_mask,
1841 					unsigned long flags)
1842 {
1843 	unsigned long total = 0;
1844 	bool noswap = false;
1845 	int loop;
1846 
1847 	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1848 		noswap = true;
1849 	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1850 		noswap = true;
1851 
1852 	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1853 		if (loop)
1854 			drain_all_stock_async(memcg);
1855 		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1856 		/*
1857 		 * Allow limit shrinkers, which are triggered directly
1858 		 * by userspace, to catch signals and stop reclaim
1859 		 * after minimal progress, regardless of the margin.
1860 		 */
1861 		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1862 			break;
1863 		if (mem_cgroup_margin(memcg))
1864 			break;
1865 		/*
1866 		 * If nothing was reclaimed after two attempts, there
1867 		 * may be no reclaimable pages in this hierarchy.
1868 		 */
1869 		if (loop && !total)
1870 			break;
1871 	}
1872 	return total;
1873 }
1874 
1875 /**
1876  * test_mem_cgroup_node_reclaimable
1877  * @memcg: the target memcg
1878  * @nid: the node ID to be checked.
1879  * @noswap: specify true here if the user wants file-only information.
1880  *
1881  * This function returns whether the specified memcg contains any
1882  * reclaimable pages on a node. Returns true if there are any reclaimable
1883  * pages in the node.
1884  */
1885 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1886 		int nid, bool noswap)
1887 {
1888 	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1889 		return true;
1890 	if (noswap || !total_swap_pages)
1891 		return false;
1892 	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1893 		return true;
1894 	return false;
1895 
1896 }
1897 #if MAX_NUMNODES > 1
1898 
1899 /*
1900  * Always updating the nodemask is not very good - even if we have an empty
1901  * list or the wrong list here, we can start from some node and traverse all
1902  * nodes based on the zonelist. So update the list loosely once per 10 secs.
1903  *
1904  */
1905 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1906 {
1907 	int nid;
1908 	/*
1909 	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1910 	 * pagein/pageout changes since the last update.
1911 	 */
1912 	if (!atomic_read(&memcg->numainfo_events))
1913 		return;
1914 	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1915 		return;
1916 
1917 	/* make a nodemask where this memcg uses memory from */
1918 	memcg->scan_nodes = node_states[N_MEMORY];
1919 
1920 	for_each_node_mask(nid, node_states[N_MEMORY]) {
1921 
1922 		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1923 			node_clear(nid, memcg->scan_nodes);
1924 	}
1925 
1926 	atomic_set(&memcg->numainfo_events, 0);
1927 	atomic_set(&memcg->numainfo_updating, 0);
1928 }
1929 
1930 /*
1931  * Select a node to start reclaim from. Because we only need to reduce the
1932  * usage counter, starting from any node is fine. Reclaiming from the
1933  * current node has both pros and cons.
1934  *
1935  * Freeing memory from the current node means freeing memory from a node
1936  * which we'll use or have used, so it may hurt that node's LRU ordering.
1937  * And if several threads hit their limits, they will contend on the same
1938  * node. But freeing from a remote node costs more because of memory latency.
1939  *
1940  * For now we use round-robin. A better algorithm is welcome.
1941  */
1942 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1943 {
1944 	int node;
1945 
1946 	mem_cgroup_may_update_nodemask(memcg);
1947 	node = memcg->last_scanned_node;
1948 
1949 	node = next_node(node, memcg->scan_nodes);
1950 	if (node == MAX_NUMNODES)
1951 		node = first_node(memcg->scan_nodes);
1952 	/*
1953 	 * We call this when we hit the limit, not when pages are added to an
1954 	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE, or
1955 	 * because the memcg is too small and none of its pages are on an LRU.
1956 	 * In that case, use the current node.
1957 	 */
1958 	if (unlikely(node == MAX_NUMNODES))
1959 		node = numa_node_id();
1960 
1961 	memcg->last_scanned_node = node;
1962 	return node;
1963 }
1964 
1965 /*
1966  * Check all nodes for reclaimable pages. For a quick scan, we make use
1967  * of scan_nodes, which allows us to skip unused nodes. But scan_nodes
1968  * is lazily updated and may not contain up-to-date information, so we
1969  * need to double-check.
1970  */
1971 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1972 {
1973 	int nid;
1974 
1975 	/*
1976 	 * Quick check, making use of scan_nodes:
1977 	 * we can skip unused nodes.
1978 	 */
1979 	if (!nodes_empty(memcg->scan_nodes)) {
1980 		for (nid = first_node(memcg->scan_nodes);
1981 		     nid < MAX_NUMNODES;
1982 		     nid = next_node(nid, memcg->scan_nodes)) {
1983 
1984 			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1985 				return true;
1986 		}
1987 	}
1988 	/*
1989 	 * Check rest of nodes.
1990 	 */
1991 	for_each_node_state(nid, N_MEMORY) {
1992 		if (node_isset(nid, memcg->scan_nodes))
1993 			continue;
1994 		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1995 			return true;
1996 	}
1997 	return false;
1998 }
1999 
2000 #else
2001 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
2002 {
2003 	return 0;
2004 }
2005 
2006 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
2007 {
2008 	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
2009 }
2010 #endif
2011 
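/*
 * Soft-limit reclaim: iterate the hierarchy below @root_memcg, shrinking
 * one victim memcg at a time from the given zone, until either the root's
 * soft-limit excess is gone or enough loops have passed without progress.
 * Returns the number of reclaimed pages; the number of scanned pages is
 * accumulated into *total_scanned for the caller.
 */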
2012 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2013 				   struct zone *zone,
2014 				   gfp_t gfp_mask,
2015 				   unsigned long *total_scanned)
2016 {
2017 	struct mem_cgroup *victim = NULL;
2018 	int total = 0;
2019 	int loop = 0;
2020 	unsigned long excess;
2021 	unsigned long nr_scanned;
2022 	struct mem_cgroup_reclaim_cookie reclaim = {
2023 		.zone = zone,
2024 		.priority = 0,
2025 	};
2026 
2027 	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2028 
2029 	while (1) {
2030 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2031 		if (!victim) {
2032 			loop++;
2033 			if (loop >= 2) {
2034 				/*
2035 				 * If we have not been able to reclaim
2036 				 * anything, it might be because there are
2037 				 * no reclaimable pages under this hierarchy.
2038 				 */
2039 				if (!total)
2040 					break;
2041 				/*
2042 				 * We want to do more targeted reclaim.
2043 				 * excess >> 2 is not too large, so we do not
2044 				 * reclaim too much, nor too small, so we do not
2045 				 * keep coming back to reclaim from this cgroup.
2046 				 */
2047 				if (total >= (excess >> 2) ||
2048 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2049 					break;
2050 			}
2051 			continue;
2052 		}
2053 		if (!mem_cgroup_reclaimable(victim, false))
2054 			continue;
2055 		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2056 						     zone, &nr_scanned);
2057 		*total_scanned += nr_scanned;
2058 		if (!res_counter_soft_limit_excess(&root_memcg->res))
2059 			break;
2060 	}
2061 	mem_cgroup_iter_break(root_memcg, victim);
2062 	return total;
2063 }
2064 
2065 #ifdef CONFIG_LOCKDEP
2066 static struct lockdep_map memcg_oom_lock_dep_map = {
2067 	.name = "memcg_oom_lock",
2068 };
2069 #endif
2070 
2071 static DEFINE_SPINLOCK(memcg_oom_lock);
2072 
2073 /*
2074  * Check whether the OOM killer is already running under our hierarchy.
2075  * If someone is already running it, return false.
2076  */
2077 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2078 {
2079 	struct mem_cgroup *iter, *failed = NULL;
2080 
2081 	spin_lock(&memcg_oom_lock);
2082 
2083 	for_each_mem_cgroup_tree(iter, memcg) {
2084 		if (iter->oom_lock) {
2085 			/*
2086 			 * this subtree of our hierarchy is already locked,
2087 			 * so we cannot grant the lock.
2088 			 */
2089 			failed = iter;
2090 			mem_cgroup_iter_break(memcg, iter);
2091 			break;
2092 		} else
2093 			iter->oom_lock = true;
2094 	}
2095 
2096 	if (failed) {
2097 		/*
2098 		 * OK, we failed to lock the whole subtree, so we have to
2099 		 * clean up what we already set up, up to the failing subtree.
2100 		 */
2101 		for_each_mem_cgroup_tree(iter, memcg) {
2102 			if (iter == failed) {
2103 				mem_cgroup_iter_break(memcg, iter);
2104 				break;
2105 			}
2106 			iter->oom_lock = false;
2107 		}
2108 	} else
2109 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
2110 
2111 	spin_unlock(&memcg_oom_lock);
2112 
2113 	return !failed;
2114 }
2115 
2116 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
2117 {
2118 	struct mem_cgroup *iter;
2119 
2120 	spin_lock(&memcg_oom_lock);
2121 	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
2122 	for_each_mem_cgroup_tree(iter, memcg)
2123 		iter->oom_lock = false;
2124 	spin_unlock(&memcg_oom_lock);
2125 }
2126 
2127 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
2128 {
2129 	struct mem_cgroup *iter;
2130 
2131 	for_each_mem_cgroup_tree(iter, memcg)
2132 		atomic_inc(&iter->under_oom);
2133 }
2134 
2135 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
2136 {
2137 	struct mem_cgroup *iter;
2138 
2139 	/*
2140 	 * When a new child is created while the hierarchy is under oom,
2141 	 * mem_cgroup_mark_under_oom() may not have been called on it, so
2142 	 * we have to use atomic_add_unless() here.
2143 	 */
2144 	for_each_mem_cgroup_tree(iter, memcg)
2145 		atomic_add_unless(&iter->under_oom, -1, 0);
2146 }
2147 
2148 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2149 
2150 struct oom_wait_info {
2151 	struct mem_cgroup *memcg;
2152 	wait_queue_t	wait;
2153 };
2154 
2155 static int memcg_oom_wake_function(wait_queue_t *wait,
2156 	unsigned mode, int sync, void *arg)
2157 {
2158 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2159 	struct mem_cgroup *oom_wait_memcg;
2160 	struct oom_wait_info *oom_wait_info;
2161 
2162 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
2163 	oom_wait_memcg = oom_wait_info->memcg;
2164 
2165 	/*
2166 	 * Both oom_wait_info->memcg and wake_memcg are stable under us,
2167 	 * so we can use css_is_ancestor() without worrying about RCU.
2168 	 */
2169 	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2170 		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
2171 		return 0;
2172 	return autoremove_wake_function(wait, mode, sync, arg);
2173 }
2174 
2175 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
2176 {
2177 	atomic_inc(&memcg->oom_wakeups);
2178 	/* for filtering, pass "memcg" as argument. */
2179 	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
2180 }
2181 
2182 static void memcg_oom_recover(struct mem_cgroup *memcg)
2183 {
2184 	if (memcg && atomic_read(&memcg->under_oom))
2185 		memcg_wakeup_oom(memcg);
2186 }
2187 
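/*
 * Called when a charge has failed and reclaim made no progress: instead of
 * invoking the OOM killer right away, remember the OOM context (memcg,
 * gfp_mask, order) in the current task.  mem_cgroup_oom_synchronize()
 * picks this state up again at the end of the page fault; see the comment
 * in the function body below.
 */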
2188 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2189 {
2190 	if (!current->memcg_oom.may_oom)
2191 		return;
2192 	/*
2193 	 * We are in the middle of the charge context here, so we
2194 	 * don't want to block when potentially sitting on a callstack
2195 	 * that holds all kinds of filesystem and mm locks.
2196 	 *
2197 	 * Also, the caller may handle a failed allocation gracefully
2198 	 * (like optional page cache readahead) and so an OOM killer
2199 	 * invocation might not even be necessary.
2200 	 *
2201 	 * That's why we don't do anything here except remember the
2202 	 * OOM context and then deal with it at the end of the page
2203 	 * fault when the stack is unwound, the locks are released,
2204 	 * and when we know whether the fault was overall successful.
2205 	 */
2206 	css_get(&memcg->css);
2207 	current->memcg_oom.memcg = memcg;
2208 	current->memcg_oom.gfp_mask = mask;
2209 	current->memcg_oom.order = order;
2210 }
2211 
2212 /**
2213  * mem_cgroup_oom_synchronize - complete memcg OOM handling
2214  * @handle: actually kill/wait or just clean up the OOM state
2215  *
2216  * This has to be called at the end of a page fault if the memcg OOM
2217  * handler was enabled.
2218  *
2219  * Memcg supports userspace OOM handling where failed allocations must
2220  * sleep on a waitqueue until the userspace task resolves the
2221  * situation.  Sleeping directly in the charge context with all kinds
2222  * of locks held is not a good idea, instead we remember an OOM state
2223  * in the task and mem_cgroup_oom_synchronize() has to be called at
2224  * the end of the page fault to complete the OOM handling.
2225  *
2226  * Returns %true if an ongoing memcg OOM situation was detected and
2227  * completed, %false otherwise.
2228  */
2229 bool mem_cgroup_oom_synchronize(bool handle)
2230 {
2231 	struct mem_cgroup *memcg = current->memcg_oom.memcg;
2232 	struct oom_wait_info owait;
2233 	bool locked;
2234 
2235 	/* OOM is global, do not handle */
2236 	if (!memcg)
2237 		return false;
2238 
2239 	if (!handle)
2240 		goto cleanup;
2241 
2242 	owait.memcg = memcg;
2243 	owait.wait.flags = 0;
2244 	owait.wait.func = memcg_oom_wake_function;
2245 	owait.wait.private = current;
2246 	INIT_LIST_HEAD(&owait.wait.task_list);
2247 
2248 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2249 	mem_cgroup_mark_under_oom(memcg);
2250 
2251 	locked = mem_cgroup_oom_trylock(memcg);
2252 
2253 	if (locked)
2254 		mem_cgroup_oom_notify(memcg);
2255 
2256 	if (locked && !memcg->oom_kill_disable) {
2257 		mem_cgroup_unmark_under_oom(memcg);
2258 		finish_wait(&memcg_oom_waitq, &owait.wait);
2259 		mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2260 					 current->memcg_oom.order);
2261 	} else {
2262 		schedule();
2263 		mem_cgroup_unmark_under_oom(memcg);
2264 		finish_wait(&memcg_oom_waitq, &owait.wait);
2265 	}
2266 
2267 	if (locked) {
2268 		mem_cgroup_oom_unlock(memcg);
2269 		/*
2270 		 * There is no guarantee that an OOM-lock contender
2271 		 * sees the wakeups triggered by the OOM kill
2272 		 * uncharges.  Wake any sleepers explicitly.
2273 		 */
2274 		memcg_oom_recover(memcg);
2275 	}
2276 cleanup:
2277 	current->memcg_oom.memcg = NULL;
2278 	css_put(&memcg->css);
2279 	return true;
2280 }
2281 
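/*
 * Illustrative caller pattern for mem_cgroup_oom_synchronize() (a sketch
 * only; the real call sites live in the page fault and OOM paths, not in
 * this file).  Roughly:
 *
 *	ret = handle_mm_fault(mm, vma, address, flags);
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 *
 * The "false" call merely cleans up the recorded state.  When the fault
 * does return VM_FAULT_OOM, the global OOM path is expected to try
 * mem_cgroup_oom_synchronize(true) first, so that a memcg-constrained OOM
 * never escalates into a global OOM kill.
 */
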
2282 /*
2283  * Currently used to update mapped file statistics, but the routine can be
2284  * generalized to update other statistics as well.
2285  *
2286  * Notes: Race condition
2287  *
2288  * We usually use page_cgroup_lock() for accessing page_cgroup members, but
2289  * it tends to be costly. Under some conditions, however, we don't need
2290  * to do so _always_.
2291  *
2292  * Considering "charge", lock_page_cgroup() is not required because all
2293  * file-stat operations happen after a page is attached to the radix tree,
2294  * so there is no race with "charge".
2295  *
2296  * Considering "uncharge", memcg intentionally doesn't clear pc->mem_cgroup
2297  * at "uncharge", so we always see a valid pc->mem_cgroup even if we race
2298  * with "uncharge". The statistics themselves are properly handled by the
2299  * page_cgroup flags.
2300  *
2301  * Considering "move", this is the only case where we see a race. To make
2302  * the race window small, we check mm->moving_account to detect a possible
2303  * race, and take a lock if there is one.
2304  */
2305 
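/*
 * A minimal usage sketch of the locking protocol described above
 * (illustrative only; real callers go through the
 * mem_cgroup_begin/end_update_page_stat() wrappers, which also take care
 * of rcu_read_lock()):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	rcu_read_lock();
 *	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, 1);
 *	if (locked)
 *		__mem_cgroup_end_update_page_stat(page, &flags);
 *	rcu_read_unlock();
 */
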
2306 void __mem_cgroup_begin_update_page_stat(struct page *page,
2307 				bool *locked, unsigned long *flags)
2308 {
2309 	struct mem_cgroup *memcg;
2310 	struct page_cgroup *pc;
2311 
2312 	pc = lookup_page_cgroup(page);
2313 again:
2314 	memcg = pc->mem_cgroup;
2315 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2316 		return;
2317 	/*
2318 	 * If this memory cgroup is not under account moving, we don't
2319 	 * need to take move_lock_mem_cgroup(). Because we already hold
2320 	 * rcu_read_lock(), any calls to move_account will be delayed until
2321 	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
2322 	 */
2323 	if (!mem_cgroup_stolen(memcg))
2324 		return;
2325 
2326 	move_lock_mem_cgroup(memcg, flags);
2327 	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2328 		move_unlock_mem_cgroup(memcg, flags);
2329 		goto again;
2330 	}
2331 	*locked = true;
2332 }
2333 
2334 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2335 {
2336 	struct page_cgroup *pc = lookup_page_cgroup(page);
2337 
2338 	/*
2339 	 * It's guaranteed that pc->mem_cgroup never changes while
2340 	 * the lock is held, because any routine that modifies pc->mem_cgroup
2341 	 * should take move_lock_mem_cgroup().
2342 	 */
2343 	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2344 }
2345 
2346 void mem_cgroup_update_page_stat(struct page *page,
2347 				 enum mem_cgroup_stat_index idx, int val)
2348 {
2349 	struct mem_cgroup *memcg;
2350 	struct page_cgroup *pc = lookup_page_cgroup(page);
2351 	unsigned long uninitialized_var(flags);
2352 
2353 	if (mem_cgroup_disabled())
2354 		return;
2355 
2356 	VM_BUG_ON(!rcu_read_lock_held());
2357 	memcg = pc->mem_cgroup;
2358 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2359 		return;
2360 
2361 	this_cpu_add(memcg->stat->count[idx], val);
2362 }
2363 
2364 /*
2365  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
2366  * TODO: it may be necessary to use bigger numbers on big iron.
2367  */
2368 #define CHARGE_BATCH	32U
2369 struct memcg_stock_pcp {
2370 	struct mem_cgroup *cached; /* this is never the root cgroup */
2371 	unsigned int nr_pages;
2372 	struct work_struct work;
2373 	unsigned long flags;
2374 #define FLUSHING_CACHED_CHARGE	0
2375 };
2376 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2377 static DEFINE_MUTEX(percpu_charge_mutex);
2378 
2379 /**
2380  * consume_stock: Try to consume stocked charge on this cpu.
2381  * @memcg: memcg to consume from.
2382  * @nr_pages: how many pages to charge.
2383  *
2384  * The charges will only happen if @memcg matches the current cpu's memcg
2385  * stock, and at least @nr_pages are available in that stock.  Failure to
2386  * service an allocation will refill the stock.
2387  *
2388  * returns true if successful, false otherwise.
2389  */
2390 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2391 {
2392 	struct memcg_stock_pcp *stock;
2393 	bool ret = true;
2394 
2395 	if (nr_pages > CHARGE_BATCH)
2396 		return false;
2397 
2398 	stock = &get_cpu_var(memcg_stock);
2399 	if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2400 		stock->nr_pages -= nr_pages;
2401 	else /* need to call res_counter_charge */
2402 		ret = false;
2403 	put_cpu_var(memcg_stock);
2404 	return ret;
2405 }
2406 
2407 /*
2408  * Return stocks cached in percpu back to the res_counter and reset the cached information.
2409  */
2410 static void drain_stock(struct memcg_stock_pcp *stock)
2411 {
2412 	struct mem_cgroup *old = stock->cached;
2413 
2414 	if (stock->nr_pages) {
2415 		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2416 
2417 		res_counter_uncharge(&old->res, bytes);
2418 		if (do_swap_account)
2419 			res_counter_uncharge(&old->memsw, bytes);
2420 		stock->nr_pages = 0;
2421 	}
2422 	stock->cached = NULL;
2423 }
2424 
2425 /*
2426  * This must be called with preemption disabled or by a thread which
2427  * is pinned to the local cpu.
2428  */
2429 static void drain_local_stock(struct work_struct *dummy)
2430 {
2431 	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2432 	drain_stock(stock);
2433 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2434 }
2435 
2436 static void __init memcg_stock_init(void)
2437 {
2438 	int cpu;
2439 
2440 	for_each_possible_cpu(cpu) {
2441 		struct memcg_stock_pcp *stock =
2442 					&per_cpu(memcg_stock, cpu);
2443 		INIT_WORK(&stock->work, drain_local_stock);
2444 	}
2445 }
2446 
2447 /*
2448  * Cache charges (nr_pages), already charged to the res_counter, in the
2449  * local per-cpu area. They will be consumed by consume_stock() later.
2450  */
2451 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2452 {
2453 	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2454 
2455 	if (stock->cached != memcg) { /* reset if necessary */
2456 		drain_stock(stock);
2457 		stock->cached = memcg;
2458 	}
2459 	stock->nr_pages += nr_pages;
2460 	put_cpu_var(memcg_stock);
2461 }
2462 
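/*
 * How the stock is used in practice: mem_cgroup_try_charge() charges the
 * res_counter in batches of up to CHARGE_BATCH pages and then calls
 * refill_stock() with the surplus, so that subsequent charges on this cpu
 * can be served by consume_stock() without touching the res_counter.  For
 * example, a single-page charge typically charges 32 pages and leaves the
 * remaining 31 in the local stock.
 */
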
2463 /*
2464  * Drain all per-CPU charge caches for the given root_memcg and the
2465  * hierarchy subtree under it. The sync flag says whether we should
2466  * block until the work is done.
2467  */
2468 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2469 {
2470 	int cpu, curcpu;
2471 
2472 	/* Notify other cpus that system-wide "drain" is running */
2473 	get_online_cpus();
2474 	curcpu = get_cpu();
2475 	for_each_online_cpu(cpu) {
2476 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2477 		struct mem_cgroup *memcg;
2478 
2479 		memcg = stock->cached;
2480 		if (!memcg || !stock->nr_pages)
2481 			continue;
2482 		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2483 			continue;
2484 		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2485 			if (cpu == curcpu)
2486 				drain_local_stock(&stock->work);
2487 			else
2488 				schedule_work_on(cpu, &stock->work);
2489 		}
2490 	}
2491 	put_cpu();
2492 
2493 	if (!sync)
2494 		goto out;
2495 
2496 	for_each_online_cpu(cpu) {
2497 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2498 		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2499 			flush_work(&stock->work);
2500 	}
2501 out:
2502 	put_online_cpus();
2503 }
2504 
2505 /*
2506  * Try to drain stocked charges on other cpus. This function is asynchronous
2507  * and just queues a work item per cpu to drain locally on each cpu. The
2508  * caller can expect some charges to be returned to the res_counter later,
2509  * but cannot wait for that to happen.
2510  */
2511 static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2512 {
2513 	/*
2514 	 * If someone calls draining, avoid adding more kworker runs.
2515 	 */
2516 	if (!mutex_trylock(&percpu_charge_mutex))
2517 		return;
2518 	drain_all_stock(root_memcg, false);
2519 	mutex_unlock(&percpu_charge_mutex);
2520 }
2521 
2522 /* This is a synchronous drain interface. */
2523 static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2524 {
2525 	/* called when force_empty is called */
2526 	mutex_lock(&percpu_charge_mutex);
2527 	drain_all_stock(root_memcg, true);
2528 	mutex_unlock(&percpu_charge_mutex);
2529 }
2530 
2531 /*
2532  * This function drains the percpu counter values from a DEAD cpu and
2533  * moves them to the local cpu. Note that this function can be preempted.
2534  */
2535 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2536 {
2537 	int i;
2538 
2539 	spin_lock(&memcg->pcp_counter_lock);
2540 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2541 		long x = per_cpu(memcg->stat->count[i], cpu);
2542 
2543 		per_cpu(memcg->stat->count[i], cpu) = 0;
2544 		memcg->nocpu_base.count[i] += x;
2545 	}
2546 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2547 		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2548 
2549 		per_cpu(memcg->stat->events[i], cpu) = 0;
2550 		memcg->nocpu_base.events[i] += x;
2551 	}
2552 	spin_unlock(&memcg->pcp_counter_lock);
2553 }
2554 
2555 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
2556 					unsigned long action,
2557 					void *hcpu)
2558 {
2559 	int cpu = (unsigned long)hcpu;
2560 	struct memcg_stock_pcp *stock;
2561 	struct mem_cgroup *iter;
2562 
2563 	if (action == CPU_ONLINE)
2564 		return NOTIFY_OK;
2565 
2566 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2567 		return NOTIFY_OK;
2568 
2569 	for_each_mem_cgroup(iter)
2570 		mem_cgroup_drain_pcp_counter(iter, cpu);
2571 
2572 	stock = &per_cpu(memcg_stock, cpu);
2573 	drain_stock(stock);
2574 	return NOTIFY_OK;
2575 }
2576 
2577 
2578 /* See mem_cgroup_try_charge() for details */
2579 enum {
2580 	CHARGE_OK,		/* success */
2581 	CHARGE_RETRY,		/* need to retry but retry is not bad */
2582 	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2583 	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
2584 };
2585 
2586 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2587 				unsigned int nr_pages, unsigned int min_pages,
2588 				bool invoke_oom)
2589 {
2590 	unsigned long csize = nr_pages * PAGE_SIZE;
2591 	struct mem_cgroup *mem_over_limit;
2592 	struct res_counter *fail_res;
2593 	unsigned long flags = 0;
2594 	int ret;
2595 
2596 	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2597 
2598 	if (likely(!ret)) {
2599 		if (!do_swap_account)
2600 			return CHARGE_OK;
2601 		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2602 		if (likely(!ret))
2603 			return CHARGE_OK;
2604 
2605 		res_counter_uncharge(&memcg->res, csize);
2606 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2607 		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2608 	} else
2609 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2610 	/*
2611 	 * Never reclaim on behalf of optional batching, retry with a
2612 	 * single page instead.
2613 	 */
2614 	if (nr_pages > min_pages)
2615 		return CHARGE_RETRY;
2616 
2617 	if (!(gfp_mask & __GFP_WAIT))
2618 		return CHARGE_WOULDBLOCK;
2619 
2620 	if (gfp_mask & __GFP_NORETRY)
2621 		return CHARGE_NOMEM;
2622 
2623 	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2624 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2625 		return CHARGE_RETRY;
2626 	/*
2627 	 * Even though the limit is exceeded at this point, reclaim
2628 	 * may have been able to free some pages.  Retry the charge
2629 	 * before killing the task.
2630 	 *
2631 	 * Only for regular pages, though: huge pages are rather
2632 	 * unlikely to succeed so close to the limit, and we fall back
2633 	 * to regular pages anyway in case of failure.
2634 	 */
2635 	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
2636 		return CHARGE_RETRY;
2637 
2638 	/*
2639 	 * At task move, charge accounts can be doubly counted. So, it's
2640 	 * better to wait until the end of task_move if something is going on.
2641 	 */
2642 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2643 		return CHARGE_RETRY;
2644 
2645 	if (invoke_oom)
2646 		mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
2647 
2648 	return CHARGE_NOMEM;
2649 }
2650 
2651 /**
2652  * mem_cgroup_try_charge - try charging a memcg
2653  * @memcg: memcg to charge
 * @gfp_mask: reclaim mode
2654  * @nr_pages: number of pages to charge
2655  * @oom: trigger OOM if reclaim fails
2656  *
2657  * Returns 0 if @memcg was charged successfully, -EINTR if the charge
2658  * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
2659  */
2660 static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
2661 				 gfp_t gfp_mask,
2662 				 unsigned int nr_pages,
2663 				 bool oom)
2664 {
2665 	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2666 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2667 	int ret;
2668 
2669 	if (mem_cgroup_is_root(memcg))
2670 		goto done;
2671 	/*
2672 	 * Unlike in global OOM situations, memcg is not in a physical
2673 	 * memory shortage.  Allow dying and OOM-killed tasks to
2674 	 * bypass the last charges so that they can exit quickly and
2675 	 * free their memory.
2676 	 */
2677 	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2678 		     fatal_signal_pending(current)))
2679 		goto bypass;
2680 
2681 	if (unlikely(task_in_memcg_oom(current)))
2682 		goto nomem;
2683 
2684 	if (gfp_mask & __GFP_NOFAIL)
2685 		oom = false;
2686 again:
2687 	if (consume_stock(memcg, nr_pages))
2688 		goto done;
2689 
2690 	do {
2691 		bool invoke_oom = oom && !nr_oom_retries;
2692 
2693 		/* If killed, bypass charge */
2694 		if (fatal_signal_pending(current))
2695 			goto bypass;
2696 
2697 		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
2698 					   nr_pages, invoke_oom);
2699 		switch (ret) {
2700 		case CHARGE_OK:
2701 			break;
2702 		case CHARGE_RETRY: /* not in OOM situation but retry */
2703 			batch = nr_pages;
2704 			goto again;
2705 		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2706 			goto nomem;
2707 		case CHARGE_NOMEM: /* OOM routine works */
2708 			if (!oom || invoke_oom)
2709 				goto nomem;
2710 			nr_oom_retries--;
2711 			break;
2712 		}
2713 	} while (ret != CHARGE_OK);
2714 
2715 	if (batch > nr_pages)
2716 		refill_stock(memcg, batch - nr_pages);
2717 done:
2718 	return 0;
2719 nomem:
2720 	if (!(gfp_mask & __GFP_NOFAIL))
2721 		return -ENOMEM;
2722 bypass:
2723 	return -EINTR;
2724 }
2725 
2726 /**
2727  * mem_cgroup_try_charge_mm - try charging a mm
2728  * @mm: mm_struct to charge
 * @gfp_mask: reclaim mode
2729  * @nr_pages: number of pages to charge
2730  * @oom: trigger OOM if reclaim fails
2731  *
2732  * Returns the charged mem_cgroup associated with the given mm_struct or
2733  * NULL if the charge failed.
2734  */
2735 static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
2736 				 gfp_t gfp_mask,
2737 				 unsigned int nr_pages,
2738 				 bool oom)
2739 
2740 {
2741 	struct mem_cgroup *memcg;
2742 	int ret;
2743 
2744 	memcg = get_mem_cgroup_from_mm(mm);
2745 	ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
2746 	css_put(&memcg->css);
2747 	if (ret == -EINTR)
2748 		memcg = root_mem_cgroup;
2749 	else if (ret)
2750 		memcg = NULL;
2751 
2752 	return memcg;
2753 }
2754 
2755 /*
2756  * Sometimes we have to undo a charge we got by try_charge().
2757  * This function is for that: it uncharges the res_counters that were
2758  * charged by try_charge().
2759  */
2760 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2761 				       unsigned int nr_pages)
2762 {
2763 	if (!mem_cgroup_is_root(memcg)) {
2764 		unsigned long bytes = nr_pages * PAGE_SIZE;
2765 
2766 		res_counter_uncharge(&memcg->res, bytes);
2767 		if (do_swap_account)
2768 			res_counter_uncharge(&memcg->memsw, bytes);
2769 	}
2770 }
2771 
2772 /*
2773  * Cancel charges in this cgroup only; this doesn't propagate to the parent cgroup.
2774  * This is useful when moving usage to parent cgroup.
2775  */
2776 static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2777 					unsigned int nr_pages)
2778 {
2779 	unsigned long bytes = nr_pages * PAGE_SIZE;
2780 
2781 	if (mem_cgroup_is_root(memcg))
2782 		return;
2783 
2784 	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2785 	if (do_swap_account)
2786 		res_counter_uncharge_until(&memcg->memsw,
2787 						memcg->memsw.parent, bytes);
2788 }
2789 
2790 /*
2791  * A helper function to get a mem_cgroup from an ID. Must be called under
2792  * rcu_read_lock().  The caller is responsible for calling css_tryget if
2793  * the mem_cgroup is used for charging. (Dropping a refcnt from swap can be
2794  * done against an already removed memcg.)
2795  */
2796 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2797 {
2798 	/* ID 0 is unused ID */
2799 	if (!id)
2800 		return NULL;
2801 	return mem_cgroup_from_id(id);
2802 }
2803 
2804 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2805 {
2806 	struct mem_cgroup *memcg = NULL;
2807 	struct page_cgroup *pc;
2808 	unsigned short id;
2809 	swp_entry_t ent;
2810 
2811 	VM_BUG_ON_PAGE(!PageLocked(page), page);
2812 
2813 	pc = lookup_page_cgroup(page);
2814 	lock_page_cgroup(pc);
2815 	if (PageCgroupUsed(pc)) {
2816 		memcg = pc->mem_cgroup;
2817 		if (memcg && !css_tryget(&memcg->css))
2818 			memcg = NULL;
2819 	} else if (PageSwapCache(page)) {
2820 		ent.val = page_private(page);
2821 		id = lookup_swap_cgroup_id(ent);
2822 		rcu_read_lock();
2823 		memcg = mem_cgroup_lookup(id);
2824 		if (memcg && !css_tryget(&memcg->css))
2825 			memcg = NULL;
2826 		rcu_read_unlock();
2827 	}
2828 	unlock_page_cgroup(pc);
2829 	return memcg;
2830 }
2831 
2832 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2833 				       struct page *page,
2834 				       unsigned int nr_pages,
2835 				       enum charge_type ctype,
2836 				       bool lrucare)
2837 {
2838 	struct page_cgroup *pc = lookup_page_cgroup(page);
2839 	struct zone *uninitialized_var(zone);
2840 	struct lruvec *lruvec;
2841 	bool was_on_lru = false;
2842 	bool anon;
2843 
2844 	lock_page_cgroup(pc);
2845 	VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
2846 	/*
2847 	 * we don't need page_cgroup_lock for tail pages, because they are not
2848 	 * accessed by any other context at this point.
2849 	 */
2850 
2851 	/*
2852 	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2853 	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2854 	 */
2855 	if (lrucare) {
2856 		zone = page_zone(page);
2857 		spin_lock_irq(&zone->lru_lock);
2858 		if (PageLRU(page)) {
2859 			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2860 			ClearPageLRU(page);
2861 			del_page_from_lru_list(page, lruvec, page_lru(page));
2862 			was_on_lru = true;
2863 		}
2864 	}
2865 
2866 	pc->mem_cgroup = memcg;
2867 	/*
2868 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2869 	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2870 	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2871 	 * before USED bit, we need memory barrier here.
2872 	 * See mem_cgroup_add_lru_list(), etc.
2873 	 */
2874 	smp_wmb();
2875 	SetPageCgroupUsed(pc);
2876 
2877 	if (lrucare) {
2878 		if (was_on_lru) {
2879 			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2880 			VM_BUG_ON_PAGE(PageLRU(page), page);
2881 			SetPageLRU(page);
2882 			add_page_to_lru_list(page, lruvec, page_lru(page));
2883 		}
2884 		spin_unlock_irq(&zone->lru_lock);
2885 	}
2886 
2887 	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
2888 		anon = true;
2889 	else
2890 		anon = false;
2891 
2892 	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
2893 	unlock_page_cgroup(pc);
2894 
2895 	/*
2896 	 * "charge_statistics" updated the event counter, so check it now.
2897 	 * Insert the ancestor (and the ancestor's ancestors) into the
2898 	 * soft-limit RB-tree if they exceed their soft limit.
2899 	 */
2900 	memcg_check_events(memcg, page);
2901 }
2902 
2903 static DEFINE_MUTEX(set_limit_mutex);
2904 
2905 #ifdef CONFIG_MEMCG_KMEM
2906 static DEFINE_MUTEX(activate_kmem_mutex);
2907 
2908 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2909 {
2910 	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
2911 		memcg_kmem_is_active(memcg);
2912 }
2913 
2914 /*
2915  * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2916  * in the memcg_cache_params struct.
2917  */
2918 static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2919 {
2920 	struct kmem_cache *cachep;
2921 
2922 	VM_BUG_ON(p->is_root_cache);
2923 	cachep = p->root_cache;
2924 	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
2925 }
2926 
2927 #ifdef CONFIG_SLABINFO
2928 static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
2929 {
2930 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
2931 	struct memcg_cache_params *params;
2932 
2933 	if (!memcg_can_account_kmem(memcg))
2934 		return -EIO;
2935 
2936 	print_slabinfo_header(m);
2937 
2938 	mutex_lock(&memcg->slab_caches_mutex);
2939 	list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2940 		cache_show(memcg_params_to_cache(params), m);
2941 	mutex_unlock(&memcg->slab_caches_mutex);
2942 
2943 	return 0;
2944 }
2945 #endif
2946 
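/*
 * Charge @size bytes of kernel memory to @memcg: the kmem counter is
 * charged first, then the regular memory (and memsw) counters via
 * mem_cgroup_try_charge().  A -EINTR bypass from try_charge is turned
 * into a forced, non-failing charge because the per-memcg cache has
 * already been selected at this point and the allocation cannot be
 * redirected; see the comment in the function body.
 */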
2947 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
2948 {
2949 	struct res_counter *fail_res;
2950 	int ret = 0;
2951 
2952 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2953 	if (ret)
2954 		return ret;
2955 
2956 	ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
2957 				    oom_gfp_allowed(gfp));
2958 	if (ret == -EINTR)  {
2959 		/*
2960 		 * mem_cgroup_try_charge() chose to bypass to root due to
2961 		 * OOM kill or fatal signal.  Since our only options are to
2962 		 * either fail the allocation or charge it to this cgroup, do
2963 		 * it as a temporary condition. But we can't fail. From a
2964 		 * kmem/slab perspective, the cache has already been selected,
2965 		 * by mem_cgroup_kmem_get_cache(), so it is too late to change
2966 		 * our minds.
2967 		 *
2968 		 * This condition will only trigger if the task entered
2969 		 * memcg_charge_kmem in a sane state, but was OOM-killed during
2970 		 * mem_cgroup_try_charge() above. Tasks that were already
2971 		 * dying when the allocation triggers should have been already
2972 		 * directed to the root cgroup in memcontrol.h
2973 		 */
2974 		res_counter_charge_nofail(&memcg->res, size, &fail_res);
2975 		if (do_swap_account)
2976 			res_counter_charge_nofail(&memcg->memsw, size,
2977 						  &fail_res);
2978 		ret = 0;
2979 	} else if (ret)
2980 		res_counter_uncharge(&memcg->kmem, size);
2981 
2982 	return ret;
2983 }
2984 
2985 static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
2986 {
2987 	res_counter_uncharge(&memcg->res, size);
2988 	if (do_swap_account)
2989 		res_counter_uncharge(&memcg->memsw, size);
2990 
2991 	/* Not down to 0 */
2992 	if (res_counter_uncharge(&memcg->kmem, size))
2993 		return;
2994 
2995 	/*
2996 	 * Releases a reference taken in kmem_cgroup_css_offline in case
2997 	 * this last uncharge is racing with the offlining code or it is
2998 	 * outliving the memcg existence.
2999 	 *
3000 	 * The memory barrier imposed by test&clear is paired with the
3001 	 * explicit one in memcg_kmem_mark_dead().
3002 	 */
3003 	if (memcg_kmem_test_and_clear_dead(memcg))
3004 		css_put(&memcg->css);
3005 }
3006 
3007 /*
3008  * helper for accessing a memcg's index. It will be used as an index in the
3009  * child cache array in kmem_cache, and also to derive its name. This function
3010  * will return -1 when this is not a kmem-limited memcg.
3011  */
3012 int memcg_cache_id(struct mem_cgroup *memcg)
3013 {
3014 	return memcg ? memcg->kmemcg_id : -1;
3015 }
3016 
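/*
 * The caches array is sized to twice the number of memcg groups, clamped
 * to [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE].  For example, 100
 * kmem-active groups would get a 200-slot array (assuming that value lies
 * within the clamp range), leaving headroom so the array does not have to
 * be reallocated for every new group.
 */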
3017 static size_t memcg_caches_array_size(int num_groups)
3018 {
3019 	ssize_t size;
3020 	if (num_groups <= 0)
3021 		return 0;
3022 
3023 	size = 2 * num_groups;
3024 	if (size < MEMCG_CACHES_MIN_SIZE)
3025 		size = MEMCG_CACHES_MIN_SIZE;
3026 	else if (size > MEMCG_CACHES_MAX_SIZE)
3027 		size = MEMCG_CACHES_MAX_SIZE;
3028 
3029 	return size;
3030 }
3031 
3032 /*
3033  * We should update the current array size iff all cache updates succeed. This
3034  * can only be done from the slab side. The slab mutex needs to be held when
3035  * calling this.
3036  */
3037 void memcg_update_array_size(int num)
3038 {
3039 	if (num > memcg_limited_groups_array_size)
3040 		memcg_limited_groups_array_size = memcg_caches_array_size(num);
3041 }
3042 
3043 static void kmem_cache_destroy_work_func(struct work_struct *w);
3044 
3045 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3046 {
3047 	struct memcg_cache_params *cur_params = s->memcg_params;
3048 
3049 	VM_BUG_ON(!is_root_cache(s));
3050 
3051 	if (num_groups > memcg_limited_groups_array_size) {
3052 		int i;
3053 		struct memcg_cache_params *new_params;
3054 		ssize_t size = memcg_caches_array_size(num_groups);
3055 
3056 		size *= sizeof(void *);
3057 		size += offsetof(struct memcg_cache_params, memcg_caches);
3058 
3059 		new_params = kzalloc(size, GFP_KERNEL);
3060 		if (!new_params)
3061 			return -ENOMEM;
3062 
3063 		new_params->is_root_cache = true;
3064 
3065 		/*
3066 		 * There is a chance it will be bigger than
3067 		 * memcg_limited_groups_array_size if we failed an allocation
3068 		 * in a cache, in which case all caches updated before it will
3069 		 * have a bigger array.
3070 		 *
3071 		 * But if that is the case, the data after
3072 		 * memcg_limited_groups_array_size is certainly unused
3073 		 */
3074 		for (i = 0; i < memcg_limited_groups_array_size; i++) {
3075 			if (!cur_params->memcg_caches[i])
3076 				continue;
3077 			new_params->memcg_caches[i] =
3078 						cur_params->memcg_caches[i];
3079 		}
3080 
3081 		/*
3082 		 * Ideally, we would wait until all caches succeed, and only
3083 		 * then free the old one. But this is not worth the extra
3084 		 * pointer per-cache we'd have to have for this.
3085 		 *
3086 		 * It is not a big deal if some caches are left with a size
3087 		 * bigger than the others. And all updates will reset this
3088 		 * anyway.
3089 		 */
3090 		rcu_assign_pointer(s->memcg_params, new_params);
3091 		if (cur_params)
3092 			kfree_rcu(cur_params, rcu_head);
3093 	}
3094 	return 0;
3095 }
3096 
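/*
 * The per-memcg cache name is derived from the root cache name, the memcg
 * cache id and the cgroup name using the "%s(%d:%s)" format below.  For
 * example, a root cache called "kmalloc-64" used from a cgroup named "foo"
 * with cache id 5 would (hypothetically) be named "kmalloc-64(5:foo)".
 */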
3097 char *memcg_create_cache_name(struct mem_cgroup *memcg,
3098 			      struct kmem_cache *root_cache)
3099 {
3100 	static char *buf = NULL;
3101 
3102 	/*
3103 	 * We need a mutex here to protect the shared buffer. Since this is
3104 	 * expected to be called only on cache creation, we can employ the
3105 	 * slab_mutex for that purpose.
3106 	 */
3107 	lockdep_assert_held(&slab_mutex);
3108 
3109 	if (!buf) {
3110 		buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
3111 		if (!buf)
3112 			return NULL;
3113 	}
3114 
3115 	cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1);
3116 	return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
3117 			 memcg_cache_id(memcg), buf);
3118 }
3119 
3120 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
3121 			     struct kmem_cache *root_cache)
3122 {
3123 	size_t size;
3124 
3125 	if (!memcg_kmem_enabled())
3126 		return 0;
3127 
3128 	if (!memcg) {
3129 		size = offsetof(struct memcg_cache_params, memcg_caches);
3130 		size += memcg_limited_groups_array_size * sizeof(void *);
3131 	} else
3132 		size = sizeof(struct memcg_cache_params);
3133 
3134 	s->memcg_params = kzalloc(size, GFP_KERNEL);
3135 	if (!s->memcg_params)
3136 		return -ENOMEM;
3137 
3138 	if (memcg) {
3139 		s->memcg_params->memcg = memcg;
3140 		s->memcg_params->root_cache = root_cache;
3141 		INIT_WORK(&s->memcg_params->destroy,
3142 				kmem_cache_destroy_work_func);
3143 		css_get(&memcg->css);
3144 	} else
3145 		s->memcg_params->is_root_cache = true;
3146 
3147 	return 0;
3148 }
3149 
3150 void memcg_free_cache_params(struct kmem_cache *s)
3151 {
3152 	if (!s->memcg_params)
3153 		return;
3154 	if (!s->memcg_params->is_root_cache)
3155 		css_put(&s->memcg_params->memcg->css);
3156 	kfree(s->memcg_params);
3157 }
3158 
3159 void memcg_register_cache(struct kmem_cache *s)
3160 {
3161 	struct kmem_cache *root;
3162 	struct mem_cgroup *memcg;
3163 	int id;
3164 
3165 	if (is_root_cache(s))
3166 		return;
3167 
3168 	/*
3169 	 * Holding the slab_mutex assures nobody will touch the memcg_caches
3170 	 * array while we are modifying it.
3171 	 */
3172 	lockdep_assert_held(&slab_mutex);
3173 
3174 	root = s->memcg_params->root_cache;
3175 	memcg = s->memcg_params->memcg;
3176 	id = memcg_cache_id(memcg);
3177 
3178 	/*
3179 	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
3180 	 * barrier here to ensure nobody will see the kmem_cache partially
3181 	 * initialized.
3182 	 */
3183 	smp_wmb();
3184 
3185 	/*
3186 	 * Initialize the pointer to this cache in its parent's memcg_params
3187 	 * before adding it to the memcg_slab_caches list, otherwise we can
3188 	 * fail to convert memcg_params_to_cache() while traversing the list.
3189 	 */
3190 	VM_BUG_ON(root->memcg_params->memcg_caches[id]);
3191 	root->memcg_params->memcg_caches[id] = s;
3192 
3193 	mutex_lock(&memcg->slab_caches_mutex);
3194 	list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
3195 	mutex_unlock(&memcg->slab_caches_mutex);
3196 }
3197 
3198 void memcg_unregister_cache(struct kmem_cache *s)
3199 {
3200 	struct kmem_cache *root;
3201 	struct mem_cgroup *memcg;
3202 	int id;
3203 
3204 	if (is_root_cache(s))
3205 		return;
3206 
3207 	/*
3208 	 * Holding the slab_mutex assures nobody will touch the memcg_caches
3209 	 * array while we are modifying it.
3210 	 */
3211 	lockdep_assert_held(&slab_mutex);
3212 
3213 	root = s->memcg_params->root_cache;
3214 	memcg = s->memcg_params->memcg;
3215 	id = memcg_cache_id(memcg);
3216 
3217 	mutex_lock(&memcg->slab_caches_mutex);
3218 	list_del(&s->memcg_params->list);
3219 	mutex_unlock(&memcg->slab_caches_mutex);
3220 
3221 	/*
3222 	 * Clear the pointer to this cache in its parent's memcg_params only
3223 	 * after removing it from the memcg_slab_caches list, otherwise we can
3224 	 * fail to convert memcg_params_to_cache() while traversing the list.
3225 	 */
3226 	VM_BUG_ON(root->memcg_params->memcg_caches[id] != s);
3227 	root->memcg_params->memcg_caches[id] = NULL;
3228 }
3229 
3230 /*
3231  * During the creation of a new cache, we need to disable our accounting
3232  * mechanism altogether. This is true even if we are not creating, but rather
3233  * just enqueueing new caches to be created.
3234  *
3235  * This is because that process will trigger allocations; some visible, like
3236  * explicit kmallocs to auxiliary data structures, name strings and internal
3237  * cache structures; some well concealed, like INIT_WORK() that can allocate
3238  * objects during debug.
3239  *
3240  * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3241  * to it. This may not be a bounded recursion: since the first cache creation
3242  * failed to complete (waiting on the allocation), we'll just try to create the
3243  * cache again, failing at the same point.
3244  *
3245  * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3246  * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3247  * inside the following two functions.
3248  */
3249 static inline void memcg_stop_kmem_account(void)
3250 {
3251 	VM_BUG_ON(!current->mm);
3252 	current->memcg_kmem_skip_account++;
3253 }
3254 
3255 static inline void memcg_resume_kmem_account(void)
3256 {
3257 	VM_BUG_ON(!current->mm);
3258 	current->memcg_kmem_skip_account--;
3259 }
3260 
3261 static void kmem_cache_destroy_work_func(struct work_struct *w)
3262 {
3263 	struct kmem_cache *cachep;
3264 	struct memcg_cache_params *p;
3265 
3266 	p = container_of(w, struct memcg_cache_params, destroy);
3267 
3268 	cachep = memcg_params_to_cache(p);
3269 
3270 	/*
3271 	 * If we get down to 0 after shrink, we could delete right away.
3272 	 * However, memcg_release_pages() already puts us back in the workqueue
3273 	 * in that case. If we proceed deleting, we'll get a dangling
3274 	 * reference, and removing the object from the workqueue in that case
3275 	 * is unnecessary complication. We are not a fast path.
3276 	 *
3277 	 * Note that this case is fundamentally different from racing with
3278 	 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
3279 	 * shrink_slab(): if mem_cgroup_destroy_cache() is called in
3280 	 * into the queue, but doing so from inside the worker racing to
3281 	 * destroy it.
3282 	 *
3283 	 * So if we aren't down to zero, we'll just schedule a worker and try
3284 	 * again
3285 	 */
3286 	if (atomic_read(&cachep->memcg_params->nr_pages) != 0)
3287 		kmem_cache_shrink(cachep);
3288 	else
3289 		kmem_cache_destroy(cachep);
3290 }
3291 
3292 void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
3293 {
3294 	if (!cachep->memcg_params->dead)
3295 		return;
3296 
3297 	/*
3298 	 * There are many ways in which we can get here.
3299 	 *
3300 	 * We can get to a memory-pressure situation while the delayed work is
3301 	 * still pending to run. The vmscan shrinkers can then release all
3302 	 * cache memory and get us to destruction. If this is the case, we'll
3303 	 * be executed twice, which is a bug (the second time will execute over
3304 	 * bogus data). In this case, cancelling the work should be fine.
3305 	 *
3306 	 * But we can also get here from the worker itself, if
3307 	 * kmem_cache_shrink is enough to shake all the remaining objects and
3308 	 * get the page count to 0. In this case, we'll deadlock if we try to
3309 	 * cancel the work (the worker runs with an internal lock held, which
3310 	 * is the same lock we would hold for cancel_work_sync().)
3311 	 *
3312 	 * Since we can't possibly know who got us here, just refrain from
3313 	 * running if there is already work pending
3314 	 */
3315 	if (work_pending(&cachep->memcg_params->destroy))
3316 		return;
3317 	/*
3318 	 * We have to defer the actual destroying to a workqueue, because
3319 	 * we might currently be in a context that cannot sleep.
3320 	 */
3321 	schedule_work(&cachep->memcg_params->destroy);
3322 }
3323 
3324 int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3325 {
3326 	struct kmem_cache *c;
3327 	int i, failed = 0;
3328 
3329 	/*
3330 	 * If the cache is being destroyed, we trust that there is no one else
3331 	 * requesting objects from it. Even if there are, the sanity checks in
3332 	 * kmem_cache_destroy should catch this ill case.
3333 	 *
3334 	 * Still, we don't want anyone else freeing memcg_caches under our
3335 	 * noses, which can happen if a new memcg comes to life. As usual,
3336 	 * we'll take the activate_kmem_mutex to protect ourselves against
3337 	 * this.
3338 	 */
3339 	mutex_lock(&activate_kmem_mutex);
3340 	for_each_memcg_cache_index(i) {
3341 		c = cache_from_memcg_idx(s, i);
3342 		if (!c)
3343 			continue;
3344 
3345 		/*
3346 		 * We will now manually delete the caches, so to avoid races
3347 		 * we need to cancel all pending destruction workers and
3348 		 * proceed with destruction ourselves.
3349 		 *
3350 		 * kmem_cache_destroy() will call kmem_cache_shrink internally,
3351 		 * and that could spawn the workers again: it is likely that
3352 		 * the cache still has active pages at this very moment.
3353 		 * This would lead us back to mem_cgroup_destroy_cache.
3354 		 *
3355 		 * But that will not execute at all if the "dead" flag is not
3356 		 * set, so flip it down to guarantee we are in control.
3357 		 */
3358 		c->memcg_params->dead = false;
3359 		cancel_work_sync(&c->memcg_params->destroy);
3360 		kmem_cache_destroy(c);
3361 
3362 		if (cache_from_memcg_idx(s, i))
3363 			failed++;
3364 	}
3365 	mutex_unlock(&activate_kmem_mutex);
3366 	return failed;
3367 }
3368 
3369 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3370 {
3371 	struct kmem_cache *cachep;
3372 	struct memcg_cache_params *params;
3373 
3374 	if (!memcg_kmem_is_active(memcg))
3375 		return;
3376 
3377 	mutex_lock(&memcg->slab_caches_mutex);
3378 	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
3379 		cachep = memcg_params_to_cache(params);
3380 		cachep->memcg_params->dead = true;
3381 		schedule_work(&cachep->memcg_params->destroy);
3382 	}
3383 	mutex_unlock(&memcg->slab_caches_mutex);
3384 }
3385 
3386 struct create_work {
3387 	struct mem_cgroup *memcg;
3388 	struct kmem_cache *cachep;
3389 	struct work_struct work;
3390 };
3391 
3392 static void memcg_create_cache_work_func(struct work_struct *w)
3393 {
3394 	struct create_work *cw = container_of(w, struct create_work, work);
3395 	struct mem_cgroup *memcg = cw->memcg;
3396 	struct kmem_cache *cachep = cw->cachep;
3397 
3398 	kmem_cache_create_memcg(memcg, cachep);
3399 	css_put(&memcg->css);
3400 	kfree(cw);
3401 }
3402 
3403 /*
3404  * Enqueue the creation of a per-memcg kmem_cache.
3405  */
3406 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3407 					 struct kmem_cache *cachep)
3408 {
3409 	struct create_work *cw;
3410 
3411 	cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
3412 	if (cw == NULL) {
3413 		css_put(&memcg->css);
3414 		return;
3415 	}
3416 
3417 	cw->memcg = memcg;
3418 	cw->cachep = cachep;
3419 
3420 	INIT_WORK(&cw->work, memcg_create_cache_work_func);
3421 	schedule_work(&cw->work);
3422 }
3423 
3424 static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3425 				       struct kmem_cache *cachep)
3426 {
3427 	/*
3428 	 * We need to stop accounting when we kmalloc, because if the
3429 	 * corresponding kmalloc cache is not yet created, the first allocation
3430 	 * in __memcg_create_cache_enqueue will recurse.
3431 	 *
3432 	 * However, it is better to enclose the whole function. Depending on
3433 	 * the debugging options enabled, INIT_WORK(), for instance, can
3434 	 * trigger an allocation. This too, will make us recurse. Because at
3435 	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3436 	 * the safest choice is to do it like this, wrapping the whole function.
3437 	 */
3438 	memcg_stop_kmem_account();
3439 	__memcg_create_cache_enqueue(memcg, cachep);
3440 	memcg_resume_kmem_account();
3441 }
3442 /*
3443  * Return the kmem_cache we're supposed to use for a slab allocation.
3444  * We try to use the current memcg's version of the cache.
3445  *
3446  * If the cache does not exist yet, if we are the first user of it,
3447  * we either create it immediately, if possible, or create it asynchronously
3448  * in a workqueue.
3449  * In the latter case, we will let the current allocation go through with
3450  * the original cache.
3451  *
3452  * Can't be called in interrupt context or from kernel threads.
3453  * This function needs to be called with rcu_read_lock() held.
3454  */
3455 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3456 					  gfp_t gfp)
3457 {
3458 	struct mem_cgroup *memcg;
3459 	struct kmem_cache *memcg_cachep;
3460 
3461 	VM_BUG_ON(!cachep->memcg_params);
3462 	VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3463 
3464 	if (!current->mm || current->memcg_kmem_skip_account)
3465 		return cachep;
3466 
3467 	rcu_read_lock();
3468 	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3469 
3470 	if (!memcg_can_account_kmem(memcg))
3471 		goto out;
3472 
3473 	memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3474 	if (likely(memcg_cachep)) {
3475 		cachep = memcg_cachep;
3476 		goto out;
3477 	}
3478 
3479 	/* The corresponding put will be done in the workqueue. */
3480 	if (!css_tryget(&memcg->css))
3481 		goto out;
3482 	rcu_read_unlock();
3483 
3484 	/*
3485 	 * If we are in a safe context (can wait, and not in interrupt
3486 	 * context), we could be predictable and return right away.
3487 	 * This would guarantee that the allocation being performed
3488 	 * already belongs in the new cache.
3489 	 *
3490 	 * However, there are some clashes that can arise from locking.
3491 	 * For instance, because we acquire the slab_mutex while doing
3492 	 * kmem_cache_dup, this means no further allocation could happen
3493 	 * with the slab_mutex held.
3494 	 *
3495 	 * Also, because cache creation issues get_online_cpus(), this
3496 	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3497 	 * that ends up reversed during cpu hotplug. (cpuset allocates
3498 	 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
3499 	 * better to defer everything.
3500 	 */
3501 	memcg_create_cache_enqueue(memcg, cachep);
3502 	return cachep;
3503 out:
3504 	rcu_read_unlock();
3505 	return cachep;
3506 }
3507 EXPORT_SYMBOL(__memcg_kmem_get_cache);
3508 
3509 /*
3510  * We need to verify if the allocation against current->mm->owner's memcg is
3511  * possible for the given order. But the page is not allocated yet, so we'll
3512  * need a further commit step to do the final arrangements.
3513  *
3514  * It is possible for the task to switch cgroups in the meantime, so at
3515  * commit time, we can't rely on task conversion any longer.  We'll then use
3516  * the handle argument to return to the caller which cgroup we should commit
3517  * against. We could also return the memcg directly and avoid the pointer
3518  * passing, but a boolean return value gives better semantics considering
3519  * the compiled-out case as well.
3520  *
3521  * Returning true means the allocation is possible.
3522  */
3523 bool
3524 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3525 {
3526 	struct mem_cgroup *memcg;
3527 	int ret;
3528 
3529 	*_memcg = NULL;
3530 
3531 	/*
3532 	 * Disabling accounting is only relevant for some specific memcg
3533 	 * internal allocations. Therefore we would initially not have such a
3534 	 * check here, since direct calls to the page allocator that are marked
3535 	 * with GFP_KMEMCG only happen outside memcg core. We are mostly
3536 	 * concerned with cache allocations, and by having this test at
3537 	 * memcg_kmem_get_cache, we are already able to relay the allocation to
3538 	 * the root cache and bypass the memcg cache altogether.
3539 	 *
3540 	 * There is one exception, though: the SLUB allocator does not create
3541 	 * large order caches, but rather services large kmallocs directly from
3542 	 * the page allocator. Therefore, the following sequence when backed by
3543 	 * the SLUB allocator:
3544 	 *
3545 	 *	memcg_stop_kmem_account();
3546 	 *	kmalloc(<large_number>)
3547 	 *	memcg_resume_kmem_account();
3548 	 *
3549 	 * would effectively ignore the fact that we should skip accounting,
3550 	 * since it will drive us directly to this function without passing
3551 	 * through the cache selector memcg_kmem_get_cache. Such large
3552 	 * allocations are extremely rare but can happen, for instance, for the
3553 	 * cache arrays. We bring this test here.
3554 	 */
3555 	if (!current->mm || current->memcg_kmem_skip_account)
3556 		return true;
3557 
3558 	memcg = get_mem_cgroup_from_mm(current->mm);
3559 
3560 	if (!memcg_can_account_kmem(memcg)) {
3561 		css_put(&memcg->css);
3562 		return true;
3563 	}
3564 
3565 	ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3566 	if (!ret)
3567 		*_memcg = memcg;
3568 
3569 	css_put(&memcg->css);
3570 	return (ret == 0);
3571 }
3572 
3573 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3574 			      int order)
3575 {
3576 	struct page_cgroup *pc;
3577 
3578 	VM_BUG_ON(mem_cgroup_is_root(memcg));
3579 
3580 	/* The page allocation failed. Revert */
3581 	if (!page) {
3582 		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3583 		return;
3584 	}
3585 
3586 	pc = lookup_page_cgroup(page);
3587 	lock_page_cgroup(pc);
3588 	pc->mem_cgroup = memcg;
3589 	SetPageCgroupUsed(pc);
3590 	unlock_page_cgroup(pc);
3591 }
3592 
3593 void __memcg_kmem_uncharge_pages(struct page *page, int order)
3594 {
3595 	struct mem_cgroup *memcg = NULL;
3596 	struct page_cgroup *pc;
3597 
3598 
3599 	pc = lookup_page_cgroup(page);
3600 	/*
3601 	 * Fast unlocked check. The flag might theoretically have changed, so
3602 	 * check again after locking.
3603 	 */
3604 	if (!PageCgroupUsed(pc))
3605 		return;
3606 
3607 	lock_page_cgroup(pc);
3608 	if (PageCgroupUsed(pc)) {
3609 		memcg = pc->mem_cgroup;
3610 		ClearPageCgroupUsed(pc);
3611 	}
3612 	unlock_page_cgroup(pc);
3613 
3614 	/*
3615 	 * The allocation is considered valid (accounted) only if there is a
3616 	 * memcg associated with the page.
3617 	 */
3618 	if (!memcg)
3619 		return;
3620 
3621 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3622 	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3623 }
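
/*
 * Illustrative sketch, not part of this file: a page allocation marked with
 * GFP_KMEMCG is expected to go through the charge/commit/uncharge trio above
 * roughly in this order; kmemcg_alloc_pages_example() is a made-up name.
 *
 *	static struct page *kmemcg_alloc_pages_example(gfp_t gfp, int order)
 *	{
 *		struct mem_cgroup *memcg;
 *		struct page *page;
 *
 *		if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
 *			return NULL;
 *		page = alloc_pages(gfp, order);
 *		if (memcg)
 *			__memcg_kmem_commit_charge(page, memcg, order);
 *		return page;
 *	}
 *
 * A failed charge refuses the allocation up front; a failed allocation is
 * reverted by the commit step itself (see the !page check above). Freeing
 * goes through __memcg_kmem_uncharge_pages(page, order), which looks up the
 * memcg recorded in the page_cgroup at commit time.
 */
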
3624 #else
3625 static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3626 {
3627 }
3628 #endif /* CONFIG_MEMCG_KMEM */
3629 
3630 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3631 
3632 #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
3633 /*
3634  * Because tail pages are not marked as "used", set that flag here. We're
3635  * under zone->lru_lock, 'splitting on pmd' and compound_lock.
3636  * charge/uncharge can never happen and move_account() is done under
3637  * compound_lock(), so we don't have to take care of races.
3638  */
3639 void mem_cgroup_split_huge_fixup(struct page *head)
3640 {
3641 	struct page_cgroup *head_pc = lookup_page_cgroup(head);
3642 	struct page_cgroup *pc;
3643 	struct mem_cgroup *memcg;
3644 	int i;
3645 
3646 	if (mem_cgroup_disabled())
3647 		return;
3648 
3649 	memcg = head_pc->mem_cgroup;
3650 	for (i = 1; i < HPAGE_PMD_NR; i++) {
3651 		pc = head_pc + i;
3652 		pc->mem_cgroup = memcg;
3653 		smp_wmb();/* see __commit_charge() */
3654 		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3655 	}
3656 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3657 		       HPAGE_PMD_NR);
3658 }
3659 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3660 
3661 /**
3662  * mem_cgroup_move_account - move account of the page
3663  * @page: the page
3664  * @nr_pages: number of regular pages (>1 for huge pages)
3665  * @pc:	page_cgroup of the page.
3666  * @from: mem_cgroup which the page is moved from.
3667  * @to:	mem_cgroup which the page is moved to. @from != @to.
3668  *
3669  * The caller must confirm the following:
3670  * - page is not on LRU (isolate_page() is useful.)
3671  * - compound_lock is held when nr_pages > 1
3672  *
3673  * This function doesn't "charge" the new cgroup and doesn't "uncharge"
3674  * the old cgroup.
3675  */
3676 static int mem_cgroup_move_account(struct page *page,
3677 				   unsigned int nr_pages,
3678 				   struct page_cgroup *pc,
3679 				   struct mem_cgroup *from,
3680 				   struct mem_cgroup *to)
3681 {
3682 	unsigned long flags;
3683 	int ret;
3684 	bool anon = PageAnon(page);
3685 
3686 	VM_BUG_ON(from == to);
3687 	VM_BUG_ON_PAGE(PageLRU(page), page);
3688 	/*
3689 	 * The page is isolated from the LRU, so the collapse function
3690 	 * will not handle this page. But page splitting can happen.
3691 	 * Do this check under compound_page_lock(), which the caller
3692 	 * must hold.
3693 	 */
3694 	ret = -EBUSY;
3695 	if (nr_pages > 1 && !PageTransHuge(page))
3696 		goto out;
3697 
3698 	lock_page_cgroup(pc);
3699 
3700 	ret = -EINVAL;
3701 	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3702 		goto unlock;
3703 
3704 	move_lock_mem_cgroup(from, &flags);
3705 
3706 	if (!anon && page_mapped(page)) {
3707 		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3708 			       nr_pages);
3709 		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3710 			       nr_pages);
3711 	}
3712 
3713 	if (PageWriteback(page)) {
3714 		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3715 			       nr_pages);
3716 		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3717 			       nr_pages);
3718 	}
3719 
3720 	mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
3721 
3722 	/* caller should have done css_get */
3723 	pc->mem_cgroup = to;
3724 	mem_cgroup_charge_statistics(to, page, anon, nr_pages);
3725 	move_unlock_mem_cgroup(from, &flags);
3726 	ret = 0;
3727 unlock:
3728 	unlock_page_cgroup(pc);
3729 	/*
3730 	 * check events
3731 	 */
3732 	memcg_check_events(to, page);
3733 	memcg_check_events(from, page);
3734 out:
3735 	return ret;
3736 }
3737 
3738 /**
3739  * mem_cgroup_move_parent - moves page to the parent group
3740  * @page: the page to move
3741  * @pc: page_cgroup of the page
3742  * @child: page's cgroup
3743  *
3744  * Move charges to the page's parent cgroup, or to the root cgroup if the
3745  * group has no parent (aka use_hierarchy==0).
3746  * Although this might fail (get_page_unless_zero, isolate_lru_page or
3747  * mem_cgroup_move_account fails) the failure is always temporary and
3748  * it signals a race with a page removal/uncharge or migration. In the
3749  * first case the page is on the way out and it will vanish from the LRU
3750  * on the next attempt and the call should be retried later.
3751  * Isolation from the LRU fails only if the page has been isolated from
3752  * the LRU since we looked at it, and that usually means either global
3753  * reclaim or migration is going on. The page will either get back to the
3754  * LRU or vanish.
3755  * Finally, mem_cgroup_move_account fails only if the page got uncharged
3756  * (!PageCgroupUsed) or moved to a different group. The page will
3757  * disappear on the next attempt.
3758  */
3759 static int mem_cgroup_move_parent(struct page *page,
3760 				  struct page_cgroup *pc,
3761 				  struct mem_cgroup *child)
3762 {
3763 	struct mem_cgroup *parent;
3764 	unsigned int nr_pages;
3765 	unsigned long uninitialized_var(flags);
3766 	int ret;
3767 
3768 	VM_BUG_ON(mem_cgroup_is_root(child));
3769 
3770 	ret = -EBUSY;
3771 	if (!get_page_unless_zero(page))
3772 		goto out;
3773 	if (isolate_lru_page(page))
3774 		goto put;
3775 
3776 	nr_pages = hpage_nr_pages(page);
3777 
3778 	parent = parent_mem_cgroup(child);
3779 	/*
3780 	 * If no parent, move charges to root cgroup.
3781 	 */
3782 	if (!parent)
3783 		parent = root_mem_cgroup;
3784 
3785 	if (nr_pages > 1) {
3786 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3787 		flags = compound_lock_irqsave(page);
3788 	}
3789 
3790 	ret = mem_cgroup_move_account(page, nr_pages,
3791 				pc, child, parent);
3792 	if (!ret)
3793 		__mem_cgroup_cancel_local_charge(child, nr_pages);
3794 
3795 	if (nr_pages > 1)
3796 		compound_unlock_irqrestore(page, flags);
3797 	putback_lru_page(page);
3798 put:
3799 	put_page(page);
3800 out:
3801 	return ret;
3802 }
3803 
3804 int mem_cgroup_charge_anon(struct page *page,
3805 			      struct mm_struct *mm, gfp_t gfp_mask)
3806 {
3807 	unsigned int nr_pages = 1;
3808 	struct mem_cgroup *memcg;
3809 	bool oom = true;
3810 
3811 	if (mem_cgroup_disabled())
3812 		return 0;
3813 
3814 	VM_BUG_ON_PAGE(page_mapped(page), page);
3815 	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
3816 	VM_BUG_ON(!mm);
3817 
3818 	if (PageTransHuge(page)) {
3819 		nr_pages <<= compound_order(page);
3820 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3821 		/*
3822 		 * Never OOM-kill a process for a huge page.  The
3823 		 * fault handler will fall back to regular pages.
3824 		 */
3825 		oom = false;
3826 	}
3827 
3828 	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
3829 	if (!memcg)
3830 		return -ENOMEM;
3831 	__mem_cgroup_commit_charge(memcg, page, nr_pages,
3832 				   MEM_CGROUP_CHARGE_TYPE_ANON, false);
3833 	return 0;
3834 }
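
/*
 * Illustrative sketch, not part of this file: an anonymous fault path is
 * expected to charge a freshly allocated page with mem_cgroup_charge_anon()
 * before mapping it, roughly like this (error handling trimmed, shape of
 * the caller assumed):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL)) {
 *		page_cache_release(page);
 *		return VM_FAULT_OOM;
 *	}
 *	... set up the pte and map the page ...
 */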
3835 
3836 /*
3837  * During swap-in (try_charge -> commit or cancel) the page is locked.
3838  * When try_charge() returns successfully, one reference to the memcg is
3839  * acquired without a struct page_cgroup. This reference will be consumed
3840  * by "commit()" or dropped by "cancel()".
3841  */
3842 static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3843 					  struct page *page,
3844 					  gfp_t mask,
3845 					  struct mem_cgroup **memcgp)
3846 {
3847 	struct mem_cgroup *memcg = NULL;
3848 	struct page_cgroup *pc;
3849 	int ret;
3850 
3851 	pc = lookup_page_cgroup(page);
3852 	/*
3853 	 * Every swap fault against a single page tries to charge the
3854 	 * page; bail as early as possible.  shmem_unuse() encounters
3855 	 * already charged pages, too.  The USED bit is protected by
3856 	 * the page lock, which serializes swap cache removal, which
3857 	 * in turn serializes uncharging.
3858 	 */
3859 	if (PageCgroupUsed(pc))
3860 		goto out;
3861 	if (do_swap_account)
3862 		memcg = try_get_mem_cgroup_from_page(page);
3863 	if (!memcg)
3864 		memcg = get_mem_cgroup_from_mm(mm);
3865 	ret = mem_cgroup_try_charge(memcg, mask, 1, true);
3866 	css_put(&memcg->css);
3867 	if (ret == -EINTR)
3868 		memcg = root_mem_cgroup;
3869 	else if (ret)
3870 		return ret;
3871 out:
3872 	*memcgp = memcg;
3873 	return 0;
3874 }
3875 
3876 int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
3877 				 gfp_t gfp_mask, struct mem_cgroup **memcgp)
3878 {
3879 	if (mem_cgroup_disabled()) {
3880 		*memcgp = NULL;
3881 		return 0;
3882 	}
3883 	/*
3884 	 * A racing thread's fault, or swapoff, may have already
3885 	 * updated the pte, and even removed the page from swap cache: in
3886 	 * those cases unuse_pte()'s pte_same() test will fail; but
3887 	 * there's also a KSM case which does need to charge the page.
3888 	 */
3889 	if (!PageSwapCache(page)) {
3890 		struct mem_cgroup *memcg;
3891 
3892 		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3893 		if (!memcg)
3894 			return -ENOMEM;
3895 		*memcgp = memcg;
3896 		return 0;
3897 	}
3898 	return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
3899 }
3900 
3901 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
3902 {
3903 	if (mem_cgroup_disabled())
3904 		return;
3905 	if (!memcg)
3906 		return;
3907 	__mem_cgroup_cancel_charge(memcg, 1);
3908 }
3909 
3910 static void
3911 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
3912 					enum charge_type ctype)
3913 {
3914 	if (mem_cgroup_disabled())
3915 		return;
3916 	if (!memcg)
3917 		return;
3918 
3919 	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
3920 	/*
3921 	 * Now the swapped-in data is in memory. This means the page may be
3922 	 * counted both as mem and swap, i.e. double counted.
3923 	 * Fix it by uncharging from memsw. The SwapCache is stable
3924 	 * under lock_page(), but reuse_swap_page() in do_swap_page()
3925 	 * (mm/memory.c) may call delete_from_swap_cache() before we reach here.
3926 	 */
3927 	if (do_swap_account && PageSwapCache(page)) {
3928 		swp_entry_t ent = {.val = page_private(page)};
3929 		mem_cgroup_uncharge_swap(ent);
3930 	}
3931 }
3932 
3933 void mem_cgroup_commit_charge_swapin(struct page *page,
3934 				     struct mem_cgroup *memcg)
3935 {
3936 	__mem_cgroup_commit_charge_swapin(page, memcg,
3937 					  MEM_CGROUP_CHARGE_TYPE_ANON);
3938 }
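
/*
 * Illustrative sketch, not part of this file: the swap-in fault path is
 * expected to pair the try/commit/cancel calls above around the pte update,
 * roughly like this (shape of the caller assumed, locking elided):
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_fail;
 *	... lock the page table ...
 *	if (!pte_same(*pte, orig_pte)) {
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *		goto out_unlock;
 *	}
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *	... set the pte ...
 */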
3939 
3940 int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
3941 				gfp_t gfp_mask)
3942 {
3943 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3944 	struct mem_cgroup *memcg;
3945 	int ret;
3946 
3947 	if (mem_cgroup_disabled())
3948 		return 0;
3949 	if (PageCompound(page))
3950 		return 0;
3951 
3952 	if (PageSwapCache(page)) { /* shmem */
3953 		ret = __mem_cgroup_try_charge_swapin(mm, page,
3954 						     gfp_mask, &memcg);
3955 		if (ret)
3956 			return ret;
3957 		__mem_cgroup_commit_charge_swapin(page, memcg, type);
3958 		return 0;
3959 	}
3960 
3961 	/*
3962 	 * Page cache insertions can happen without an actual mm
3963 	 * context, e.g. during disk probing on boot.
3964 	 */
3965 	if (unlikely(!mm))
3966 		memcg = root_mem_cgroup;
3967 	else {
3968 		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3969 		if (!memcg)
3970 			return -ENOMEM;
3971 	}
3972 	__mem_cgroup_commit_charge(memcg, page, 1, type, false);
3973 	return 0;
3974 }
3975 
3976 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
3977 				   unsigned int nr_pages,
3978 				   const enum charge_type ctype)
3979 {
3980 	struct memcg_batch_info *batch = NULL;
3981 	bool uncharge_memsw = true;
3982 
3983 	/* If swapout, usage of swap doesn't decrease */
3984 	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3985 		uncharge_memsw = false;
3986 
3987 	batch = &current->memcg_batch;
3988 	/*
3989 	 * Usually we do css_get() when we remember a memcg pointer.
3990 	 * But in this case, we keep res->usage until the end of a series of
3991 	 * uncharges, so it's ok to ignore the memcg's refcount.
3992 	 */
3993 	if (!batch->memcg)
3994 		batch->memcg = memcg;
3995 	/*
3996 	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
3997 	 * In those cases, all pages freed continuously can be expected to be in
3998 	 * the same cgroup and we have a chance to coalesce uncharges.
3999 	 * But we uncharge one by one if the task is being OOM-killed (TIF_MEMDIE)
4000 	 * because we want to uncharge as soon as possible.
4001 	 */
4002 
4003 	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
4004 		goto direct_uncharge;
4005 
4006 	if (nr_pages > 1)
4007 		goto direct_uncharge;
4008 
4009 	/*
4010 	 * In the typical case, batch->memcg == memcg. This means we can
4011 	 * merge a series of uncharges into a single res_counter uncharge.
4012 	 * If not, we uncharge the res_counter one by one.
4013 	 */
4014 	if (batch->memcg != memcg)
4015 		goto direct_uncharge;
4016 	/* remember freed charge and uncharge it later */
4017 	batch->nr_pages++;
4018 	if (uncharge_memsw)
4019 		batch->memsw_nr_pages++;
4020 	return;
4021 direct_uncharge:
4022 	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
4023 	if (uncharge_memsw)
4024 		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
4025 	if (unlikely(batch->memcg != memcg))
4026 		memcg_oom_recover(memcg);
4027 }
4028 
4029 /*
4030  * uncharge if !page_mapped(page)
4031  */
4032 static struct mem_cgroup *
4033 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4034 			     bool end_migration)
4035 {
4036 	struct mem_cgroup *memcg = NULL;
4037 	unsigned int nr_pages = 1;
4038 	struct page_cgroup *pc;
4039 	bool anon;
4040 
4041 	if (mem_cgroup_disabled())
4042 		return NULL;
4043 
4044 	if (PageTransHuge(page)) {
4045 		nr_pages <<= compound_order(page);
4046 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
4047 	}
4048 	/*
4049 	 * Check if our page_cgroup is valid
4050 	 */
4051 	pc = lookup_page_cgroup(page);
4052 	if (unlikely(!PageCgroupUsed(pc)))
4053 		return NULL;
4054 
4055 	lock_page_cgroup(pc);
4056 
4057 	memcg = pc->mem_cgroup;
4058 
4059 	if (!PageCgroupUsed(pc))
4060 		goto unlock_out;
4061 
4062 	anon = PageAnon(page);
4063 
4064 	switch (ctype) {
4065 	case MEM_CGROUP_CHARGE_TYPE_ANON:
4066 		/*
4067 		 * Generally PageAnon tells if it's the anon statistics to be
4068 		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
4069 		 * used before the page reached the stage of being marked PageAnon.
4070 		 */
4071 		anon = true;
4072 		/* fallthrough */
4073 	case MEM_CGROUP_CHARGE_TYPE_DROP:
4074 		/* See mem_cgroup_prepare_migration() */
4075 		if (page_mapped(page))
4076 			goto unlock_out;
4077 		/*
4078 		 * Pages under migration may not be uncharged.  But
4079 		 * end_migration() /must/ be the one uncharging the
4080 		 * unused post-migration page and so it has to call
4081 		 * here with the migration bit still set.  See the
4082 		 * res_counter handling below.
4083 		 */
4084 		if (!end_migration && PageCgroupMigration(pc))
4085 			goto unlock_out;
4086 		break;
4087 	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
4088 		if (!PageAnon(page)) {	/* Shared memory */
4089 			if (page->mapping && !page_is_file_cache(page))
4090 				goto unlock_out;
4091 		} else if (page_mapped(page)) /* Anon */
4092 				goto unlock_out;
4093 		break;
4094 	default:
4095 		break;
4096 	}
4097 
4098 	mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
4099 
4100 	ClearPageCgroupUsed(pc);
4101 	/*
4102 	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
4103 	 * freed from the LRU. This is safe because an uncharged page is expected
4104 	 * not to be reused (it is freed soon). The exception is SwapCache, which
4105 	 * is handled by special functions.
4106 	 */
4107 
4108 	unlock_page_cgroup(pc);
4109 	/*
4110 	 * Even after unlock, we still hold a charge in memcg->res.usage here,
4111 	 * so this memcg will never be freed and it's safe to call css_get().
4112 	 */
4113 	memcg_check_events(memcg, page);
4114 	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
4115 		mem_cgroup_swap_statistics(memcg, true);
4116 		css_get(&memcg->css);
4117 	}
4118 	/*
4119 	 * Migration does not charge the res_counter for the
4120 	 * replacement page, so leave it alone when phasing out the
4121 	 * page that is unused after the migration.
4122 	 */
4123 	if (!end_migration && !mem_cgroup_is_root(memcg))
4124 		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
4125 
4126 	return memcg;
4127 
4128 unlock_out:
4129 	unlock_page_cgroup(pc);
4130 	return NULL;
4131 }
4132 
4133 void mem_cgroup_uncharge_page(struct page *page)
4134 {
4135 	/* early check. */
4136 	if (page_mapped(page))
4137 		return;
4138 	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
4139 	/*
4140 	 * If the page is in swap cache, uncharge should be deferred
4141 	 * to the swap path, which also properly accounts swap usage
4142 	 * and handles memcg lifetime.
4143 	 *
4144 	 * Note that this check is not stable and reclaim may add the
4145 	 * page to swap cache at any time after this.  However, if the
4146 	 * page is not in swap cache by the time page->mapcount hits
4147 	 * 0, there won't be any page table references to the swap
4148 	 * slot, and reclaim will free it and not actually write the
4149 	 * page to disk.
4150 	 */
4151 	if (PageSwapCache(page))
4152 		return;
4153 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
4154 }
4155 
4156 void mem_cgroup_uncharge_cache_page(struct page *page)
4157 {
4158 	VM_BUG_ON_PAGE(page_mapped(page), page);
4159 	VM_BUG_ON_PAGE(page->mapping, page);
4160 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
4161 }
4162 
4163 /*
4164  * uncharge_start/uncharge_end are called from unmap_page_range, inode
4165  * invalidate and truncate. In those cases, pages are freed continuously and
4166  * we can expect them to be in the same memcg. Each of these callers itself
4167  * limits the number of pages freed at once, so uncharge_start/end() pair up
4168  * properly. The calls may be nested (multiple times) within one context.
4169  */
4170 
4171 void mem_cgroup_uncharge_start(void)
4172 {
4173 	current->memcg_batch.do_batch++;
4174 	/* We can do nest. */
4175 	if (current->memcg_batch.do_batch == 1) {
4176 		current->memcg_batch.memcg = NULL;
4177 		current->memcg_batch.nr_pages = 0;
4178 		current->memcg_batch.memsw_nr_pages = 0;
4179 	}
4180 }
4181 
4182 void mem_cgroup_uncharge_end(void)
4183 {
4184 	struct memcg_batch_info *batch = &current->memcg_batch;
4185 
4186 	if (!batch->do_batch)
4187 		return;
4188 
4189 	batch->do_batch--;
4190 	if (batch->do_batch) /* If stacked, do nothing. */
4191 		return;
4192 
4193 	if (!batch->memcg)
4194 		return;
4195 	/*
4196 	 * This "batch->memcg" is valid without any css_get/put etc...
4197 	 * because we hide charges behind us.
4198 	 */
4199 	if (batch->nr_pages)
4200 		res_counter_uncharge(&batch->memcg->res,
4201 				     batch->nr_pages * PAGE_SIZE);
4202 	if (batch->memsw_nr_pages)
4203 		res_counter_uncharge(&batch->memcg->memsw,
4204 				     batch->memsw_nr_pages * PAGE_SIZE);
4205 	memcg_oom_recover(batch->memcg);
4206 	/* forget this pointer (for sanity check) */
4207 	batch->memcg = NULL;
4208 }
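
/*
 * Illustrative sketch, not part of this file: bulk freeing paths are
 * expected to bracket their per-page uncharges with the pair above so the
 * res_counter is touched once per batch instead of once per page:
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &pages_to_free, lru) {
 *		...
 *		mem_cgroup_uncharge_cache_page(page);
 *		...
 *	}
 *	mem_cgroup_uncharge_end();
 *
 * Nesting is allowed; only the outermost mem_cgroup_uncharge_end() flushes
 * the batched charges.
 */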
4209 
4210 #ifdef CONFIG_SWAP
4211 /*
4212  * Called after __delete_from_swap_cache(); drops the "page" account.
4213  * The memcg information is recorded in the swap_cgroup of "ent".
4214  */
4215 void
4216 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
4217 {
4218 	struct mem_cgroup *memcg;
4219 	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4220 
4221 	if (!swapout) /* this was a swap cache but the swap is unused ! */
4222 		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4223 
4224 	memcg = __mem_cgroup_uncharge_common(page, ctype, false);
4225 
4226 	/*
4227 	 * Record the memcg information. If swapout && memcg != NULL,
4228 	 * css_get() was called in uncharge().
4229 	 */
4230 	if (do_swap_account && swapout && memcg)
4231 		swap_cgroup_record(ent, mem_cgroup_id(memcg));
4232 }
4233 #endif
4234 
4235 #ifdef CONFIG_MEMCG_SWAP
4236 /*
4237  * Called from swap_entry_free(). Removes the record in swap_cgroup and
4238  * uncharges the "memsw" account.
4239  */
4240 void mem_cgroup_uncharge_swap(swp_entry_t ent)
4241 {
4242 	struct mem_cgroup *memcg;
4243 	unsigned short id;
4244 
4245 	if (!do_swap_account)
4246 		return;
4247 
4248 	id = swap_cgroup_record(ent, 0);
4249 	rcu_read_lock();
4250 	memcg = mem_cgroup_lookup(id);
4251 	if (memcg) {
4252 		/*
4253 		 * We uncharge this because the swap entry is freed.
4254 		 * This memcg may be an obsolete one; we avoid calling css_tryget.
4255 		 */
4256 		if (!mem_cgroup_is_root(memcg))
4257 			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
4258 		mem_cgroup_swap_statistics(memcg, false);
4259 		css_put(&memcg->css);
4260 	}
4261 	rcu_read_unlock();
4262 }
4263 
4264 /**
4265  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4266  * @entry: swap entry to be moved
4267  * @from:  mem_cgroup which the entry is moved from
4268  * @to:  mem_cgroup which the entry is moved to
4269  *
4270  * It succeeds only when the swap_cgroup's record for this entry is the same
4271  * as the mem_cgroup's id of @from.
4272  *
4273  * Returns 0 on success, -EINVAL on failure.
4274  *
4275  * The caller must have charged to @to, i.e. called res_counter_charge() for
4276  * both res and memsw, and called css_get().
4277  */
4278 static int mem_cgroup_move_swap_account(swp_entry_t entry,
4279 				struct mem_cgroup *from, struct mem_cgroup *to)
4280 {
4281 	unsigned short old_id, new_id;
4282 
4283 	old_id = mem_cgroup_id(from);
4284 	new_id = mem_cgroup_id(to);
4285 
4286 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
4287 		mem_cgroup_swap_statistics(from, false);
4288 		mem_cgroup_swap_statistics(to, true);
4289 		/*
4290 		 * This function is only called from task migration context now.
4291 		 * It postpones res_counter and refcount handling till the end
4292 		 * of task migration(mem_cgroup_clear_mc()) for performance
4293 		 * of task migration (mem_cgroup_clear_mc()) for performance
4294 		 * improvement. But we cannot postpone css_get(to) because if
4295 		 * refcount of @to might be decreased to 0.
4296 		 *
4297 		 * We are in attach() phase, so the cgroup is guaranteed to be
4298 		 * alive, so we can just call css_get().
4299 		 */
4300 		css_get(&to->css);
4301 		return 0;
4302 	}
4303 	return -EINVAL;
4304 }
4305 #else
4306 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
4307 				struct mem_cgroup *from, struct mem_cgroup *to)
4308 {
4309 	return -EINVAL;
4310 }
4311 #endif
4312 
4313 /*
4314  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4315  * page belongs to.
4316  */
4317 void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4318 				  struct mem_cgroup **memcgp)
4319 {
4320 	struct mem_cgroup *memcg = NULL;
4321 	unsigned int nr_pages = 1;
4322 	struct page_cgroup *pc;
4323 	enum charge_type ctype;
4324 
4325 	*memcgp = NULL;
4326 
4327 	if (mem_cgroup_disabled())
4328 		return;
4329 
4330 	if (PageTransHuge(page))
4331 		nr_pages <<= compound_order(page);
4332 
4333 	pc = lookup_page_cgroup(page);
4334 	lock_page_cgroup(pc);
4335 	if (PageCgroupUsed(pc)) {
4336 		memcg = pc->mem_cgroup;
4337 		css_get(&memcg->css);
4338 		/*
4339 		 * When migrating an anonymous page, its mapcount goes down
4340 		 * to 0 and uncharge() will be called. But, even if it's fully
4341 		 * unmapped, migration may fail and this page has to be
4342 		 * charged again. We set the MIGRATION flag here and delay the
4343 		 * uncharge until end_migration() is called.
4344 		 *
4345 		 * Corner case thinking:
4346 		 * A)
4347 		 * The old page was mapped as Anon and was unmapped and freed
4348 		 * while migration was ongoing.
4349 		 * If unmap finds the old page, uncharge() of it will be delayed
4350 		 * until end_migration(). If unmap finds the new page, it's
4351 		 * uncharged when its mapcount goes from 1 to 0. If the unmap code
4352 		 * finds a swap migration entry, the new page will not be mapped
4353 		 * and end_migration() will find it (mapcount == 0).
4354 		 *
4355 		 * B)
4356 		 * The old page was mapped but migration fails, so the kernel
4357 		 * remaps it. A charge for it is kept by the MIGRATION flag even
4358 		 * if its mapcount goes down to 0. The remap then succeeds
4359 		 * without charging it again.
4360 		 *
4361 		 * C)
4362 		 * The "old" page is under lock_page() until the end of
4363 		 * migration, so the old page itself will not be swapped out.
4364 		 * If the new page is swapped out before end_migration(), our
4365 		 * hook in the usual swap-out path will catch the event.
4366 		 */
4367 		if (PageAnon(page))
4368 			SetPageCgroupMigration(pc);
4369 	}
4370 	unlock_page_cgroup(pc);
4371 	/*
4372 	 * If the page is not charged at this point,
4373 	 * we return here.
4374 	 */
4375 	if (!memcg)
4376 		return;
4377 
4378 	*memcgp = memcg;
4379 	/*
4380 	 * We charge the new page before it's used/mapped. So, even if unlock_page()
4381 	 * is called before end_migration, we can catch all events on this new
4382 	 * page. In case the new page is migrated but not remapped, its mapcount
4383 	 * will finally be 0 and we call uncharge in end_migration().
4384 	 */
4385 	if (PageAnon(page))
4386 		ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
4387 	else
4388 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
4389 	/*
4390 	 * The page is committed to the memcg, but it's not actually
4391 	 * charged to the res_counter since we plan on replacing the
4392 	 * old one and only one page is going to be left afterwards.
4393 	 */
4394 	__mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
4395 }
4396 
4397 /* remove redundant charge if migration failed */
4398 void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4399 	struct page *oldpage, struct page *newpage, bool migration_ok)
4400 {
4401 	struct page *used, *unused;
4402 	struct page_cgroup *pc;
4403 	bool anon;
4404 
4405 	if (!memcg)
4406 		return;
4407 
4408 	if (!migration_ok) {
4409 		used = oldpage;
4410 		unused = newpage;
4411 	} else {
4412 		used = newpage;
4413 		unused = oldpage;
4414 	}
4415 	anon = PageAnon(used);
4416 	__mem_cgroup_uncharge_common(unused,
4417 				     anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4418 				     : MEM_CGROUP_CHARGE_TYPE_CACHE,
4419 				     true);
4420 	css_put(&memcg->css);
4421 	/*
4422 	 * We disallowed uncharge of pages under migration because the mapcount
4423 	 * of the page goes down to zero, temporarily.
4424 	 * Clear the flag and check whether the page should be charged.
4425 	 */
4426 	pc = lookup_page_cgroup(oldpage);
4427 	lock_page_cgroup(pc);
4428 	ClearPageCgroupMigration(pc);
4429 	unlock_page_cgroup(pc);
4430 
4431 	/*
4432 	 * If the page is file cache, radix-tree replacement is atomic
4433 	 * and we can skip this check. When it was an Anon page, its mapcount
4434 	 * goes down to 0, but because we added the MIGRATION flag, it's not
4435 	 * uncharged yet. There are several cases, but the page->mapcount check
4436 	 * and the USED bit check in mem_cgroup_uncharge_page() do enough
4437 	 * checking. (see prepare_charge() also)
4438 	 */
4439 	if (anon)
4440 		mem_cgroup_uncharge_page(used);
4441 }
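
/*
 * Illustrative sketch, not part of this file: the migration code is expected
 * to wrap the actual page move with the prepare/end pair above, roughly like
 * this (shape of the mm/migrate.c caller assumed):
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg);
 *	rc = ... try to unmap and move the page contents ...;
 *	mem_cgroup_end_migration(memcg, page, newpage,
 *				 rc == MIGRATEPAGE_SUCCESS);
 *
 * end_migration() then uncharges whichever of the two pages ended up unused.
 */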
4442 
4443 /*
4444  * When replacing page cache, the newpage is not under any memcg but it is on
4445  * the LRU. So, this function doesn't touch the res_counter but handles the
4446  * LRU correctly. Both pages are locked so we cannot race with uncharge.
4447  */
4448 void mem_cgroup_replace_page_cache(struct page *oldpage,
4449 				  struct page *newpage)
4450 {
4451 	struct mem_cgroup *memcg = NULL;
4452 	struct page_cgroup *pc;
4453 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4454 
4455 	if (mem_cgroup_disabled())
4456 		return;
4457 
4458 	pc = lookup_page_cgroup(oldpage);
4459 	/* fix accounting on old pages */
4460 	lock_page_cgroup(pc);
4461 	if (PageCgroupUsed(pc)) {
4462 		memcg = pc->mem_cgroup;
4463 		mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
4464 		ClearPageCgroupUsed(pc);
4465 	}
4466 	unlock_page_cgroup(pc);
4467 
4468 	/*
4469 	 * When called from shmem_replace_page(), in some cases the
4470 	 * oldpage has already been charged, and in some cases not.
4471 	 */
4472 	if (!memcg)
4473 		return;
4474 	/*
4475 	 * Even if newpage->mapping was NULL before starting the replacement,
4476 	 * the newpage may already be on the LRU (or a pagevec for the LRU). We
4477 	 * take the LRU lock while we overwrite pc->mem_cgroup.
4478 	 */
4479 	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
4480 }
4481 
4482 #ifdef CONFIG_DEBUG_VM
4483 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4484 {
4485 	struct page_cgroup *pc;
4486 
4487 	pc = lookup_page_cgroup(page);
4488 	/*
4489 	 * Can be NULL while feeding pages into the page allocator for
4490 	 * the first time, i.e. during boot or memory hotplug;
4491 	 * or when mem_cgroup_disabled().
4492 	 */
4493 	if (likely(pc) && PageCgroupUsed(pc))
4494 		return pc;
4495 	return NULL;
4496 }
4497 
4498 bool mem_cgroup_bad_page_check(struct page *page)
4499 {
4500 	if (mem_cgroup_disabled())
4501 		return false;
4502 
4503 	return lookup_page_cgroup_used(page) != NULL;
4504 }
4505 
4506 void mem_cgroup_print_bad_page(struct page *page)
4507 {
4508 	struct page_cgroup *pc;
4509 
4510 	pc = lookup_page_cgroup_used(page);
4511 	if (pc) {
4512 		pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4513 			 pc, pc->flags, pc->mem_cgroup);
4514 	}
4515 }
4516 #endif
4517 
4518 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
4519 				unsigned long long val)
4520 {
4521 	int retry_count;
4522 	u64 memswlimit, memlimit;
4523 	int ret = 0;
4524 	int children = mem_cgroup_count_children(memcg);
4525 	u64 curusage, oldusage;
4526 	int enlarge;
4527 
4528 	/*
4529 	 * To keep hierarchical_reclaim simple, how long we should retry
4530 	 * depends on the caller. We set our retry-count to be a function
4531 	 * of the number of children which we should visit in this loop.
4532 	 */
4533 	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4534 
4535 	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4536 
4537 	enlarge = 0;
4538 	while (retry_count) {
4539 		if (signal_pending(current)) {
4540 			ret = -EINTR;
4541 			break;
4542 		}
4543 		/*
4544 		 * Rather than hiding all of this in some helper, do it open
4545 		 * coded so it's visible what really happens:
4546 		 * we have to guarantee memcg->res.limit <= memcg->memsw.limit.
4547 		 */
4548 		mutex_lock(&set_limit_mutex);
4549 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4550 		if (memswlimit < val) {
4551 			ret = -EINVAL;
4552 			mutex_unlock(&set_limit_mutex);
4553 			break;
4554 		}
4555 
4556 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4557 		if (memlimit < val)
4558 			enlarge = 1;
4559 
4560 		ret = res_counter_set_limit(&memcg->res, val);
4561 		if (!ret) {
4562 			if (memswlimit == val)
4563 				memcg->memsw_is_minimum = true;
4564 			else
4565 				memcg->memsw_is_minimum = false;
4566 		}
4567 		mutex_unlock(&set_limit_mutex);
4568 
4569 		if (!ret)
4570 			break;
4571 
4572 		mem_cgroup_reclaim(memcg, GFP_KERNEL,
4573 				   MEM_CGROUP_RECLAIM_SHRINK);
4574 		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4575 		/* Usage is reduced ? */
4576 		if (curusage >= oldusage)
4577 			retry_count--;
4578 		else
4579 			oldusage = curusage;
4580 	}
4581 	if (!ret && enlarge)
4582 		memcg_oom_recover(memcg);
4583 
4584 	return ret;
4585 }
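
/*
 * The user-visible effect of the invariant above: with
 * memory.memsw.limit_in_bytes set to, say, 512M, raising
 * memory.limit_in_bytes beyond 512M fails with -EINVAL until the memsw limit
 * is raised first; symmetrically, mem_cgroup_resize_memsw_limit() below
 * refuses to set memsw.limit_in_bytes below the current memory limit.
 */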
4586 
4587 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4588 					unsigned long long val)
4589 {
4590 	int retry_count;
4591 	u64 memlimit, memswlimit, oldusage, curusage;
4592 	int children = mem_cgroup_count_children(memcg);
4593 	int ret = -EBUSY;
4594 	int enlarge = 0;
4595 
4596 	/* see mem_cgroup_resize_limit() */
4597 	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
4598 	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4599 	while (retry_count) {
4600 		if (signal_pending(current)) {
4601 			ret = -EINTR;
4602 			break;
4603 		}
4604 		/*
4605 		 * Rather than hiding all of this in some helper, do it open
4606 		 * coded so it's visible what really happens:
4607 		 * we have to guarantee memcg->res.limit <= memcg->memsw.limit.
4608 		 */
4609 		mutex_lock(&set_limit_mutex);
4610 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4611 		if (memlimit > val) {
4612 			ret = -EINVAL;
4613 			mutex_unlock(&set_limit_mutex);
4614 			break;
4615 		}
4616 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4617 		if (memswlimit < val)
4618 			enlarge = 1;
4619 		ret = res_counter_set_limit(&memcg->memsw, val);
4620 		if (!ret) {
4621 			if (memlimit == val)
4622 				memcg->memsw_is_minimum = true;
4623 			else
4624 				memcg->memsw_is_minimum = false;
4625 		}
4626 		mutex_unlock(&set_limit_mutex);
4627 
4628 		if (!ret)
4629 			break;
4630 
4631 		mem_cgroup_reclaim(memcg, GFP_KERNEL,
4632 				   MEM_CGROUP_RECLAIM_NOSWAP |
4633 				   MEM_CGROUP_RECLAIM_SHRINK);
4634 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4635 		/* Usage is reduced ? */
4636 		if (curusage >= oldusage)
4637 			retry_count--;
4638 		else
4639 			oldusage = curusage;
4640 	}
4641 	if (!ret && enlarge)
4642 		memcg_oom_recover(memcg);
4643 	return ret;
4644 }
4645 
4646 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4647 					    gfp_t gfp_mask,
4648 					    unsigned long *total_scanned)
4649 {
4650 	unsigned long nr_reclaimed = 0;
4651 	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4652 	unsigned long reclaimed;
4653 	int loop = 0;
4654 	struct mem_cgroup_tree_per_zone *mctz;
4655 	unsigned long long excess;
4656 	unsigned long nr_scanned;
4657 
4658 	if (order > 0)
4659 		return 0;
4660 
4661 	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4662 	/*
4663 	 * This loop can run for a while, especially if mem_cgroups continuously
4664 	 * keep exceeding their soft limit and putting the system under
4665 	 * pressure.
4666 	 */
4667 	do {
4668 		if (next_mz)
4669 			mz = next_mz;
4670 		else
4671 			mz = mem_cgroup_largest_soft_limit_node(mctz);
4672 		if (!mz)
4673 			break;
4674 
4675 		nr_scanned = 0;
4676 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4677 						    gfp_mask, &nr_scanned);
4678 		nr_reclaimed += reclaimed;
4679 		*total_scanned += nr_scanned;
4680 		spin_lock(&mctz->lock);
4681 
4682 		/*
4683 		 * If we failed to reclaim anything from this memory cgroup
4684 		 * it is time to move on to the next cgroup
4685 		 */
4686 		next_mz = NULL;
4687 		if (!reclaimed) {
4688 			do {
4689 				/*
4690 				 * Loop until we find yet another one.
4691 				 *
4692 				 * By the time we get the soft_limit lock
4693 				 * again, someone might have added the
4694 				 * group back on the RB tree. Iterate to
4695 				 * make sure we get a different memcg.
4696 				 * mem_cgroup_largest_soft_limit_node returns
4697 				 * NULL if no other cgroup is present on
4698 				 * the tree.
4699 				 */
4700 				next_mz =
4701 				__mem_cgroup_largest_soft_limit_node(mctz);
4702 				if (next_mz == mz)
4703 					css_put(&next_mz->memcg->css);
4704 				else /* next_mz == NULL or other memcg */
4705 					break;
4706 			} while (1);
4707 		}
4708 		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4709 		excess = res_counter_soft_limit_excess(&mz->memcg->res);
4710 		/*
4711 		 * One school of thought says that we should not add
4712 		 * back the node to the tree if reclaim returns 0.
4713 		 * But our reclaim could return 0 simply because, due
4714 		 * to priority, we are exposing a smaller subset of
4715 		 * memory to reclaim from. Consider this as a longer
4716 		 * term TODO.
4717 		 */
4718 		/* If excess == 0, no tree ops */
4719 		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4720 		spin_unlock(&mctz->lock);
4721 		css_put(&mz->memcg->css);
4722 		loop++;
4723 		/*
4724 		 * Could not reclaim anything and there are no more
4725 		 * mem cgroups to try or we seem to be looping without
4726 		 * reclaiming anything.
4727 		 */
4728 		if (!nr_reclaimed &&
4729 			(next_mz == NULL ||
4730 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4731 			break;
4732 	} while (!nr_reclaimed);
4733 	if (next_mz)
4734 		css_put(&next_mz->memcg->css);
4735 	return nr_reclaimed;
4736 }
4737 
4738 /**
4739  * mem_cgroup_force_empty_list - clears LRU of a group
4740  * @memcg: group to clear
4741  * @node: NUMA node
4742  * @zid: zone id
4743  * @lru: lru to clear
4744  *
4745  * Traverse a specified page_cgroup list and try to drop all pages on it.  This
4746  * doesn't reclaim the pages themselves; pages are moved to the parent (or root)
4747  * group.
4748  */
4749 static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
4750 				int node, int zid, enum lru_list lru)
4751 {
4752 	struct lruvec *lruvec;
4753 	unsigned long flags;
4754 	struct list_head *list;
4755 	struct page *busy;
4756 	struct zone *zone;
4757 
4758 	zone = &NODE_DATA(node)->node_zones[zid];
4759 	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4760 	list = &lruvec->lists[lru];
4761 
4762 	busy = NULL;
4763 	do {
4764 		struct page_cgroup *pc;
4765 		struct page *page;
4766 
4767 		spin_lock_irqsave(&zone->lru_lock, flags);
4768 		if (list_empty(list)) {
4769 			spin_unlock_irqrestore(&zone->lru_lock, flags);
4770 			break;
4771 		}
4772 		page = list_entry(list->prev, struct page, lru);
4773 		if (busy == page) {
4774 			list_move(&page->lru, list);
4775 			busy = NULL;
4776 			spin_unlock_irqrestore(&zone->lru_lock, flags);
4777 			continue;
4778 		}
4779 		spin_unlock_irqrestore(&zone->lru_lock, flags);
4780 
4781 		pc = lookup_page_cgroup(page);
4782 
4783 		if (mem_cgroup_move_parent(page, pc, memcg)) {
4784 			/* found lock contention or "pc" is obsolete. */
4785 			busy = page;
4786 			cond_resched();
4787 		} else
4788 			busy = NULL;
4789 	} while (!list_empty(list));
4790 }
4791 
4792 /*
4793  * Make the mem_cgroup's charge 0 if there is no task, by moving
4794  * all the charges and pages to the parent.
4795  * This enables deleting this mem_cgroup.
4796  *
4797  * Caller is responsible for holding css reference on the memcg.
4798  */
4799 static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
4800 {
4801 	int node, zid;
4802 	u64 usage;
4803 
4804 	do {
4805 		/* This is for making sure all *used* pages are on the LRU. */
4806 		lru_add_drain_all();
4807 		drain_all_stock_sync(memcg);
4808 		mem_cgroup_start_move(memcg);
4809 		for_each_node_state(node, N_MEMORY) {
4810 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4811 				enum lru_list lru;
4812 				for_each_lru(lru) {
4813 					mem_cgroup_force_empty_list(memcg,
4814 							node, zid, lru);
4815 				}
4816 			}
4817 		}
4818 		mem_cgroup_end_move(memcg);
4819 		memcg_oom_recover(memcg);
4820 		cond_resched();
4821 
4822 		/*
4823 		 * Kernel memory may not necessarily be trackable to a specific
4824 		 * process, so such charges are not migrated and therefore we can't
4825 		 * expect their value to drop to 0 here.
4826 		 * Having res filled up with kmem only is enough.
4827 		 *
4828 		 * This is a safety check because mem_cgroup_force_empty_list
4829 		 * could have raced with mem_cgroup_replace_page_cache callers
4830 		 * so the lru seemed empty but the page could have been added
4831 		 * right after the check. RES_USAGE should be safe as we always
4832 		 * charge before adding to the LRU.
4833 		 */
4834 		usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4835 			res_counter_read_u64(&memcg->kmem, RES_USAGE);
4836 	} while (usage > 0);
4837 }
4838 
4839 static inline bool memcg_has_children(struct mem_cgroup *memcg)
4840 {
4841 	lockdep_assert_held(&memcg_create_mutex);
4842 	/*
4843 	 * The lock does not prevent addition to or deletion from the list
4844 	 * of children, but it prevents a new child from being
4845 	 * initialized based on this parent in css_online(), so it's
4846 	 * enough to decide whether hierarchically inherited
4847 	 * attributes can still be changed or not.
4848 	 */
4849 	return memcg->use_hierarchy &&
4850 		!list_empty(&memcg->css.cgroup->children);
4851 }
4852 
4853 /*
4854  * Reclaims as many pages from the given memcg as possible and moves
4855  * the rest to the parent.
4856  *
4857  * Caller is responsible for holding css reference for memcg.
4858  */
4859 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4860 {
4861 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4862 	struct cgroup *cgrp = memcg->css.cgroup;
4863 
4864 	/* returns EBUSY if there is a task or if we come here twice. */
4865 	if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
4866 		return -EBUSY;
4867 
4868 	/* we call try-to-free pages to make this cgroup empty */
4869 	lru_add_drain_all();
4870 	/* try to free all pages in this cgroup */
4871 	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
4872 		int progress;
4873 
4874 		if (signal_pending(current))
4875 			return -EINTR;
4876 
4877 		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
4878 						false);
4879 		if (!progress) {
4880 			nr_retries--;
4881 			/* maybe some writeback is necessary */
4882 			congestion_wait(BLK_RW_ASYNC, HZ/10);
4883 		}
4884 
4885 	}
4886 	lru_add_drain();
4887 	mem_cgroup_reparent_charges(memcg);
4888 
4889 	return 0;
4890 }
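
/*
 * From user space this path is reached by writing to the memory.force_empty
 * control file (see mem_cgroup_force_empty_write() below): it first tries to
 * reclaim as much as possible and then reparents whatever charges remain.
 */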
4891 
4892 static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
4893 					unsigned int event)
4894 {
4895 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4896 
4897 	if (mem_cgroup_is_root(memcg))
4898 		return -EINVAL;
4899 	return mem_cgroup_force_empty(memcg);
4900 }
4901 
4902 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4903 				     struct cftype *cft)
4904 {
4905 	return mem_cgroup_from_css(css)->use_hierarchy;
4906 }
4907 
4908 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4909 				      struct cftype *cft, u64 val)
4910 {
4911 	int retval = 0;
4912 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4913 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
4914 
4915 	mutex_lock(&memcg_create_mutex);
4916 
4917 	if (memcg->use_hierarchy == val)
4918 		goto out;
4919 
4920 	/*
4921 	 * If parent's use_hierarchy is set, we can't make any modifications
4922 	 * in the child subtrees. If it is unset, then the change can
4923 	 * occur, provided the current cgroup has no children.
4924 	 *
4925 	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
4926 	 * set if there are no children.
4927 	 */
4928 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
4929 				(val == 1 || val == 0)) {
4930 		if (list_empty(&memcg->css.cgroup->children))
4931 			memcg->use_hierarchy = val;
4932 		else
4933 			retval = -EBUSY;
4934 	} else
4935 		retval = -EINVAL;
4936 
4937 out:
4938 	mutex_unlock(&memcg_create_mutex);
4939 
4940 	return retval;
4941 }
4942 
4943 
4944 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
4945 					       enum mem_cgroup_stat_index idx)
4946 {
4947 	struct mem_cgroup *iter;
4948 	long val = 0;
4949 
4950 	/* Per-cpu values can be negative, use a signed accumulator */
4951 	for_each_mem_cgroup_tree(iter, memcg)
4952 		val += mem_cgroup_read_stat(iter, idx);
4953 
4954 	if (val < 0) /* race ? */
4955 		val = 0;
4956 	return val;
4957 }
4958 
4959 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
4960 {
4961 	u64 val;
4962 
4963 	if (!mem_cgroup_is_root(memcg)) {
4964 		if (!swap)
4965 			return res_counter_read_u64(&memcg->res, RES_USAGE);
4966 		else
4967 			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
4968 	}
4969 
4970 	/*
4971 	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
4972 	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
4973 	 */
4974 	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
4975 	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
4976 
4977 	if (swap)
4978 		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
4979 
4980 	return val << PAGE_SHIFT;
4981 }
4982 
4983 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4984 				   struct cftype *cft)
4985 {
4986 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4987 	u64 val;
4988 	int name;
4989 	enum res_type type;
4990 
4991 	type = MEMFILE_TYPE(cft->private);
4992 	name = MEMFILE_ATTR(cft->private);
4993 
4994 	switch (type) {
4995 	case _MEM:
4996 		if (name == RES_USAGE)
4997 			val = mem_cgroup_usage(memcg, false);
4998 		else
4999 			val = res_counter_read_u64(&memcg->res, name);
5000 		break;
5001 	case _MEMSWAP:
5002 		if (name == RES_USAGE)
5003 			val = mem_cgroup_usage(memcg, true);
5004 		else
5005 			val = res_counter_read_u64(&memcg->memsw, name);
5006 		break;
5007 	case _KMEM:
5008 		val = res_counter_read_u64(&memcg->kmem, name);
5009 		break;
5010 	default:
5011 		BUG();
5012 	}
5013 
5014 	return val;
5015 }
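
/*
 * For reference: cft->private packs the counter type and the attribute as
 * decoded by MEMFILE_TYPE()/MEMFILE_ATTR() above (macros defined earlier in
 * this file). For example, the memory.usage_in_bytes cftype is expected to
 * be registered with .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), and
 * memory.kmem.limit_in_bytes with MEMFILE_PRIVATE(_KMEM, RES_LIMIT).
 */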
5016 
5017 #ifdef CONFIG_MEMCG_KMEM
5018 /* should be called with activate_kmem_mutex held */
5019 static int __memcg_activate_kmem(struct mem_cgroup *memcg,
5020 				 unsigned long long limit)
5021 {
5022 	int err = 0;
5023 	int memcg_id;
5024 
5025 	if (memcg_kmem_is_active(memcg))
5026 		return 0;
5027 
5028 	/*
5029 	 * We are going to allocate memory for data shared by all memory
5030 	 * cgroups so let's stop accounting here.
5031 	 */
5032 	memcg_stop_kmem_account();
5033 
5034 	/*
5035 	 * For simplicity, we won't allow this to be disabled.  It also can't
5036 	 * be changed if the cgroup has children already, or if tasks had
5037 	 * be changed if the cgroup has children already, or if tasks have
5038 	 * already joined.
5039 	 * If tasks join before we set the limit, a person looking at
5040 	 * kmem.usage_in_bytes will have no way to determine when it took
5041 	 * place, which makes the value quite meaningless.
5042 	 *
5043 	 * After it first became limited, changes in the value of the limit are
5044 	 * of course permitted.
5045 	 */
5046 	mutex_lock(&memcg_create_mutex);
5047 	if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
5048 		err = -EBUSY;
5049 	mutex_unlock(&memcg_create_mutex);
5050 	if (err)
5051 		goto out;
5052 
5053 	memcg_id = ida_simple_get(&kmem_limited_groups,
5054 				  0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
5055 	if (memcg_id < 0) {
5056 		err = memcg_id;
5057 		goto out;
5058 	}
5059 
5060 	/*
5061 	 * Make sure we have enough space for this cgroup in each root cache's
5062 	 * memcg_params.
5063 	 */
5064 	err = memcg_update_all_caches(memcg_id + 1);
5065 	if (err)
5066 		goto out_rmid;
5067 
5068 	memcg->kmemcg_id = memcg_id;
5069 	INIT_LIST_HEAD(&memcg->memcg_slab_caches);
5070 	mutex_init(&memcg->slab_caches_mutex);
5071 
5072 	/*
5073 	 * We couldn't have accounted to this cgroup, because it hasn't got the
5074 	 * active bit set yet, so this should succeed.
5075 	 */
5076 	err = res_counter_set_limit(&memcg->kmem, limit);
5077 	VM_BUG_ON(err);
5078 
5079 	static_key_slow_inc(&memcg_kmem_enabled_key);
5080 	/*
5081 	 * Setting the active bit after enabling static branching will
5082 	 * guarantee no one starts accounting before all call sites are
5083 	 * patched.
5084 	 */
5085 	memcg_kmem_set_active(memcg);
5086 out:
5087 	memcg_resume_kmem_account();
5088 	return err;
5089 
5090 out_rmid:
5091 	ida_simple_remove(&kmem_limited_groups, memcg_id);
5092 	goto out;
5093 }
5094 
5095 static int memcg_activate_kmem(struct mem_cgroup *memcg,
5096 			       unsigned long long limit)
5097 {
5098 	int ret;
5099 
5100 	mutex_lock(&activate_kmem_mutex);
5101 	ret = __memcg_activate_kmem(memcg, limit);
5102 	mutex_unlock(&activate_kmem_mutex);
5103 	return ret;
5104 }
5105 
5106 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5107 				   unsigned long long val)
5108 {
5109 	int ret;
5110 
5111 	if (!memcg_kmem_is_active(memcg))
5112 		ret = memcg_activate_kmem(memcg, val);
5113 	else
5114 		ret = res_counter_set_limit(&memcg->kmem, val);
5115 	return ret;
5116 }
5117 
5118 static int memcg_propagate_kmem(struct mem_cgroup *memcg)
5119 {
5120 	int ret = 0;
5121 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5122 
5123 	if (!parent)
5124 		return 0;
5125 
5126 	mutex_lock(&activate_kmem_mutex);
5127 	/*
5128 	 * If the parent cgroup is not kmem-active now, it cannot be activated
5129 	 * after this point, because it has at least one child already.
5130 	 */
5131 	if (memcg_kmem_is_active(parent))
5132 		ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
5133 	mutex_unlock(&activate_kmem_mutex);
5134 	return ret;
5135 }
5136 #else
5137 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5138 				   unsigned long long val)
5139 {
5140 	return -EINVAL;
5141 }
5142 #endif /* CONFIG_MEMCG_KMEM */
5143 
5144 /*
5145  * The users of this function are the RES_LIMIT and RES_SOFT_LIMIT
5146  * writers.
5147  */
5148 static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
5149 			    char *buffer)
5150 {
5151 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5152 	enum res_type type;
5153 	int name;
5154 	unsigned long long val;
5155 	int ret;
5156 
5157 	type = MEMFILE_TYPE(cft->private);
5158 	name = MEMFILE_ATTR(cft->private);
5159 
5160 	switch (name) {
5161 	case RES_LIMIT:
5162 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
5163 			ret = -EINVAL;
5164 			break;
5165 		}
5166 		/* res_counter_memparse_write_strategy() does all the parsing; reuse it */
5167 		ret = res_counter_memparse_write_strategy(buffer, &val);
5168 		if (ret)
5169 			break;
5170 		if (type == _MEM)
5171 			ret = mem_cgroup_resize_limit(memcg, val);
5172 		else if (type == _MEMSWAP)
5173 			ret = mem_cgroup_resize_memsw_limit(memcg, val);
5174 		else if (type == _KMEM)
5175 			ret = memcg_update_kmem_limit(memcg, val);
5176 		else
5177 			return -EINVAL;
5178 		break;
5179 	case RES_SOFT_LIMIT:
5180 		ret = res_counter_memparse_write_strategy(buffer, &val);
5181 		if (ret)
5182 			break;
5183 		/*
5184 		 * For memsw, soft limits are hard to implement in terms
5185 		 * of semantics. For now, we only support soft limits for
5186 		 * memory control without swap.
5187 		 */
5188 		if (type == _MEM)
5189 			ret = res_counter_set_soft_limit(&memcg->res, val);
5190 		else
5191 			ret = -EINVAL;
5192 		break;
5193 	default:
5194 		ret = -EINVAL; /* should be BUG() ? */
5195 		break;
5196 	}
5197 	return ret;
5198 }
5199 
5200 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
5201 		unsigned long long *mem_limit, unsigned long long *memsw_limit)
5202 {
5203 	unsigned long long min_limit, min_memsw_limit, tmp;
5204 
5205 	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
5206 	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5207 	if (!memcg->use_hierarchy)
5208 		goto out;
5209 
5210 	while (css_parent(&memcg->css)) {
5211 		memcg = mem_cgroup_from_css(css_parent(&memcg->css));
5212 		if (!memcg->use_hierarchy)
5213 			break;
5214 		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
5215 		min_limit = min(min_limit, tmp);
5216 		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5217 		min_memsw_limit = min(min_memsw_limit, tmp);
5218 	}
5219 out:
5220 	*mem_limit = min_limit;
5221 	*memsw_limit = min_memsw_limit;
5222 }
5223 
5224 static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
5225 {
5226 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5227 	int name;
5228 	enum res_type type;
5229 
5230 	type = MEMFILE_TYPE(event);
5231 	name = MEMFILE_ATTR(event);
5232 
5233 	switch (name) {
5234 	case RES_MAX_USAGE:
5235 		if (type == _MEM)
5236 			res_counter_reset_max(&memcg->res);
5237 		else if (type == _MEMSWAP)
5238 			res_counter_reset_max(&memcg->memsw);
5239 		else if (type == _KMEM)
5240 			res_counter_reset_max(&memcg->kmem);
5241 		else
5242 			return -EINVAL;
5243 		break;
5244 	case RES_FAILCNT:
5245 		if (type == _MEM)
5246 			res_counter_reset_failcnt(&memcg->res);
5247 		else if (type == _MEMSWAP)
5248 			res_counter_reset_failcnt(&memcg->memsw);
5249 		else if (type == _KMEM)
5250 			res_counter_reset_failcnt(&memcg->kmem);
5251 		else
5252 			return -EINVAL;
5253 		break;
5254 	}
5255 
5256 	return 0;
5257 }
5258 
5259 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
5260 					struct cftype *cft)
5261 {
5262 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
5263 }
5264 
5265 #ifdef CONFIG_MMU
5266 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5267 					struct cftype *cft, u64 val)
5268 {
5269 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5270 
5271 	if (val >= (1 << NR_MOVE_TYPE))
5272 		return -EINVAL;
5273 
5274 	/*
5275 	 * No kind of locking is needed in here, because ->can_attach() will
5276 	 * check this value once at the beginning of the process, and then carry
5277 	 * on with stale data. This means that changes to this value will only
5278 	 * affect task migrations starting after the change.
5279 	 */
5280 	memcg->move_charge_at_immigrate = val;
5281 	return 0;
5282 }
5283 #else
5284 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5285 					struct cftype *cft, u64 val)
5286 {
5287 	return -ENOSYS;
5288 }
5289 #endif
5290 
5291 #ifdef CONFIG_NUMA
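
/*
 * memcg_numa_stat_show() below backs the memory.numa_stat file. Its output
 * looks roughly like this (values made up for illustration, two NUMA nodes
 * assumed):
 *
 *	total=365 N0=210 N1=155
 *	file=120 N0=80 N1=40
 *	anon=200 N0=100 N1=100
 *	unevictable=45 N0=30 N1=15
 *	hierarchical_total=512 N0=300 N1=212
 *	hierarchical_file=...
 *	hierarchical_anon=...
 *	hierarchical_unevictable=...
 */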
5292 static int memcg_numa_stat_show(struct seq_file *m, void *v)
5293 {
5294 	struct numa_stat {
5295 		const char *name;
5296 		unsigned int lru_mask;
5297 	};
5298 
5299 	static const struct numa_stat stats[] = {
5300 		{ "total", LRU_ALL },
5301 		{ "file", LRU_ALL_FILE },
5302 		{ "anon", LRU_ALL_ANON },
5303 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
5304 	};
5305 	const struct numa_stat *stat;
5306 	int nid;
5307 	unsigned long nr;
5308 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5309 
5310 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5311 		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
5312 		seq_printf(m, "%s=%lu", stat->name, nr);
5313 		for_each_node_state(nid, N_MEMORY) {
5314 			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5315 							  stat->lru_mask);
5316 			seq_printf(m, " N%d=%lu", nid, nr);
5317 		}
5318 		seq_putc(m, '\n');
5319 	}
5320 
5321 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5322 		struct mem_cgroup *iter;
5323 
5324 		nr = 0;
5325 		for_each_mem_cgroup_tree(iter, memcg)
5326 			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
5327 		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
5328 		for_each_node_state(nid, N_MEMORY) {
5329 			nr = 0;
5330 			for_each_mem_cgroup_tree(iter, memcg)
5331 				nr += mem_cgroup_node_nr_lru_pages(
5332 					iter, nid, stat->lru_mask);
5333 			seq_printf(m, " N%d=%lu", nid, nr);
5334 		}
5335 		seq_putc(m, '\n');
5336 	}
5337 
5338 	return 0;
5339 }
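/*
 * The resulting memory.numa_stat output has one line per entry of stats[]
 * above, page counts first for the group itself and then for the whole
 * hierarchy, e.g. (hypothetical values on a two-node system):
 *
 *	total=1024 N0=768 N1=256
 *	file=512 N0=384 N1=128
 *	anon=512 N0=384 N1=128
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=1024 N0=768 N1=256
 *	hierarchical_file=512 N0=384 N1=128
 *	hierarchical_anon=512 N0=384 N1=128
 *	hierarchical_unevictable=0 N0=0 N1=0
 */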
5340 #endif /* CONFIG_NUMA */
5341 
5342 static inline void mem_cgroup_lru_names_not_uptodate(void)
5343 {
5344 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5345 }
5346 
5347 static int memcg_stat_show(struct seq_file *m, void *v)
5348 {
5349 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5350 	struct mem_cgroup *mi;
5351 	unsigned int i;
5352 
5353 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5354 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5355 			continue;
5356 		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5357 			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
5358 	}
5359 
5360 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5361 		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5362 			   mem_cgroup_read_events(memcg, i));
5363 
5364 	for (i = 0; i < NR_LRU_LISTS; i++)
5365 		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5366 			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5367 
5368 	/* Hierarchical information */
5369 	{
5370 		unsigned long long limit, memsw_limit;
5371 		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
5372 		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
5373 		if (do_swap_account)
5374 			seq_printf(m, "hierarchical_memsw_limit %llu\n",
5375 				   memsw_limit);
5376 	}
5377 
5378 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5379 		long long val = 0;
5380 
5381 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5382 			continue;
5383 		for_each_mem_cgroup_tree(mi, memcg)
5384 			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5385 		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5386 	}
5387 
5388 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5389 		unsigned long long val = 0;
5390 
5391 		for_each_mem_cgroup_tree(mi, memcg)
5392 			val += mem_cgroup_read_events(mi, i);
5393 		seq_printf(m, "total_%s %llu\n",
5394 			   mem_cgroup_events_names[i], val);
5395 	}
5396 
5397 	for (i = 0; i < NR_LRU_LISTS; i++) {
5398 		unsigned long long val = 0;
5399 
5400 		for_each_mem_cgroup_tree(mi, memcg)
5401 			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5402 		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
5403 	}
5404 
5405 #ifdef CONFIG_DEBUG_VM
5406 	{
5407 		int nid, zid;
5408 		struct mem_cgroup_per_zone *mz;
5409 		struct zone_reclaim_stat *rstat;
5410 		unsigned long recent_rotated[2] = {0, 0};
5411 		unsigned long recent_scanned[2] = {0, 0};
5412 
5413 		for_each_online_node(nid)
5414 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
5415 				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
5416 				rstat = &mz->lruvec.reclaim_stat;
5417 
5418 				recent_rotated[0] += rstat->recent_rotated[0];
5419 				recent_rotated[1] += rstat->recent_rotated[1];
5420 				recent_scanned[0] += rstat->recent_scanned[0];
5421 				recent_scanned[1] += rstat->recent_scanned[1];
5422 			}
5423 		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5424 		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5425 		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5426 		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
5427 	}
5428 #endif
5429 
5430 	return 0;
5431 }
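/*
 * Reading memory.stat therefore yields output of the following shape
 * (hypothetical values; local counters are in bytes, events are raw
 * counts, followed by the hierarchical limits and total_* aggregates):
 *
 *	cache 1048576
 *	rss 8388608
 *	...
 *	pgpgin 4096
 *	pgpgout 2048
 *	...
 *	inactive_anon 0
 *	active_anon 8388608
 *	...
 *	hierarchical_memory_limit 268435456
 *	total_cache 1048576
 *	total_rss 8388608
 *	...
 */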
5432 
5433 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
5434 				      struct cftype *cft)
5435 {
5436 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5437 
5438 	return mem_cgroup_swappiness(memcg);
5439 }
5440 
5441 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
5442 				       struct cftype *cft, u64 val)
5443 {
5444 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5445 	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5446 
5447 	if (val > 100 || !parent)
5448 		return -EINVAL;
5449 
5450 	mutex_lock(&memcg_create_mutex);
5451 
5452 	/* If under hierarchy, only empty-root can set this value */
5453 	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5454 		mutex_unlock(&memcg_create_mutex);
5455 		return -EINVAL;
5456 	}
5457 
5458 	memcg->swappiness = val;
5459 
5460 	mutex_unlock(&memcg_create_mutex);
5461 
5462 	return 0;
5463 }
5464 
5465 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5466 {
5467 	struct mem_cgroup_threshold_ary *t;
5468 	u64 usage;
5469 	int i;
5470 
5471 	rcu_read_lock();
5472 	if (!swap)
5473 		t = rcu_dereference(memcg->thresholds.primary);
5474 	else
5475 		t = rcu_dereference(memcg->memsw_thresholds.primary);
5476 
5477 	if (!t)
5478 		goto unlock;
5479 
5480 	usage = mem_cgroup_usage(memcg, swap);
5481 
5482 	/*
5483 	 * current_threshold points to the threshold just below or equal to
5484 	 * usage. If that no longer holds, a threshold was crossed after the
5485 	 * last call of __mem_cgroup_threshold().
5486 	 */
5487 	i = t->current_threshold;
5488 
5489 	/*
5490 	 * Iterate backward over the array of thresholds starting from
5491 	 * current_threshold and check if a threshold is crossed.
5492 	 * If none of the thresholds below usage is crossed, we read
5493 	 * only one element of the array here.
5494 	 */
5495 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5496 		eventfd_signal(t->entries[i].eventfd, 1);
5497 
5498 	/* i = current_threshold + 1 */
5499 	i++;
5500 
5501 	/*
5502 	 * Iterate forward over the array of thresholds starting from
5503 	 * current_threshold+1 and check if a threshold is crossed.
5504 	 * If none of the thresholds above usage is crossed, we read
5505 	 * only one element of the array here.
5506 	 */
5507 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5508 		eventfd_signal(t->entries[i].eventfd, 1);
5509 
5510 	/* Update current_threshold */
5511 	t->current_threshold = i - 1;
5512 unlock:
5513 	rcu_read_unlock();
5514 }
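/*
 * A worked example of the walk above (illustrative values only): with
 * sorted thresholds {4M, 8M, 16M} and current_threshold == 1 (the 8M
 * entry), a drop in usage to 3M makes the backward loop signal the 8M
 * and then the 4M eventfds, the forward loop finds nothing at or below
 * 3M, and current_threshold ends up at -1.  If usage instead grows to
 * 20M, the backward loop does nothing, the forward loop signals the 16M
 * eventfd, and current_threshold ends up at 2.
 */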
5515 
5516 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5517 {
5518 	while (memcg) {
5519 		__mem_cgroup_threshold(memcg, false);
5520 		if (do_swap_account)
5521 			__mem_cgroup_threshold(memcg, true);
5522 
5523 		memcg = parent_mem_cgroup(memcg);
5524 	}
5525 }
5526 
5527 static int compare_thresholds(const void *a, const void *b)
5528 {
5529 	const struct mem_cgroup_threshold *_a = a;
5530 	const struct mem_cgroup_threshold *_b = b;
5531 
5532 	if (_a->threshold > _b->threshold)
5533 		return 1;
5534 
5535 	if (_a->threshold < _b->threshold)
5536 		return -1;
5537 
5538 	return 0;
5539 }
5540 
5541 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
5542 {
5543 	struct mem_cgroup_eventfd_list *ev;
5544 
5545 	list_for_each_entry(ev, &memcg->oom_notify, list)
5546 		eventfd_signal(ev->eventfd, 1);
5547 	return 0;
5548 }
5549 
5550 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
5551 {
5552 	struct mem_cgroup *iter;
5553 
5554 	for_each_mem_cgroup_tree(iter, memcg)
5555 		mem_cgroup_oom_notify_cb(iter);
5556 }
5557 
5558 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5559 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
5560 {
5561 	struct mem_cgroup_thresholds *thresholds;
5562 	struct mem_cgroup_threshold_ary *new;
5563 	u64 threshold, usage;
5564 	int i, size, ret;
5565 
5566 	ret = res_counter_memparse_write_strategy(args, &threshold);
5567 	if (ret)
5568 		return ret;
5569 
5570 	mutex_lock(&memcg->thresholds_lock);
5571 
5572 	if (type == _MEM)
5573 		thresholds = &memcg->thresholds;
5574 	else if (type == _MEMSWAP)
5575 		thresholds = &memcg->memsw_thresholds;
5576 	else
5577 		BUG();
5578 
5579 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5580 
5581 	/* Check if a threshold was crossed before adding a new one */
5582 	if (thresholds->primary)
5583 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
5584 
5585 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
5586 
5587 	/* Allocate memory for new array of thresholds */
5588 	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
5589 			GFP_KERNEL);
5590 	if (!new) {
5591 		ret = -ENOMEM;
5592 		goto unlock;
5593 	}
5594 	new->size = size;
5595 
5596 	/* Copy thresholds (if any) to new array */
5597 	if (thresholds->primary) {
5598 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
5599 				sizeof(struct mem_cgroup_threshold));
5600 	}
5601 
5602 	/* Add new threshold */
5603 	new->entries[size - 1].eventfd = eventfd;
5604 	new->entries[size - 1].threshold = threshold;
5605 
5606 	/* Sort thresholds. Registering a new threshold isn't time-critical */
5607 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
5608 			compare_thresholds, NULL);
5609 
5610 	/* Find current threshold */
5611 	new->current_threshold = -1;
5612 	for (i = 0; i < size; i++) {
5613 		if (new->entries[i].threshold <= usage) {
5614 			/*
5615 			 * new->current_threshold will not be used until
5616 			 * rcu_assign_pointer(), so it's safe to increment
5617 			 * it here.
5618 			 */
5619 			++new->current_threshold;
5620 		} else
5621 			break;
5622 	}
5623 
5624 	/* Free old spare buffer and save old primary buffer as spare */
5625 	kfree(thresholds->spare);
5626 	thresholds->spare = thresholds->primary;
5627 
5628 	rcu_assign_pointer(thresholds->primary, new);
5629 
5630 	/* To be sure that nobody uses thresholds */
5631 	synchronize_rcu();
5632 
5633 unlock:
5634 	mutex_unlock(&memcg->thresholds_lock);
5635 
5636 	return ret;
5637 }
5638 
5639 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5640 	struct eventfd_ctx *eventfd, const char *args)
5641 {
5642 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
5643 }
5644 
5645 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
5646 	struct eventfd_ctx *eventfd, const char *args)
5647 {
5648 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
5649 }
5650 
5651 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5652 	struct eventfd_ctx *eventfd, enum res_type type)
5653 {
5654 	struct mem_cgroup_thresholds *thresholds;
5655 	struct mem_cgroup_threshold_ary *new;
5656 	u64 usage;
5657 	int i, j, size;
5658 
5659 	mutex_lock(&memcg->thresholds_lock);
5660 	if (type == _MEM)
5661 		thresholds = &memcg->thresholds;
5662 	else if (type == _MEMSWAP)
5663 		thresholds = &memcg->memsw_thresholds;
5664 	else
5665 		BUG();
5666 
5667 	if (!thresholds->primary)
5668 		goto unlock;
5669 
5670 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5671 
5672 	/* Check if a threshold was crossed before removing */
5673 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
5674 
5675 	/* Calculate the new number of thresholds */
5676 	size = 0;
5677 	for (i = 0; i < thresholds->primary->size; i++) {
5678 		if (thresholds->primary->entries[i].eventfd != eventfd)
5679 			size++;
5680 	}
5681 
5682 	new = thresholds->spare;
5683 
5684 	/* Set thresholds array to NULL if we don't have thresholds */
5685 	if (!size) {
5686 		kfree(new);
5687 		new = NULL;
5688 		goto swap_buffers;
5689 	}
5690 
5691 	new->size = size;
5692 
5693 	/* Copy thresholds and find current threshold */
5694 	new->current_threshold = -1;
5695 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5696 		if (thresholds->primary->entries[i].eventfd == eventfd)
5697 			continue;
5698 
5699 		new->entries[j] = thresholds->primary->entries[i];
5700 		if (new->entries[j].threshold <= usage) {
5701 			/*
5702 			 * new->current_threshold will not be used
5703 			 * until rcu_assign_pointer(), so it's safe to increment
5704 			 * it here.
5705 			 */
5706 			++new->current_threshold;
5707 		}
5708 		j++;
5709 	}
5710 
5711 swap_buffers:
5712 	/* Swap primary and spare array */
5713 	thresholds->spare = thresholds->primary;
5714 	/* If all events are unregistered, free the spare array */
5715 	if (!new) {
5716 		kfree(thresholds->spare);
5717 		thresholds->spare = NULL;
5718 	}
5719 
5720 	rcu_assign_pointer(thresholds->primary, new);
5721 
5722 	/* To be sure that nobody uses thresholds */
5723 	synchronize_rcu();
5724 unlock:
5725 	mutex_unlock(&memcg->thresholds_lock);
5726 }
5727 
5728 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5729 	struct eventfd_ctx *eventfd)
5730 {
5731 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
5732 }
5733 
5734 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5735 	struct eventfd_ctx *eventfd)
5736 {
5737 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
5738 }
5739 
5740 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
5741 	struct eventfd_ctx *eventfd, const char *args)
5742 {
5743 	struct mem_cgroup_eventfd_list *event;
5744 
5745 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
5746 	if (!event)
5747 		return -ENOMEM;
5748 
5749 	spin_lock(&memcg_oom_lock);
5750 
5751 	event->eventfd = eventfd;
5752 	list_add(&event->list, &memcg->oom_notify);
5753 
5754 	/* already in OOM ? */
5755 	if (atomic_read(&memcg->under_oom))
5756 		eventfd_signal(eventfd, 1);
5757 	spin_unlock(&memcg_oom_lock);
5758 
5759 	return 0;
5760 }
5761 
5762 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
5763 	struct eventfd_ctx *eventfd)
5764 {
5765 	struct mem_cgroup_eventfd_list *ev, *tmp;
5766 
5767 	spin_lock(&memcg_oom_lock);
5768 
5769 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
5770 		if (ev->eventfd == eventfd) {
5771 			list_del(&ev->list);
5772 			kfree(ev);
5773 		}
5774 	}
5775 
5776 	spin_unlock(&memcg_oom_lock);
5777 }
5778 
5779 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
5780 {
5781 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5782 
5783 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
5784 	seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
5785 	return 0;
5786 }
5787 
5788 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
5789 	struct cftype *cft, u64 val)
5790 {
5791 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5792 	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5793 
5794 	/* cannot set to root cgroup and only 0 and 1 are allowed */
5795 	if (!parent || !((val == 0) || (val == 1)))
5796 		return -EINVAL;
5797 
5798 	mutex_lock(&memcg_create_mutex);
5799 	/* oom-kill-disable is a flag for subhierarchy. */
5800 	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5801 		mutex_unlock(&memcg_create_mutex);
5802 		return -EINVAL;
5803 	}
5804 	memcg->oom_kill_disable = val;
5805 	if (!val)
5806 		memcg_oom_recover(memcg);
5807 	mutex_unlock(&memcg_create_mutex);
5808 	return 0;
5809 }
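/*
 * Usage sketch: writing 1 to memory.oom_control of a childless group that
 * is not under a use_hierarchy parent disables the OOM killer for it, so
 * tasks that hit the limit wait instead of being killed; writing 0
 * re-enables the killer and wakes any waiters via memcg_oom_recover().
 * Reading the file reports the current oom_kill_disable and under_oom
 * state, as produced by mem_cgroup_oom_control_read() above.
 */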
5810 
5811 #ifdef CONFIG_MEMCG_KMEM
5812 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5813 {
5814 	int ret;
5815 
5816 	memcg->kmemcg_id = -1;
5817 	ret = memcg_propagate_kmem(memcg);
5818 	if (ret)
5819 		return ret;
5820 
5821 	return mem_cgroup_sockets_init(memcg, ss);
5822 }
5823 
5824 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5825 {
5826 	mem_cgroup_sockets_destroy(memcg);
5827 }
5828 
5829 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5830 {
5831 	if (!memcg_kmem_is_active(memcg))
5832 		return;
5833 
5834 	/*
5835 	 * kmem charges can outlive the cgroup. In the case of slab
5836 	 * pages, for instance, a page can contain objects from various
5837 	 * processes. As we refrain from taking a reference for every
5838 	 * such allocation, we have to be careful when doing uncharge
5839 	 * (see memcg_uncharge_kmem) and here during offlining.
5840 	 *
5841 	 * The idea is that only the _last_ uncharge which sees
5842 	 * the dead memcg will drop the last reference. An additional
5843 	 * reference is taken here before the group is marked dead
5844 	 * which is then paired with a css_put either during uncharge or here.
5845 	 *
5846 	 * Although this might sound strange, as this path is called from
5847 	 * css_offline() when the reference might have dropped down to 0
5848 	 * and shouldn't be incremented anymore (css_tryget would fail),
5849 	 * we do not have other options because of the lifetime of the
5850 	 * kmem allocations.
5851 	 */
5852 	css_get(&memcg->css);
5853 
5854 	memcg_kmem_mark_dead(memcg);
5855 
5856 	if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5857 		return;
5858 
5859 	if (memcg_kmem_test_and_clear_dead(memcg))
5860 		css_put(&memcg->css);
5861 }
5862 #else
5863 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5864 {
5865 	return 0;
5866 }
5867 
5868 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5869 {
5870 }
5871 
5872 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5873 {
5874 }
5875 #endif
5876 
5877 /*
5878  * DO NOT USE IN NEW FILES.
5879  *
5880  * "cgroup.event_control" implementation.
5881  *
5882  * This is way over-engineered.  It tries to support fully configurable
5883  * events for each user.  Such a level of flexibility is completely
5884  * unnecessary especially in the light of the planned unified hierarchy.
5885  *
5886  * Please deprecate this and replace with something simpler if at all
5887  * possible.
5888  */
5889 
5890 /*
5891  * Unregister event and free resources.
5892  *
5893  * Gets called from workqueue.
5894  */
5895 static void memcg_event_remove(struct work_struct *work)
5896 {
5897 	struct mem_cgroup_event *event =
5898 		container_of(work, struct mem_cgroup_event, remove);
5899 	struct mem_cgroup *memcg = event->memcg;
5900 
5901 	remove_wait_queue(event->wqh, &event->wait);
5902 
5903 	event->unregister_event(memcg, event->eventfd);
5904 
5905 	/* Notify userspace the event is going away. */
5906 	eventfd_signal(event->eventfd, 1);
5907 
5908 	eventfd_ctx_put(event->eventfd);
5909 	kfree(event);
5910 	css_put(&memcg->css);
5911 }
5912 
5913 /*
5914  * Gets called on POLLHUP on eventfd when user closes it.
5915  *
5916  * Called with wqh->lock held and interrupts disabled.
5917  */
5918 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
5919 			    int sync, void *key)
5920 {
5921 	struct mem_cgroup_event *event =
5922 		container_of(wait, struct mem_cgroup_event, wait);
5923 	struct mem_cgroup *memcg = event->memcg;
5924 	unsigned long flags = (unsigned long)key;
5925 
5926 	if (flags & POLLHUP) {
5927 		/*
5928 		 * If the event has been detached at cgroup removal, we
5929 		 * can simply return knowing the other side will clean up
5930 		 * for us.
5931 		 *
5932 		 * We can't race against event freeing since the other
5933 		 * side will require wqh->lock via remove_wait_queue(),
5934 		 * which we hold.
5935 		 */
5936 		spin_lock(&memcg->event_list_lock);
5937 		if (!list_empty(&event->list)) {
5938 			list_del_init(&event->list);
5939 			/*
5940 			 * We are in atomic context, but cgroup_event_remove()
5941 			 * may sleep, so we have to call it in workqueue.
5942 			 */
5943 			schedule_work(&event->remove);
5944 		}
5945 		spin_unlock(&memcg->event_list_lock);
5946 	}
5947 
5948 	return 0;
5949 }
5950 
5951 static void memcg_event_ptable_queue_proc(struct file *file,
5952 		wait_queue_head_t *wqh, poll_table *pt)
5953 {
5954 	struct mem_cgroup_event *event =
5955 		container_of(pt, struct mem_cgroup_event, pt);
5956 
5957 	event->wqh = wqh;
5958 	add_wait_queue(wqh, &event->wait);
5959 }
5960 
5961 /*
5962  * DO NOT USE IN NEW FILES.
5963  *
5964  * Parse input and register a new cgroup event handler.
5965  *
5966  * Input must be in format '<event_fd> <control_fd> <args>'.
5967  * Interpretation of args is defined by control file implementation.
5968  */
5969 static int memcg_write_event_control(struct cgroup_subsys_state *css,
5970 				     struct cftype *cft, char *buffer)
5971 {
5972 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5973 	struct mem_cgroup_event *event;
5974 	struct cgroup_subsys_state *cfile_css;
5975 	unsigned int efd, cfd;
5976 	struct fd efile;
5977 	struct fd cfile;
5978 	const char *name;
5979 	char *endp;
5980 	int ret;
5981 
5982 	efd = simple_strtoul(buffer, &endp, 10);
5983 	if (*endp != ' ')
5984 		return -EINVAL;
5985 	buffer = endp + 1;
5986 
5987 	cfd = simple_strtoul(buffer, &endp, 10);
5988 	if ((*endp != ' ') && (*endp != '\0'))
5989 		return -EINVAL;
5990 	buffer = endp + 1;
5991 
5992 	event = kzalloc(sizeof(*event), GFP_KERNEL);
5993 	if (!event)
5994 		return -ENOMEM;
5995 
5996 	event->memcg = memcg;
5997 	INIT_LIST_HEAD(&event->list);
5998 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5999 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
6000 	INIT_WORK(&event->remove, memcg_event_remove);
6001 
6002 	efile = fdget(efd);
6003 	if (!efile.file) {
6004 		ret = -EBADF;
6005 		goto out_kfree;
6006 	}
6007 
6008 	event->eventfd = eventfd_ctx_fileget(efile.file);
6009 	if (IS_ERR(event->eventfd)) {
6010 		ret = PTR_ERR(event->eventfd);
6011 		goto out_put_efile;
6012 	}
6013 
6014 	cfile = fdget(cfd);
6015 	if (!cfile.file) {
6016 		ret = -EBADF;
6017 		goto out_put_eventfd;
6018 	}
6019 
6020 	/* the process needs read permission on the control file */
6021 	/* AV: shouldn't we check that it's been opened for read instead? */
6022 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
6023 	if (ret < 0)
6024 		goto out_put_cfile;
6025 
6026 	/*
6027 	 * Determine the event callbacks and set them in @event.  This used
6028 	 * to be done via struct cftype but cgroup core no longer knows
6029 	 * about these events.  The following is crude but the whole thing
6030 	 * is for compatibility anyway.
6031 	 *
6032 	 * DO NOT ADD NEW FILES.
6033 	 */
6034 	name = cfile.file->f_dentry->d_name.name;
6035 
6036 	if (!strcmp(name, "memory.usage_in_bytes")) {
6037 		event->register_event = mem_cgroup_usage_register_event;
6038 		event->unregister_event = mem_cgroup_usage_unregister_event;
6039 	} else if (!strcmp(name, "memory.oom_control")) {
6040 		event->register_event = mem_cgroup_oom_register_event;
6041 		event->unregister_event = mem_cgroup_oom_unregister_event;
6042 	} else if (!strcmp(name, "memory.pressure_level")) {
6043 		event->register_event = vmpressure_register_event;
6044 		event->unregister_event = vmpressure_unregister_event;
6045 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
6046 		event->register_event = memsw_cgroup_usage_register_event;
6047 		event->unregister_event = memsw_cgroup_usage_unregister_event;
6048 	} else {
6049 		ret = -EINVAL;
6050 		goto out_put_cfile;
6051 	}
6052 
6053 	/*
6054 	 * Verify that @cfile belongs to @css.  Also, remaining events are
6055 	 * automatically removed on cgroup destruction but the removal is
6056 	 * asynchronous, so take an extra ref on @css.
6057 	 */
6058 	cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
6059 					&memory_cgrp_subsys);
6060 	ret = -EINVAL;
6061 	if (IS_ERR(cfile_css))
6062 		goto out_put_cfile;
6063 	if (cfile_css != css) {
6064 		css_put(cfile_css);
6065 		goto out_put_cfile;
6066 	}
6067 
6068 	ret = event->register_event(memcg, event->eventfd, buffer);
6069 	if (ret)
6070 		goto out_put_css;
6071 
6072 	efile.file->f_op->poll(efile.file, &event->pt);
6073 
6074 	spin_lock(&memcg->event_list_lock);
6075 	list_add(&event->list, &memcg->event_list);
6076 	spin_unlock(&memcg->event_list_lock);
6077 
6078 	fdput(cfile);
6079 	fdput(efile);
6080 
6081 	return 0;
6082 
6083 out_put_css:
6084 	css_put(css);
6085 out_put_cfile:
6086 	fdput(cfile);
6087 out_put_eventfd:
6088 	eventfd_ctx_put(event->eventfd);
6089 out_put_efile:
6090 	fdput(efile);
6091 out_kfree:
6092 	kfree(event);
6093 
6094 	return ret;
6095 }
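/*
 * A minimal userspace sketch of the protocol parsed above; the cgroup
 * path and the 50M threshold are illustrative, and error handling is
 * omitted.  The string written to cgroup.event_control is
 * "<event_fd> <control_fd> <args>", where <args> here is a usage
 * threshold in bytes (memparse suffixes such as "50M" also work); the
 * final read() blocks until a registered threshold is crossed:
 *
 *	#include <sys/eventfd.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *dir = "/sys/fs/cgroup/memory/grp";
 *		char path[256], buf[64];
 *		uint64_t ticks;
 *		int efd, cfd, ecfd;
 *
 *		efd = eventfd(0, 0);
 *		snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", dir);
 *		cfd = open(path, O_RDONLY);
 *		snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
 *		ecfd = open(path, O_WRONLY);
 *
 *		snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd,
 *			 (unsigned long long)(50 << 20));
 *		write(ecfd, buf, strlen(buf));
 *
 *		read(efd, &ticks, sizeof(ticks));
 *		return 0;
 *	}
 */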
6096 
6097 static struct cftype mem_cgroup_files[] = {
6098 	{
6099 		.name = "usage_in_bytes",
6100 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
6101 		.read_u64 = mem_cgroup_read_u64,
6102 	},
6103 	{
6104 		.name = "max_usage_in_bytes",
6105 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6106 		.trigger = mem_cgroup_reset,
6107 		.read_u64 = mem_cgroup_read_u64,
6108 	},
6109 	{
6110 		.name = "limit_in_bytes",
6111 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
6112 		.write_string = mem_cgroup_write,
6113 		.read_u64 = mem_cgroup_read_u64,
6114 	},
6115 	{
6116 		.name = "soft_limit_in_bytes",
6117 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
6118 		.write_string = mem_cgroup_write,
6119 		.read_u64 = mem_cgroup_read_u64,
6120 	},
6121 	{
6122 		.name = "failcnt",
6123 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6124 		.trigger = mem_cgroup_reset,
6125 		.read_u64 = mem_cgroup_read_u64,
6126 	},
6127 	{
6128 		.name = "stat",
6129 		.seq_show = memcg_stat_show,
6130 	},
6131 	{
6132 		.name = "force_empty",
6133 		.trigger = mem_cgroup_force_empty_write,
6134 	},
6135 	{
6136 		.name = "use_hierarchy",
6137 		.flags = CFTYPE_INSANE,
6138 		.write_u64 = mem_cgroup_hierarchy_write,
6139 		.read_u64 = mem_cgroup_hierarchy_read,
6140 	},
6141 	{
6142 		.name = "cgroup.event_control",		/* XXX: for compat */
6143 		.write_string = memcg_write_event_control,
6144 		.flags = CFTYPE_NO_PREFIX,
6145 		.mode = S_IWUGO,
6146 	},
6147 	{
6148 		.name = "swappiness",
6149 		.read_u64 = mem_cgroup_swappiness_read,
6150 		.write_u64 = mem_cgroup_swappiness_write,
6151 	},
6152 	{
6153 		.name = "move_charge_at_immigrate",
6154 		.read_u64 = mem_cgroup_move_charge_read,
6155 		.write_u64 = mem_cgroup_move_charge_write,
6156 	},
6157 	{
6158 		.name = "oom_control",
6159 		.seq_show = mem_cgroup_oom_control_read,
6160 		.write_u64 = mem_cgroup_oom_control_write,
6161 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
6162 	},
6163 	{
6164 		.name = "pressure_level",
6165 	},
6166 #ifdef CONFIG_NUMA
6167 	{
6168 		.name = "numa_stat",
6169 		.seq_show = memcg_numa_stat_show,
6170 	},
6171 #endif
6172 #ifdef CONFIG_MEMCG_KMEM
6173 	{
6174 		.name = "kmem.limit_in_bytes",
6175 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
6176 		.write_string = mem_cgroup_write,
6177 		.read_u64 = mem_cgroup_read_u64,
6178 	},
6179 	{
6180 		.name = "kmem.usage_in_bytes",
6181 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
6182 		.read_u64 = mem_cgroup_read_u64,
6183 	},
6184 	{
6185 		.name = "kmem.failcnt",
6186 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6187 		.trigger = mem_cgroup_reset,
6188 		.read_u64 = mem_cgroup_read_u64,
6189 	},
6190 	{
6191 		.name = "kmem.max_usage_in_bytes",
6192 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6193 		.trigger = mem_cgroup_reset,
6194 		.read_u64 = mem_cgroup_read_u64,
6195 	},
6196 #ifdef CONFIG_SLABINFO
6197 	{
6198 		.name = "kmem.slabinfo",
6199 		.seq_show = mem_cgroup_slabinfo_read,
6200 	},
6201 #endif
6202 #endif
6203 	{ },	/* terminate */
6204 };
6205 
6206 #ifdef CONFIG_MEMCG_SWAP
6207 static struct cftype memsw_cgroup_files[] = {
6208 	{
6209 		.name = "memsw.usage_in_bytes",
6210 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6211 		.read_u64 = mem_cgroup_read_u64,
6212 	},
6213 	{
6214 		.name = "memsw.max_usage_in_bytes",
6215 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6216 		.trigger = mem_cgroup_reset,
6217 		.read_u64 = mem_cgroup_read_u64,
6218 	},
6219 	{
6220 		.name = "memsw.limit_in_bytes",
6221 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6222 		.write_string = mem_cgroup_write,
6223 		.read_u64 = mem_cgroup_read_u64,
6224 	},
6225 	{
6226 		.name = "memsw.failcnt",
6227 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6228 		.trigger = mem_cgroup_reset,
6229 		.read_u64 = mem_cgroup_read_u64,
6230 	},
6231 	{ },	/* terminate */
6232 };
6233 #endif
6234 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6235 {
6236 	struct mem_cgroup_per_node *pn;
6237 	struct mem_cgroup_per_zone *mz;
6238 	int zone, tmp = node;
6239 	/*
6240 	 * This routine is called against all possible nodes.
6241 	 * But it's a BUG to call kmalloc() against an offline node.
6242 	 *
6243 	 * TODO: this routine can waste a lot of memory for nodes which will
6244 	 *       never be onlined. It's better to use a memory hotplug callback
6245 	 *       function.
6246 	 */
6247 	if (!node_state(node, N_NORMAL_MEMORY))
6248 		tmp = -1;
6249 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6250 	if (!pn)
6251 		return 1;
6252 
6253 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6254 		mz = &pn->zoneinfo[zone];
6255 		lruvec_init(&mz->lruvec);
6256 		mz->usage_in_excess = 0;
6257 		mz->on_tree = false;
6258 		mz->memcg = memcg;
6259 	}
6260 	memcg->nodeinfo[node] = pn;
6261 	return 0;
6262 }
6263 
6264 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6265 {
6266 	kfree(memcg->nodeinfo[node]);
6267 }
6268 
6269 static struct mem_cgroup *mem_cgroup_alloc(void)
6270 {
6271 	struct mem_cgroup *memcg;
6272 	size_t size;
6273 
6274 	size = sizeof(struct mem_cgroup);
6275 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
6276 
6277 	memcg = kzalloc(size, GFP_KERNEL);
6278 	if (!memcg)
6279 		return NULL;
6280 
6281 	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6282 	if (!memcg->stat)
6283 		goto out_free;
6284 	spin_lock_init(&memcg->pcp_counter_lock);
6285 	return memcg;
6286 
6287 out_free:
6288 	kfree(memcg);
6289 	return NULL;
6290 }
6291 
6292 /*
6293  * When destroying a mem_cgroup, references from swap_cgroup can remain.
6294  * (scanning all at force_empty is too costly...)
6295  *
6296  * Instead of clearing all references at force_empty, we remember
6297  * the number of references from swap_cgroup and free the mem_cgroup when
6298  * it goes down to 0.
6299  *
6300  * Removal of cgroup itself succeeds regardless of refs from swap.
6301  */
6302 
6303 static void __mem_cgroup_free(struct mem_cgroup *memcg)
6304 {
6305 	int node;
6306 
6307 	mem_cgroup_remove_from_trees(memcg);
6308 
6309 	for_each_node(node)
6310 		free_mem_cgroup_per_zone_info(memcg, node);
6311 
6312 	free_percpu(memcg->stat);
6313 
6314 	/*
6315 	 * We need to make sure that (at least for now), the jump label
6316 	 * destruction code runs outside of the cgroup lock. This is because
6317 	 * get_online_cpus(), which is called from the static_branch update,
6318 	 * can't be called inside the cgroup_lock. cpusets are the ones
6319 	 * enforcing this dependency, so if they ever change, we might as well.
6320 	 *
6321 	 * schedule_work() will guarantee this happens. Be careful if you need
6322 	 * to move this code around, and make sure it is outside
6323 	 * the cgroup_lock.
6324 	 */
6325 	disarm_static_keys(memcg);
6326 	kfree(memcg);
6327 }
6328 
6329 /*
6330  * Returns the parent mem_cgroup in the memcg hierarchy with hierarchy enabled.
6331  */
6332 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6333 {
6334 	if (!memcg->res.parent)
6335 		return NULL;
6336 	return mem_cgroup_from_res_counter(memcg->res.parent, res);
6337 }
6338 EXPORT_SYMBOL(parent_mem_cgroup);
6339 
6340 static void __init mem_cgroup_soft_limit_tree_init(void)
6341 {
6342 	struct mem_cgroup_tree_per_node *rtpn;
6343 	struct mem_cgroup_tree_per_zone *rtpz;
6344 	int tmp, node, zone;
6345 
6346 	for_each_node(node) {
6347 		tmp = node;
6348 		if (!node_state(node, N_NORMAL_MEMORY))
6349 			tmp = -1;
6350 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6351 		BUG_ON(!rtpn);
6352 
6353 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6354 
6355 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6356 			rtpz = &rtpn->rb_tree_per_zone[zone];
6357 			rtpz->rb_root = RB_ROOT;
6358 			spin_lock_init(&rtpz->lock);
6359 		}
6360 	}
6361 }
6362 
6363 static struct cgroup_subsys_state * __ref
6364 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6365 {
6366 	struct mem_cgroup *memcg;
6367 	long error = -ENOMEM;
6368 	int node;
6369 
6370 	memcg = mem_cgroup_alloc();
6371 	if (!memcg)
6372 		return ERR_PTR(error);
6373 
6374 	for_each_node(node)
6375 		if (alloc_mem_cgroup_per_zone_info(memcg, node))
6376 			goto free_out;
6377 
6378 	/* root ? */
6379 	if (parent_css == NULL) {
6380 		root_mem_cgroup = memcg;
6381 		res_counter_init(&memcg->res, NULL);
6382 		res_counter_init(&memcg->memsw, NULL);
6383 		res_counter_init(&memcg->kmem, NULL);
6384 	}
6385 
6386 	memcg->last_scanned_node = MAX_NUMNODES;
6387 	INIT_LIST_HEAD(&memcg->oom_notify);
6388 	memcg->move_charge_at_immigrate = 0;
6389 	mutex_init(&memcg->thresholds_lock);
6390 	spin_lock_init(&memcg->move_lock);
6391 	vmpressure_init(&memcg->vmpressure);
6392 	INIT_LIST_HEAD(&memcg->event_list);
6393 	spin_lock_init(&memcg->event_list_lock);
6394 
6395 	return &memcg->css;
6396 
6397 free_out:
6398 	__mem_cgroup_free(memcg);
6399 	return ERR_PTR(error);
6400 }
6401 
6402 static int
6403 mem_cgroup_css_online(struct cgroup_subsys_state *css)
6404 {
6405 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6406 	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
6407 
6408 	if (css->cgroup->id > MEM_CGROUP_ID_MAX)
6409 		return -ENOSPC;
6410 
6411 	if (!parent)
6412 		return 0;
6413 
6414 	mutex_lock(&memcg_create_mutex);
6415 
6416 	memcg->use_hierarchy = parent->use_hierarchy;
6417 	memcg->oom_kill_disable = parent->oom_kill_disable;
6418 	memcg->swappiness = mem_cgroup_swappiness(parent);
6419 
6420 	if (parent->use_hierarchy) {
6421 		res_counter_init(&memcg->res, &parent->res);
6422 		res_counter_init(&memcg->memsw, &parent->memsw);
6423 		res_counter_init(&memcg->kmem, &parent->kmem);
6424 
6425 		/*
6426 		 * No need to take a reference to the parent because cgroup
6427 		 * core guarantees its existence.
6428 		 */
6429 	} else {
6430 		res_counter_init(&memcg->res, NULL);
6431 		res_counter_init(&memcg->memsw, NULL);
6432 		res_counter_init(&memcg->kmem, NULL);
6433 		/*
6434 		 * A deeper hierarchy with use_hierarchy == false doesn't make
6435 		 * much sense, so let the cgroup subsystem know about this
6436 		 * unfortunate state in our controller.
6437 		 */
6438 		if (parent != root_mem_cgroup)
6439 			memory_cgrp_subsys.broken_hierarchy = true;
6440 	}
6441 	mutex_unlock(&memcg_create_mutex);
6442 
6443 	return memcg_init_kmem(memcg, &memory_cgrp_subsys);
6444 }
6445 
6446 /*
6447  * Announce to all parents that a group from their hierarchy is gone.
6448  */
6449 static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6450 {
6451 	struct mem_cgroup *parent = memcg;
6452 
6453 	while ((parent = parent_mem_cgroup(parent)))
6454 		mem_cgroup_iter_invalidate(parent);
6455 
6456 	/*
6457 	 * If the root memcg is not hierarchical we have to check it
6458 	 * explicitly.
6459 	 */
6460 	if (!root_mem_cgroup->use_hierarchy)
6461 		mem_cgroup_iter_invalidate(root_mem_cgroup);
6462 }
6463 
6464 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6465 {
6466 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6467 	struct mem_cgroup_event *event, *tmp;
6468 	struct cgroup_subsys_state *iter;
6469 
6470 	/*
6471 	 * Unregister events and notify userspace.
6472 	 * Notify userspace about cgroup removal only after rmdir of the cgroup
6473 	 * directory to avoid a race between userspace and kernelspace.
6474 	 */
6475 	spin_lock(&memcg->event_list_lock);
6476 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
6477 		list_del_init(&event->list);
6478 		schedule_work(&event->remove);
6479 	}
6480 	spin_unlock(&memcg->event_list_lock);
6481 
6482 	kmem_cgroup_css_offline(memcg);
6483 
6484 	mem_cgroup_invalidate_reclaim_iterators(memcg);
6485 
6486 	/*
6487 	 * This requires that offlining is serialized.  Right now that is
6488 	 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
6489 	 */
6490 	css_for_each_descendant_post(iter, css)
6491 		mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6492 
6493 	mem_cgroup_destroy_all_caches(memcg);
6494 	vmpressure_cleanup(&memcg->vmpressure);
6495 }
6496 
6497 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
6498 {
6499 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6500 	/*
6501 	 * XXX: css_offline() would be where we should reparent all
6502 	 * memory to prepare the cgroup for destruction.  However,
6503 	 * memcg does not do css_tryget() and res_counter charging
6504 	 * under the same RCU lock region, which means that charging
6505 	 * could race with offlining.  Offlining only happens to
6506 	 * cgroups with no tasks in them but charges can show up
6507 	 * without any tasks from the swapin path when the target
6508 	 * memcg is looked up from the swapout record and not from the
6509 	 * current task as it usually is.  A race like this can leak
6510 	 * charges and put pages with stale cgroup pointers into
6511 	 * circulation:
6512 	 *
6513 	 * #0                        #1
6514 	 *                           lookup_swap_cgroup_id()
6515 	 *                           rcu_read_lock()
6516 	 *                           mem_cgroup_lookup()
6517 	 *                           css_tryget()
6518 	 *                           rcu_read_unlock()
6519 	 * disable css_tryget()
6520 	 * call_rcu()
6521 	 *   offline_css()
6522 	 *     reparent_charges()
6523 	 *                           res_counter_charge()
6524 	 *                           css_put()
6525 	 *                             css_free()
6526 	 *                           pc->mem_cgroup = dead memcg
6527 	 *                           add page to lru
6528 	 *
6529 	 * The bulk of the charges are still moved in offline_css() to
6530 	 * avoid pinning a lot of pages in case a long-term reference
6531 	 * like a swapout record is deferring the css_free() to long
6532 	 * after offlining.  But this makes sure we catch any charges
6533 	 * made after offlining:
6534 	 */
6535 	mem_cgroup_reparent_charges(memcg);
6536 
6537 	memcg_destroy_kmem(memcg);
6538 	__mem_cgroup_free(memcg);
6539 }
6540 
6541 #ifdef CONFIG_MMU
6542 /* Handlers for move charge at task migration. */
6543 #define PRECHARGE_COUNT_AT_ONCE	256
6544 static int mem_cgroup_do_precharge(unsigned long count)
6545 {
6546 	int ret = 0;
6547 	int batch_count = PRECHARGE_COUNT_AT_ONCE;
6548 	struct mem_cgroup *memcg = mc.to;
6549 
6550 	if (mem_cgroup_is_root(memcg)) {
6551 		mc.precharge += count;
6552 		/* we don't need css_get for root */
6553 		return ret;
6554 	}
6555 	/* try to charge at once */
6556 	if (count > 1) {
6557 		struct res_counter *dummy;
6558 		/*
6559 		 * "memcg" cannot be under rmdir() because we've already checked
6560 		 * by cgroup_lock_live_cgroup() that it is not removed and we
6561 		 * are still under the same cgroup_mutex. So we can postpone
6562 		 * css_get().
6563 		 */
6564 		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
6565 			goto one_by_one;
6566 		if (do_swap_account && res_counter_charge(&memcg->memsw,
6567 						PAGE_SIZE * count, &dummy)) {
6568 			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
6569 			goto one_by_one;
6570 		}
6571 		mc.precharge += count;
6572 		return ret;
6573 	}
6574 one_by_one:
6575 	/* fall back to one by one charge */
6576 	while (count--) {
6577 		if (signal_pending(current)) {
6578 			ret = -EINTR;
6579 			break;
6580 		}
6581 		if (!batch_count--) {
6582 			batch_count = PRECHARGE_COUNT_AT_ONCE;
6583 			cond_resched();
6584 		}
6585 		ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
6586 		if (ret)
6587 			/* mem_cgroup_clear_mc() will do uncharge later */
6588 			return ret;
6589 		mc.precharge++;
6590 	}
6591 	return ret;
6592 }
6593 
6594 /**
6595  * get_mctgt_type - get target type of moving charge
6596  * @vma: the vma to which the pte to be checked belongs
6597  * @addr: the address corresponding to the pte to be checked
6598  * @ptent: the pte to be checked
6599  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
6600  *
6601  * Returns
6602  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
6603  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
6604  *     move charge. If @target is not NULL, the page is stored in target->page
6605  *     with an extra refcount taken (callers should handle it).
6606  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
6607  *     target for charge migration. If @target is not NULL, the entry is stored
6608  *     in target->ent.
6609  *
6610  * Called with pte lock held.
6611  */
6612 union mc_target {
6613 	struct page	*page;
6614 	swp_entry_t	ent;
6615 };
6616 
6617 enum mc_target_type {
6618 	MC_TARGET_NONE = 0,
6619 	MC_TARGET_PAGE,
6620 	MC_TARGET_SWAP,
6621 };
6622 
6623 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6624 						unsigned long addr, pte_t ptent)
6625 {
6626 	struct page *page = vm_normal_page(vma, addr, ptent);
6627 
6628 	if (!page || !page_mapped(page))
6629 		return NULL;
6630 	if (PageAnon(page)) {
6631 		/* we don't move shared anon */
6632 		if (!move_anon())
6633 			return NULL;
6634 	} else if (!move_file())
6635 		/* we ignore mapcount for file pages */
6636 		return NULL;
6637 	if (!get_page_unless_zero(page))
6638 		return NULL;
6639 
6640 	return page;
6641 }
6642 
6643 #ifdef CONFIG_SWAP
6644 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6645 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6646 {
6647 	struct page *page = NULL;
6648 	swp_entry_t ent = pte_to_swp_entry(ptent);
6649 
6650 	if (!move_anon() || non_swap_entry(ent))
6651 		return NULL;
6652 	/*
6653 	 * Because lookup_swap_cache() updates some statistics counters,
6654 	 * we call find_get_page() with swapper_space directly.
6655 	 */
6656 	page = find_get_page(swap_address_space(ent), ent.val);
6657 	if (do_swap_account)
6658 		entry->val = ent.val;
6659 
6660 	return page;
6661 }
6662 #else
6663 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6664 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6665 {
6666 	return NULL;
6667 }
6668 #endif
6669 
6670 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6671 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6672 {
6673 	struct page *page = NULL;
6674 	struct address_space *mapping;
6675 	pgoff_t pgoff;
6676 
6677 	if (!vma->vm_file) /* anonymous vma */
6678 		return NULL;
6679 	if (!move_file())
6680 		return NULL;
6681 
6682 	mapping = vma->vm_file->f_mapping;
6683 	if (pte_none(ptent))
6684 		pgoff = linear_page_index(vma, addr);
6685 	else /* pte_file(ptent) is true */
6686 		pgoff = pte_to_pgoff(ptent);
6687 
6688 	/* page is moved even if it's not RSS of this task (page-faulted). */
6689 	page = find_get_page(mapping, pgoff);
6690 
6691 #ifdef CONFIG_SWAP
6692 	/* shmem/tmpfs may report page out on swap: account for that too. */
6693 	if (radix_tree_exceptional_entry(page)) {
6694 		swp_entry_t swap = radix_to_swp_entry(page);
6695 		if (do_swap_account)
6696 			*entry = swap;
6697 		page = find_get_page(swap_address_space(swap), swap.val);
6698 	}
6699 #endif
6700 	return page;
6701 }
6702 
6703 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6704 		unsigned long addr, pte_t ptent, union mc_target *target)
6705 {
6706 	struct page *page = NULL;
6707 	struct page_cgroup *pc;
6708 	enum mc_target_type ret = MC_TARGET_NONE;
6709 	swp_entry_t ent = { .val = 0 };
6710 
6711 	if (pte_present(ptent))
6712 		page = mc_handle_present_pte(vma, addr, ptent);
6713 	else if (is_swap_pte(ptent))
6714 		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
6715 	else if (pte_none(ptent) || pte_file(ptent))
6716 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
6717 
6718 	if (!page && !ent.val)
6719 		return ret;
6720 	if (page) {
6721 		pc = lookup_page_cgroup(page);
6722 		/*
6723 		 * Do only a loose check w/o the page_cgroup lock.
6724 		 * mem_cgroup_move_account() checks whether the pc is valid under
6725 		 * the lock.
6726 		 */
6727 		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6728 			ret = MC_TARGET_PAGE;
6729 			if (target)
6730 				target->page = page;
6731 		}
6732 		if (!ret || !target)
6733 			put_page(page);
6734 	}
6735 	/* There is a swap entry and a page doesn't exist or isn't charged */
6736 	if (ent.val && !ret &&
6737 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6738 		ret = MC_TARGET_SWAP;
6739 		if (target)
6740 			target->ent = ent;
6741 	}
6742 	return ret;
6743 }
6744 
6745 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6746 /*
6747  * We don't consider swapping or file mapped pages because THP does not
6748  * support them for now.
6749  * Caller should make sure that pmd_trans_huge(pmd) is true.
6750  */
6751 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6752 		unsigned long addr, pmd_t pmd, union mc_target *target)
6753 {
6754 	struct page *page = NULL;
6755 	struct page_cgroup *pc;
6756 	enum mc_target_type ret = MC_TARGET_NONE;
6757 
6758 	page = pmd_page(pmd);
6759 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6760 	if (!move_anon())
6761 		return ret;
6762 	pc = lookup_page_cgroup(page);
6763 	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6764 		ret = MC_TARGET_PAGE;
6765 		if (target) {
6766 			get_page(page);
6767 			target->page = page;
6768 		}
6769 	}
6770 	return ret;
6771 }
6772 #else
6773 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6774 		unsigned long addr, pmd_t pmd, union mc_target *target)
6775 {
6776 	return MC_TARGET_NONE;
6777 }
6778 #endif
6779 
6780 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6781 					unsigned long addr, unsigned long end,
6782 					struct mm_walk *walk)
6783 {
6784 	struct vm_area_struct *vma = walk->private;
6785 	pte_t *pte;
6786 	spinlock_t *ptl;
6787 
6788 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6789 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6790 			mc.precharge += HPAGE_PMD_NR;
6791 		spin_unlock(ptl);
6792 		return 0;
6793 	}
6794 
6795 	if (pmd_trans_unstable(pmd))
6796 		return 0;
6797 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6798 	for (; addr != end; pte++, addr += PAGE_SIZE)
6799 		if (get_mctgt_type(vma, addr, *pte, NULL))
6800 			mc.precharge++;	/* increment precharge temporarily */
6801 	pte_unmap_unlock(pte - 1, ptl);
6802 	cond_resched();
6803 
6804 	return 0;
6805 }
6806 
6807 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6808 {
6809 	unsigned long precharge;
6810 	struct vm_area_struct *vma;
6811 
6812 	down_read(&mm->mmap_sem);
6813 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
6814 		struct mm_walk mem_cgroup_count_precharge_walk = {
6815 			.pmd_entry = mem_cgroup_count_precharge_pte_range,
6816 			.mm = mm,
6817 			.private = vma,
6818 		};
6819 		if (is_vm_hugetlb_page(vma))
6820 			continue;
6821 		walk_page_range(vma->vm_start, vma->vm_end,
6822 					&mem_cgroup_count_precharge_walk);
6823 	}
6824 	up_read(&mm->mmap_sem);
6825 
6826 	precharge = mc.precharge;
6827 	mc.precharge = 0;
6828 
6829 	return precharge;
6830 }
6831 
6832 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6833 {
6834 	unsigned long precharge = mem_cgroup_count_precharge(mm);
6835 
6836 	VM_BUG_ON(mc.moving_task);
6837 	mc.moving_task = current;
6838 	return mem_cgroup_do_precharge(precharge);
6839 }
6840 
6841 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6842 static void __mem_cgroup_clear_mc(void)
6843 {
6844 	struct mem_cgroup *from = mc.from;
6845 	struct mem_cgroup *to = mc.to;
6846 	int i;
6847 
6848 	/* we must uncharge all the leftover precharges from mc.to */
6849 	if (mc.precharge) {
6850 		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
6851 		mc.precharge = 0;
6852 	}
6853 	/*
6854 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6855 	 * we must uncharge here.
6856 	 */
6857 	if (mc.moved_charge) {
6858 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6859 		mc.moved_charge = 0;
6860 	}
6861 	/* we must fixup refcnts and charges */
6862 	if (mc.moved_swap) {
6863 		/* uncharge swap account from the old cgroup */
6864 		if (!mem_cgroup_is_root(mc.from))
6865 			res_counter_uncharge(&mc.from->memsw,
6866 						PAGE_SIZE * mc.moved_swap);
6867 
6868 		for (i = 0; i < mc.moved_swap; i++)
6869 			css_put(&mc.from->css);
6870 
6871 		if (!mem_cgroup_is_root(mc.to)) {
6872 			/*
6873 			 * we charged both to->res and to->memsw, so we should
6874 			 * uncharge to->res.
6875 			 */
6876 			res_counter_uncharge(&mc.to->res,
6877 						PAGE_SIZE * mc.moved_swap);
6878 		}
6879 		/* we've already done css_get(mc.to) */
6880 		mc.moved_swap = 0;
6881 	}
6882 	memcg_oom_recover(from);
6883 	memcg_oom_recover(to);
6884 	wake_up_all(&mc.waitq);
6885 }
6886 
6887 static void mem_cgroup_clear_mc(void)
6888 {
6889 	struct mem_cgroup *from = mc.from;
6890 
6891 	/*
6892 	 * we must clear moving_task before waking up waiters at the end of
6893 	 * task migration.
6894 	 */
6895 	mc.moving_task = NULL;
6896 	__mem_cgroup_clear_mc();
6897 	spin_lock(&mc.lock);
6898 	mc.from = NULL;
6899 	mc.to = NULL;
6900 	spin_unlock(&mc.lock);
6901 	mem_cgroup_end_move(from);
6902 }
6903 
6904 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
6905 				 struct cgroup_taskset *tset)
6906 {
6907 	struct task_struct *p = cgroup_taskset_first(tset);
6908 	int ret = 0;
6909 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6910 	unsigned long move_charge_at_immigrate;
6911 
6912 	/*
6913 	 * We are now committed to this value whatever it is. Changes in this
6914 	 * tunable will only affect upcoming migrations, not the current one.
6915 	 * So we need to save it, and keep it going.
6916 	 */
6917 	move_charge_at_immigrate  = memcg->move_charge_at_immigrate;
6918 	if (move_charge_at_immigrate) {
6919 		struct mm_struct *mm;
6920 		struct mem_cgroup *from = mem_cgroup_from_task(p);
6921 
6922 		VM_BUG_ON(from == memcg);
6923 
6924 		mm = get_task_mm(p);
6925 		if (!mm)
6926 			return 0;
6927 		/* We move charges only when we move the owner of the mm */
6928 		if (mm->owner == p) {
6929 			VM_BUG_ON(mc.from);
6930 			VM_BUG_ON(mc.to);
6931 			VM_BUG_ON(mc.precharge);
6932 			VM_BUG_ON(mc.moved_charge);
6933 			VM_BUG_ON(mc.moved_swap);
6934 			mem_cgroup_start_move(from);
6935 			spin_lock(&mc.lock);
6936 			mc.from = from;
6937 			mc.to = memcg;
6938 			mc.immigrate_flags = move_charge_at_immigrate;
6939 			spin_unlock(&mc.lock);
6940 			/* We set mc.moving_task later */
6941 
6942 			ret = mem_cgroup_precharge_mc(mm);
6943 			if (ret)
6944 				mem_cgroup_clear_mc();
6945 		}
6946 		mmput(mm);
6947 	}
6948 	return ret;
6949 }
6950 
6951 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
6952 				     struct cgroup_taskset *tset)
6953 {
6954 	mem_cgroup_clear_mc();
6955 }
6956 
6957 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6958 				unsigned long addr, unsigned long end,
6959 				struct mm_walk *walk)
6960 {
6961 	int ret = 0;
6962 	struct vm_area_struct *vma = walk->private;
6963 	pte_t *pte;
6964 	spinlock_t *ptl;
6965 	enum mc_target_type target_type;
6966 	union mc_target target;
6967 	struct page *page;
6968 	struct page_cgroup *pc;
6969 
6970 	/*
6971 	 * We don't take compound_lock() here but no race with splitting thp
6972 	 * happens because:
6973 	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
6974 	 *    under splitting, which means there's no concurrent thp split,
6975 	 *  - if another thread runs into split_huge_page() just after we
6976 	 *    entered this if-block, the thread must wait for page table lock
6977 	 *    to be unlocked in __split_huge_page_splitting(), where the main
6978 	 *    part of thp split is not executed yet.
6979 	 */
6980 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6981 		if (mc.precharge < HPAGE_PMD_NR) {
6982 			spin_unlock(ptl);
6983 			return 0;
6984 		}
6985 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6986 		if (target_type == MC_TARGET_PAGE) {
6987 			page = target.page;
6988 			if (!isolate_lru_page(page)) {
6989 				pc = lookup_page_cgroup(page);
6990 				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
6991 							pc, mc.from, mc.to)) {
6992 					mc.precharge -= HPAGE_PMD_NR;
6993 					mc.moved_charge += HPAGE_PMD_NR;
6994 				}
6995 				putback_lru_page(page);
6996 			}
6997 			put_page(page);
6998 		}
6999 		spin_unlock(ptl);
7000 		return 0;
7001 	}
7002 
7003 	if (pmd_trans_unstable(pmd))
7004 		return 0;
7005 retry:
7006 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
7007 	for (; addr != end; addr += PAGE_SIZE) {
7008 		pte_t ptent = *(pte++);
7009 		swp_entry_t ent;
7010 
7011 		if (!mc.precharge)
7012 			break;
7013 
7014 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
7015 		case MC_TARGET_PAGE:
7016 			page = target.page;
7017 			if (isolate_lru_page(page))
7018 				goto put;
7019 			pc = lookup_page_cgroup(page);
7020 			if (!mem_cgroup_move_account(page, 1, pc,
7021 						     mc.from, mc.to)) {
7022 				mc.precharge--;
7023 				/* we uncharge from mc.from later. */
7024 				mc.moved_charge++;
7025 			}
7026 			putback_lru_page(page);
7027 put:			/* get_mctgt_type() gets the page */
7028 			put_page(page);
7029 			break;
7030 		case MC_TARGET_SWAP:
7031 			ent = target.ent;
7032 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
7033 				mc.precharge--;
7034 				/* we fixup refcnts and charges later. */
7035 				mc.moved_swap++;
7036 			}
7037 			break;
7038 		default:
7039 			break;
7040 		}
7041 	}
7042 	pte_unmap_unlock(pte - 1, ptl);
7043 	cond_resched();
7044 
7045 	if (addr != end) {
7046 		/*
7047 		 * We have consumed all precharges we got in can_attach().
7048 		 * We try to charge one by one, but don't do any additional
7049 		 * charges to mc.to if we have already failed to charge once in
7050 		 * the attach() phase.
7051 		 */
7052 		ret = mem_cgroup_do_precharge(1);
7053 		if (!ret)
7054 			goto retry;
7055 	}
7056 
7057 	return ret;
7058 }
7059 
7060 static void mem_cgroup_move_charge(struct mm_struct *mm)
7061 {
7062 	struct vm_area_struct *vma;
7063 
7064 	lru_add_drain_all();
7065 retry:
7066 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
7067 		/*
7068 		 * Someone who is holding the mmap_sem might be waiting in
7069 		 * the waitq. So we cancel all extra charges, wake up all waiters,
7070 		 * and retry. Because we cancel precharges, we might not be able
7071 		 * to move enough charges, but moving charge is a best-effort
7072 		 * feature anyway, so it wouldn't be a big problem.
7073 		 */
7074 		__mem_cgroup_clear_mc();
7075 		cond_resched();
7076 		goto retry;
7077 	}
7078 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
7079 		int ret;
7080 		struct mm_walk mem_cgroup_move_charge_walk = {
7081 			.pmd_entry = mem_cgroup_move_charge_pte_range,
7082 			.mm = mm,
7083 			.private = vma,
7084 		};
7085 		if (is_vm_hugetlb_page(vma))
7086 			continue;
7087 		ret = walk_page_range(vma->vm_start, vma->vm_end,
7088 						&mem_cgroup_move_charge_walk);
7089 		if (ret)
7090 			/*
7091 			 * means we have consumed all precharges and failed to do
7092 			 * an additional charge. Just abandon here.
7093 			 */
7094 			break;
7095 	}
7096 	up_read(&mm->mmap_sem);
7097 }
7098 
7099 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7100 				 struct cgroup_taskset *tset)
7101 {
7102 	struct task_struct *p = cgroup_taskset_first(tset);
7103 	struct mm_struct *mm = get_task_mm(p);
7104 
7105 	if (mm) {
7106 		if (mc.to)
7107 			mem_cgroup_move_charge(mm);
7108 		mmput(mm);
7109 	}
7110 	if (mc.to)
7111 		mem_cgroup_clear_mc();
7112 }
7113 #else	/* !CONFIG_MMU */
7114 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
7115 				 struct cgroup_taskset *tset)
7116 {
7117 	return 0;
7118 }
7119 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
7120 				     struct cgroup_taskset *tset)
7121 {
7122 }
7123 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7124 				 struct cgroup_taskset *tset)
7125 {
7126 }
7127 #endif
7128 
7129 /*
7130  * Cgroup retains root cgroups across [un]mount cycles making it necessary
7131  * to verify the sane_behavior flag on each mount attempt.
7132  */
7133 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
7134 {
7135 	/*
7136 	 * use_hierarchy is forced with sane_behavior.  cgroup core
7137 	 * guarantees that @root doesn't have any children, so turning it
7138 	 * on for the root memcg is enough.
7139 	 */
7140 	if (cgroup_sane_behavior(root_css->cgroup))
7141 		mem_cgroup_from_css(root_css)->use_hierarchy = true;
7142 }
7143 
7144 struct cgroup_subsys memory_cgrp_subsys = {
7145 	.css_alloc = mem_cgroup_css_alloc,
7146 	.css_online = mem_cgroup_css_online,
7147 	.css_offline = mem_cgroup_css_offline,
7148 	.css_free = mem_cgroup_css_free,
7149 	.can_attach = mem_cgroup_can_attach,
7150 	.cancel_attach = mem_cgroup_cancel_attach,
7151 	.attach = mem_cgroup_move_task,
7152 	.bind = mem_cgroup_bind,
7153 	.base_cftypes = mem_cgroup_files,
7154 	.early_init = 0,
7155 };
7156 
7157 #ifdef CONFIG_MEMCG_SWAP
7158 static int __init enable_swap_account(char *s)
7159 {
7160 	if (!strcmp(s, "1"))
7161 		really_do_swap_account = 1;
7162 	else if (!strcmp(s, "0"))
7163 		really_do_swap_account = 0;
7164 	return 1;
7165 }
7166 __setup("swapaccount=", enable_swap_account);
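/*
 * Example: booting with "swapaccount=0" on the kernel command line keeps
 * swap accounting off even when CONFIG_MEMCG_SWAP_ENABLED is set, while
 * "swapaccount=1" forces it on; enable_swap_cgroup() below then decides
 * whether the memsw files are registered.
 */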
7167 
7168 static void __init memsw_file_init(void)
7169 {
7170 	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
7171 }
7172 
7173 static void __init enable_swap_cgroup(void)
7174 {
7175 	if (!mem_cgroup_disabled() && really_do_swap_account) {
7176 		do_swap_account = 1;
7177 		memsw_file_init();
7178 	}
7179 }
7180 
7181 #else
7182 static void __init enable_swap_cgroup(void)
7183 {
7184 }
7185 #endif
7186 
7187 /*
7188  * subsys_initcall() for memory controller.
7189  *
7190  * Some parts like hotcpu_notifier() have to be initialized from this context
7191  * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
7192  * everything that doesn't depend on a specific mem_cgroup structure should
7193  * be initialized from here.
7194  */
7195 static int __init mem_cgroup_init(void)
7196 {
7197 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
7198 	enable_swap_cgroup();
7199 	mem_cgroup_soft_limit_tree_init();
7200 	memcg_stock_init();
7201 	return 0;
7202 }
7203 subsys_initcall(mem_cgroup_init);
7204