1 /* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * Kernel Memory Controller 14 * Copyright (C) 2012 Parallels Inc. and Google Inc. 15 * Authors: Glauber Costa and Suleiman Souhlal 16 * 17 * This program is free software; you can redistribute it and/or modify 18 * it under the terms of the GNU General Public License as published by 19 * the Free Software Foundation; either version 2 of the License, or 20 * (at your option) any later version. 21 * 22 * This program is distributed in the hope that it will be useful, 23 * but WITHOUT ANY WARRANTY; without even the implied warranty of 24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 25 * GNU General Public License for more details. 26 */ 27 28 #include <linux/res_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/mm.h> 32 #include <linux/hugetlb.h> 33 #include <linux/pagemap.h> 34 #include <linux/smp.h> 35 #include <linux/page-flags.h> 36 #include <linux/backing-dev.h> 37 #include <linux/bit_spinlock.h> 38 #include <linux/rcupdate.h> 39 #include <linux/limits.h> 40 #include <linux/export.h> 41 #include <linux/mutex.h> 42 #include <linux/rbtree.h> 43 #include <linux/slab.h> 44 #include <linux/swap.h> 45 #include <linux/swapops.h> 46 #include <linux/spinlock.h> 47 #include <linux/eventfd.h> 48 #include <linux/sort.h> 49 #include <linux/fs.h> 50 #include <linux/seq_file.h> 51 #include <linux/vmalloc.h> 52 #include <linux/vmpressure.h> 53 #include <linux/mm_inline.h> 54 #include <linux/page_cgroup.h> 55 #include <linux/cpu.h> 56 #include <linux/oom.h> 57 #include "internal.h" 58 #include <net/sock.h> 59 #include <net/ip.h> 60 #include <net/tcp_memcontrol.h> 61 62 #include <asm/uaccess.h> 63 64 #include <trace/events/vmscan.h> 65 66 struct cgroup_subsys mem_cgroup_subsys __read_mostly; 67 EXPORT_SYMBOL(mem_cgroup_subsys); 68 69 #define MEM_CGROUP_RECLAIM_RETRIES 5 70 static struct mem_cgroup *root_mem_cgroup __read_mostly; 71 72 #ifdef CONFIG_MEMCG_SWAP 73 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 74 int do_swap_account __read_mostly; 75 76 /* for remember boot option*/ 77 #ifdef CONFIG_MEMCG_SWAP_ENABLED 78 static int really_do_swap_account __initdata = 1; 79 #else 80 static int really_do_swap_account __initdata = 0; 81 #endif 82 83 #else 84 #define do_swap_account 0 85 #endif 86 87 88 /* 89 * Statistics for memory cgroup. 90 */ 91 enum mem_cgroup_stat_index { 92 /* 93 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 
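	 *
	 * Illustrative sketch, not the actual show-path code in this file:
	 * each index below is a per-cpu counter kept in pages and reported
	 * in memory.stat in bytes under the matching mem_cgroup_stat_names[]
	 * label, roughly as
	 *
	 *	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
	 *	seq_printf(m, "cache %llu\n", (u64)val * PAGE_SIZE);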
 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET 1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/*
	 * last scanned hierarchy member. Valid only if last_dead_count
	 * matches memcg->dead_count of the hierarchy root group.
	 */
	struct mem_cgroup *last_visited;
	unsigned long last_dead_count;

	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
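 *
 * A (memcg, nid, zid) triple maps to exactly one of these, as
 * mem_cgroup_zoneinfo() below does:
 *
 *	mz = &memcg->info.nodeinfo[nid]->zoneinfo[zid];
 *
 * and pages are routed to their lruvec through that lookup.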
171 */ 172 struct mem_cgroup_per_zone { 173 struct lruvec lruvec; 174 unsigned long lru_size[NR_LRU_LISTS]; 175 176 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 177 178 struct rb_node tree_node; /* RB tree node */ 179 unsigned long long usage_in_excess;/* Set to the value by which */ 180 /* the soft limit is exceeded*/ 181 bool on_tree; 182 struct mem_cgroup *memcg; /* Back pointer, we cannot */ 183 /* use container_of */ 184 }; 185 186 struct mem_cgroup_per_node { 187 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 188 }; 189 190 struct mem_cgroup_lru_info { 191 struct mem_cgroup_per_node *nodeinfo[0]; 192 }; 193 194 /* 195 * Cgroups above their limits are maintained in a RB-Tree, independent of 196 * their hierarchy representation 197 */ 198 199 struct mem_cgroup_tree_per_zone { 200 struct rb_root rb_root; 201 spinlock_t lock; 202 }; 203 204 struct mem_cgroup_tree_per_node { 205 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 206 }; 207 208 struct mem_cgroup_tree { 209 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 210 }; 211 212 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 213 214 struct mem_cgroup_threshold { 215 struct eventfd_ctx *eventfd; 216 u64 threshold; 217 }; 218 219 /* For threshold */ 220 struct mem_cgroup_threshold_ary { 221 /* An array index points to threshold just below or equal to usage. */ 222 int current_threshold; 223 /* Size of entries[] */ 224 unsigned int size; 225 /* Array of thresholds */ 226 struct mem_cgroup_threshold entries[0]; 227 }; 228 229 struct mem_cgroup_thresholds { 230 /* Primary thresholds array */ 231 struct mem_cgroup_threshold_ary *primary; 232 /* 233 * Spare threshold array. 234 * This is needed to make mem_cgroup_unregister_event() "never fail". 235 * It must be able to store at least primary->size - 1 entries. 236 */ 237 struct mem_cgroup_threshold_ary *spare; 238 }; 239 240 /* for OOM */ 241 struct mem_cgroup_eventfd_list { 242 struct list_head list; 243 struct eventfd_ctx *eventfd; 244 }; 245 246 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 247 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 248 249 /* 250 * The memory controller data structure. The memory controller controls both 251 * page cache and RSS per cgroup. We would eventually like to provide 252 * statistics based on the statistics developed by Rik Van Riel for clock-pro, 253 * to help the administrator determine what knobs to tune. 254 * 255 * TODO: Add a water mark for the memory controller. Reclaim will begin when 256 * we hit the water mark. May be even add a low water mark, such that 257 * no reclaim occurs from a cgroup at it's low water mark, this is 258 * a feature that will be implemented much later in the future. 259 */ 260 struct mem_cgroup { 261 struct cgroup_subsys_state css; 262 /* 263 * the counter to account for memory usage 264 */ 265 struct res_counter res; 266 267 /* vmpressure notifications */ 268 struct vmpressure vmpressure; 269 270 union { 271 /* 272 * the counter to account for mem+swap usage. 273 */ 274 struct res_counter memsw; 275 276 /* 277 * rcu_freeing is used only when freeing struct mem_cgroup, 278 * so put it into a union to avoid wasting more memory. 279 * It must be disjoint from the css field. It could be 280 * in a union with the res field, but res plays a much 281 * larger part in mem_cgroup life than memsw, and might 282 * be of interest, even at time of free, when debugging. 283 * So share rcu_head with the less interesting memsw. 
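		 *
		 * The deferred free path later in this file uses the two
		 * members in sequence, roughly (sketch, helper names as used
		 * by that free path):
		 *
		 *	call_rcu(&memcg->rcu_freeing, free_rcu);
		 *	  then free_rcu() does:
		 *		INIT_WORK(&memcg->work_freeing, free_work);
		 *		schedule_work(&memcg->work_freeing);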
284 */ 285 struct rcu_head rcu_freeing; 286 /* 287 * We also need some space for a worker in deferred freeing. 288 * By the time we call it, rcu_freeing is no longer in use. 289 */ 290 struct work_struct work_freeing; 291 }; 292 293 /* 294 * the counter to account for kernel memory usage. 295 */ 296 struct res_counter kmem; 297 /* 298 * Should the accounting and control be hierarchical, per subtree? 299 */ 300 bool use_hierarchy; 301 unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */ 302 303 bool oom_lock; 304 atomic_t under_oom; 305 306 atomic_t refcnt; 307 308 int swappiness; 309 /* OOM-Killer disable */ 310 int oom_kill_disable; 311 312 /* set when res.limit == memsw.limit */ 313 bool memsw_is_minimum; 314 315 /* protect arrays of thresholds */ 316 struct mutex thresholds_lock; 317 318 /* thresholds for memory usage. RCU-protected */ 319 struct mem_cgroup_thresholds thresholds; 320 321 /* thresholds for mem+swap usage. RCU-protected */ 322 struct mem_cgroup_thresholds memsw_thresholds; 323 324 /* For oom notifier event fd */ 325 struct list_head oom_notify; 326 327 /* 328 * Should we move charges of a task when a task is moved into this 329 * mem_cgroup ? And what type of charges should we move ? 330 */ 331 unsigned long move_charge_at_immigrate; 332 /* 333 * set > 0 if pages under this cgroup are moving to other cgroup. 334 */ 335 atomic_t moving_account; 336 /* taken only while moving_account > 0 */ 337 spinlock_t move_lock; 338 /* 339 * percpu counter. 340 */ 341 struct mem_cgroup_stat_cpu __percpu *stat; 342 /* 343 * used when a cpu is offlined or other synchronizations 344 * See mem_cgroup_read_stat(). 345 */ 346 struct mem_cgroup_stat_cpu nocpu_base; 347 spinlock_t pcp_counter_lock; 348 349 atomic_t dead_count; 350 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) 351 struct tcp_memcontrol tcp_mem; 352 #endif 353 #if defined(CONFIG_MEMCG_KMEM) 354 /* analogous to slab_common's slab_caches list. per-memcg */ 355 struct list_head memcg_slab_caches; 356 /* Not a spinlock, we can take a lot of time walking the list */ 357 struct mutex slab_caches_mutex; 358 /* Index in the kmem_cache->memcg_params->memcg_caches array */ 359 int kmemcg_id; 360 #endif 361 362 int last_scanned_node; 363 #if MAX_NUMNODES > 1 364 nodemask_t scan_nodes; 365 atomic_t numainfo_events; 366 atomic_t numainfo_updating; 367 #endif 368 369 /* 370 * Per cgroup active and inactive list, similar to the 371 * per zone LRU lists. 372 * 373 * WARNING: This has to be the last element of the struct. Don't 374 * add new fields after this point. 375 */ 376 struct mem_cgroup_lru_info info; 377 }; 378 379 static size_t memcg_size(void) 380 { 381 return sizeof(struct mem_cgroup) + 382 nr_node_ids * sizeof(struct mem_cgroup_per_node); 383 } 384 385 /* internal only representation about the status of kmem accounting. */ 386 enum { 387 KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */ 388 KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. 
*/ 389 KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ 390 }; 391 392 /* We account when limit is on, but only after call sites are patched */ 393 #define KMEM_ACCOUNTED_MASK \ 394 ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED)) 395 396 #ifdef CONFIG_MEMCG_KMEM 397 static inline void memcg_kmem_set_active(struct mem_cgroup *memcg) 398 { 399 set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); 400 } 401 402 static bool memcg_kmem_is_active(struct mem_cgroup *memcg) 403 { 404 return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); 405 } 406 407 static void memcg_kmem_set_activated(struct mem_cgroup *memcg) 408 { 409 set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); 410 } 411 412 static void memcg_kmem_clear_activated(struct mem_cgroup *memcg) 413 { 414 clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); 415 } 416 417 static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) 418 { 419 if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags)) 420 set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags); 421 } 422 423 static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg) 424 { 425 return test_and_clear_bit(KMEM_ACCOUNTED_DEAD, 426 &memcg->kmem_account_flags); 427 } 428 #endif 429 430 /* Stuffs for move charges at task migration. */ 431 /* 432 * Types of charges to be moved. "move_charge_at_immitgrate" and 433 * "immigrate_flags" are treated as a left-shifted bitmap of these types. 434 */ 435 enum move_type { 436 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ 437 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */ 438 NR_MOVE_TYPE, 439 }; 440 441 /* "mc" and its members are protected by cgroup_mutex */ 442 static struct move_charge_struct { 443 spinlock_t lock; /* for from, to */ 444 struct mem_cgroup *from; 445 struct mem_cgroup *to; 446 unsigned long immigrate_flags; 447 unsigned long precharge; 448 unsigned long moved_charge; 449 unsigned long moved_swap; 450 struct task_struct *moving_task; /* a task moving charges */ 451 wait_queue_head_t waitq; /* a waitq for other context */ 452 } mc = { 453 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 454 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 455 }; 456 457 static bool move_anon(void) 458 { 459 return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags); 460 } 461 462 static bool move_file(void) 463 { 464 return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags); 465 } 466 467 /* 468 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 469 * limit reclaim to prevent infinite loops, if they ever occur. 
470 */ 471 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 472 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 473 474 enum charge_type { 475 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 476 MEM_CGROUP_CHARGE_TYPE_ANON, 477 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 478 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 479 NR_CHARGE_TYPE, 480 }; 481 482 /* for encoding cft->private value on file */ 483 enum res_type { 484 _MEM, 485 _MEMSWAP, 486 _OOM_TYPE, 487 _KMEM, 488 }; 489 490 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 491 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 492 #define MEMFILE_ATTR(val) ((val) & 0xffff) 493 /* Used for OOM nofiier */ 494 #define OOM_CONTROL (0) 495 496 /* 497 * Reclaim flags for mem_cgroup_hierarchical_reclaim 498 */ 499 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0 500 #define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT) 501 #define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1 502 #define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT) 503 504 /* 505 * The memcg_create_mutex will be held whenever a new cgroup is created. 506 * As a consequence, any change that needs to protect against new child cgroups 507 * appearing has to hold it as well. 508 */ 509 static DEFINE_MUTEX(memcg_create_mutex); 510 511 static void mem_cgroup_get(struct mem_cgroup *memcg); 512 static void mem_cgroup_put(struct mem_cgroup *memcg); 513 514 static inline 515 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s) 516 { 517 return container_of(s, struct mem_cgroup, css); 518 } 519 520 /* Some nice accessors for the vmpressure. */ 521 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 522 { 523 if (!memcg) 524 memcg = root_mem_cgroup; 525 return &memcg->vmpressure; 526 } 527 528 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 529 { 530 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 531 } 532 533 struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css) 534 { 535 return &mem_cgroup_from_css(css)->vmpressure; 536 } 537 538 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) 539 { 540 return (memcg == root_mem_cgroup); 541 } 542 543 /* Writing them here to avoid exposing memcg's inner layout */ 544 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) 545 546 void sock_update_memcg(struct sock *sk) 547 { 548 if (mem_cgroup_sockets_enabled) { 549 struct mem_cgroup *memcg; 550 struct cg_proto *cg_proto; 551 552 BUG_ON(!sk->sk_prot->proto_cgroup); 553 554 /* Socket cloning can throw us here with sk_cgrp already 555 * filled. It won't however, necessarily happen from 556 * process context. So the test for root memcg given 557 * the current task's memcg won't help us in this case. 558 * 559 * Respecting the original socket's memcg is a better 560 * decision in this case. 
561 */ 562 if (sk->sk_cgrp) { 563 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); 564 mem_cgroup_get(sk->sk_cgrp->memcg); 565 return; 566 } 567 568 rcu_read_lock(); 569 memcg = mem_cgroup_from_task(current); 570 cg_proto = sk->sk_prot->proto_cgroup(memcg); 571 if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) { 572 mem_cgroup_get(memcg); 573 sk->sk_cgrp = cg_proto; 574 } 575 rcu_read_unlock(); 576 } 577 } 578 EXPORT_SYMBOL(sock_update_memcg); 579 580 void sock_release_memcg(struct sock *sk) 581 { 582 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 583 struct mem_cgroup *memcg; 584 WARN_ON(!sk->sk_cgrp->memcg); 585 memcg = sk->sk_cgrp->memcg; 586 mem_cgroup_put(memcg); 587 } 588 } 589 590 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) 591 { 592 if (!memcg || mem_cgroup_is_root(memcg)) 593 return NULL; 594 595 return &memcg->tcp_mem.cg_proto; 596 } 597 EXPORT_SYMBOL(tcp_proto_cgroup); 598 599 static void disarm_sock_keys(struct mem_cgroup *memcg) 600 { 601 if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto)) 602 return; 603 static_key_slow_dec(&memcg_socket_limit_enabled); 604 } 605 #else 606 static void disarm_sock_keys(struct mem_cgroup *memcg) 607 { 608 } 609 #endif 610 611 #ifdef CONFIG_MEMCG_KMEM 612 /* 613 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches. 614 * There are two main reasons for not using the css_id for this: 615 * 1) this works better in sparse environments, where we have a lot of memcgs, 616 * but only a few kmem-limited. Or also, if we have, for instance, 200 617 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 618 * 200 entry array for that. 619 * 620 * 2) In order not to violate the cgroup API, we would like to do all memory 621 * allocation in ->create(). At that point, we haven't yet allocated the 622 * css_id. Having a separate index prevents us from messing with the cgroup 623 * core for this 624 * 625 * The current size of the caches array is stored in 626 * memcg_limited_groups_array_size. It will double each time we have to 627 * increase it. 628 */ 629 static DEFINE_IDA(kmem_limited_groups); 630 int memcg_limited_groups_array_size; 631 632 /* 633 * MIN_SIZE is different than 1, because we would like to avoid going through 634 * the alloc/free process all the time. In a small machine, 4 kmem-limited 635 * cgroups is a reasonable guess. In the future, it could be a parameter or 636 * tunable, but that is strictly not necessary. 637 * 638 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get 639 * this constant directly from cgroup, but it is understandable that this is 640 * better kept as an internal representation in cgroup.c. In any case, the 641 * css_id space is not getting any smaller, and we don't have to necessarily 642 * increase ours as well if it increases. 643 */ 644 #define MEMCG_CACHES_MIN_SIZE 4 645 #define MEMCG_CACHES_MAX_SIZE 65535 646 647 /* 648 * A lot of the calls to the cache allocation functions are expected to be 649 * inlined by the compiler. 
Since the calls to memcg_kmem_get_cache are 650 * conditional to this static branch, we'll have to allow modules that does 651 * kmem_cache_alloc and the such to see this symbol as well 652 */ 653 struct static_key memcg_kmem_enabled_key; 654 EXPORT_SYMBOL(memcg_kmem_enabled_key); 655 656 static void disarm_kmem_keys(struct mem_cgroup *memcg) 657 { 658 if (memcg_kmem_is_active(memcg)) { 659 static_key_slow_dec(&memcg_kmem_enabled_key); 660 ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id); 661 } 662 /* 663 * This check can't live in kmem destruction function, 664 * since the charges will outlive the cgroup 665 */ 666 WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0); 667 } 668 #else 669 static void disarm_kmem_keys(struct mem_cgroup *memcg) 670 { 671 } 672 #endif /* CONFIG_MEMCG_KMEM */ 673 674 static void disarm_static_keys(struct mem_cgroup *memcg) 675 { 676 disarm_sock_keys(memcg); 677 disarm_kmem_keys(memcg); 678 } 679 680 static void drain_all_stock_async(struct mem_cgroup *memcg); 681 682 static struct mem_cgroup_per_zone * 683 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid) 684 { 685 VM_BUG_ON((unsigned)nid >= nr_node_ids); 686 return &memcg->info.nodeinfo[nid]->zoneinfo[zid]; 687 } 688 689 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) 690 { 691 return &memcg->css; 692 } 693 694 static struct mem_cgroup_per_zone * 695 page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page) 696 { 697 int nid = page_to_nid(page); 698 int zid = page_zonenum(page); 699 700 return mem_cgroup_zoneinfo(memcg, nid, zid); 701 } 702 703 static struct mem_cgroup_tree_per_zone * 704 soft_limit_tree_node_zone(int nid, int zid) 705 { 706 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 707 } 708 709 static struct mem_cgroup_tree_per_zone * 710 soft_limit_tree_from_page(struct page *page) 711 { 712 int nid = page_to_nid(page); 713 int zid = page_zonenum(page); 714 715 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 716 } 717 718 static void 719 __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg, 720 struct mem_cgroup_per_zone *mz, 721 struct mem_cgroup_tree_per_zone *mctz, 722 unsigned long long new_usage_in_excess) 723 { 724 struct rb_node **p = &mctz->rb_root.rb_node; 725 struct rb_node *parent = NULL; 726 struct mem_cgroup_per_zone *mz_node; 727 728 if (mz->on_tree) 729 return; 730 731 mz->usage_in_excess = new_usage_in_excess; 732 if (!mz->usage_in_excess) 733 return; 734 while (*p) { 735 parent = *p; 736 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 737 tree_node); 738 if (mz->usage_in_excess < mz_node->usage_in_excess) 739 p = &(*p)->rb_left; 740 /* 741 * We can't avoid mem cgroups that are over their soft 742 * limit by the same amount 743 */ 744 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 745 p = &(*p)->rb_right; 746 } 747 rb_link_node(&mz->tree_node, parent, p); 748 rb_insert_color(&mz->tree_node, &mctz->rb_root); 749 mz->on_tree = true; 750 } 751 752 static void 753 __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, 754 struct mem_cgroup_per_zone *mz, 755 struct mem_cgroup_tree_per_zone *mctz) 756 { 757 if (!mz->on_tree) 758 return; 759 rb_erase(&mz->tree_node, &mctz->rb_root); 760 mz->on_tree = false; 761 } 762 763 static void 764 mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, 765 struct mem_cgroup_per_zone *mz, 766 struct mem_cgroup_tree_per_zone *mctz) 767 { 768 spin_lock(&mctz->lock); 769 __mem_cgroup_remove_exceeded(memcg, mz, mctz); 770 
spin_unlock(&mctz->lock); 771 } 772 773 774 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 775 { 776 unsigned long long excess; 777 struct mem_cgroup_per_zone *mz; 778 struct mem_cgroup_tree_per_zone *mctz; 779 int nid = page_to_nid(page); 780 int zid = page_zonenum(page); 781 mctz = soft_limit_tree_from_page(page); 782 783 /* 784 * Necessary to update all ancestors when hierarchy is used. 785 * because their event counter is not touched. 786 */ 787 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 788 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 789 excess = res_counter_soft_limit_excess(&memcg->res); 790 /* 791 * We have to update the tree if mz is on RB-tree or 792 * mem is over its softlimit. 793 */ 794 if (excess || mz->on_tree) { 795 spin_lock(&mctz->lock); 796 /* if on-tree, remove it */ 797 if (mz->on_tree) 798 __mem_cgroup_remove_exceeded(memcg, mz, mctz); 799 /* 800 * Insert again. mz->usage_in_excess will be updated. 801 * If excess is 0, no tree ops. 802 */ 803 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess); 804 spin_unlock(&mctz->lock); 805 } 806 } 807 } 808 809 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 810 { 811 int node, zone; 812 struct mem_cgroup_per_zone *mz; 813 struct mem_cgroup_tree_per_zone *mctz; 814 815 for_each_node(node) { 816 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 817 mz = mem_cgroup_zoneinfo(memcg, node, zone); 818 mctz = soft_limit_tree_node_zone(node, zone); 819 mem_cgroup_remove_exceeded(memcg, mz, mctz); 820 } 821 } 822 } 823 824 static struct mem_cgroup_per_zone * 825 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 826 { 827 struct rb_node *rightmost = NULL; 828 struct mem_cgroup_per_zone *mz; 829 830 retry: 831 mz = NULL; 832 rightmost = rb_last(&mctz->rb_root); 833 if (!rightmost) 834 goto done; /* Nothing to reclaim from */ 835 836 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node); 837 /* 838 * Remove the node now but someone else can add it back, 839 * we will to add it back at the end of reclaim to its correct 840 * position in the tree. 841 */ 842 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz); 843 if (!res_counter_soft_limit_excess(&mz->memcg->res) || 844 !css_tryget(&mz->memcg->css)) 845 goto retry; 846 done: 847 return mz; 848 } 849 850 static struct mem_cgroup_per_zone * 851 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 852 { 853 struct mem_cgroup_per_zone *mz; 854 855 spin_lock(&mctz->lock); 856 mz = __mem_cgroup_largest_soft_limit_node(mctz); 857 spin_unlock(&mctz->lock); 858 return mz; 859 } 860 861 /* 862 * Implementation Note: reading percpu statistics for memcg. 863 * 864 * Both of vmstat[] and percpu_counter has threshold and do periodic 865 * synchronization to implement "quick" read. There are trade-off between 866 * reading cost and precision of value. Then, we may have a chance to implement 867 * a periodic synchronizion of counter in memcg's counter. 868 * 869 * But this _read() function is used for user interface now. The user accounts 870 * memory usage by memory cgroup and he _always_ requires exact value because 871 * he accounts memory. Even if we provide quick-and-fuzzy read, we always 872 * have to visit all online cpus and make sum. So, for now, unnecessary 873 * synchronization is not implemented. 
(just implemented for cpu hotplug)
 *
 * If there are kernel-internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization scheme like vmstat[]'s
 * should be implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
				       bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool anon, int nr_pages)
{
	preempt_disable();

	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event.
So, ignore page size */ 943 if (nr_pages > 0) 944 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 945 else { 946 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 947 nr_pages = -nr_pages; /* for event */ 948 } 949 950 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 951 952 preempt_enable(); 953 } 954 955 unsigned long 956 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) 957 { 958 struct mem_cgroup_per_zone *mz; 959 960 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 961 return mz->lru_size[lru]; 962 } 963 964 static unsigned long 965 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, 966 unsigned int lru_mask) 967 { 968 struct mem_cgroup_per_zone *mz; 969 enum lru_list lru; 970 unsigned long ret = 0; 971 972 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 973 974 for_each_lru(lru) { 975 if (BIT(lru) & lru_mask) 976 ret += mz->lru_size[lru]; 977 } 978 return ret; 979 } 980 981 static unsigned long 982 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 983 int nid, unsigned int lru_mask) 984 { 985 u64 total = 0; 986 int zid; 987 988 for (zid = 0; zid < MAX_NR_ZONES; zid++) 989 total += mem_cgroup_zone_nr_lru_pages(memcg, 990 nid, zid, lru_mask); 991 992 return total; 993 } 994 995 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 996 unsigned int lru_mask) 997 { 998 int nid; 999 u64 total = 0; 1000 1001 for_each_node_state(nid, N_MEMORY) 1002 total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); 1003 return total; 1004 } 1005 1006 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 1007 enum mem_cgroup_events_target target) 1008 { 1009 unsigned long val, next; 1010 1011 val = __this_cpu_read(memcg->stat->nr_page_events); 1012 next = __this_cpu_read(memcg->stat->targets[target]); 1013 /* from time_after() in jiffies.h */ 1014 if ((long)next - (long)val < 0) { 1015 switch (target) { 1016 case MEM_CGROUP_TARGET_THRESH: 1017 next = val + THRESHOLDS_EVENTS_TARGET; 1018 break; 1019 case MEM_CGROUP_TARGET_SOFTLIMIT: 1020 next = val + SOFTLIMIT_EVENTS_TARGET; 1021 break; 1022 case MEM_CGROUP_TARGET_NUMAINFO: 1023 next = val + NUMAINFO_EVENTS_TARGET; 1024 break; 1025 default: 1026 break; 1027 } 1028 __this_cpu_write(memcg->stat->targets[target], next); 1029 return true; 1030 } 1031 return false; 1032 } 1033 1034 /* 1035 * Check events in order. 
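 *
 * With the *_EVENTS_TARGET values defined near the top of this file, that
 * means (per cpu, roughly): thresholds are re-evaluated about every 128
 * page events, the soft limit tree about every 1024, and the NUMA scan
 * info about every 1024.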
1036 * 1037 */ 1038 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 1039 { 1040 preempt_disable(); 1041 /* threshold event is triggered in finer grain than soft limit */ 1042 if (unlikely(mem_cgroup_event_ratelimit(memcg, 1043 MEM_CGROUP_TARGET_THRESH))) { 1044 bool do_softlimit; 1045 bool do_numainfo __maybe_unused; 1046 1047 do_softlimit = mem_cgroup_event_ratelimit(memcg, 1048 MEM_CGROUP_TARGET_SOFTLIMIT); 1049 #if MAX_NUMNODES > 1 1050 do_numainfo = mem_cgroup_event_ratelimit(memcg, 1051 MEM_CGROUP_TARGET_NUMAINFO); 1052 #endif 1053 preempt_enable(); 1054 1055 mem_cgroup_threshold(memcg); 1056 if (unlikely(do_softlimit)) 1057 mem_cgroup_update_tree(memcg, page); 1058 #if MAX_NUMNODES > 1 1059 if (unlikely(do_numainfo)) 1060 atomic_inc(&memcg->numainfo_events); 1061 #endif 1062 } else 1063 preempt_enable(); 1064 } 1065 1066 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) 1067 { 1068 return mem_cgroup_from_css( 1069 cgroup_subsys_state(cont, mem_cgroup_subsys_id)); 1070 } 1071 1072 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 1073 { 1074 /* 1075 * mm_update_next_owner() may clear mm->owner to NULL 1076 * if it races with swapoff, page migration, etc. 1077 * So this can be called with p == NULL. 1078 */ 1079 if (unlikely(!p)) 1080 return NULL; 1081 1082 return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id)); 1083 } 1084 1085 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 1086 { 1087 struct mem_cgroup *memcg = NULL; 1088 1089 if (!mm) 1090 return NULL; 1091 /* 1092 * Because we have no locks, mm->owner's may be being moved to other 1093 * cgroup. We use css_tryget() here even if this looks 1094 * pessimistic (rather than adding locks here). 1095 */ 1096 rcu_read_lock(); 1097 do { 1098 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1099 if (unlikely(!memcg)) 1100 break; 1101 } while (!css_tryget(&memcg->css)); 1102 rcu_read_unlock(); 1103 return memcg; 1104 } 1105 1106 /* 1107 * Returns a next (in a pre-order walk) alive memcg (with elevated css 1108 * ref. count) or NULL if the whole root's subtree has been visited. 1109 * 1110 * helper function to be used by mem_cgroup_iter 1111 */ 1112 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, 1113 struct mem_cgroup *last_visited) 1114 { 1115 struct cgroup *prev_cgroup, *next_cgroup; 1116 1117 /* 1118 * Root is not visited by cgroup iterators so it needs an 1119 * explicit visit. 1120 */ 1121 if (!last_visited) 1122 return root; 1123 1124 prev_cgroup = (last_visited == root) ? NULL 1125 : last_visited->css.cgroup; 1126 skip_node: 1127 next_cgroup = cgroup_next_descendant_pre( 1128 prev_cgroup, root->css.cgroup); 1129 1130 /* 1131 * Even if we found a group we have to make sure it is 1132 * alive. css && !memcg means that the groups should be 1133 * skipped and we should continue the tree walk. 1134 * last_visited css is safe to use because it is 1135 * protected by css_get and the tree walk is rcu safe. 
1136 */ 1137 if (next_cgroup) { 1138 struct mem_cgroup *mem = mem_cgroup_from_cont( 1139 next_cgroup); 1140 if (css_tryget(&mem->css)) 1141 return mem; 1142 else { 1143 prev_cgroup = next_cgroup; 1144 goto skip_node; 1145 } 1146 } 1147 1148 return NULL; 1149 } 1150 1151 /** 1152 * mem_cgroup_iter - iterate over memory cgroup hierarchy 1153 * @root: hierarchy root 1154 * @prev: previously returned memcg, NULL on first invocation 1155 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1156 * 1157 * Returns references to children of the hierarchy below @root, or 1158 * @root itself, or %NULL after a full round-trip. 1159 * 1160 * Caller must pass the return value in @prev on subsequent 1161 * invocations for reference counting, or use mem_cgroup_iter_break() 1162 * to cancel a hierarchy walk before the round-trip is complete. 1163 * 1164 * Reclaimers can specify a zone and a priority level in @reclaim to 1165 * divide up the memcgs in the hierarchy among all concurrent 1166 * reclaimers operating on the same zone and priority. 1167 */ 1168 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1169 struct mem_cgroup *prev, 1170 struct mem_cgroup_reclaim_cookie *reclaim) 1171 { 1172 struct mem_cgroup *memcg = NULL; 1173 struct mem_cgroup *last_visited = NULL; 1174 unsigned long uninitialized_var(dead_count); 1175 1176 if (mem_cgroup_disabled()) 1177 return NULL; 1178 1179 if (!root) 1180 root = root_mem_cgroup; 1181 1182 if (prev && !reclaim) 1183 last_visited = prev; 1184 1185 if (!root->use_hierarchy && root != root_mem_cgroup) { 1186 if (prev) 1187 goto out_css_put; 1188 return root; 1189 } 1190 1191 rcu_read_lock(); 1192 while (!memcg) { 1193 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 1194 1195 if (reclaim) { 1196 int nid = zone_to_nid(reclaim->zone); 1197 int zid = zone_idx(reclaim->zone); 1198 struct mem_cgroup_per_zone *mz; 1199 1200 mz = mem_cgroup_zoneinfo(root, nid, zid); 1201 iter = &mz->reclaim_iter[reclaim->priority]; 1202 last_visited = iter->last_visited; 1203 if (prev && reclaim->generation != iter->generation) { 1204 iter->last_visited = NULL; 1205 goto out_unlock; 1206 } 1207 1208 /* 1209 * If the dead_count mismatches, a destruction 1210 * has happened or is happening concurrently. 1211 * If the dead_count matches, a destruction 1212 * might still happen concurrently, but since 1213 * we checked under RCU, that destruction 1214 * won't free the object until we release the 1215 * RCU reader lock. Thus, the dead_count 1216 * check verifies the pointer is still valid, 1217 * css_tryget() verifies the cgroup pointed to 1218 * is alive. 
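			 *
			 * (The smp_rmb() below is intended to pair with the
			 * smp_wmb() further down, which orders the
			 * iter->last_visited store before the
			 * iter->last_dead_count store on the update side.)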
1219 */ 1220 dead_count = atomic_read(&root->dead_count); 1221 smp_rmb(); 1222 last_visited = iter->last_visited; 1223 if (last_visited) { 1224 if ((dead_count != iter->last_dead_count) || 1225 !css_tryget(&last_visited->css)) { 1226 last_visited = NULL; 1227 } 1228 } 1229 } 1230 1231 memcg = __mem_cgroup_iter_next(root, last_visited); 1232 1233 if (reclaim) { 1234 if (last_visited) 1235 css_put(&last_visited->css); 1236 1237 iter->last_visited = memcg; 1238 smp_wmb(); 1239 iter->last_dead_count = dead_count; 1240 1241 if (!memcg) 1242 iter->generation++; 1243 else if (!prev && memcg) 1244 reclaim->generation = iter->generation; 1245 } 1246 1247 if (prev && !memcg) 1248 goto out_unlock; 1249 } 1250 out_unlock: 1251 rcu_read_unlock(); 1252 out_css_put: 1253 if (prev && prev != root) 1254 css_put(&prev->css); 1255 1256 return memcg; 1257 } 1258 1259 /** 1260 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1261 * @root: hierarchy root 1262 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1263 */ 1264 void mem_cgroup_iter_break(struct mem_cgroup *root, 1265 struct mem_cgroup *prev) 1266 { 1267 if (!root) 1268 root = root_mem_cgroup; 1269 if (prev && prev != root) 1270 css_put(&prev->css); 1271 } 1272 1273 /* 1274 * Iteration constructs for visiting all cgroups (under a tree). If 1275 * loops are exited prematurely (break), mem_cgroup_iter_break() must 1276 * be used for reference counting. 1277 */ 1278 #define for_each_mem_cgroup_tree(iter, root) \ 1279 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 1280 iter != NULL; \ 1281 iter = mem_cgroup_iter(root, iter, NULL)) 1282 1283 #define for_each_mem_cgroup(iter) \ 1284 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 1285 iter != NULL; \ 1286 iter = mem_cgroup_iter(NULL, iter, NULL)) 1287 1288 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) 1289 { 1290 struct mem_cgroup *memcg; 1291 1292 rcu_read_lock(); 1293 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1294 if (unlikely(!memcg)) 1295 goto out; 1296 1297 switch (idx) { 1298 case PGFAULT: 1299 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); 1300 break; 1301 case PGMAJFAULT: 1302 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); 1303 break; 1304 default: 1305 BUG(); 1306 } 1307 out: 1308 rcu_read_unlock(); 1309 } 1310 EXPORT_SYMBOL(__mem_cgroup_count_vm_event); 1311 1312 /** 1313 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg 1314 * @zone: zone of the wanted lruvec 1315 * @memcg: memcg of the wanted lruvec 1316 * 1317 * Returns the lru list vector holding pages for the given @zone and 1318 * @mem. This can be the global zone lruvec, if the memory controller 1319 * is disabled. 1320 */ 1321 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, 1322 struct mem_cgroup *memcg) 1323 { 1324 struct mem_cgroup_per_zone *mz; 1325 struct lruvec *lruvec; 1326 1327 if (mem_cgroup_disabled()) { 1328 lruvec = &zone->lruvec; 1329 goto out; 1330 } 1331 1332 mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone)); 1333 lruvec = &mz->lruvec; 1334 out: 1335 /* 1336 * Since a node can be onlined after the mem_cgroup was created, 1337 * we have to be prepared to initialize lruvec->zone here; 1338 * and if offlined then reonlined, we need to reinitialize it. 1339 */ 1340 if (unlikely(lruvec->zone != zone)) 1341 lruvec->zone = zone; 1342 return lruvec; 1343 } 1344 1345 /* 1346 * Following LRU functions are allowed to be used without PCG_LOCK. 
1347 * Operations are called by routine of global LRU independently from memcg. 1348 * What we have to take care of here is validness of pc->mem_cgroup. 1349 * 1350 * Changes to pc->mem_cgroup happens when 1351 * 1. charge 1352 * 2. moving account 1353 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache. 1354 * It is added to LRU before charge. 1355 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU. 1356 * When moving account, the page is not on LRU. It's isolated. 1357 */ 1358 1359 /** 1360 * mem_cgroup_page_lruvec - return lruvec for adding an lru page 1361 * @page: the page 1362 * @zone: zone of the page 1363 */ 1364 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) 1365 { 1366 struct mem_cgroup_per_zone *mz; 1367 struct mem_cgroup *memcg; 1368 struct page_cgroup *pc; 1369 struct lruvec *lruvec; 1370 1371 if (mem_cgroup_disabled()) { 1372 lruvec = &zone->lruvec; 1373 goto out; 1374 } 1375 1376 pc = lookup_page_cgroup(page); 1377 memcg = pc->mem_cgroup; 1378 1379 /* 1380 * Surreptitiously switch any uncharged offlist page to root: 1381 * an uncharged page off lru does nothing to secure 1382 * its former mem_cgroup from sudden removal. 1383 * 1384 * Our caller holds lru_lock, and PageCgroupUsed is updated 1385 * under page_cgroup lock: between them, they make all uses 1386 * of pc->mem_cgroup safe. 1387 */ 1388 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup) 1389 pc->mem_cgroup = memcg = root_mem_cgroup; 1390 1391 mz = page_cgroup_zoneinfo(memcg, page); 1392 lruvec = &mz->lruvec; 1393 out: 1394 /* 1395 * Since a node can be onlined after the mem_cgroup was created, 1396 * we have to be prepared to initialize lruvec->zone here; 1397 * and if offlined then reonlined, we need to reinitialize it. 1398 */ 1399 if (unlikely(lruvec->zone != zone)) 1400 lruvec->zone = zone; 1401 return lruvec; 1402 } 1403 1404 /** 1405 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1406 * @lruvec: mem_cgroup per zone lru vector 1407 * @lru: index of lru list the page is sitting on 1408 * @nr_pages: positive when adding or negative when removing 1409 * 1410 * This function must be called when a page is added to or removed from an 1411 * lru list. 
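 *
 * Typical caller pattern (sketch of the lru helpers, not verbatim):
 *
 *	lruvec = mem_cgroup_page_lruvec(page, zone);
 *	mem_cgroup_update_lru_size(lruvec, lru, hpage_nr_pages(page));
 *	list_add(&page->lru, &lruvec->lists[lru]);
 *
 * with a negative nr_pages on the removal side.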
1412 */ 1413 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1414 int nr_pages) 1415 { 1416 struct mem_cgroup_per_zone *mz; 1417 unsigned long *lru_size; 1418 1419 if (mem_cgroup_disabled()) 1420 return; 1421 1422 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 1423 lru_size = mz->lru_size + lru; 1424 *lru_size += nr_pages; 1425 VM_BUG_ON((long)(*lru_size) < 0); 1426 } 1427 1428 /* 1429 * Checks whether given mem is same or in the root_mem_cgroup's 1430 * hierarchy subtree 1431 */ 1432 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1433 struct mem_cgroup *memcg) 1434 { 1435 if (root_memcg == memcg) 1436 return true; 1437 if (!root_memcg->use_hierarchy || !memcg) 1438 return false; 1439 return css_is_ancestor(&memcg->css, &root_memcg->css); 1440 } 1441 1442 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1443 struct mem_cgroup *memcg) 1444 { 1445 bool ret; 1446 1447 rcu_read_lock(); 1448 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg); 1449 rcu_read_unlock(); 1450 return ret; 1451 } 1452 1453 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) 1454 { 1455 int ret; 1456 struct mem_cgroup *curr = NULL; 1457 struct task_struct *p; 1458 1459 p = find_lock_task_mm(task); 1460 if (p) { 1461 curr = try_get_mem_cgroup_from_mm(p->mm); 1462 task_unlock(p); 1463 } else { 1464 /* 1465 * All threads may have already detached their mm's, but the oom 1466 * killer still needs to detect if they have already been oom 1467 * killed to prevent needlessly killing additional tasks. 1468 */ 1469 task_lock(task); 1470 curr = mem_cgroup_from_task(task); 1471 if (curr) 1472 css_get(&curr->css); 1473 task_unlock(task); 1474 } 1475 if (!curr) 1476 return 0; 1477 /* 1478 * We should check use_hierarchy of "memcg" not "curr". Because checking 1479 * use_hierarchy of "curr" here make this function true if hierarchy is 1480 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup* 1481 * hierarchy(even if use_hierarchy is disabled in "memcg"). 1482 */ 1483 ret = mem_cgroup_same_or_subtree(memcg, curr); 1484 css_put(&curr->css); 1485 return ret; 1486 } 1487 1488 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) 1489 { 1490 unsigned long inactive_ratio; 1491 unsigned long inactive; 1492 unsigned long active; 1493 unsigned long gb; 1494 1495 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); 1496 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); 1497 1498 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1499 if (gb) 1500 inactive_ratio = int_sqrt(10 * gb); 1501 else 1502 inactive_ratio = 1; 1503 1504 return inactive * inactive_ratio < active; 1505 } 1506 1507 #define mem_cgroup_from_res_counter(counter, member) \ 1508 container_of(counter, struct mem_cgroup, member) 1509 1510 /** 1511 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1512 * @memcg: the memory cgroup 1513 * 1514 * Returns the maximum amount of memory @mem can be charged with, in 1515 * pages. 1516 */ 1517 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1518 { 1519 unsigned long long margin; 1520 1521 margin = res_counter_margin(&memcg->res); 1522 if (do_swap_account) 1523 margin = min(margin, res_counter_margin(&memcg->memsw)); 1524 return margin >> PAGE_SHIFT; 1525 } 1526 1527 int mem_cgroup_swappiness(struct mem_cgroup *memcg) 1528 { 1529 struct cgroup *cgrp = memcg->css.cgroup; 1530 1531 /* root ? 
 */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

/*
 * memcg->moving_account is used for checking the possibility that some thread
 * is calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *	CPU-A					CPU-B
 *						rcu_read_lock()
 *	memcg->moving_account+1			if (memcg->moving_account)
 *							take heavy locks.
 *	synchronize_rcu()			update something.
 *	rcu_read_unlock()
 *	start move here.
 */

/* for quick checking without looking up memcg */
atomic_t memcg_moving __read_mostly;

static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg_moving);
	atomic_inc(&memcg->moving_account);
	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
	/*
	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
	 * We check NULL in callee rather than caller.
	 */
	if (memcg) {
		atomic_dec(&memcg_moving);
		atomic_dec(&memcg->moving_account);
	}
}

/*
 * Two routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stolen() - checking whether a cgroup is mc.from or not. This
 *			 is used for avoiding races in accounting. If true,
 *			 pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
 *			 under hierarchy of moving cgroups. This is for
 *			 waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return atomic_read(&memcg->moving_account) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/*
 * Take this lock when
 * - a code tries to modify page's memcg while it's USED.
 * - a code tries to modify page state accounting in a memcg.
 * see mem_cgroup_stolen(), too.
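 *
 * Typical use under rcu_read_lock(), as in the begin/end_update_page_stat
 * pattern later in this file (sketch):
 *
 *	if (mem_cgroup_stolen(memcg)) {
 *		move_lock_mem_cgroup(memcg, &flags);
 *		locked = true;
 *	}
 *	... update pc->mem_cgroup or per-memcg page state ...
 *	if (locked)
 *		move_unlock_mem_cgroup(memcg, &flags);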
1636 */ 1637 static void move_lock_mem_cgroup(struct mem_cgroup *memcg, 1638 unsigned long *flags) 1639 { 1640 spin_lock_irqsave(&memcg->move_lock, *flags); 1641 } 1642 1643 static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, 1644 unsigned long *flags) 1645 { 1646 spin_unlock_irqrestore(&memcg->move_lock, *flags); 1647 } 1648 1649 #define K(x) ((x) << (PAGE_SHIFT-10)) 1650 /** 1651 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 1652 * @memcg: The memory cgroup that went over limit 1653 * @p: Task that is going to be killed 1654 * 1655 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1656 * enabled 1657 */ 1658 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1659 { 1660 struct cgroup *task_cgrp; 1661 struct cgroup *mem_cgrp; 1662 /* 1663 * Need a buffer in BSS, can't rely on allocations. The code relies 1664 * on the assumption that OOM is serialized for memory controller. 1665 * If this assumption is broken, revisit this code. 1666 */ 1667 static char memcg_name[PATH_MAX]; 1668 int ret; 1669 struct mem_cgroup *iter; 1670 unsigned int i; 1671 1672 if (!p) 1673 return; 1674 1675 rcu_read_lock(); 1676 1677 mem_cgrp = memcg->css.cgroup; 1678 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); 1679 1680 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); 1681 if (ret < 0) { 1682 /* 1683 * Unfortunately, we are unable to convert to a useful name 1684 * But we'll still print out the usage information 1685 */ 1686 rcu_read_unlock(); 1687 goto done; 1688 } 1689 rcu_read_unlock(); 1690 1691 pr_info("Task in %s killed", memcg_name); 1692 1693 rcu_read_lock(); 1694 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX); 1695 if (ret < 0) { 1696 rcu_read_unlock(); 1697 goto done; 1698 } 1699 rcu_read_unlock(); 1700 1701 /* 1702 * Continues from above, so we don't need an KERN_ level 1703 */ 1704 pr_cont(" as a result of limit of %s\n", memcg_name); 1705 done: 1706 1707 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n", 1708 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, 1709 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, 1710 res_counter_read_u64(&memcg->res, RES_FAILCNT)); 1711 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n", 1712 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, 1713 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, 1714 res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); 1715 pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n", 1716 res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10, 1717 res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10, 1718 res_counter_read_u64(&memcg->kmem, RES_FAILCNT)); 1719 1720 for_each_mem_cgroup_tree(iter, memcg) { 1721 pr_info("Memory cgroup stats"); 1722 1723 rcu_read_lock(); 1724 ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX); 1725 if (!ret) 1726 pr_cont(" for %s", memcg_name); 1727 rcu_read_unlock(); 1728 pr_cont(":"); 1729 1730 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1731 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1732 continue; 1733 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], 1734 K(mem_cgroup_read_stat(iter, i))); 1735 } 1736 1737 for (i = 0; i < NR_LRU_LISTS; i++) 1738 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1739 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1740 1741 pr_cont("\n"); 1742 } 1743 } 1744 1745 /* 1746 * This function returns the number of memcg under hierarchy tree. Returns 1747 * 1(self count) if no children. 
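 * (For example, a memcg with two children yields 3.)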
1748 */ 1749 static int mem_cgroup_count_children(struct mem_cgroup *memcg) 1750 { 1751 int num = 0; 1752 struct mem_cgroup *iter; 1753 1754 for_each_mem_cgroup_tree(iter, memcg) 1755 num++; 1756 return num; 1757 } 1758 1759 /* 1760 * Return the memory (and swap, if configured) limit for a memcg. 1761 */ 1762 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) 1763 { 1764 u64 limit; 1765 1766 limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 1767 1768 /* 1769 * Do not consider swap space if we cannot swap due to swappiness 1770 */ 1771 if (mem_cgroup_swappiness(memcg)) { 1772 u64 memsw; 1773 1774 limit += total_swap_pages << PAGE_SHIFT; 1775 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 1776 1777 /* 1778 * If memsw is finite and limits the amount of swap space 1779 * available to this memcg, return that limit. 1780 */ 1781 limit = min(limit, memsw); 1782 } 1783 1784 return limit; 1785 } 1786 1787 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1788 int order) 1789 { 1790 struct mem_cgroup *iter; 1791 unsigned long chosen_points = 0; 1792 unsigned long totalpages; 1793 unsigned int points = 0; 1794 struct task_struct *chosen = NULL; 1795 1796 /* 1797 * If current has a pending SIGKILL or is exiting, then automatically 1798 * select it. The goal is to allow it to allocate so that it may 1799 * quickly exit and free its memory. 1800 */ 1801 if (fatal_signal_pending(current) || current->flags & PF_EXITING) { 1802 set_thread_flag(TIF_MEMDIE); 1803 return; 1804 } 1805 1806 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL); 1807 totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1; 1808 for_each_mem_cgroup_tree(iter, memcg) { 1809 struct cgroup *cgroup = iter->css.cgroup; 1810 struct cgroup_iter it; 1811 struct task_struct *task; 1812 1813 cgroup_iter_start(cgroup, &it); 1814 while ((task = cgroup_iter_next(cgroup, &it))) { 1815 switch (oom_scan_process_thread(task, totalpages, NULL, 1816 false)) { 1817 case OOM_SCAN_SELECT: 1818 if (chosen) 1819 put_task_struct(chosen); 1820 chosen = task; 1821 chosen_points = ULONG_MAX; 1822 get_task_struct(chosen); 1823 /* fall through */ 1824 case OOM_SCAN_CONTINUE: 1825 continue; 1826 case OOM_SCAN_ABORT: 1827 cgroup_iter_end(cgroup, &it); 1828 mem_cgroup_iter_break(memcg, iter); 1829 if (chosen) 1830 put_task_struct(chosen); 1831 return; 1832 case OOM_SCAN_OK: 1833 break; 1834 }; 1835 points = oom_badness(task, memcg, NULL, totalpages); 1836 if (points > chosen_points) { 1837 if (chosen) 1838 put_task_struct(chosen); 1839 chosen = task; 1840 chosen_points = points; 1841 get_task_struct(chosen); 1842 } 1843 } 1844 cgroup_iter_end(cgroup, &it); 1845 } 1846 1847 if (!chosen) 1848 return; 1849 points = chosen_points * 1000 / totalpages; 1850 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg, 1851 NULL, "Memory cgroup out of memory"); 1852 } 1853 1854 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, 1855 gfp_t gfp_mask, 1856 unsigned long flags) 1857 { 1858 unsigned long total = 0; 1859 bool noswap = false; 1860 int loop; 1861 1862 if (flags & MEM_CGROUP_RECLAIM_NOSWAP) 1863 noswap = true; 1864 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum) 1865 noswap = true; 1866 1867 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) { 1868 if (loop) 1869 drain_all_stock_async(memcg); 1870 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap); 1871 /* 1872 * Allow limit shrinkers, which are triggered directly 1873 * by userspace, to 
catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, they will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to the
	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE or the
	 * memcg is too small and all pages are not on the LRU. In that case,
	 * we use the current node.
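	 *
	 * Example: with scan_nodes = { 0, 2, 3 } and last_scanned_node == 2,
	 * the walk above picks node 3 and wraps to node 0 on the next call;
	 * with an empty scan_nodes, both next_node() and first_node() return
	 * MAX_NUMNODES and we fall back to numa_node_id() below.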
1972 */ 1973 if (unlikely(node == MAX_NUMNODES)) 1974 node = numa_node_id(); 1975 1976 memcg->last_scanned_node = node; 1977 return node; 1978 } 1979 1980 /* 1981 * Check all nodes whether it contains reclaimable pages or not. 1982 * For quick scan, we make use of scan_nodes. This will allow us to skip 1983 * unused nodes. But scan_nodes is lazily updated and may not cotain 1984 * enough new information. We need to do double check. 1985 */ 1986 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) 1987 { 1988 int nid; 1989 1990 /* 1991 * quick check...making use of scan_node. 1992 * We can skip unused nodes. 1993 */ 1994 if (!nodes_empty(memcg->scan_nodes)) { 1995 for (nid = first_node(memcg->scan_nodes); 1996 nid < MAX_NUMNODES; 1997 nid = next_node(nid, memcg->scan_nodes)) { 1998 1999 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) 2000 return true; 2001 } 2002 } 2003 /* 2004 * Check rest of nodes. 2005 */ 2006 for_each_node_state(nid, N_MEMORY) { 2007 if (node_isset(nid, memcg->scan_nodes)) 2008 continue; 2009 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) 2010 return true; 2011 } 2012 return false; 2013 } 2014 2015 #else 2016 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 2017 { 2018 return 0; 2019 } 2020 2021 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) 2022 { 2023 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); 2024 } 2025 #endif 2026 2027 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 2028 struct zone *zone, 2029 gfp_t gfp_mask, 2030 unsigned long *total_scanned) 2031 { 2032 struct mem_cgroup *victim = NULL; 2033 int total = 0; 2034 int loop = 0; 2035 unsigned long excess; 2036 unsigned long nr_scanned; 2037 struct mem_cgroup_reclaim_cookie reclaim = { 2038 .zone = zone, 2039 .priority = 0, 2040 }; 2041 2042 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT; 2043 2044 while (1) { 2045 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 2046 if (!victim) { 2047 loop++; 2048 if (loop >= 2) { 2049 /* 2050 * If we have not been able to reclaim 2051 * anything, it might because there are 2052 * no reclaimable pages under this hierarchy 2053 */ 2054 if (!total) 2055 break; 2056 /* 2057 * We want to do more targeted reclaim. 2058 * excess >> 2 is not to excessive so as to 2059 * reclaim too much, nor too less that we keep 2060 * coming back to reclaim from this cgroup 2061 */ 2062 if (total >= (excess >> 2) || 2063 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 2064 break; 2065 } 2066 continue; 2067 } 2068 if (!mem_cgroup_reclaimable(victim, false)) 2069 continue; 2070 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, 2071 zone, &nr_scanned); 2072 *total_scanned += nr_scanned; 2073 if (!res_counter_soft_limit_excess(&root_memcg->res)) 2074 break; 2075 } 2076 mem_cgroup_iter_break(root_memcg, victim); 2077 return total; 2078 } 2079 2080 /* 2081 * Check OOM-Killer is already running under our hierarchy. 2082 * If someone is running, return false. 2083 * Has to be called with memcg_oom_lock 2084 */ 2085 static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg) 2086 { 2087 struct mem_cgroup *iter, *failed = NULL; 2088 2089 for_each_mem_cgroup_tree(iter, memcg) { 2090 if (iter->oom_lock) { 2091 /* 2092 * this subtree of our hierarchy is already locked 2093 * so we cannot give a lock. 
2094 */ 2095 failed = iter; 2096 mem_cgroup_iter_break(memcg, iter); 2097 break; 2098 } else 2099 iter->oom_lock = true; 2100 } 2101 2102 if (!failed) 2103 return true; 2104 2105 /* 2106 * OK, we failed to lock the whole subtree so we have to clean up 2107 * what we set up to the failing subtree 2108 */ 2109 for_each_mem_cgroup_tree(iter, memcg) { 2110 if (iter == failed) { 2111 mem_cgroup_iter_break(memcg, iter); 2112 break; 2113 } 2114 iter->oom_lock = false; 2115 } 2116 return false; 2117 } 2118 2119 /* 2120 * Has to be called with memcg_oom_lock 2121 */ 2122 static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 2123 { 2124 struct mem_cgroup *iter; 2125 2126 for_each_mem_cgroup_tree(iter, memcg) 2127 iter->oom_lock = false; 2128 return 0; 2129 } 2130 2131 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 2132 { 2133 struct mem_cgroup *iter; 2134 2135 for_each_mem_cgroup_tree(iter, memcg) 2136 atomic_inc(&iter->under_oom); 2137 } 2138 2139 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 2140 { 2141 struct mem_cgroup *iter; 2142 2143 /* 2144 * When a new child is created while the hierarchy is under oom, 2145 * mem_cgroup_oom_lock() may not be called. We have to use 2146 * atomic_add_unless() here. 2147 */ 2148 for_each_mem_cgroup_tree(iter, memcg) 2149 atomic_add_unless(&iter->under_oom, -1, 0); 2150 } 2151 2152 static DEFINE_SPINLOCK(memcg_oom_lock); 2153 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 2154 2155 struct oom_wait_info { 2156 struct mem_cgroup *memcg; 2157 wait_queue_t wait; 2158 }; 2159 2160 static int memcg_oom_wake_function(wait_queue_t *wait, 2161 unsigned mode, int sync, void *arg) 2162 { 2163 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 2164 struct mem_cgroup *oom_wait_memcg; 2165 struct oom_wait_info *oom_wait_info; 2166 2167 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 2168 oom_wait_memcg = oom_wait_info->memcg; 2169 2170 /* 2171 * Both of oom_wait_info->memcg and wake_memcg are stable under us. 2172 * Then we can use css_is_ancestor without taking care of RCU. 2173 */ 2174 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) 2175 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) 2176 return 0; 2177 return autoremove_wake_function(wait, mode, sync, arg); 2178 } 2179 2180 static void memcg_wakeup_oom(struct mem_cgroup *memcg) 2181 { 2182 /* for filtering, pass "memcg" as argument. */ 2183 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 2184 } 2185 2186 static void memcg_oom_recover(struct mem_cgroup *memcg) 2187 { 2188 if (memcg && atomic_read(&memcg->under_oom)) 2189 memcg_wakeup_oom(memcg); 2190 } 2191 2192 /* 2193 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 2194 */ 2195 static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, 2196 int order) 2197 { 2198 struct oom_wait_info owait; 2199 bool locked, need_to_kill; 2200 2201 owait.memcg = memcg; 2202 owait.wait.flags = 0; 2203 owait.wait.func = memcg_oom_wake_function; 2204 owait.wait.private = current; 2205 INIT_LIST_HEAD(&owait.wait.task_list); 2206 need_to_kill = true; 2207 mem_cgroup_mark_under_oom(memcg); 2208 2209 /* At first, try to OOM lock hierarchy under memcg.*/ 2210 spin_lock(&memcg_oom_lock); 2211 locked = mem_cgroup_oom_lock(memcg); 2212 /* 2213 * Even if signal_pending(), we can't quit charge() loop without 2214 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 2215 * under OOM is always welcomed, use TASK_KILLABLE here. 
2216 */ 2217 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2218 if (!locked || memcg->oom_kill_disable) 2219 need_to_kill = false; 2220 if (locked) 2221 mem_cgroup_oom_notify(memcg); 2222 spin_unlock(&memcg_oom_lock); 2223 2224 if (need_to_kill) { 2225 finish_wait(&memcg_oom_waitq, &owait.wait); 2226 mem_cgroup_out_of_memory(memcg, mask, order); 2227 } else { 2228 schedule(); 2229 finish_wait(&memcg_oom_waitq, &owait.wait); 2230 } 2231 spin_lock(&memcg_oom_lock); 2232 if (locked) 2233 mem_cgroup_oom_unlock(memcg); 2234 memcg_wakeup_oom(memcg); 2235 spin_unlock(&memcg_oom_lock); 2236 2237 mem_cgroup_unmark_under_oom(memcg); 2238 2239 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) 2240 return false; 2241 /* Give a chance to the dying process */ 2242 schedule_timeout_uninterruptible(1); 2243 return true; 2244 } 2245 2246 /* 2247 * Currently used to update mapped file statistics, but the routine can be 2248 * generalized to update other statistics as well. 2249 * 2250 * Notes: Race condition 2251 * 2252 * We usually use page_cgroup_lock() for accessing page_cgroup members but 2253 * it tends to be costly. But considering some conditions, we don't need 2254 * to do so _always_. 2255 * 2256 * Considering "charge", lock_page_cgroup() is not required because all 2257 * file-stat operations happen after a page is attached to the radix-tree. There 2258 * is no race with "charge". 2259 * 2260 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup 2261 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even 2262 * if there is a race with "uncharge". The statistics themselves are properly handled 2263 * by flags. 2264 * 2265 * Considering "move", this is the only case where we see a race. To make the race 2266 * window small, we check mm->moving_account and detect whether there is a possibility of a race. 2267 * If there is, we take a lock. 2268 */ 2269 2270 void __mem_cgroup_begin_update_page_stat(struct page *page, 2271 bool *locked, unsigned long *flags) 2272 { 2273 struct mem_cgroup *memcg; 2274 struct page_cgroup *pc; 2275 2276 pc = lookup_page_cgroup(page); 2277 again: 2278 memcg = pc->mem_cgroup; 2279 if (unlikely(!memcg || !PageCgroupUsed(pc))) 2280 return; 2281 /* 2282 * If this memory cgroup is not under account moving, we don't 2283 * need to take move_lock_mem_cgroup(). Because we already hold 2284 * rcu_read_lock(), any calls to move_account will be delayed until 2285 * rcu_read_unlock() if mem_cgroup_stolen() == true. 2286 */ 2287 if (!mem_cgroup_stolen(memcg)) 2288 return; 2289 2290 move_lock_mem_cgroup(memcg, flags); 2291 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) { 2292 move_unlock_mem_cgroup(memcg, flags); 2293 goto again; 2294 } 2295 *locked = true; 2296 } 2297 2298 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags) 2299 { 2300 struct page_cgroup *pc = lookup_page_cgroup(page); 2301 2302 /* 2303 * It's guaranteed that pc->mem_cgroup never changes while 2304 * the lock is held because any routine that modifies pc->mem_cgroup 2305 * should take move_lock_mem_cgroup(). 
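* * A minimal sketch of the whole protocol, assuming a caller updating the mapped-file counter (real callers normally go through the begin/end update_page_stat wrappers declared in memcontrol.h): * * bool locked = false; * unsigned long flags; * * rcu_read_lock(); * __mem_cgroup_begin_update_page_stat(page, &locked, &flags); * mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1); * if (locked) * __mem_cgroup_end_update_page_stat(page, &flags); * rcu_read_unlock();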
2306 */ 2307 move_unlock_mem_cgroup(pc->mem_cgroup, flags); 2308 } 2309 2310 void mem_cgroup_update_page_stat(struct page *page, 2311 enum mem_cgroup_page_stat_item idx, int val) 2312 { 2313 struct mem_cgroup *memcg; 2314 struct page_cgroup *pc = lookup_page_cgroup(page); 2315 unsigned long uninitialized_var(flags); 2316 2317 if (mem_cgroup_disabled()) 2318 return; 2319 2320 memcg = pc->mem_cgroup; 2321 if (unlikely(!memcg || !PageCgroupUsed(pc))) 2322 return; 2323 2324 switch (idx) { 2325 case MEMCG_NR_FILE_MAPPED: 2326 idx = MEM_CGROUP_STAT_FILE_MAPPED; 2327 break; 2328 default: 2329 BUG(); 2330 } 2331 2332 this_cpu_add(memcg->stat->count[idx], val); 2333 } 2334 2335 /* 2336 * size of first charge trial. "32" comes from vmscan.c's magic value. 2337 * TODO: maybe necessary to use big numbers in big irons. 2338 */ 2339 #define CHARGE_BATCH 32U 2340 struct memcg_stock_pcp { 2341 struct mem_cgroup *cached; /* this never be root cgroup */ 2342 unsigned int nr_pages; 2343 struct work_struct work; 2344 unsigned long flags; 2345 #define FLUSHING_CACHED_CHARGE 0 2346 }; 2347 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2348 static DEFINE_MUTEX(percpu_charge_mutex); 2349 2350 /** 2351 * consume_stock: Try to consume stocked charge on this cpu. 2352 * @memcg: memcg to consume from. 2353 * @nr_pages: how many pages to charge. 2354 * 2355 * The charges will only happen if @memcg matches the current cpu's memcg 2356 * stock, and at least @nr_pages are available in that stock. Failure to 2357 * service an allocation will refill the stock. 2358 * 2359 * returns true if successful, false otherwise. 2360 */ 2361 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2362 { 2363 struct memcg_stock_pcp *stock; 2364 bool ret = true; 2365 2366 if (nr_pages > CHARGE_BATCH) 2367 return false; 2368 2369 stock = &get_cpu_var(memcg_stock); 2370 if (memcg == stock->cached && stock->nr_pages >= nr_pages) 2371 stock->nr_pages -= nr_pages; 2372 else /* need to call res_counter_charge */ 2373 ret = false; 2374 put_cpu_var(memcg_stock); 2375 return ret; 2376 } 2377 2378 /* 2379 * Returns stocks cached in percpu to res_counter and reset cached information. 2380 */ 2381 static void drain_stock(struct memcg_stock_pcp *stock) 2382 { 2383 struct mem_cgroup *old = stock->cached; 2384 2385 if (stock->nr_pages) { 2386 unsigned long bytes = stock->nr_pages * PAGE_SIZE; 2387 2388 res_counter_uncharge(&old->res, bytes); 2389 if (do_swap_account) 2390 res_counter_uncharge(&old->memsw, bytes); 2391 stock->nr_pages = 0; 2392 } 2393 stock->cached = NULL; 2394 } 2395 2396 /* 2397 * This must be called under preempt disabled or must be called by 2398 * a thread which is pinned to local cpu. 2399 */ 2400 static void drain_local_stock(struct work_struct *dummy) 2401 { 2402 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 2403 drain_stock(stock); 2404 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2405 } 2406 2407 static void __init memcg_stock_init(void) 2408 { 2409 int cpu; 2410 2411 for_each_possible_cpu(cpu) { 2412 struct memcg_stock_pcp *stock = 2413 &per_cpu(memcg_stock, cpu); 2414 INIT_WORK(&stock->work, drain_local_stock); 2415 } 2416 } 2417 2418 /* 2419 * Cache charges(val) which is from res_counter, to local per_cpu area. 2420 * This will be consumed by consume_stock() function, later. 
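* For example, __mem_cgroup_try_charge() below charges a whole batch (at least CHARGE_BATCH pages) against the res_counter and then refills the stock with the batch - nr_pages pages it did not immediately need.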
2421 */ 2422 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2423 { 2424 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2425 2426 if (stock->cached != memcg) { /* reset if necessary */ 2427 drain_stock(stock); 2428 stock->cached = memcg; 2429 } 2430 stock->nr_pages += nr_pages; 2431 put_cpu_var(memcg_stock); 2432 } 2433 2434 /* 2435 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2436 * of the hierarchy under it. sync flag says whether we should block 2437 * until the work is done. 2438 */ 2439 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) 2440 { 2441 int cpu, curcpu; 2442 2443 /* Notify other cpus that system-wide "drain" is running */ 2444 get_online_cpus(); 2445 curcpu = get_cpu(); 2446 for_each_online_cpu(cpu) { 2447 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2448 struct mem_cgroup *memcg; 2449 2450 memcg = stock->cached; 2451 if (!memcg || !stock->nr_pages) 2452 continue; 2453 if (!mem_cgroup_same_or_subtree(root_memcg, memcg)) 2454 continue; 2455 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2456 if (cpu == curcpu) 2457 drain_local_stock(&stock->work); 2458 else 2459 schedule_work_on(cpu, &stock->work); 2460 } 2461 } 2462 put_cpu(); 2463 2464 if (!sync) 2465 goto out; 2466 2467 for_each_online_cpu(cpu) { 2468 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2469 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) 2470 flush_work(&stock->work); 2471 } 2472 out: 2473 put_online_cpus(); 2474 } 2475 2476 /* 2477 * Tries to drain stocked charges in other cpus. This function is asynchronous 2478 * and just put a work per cpu for draining localy on each cpu. Caller can 2479 * expects some charges will be back to res_counter later but cannot wait for 2480 * it. 2481 */ 2482 static void drain_all_stock_async(struct mem_cgroup *root_memcg) 2483 { 2484 /* 2485 * If someone calls draining, avoid adding more kworker runs. 2486 */ 2487 if (!mutex_trylock(&percpu_charge_mutex)) 2488 return; 2489 drain_all_stock(root_memcg, false); 2490 mutex_unlock(&percpu_charge_mutex); 2491 } 2492 2493 /* This is a synchronous drain interface. */ 2494 static void drain_all_stock_sync(struct mem_cgroup *root_memcg) 2495 { 2496 /* called when force_empty is called */ 2497 mutex_lock(&percpu_charge_mutex); 2498 drain_all_stock(root_memcg, true); 2499 mutex_unlock(&percpu_charge_mutex); 2500 } 2501 2502 /* 2503 * This function drains percpu counter value from DEAD cpu and 2504 * move it to local cpu. Note that this function can be preempted. 
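* It is called from memcg_cpu_hotplug_callback() below for every memcg when a cpu goes CPU_DEAD, so the dead cpu's percpu counters end up folded into nocpu_base.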
2505 */ 2506 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu) 2507 { 2508 int i; 2509 2510 spin_lock(&memcg->pcp_counter_lock); 2511 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 2512 long x = per_cpu(memcg->stat->count[i], cpu); 2513 2514 per_cpu(memcg->stat->count[i], cpu) = 0; 2515 memcg->nocpu_base.count[i] += x; 2516 } 2517 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2518 unsigned long x = per_cpu(memcg->stat->events[i], cpu); 2519 2520 per_cpu(memcg->stat->events[i], cpu) = 0; 2521 memcg->nocpu_base.events[i] += x; 2522 } 2523 spin_unlock(&memcg->pcp_counter_lock); 2524 } 2525 2526 static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, 2527 unsigned long action, 2528 void *hcpu) 2529 { 2530 int cpu = (unsigned long)hcpu; 2531 struct memcg_stock_pcp *stock; 2532 struct mem_cgroup *iter; 2533 2534 if (action == CPU_ONLINE) 2535 return NOTIFY_OK; 2536 2537 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 2538 return NOTIFY_OK; 2539 2540 for_each_mem_cgroup(iter) 2541 mem_cgroup_drain_pcp_counter(iter, cpu); 2542 2543 stock = &per_cpu(memcg_stock, cpu); 2544 drain_stock(stock); 2545 return NOTIFY_OK; 2546 } 2547 2548 2549 /* See __mem_cgroup_try_charge() for details */ 2550 enum { 2551 CHARGE_OK, /* success */ 2552 CHARGE_RETRY, /* need to retry but retry is not bad */ 2553 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */ 2554 CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. */ 2555 CHARGE_OOM_DIE, /* the current is killed because of OOM */ 2556 }; 2557 2558 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2559 unsigned int nr_pages, unsigned int min_pages, 2560 bool oom_check) 2561 { 2562 unsigned long csize = nr_pages * PAGE_SIZE; 2563 struct mem_cgroup *mem_over_limit; 2564 struct res_counter *fail_res; 2565 unsigned long flags = 0; 2566 int ret; 2567 2568 ret = res_counter_charge(&memcg->res, csize, &fail_res); 2569 2570 if (likely(!ret)) { 2571 if (!do_swap_account) 2572 return CHARGE_OK; 2573 ret = res_counter_charge(&memcg->memsw, csize, &fail_res); 2574 if (likely(!ret)) 2575 return CHARGE_OK; 2576 2577 res_counter_uncharge(&memcg->res, csize); 2578 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); 2579 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 2580 } else 2581 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 2582 /* 2583 * Never reclaim on behalf of optional batching, retry with a 2584 * single page instead. 2585 */ 2586 if (nr_pages > min_pages) 2587 return CHARGE_RETRY; 2588 2589 if (!(gfp_mask & __GFP_WAIT)) 2590 return CHARGE_WOULDBLOCK; 2591 2592 if (gfp_mask & __GFP_NORETRY) 2593 return CHARGE_NOMEM; 2594 2595 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags); 2596 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2597 return CHARGE_RETRY; 2598 /* 2599 * Even though the limit is exceeded at this point, reclaim 2600 * may have been able to free some pages. Retry the charge 2601 * before killing the task. 2602 * 2603 * Only for regular pages, though: huge pages are rather 2604 * unlikely to succeed so close to the limit, and we fall back 2605 * to regular pages anyway in case of failure. 2606 */ 2607 if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret) 2608 return CHARGE_RETRY; 2609 2610 /* 2611 * At task move, charge accounts can be doubly counted. So, it's 2612 * better to wait until the end of task_move if something is going on. 
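* If mem_cgroup_wait_acct_move() below reports that a move was indeed in progress, we simply ask the caller to retry the charge (CHARGE_RETRY) rather than risk double counting.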
2613 */ 2614 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2615 return CHARGE_RETRY; 2616 2617 /* If we don't need to call the oom-killer at all, return immediately */ 2618 if (!oom_check) 2619 return CHARGE_NOMEM; 2620 /* check OOM */ 2621 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize))) 2622 return CHARGE_OOM_DIE; 2623 2624 return CHARGE_RETRY; 2625 } 2626 2627 /* 2628 * __mem_cgroup_try_charge() does 2629 * 1. detect the memcg to be charged against from the passed *mm and *ptr, 2630 * 2. update the res_counter 2631 * 3. call memory reclaim if necessary. 2632 * 2633 * In some special cases, if the task is dying, has a fatal signal pending or 2634 * has TIF_MEMDIE set, this function returns -EINTR while writing root_mem_cgroup 2635 * to *ptr. There are two reasons for this. 1: dying threads should quit as soon 2636 * as possible without any hazards. 2: all pages should have a valid 2637 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg 2638 * pointer, that is treated as a charge to root_mem_cgroup. 2639 * 2640 * So __mem_cgroup_try_charge() will return 2641 * 0 ... on success, filling *ptr with a valid memcg pointer. 2642 * -ENOMEM ... charge failure because of resource limits. 2643 * -EINTR ... if the thread is dying. *ptr is filled with root_mem_cgroup. 2644 * 2645 * Unlike the exported interface, an "oom" parameter is added. If oom==true, 2646 * the oom-killer can be invoked. 2647 */ 2648 static int __mem_cgroup_try_charge(struct mm_struct *mm, 2649 gfp_t gfp_mask, 2650 unsigned int nr_pages, 2651 struct mem_cgroup **ptr, 2652 bool oom) 2653 { 2654 unsigned int batch = max(CHARGE_BATCH, nr_pages); 2655 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2656 struct mem_cgroup *memcg = NULL; 2657 int ret; 2658 2659 /* 2660 * Unlike the global VM's OOM kill, we're not in a system-level memory 2661 * shortage here. So, allow dying processes to go ahead in addition to 2662 * MEMDIE processes. 2663 */ 2664 if (unlikely(test_thread_flag(TIF_MEMDIE) 2665 || fatal_signal_pending(current))) 2666 goto bypass; 2667 2668 /* 2669 * We always charge the cgroup the mm_struct belongs to. 2670 * The mm_struct's mem_cgroup changes on task migration if the 2671 * thread group leader migrates. It's possible that mm is not 2672 * set, if so charge the root memcg (happens for pagecache usage). 2673 */ 2674 if (!*ptr && !mm) 2675 *ptr = root_mem_cgroup; 2676 again: 2677 if (*ptr) { /* css should be a valid one */ 2678 memcg = *ptr; 2679 if (mem_cgroup_is_root(memcg)) 2680 goto done; 2681 if (consume_stock(memcg, nr_pages)) 2682 goto done; 2683 css_get(&memcg->css); 2684 } else { 2685 struct task_struct *p; 2686 2687 rcu_read_lock(); 2688 p = rcu_dereference(mm->owner); 2689 /* 2690 * Because we don't have task_lock(), "p" can exit. 2691 * In that case, "memcg" can point to root or p can be NULL due to 2692 * a race with swapoff. Then, we have a small risk of mis-accounting. 2693 * But that kind of mis-accounting by race can always happen because 2694 * we don't hold cgroup_mutex(). Taking it would be overkill, so we allow that 2695 * small race here. 2696 * (*) swapoff et al. will charge against the mm_struct, not against the 2697 * task_struct. So, mm->owner can be NULL. 2698 */ 2699 memcg = mem_cgroup_from_task(p); 2700 if (!memcg) 2701 memcg = root_mem_cgroup; 2702 if (mem_cgroup_is_root(memcg)) { 2703 rcu_read_unlock(); 2704 goto done; 2705 } 2706 if (consume_stock(memcg, nr_pages)) { 2707 /* 2708 * It seems dangerous to access memcg without css_get(). 2709 * But considering how consume_stock() works, it's not 2710 * necessary. 
If consume_stock() succeeds, some charges 2711 * from this memcg are cached on this cpu. So, we 2712 * don't need to call css_get()/css_tryget() before 2713 * calling consume_stock(). 2714 */ 2715 rcu_read_unlock(); 2716 goto done; 2717 } 2718 /* after here, we may block. We need to get a refcnt */ 2719 if (!css_tryget(&memcg->css)) { 2720 rcu_read_unlock(); 2721 goto again; 2722 } 2723 rcu_read_unlock(); 2724 } 2725 2726 do { 2727 bool oom_check; 2728 2729 /* If killed, bypass charge */ 2730 if (fatal_signal_pending(current)) { 2731 css_put(&memcg->css); 2732 goto bypass; 2733 } 2734 2735 oom_check = false; 2736 if (oom && !nr_oom_retries) { 2737 oom_check = true; 2738 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; 2739 } 2740 2741 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages, 2742 oom_check); 2743 switch (ret) { 2744 case CHARGE_OK: 2745 break; 2746 case CHARGE_RETRY: /* not in OOM situation but retry */ 2747 batch = nr_pages; 2748 css_put(&memcg->css); 2749 memcg = NULL; 2750 goto again; 2751 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ 2752 css_put(&memcg->css); 2753 goto nomem; 2754 case CHARGE_NOMEM: /* OOM routine works */ 2755 if (!oom) { 2756 css_put(&memcg->css); 2757 goto nomem; 2758 } 2759 /* If oom, we never return -ENOMEM */ 2760 nr_oom_retries--; 2761 break; 2762 case CHARGE_OOM_DIE: /* Killed by OOM Killer */ 2763 css_put(&memcg->css); 2764 goto bypass; 2765 } 2766 } while (ret != CHARGE_OK); 2767 2768 if (batch > nr_pages) 2769 refill_stock(memcg, batch - nr_pages); 2770 css_put(&memcg->css); 2771 done: 2772 *ptr = memcg; 2773 return 0; 2774 nomem: 2775 *ptr = NULL; 2776 return -ENOMEM; 2777 bypass: 2778 *ptr = root_mem_cgroup; 2779 return -EINTR; 2780 } 2781 2782 /* 2783 * Sometimes we have to undo a charge we got by try_charge(). 2784 * This function is for that: it does the uncharge and puts the css refcnt 2785 * gotten by try_charge(). 2786 */ 2787 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg, 2788 unsigned int nr_pages) 2789 { 2790 if (!mem_cgroup_is_root(memcg)) { 2791 unsigned long bytes = nr_pages * PAGE_SIZE; 2792 2793 res_counter_uncharge(&memcg->res, bytes); 2794 if (do_swap_account) 2795 res_counter_uncharge(&memcg->memsw, bytes); 2796 } 2797 } 2798 2799 /* 2800 * Cancel charges in this cgroup....doesn't propagate to the parent cgroup. 2801 * This is useful when moving usage to the parent cgroup. 2802 */ 2803 static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg, 2804 unsigned int nr_pages) 2805 { 2806 unsigned long bytes = nr_pages * PAGE_SIZE; 2807 2808 if (mem_cgroup_is_root(memcg)) 2809 return; 2810 2811 res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes); 2812 if (do_swap_account) 2813 res_counter_uncharge_until(&memcg->memsw, 2814 memcg->memsw.parent, bytes); 2815 } 2816 2817 /* 2818 * A helper function to get a mem_cgroup from an ID. Must be called under 2819 * rcu_read_lock(). The caller is responsible for calling css_tryget if 2820 * the mem_cgroup is used for charging. (Dropping a refcnt from swap can be 2821 * called against a removed memcg.) 
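* * Typical pattern, as in try_get_mem_cgroup_from_page() below: * * rcu_read_lock(); * memcg = mem_cgroup_lookup(id); * if (memcg && !css_tryget(&memcg->css)) * memcg = NULL; * rcu_read_unlock();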
2822 */ 2823 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2824 { 2825 struct cgroup_subsys_state *css; 2826 2827 /* ID 0 is unused ID */ 2828 if (!id) 2829 return NULL; 2830 css = css_lookup(&mem_cgroup_subsys, id); 2831 if (!css) 2832 return NULL; 2833 return mem_cgroup_from_css(css); 2834 } 2835 2836 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2837 { 2838 struct mem_cgroup *memcg = NULL; 2839 struct page_cgroup *pc; 2840 unsigned short id; 2841 swp_entry_t ent; 2842 2843 VM_BUG_ON(!PageLocked(page)); 2844 2845 pc = lookup_page_cgroup(page); 2846 lock_page_cgroup(pc); 2847 if (PageCgroupUsed(pc)) { 2848 memcg = pc->mem_cgroup; 2849 if (memcg && !css_tryget(&memcg->css)) 2850 memcg = NULL; 2851 } else if (PageSwapCache(page)) { 2852 ent.val = page_private(page); 2853 id = lookup_swap_cgroup_id(ent); 2854 rcu_read_lock(); 2855 memcg = mem_cgroup_lookup(id); 2856 if (memcg && !css_tryget(&memcg->css)) 2857 memcg = NULL; 2858 rcu_read_unlock(); 2859 } 2860 unlock_page_cgroup(pc); 2861 return memcg; 2862 } 2863 2864 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, 2865 struct page *page, 2866 unsigned int nr_pages, 2867 enum charge_type ctype, 2868 bool lrucare) 2869 { 2870 struct page_cgroup *pc = lookup_page_cgroup(page); 2871 struct zone *uninitialized_var(zone); 2872 struct lruvec *lruvec; 2873 bool was_on_lru = false; 2874 bool anon; 2875 2876 lock_page_cgroup(pc); 2877 VM_BUG_ON(PageCgroupUsed(pc)); 2878 /* 2879 * we don't need page_cgroup_lock about tail pages, becase they are not 2880 * accessed by any other context at this point. 2881 */ 2882 2883 /* 2884 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2885 * may already be on some other mem_cgroup's LRU. Take care of it. 2886 */ 2887 if (lrucare) { 2888 zone = page_zone(page); 2889 spin_lock_irq(&zone->lru_lock); 2890 if (PageLRU(page)) { 2891 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); 2892 ClearPageLRU(page); 2893 del_page_from_lru_list(page, lruvec, page_lru(page)); 2894 was_on_lru = true; 2895 } 2896 } 2897 2898 pc->mem_cgroup = memcg; 2899 /* 2900 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2901 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2902 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2903 * before USED bit, we need memory barrier here. 2904 * See mem_cgroup_add_lru_list(), etc. 2905 */ 2906 smp_wmb(); 2907 SetPageCgroupUsed(pc); 2908 2909 if (lrucare) { 2910 if (was_on_lru) { 2911 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); 2912 VM_BUG_ON(PageLRU(page)); 2913 SetPageLRU(page); 2914 add_page_to_lru_list(page, lruvec, page_lru(page)); 2915 } 2916 spin_unlock_irq(&zone->lru_lock); 2917 } 2918 2919 if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON) 2920 anon = true; 2921 else 2922 anon = false; 2923 2924 mem_cgroup_charge_statistics(memcg, page, anon, nr_pages); 2925 unlock_page_cgroup(pc); 2926 2927 /* 2928 * "charge_statistics" updated event counter. Then, check it. 2929 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2930 * if they exceeds softlimit. 
2931 */ 2932 memcg_check_events(memcg, page); 2933 } 2934 2935 static DEFINE_MUTEX(set_limit_mutex); 2936 2937 #ifdef CONFIG_MEMCG_KMEM 2938 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) 2939 { 2940 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && 2941 (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK); 2942 } 2943 2944 /* 2945 * This is a bit cumbersome, but it is rarely used and avoids a backpointer 2946 * in the memcg_cache_params struct. 2947 */ 2948 static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) 2949 { 2950 struct kmem_cache *cachep; 2951 2952 VM_BUG_ON(p->is_root_cache); 2953 cachep = p->root_cache; 2954 return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)]; 2955 } 2956 2957 #ifdef CONFIG_SLABINFO 2958 static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft, 2959 struct seq_file *m) 2960 { 2961 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 2962 struct memcg_cache_params *params; 2963 2964 if (!memcg_can_account_kmem(memcg)) 2965 return -EIO; 2966 2967 print_slabinfo_header(m); 2968 2969 mutex_lock(&memcg->slab_caches_mutex); 2970 list_for_each_entry(params, &memcg->memcg_slab_caches, list) 2971 cache_show(memcg_params_to_cache(params), m); 2972 mutex_unlock(&memcg->slab_caches_mutex); 2973 2974 return 0; 2975 } 2976 #endif 2977 2978 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) 2979 { 2980 struct res_counter *fail_res; 2981 struct mem_cgroup *_memcg; 2982 int ret = 0; 2983 bool may_oom; 2984 2985 ret = res_counter_charge(&memcg->kmem, size, &fail_res); 2986 if (ret) 2987 return ret; 2988 2989 /* 2990 * Conditions under which we can wait for the oom_killer. Those are 2991 * the same conditions tested by the core page allocator 2992 */ 2993 may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY); 2994 2995 _memcg = memcg; 2996 ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT, 2997 &_memcg, may_oom); 2998 2999 if (ret == -EINTR) { 3000 /* 3001 * __mem_cgroup_try_charge() chosed to bypass to root due to 3002 * OOM kill or fatal signal. Since our only options are to 3003 * either fail the allocation or charge it to this cgroup, do 3004 * it as a temporary condition. But we can't fail. From a 3005 * kmem/slab perspective, the cache has already been selected, 3006 * by mem_cgroup_kmem_get_cache(), so it is too late to change 3007 * our minds. 3008 * 3009 * This condition will only trigger if the task entered 3010 * memcg_charge_kmem in a sane state, but was OOM-killed during 3011 * __mem_cgroup_try_charge() above. 
Tasks that were already 3012 * dying when the allocation triggers should have been already 3013 * directed to the root cgroup in memcontrol.h 3014 */ 3015 res_counter_charge_nofail(&memcg->res, size, &fail_res); 3016 if (do_swap_account) 3017 res_counter_charge_nofail(&memcg->memsw, size, 3018 &fail_res); 3019 ret = 0; 3020 } else if (ret) 3021 res_counter_uncharge(&memcg->kmem, size); 3022 3023 return ret; 3024 } 3025 3026 static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) 3027 { 3028 res_counter_uncharge(&memcg->res, size); 3029 if (do_swap_account) 3030 res_counter_uncharge(&memcg->memsw, size); 3031 3032 /* Not down to 0 */ 3033 if (res_counter_uncharge(&memcg->kmem, size)) 3034 return; 3035 3036 if (memcg_kmem_test_and_clear_dead(memcg)) 3037 mem_cgroup_put(memcg); 3038 } 3039 3040 void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) 3041 { 3042 if (!memcg) 3043 return; 3044 3045 mutex_lock(&memcg->slab_caches_mutex); 3046 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); 3047 mutex_unlock(&memcg->slab_caches_mutex); 3048 } 3049 3050 /* 3051 * helper for acessing a memcg's index. It will be used as an index in the 3052 * child cache array in kmem_cache, and also to derive its name. This function 3053 * will return -1 when this is not a kmem-limited memcg. 3054 */ 3055 int memcg_cache_id(struct mem_cgroup *memcg) 3056 { 3057 return memcg ? memcg->kmemcg_id : -1; 3058 } 3059 3060 /* 3061 * This ends up being protected by the set_limit mutex, during normal 3062 * operation, because that is its main call site. 3063 * 3064 * But when we create a new cache, we can call this as well if its parent 3065 * is kmem-limited. That will have to hold set_limit_mutex as well. 3066 */ 3067 int memcg_update_cache_sizes(struct mem_cgroup *memcg) 3068 { 3069 int num, ret; 3070 3071 num = ida_simple_get(&kmem_limited_groups, 3072 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 3073 if (num < 0) 3074 return num; 3075 /* 3076 * After this point, kmem_accounted (that we test atomically in 3077 * the beginning of this conditional), is no longer 0. This 3078 * guarantees only one process will set the following boolean 3079 * to true. We don't need test_and_set because we're protected 3080 * by the set_limit_mutex anyway. 3081 */ 3082 memcg_kmem_set_activated(memcg); 3083 3084 ret = memcg_update_all_caches(num+1); 3085 if (ret) { 3086 ida_simple_remove(&kmem_limited_groups, num); 3087 memcg_kmem_clear_activated(memcg); 3088 return ret; 3089 } 3090 3091 memcg->kmemcg_id = num; 3092 INIT_LIST_HEAD(&memcg->memcg_slab_caches); 3093 mutex_init(&memcg->slab_caches_mutex); 3094 return 0; 3095 } 3096 3097 static size_t memcg_caches_array_size(int num_groups) 3098 { 3099 ssize_t size; 3100 if (num_groups <= 0) 3101 return 0; 3102 3103 size = 2 * num_groups; 3104 if (size < MEMCG_CACHES_MIN_SIZE) 3105 size = MEMCG_CACHES_MIN_SIZE; 3106 else if (size > MEMCG_CACHES_MAX_SIZE) 3107 size = MEMCG_CACHES_MAX_SIZE; 3108 3109 return size; 3110 } 3111 3112 /* 3113 * We should update the current array size iff all caches updates succeed. This 3114 * can only be done from the slab side. The slab mutex needs to be held when 3115 * calling this. 
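* Note that memcg_caches_array_size() above doubles the requested group count and clamps it to [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE], so the per-cache array grows geometrically rather than one slot at a time.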
3116 */ 3117 void memcg_update_array_size(int num) 3118 { 3119 if (num > memcg_limited_groups_array_size) 3120 memcg_limited_groups_array_size = memcg_caches_array_size(num); 3121 } 3122 3123 static void kmem_cache_destroy_work_func(struct work_struct *w); 3124 3125 int memcg_update_cache_size(struct kmem_cache *s, int num_groups) 3126 { 3127 struct memcg_cache_params *cur_params = s->memcg_params; 3128 3129 VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache); 3130 3131 if (num_groups > memcg_limited_groups_array_size) { 3132 int i; 3133 ssize_t size = memcg_caches_array_size(num_groups); 3134 3135 size *= sizeof(void *); 3136 size += sizeof(struct memcg_cache_params); 3137 3138 s->memcg_params = kzalloc(size, GFP_KERNEL); 3139 if (!s->memcg_params) { 3140 s->memcg_params = cur_params; 3141 return -ENOMEM; 3142 } 3143 3144 INIT_WORK(&s->memcg_params->destroy, 3145 kmem_cache_destroy_work_func); 3146 s->memcg_params->is_root_cache = true; 3147 3148 /* 3149 * There is the chance it will be bigger than 3150 * memcg_limited_groups_array_size, if we failed an allocation 3151 * in a cache, in which case all caches updated before it, will 3152 * have a bigger array. 3153 * 3154 * But if that is the case, the data after 3155 * memcg_limited_groups_array_size is certainly unused 3156 */ 3157 for (i = 0; i < memcg_limited_groups_array_size; i++) { 3158 if (!cur_params->memcg_caches[i]) 3159 continue; 3160 s->memcg_params->memcg_caches[i] = 3161 cur_params->memcg_caches[i]; 3162 } 3163 3164 /* 3165 * Ideally, we would wait until all caches succeed, and only 3166 * then free the old one. But this is not worth the extra 3167 * pointer per-cache we'd have to have for this. 3168 * 3169 * It is not a big deal if some caches are left with a size 3170 * bigger than the others. And all updates will reset this 3171 * anyway. 3172 */ 3173 kfree(cur_params); 3174 } 3175 return 0; 3176 } 3177 3178 int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, 3179 struct kmem_cache *root_cache) 3180 { 3181 size_t size = sizeof(struct memcg_cache_params); 3182 3183 if (!memcg_kmem_enabled()) 3184 return 0; 3185 3186 if (!memcg) 3187 size += memcg_limited_groups_array_size * sizeof(void *); 3188 3189 s->memcg_params = kzalloc(size, GFP_KERNEL); 3190 if (!s->memcg_params) 3191 return -ENOMEM; 3192 3193 INIT_WORK(&s->memcg_params->destroy, 3194 kmem_cache_destroy_work_func); 3195 if (memcg) { 3196 s->memcg_params->memcg = memcg; 3197 s->memcg_params->root_cache = root_cache; 3198 } else 3199 s->memcg_params->is_root_cache = true; 3200 3201 return 0; 3202 } 3203 3204 void memcg_release_cache(struct kmem_cache *s) 3205 { 3206 struct kmem_cache *root; 3207 struct mem_cgroup *memcg; 3208 int id; 3209 3210 /* 3211 * This happens, for instance, when a root cache goes away before we 3212 * add any memcg. 3213 */ 3214 if (!s->memcg_params) 3215 return; 3216 3217 if (s->memcg_params->is_root_cache) 3218 goto out; 3219 3220 memcg = s->memcg_params->memcg; 3221 id = memcg_cache_id(memcg); 3222 3223 root = s->memcg_params->root_cache; 3224 root->memcg_params->memcg_caches[id] = NULL; 3225 3226 mutex_lock(&memcg->slab_caches_mutex); 3227 list_del(&s->memcg_params->list); 3228 mutex_unlock(&memcg->slab_caches_mutex); 3229 3230 mem_cgroup_put(memcg); 3231 out: 3232 kfree(s->memcg_params); 3233 } 3234 3235 /* 3236 * During the creation a new cache, we need to disable our accounting mechanism 3237 * altogether. This is true even if we are not creating, but rather just 3238 * enqueing new caches to be created. 
3239 * 3240 * This is because that process will trigger allocations; some visible, like 3241 * explicit kmallocs to auxiliary data structures, name strings and internal 3242 * cache structures; some well concealed, like INIT_WORK() that can allocate 3243 * objects during debug. 3244 * 3245 * If any allocation happens during memcg_kmem_get_cache, we will recurse back 3246 * to it. This may not be a bounded recursion: since the first cache creation 3247 * failed to complete (waiting on the allocation), we'll just try to create the 3248 * cache again, failing at the same point. 3249 * 3250 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of 3251 * memcg_kmem_skip_account. So we enclose anything that might allocate memory 3252 * inside the following two functions. 3253 */ 3254 static inline void memcg_stop_kmem_account(void) 3255 { 3256 VM_BUG_ON(!current->mm); 3257 current->memcg_kmem_skip_account++; 3258 } 3259 3260 static inline void memcg_resume_kmem_account(void) 3261 { 3262 VM_BUG_ON(!current->mm); 3263 current->memcg_kmem_skip_account--; 3264 } 3265 3266 static void kmem_cache_destroy_work_func(struct work_struct *w) 3267 { 3268 struct kmem_cache *cachep; 3269 struct memcg_cache_params *p; 3270 3271 p = container_of(w, struct memcg_cache_params, destroy); 3272 3273 cachep = memcg_params_to_cache(p); 3274 3275 /* 3276 * If we get down to 0 after shrink, we could delete right away. 3277 * However, memcg_release_pages() already puts us back in the workqueue 3278 * in that case. If we proceed deleting, we'll get a dangling 3279 * reference, and removing the object from the workqueue in that case 3280 * is unnecessary complication. We are not a fast path. 3281 * 3282 * Note that this case is fundamentally different from racing with 3283 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in 3284 * kmem_cache_shrink, not only we would be reinserting a dead cache 3285 * into the queue, but doing so from inside the worker racing to 3286 * destroy it. 3287 * 3288 * So if we aren't down to zero, we'll just schedule a worker and try 3289 * again 3290 */ 3291 if (atomic_read(&cachep->memcg_params->nr_pages) != 0) { 3292 kmem_cache_shrink(cachep); 3293 if (atomic_read(&cachep->memcg_params->nr_pages) == 0) 3294 return; 3295 } else 3296 kmem_cache_destroy(cachep); 3297 } 3298 3299 void mem_cgroup_destroy_cache(struct kmem_cache *cachep) 3300 { 3301 if (!cachep->memcg_params->dead) 3302 return; 3303 3304 /* 3305 * There are many ways in which we can get here. 3306 * 3307 * We can get to a memory-pressure situation while the delayed work is 3308 * still pending to run. The vmscan shrinkers can then release all 3309 * cache memory and get us to destruction. If this is the case, we'll 3310 * be executed twice, which is a bug (the second time will execute over 3311 * bogus data). In this case, cancelling the work should be fine. 3312 * 3313 * But we can also get here from the worker itself, if 3314 * kmem_cache_shrink is enough to shake all the remaining objects and 3315 * get the page count to 0. In this case, we'll deadlock if we try to 3316 * cancel the work (the worker runs with an internal lock held, which 3317 * is the same lock we would hold for cancel_work_sync().) 
3318 * 3319 * Since we can't possibly know who got us here, just refrain from 3320 * running if there is already work pending 3321 */ 3322 if (work_pending(&cachep->memcg_params->destroy)) 3323 return; 3324 /* 3325 * We have to defer the actual destroying to a workqueue, because 3326 * we might currently be in a context that cannot sleep. 3327 */ 3328 schedule_work(&cachep->memcg_params->destroy); 3329 } 3330 3331 /* 3332 * This lock protects updaters, not readers. We want readers to be as fast as 3333 * they can, and they will either see NULL or a valid cache value. Our model 3334 * allow them to see NULL, in which case the root memcg will be selected. 3335 * 3336 * We need this lock because multiple allocations to the same cache from a non 3337 * will span more than one worker. Only one of them can create the cache. 3338 */ 3339 static DEFINE_MUTEX(memcg_cache_mutex); 3340 3341 /* 3342 * Called with memcg_cache_mutex held 3343 */ 3344 static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, 3345 struct kmem_cache *s) 3346 { 3347 struct kmem_cache *new; 3348 static char *tmp_name = NULL; 3349 3350 lockdep_assert_held(&memcg_cache_mutex); 3351 3352 /* 3353 * kmem_cache_create_memcg duplicates the given name and 3354 * cgroup_name for this name requires RCU context. 3355 * This static temporary buffer is used to prevent from 3356 * pointless shortliving allocation. 3357 */ 3358 if (!tmp_name) { 3359 tmp_name = kmalloc(PATH_MAX, GFP_KERNEL); 3360 if (!tmp_name) 3361 return NULL; 3362 } 3363 3364 rcu_read_lock(); 3365 snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name, 3366 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup)); 3367 rcu_read_unlock(); 3368 3369 new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align, 3370 (s->flags & ~SLAB_PANIC), s->ctor, s); 3371 3372 if (new) 3373 new->allocflags |= __GFP_KMEMCG; 3374 3375 return new; 3376 } 3377 3378 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, 3379 struct kmem_cache *cachep) 3380 { 3381 struct kmem_cache *new_cachep; 3382 int idx; 3383 3384 BUG_ON(!memcg_can_account_kmem(memcg)); 3385 3386 idx = memcg_cache_id(memcg); 3387 3388 mutex_lock(&memcg_cache_mutex); 3389 new_cachep = cachep->memcg_params->memcg_caches[idx]; 3390 if (new_cachep) 3391 goto out; 3392 3393 new_cachep = kmem_cache_dup(memcg, cachep); 3394 if (new_cachep == NULL) { 3395 new_cachep = cachep; 3396 goto out; 3397 } 3398 3399 mem_cgroup_get(memcg); 3400 atomic_set(&new_cachep->memcg_params->nr_pages , 0); 3401 3402 cachep->memcg_params->memcg_caches[idx] = new_cachep; 3403 /* 3404 * the readers won't lock, make sure everybody sees the updated value, 3405 * so they won't put stuff in the queue again for no reason 3406 */ 3407 wmb(); 3408 out: 3409 mutex_unlock(&memcg_cache_mutex); 3410 return new_cachep; 3411 } 3412 3413 void kmem_cache_destroy_memcg_children(struct kmem_cache *s) 3414 { 3415 struct kmem_cache *c; 3416 int i; 3417 3418 if (!s->memcg_params) 3419 return; 3420 if (!s->memcg_params->is_root_cache) 3421 return; 3422 3423 /* 3424 * If the cache is being destroyed, we trust that there is no one else 3425 * requesting objects from it. Even if there are, the sanity checks in 3426 * kmem_cache_destroy should caught this ill-case. 3427 * 3428 * Still, we don't want anyone else freeing memcg_caches under our 3429 * noses, which can happen if a new memcg comes to life. As usual, 3430 * we'll take the set_limit_mutex to protect ourselves against this. 
3431 */ 3432 mutex_lock(&set_limit_mutex); 3433 for (i = 0; i < memcg_limited_groups_array_size; i++) { 3434 c = s->memcg_params->memcg_caches[i]; 3435 if (!c) 3436 continue; 3437 3438 /* 3439 * We will now manually delete the caches, so to avoid races 3440 * we need to cancel all pending destruction workers and 3441 * proceed with destruction ourselves. 3442 * 3443 * kmem_cache_destroy() will call kmem_cache_shrink internally, 3444 * and that could spawn the workers again: it is likely that 3445 * the cache still have active pages until this very moment. 3446 * This would lead us back to mem_cgroup_destroy_cache. 3447 * 3448 * But that will not execute at all if the "dead" flag is not 3449 * set, so flip it down to guarantee we are in control. 3450 */ 3451 c->memcg_params->dead = false; 3452 cancel_work_sync(&c->memcg_params->destroy); 3453 kmem_cache_destroy(c); 3454 } 3455 mutex_unlock(&set_limit_mutex); 3456 } 3457 3458 struct create_work { 3459 struct mem_cgroup *memcg; 3460 struct kmem_cache *cachep; 3461 struct work_struct work; 3462 }; 3463 3464 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3465 { 3466 struct kmem_cache *cachep; 3467 struct memcg_cache_params *params; 3468 3469 if (!memcg_kmem_is_active(memcg)) 3470 return; 3471 3472 mutex_lock(&memcg->slab_caches_mutex); 3473 list_for_each_entry(params, &memcg->memcg_slab_caches, list) { 3474 cachep = memcg_params_to_cache(params); 3475 cachep->memcg_params->dead = true; 3476 schedule_work(&cachep->memcg_params->destroy); 3477 } 3478 mutex_unlock(&memcg->slab_caches_mutex); 3479 } 3480 3481 static void memcg_create_cache_work_func(struct work_struct *w) 3482 { 3483 struct create_work *cw; 3484 3485 cw = container_of(w, struct create_work, work); 3486 memcg_create_kmem_cache(cw->memcg, cw->cachep); 3487 /* Drop the reference gotten when we enqueued. */ 3488 css_put(&cw->memcg->css); 3489 kfree(cw); 3490 } 3491 3492 /* 3493 * Enqueue the creation of a per-memcg kmem_cache. 3494 */ 3495 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3496 struct kmem_cache *cachep) 3497 { 3498 struct create_work *cw; 3499 3500 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT); 3501 if (cw == NULL) { 3502 css_put(&memcg->css); 3503 return; 3504 } 3505 3506 cw->memcg = memcg; 3507 cw->cachep = cachep; 3508 3509 INIT_WORK(&cw->work, memcg_create_cache_work_func); 3510 schedule_work(&cw->work); 3511 } 3512 3513 static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3514 struct kmem_cache *cachep) 3515 { 3516 /* 3517 * We need to stop accounting when we kmalloc, because if the 3518 * corresponding kmalloc cache is not yet created, the first allocation 3519 * in __memcg_create_cache_enqueue will recurse. 3520 * 3521 * However, it is better to enclose the whole function. Depending on 3522 * the debugging options enabled, INIT_WORK(), for instance, can 3523 * trigger an allocation. This too, will make us recurse. Because at 3524 * this point we can't allow ourselves back into memcg_kmem_get_cache, 3525 * the safest choice is to do it like this, wrapping the whole function. 3526 */ 3527 memcg_stop_kmem_account(); 3528 __memcg_create_cache_enqueue(memcg, cachep); 3529 memcg_resume_kmem_account(); 3530 } 3531 /* 3532 * Return the kmem_cache we're supposed to use for a slab allocation. 3533 * We try to use the current memcg's version of the cache. 
3534 * 3535 * If the cache does not exist yet, if we are the first user of it, 3536 * we either create it immediately, if possible, or create it asynchronously 3537 * in a workqueue. 3538 * In the latter case, we will let the current allocation go through with 3539 * the original cache. 3540 * 3541 * Can't be called in interrupt context or from kernel threads. 3542 * This function needs to be called with rcu_read_lock() held. 3543 */ 3544 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, 3545 gfp_t gfp) 3546 { 3547 struct mem_cgroup *memcg; 3548 int idx; 3549 3550 VM_BUG_ON(!cachep->memcg_params); 3551 VM_BUG_ON(!cachep->memcg_params->is_root_cache); 3552 3553 if (!current->mm || current->memcg_kmem_skip_account) 3554 return cachep; 3555 3556 rcu_read_lock(); 3557 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner)); 3558 3559 if (!memcg_can_account_kmem(memcg)) 3560 goto out; 3561 3562 idx = memcg_cache_id(memcg); 3563 3564 /* 3565 * barrier to mare sure we're always seeing the up to date value. The 3566 * code updating memcg_caches will issue a write barrier to match this. 3567 */ 3568 read_barrier_depends(); 3569 if (likely(cachep->memcg_params->memcg_caches[idx])) { 3570 cachep = cachep->memcg_params->memcg_caches[idx]; 3571 goto out; 3572 } 3573 3574 /* The corresponding put will be done in the workqueue. */ 3575 if (!css_tryget(&memcg->css)) 3576 goto out; 3577 rcu_read_unlock(); 3578 3579 /* 3580 * If we are in a safe context (can wait, and not in interrupt 3581 * context), we could be be predictable and return right away. 3582 * This would guarantee that the allocation being performed 3583 * already belongs in the new cache. 3584 * 3585 * However, there are some clashes that can arrive from locking. 3586 * For instance, because we acquire the slab_mutex while doing 3587 * kmem_cache_dup, this means no further allocation could happen 3588 * with the slab_mutex held. 3589 * 3590 * Also, because cache creation issue get_online_cpus(), this 3591 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex, 3592 * that ends up reversed during cpu hotplug. (cpuset allocates 3593 * a bunch of GFP_KERNEL memory during cpuup). Due to all that, 3594 * better to defer everything. 3595 */ 3596 memcg_create_cache_enqueue(memcg, cachep); 3597 return cachep; 3598 out: 3599 rcu_read_unlock(); 3600 return cachep; 3601 } 3602 EXPORT_SYMBOL(__memcg_kmem_get_cache); 3603 3604 /* 3605 * We need to verify if the allocation against current->mm->owner's memcg is 3606 * possible for the given order. But the page is not allocated yet, so we'll 3607 * need a further commit step to do the final arrangements. 3608 * 3609 * It is possible for the task to switch cgroups in this mean time, so at 3610 * commit time, we can't rely on task conversion any longer. We'll then use 3611 * the handle argument to return to the caller which cgroup we should commit 3612 * against. We could also return the memcg directly and avoid the pointer 3613 * passing, but a boolean return value gives better semantics considering 3614 * the compiled-out case as well. 3615 * 3616 * Returning true means the allocation is possible. 3617 */ 3618 bool 3619 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) 3620 { 3621 struct mem_cgroup *memcg; 3622 int ret; 3623 3624 *_memcg = NULL; 3625 memcg = try_get_mem_cgroup_from_mm(current->mm); 3626 3627 /* 3628 * very rare case described in mem_cgroup_from_task. 
Unfortunately there 3629 * isn't much we can do without complicating this too much, and it would 3630 * be gfp-dependent anyway. Just let it go 3631 */ 3632 if (unlikely(!memcg)) 3633 return true; 3634 3635 if (!memcg_can_account_kmem(memcg)) { 3636 css_put(&memcg->css); 3637 return true; 3638 } 3639 3640 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order); 3641 if (!ret) 3642 *_memcg = memcg; 3643 3644 css_put(&memcg->css); 3645 return (ret == 0); 3646 } 3647 3648 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, 3649 int order) 3650 { 3651 struct page_cgroup *pc; 3652 3653 VM_BUG_ON(mem_cgroup_is_root(memcg)); 3654 3655 /* The page allocation failed. Revert */ 3656 if (!page) { 3657 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3658 return; 3659 } 3660 3661 pc = lookup_page_cgroup(page); 3662 lock_page_cgroup(pc); 3663 pc->mem_cgroup = memcg; 3664 SetPageCgroupUsed(pc); 3665 unlock_page_cgroup(pc); 3666 } 3667 3668 void __memcg_kmem_uncharge_pages(struct page *page, int order) 3669 { 3670 struct mem_cgroup *memcg = NULL; 3671 struct page_cgroup *pc; 3672 3673 3674 pc = lookup_page_cgroup(page); 3675 /* 3676 * Fast unlocked return. Theoretically might have changed, have to 3677 * check again after locking. 3678 */ 3679 if (!PageCgroupUsed(pc)) 3680 return; 3681 3682 lock_page_cgroup(pc); 3683 if (PageCgroupUsed(pc)) { 3684 memcg = pc->mem_cgroup; 3685 ClearPageCgroupUsed(pc); 3686 } 3687 unlock_page_cgroup(pc); 3688 3689 /* 3690 * We trust that only if there is a memcg associated with the page, it 3691 * is a valid allocation 3692 */ 3693 if (!memcg) 3694 return; 3695 3696 VM_BUG_ON(mem_cgroup_is_root(memcg)); 3697 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3698 } 3699 #else 3700 static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3701 { 3702 } 3703 #endif /* CONFIG_MEMCG_KMEM */ 3704 3705 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3706 3707 #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION) 3708 /* 3709 * Because tail pages are not marked as "used", set it. We're under 3710 * zone->lru_lock, 'splitting on pmd' and compound_lock. 3711 * charge/uncharge will be never happen and move_account() is done under 3712 * compound_lock(), so we don't have to take care of races. 3713 */ 3714 void mem_cgroup_split_huge_fixup(struct page *head) 3715 { 3716 struct page_cgroup *head_pc = lookup_page_cgroup(head); 3717 struct page_cgroup *pc; 3718 struct mem_cgroup *memcg; 3719 int i; 3720 3721 if (mem_cgroup_disabled()) 3722 return; 3723 3724 memcg = head_pc->mem_cgroup; 3725 for (i = 1; i < HPAGE_PMD_NR; i++) { 3726 pc = head_pc + i; 3727 pc->mem_cgroup = memcg; 3728 smp_wmb();/* see __commit_charge() */ 3729 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 3730 } 3731 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 3732 HPAGE_PMD_NR); 3733 } 3734 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3735 3736 /** 3737 * mem_cgroup_move_account - move account of the page 3738 * @page: the page 3739 * @nr_pages: number of regular pages (>1 for huge pages) 3740 * @pc: page_cgroup of the page. 3741 * @from: mem_cgroup which the page is moved from. 3742 * @to: mem_cgroup which the page is moved to. @from != @to. 3743 * 3744 * The caller must confirm following. 3745 * - page is not on LRU (isolate_page() is useful.) 3746 * - compound_lock is held when nr_pages > 1 3747 * 3748 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 3749 * from old cgroup. 
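* * Returns 0 on success, -EBUSY if @nr_pages > 1 but the page has been split under us, or -EINVAL if the page is no longer charged to @from.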
3750 */ 3751 static int mem_cgroup_move_account(struct page *page, 3752 unsigned int nr_pages, 3753 struct page_cgroup *pc, 3754 struct mem_cgroup *from, 3755 struct mem_cgroup *to) 3756 { 3757 unsigned long flags; 3758 int ret; 3759 bool anon = PageAnon(page); 3760 3761 VM_BUG_ON(from == to); 3762 VM_BUG_ON(PageLRU(page)); 3763 /* 3764 * The page is isolated from LRU. So, collapse function 3765 * will not handle this page. But page splitting can happen. 3766 * Do this check under compound_page_lock(). The caller should 3767 * hold it. 3768 */ 3769 ret = -EBUSY; 3770 if (nr_pages > 1 && !PageTransHuge(page)) 3771 goto out; 3772 3773 lock_page_cgroup(pc); 3774 3775 ret = -EINVAL; 3776 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 3777 goto unlock; 3778 3779 move_lock_mem_cgroup(from, &flags); 3780 3781 if (!anon && page_mapped(page)) { 3782 /* Update mapped_file data for mem_cgroup */ 3783 preempt_disable(); 3784 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 3785 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); 3786 preempt_enable(); 3787 } 3788 mem_cgroup_charge_statistics(from, page, anon, -nr_pages); 3789 3790 /* caller should have done css_get */ 3791 pc->mem_cgroup = to; 3792 mem_cgroup_charge_statistics(to, page, anon, nr_pages); 3793 move_unlock_mem_cgroup(from, &flags); 3794 ret = 0; 3795 unlock: 3796 unlock_page_cgroup(pc); 3797 /* 3798 * check events 3799 */ 3800 memcg_check_events(to, page); 3801 memcg_check_events(from, page); 3802 out: 3803 return ret; 3804 } 3805 3806 /** 3807 * mem_cgroup_move_parent - moves page to the parent group 3808 * @page: the page to move 3809 * @pc: page_cgroup of the page 3810 * @child: page's cgroup 3811 * 3812 * move charges to its parent or the root cgroup if the group has no 3813 * parent (aka use_hierarchy==0). 3814 * Although this might fail (get_page_unless_zero, isolate_lru_page or 3815 * mem_cgroup_move_account fails) the failure is always temporary and 3816 * it signals a race with a page removal/uncharge or migration. In the 3817 * first case the page is on the way out and it will vanish from the LRU 3818 * on the next attempt and the call should be retried later. 3819 * Isolation from the LRU fails only if page has been isolated from 3820 * the LRU since we looked at it and that usually means either global 3821 * reclaim or migration going on. The page will either get back to the 3822 * LRU or vanish. 3823 * Finaly mem_cgroup_move_account fails only if the page got uncharged 3824 * (!PageCgroupUsed) or moved to a different group. The page will 3825 * disappear in the next attempt. 3826 */ 3827 static int mem_cgroup_move_parent(struct page *page, 3828 struct page_cgroup *pc, 3829 struct mem_cgroup *child) 3830 { 3831 struct mem_cgroup *parent; 3832 unsigned int nr_pages; 3833 unsigned long uninitialized_var(flags); 3834 int ret; 3835 3836 VM_BUG_ON(mem_cgroup_is_root(child)); 3837 3838 ret = -EBUSY; 3839 if (!get_page_unless_zero(page)) 3840 goto out; 3841 if (isolate_lru_page(page)) 3842 goto put; 3843 3844 nr_pages = hpage_nr_pages(page); 3845 3846 parent = parent_mem_cgroup(child); 3847 /* 3848 * If no parent, move charges to root cgroup. 
3849 */ 3850 if (!parent) 3851 parent = root_mem_cgroup; 3852 3853 if (nr_pages > 1) { 3854 VM_BUG_ON(!PageTransHuge(page)); 3855 flags = compound_lock_irqsave(page); 3856 } 3857 3858 ret = mem_cgroup_move_account(page, nr_pages, 3859 pc, child, parent); 3860 if (!ret) 3861 __mem_cgroup_cancel_local_charge(child, nr_pages); 3862 3863 if (nr_pages > 1) 3864 compound_unlock_irqrestore(page, flags); 3865 putback_lru_page(page); 3866 put: 3867 put_page(page); 3868 out: 3869 return ret; 3870 } 3871 3872 /* 3873 * Charge the memory controller for page usage. 3874 * Return 3875 * 0 if the charge was successful 3876 * < 0 if the cgroup is over its limit 3877 */ 3878 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 3879 gfp_t gfp_mask, enum charge_type ctype) 3880 { 3881 struct mem_cgroup *memcg = NULL; 3882 unsigned int nr_pages = 1; 3883 bool oom = true; 3884 int ret; 3885 3886 if (PageTransHuge(page)) { 3887 nr_pages <<= compound_order(page); 3888 VM_BUG_ON(!PageTransHuge(page)); 3889 /* 3890 * Never OOM-kill a process for a huge page. The 3891 * fault handler will fall back to regular pages. 3892 */ 3893 oom = false; 3894 } 3895 3896 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); 3897 if (ret == -ENOMEM) 3898 return ret; 3899 __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false); 3900 return 0; 3901 } 3902 3903 int mem_cgroup_newpage_charge(struct page *page, 3904 struct mm_struct *mm, gfp_t gfp_mask) 3905 { 3906 if (mem_cgroup_disabled()) 3907 return 0; 3908 VM_BUG_ON(page_mapped(page)); 3909 VM_BUG_ON(page->mapping && !PageAnon(page)); 3910 VM_BUG_ON(!mm); 3911 return mem_cgroup_charge_common(page, mm, gfp_mask, 3912 MEM_CGROUP_CHARGE_TYPE_ANON); 3913 } 3914 3915 /* 3916 * While swap-in, try_charge -> commit or cancel, the page is locked. 3917 * And when try_charge() successfully returns, one refcnt to memcg without 3918 * struct page_cgroup is acquired. This refcnt will be consumed by 3919 * "commit()" or removed by "cancel()" 3920 */ 3921 static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm, 3922 struct page *page, 3923 gfp_t mask, 3924 struct mem_cgroup **memcgp) 3925 { 3926 struct mem_cgroup *memcg; 3927 struct page_cgroup *pc; 3928 int ret; 3929 3930 pc = lookup_page_cgroup(page); 3931 /* 3932 * Every swap fault against a single page tries to charge the 3933 * page, bail as early as possible. shmem_unuse() encounters 3934 * already charged pages, too. The USED bit is protected by 3935 * the page lock, which serializes swap cache removal, which 3936 * in turn serializes uncharging. 
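 *
 * The overall swap-in charging protocol, as an illustrative sketch only
 * (the real callers, e.g. do_swap_page(), live outside this file):
 *
 *	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
 *	if (ret)
 *		goto out;	/- charge failed -/
 *	...map the page / add it to the rmap...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * and, on an error after a successful try_charge:
 *
 *	mem_cgroup_cancel_charge_swapin(memcg);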
3937 */ 3938 if (PageCgroupUsed(pc)) 3939 return 0; 3940 if (!do_swap_account) 3941 goto charge_cur_mm; 3942 memcg = try_get_mem_cgroup_from_page(page); 3943 if (!memcg) 3944 goto charge_cur_mm; 3945 *memcgp = memcg; 3946 ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true); 3947 css_put(&memcg->css); 3948 if (ret == -EINTR) 3949 ret = 0; 3950 return ret; 3951 charge_cur_mm: 3952 ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true); 3953 if (ret == -EINTR) 3954 ret = 0; 3955 return ret; 3956 } 3957 3958 int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page, 3959 gfp_t gfp_mask, struct mem_cgroup **memcgp) 3960 { 3961 *memcgp = NULL; 3962 if (mem_cgroup_disabled()) 3963 return 0; 3964 /* 3965 * A racing thread's fault, or swapoff, may have already 3966 * updated the pte, and even removed page from swap cache: in 3967 * those cases unuse_pte()'s pte_same() test will fail; but 3968 * there's also a KSM case which does need to charge the page. 3969 */ 3970 if (!PageSwapCache(page)) { 3971 int ret; 3972 3973 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true); 3974 if (ret == -EINTR) 3975 ret = 0; 3976 return ret; 3977 } 3978 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp); 3979 } 3980 3981 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) 3982 { 3983 if (mem_cgroup_disabled()) 3984 return; 3985 if (!memcg) 3986 return; 3987 __mem_cgroup_cancel_charge(memcg, 1); 3988 } 3989 3990 static void 3991 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg, 3992 enum charge_type ctype) 3993 { 3994 if (mem_cgroup_disabled()) 3995 return; 3996 if (!memcg) 3997 return; 3998 3999 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true); 4000 /* 4001 * Now swap is on-memory. This means this page may be 4002 * counted both as mem and swap....double count. 4003 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 4004 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() 4005 * may call delete_from_swap_cache() before reach here. 4006 */ 4007 if (do_swap_account && PageSwapCache(page)) { 4008 swp_entry_t ent = {.val = page_private(page)}; 4009 mem_cgroup_uncharge_swap(ent); 4010 } 4011 } 4012 4013 void mem_cgroup_commit_charge_swapin(struct page *page, 4014 struct mem_cgroup *memcg) 4015 { 4016 __mem_cgroup_commit_charge_swapin(page, memcg, 4017 MEM_CGROUP_CHARGE_TYPE_ANON); 4018 } 4019 4020 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 4021 gfp_t gfp_mask) 4022 { 4023 struct mem_cgroup *memcg = NULL; 4024 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 4025 int ret; 4026 4027 if (mem_cgroup_disabled()) 4028 return 0; 4029 if (PageCompound(page)) 4030 return 0; 4031 4032 if (!PageSwapCache(page)) 4033 ret = mem_cgroup_charge_common(page, mm, gfp_mask, type); 4034 else { /* page is swapcache/shmem */ 4035 ret = __mem_cgroup_try_charge_swapin(mm, page, 4036 gfp_mask, &memcg); 4037 if (!ret) 4038 __mem_cgroup_commit_charge_swapin(page, memcg, type); 4039 } 4040 return ret; 4041 } 4042 4043 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg, 4044 unsigned int nr_pages, 4045 const enum charge_type ctype) 4046 { 4047 struct memcg_batch_info *batch = NULL; 4048 bool uncharge_memsw = true; 4049 4050 /* If swapout, usage of swap doesn't decrease */ 4051 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 4052 uncharge_memsw = false; 4053 4054 batch = ¤t->memcg_batch; 4055 /* 4056 * In usual, we do css_get() when we remember memcg pointer. 
4057 * But in this case, we keep res->usage until end of a series of 4058 * uncharges. Then, it's ok to ignore memcg's refcnt. 4059 */ 4060 if (!batch->memcg) 4061 batch->memcg = memcg; 4062 /* 4063 * do_batch > 0 when unmapping pages or inode invalidate/truncate. 4064 * In those cases, all pages freed continuously can be expected to be in 4065 * the same cgroup and we have chance to coalesce uncharges. 4066 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE) 4067 * because we want to do uncharge as soon as possible. 4068 */ 4069 4070 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) 4071 goto direct_uncharge; 4072 4073 if (nr_pages > 1) 4074 goto direct_uncharge; 4075 4076 /* 4077 * In typical case, batch->memcg == mem. This means we can 4078 * merge a series of uncharges to an uncharge of res_counter. 4079 * If not, we uncharge res_counter ony by one. 4080 */ 4081 if (batch->memcg != memcg) 4082 goto direct_uncharge; 4083 /* remember freed charge and uncharge it later */ 4084 batch->nr_pages++; 4085 if (uncharge_memsw) 4086 batch->memsw_nr_pages++; 4087 return; 4088 direct_uncharge: 4089 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE); 4090 if (uncharge_memsw) 4091 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE); 4092 if (unlikely(batch->memcg != memcg)) 4093 memcg_oom_recover(memcg); 4094 } 4095 4096 /* 4097 * uncharge if !page_mapped(page) 4098 */ 4099 static struct mem_cgroup * 4100 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, 4101 bool end_migration) 4102 { 4103 struct mem_cgroup *memcg = NULL; 4104 unsigned int nr_pages = 1; 4105 struct page_cgroup *pc; 4106 bool anon; 4107 4108 if (mem_cgroup_disabled()) 4109 return NULL; 4110 4111 if (PageTransHuge(page)) { 4112 nr_pages <<= compound_order(page); 4113 VM_BUG_ON(!PageTransHuge(page)); 4114 } 4115 /* 4116 * Check if our page_cgroup is valid 4117 */ 4118 pc = lookup_page_cgroup(page); 4119 if (unlikely(!PageCgroupUsed(pc))) 4120 return NULL; 4121 4122 lock_page_cgroup(pc); 4123 4124 memcg = pc->mem_cgroup; 4125 4126 if (!PageCgroupUsed(pc)) 4127 goto unlock_out; 4128 4129 anon = PageAnon(page); 4130 4131 switch (ctype) { 4132 case MEM_CGROUP_CHARGE_TYPE_ANON: 4133 /* 4134 * Generally PageAnon tells if it's the anon statistics to be 4135 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is 4136 * used before page reached the stage of being marked PageAnon. 4137 */ 4138 anon = true; 4139 /* fallthrough */ 4140 case MEM_CGROUP_CHARGE_TYPE_DROP: 4141 /* See mem_cgroup_prepare_migration() */ 4142 if (page_mapped(page)) 4143 goto unlock_out; 4144 /* 4145 * Pages under migration may not be uncharged. But 4146 * end_migration() /must/ be the one uncharging the 4147 * unused post-migration page and so it has to call 4148 * here with the migration bit still set. See the 4149 * res_counter handling below. 4150 */ 4151 if (!end_migration && PageCgroupMigration(pc)) 4152 goto unlock_out; 4153 break; 4154 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 4155 if (!PageAnon(page)) { /* Shared memory */ 4156 if (page->mapping && !page_is_file_cache(page)) 4157 goto unlock_out; 4158 } else if (page_mapped(page)) /* Anon */ 4159 goto unlock_out; 4160 break; 4161 default: 4162 break; 4163 } 4164 4165 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages); 4166 4167 ClearPageCgroupUsed(pc); 4168 /* 4169 * pc->mem_cgroup is not cleared here. It will be accessed when it's 4170 * freed from LRU. This is safe because uncharged page is expected not 4171 * to be reused (freed soon). 
Exception is SwapCache, it's handled by 4172 * special functions. 4173 */ 4174 4175 unlock_page_cgroup(pc); 4176 /* 4177 * even after unlock, we have memcg->res.usage here and this memcg 4178 * will never be freed. 4179 */ 4180 memcg_check_events(memcg, page); 4181 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 4182 mem_cgroup_swap_statistics(memcg, true); 4183 mem_cgroup_get(memcg); 4184 } 4185 /* 4186 * Migration does not charge the res_counter for the 4187 * replacement page, so leave it alone when phasing out the 4188 * page that is unused after the migration. 4189 */ 4190 if (!end_migration && !mem_cgroup_is_root(memcg)) 4191 mem_cgroup_do_uncharge(memcg, nr_pages, ctype); 4192 4193 return memcg; 4194 4195 unlock_out: 4196 unlock_page_cgroup(pc); 4197 return NULL; 4198 } 4199 4200 void mem_cgroup_uncharge_page(struct page *page) 4201 { 4202 /* early check. */ 4203 if (page_mapped(page)) 4204 return; 4205 VM_BUG_ON(page->mapping && !PageAnon(page)); 4206 /* 4207 * If the page is in swap cache, uncharge should be deferred 4208 * to the swap path, which also properly accounts swap usage 4209 * and handles memcg lifetime. 4210 * 4211 * Note that this check is not stable and reclaim may add the 4212 * page to swap cache at any time after this. However, if the 4213 * page is not in swap cache by the time page->mapcount hits 4214 * 0, there won't be any page table references to the swap 4215 * slot, and reclaim will free it and not actually write the 4216 * page to disk. 4217 */ 4218 if (PageSwapCache(page)) 4219 return; 4220 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false); 4221 } 4222 4223 void mem_cgroup_uncharge_cache_page(struct page *page) 4224 { 4225 VM_BUG_ON(page_mapped(page)); 4226 VM_BUG_ON(page->mapping); 4227 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false); 4228 } 4229 4230 /* 4231 * Batch_start/batch_end are called from unmap_page_range/invalidate/truncate. 4232 * In those cases, pages are freed continuously and we can expect them 4233 * to be in the same memcg. Each of these callers itself limits the number of 4234 * pages freed at once, and calls uncharge_start/end() properly around it. 4235 * This may be called multiple (2) times in a context. 4236 */ 4237 4238 void mem_cgroup_uncharge_start(void) 4239 { 4240 current->memcg_batch.do_batch++; 4241 /* We can nest. */ 4242 if (current->memcg_batch.do_batch == 1) { 4243 current->memcg_batch.memcg = NULL; 4244 current->memcg_batch.nr_pages = 0; 4245 current->memcg_batch.memsw_nr_pages = 0; 4246 } 4247 } 4248 4249 void mem_cgroup_uncharge_end(void) 4250 { 4251 struct memcg_batch_info *batch = &current->memcg_batch; 4252 4253 if (!batch->do_batch) 4254 return; 4255 4256 batch->do_batch--; 4257 if (batch->do_batch) /* If stacked, do nothing. */ 4258 return; 4259 4260 if (!batch->memcg) 4261 return; 4262 /* 4263 * This "batch->memcg" is valid without any css_get/put etc... 4264 * because we hide charges behind us. 4265 */ 4266 if (batch->nr_pages) 4267 res_counter_uncharge(&batch->memcg->res, 4268 batch->nr_pages * PAGE_SIZE); 4269 if (batch->memsw_nr_pages) 4270 res_counter_uncharge(&batch->memcg->memsw, 4271 batch->memsw_nr_pages * PAGE_SIZE); 4272 memcg_oom_recover(batch->memcg); 4273 /* forget this pointer (for sanity check) */ 4274 batch->memcg = NULL; 4275 } 4276 4277 #ifdef CONFIG_SWAP 4278 /* 4279 * called after __delete_from_swap_cache() and drops the "page" account.
4280 * memcg information is recorded to swap_cgroup of "ent" 4281 */ 4282 void 4283 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) 4284 { 4285 struct mem_cgroup *memcg; 4286 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT; 4287 4288 if (!swapout) /* this was a swap cache but the swap is unused ! */ 4289 ctype = MEM_CGROUP_CHARGE_TYPE_DROP; 4290 4291 memcg = __mem_cgroup_uncharge_common(page, ctype, false); 4292 4293 /* 4294 * record memcg information, if swapout && memcg != NULL, 4295 * mem_cgroup_get() was called in uncharge(). 4296 */ 4297 if (do_swap_account && swapout && memcg) 4298 swap_cgroup_record(ent, css_id(&memcg->css)); 4299 } 4300 #endif 4301 4302 #ifdef CONFIG_MEMCG_SWAP 4303 /* 4304 * called from swap_entry_free(). remove record in swap_cgroup and 4305 * uncharge "memsw" account. 4306 */ 4307 void mem_cgroup_uncharge_swap(swp_entry_t ent) 4308 { 4309 struct mem_cgroup *memcg; 4310 unsigned short id; 4311 4312 if (!do_swap_account) 4313 return; 4314 4315 id = swap_cgroup_record(ent, 0); 4316 rcu_read_lock(); 4317 memcg = mem_cgroup_lookup(id); 4318 if (memcg) { 4319 /* 4320 * We uncharge this because swap is freed. 4321 * This memcg can be obsolete one. We avoid calling css_tryget 4322 */ 4323 if (!mem_cgroup_is_root(memcg)) 4324 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 4325 mem_cgroup_swap_statistics(memcg, false); 4326 mem_cgroup_put(memcg); 4327 } 4328 rcu_read_unlock(); 4329 } 4330 4331 /** 4332 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 4333 * @entry: swap entry to be moved 4334 * @from: mem_cgroup which the entry is moved from 4335 * @to: mem_cgroup which the entry is moved to 4336 * 4337 * It succeeds only when the swap_cgroup's record for this entry is the same 4338 * as the mem_cgroup's id of @from. 4339 * 4340 * Returns 0 on success, -EINVAL on failure. 4341 * 4342 * The caller must have charged to @to, IOW, called res_counter_charge() about 4343 * both res and memsw, and called css_get(). 4344 */ 4345 static int mem_cgroup_move_swap_account(swp_entry_t entry, 4346 struct mem_cgroup *from, struct mem_cgroup *to) 4347 { 4348 unsigned short old_id, new_id; 4349 4350 old_id = css_id(&from->css); 4351 new_id = css_id(&to->css); 4352 4353 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 4354 mem_cgroup_swap_statistics(from, false); 4355 mem_cgroup_swap_statistics(to, true); 4356 /* 4357 * This function is only called from task migration context now. 4358 * It postpones res_counter and refcount handling till the end 4359 * of task migration(mem_cgroup_clear_mc()) for performance 4360 * improvement. But we cannot postpone mem_cgroup_get(to) 4361 * because if the process that has been moved to @to does 4362 * swap-in, the refcount of @to might be decreased to 0. 4363 */ 4364 mem_cgroup_get(to); 4365 return 0; 4366 } 4367 return -EINVAL; 4368 } 4369 #else 4370 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 4371 struct mem_cgroup *from, struct mem_cgroup *to) 4372 { 4373 return -EINVAL; 4374 } 4375 #endif 4376 4377 /* 4378 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 4379 * page belongs to. 
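 *
 * Rough caller-side sequence (illustrative sketch; the real caller is the
 * page migration core, e.g. unmap_and_move()), where migration_ok says
 * whether the migration actually succeeded:
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg);
 *	...attempt the migration...
 *	mem_cgroup_end_migration(memcg, page, newpage, migration_ok);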
4380 */ 4381 void mem_cgroup_prepare_migration(struct page *page, struct page *newpage, 4382 struct mem_cgroup **memcgp) 4383 { 4384 struct mem_cgroup *memcg = NULL; 4385 unsigned int nr_pages = 1; 4386 struct page_cgroup *pc; 4387 enum charge_type ctype; 4388 4389 *memcgp = NULL; 4390 4391 if (mem_cgroup_disabled()) 4392 return; 4393 4394 if (PageTransHuge(page)) 4395 nr_pages <<= compound_order(page); 4396 4397 pc = lookup_page_cgroup(page); 4398 lock_page_cgroup(pc); 4399 if (PageCgroupUsed(pc)) { 4400 memcg = pc->mem_cgroup; 4401 css_get(&memcg->css); 4402 /* 4403 * At migrating an anonymous page, its mapcount goes down 4404 * to 0 and uncharge() will be called. But, even if it's fully 4405 * unmapped, migration may fail and this page has to be 4406 * charged again. We set MIGRATION flag here and delay uncharge 4407 * until end_migration() is called 4408 * 4409 * Corner Case Thinking 4410 * A) 4411 * When the old page was mapped as Anon and it's unmap-and-freed 4412 * while migration was ongoing. 4413 * If unmap finds the old page, uncharge() of it will be delayed 4414 * until end_migration(). If unmap finds a new page, it's 4415 * uncharged when it make mapcount to be 1->0. If unmap code 4416 * finds swap_migration_entry, the new page will not be mapped 4417 * and end_migration() will find it(mapcount==0). 4418 * 4419 * B) 4420 * When the old page was mapped but migraion fails, the kernel 4421 * remaps it. A charge for it is kept by MIGRATION flag even 4422 * if mapcount goes down to 0. We can do remap successfully 4423 * without charging it again. 4424 * 4425 * C) 4426 * The "old" page is under lock_page() until the end of 4427 * migration, so, the old page itself will not be swapped-out. 4428 * If the new page is swapped out before end_migraton, our 4429 * hook to usual swap-out path will catch the event. 4430 */ 4431 if (PageAnon(page)) 4432 SetPageCgroupMigration(pc); 4433 } 4434 unlock_page_cgroup(pc); 4435 /* 4436 * If the page is not charged at this point, 4437 * we return here. 4438 */ 4439 if (!memcg) 4440 return; 4441 4442 *memcgp = memcg; 4443 /* 4444 * We charge new page before it's used/mapped. So, even if unlock_page() 4445 * is called before end_migration, we can catch all events on this new 4446 * page. In the case new page is migrated but not remapped, new page's 4447 * mapcount will be finally 0 and we call uncharge in end_migration(). 4448 */ 4449 if (PageAnon(page)) 4450 ctype = MEM_CGROUP_CHARGE_TYPE_ANON; 4451 else 4452 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 4453 /* 4454 * The page is committed to the memcg, but it's not actually 4455 * charged to the res_counter since we plan on replacing the 4456 * old one and only one page is going to be left afterwards. 4457 */ 4458 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false); 4459 } 4460 4461 /* remove redundant charge if migration failed*/ 4462 void mem_cgroup_end_migration(struct mem_cgroup *memcg, 4463 struct page *oldpage, struct page *newpage, bool migration_ok) 4464 { 4465 struct page *used, *unused; 4466 struct page_cgroup *pc; 4467 bool anon; 4468 4469 if (!memcg) 4470 return; 4471 4472 if (!migration_ok) { 4473 used = oldpage; 4474 unused = newpage; 4475 } else { 4476 used = newpage; 4477 unused = oldpage; 4478 } 4479 anon = PageAnon(used); 4480 __mem_cgroup_uncharge_common(unused, 4481 anon ? 
MEM_CGROUP_CHARGE_TYPE_ANON 4482 : MEM_CGROUP_CHARGE_TYPE_CACHE, 4483 true); 4484 css_put(&memcg->css); 4485 /* 4486 * We disallowed uncharge of pages under migration because mapcount 4487 * of the page goes down to zero, temporarly. 4488 * Clear the flag and check the page should be charged. 4489 */ 4490 pc = lookup_page_cgroup(oldpage); 4491 lock_page_cgroup(pc); 4492 ClearPageCgroupMigration(pc); 4493 unlock_page_cgroup(pc); 4494 4495 /* 4496 * If a page is a file cache, radix-tree replacement is very atomic 4497 * and we can skip this check. When it was an Anon page, its mapcount 4498 * goes down to 0. But because we added MIGRATION flage, it's not 4499 * uncharged yet. There are several case but page->mapcount check 4500 * and USED bit check in mem_cgroup_uncharge_page() will do enough 4501 * check. (see prepare_charge() also) 4502 */ 4503 if (anon) 4504 mem_cgroup_uncharge_page(used); 4505 } 4506 4507 /* 4508 * At replace page cache, newpage is not under any memcg but it's on 4509 * LRU. So, this function doesn't touch res_counter but handles LRU 4510 * in correct way. Both pages are locked so we cannot race with uncharge. 4511 */ 4512 void mem_cgroup_replace_page_cache(struct page *oldpage, 4513 struct page *newpage) 4514 { 4515 struct mem_cgroup *memcg = NULL; 4516 struct page_cgroup *pc; 4517 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 4518 4519 if (mem_cgroup_disabled()) 4520 return; 4521 4522 pc = lookup_page_cgroup(oldpage); 4523 /* fix accounting on old pages */ 4524 lock_page_cgroup(pc); 4525 if (PageCgroupUsed(pc)) { 4526 memcg = pc->mem_cgroup; 4527 mem_cgroup_charge_statistics(memcg, oldpage, false, -1); 4528 ClearPageCgroupUsed(pc); 4529 } 4530 unlock_page_cgroup(pc); 4531 4532 /* 4533 * When called from shmem_replace_page(), in some cases the 4534 * oldpage has already been charged, and in some cases not. 4535 */ 4536 if (!memcg) 4537 return; 4538 /* 4539 * Even if newpage->mapping was NULL before starting replacement, 4540 * the newpage may be on LRU(or pagevec for LRU) already. We lock 4541 * LRU while we overwrite pc->mem_cgroup. 4542 */ 4543 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true); 4544 } 4545 4546 #ifdef CONFIG_DEBUG_VM 4547 static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 4548 { 4549 struct page_cgroup *pc; 4550 4551 pc = lookup_page_cgroup(page); 4552 /* 4553 * Can be NULL while feeding pages into the page allocator for 4554 * the first time, i.e. during boot or memory hotplug; 4555 * or when mem_cgroup_disabled(). 4556 */ 4557 if (likely(pc) && PageCgroupUsed(pc)) 4558 return pc; 4559 return NULL; 4560 } 4561 4562 bool mem_cgroup_bad_page_check(struct page *page) 4563 { 4564 if (mem_cgroup_disabled()) 4565 return false; 4566 4567 return lookup_page_cgroup_used(page) != NULL; 4568 } 4569 4570 void mem_cgroup_print_bad_page(struct page *page) 4571 { 4572 struct page_cgroup *pc; 4573 4574 pc = lookup_page_cgroup_used(page); 4575 if (pc) { 4576 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n", 4577 pc, pc->flags, pc->mem_cgroup); 4578 } 4579 } 4580 #endif 4581 4582 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 4583 unsigned long long val) 4584 { 4585 int retry_count; 4586 u64 memswlimit, memlimit; 4587 int ret = 0; 4588 int children = mem_cgroup_count_children(memcg); 4589 u64 curusage, oldusage; 4590 int enlarge; 4591 4592 /* 4593 * For keeping hierarchical_reclaim simple, how long we should retry 4594 * is depends on callers. 
We set our retry-count to be function 4595 * of # of children which we should visit in this loop. 4596 */ 4597 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 4598 4599 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 4600 4601 enlarge = 0; 4602 while (retry_count) { 4603 if (signal_pending(current)) { 4604 ret = -EINTR; 4605 break; 4606 } 4607 /* 4608 * Rather than hide all in some function, I do this in 4609 * open coded manner. You see what this really does. 4610 * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 4611 */ 4612 mutex_lock(&set_limit_mutex); 4613 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4614 if (memswlimit < val) { 4615 ret = -EINVAL; 4616 mutex_unlock(&set_limit_mutex); 4617 break; 4618 } 4619 4620 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 4621 if (memlimit < val) 4622 enlarge = 1; 4623 4624 ret = res_counter_set_limit(&memcg->res, val); 4625 if (!ret) { 4626 if (memswlimit == val) 4627 memcg->memsw_is_minimum = true; 4628 else 4629 memcg->memsw_is_minimum = false; 4630 } 4631 mutex_unlock(&set_limit_mutex); 4632 4633 if (!ret) 4634 break; 4635 4636 mem_cgroup_reclaim(memcg, GFP_KERNEL, 4637 MEM_CGROUP_RECLAIM_SHRINK); 4638 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 4639 /* Usage is reduced ? */ 4640 if (curusage >= oldusage) 4641 retry_count--; 4642 else 4643 oldusage = curusage; 4644 } 4645 if (!ret && enlarge) 4646 memcg_oom_recover(memcg); 4647 4648 return ret; 4649 } 4650 4651 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 4652 unsigned long long val) 4653 { 4654 int retry_count; 4655 u64 memlimit, memswlimit, oldusage, curusage; 4656 int children = mem_cgroup_count_children(memcg); 4657 int ret = -EBUSY; 4658 int enlarge = 0; 4659 4660 /* see mem_cgroup_resize_res_limit */ 4661 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 4662 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 4663 while (retry_count) { 4664 if (signal_pending(current)) { 4665 ret = -EINTR; 4666 break; 4667 } 4668 /* 4669 * Rather than hide all in some function, I do this in 4670 * open coded manner. You see what this really does. 4671 * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 4672 */ 4673 mutex_lock(&set_limit_mutex); 4674 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 4675 if (memlimit > val) { 4676 ret = -EINVAL; 4677 mutex_unlock(&set_limit_mutex); 4678 break; 4679 } 4680 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4681 if (memswlimit < val) 4682 enlarge = 1; 4683 ret = res_counter_set_limit(&memcg->memsw, val); 4684 if (!ret) { 4685 if (memlimit == val) 4686 memcg->memsw_is_minimum = true; 4687 else 4688 memcg->memsw_is_minimum = false; 4689 } 4690 mutex_unlock(&set_limit_mutex); 4691 4692 if (!ret) 4693 break; 4694 4695 mem_cgroup_reclaim(memcg, GFP_KERNEL, 4696 MEM_CGROUP_RECLAIM_NOSWAP | 4697 MEM_CGROUP_RECLAIM_SHRINK); 4698 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 4699 /* Usage is reduced ? 
*/ 4700 if (curusage >= oldusage) 4701 retry_count--; 4702 else 4703 oldusage = curusage; 4704 } 4705 if (!ret && enlarge) 4706 memcg_oom_recover(memcg); 4707 return ret; 4708 } 4709 4710 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 4711 gfp_t gfp_mask, 4712 unsigned long *total_scanned) 4713 { 4714 unsigned long nr_reclaimed = 0; 4715 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 4716 unsigned long reclaimed; 4717 int loop = 0; 4718 struct mem_cgroup_tree_per_zone *mctz; 4719 unsigned long long excess; 4720 unsigned long nr_scanned; 4721 4722 if (order > 0) 4723 return 0; 4724 4725 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 4726 /* 4727 * This loop can run a while, specially if mem_cgroup's continuously 4728 * keep exceeding their soft limit and putting the system under 4729 * pressure 4730 */ 4731 do { 4732 if (next_mz) 4733 mz = next_mz; 4734 else 4735 mz = mem_cgroup_largest_soft_limit_node(mctz); 4736 if (!mz) 4737 break; 4738 4739 nr_scanned = 0; 4740 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, 4741 gfp_mask, &nr_scanned); 4742 nr_reclaimed += reclaimed; 4743 *total_scanned += nr_scanned; 4744 spin_lock(&mctz->lock); 4745 4746 /* 4747 * If we failed to reclaim anything from this memory cgroup 4748 * it is time to move on to the next cgroup 4749 */ 4750 next_mz = NULL; 4751 if (!reclaimed) { 4752 do { 4753 /* 4754 * Loop until we find yet another one. 4755 * 4756 * By the time we get the soft_limit lock 4757 * again, someone might have aded the 4758 * group back on the RB tree. Iterate to 4759 * make sure we get a different mem. 4760 * mem_cgroup_largest_soft_limit_node returns 4761 * NULL if no other cgroup is present on 4762 * the tree 4763 */ 4764 next_mz = 4765 __mem_cgroup_largest_soft_limit_node(mctz); 4766 if (next_mz == mz) 4767 css_put(&next_mz->memcg->css); 4768 else /* next_mz == NULL or other memcg */ 4769 break; 4770 } while (1); 4771 } 4772 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz); 4773 excess = res_counter_soft_limit_excess(&mz->memcg->res); 4774 /* 4775 * One school of thought says that we should not add 4776 * back the node to the tree if reclaim returns 0. 4777 * But our reclaim could return 0, simply because due 4778 * to priority we are exposing a smaller subset of 4779 * memory to reclaim from. Consider this as a longer 4780 * term TODO. 4781 */ 4782 /* If excess == 0, no tree ops */ 4783 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess); 4784 spin_unlock(&mctz->lock); 4785 css_put(&mz->memcg->css); 4786 loop++; 4787 /* 4788 * Could not reclaim anything and there are no more 4789 * mem cgroups to try or we seem to be looping without 4790 * reclaiming anything. 4791 */ 4792 if (!nr_reclaimed && 4793 (next_mz == NULL || 4794 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 4795 break; 4796 } while (!nr_reclaimed); 4797 if (next_mz) 4798 css_put(&next_mz->memcg->css); 4799 return nr_reclaimed; 4800 } 4801 4802 /** 4803 * mem_cgroup_force_empty_list - clears LRU of a group 4804 * @memcg: group to clear 4805 * @node: NUMA node 4806 * @zid: zone id 4807 * @lru: lru to to clear 4808 * 4809 * Traverse a specified page_cgroup list and try to drop them all. This doesn't 4810 * reclaim the pages page themselves - pages are moved to the parent (or root) 4811 * group. 
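 * Pages that cannot be moved right away (lock contention, or a stale
 * page_cgroup) are remembered in @busy and retried on a later pass; the
 * loop ends once the list is drained. Called from
 * mem_cgroup_reparent_charges() for every node/zone/lru combination.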
4812 */ 4813 static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg, 4814 int node, int zid, enum lru_list lru) 4815 { 4816 struct lruvec *lruvec; 4817 unsigned long flags; 4818 struct list_head *list; 4819 struct page *busy; 4820 struct zone *zone; 4821 4822 zone = &NODE_DATA(node)->node_zones[zid]; 4823 lruvec = mem_cgroup_zone_lruvec(zone, memcg); 4824 list = &lruvec->lists[lru]; 4825 4826 busy = NULL; 4827 do { 4828 struct page_cgroup *pc; 4829 struct page *page; 4830 4831 spin_lock_irqsave(&zone->lru_lock, flags); 4832 if (list_empty(list)) { 4833 spin_unlock_irqrestore(&zone->lru_lock, flags); 4834 break; 4835 } 4836 page = list_entry(list->prev, struct page, lru); 4837 if (busy == page) { 4838 list_move(&page->lru, list); 4839 busy = NULL; 4840 spin_unlock_irqrestore(&zone->lru_lock, flags); 4841 continue; 4842 } 4843 spin_unlock_irqrestore(&zone->lru_lock, flags); 4844 4845 pc = lookup_page_cgroup(page); 4846 4847 if (mem_cgroup_move_parent(page, pc, memcg)) { 4848 /* found lock contention or "pc" is obsolete. */ 4849 busy = page; 4850 cond_resched(); 4851 } else 4852 busy = NULL; 4853 } while (!list_empty(list)); 4854 } 4855 4856 /* 4857 * make mem_cgroup's charge to be 0 if there is no task by moving 4858 * all the charges and pages to the parent. 4859 * This enables deleting this mem_cgroup. 4860 * 4861 * Caller is responsible for holding css reference on the memcg. 4862 */ 4863 static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) 4864 { 4865 int node, zid; 4866 u64 usage; 4867 4868 do { 4869 /* This is for making all *used* pages to be on LRU. */ 4870 lru_add_drain_all(); 4871 drain_all_stock_sync(memcg); 4872 mem_cgroup_start_move(memcg); 4873 for_each_node_state(node, N_MEMORY) { 4874 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4875 enum lru_list lru; 4876 for_each_lru(lru) { 4877 mem_cgroup_force_empty_list(memcg, 4878 node, zid, lru); 4879 } 4880 } 4881 } 4882 mem_cgroup_end_move(memcg); 4883 memcg_oom_recover(memcg); 4884 cond_resched(); 4885 4886 /* 4887 * Kernel memory may not necessarily be trackable to a specific 4888 * process. So they are not migrated, and therefore we can't 4889 * expect their value to drop to 0 here. 4890 * Having res filled up with kmem only is enough. 4891 * 4892 * This is a safety check because mem_cgroup_force_empty_list 4893 * could have raced with mem_cgroup_replace_page_cache callers 4894 * so the lru seemed empty but the page could have been added 4895 * right after the check. RES_USAGE should be safe as we always 4896 * charge before adding to the LRU. 4897 */ 4898 usage = res_counter_read_u64(&memcg->res, RES_USAGE) - 4899 res_counter_read_u64(&memcg->kmem, RES_USAGE); 4900 } while (usage > 0); 4901 } 4902 4903 /* 4904 * This mainly exists for tests during the setting of set of use_hierarchy. 4905 * Since this is the very setting we are changing, the current hierarchy value 4906 * is meaningless 4907 */ 4908 static inline bool __memcg_has_children(struct mem_cgroup *memcg) 4909 { 4910 struct cgroup *pos; 4911 4912 /* bounce at first found */ 4913 cgroup_for_each_child(pos, memcg->css.cgroup) 4914 return true; 4915 return false; 4916 } 4917 4918 /* 4919 * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed 4920 * to be already dead (as in mem_cgroup_force_empty, for instance). This is 4921 * from mem_cgroup_count_children(), in the sense that we don't really care how 4922 * many children we have; we only need to know if we have any. 
It also counts 4923 * any memcg without hierarchy as infertile. 4924 */ 4925 static inline bool memcg_has_children(struct mem_cgroup *memcg) 4926 { 4927 return memcg->use_hierarchy && __memcg_has_children(memcg); 4928 } 4929 4930 /* 4931 * Reclaims as many pages from the given memcg as possible and moves 4932 * the rest to the parent. 4933 * 4934 * Caller is responsible for holding css reference for memcg. 4935 */ 4936 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 4937 { 4938 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 4939 struct cgroup *cgrp = memcg->css.cgroup; 4940 4941 /* returns EBUSY if there is a task or if we come here twice. */ 4942 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 4943 return -EBUSY; 4944 4945 /* we call try-to-free pages for make this cgroup empty */ 4946 lru_add_drain_all(); 4947 /* try to free all pages in this cgroup */ 4948 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) { 4949 int progress; 4950 4951 if (signal_pending(current)) 4952 return -EINTR; 4953 4954 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, 4955 false); 4956 if (!progress) { 4957 nr_retries--; 4958 /* maybe some writeback is necessary */ 4959 congestion_wait(BLK_RW_ASYNC, HZ/10); 4960 } 4961 4962 } 4963 lru_add_drain(); 4964 mem_cgroup_reparent_charges(memcg); 4965 4966 return 0; 4967 } 4968 4969 static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) 4970 { 4971 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 4972 int ret; 4973 4974 if (mem_cgroup_is_root(memcg)) 4975 return -EINVAL; 4976 css_get(&memcg->css); 4977 ret = mem_cgroup_force_empty(memcg); 4978 css_put(&memcg->css); 4979 4980 return ret; 4981 } 4982 4983 4984 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft) 4985 { 4986 return mem_cgroup_from_cont(cont)->use_hierarchy; 4987 } 4988 4989 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, 4990 u64 val) 4991 { 4992 int retval = 0; 4993 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 4994 struct cgroup *parent = cont->parent; 4995 struct mem_cgroup *parent_memcg = NULL; 4996 4997 if (parent) 4998 parent_memcg = mem_cgroup_from_cont(parent); 4999 5000 mutex_lock(&memcg_create_mutex); 5001 5002 if (memcg->use_hierarchy == val) 5003 goto out; 5004 5005 /* 5006 * If parent's use_hierarchy is set, we can't make any modifications 5007 * in the child subtrees. If it is unset, then the change can 5008 * occur, provided the current cgroup has no children. 5009 * 5010 * For the root cgroup, parent_mem is NULL, we allow value to be 5011 * set if there are no children. 5012 */ 5013 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 5014 (val == 1 || val == 0)) { 5015 if (!__memcg_has_children(memcg)) 5016 memcg->use_hierarchy = val; 5017 else 5018 retval = -EBUSY; 5019 } else 5020 retval = -EINVAL; 5021 5022 out: 5023 mutex_unlock(&memcg_create_mutex); 5024 5025 return retval; 5026 } 5027 5028 5029 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, 5030 enum mem_cgroup_stat_index idx) 5031 { 5032 struct mem_cgroup *iter; 5033 long val = 0; 5034 5035 /* Per-cpu values can be negative, use a signed accumulator */ 5036 for_each_mem_cgroup_tree(iter, memcg) 5037 val += mem_cgroup_read_stat(iter, idx); 5038 5039 if (val < 0) /* race ? 
*/ 5040 val = 0; 5041 return val; 5042 } 5043 5044 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 5045 { 5046 u64 val; 5047 5048 if (!mem_cgroup_is_root(memcg)) { 5049 if (!swap) 5050 return res_counter_read_u64(&memcg->res, RES_USAGE); 5051 else 5052 return res_counter_read_u64(&memcg->memsw, RES_USAGE); 5053 } 5054 5055 /* 5056 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS 5057 * as well as in MEM_CGROUP_STAT_RSS_HUGE. 5058 */ 5059 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); 5060 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); 5061 5062 if (swap) 5063 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP); 5064 5065 return val << PAGE_SHIFT; 5066 } 5067 5068 static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft, 5069 struct file *file, char __user *buf, 5070 size_t nbytes, loff_t *ppos) 5071 { 5072 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5073 char str[64]; 5074 u64 val; 5075 int name, len; 5076 enum res_type type; 5077 5078 type = MEMFILE_TYPE(cft->private); 5079 name = MEMFILE_ATTR(cft->private); 5080 5081 switch (type) { 5082 case _MEM: 5083 if (name == RES_USAGE) 5084 val = mem_cgroup_usage(memcg, false); 5085 else 5086 val = res_counter_read_u64(&memcg->res, name); 5087 break; 5088 case _MEMSWAP: 5089 if (name == RES_USAGE) 5090 val = mem_cgroup_usage(memcg, true); 5091 else 5092 val = res_counter_read_u64(&memcg->memsw, name); 5093 break; 5094 case _KMEM: 5095 val = res_counter_read_u64(&memcg->kmem, name); 5096 break; 5097 default: 5098 BUG(); 5099 } 5100 5101 len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val); 5102 return simple_read_from_buffer(buf, nbytes, ppos, str, len); 5103 } 5104 5105 static int memcg_update_kmem_limit(struct cgroup *cont, u64 val) 5106 { 5107 int ret = -EINVAL; 5108 #ifdef CONFIG_MEMCG_KMEM 5109 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5110 /* 5111 * For simplicity, we won't allow this to be disabled. It also can't 5112 * be changed if the cgroup has children already, or if tasks had 5113 * already joined. 5114 * 5115 * If tasks join before we set the limit, a person looking at 5116 * kmem.usage_in_bytes will have no way to determine when it took 5117 * place, which makes the value quite meaningless. 5118 * 5119 * After it first became limited, changes in the value of the limit are 5120 * of course permitted. 5121 */ 5122 mutex_lock(&memcg_create_mutex); 5123 mutex_lock(&set_limit_mutex); 5124 if (!memcg->kmem_account_flags && val != RESOURCE_MAX) { 5125 if (cgroup_task_count(cont) || memcg_has_children(memcg)) { 5126 ret = -EBUSY; 5127 goto out; 5128 } 5129 ret = res_counter_set_limit(&memcg->kmem, val); 5130 VM_BUG_ON(ret); 5131 5132 ret = memcg_update_cache_sizes(memcg); 5133 if (ret) { 5134 res_counter_set_limit(&memcg->kmem, RESOURCE_MAX); 5135 goto out; 5136 } 5137 static_key_slow_inc(&memcg_kmem_enabled_key); 5138 /* 5139 * setting the active bit after the inc will guarantee no one 5140 * starts accounting before all call sites are patched 5141 */ 5142 memcg_kmem_set_active(memcg); 5143 5144 /* 5145 * kmem charges can outlive the cgroup. In the case of slab 5146 * pages, for instance, a page contain objects from various 5147 * processes, so it is unfeasible to migrate them away. We 5148 * need to reference count the memcg because of that. 
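 * The matching mem_cgroup_put() happens once the group is destroyed and
 * its kmem usage has drained back to zero; see kmem_cgroup_destroy() and
 * memcg_kmem_test_and_clear_dead().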
5149 */ 5150 mem_cgroup_get(memcg); 5151 } else 5152 ret = res_counter_set_limit(&memcg->kmem, val); 5153 out: 5154 mutex_unlock(&set_limit_mutex); 5155 mutex_unlock(&memcg_create_mutex); 5156 #endif 5157 return ret; 5158 } 5159 5160 #ifdef CONFIG_MEMCG_KMEM 5161 static int memcg_propagate_kmem(struct mem_cgroup *memcg) 5162 { 5163 int ret = 0; 5164 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5165 if (!parent) 5166 goto out; 5167 5168 memcg->kmem_account_flags = parent->kmem_account_flags; 5169 /* 5170 * When that happen, we need to disable the static branch only on those 5171 * memcgs that enabled it. To achieve this, we would be forced to 5172 * complicate the code by keeping track of which memcgs were the ones 5173 * that actually enabled limits, and which ones got it from its 5174 * parents. 5175 * 5176 * It is a lot simpler just to do static_key_slow_inc() on every child 5177 * that is accounted. 5178 */ 5179 if (!memcg_kmem_is_active(memcg)) 5180 goto out; 5181 5182 /* 5183 * destroy(), called if we fail, will issue static_key_slow_inc() and 5184 * mem_cgroup_put() if kmem is enabled. We have to either call them 5185 * unconditionally, or clear the KMEM_ACTIVE flag. I personally find 5186 * this more consistent, since it always leads to the same destroy path 5187 */ 5188 mem_cgroup_get(memcg); 5189 static_key_slow_inc(&memcg_kmem_enabled_key); 5190 5191 mutex_lock(&set_limit_mutex); 5192 ret = memcg_update_cache_sizes(memcg); 5193 mutex_unlock(&set_limit_mutex); 5194 out: 5195 return ret; 5196 } 5197 #endif /* CONFIG_MEMCG_KMEM */ 5198 5199 /* 5200 * The user of this function is... 5201 * RES_LIMIT. 5202 */ 5203 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, 5204 const char *buffer) 5205 { 5206 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5207 enum res_type type; 5208 int name; 5209 unsigned long long val; 5210 int ret; 5211 5212 type = MEMFILE_TYPE(cft->private); 5213 name = MEMFILE_ATTR(cft->private); 5214 5215 switch (name) { 5216 case RES_LIMIT: 5217 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 5218 ret = -EINVAL; 5219 break; 5220 } 5221 /* This function does all necessary parse...reuse it */ 5222 ret = res_counter_memparse_write_strategy(buffer, &val); 5223 if (ret) 5224 break; 5225 if (type == _MEM) 5226 ret = mem_cgroup_resize_limit(memcg, val); 5227 else if (type == _MEMSWAP) 5228 ret = mem_cgroup_resize_memsw_limit(memcg, val); 5229 else if (type == _KMEM) 5230 ret = memcg_update_kmem_limit(cont, val); 5231 else 5232 return -EINVAL; 5233 break; 5234 case RES_SOFT_LIMIT: 5235 ret = res_counter_memparse_write_strategy(buffer, &val); 5236 if (ret) 5237 break; 5238 /* 5239 * For memsw, soft limits are hard to implement in terms 5240 * of semantics, for now, we support soft limits for 5241 * control without swap 5242 */ 5243 if (type == _MEM) 5244 ret = res_counter_set_soft_limit(&memcg->res, val); 5245 else 5246 ret = -EINVAL; 5247 break; 5248 default: 5249 ret = -EINVAL; /* should be BUG() ? 
*/ 5250 break; 5251 } 5252 return ret; 5253 } 5254 5255 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, 5256 unsigned long long *mem_limit, unsigned long long *memsw_limit) 5257 { 5258 struct cgroup *cgroup; 5259 unsigned long long min_limit, min_memsw_limit, tmp; 5260 5261 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 5262 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 5263 cgroup = memcg->css.cgroup; 5264 if (!memcg->use_hierarchy) 5265 goto out; 5266 5267 while (cgroup->parent) { 5268 cgroup = cgroup->parent; 5269 memcg = mem_cgroup_from_cont(cgroup); 5270 if (!memcg->use_hierarchy) 5271 break; 5272 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); 5273 min_limit = min(min_limit, tmp); 5274 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 5275 min_memsw_limit = min(min_memsw_limit, tmp); 5276 } 5277 out: 5278 *mem_limit = min_limit; 5279 *memsw_limit = min_memsw_limit; 5280 } 5281 5282 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 5283 { 5284 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5285 int name; 5286 enum res_type type; 5287 5288 type = MEMFILE_TYPE(event); 5289 name = MEMFILE_ATTR(event); 5290 5291 switch (name) { 5292 case RES_MAX_USAGE: 5293 if (type == _MEM) 5294 res_counter_reset_max(&memcg->res); 5295 else if (type == _MEMSWAP) 5296 res_counter_reset_max(&memcg->memsw); 5297 else if (type == _KMEM) 5298 res_counter_reset_max(&memcg->kmem); 5299 else 5300 return -EINVAL; 5301 break; 5302 case RES_FAILCNT: 5303 if (type == _MEM) 5304 res_counter_reset_failcnt(&memcg->res); 5305 else if (type == _MEMSWAP) 5306 res_counter_reset_failcnt(&memcg->memsw); 5307 else if (type == _KMEM) 5308 res_counter_reset_failcnt(&memcg->kmem); 5309 else 5310 return -EINVAL; 5311 break; 5312 } 5313 5314 return 0; 5315 } 5316 5317 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, 5318 struct cftype *cft) 5319 { 5320 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate; 5321 } 5322 5323 #ifdef CONFIG_MMU 5324 static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 5325 struct cftype *cft, u64 val) 5326 { 5327 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5328 5329 if (val >= (1 << NR_MOVE_TYPE)) 5330 return -EINVAL; 5331 5332 /* 5333 * No kind of locking is needed in here, because ->can_attach() will 5334 * check this value once in the beginning of the process, and then carry 5335 * on with stale data. This means that changes to this value will only 5336 * affect task migrations starting after the change. 
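 *
 * Example from userspace (illustrative; the bit assignments are described
 * in Documentation/cgroups/memory.txt):
 *
 *	# migrate both anonymous pages (bit 0) and file pages (bit 1)
 *	# together with the task
 *	echo 3 > memory.move_charge_at_immigrate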
5337 */ 5338 memcg->move_charge_at_immigrate = val; 5339 return 0; 5340 } 5341 #else 5342 static int mem_cgroup_move_charge_write(struct cgroup *cgrp, 5343 struct cftype *cft, u64 val) 5344 { 5345 return -ENOSYS; 5346 } 5347 #endif 5348 5349 #ifdef CONFIG_NUMA 5350 static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft, 5351 struct seq_file *m) 5352 { 5353 int nid; 5354 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 5355 unsigned long node_nr; 5356 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5357 5358 total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL); 5359 seq_printf(m, "total=%lu", total_nr); 5360 for_each_node_state(nid, N_MEMORY) { 5361 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL); 5362 seq_printf(m, " N%d=%lu", nid, node_nr); 5363 } 5364 seq_putc(m, '\n'); 5365 5366 file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE); 5367 seq_printf(m, "file=%lu", file_nr); 5368 for_each_node_state(nid, N_MEMORY) { 5369 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 5370 LRU_ALL_FILE); 5371 seq_printf(m, " N%d=%lu", nid, node_nr); 5372 } 5373 seq_putc(m, '\n'); 5374 5375 anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON); 5376 seq_printf(m, "anon=%lu", anon_nr); 5377 for_each_node_state(nid, N_MEMORY) { 5378 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 5379 LRU_ALL_ANON); 5380 seq_printf(m, " N%d=%lu", nid, node_nr); 5381 } 5382 seq_putc(m, '\n'); 5383 5384 unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); 5385 seq_printf(m, "unevictable=%lu", unevictable_nr); 5386 for_each_node_state(nid, N_MEMORY) { 5387 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 5388 BIT(LRU_UNEVICTABLE)); 5389 seq_printf(m, " N%d=%lu", nid, node_nr); 5390 } 5391 seq_putc(m, '\n'); 5392 return 0; 5393 } 5394 #endif /* CONFIG_NUMA */ 5395 5396 static inline void mem_cgroup_lru_names_not_uptodate(void) 5397 { 5398 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 5399 } 5400 5401 static int memcg_stat_show(struct cgroup *cont, struct cftype *cft, 5402 struct seq_file *m) 5403 { 5404 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 5405 struct mem_cgroup *mi; 5406 unsigned int i; 5407 5408 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 5409 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 5410 continue; 5411 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], 5412 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 5413 } 5414 5415 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 5416 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 5417 mem_cgroup_read_events(memcg, i)); 5418 5419 for (i = 0; i < NR_LRU_LISTS; i++) 5420 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 5421 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 5422 5423 /* Hierarchical information */ 5424 { 5425 unsigned long long limit, memsw_limit; 5426 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit); 5427 seq_printf(m, "hierarchical_memory_limit %llu\n", limit); 5428 if (do_swap_account) 5429 seq_printf(m, "hierarchical_memsw_limit %llu\n", 5430 memsw_limit); 5431 } 5432 5433 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 5434 long long val = 0; 5435 5436 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 5437 continue; 5438 for_each_mem_cgroup_tree(mi, memcg) 5439 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 5440 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); 5441 } 5442 5443 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 5444 unsigned long long val = 0; 5445 5446 for_each_mem_cgroup_tree(mi, memcg) 5447 val += 
mem_cgroup_read_events(mi, i); 5448 seq_printf(m, "total_%s %llu\n", 5449 mem_cgroup_events_names[i], val); 5450 } 5451 5452 for (i = 0; i < NR_LRU_LISTS; i++) { 5453 unsigned long long val = 0; 5454 5455 for_each_mem_cgroup_tree(mi, memcg) 5456 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 5457 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 5458 } 5459 5460 #ifdef CONFIG_DEBUG_VM 5461 { 5462 int nid, zid; 5463 struct mem_cgroup_per_zone *mz; 5464 struct zone_reclaim_stat *rstat; 5465 unsigned long recent_rotated[2] = {0, 0}; 5466 unsigned long recent_scanned[2] = {0, 0}; 5467 5468 for_each_online_node(nid) 5469 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 5470 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 5471 rstat = &mz->lruvec.reclaim_stat; 5472 5473 recent_rotated[0] += rstat->recent_rotated[0]; 5474 recent_rotated[1] += rstat->recent_rotated[1]; 5475 recent_scanned[0] += rstat->recent_scanned[0]; 5476 recent_scanned[1] += rstat->recent_scanned[1]; 5477 } 5478 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 5479 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 5480 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 5481 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 5482 } 5483 #endif 5484 5485 return 0; 5486 } 5487 5488 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) 5489 { 5490 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5491 5492 return mem_cgroup_swappiness(memcg); 5493 } 5494 5495 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, 5496 u64 val) 5497 { 5498 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5499 struct mem_cgroup *parent; 5500 5501 if (val > 100) 5502 return -EINVAL; 5503 5504 if (cgrp->parent == NULL) 5505 return -EINVAL; 5506 5507 parent = mem_cgroup_from_cont(cgrp->parent); 5508 5509 mutex_lock(&memcg_create_mutex); 5510 5511 /* If under hierarchy, only empty-root can set this value */ 5512 if ((parent->use_hierarchy) || memcg_has_children(memcg)) { 5513 mutex_unlock(&memcg_create_mutex); 5514 return -EINVAL; 5515 } 5516 5517 memcg->swappiness = val; 5518 5519 mutex_unlock(&memcg_create_mutex); 5520 5521 return 0; 5522 } 5523 5524 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 5525 { 5526 struct mem_cgroup_threshold_ary *t; 5527 u64 usage; 5528 int i; 5529 5530 rcu_read_lock(); 5531 if (!swap) 5532 t = rcu_dereference(memcg->thresholds.primary); 5533 else 5534 t = rcu_dereference(memcg->memsw_thresholds.primary); 5535 5536 if (!t) 5537 goto unlock; 5538 5539 usage = mem_cgroup_usage(memcg, swap); 5540 5541 /* 5542 * current_threshold points to threshold just below or equal to usage. 5543 * If it's not true, a threshold was crossed after last 5544 * call of __mem_cgroup_threshold(). 5545 */ 5546 i = t->current_threshold; 5547 5548 /* 5549 * Iterate backward over array of thresholds starting from 5550 * current_threshold and check if a threshold is crossed. 5551 * If none of thresholds below usage is crossed, we read 5552 * only one element of the array here. 5553 */ 5554 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 5555 eventfd_signal(t->entries[i].eventfd, 1); 5556 5557 /* i = current_threshold + 1 */ 5558 i++; 5559 5560 /* 5561 * Iterate forward over array of thresholds starting from 5562 * current_threshold+1 and check if a threshold is crossed. 5563 * If none of thresholds above usage is crossed, we read 5564 * only one element of the array here. 
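 *
 * Worked example (illustrative): with registered thresholds {4M, 8M, 16M}
 * and usage last seen at 7M, current_threshold points at the 4M entry.
 * If usage has since grown to 18M, the backward scan signals nothing,
 * the forward scan signals the 8M and 16M eventfds, and
 * current_threshold is left pointing at the 16M entry.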
5565 */ 5566 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 5567 eventfd_signal(t->entries[i].eventfd, 1); 5568 5569 /* Update current_threshold */ 5570 t->current_threshold = i - 1; 5571 unlock: 5572 rcu_read_unlock(); 5573 } 5574 5575 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 5576 { 5577 while (memcg) { 5578 __mem_cgroup_threshold(memcg, false); 5579 if (do_swap_account) 5580 __mem_cgroup_threshold(memcg, true); 5581 5582 memcg = parent_mem_cgroup(memcg); 5583 } 5584 } 5585 5586 static int compare_thresholds(const void *a, const void *b) 5587 { 5588 const struct mem_cgroup_threshold *_a = a; 5589 const struct mem_cgroup_threshold *_b = b; 5590 5591 return _a->threshold - _b->threshold; 5592 } 5593 5594 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 5595 { 5596 struct mem_cgroup_eventfd_list *ev; 5597 5598 list_for_each_entry(ev, &memcg->oom_notify, list) 5599 eventfd_signal(ev->eventfd, 1); 5600 return 0; 5601 } 5602 5603 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 5604 { 5605 struct mem_cgroup *iter; 5606 5607 for_each_mem_cgroup_tree(iter, memcg) 5608 mem_cgroup_oom_notify_cb(iter); 5609 } 5610 5611 static int mem_cgroup_usage_register_event(struct cgroup *cgrp, 5612 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 5613 { 5614 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5615 struct mem_cgroup_thresholds *thresholds; 5616 struct mem_cgroup_threshold_ary *new; 5617 enum res_type type = MEMFILE_TYPE(cft->private); 5618 u64 threshold, usage; 5619 int i, size, ret; 5620 5621 ret = res_counter_memparse_write_strategy(args, &threshold); 5622 if (ret) 5623 return ret; 5624 5625 mutex_lock(&memcg->thresholds_lock); 5626 5627 if (type == _MEM) 5628 thresholds = &memcg->thresholds; 5629 else if (type == _MEMSWAP) 5630 thresholds = &memcg->memsw_thresholds; 5631 else 5632 BUG(); 5633 5634 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 5635 5636 /* Check if a threshold crossed before adding a new one */ 5637 if (thresholds->primary) 5638 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 5639 5640 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 5641 5642 /* Allocate memory for new array of thresholds */ 5643 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 5644 GFP_KERNEL); 5645 if (!new) { 5646 ret = -ENOMEM; 5647 goto unlock; 5648 } 5649 new->size = size; 5650 5651 /* Copy thresholds (if any) to new array */ 5652 if (thresholds->primary) { 5653 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 5654 sizeof(struct mem_cgroup_threshold)); 5655 } 5656 5657 /* Add new threshold */ 5658 new->entries[size - 1].eventfd = eventfd; 5659 new->entries[size - 1].threshold = threshold; 5660 5661 /* Sort thresholds. Registering of new threshold isn't time-critical */ 5662 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 5663 compare_thresholds, NULL); 5664 5665 /* Find current threshold */ 5666 new->current_threshold = -1; 5667 for (i = 0; i < size; i++) { 5668 if (new->entries[i].threshold <= usage) { 5669 /* 5670 * new->current_threshold will not be used until 5671 * rcu_assign_pointer(), so it's safe to increment 5672 * it here. 
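 * Readers only ever see the thresholds through rcu_dereference() of
 * thresholds->primary, and @new is not published until the
 * rcu_assign_pointer() further down.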
5673 */ 5674 ++new->current_threshold; 5675 } else 5676 break; 5677 } 5678 5679 /* Free old spare buffer and save old primary buffer as spare */ 5680 kfree(thresholds->spare); 5681 thresholds->spare = thresholds->primary; 5682 5683 rcu_assign_pointer(thresholds->primary, new); 5684 5685 /* To be sure that nobody uses thresholds */ 5686 synchronize_rcu(); 5687 5688 unlock: 5689 mutex_unlock(&memcg->thresholds_lock); 5690 5691 return ret; 5692 } 5693 5694 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, 5695 struct cftype *cft, struct eventfd_ctx *eventfd) 5696 { 5697 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5698 struct mem_cgroup_thresholds *thresholds; 5699 struct mem_cgroup_threshold_ary *new; 5700 enum res_type type = MEMFILE_TYPE(cft->private); 5701 u64 usage; 5702 int i, j, size; 5703 5704 mutex_lock(&memcg->thresholds_lock); 5705 if (type == _MEM) 5706 thresholds = &memcg->thresholds; 5707 else if (type == _MEMSWAP) 5708 thresholds = &memcg->memsw_thresholds; 5709 else 5710 BUG(); 5711 5712 if (!thresholds->primary) 5713 goto unlock; 5714 5715 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 5716 5717 /* Check if a threshold crossed before removing */ 5718 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 5719 5720 /* Calculate new number of threshold */ 5721 size = 0; 5722 for (i = 0; i < thresholds->primary->size; i++) { 5723 if (thresholds->primary->entries[i].eventfd != eventfd) 5724 size++; 5725 } 5726 5727 new = thresholds->spare; 5728 5729 /* Set thresholds array to NULL if we don't have thresholds */ 5730 if (!size) { 5731 kfree(new); 5732 new = NULL; 5733 goto swap_buffers; 5734 } 5735 5736 new->size = size; 5737 5738 /* Copy thresholds and find current threshold */ 5739 new->current_threshold = -1; 5740 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 5741 if (thresholds->primary->entries[i].eventfd == eventfd) 5742 continue; 5743 5744 new->entries[j] = thresholds->primary->entries[i]; 5745 if (new->entries[j].threshold <= usage) { 5746 /* 5747 * new->current_threshold will not be used 5748 * until rcu_assign_pointer(), so it's safe to increment 5749 * it here. 5750 */ 5751 ++new->current_threshold; 5752 } 5753 j++; 5754 } 5755 5756 swap_buffers: 5757 /* Swap primary and spare array */ 5758 thresholds->spare = thresholds->primary; 5759 /* If all events are unregistered, free the spare array */ 5760 if (!new) { 5761 kfree(thresholds->spare); 5762 thresholds->spare = NULL; 5763 } 5764 5765 rcu_assign_pointer(thresholds->primary, new); 5766 5767 /* To be sure that nobody uses thresholds */ 5768 synchronize_rcu(); 5769 unlock: 5770 mutex_unlock(&memcg->thresholds_lock); 5771 } 5772 5773 static int mem_cgroup_oom_register_event(struct cgroup *cgrp, 5774 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 5775 { 5776 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 5777 struct mem_cgroup_eventfd_list *event; 5778 enum res_type type = MEMFILE_TYPE(cft->private); 5779 5780 BUG_ON(type != _OOM_TYPE); 5781 event = kmalloc(sizeof(*event), GFP_KERNEL); 5782 if (!event) 5783 return -ENOMEM; 5784 5785 spin_lock(&memcg_oom_lock); 5786 5787 event->eventfd = eventfd; 5788 list_add(&event->list, &memcg->oom_notify); 5789 5790 /* already in OOM ? 
static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	enum res_type type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (atomic_read(&memcg->under_oom))
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	enum res_type type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
	struct cftype *cft, struct cgroup_map_cb *cb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);

	if (atomic_read(&memcg->under_oom))
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!cgrp->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	mutex_lock(&memcg_create_mutex);
	/* oom-kill-disable is a flag for subhierarchy. */
	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
		mutex_unlock(&memcg_create_mutex);
		return -EINVAL;
	}
	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);
	mutex_unlock(&memcg_create_mutex);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	int ret;

	memcg->kmemcg_id = -1;
	ret = memcg_propagate_kmem(memcg);
	if (ret)
		return ret;

	return mem_cgroup_sockets_init(memcg, ss);
}

static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
{
	mem_cgroup_sockets_destroy(memcg);

	memcg_kmem_mark_dead(memcg);

	if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
		return;

	/*
	 * Charges already down to 0, undo mem_cgroup_get() done in the charge
	 * path here, being careful not to race with memcg_uncharge_kmem: it is
	 * possible that the charges went down to 0 between mark_dead and the
	 * res_counter read, so in that case, we don't need the put
	 */
	if (memcg_kmem_test_and_clear_dead(memcg))
		mem_cgroup_put(memcg);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}

static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
{
}
#endif

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
"limit_in_bytes", 5916 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 5917 .write_string = mem_cgroup_write, 5918 .read = mem_cgroup_read, 5919 }, 5920 { 5921 .name = "soft_limit_in_bytes", 5922 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 5923 .write_string = mem_cgroup_write, 5924 .read = mem_cgroup_read, 5925 }, 5926 { 5927 .name = "failcnt", 5928 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 5929 .trigger = mem_cgroup_reset, 5930 .read = mem_cgroup_read, 5931 }, 5932 { 5933 .name = "stat", 5934 .read_seq_string = memcg_stat_show, 5935 }, 5936 { 5937 .name = "force_empty", 5938 .trigger = mem_cgroup_force_empty_write, 5939 }, 5940 { 5941 .name = "use_hierarchy", 5942 .flags = CFTYPE_INSANE, 5943 .write_u64 = mem_cgroup_hierarchy_write, 5944 .read_u64 = mem_cgroup_hierarchy_read, 5945 }, 5946 { 5947 .name = "swappiness", 5948 .read_u64 = mem_cgroup_swappiness_read, 5949 .write_u64 = mem_cgroup_swappiness_write, 5950 }, 5951 { 5952 .name = "move_charge_at_immigrate", 5953 .read_u64 = mem_cgroup_move_charge_read, 5954 .write_u64 = mem_cgroup_move_charge_write, 5955 }, 5956 { 5957 .name = "oom_control", 5958 .read_map = mem_cgroup_oom_control_read, 5959 .write_u64 = mem_cgroup_oom_control_write, 5960 .register_event = mem_cgroup_oom_register_event, 5961 .unregister_event = mem_cgroup_oom_unregister_event, 5962 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 5963 }, 5964 { 5965 .name = "pressure_level", 5966 .register_event = vmpressure_register_event, 5967 .unregister_event = vmpressure_unregister_event, 5968 }, 5969 #ifdef CONFIG_NUMA 5970 { 5971 .name = "numa_stat", 5972 .read_seq_string = memcg_numa_stat_show, 5973 }, 5974 #endif 5975 #ifdef CONFIG_MEMCG_KMEM 5976 { 5977 .name = "kmem.limit_in_bytes", 5978 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 5979 .write_string = mem_cgroup_write, 5980 .read = mem_cgroup_read, 5981 }, 5982 { 5983 .name = "kmem.usage_in_bytes", 5984 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5985 .read = mem_cgroup_read, 5986 }, 5987 { 5988 .name = "kmem.failcnt", 5989 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5990 .trigger = mem_cgroup_reset, 5991 .read = mem_cgroup_read, 5992 }, 5993 { 5994 .name = "kmem.max_usage_in_bytes", 5995 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5996 .trigger = mem_cgroup_reset, 5997 .read = mem_cgroup_read, 5998 }, 5999 #ifdef CONFIG_SLABINFO 6000 { 6001 .name = "kmem.slabinfo", 6002 .read_seq_string = mem_cgroup_slabinfo_read, 6003 }, 6004 #endif 6005 #endif 6006 { }, /* terminate */ 6007 }; 6008 6009 #ifdef CONFIG_MEMCG_SWAP 6010 static struct cftype memsw_cgroup_files[] = { 6011 { 6012 .name = "memsw.usage_in_bytes", 6013 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6014 .read = mem_cgroup_read, 6015 .register_event = mem_cgroup_usage_register_event, 6016 .unregister_event = mem_cgroup_usage_unregister_event, 6017 }, 6018 { 6019 .name = "memsw.max_usage_in_bytes", 6020 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6021 .trigger = mem_cgroup_reset, 6022 .read = mem_cgroup_read, 6023 }, 6024 { 6025 .name = "memsw.limit_in_bytes", 6026 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6027 .write_string = mem_cgroup_write, 6028 .read = mem_cgroup_read, 6029 }, 6030 { 6031 .name = "memsw.failcnt", 6032 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6033 .trigger = mem_cgroup_reset, 6034 .read = mem_cgroup_read, 6035 }, 6036 { }, /* terminate */ 6037 }; 6038 #endif 6039 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 6040 { 6041 struct mem_cgroup_per_node *pn; 6042 
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		lruvec_init(&mz->lruvec);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
		mz->memcg = memcg;
	}
	memcg->info.nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->info.nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	size_t size = memcg_size();

	/* Can be very big if nr_node_ids is very big */
	if (size < PAGE_SIZE)
		memcg = kzalloc(size, GFP_KERNEL);
	else
		memcg = vzalloc(size);

	if (!memcg)
		return NULL;

	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto out_free;
	spin_lock_init(&memcg->pcp_counter_lock);
	return memcg;

out_free:
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
	return NULL;
}

/*
 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * it goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;
	size_t size = memcg_size();

	mem_cgroup_remove_from_trees(memcg);
	free_css_id(&mem_cgroup_subsys, &memcg->css);

	for_each_node(node)
		free_mem_cgroup_per_zone_info(memcg, node);

	free_percpu(memcg->stat);

	/*
	 * We need to make sure that (at least for now), the jump label
	 * destruction code runs outside of the cgroup lock. This is because
	 * get_online_cpus(), which is called from the static_branch update,
	 * can't be called inside the cgroup_lock. cpusets are the ones
	 * enforcing this dependency, so if they ever change, we might as well.
	 *
	 * schedule_work() will guarantee this happens. Be careful if you need
	 * to move this code around, and make sure it is outside
	 * the cgroup_lock.
	 */
	disarm_static_keys(memcg);
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
}

/*
 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
 * but in process context.  The work_freeing structure is overlaid
 * on the rcu_freeing structure, which itself is overlaid on memsw.
 */
static void free_work(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, work_freeing);
	__mem_cgroup_free(memcg);
}

static void free_rcu(struct rcu_head *rcu_head)
{
	struct mem_cgroup *memcg;

	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
	INIT_WORK(&memcg->work_freeing, free_work);
	schedule_work(&memcg->work_freeing);
}

static void mem_cgroup_get(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg->refcnt);
}

static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
{
	if (atomic_sub_and_test(count, &memcg->refcnt)) {
		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
		call_rcu(&memcg->rcu_freeing, free_rcu);
		if (parent)
			mem_cgroup_put(parent);
	}
}

static void mem_cgroup_put(struct mem_cgroup *memcg)
{
	__mem_cgroup_put(memcg, 1);
}

/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
EXPORT_SYMBOL(parent_mem_cgroup);

static void __init mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

	for_each_node(node) {
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		BUG_ON(!rtpn);

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
}
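/*
 * Summary added for clarity (not part of the original file): the
 * cgroup_subsys callbacks below form the memcg lifecycle.  css_alloc()
 * allocates and minimally initialises the mem_cgroup, css_online() links it
 * into the parent's hierarchy and res_counters, css_offline() reparents any
 * remaining charges, and css_free() drops the final reference.
 */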
static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup *cont)
{
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
	int node;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	for_each_node(node)
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
			goto free_out;

	/* root ? */
	if (cont->parent == NULL) {
		root_mem_cgroup = memcg;
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
	}

	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	atomic_set(&memcg->refcnt, 1);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);

	return &memcg->css;

free_out:
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static int
mem_cgroup_css_online(struct cgroup *cont)
{
	struct mem_cgroup *memcg, *parent;
	int error = 0;

	if (!cont->parent)
		return 0;

	mutex_lock(&memcg_create_mutex);
	memcg = mem_cgroup_from_cont(cont);
	parent = mem_cgroup_from_cont(cont->parent);

	memcg->use_hierarchy = parent->use_hierarchy;
	memcg->oom_kill_disable = parent->oom_kill_disable;
	memcg->swappiness = mem_cgroup_swappiness(parent);

	if (parent->use_hierarchy) {
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
		res_counter_init(&memcg->kmem, &parent->kmem);

		/*
		 * We increment the refcnt of the parent to ensure that we can
		 * safely access it on res_counter_charge/uncharge.
		 * This refcnt will be decremented when freeing this
		 * mem_cgroup (see mem_cgroup_put).
		 */
		mem_cgroup_get(parent);
	} else {
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
		/*
		 * A deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense, so let the cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			mem_cgroup_subsys.broken_hierarchy = true;
	}

	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
	mutex_unlock(&memcg_create_mutex);
	if (error) {
		/*
		 * We call put now because our (and parent's) refcnts
		 * are already in place. mem_cgroup_put() will internally
		 * call __mem_cgroup_free, so return directly
		 */
		mem_cgroup_put(memcg);
		if (parent->use_hierarchy)
			mem_cgroup_put(parent);
	}
	return error;
}

/*
 * Notify all parents that a group from their hierarchy is gone.
 */
static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent = memcg;

	while ((parent = parent_mem_cgroup(parent)))
		atomic_inc(&parent->dead_count);

	/*
	 * if the root memcg is not hierarchical we have to check it
	 * explicitly.
	 */
	if (!root_mem_cgroup->use_hierarchy)
		atomic_inc(&root_mem_cgroup->dead_count);
}

static void mem_cgroup_css_offline(struct cgroup *cont)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

	mem_cgroup_invalidate_reclaim_iterators(memcg);
	mem_cgroup_reparent_charges(memcg);
	mem_cgroup_destroy_all_caches(memcg);
}

static void mem_cgroup_css_free(struct cgroup *cont)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

	kmem_cgroup_destroy(memcg);

	mem_cgroup_put(memcg);
}
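/*
 * Context added for the reader (not part of the original file): the code
 * below implements "move charge at task migration".  When a task migrates
 * into a memcg and memory.move_charge_at_immigrate is non-zero, charges for
 * pages the task uses are moved with it.  Roughly, bit 0 selects anonymous
 * pages (and their swap entries) and bit 1 selects file/shmem pages; see
 * Documentation/cgroups/memory.txt, "Move charges at task migration".
 */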
#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *memcg = mc.to;

	if (mem_cgroup_is_root(memcg)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "memcg" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&memcg->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
		if (ret)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return ret;
		mc.precharge++;
	}
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored
 *          (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon())
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), ent.val);
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task(page-faulted). */
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(swap_address_space(swap), swap.val);
	}
#endif
	return page;
}

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only loose check w/o page_cgroup lock.
		 * mem_cgroup_move_account() checks the pc is valid or not under
		 * the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
	    css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON(!page || !PageHead(page));
	if (!move_anon())
		return ret;
	pc = lookup_page_cgroup(page);
	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
	unsigned long move_charge_at_immigrate;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_charge_at_immigrate = memcg->move_charge_at_immigrate;
	if (move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move an owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			mc.immigrate_flags = move_charge_at_immigrate;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
	struct page_cgroup *pc;

	/*
	 * We don't take compound_lock() here but no race with splitting thp
	 * happens because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there's no concurrent thp split,
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, the thread must wait for page table lock
	 *    to be unlocked in __split_huge_page_splitting(), where the main
	 *    part of thp split is not executed yet.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							pc, mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * means we have consumed all precharges and failed in
			 * doing additional charge. Just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
}
#endif

/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify sane_behavior flag on each mount attempt.
 */
static void mem_cgroup_bind(struct cgroup *root)
{
	/*
	 * use_hierarchy is forced with sane_behavior. cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_sane_behavior(root))
		mem_cgroup_from_cont(root)->use_hierarchy = true;
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.base_cftypes = mem_cgroup_files,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
	/* consider enabled if no parameter or 1 is given */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

static void __init memsw_file_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
}

static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		memsw_file_init();
	}
}

#else
static void __init enable_swap_cgroup(void)
{
}
#endif

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	mem_cgroup_soft_limit_tree_init();
	memcg_stock_init();
	return 0;
}
subsys_initcall(mem_cgroup_init);