1 /* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * Kernel Memory Controller 14 * Copyright (C) 2012 Parallels Inc. and Google Inc. 15 * Authors: Glauber Costa and Suleiman Souhlal 16 * 17 * This program is free software; you can redistribute it and/or modify 18 * it under the terms of the GNU General Public License as published by 19 * the Free Software Foundation; either version 2 of the License, or 20 * (at your option) any later version. 21 * 22 * This program is distributed in the hope that it will be useful, 23 * but WITHOUT ANY WARRANTY; without even the implied warranty of 24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 25 * GNU General Public License for more details. 26 */ 27 28 #include <linux/res_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/mm.h> 32 #include <linux/hugetlb.h> 33 #include <linux/pagemap.h> 34 #include <linux/smp.h> 35 #include <linux/page-flags.h> 36 #include <linux/backing-dev.h> 37 #include <linux/bit_spinlock.h> 38 #include <linux/rcupdate.h> 39 #include <linux/limits.h> 40 #include <linux/export.h> 41 #include <linux/mutex.h> 42 #include <linux/rbtree.h> 43 #include <linux/slab.h> 44 #include <linux/swap.h> 45 #include <linux/swapops.h> 46 #include <linux/spinlock.h> 47 #include <linux/eventfd.h> 48 #include <linux/sort.h> 49 #include <linux/fs.h> 50 #include <linux/seq_file.h> 51 #include <linux/vmalloc.h> 52 #include <linux/vmpressure.h> 53 #include <linux/mm_inline.h> 54 #include <linux/page_cgroup.h> 55 #include <linux/cpu.h> 56 #include <linux/oom.h> 57 #include "internal.h" 58 #include <net/sock.h> 59 #include <net/ip.h> 60 #include <net/tcp_memcontrol.h> 61 62 #include <asm/uaccess.h> 63 64 #include <trace/events/vmscan.h> 65 66 struct cgroup_subsys mem_cgroup_subsys __read_mostly; 67 EXPORT_SYMBOL(mem_cgroup_subsys); 68 69 #define MEM_CGROUP_RECLAIM_RETRIES 5 70 static struct mem_cgroup *root_mem_cgroup __read_mostly; 71 72 #ifdef CONFIG_MEMCG_SWAP 73 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 74 int do_swap_account __read_mostly; 75 76 /* for remember boot option*/ 77 #ifdef CONFIG_MEMCG_SWAP_ENABLED 78 static int really_do_swap_account __initdata = 1; 79 #else 80 static int really_do_swap_account __initdata = 0; 81 #endif 82 83 #else 84 #define do_swap_account 0 85 #endif 86 87 88 static const char * const mem_cgroup_stat_names[] = { 89 "cache", 90 "rss", 91 "rss_huge", 92 "mapped_file", 93 "writeback", 94 "swap", 95 }; 96 97 enum mem_cgroup_events_index { 98 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ 99 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ 100 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */ 101 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */ 102 MEM_CGROUP_EVENTS_NSTATS, 103 }; 104 105 static const char * const mem_cgroup_events_names[] = { 106 "pgpgin", 107 "pgpgout", 108 "pgfault", 109 "pgmajfault", 110 }; 111 112 static const char * const mem_cgroup_lru_names[] = { 113 "inactive_anon", 114 "active_anon", 115 "inactive_file", 116 "active_file", 117 "unevictable", 118 }; 119 120 /* 121 * Per memcg event counter is incremented at every pagein/pageout. With THP, 122 * it will be incremated by the number of pages. 
This counter is used for 123 * for trigger some periodic events. This is straightforward and better 124 * than using jiffies etc. to handle periodic memcg event. 125 */ 126 enum mem_cgroup_events_target { 127 MEM_CGROUP_TARGET_THRESH, 128 MEM_CGROUP_TARGET_SOFTLIMIT, 129 MEM_CGROUP_TARGET_NUMAINFO, 130 MEM_CGROUP_NTARGETS, 131 }; 132 #define THRESHOLDS_EVENTS_TARGET 128 133 #define SOFTLIMIT_EVENTS_TARGET 1024 134 #define NUMAINFO_EVENTS_TARGET 1024 135 136 struct mem_cgroup_stat_cpu { 137 long count[MEM_CGROUP_STAT_NSTATS]; 138 unsigned long events[MEM_CGROUP_EVENTS_NSTATS]; 139 unsigned long nr_page_events; 140 unsigned long targets[MEM_CGROUP_NTARGETS]; 141 }; 142 143 struct mem_cgroup_reclaim_iter { 144 /* 145 * last scanned hierarchy member. Valid only if last_dead_count 146 * matches memcg->dead_count of the hierarchy root group. 147 */ 148 struct mem_cgroup *last_visited; 149 unsigned long last_dead_count; 150 151 /* scan generation, increased every round-trip */ 152 unsigned int generation; 153 }; 154 155 /* 156 * per-zone information in memory controller. 157 */ 158 struct mem_cgroup_per_zone { 159 struct lruvec lruvec; 160 unsigned long lru_size[NR_LRU_LISTS]; 161 162 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 163 164 struct rb_node tree_node; /* RB tree node */ 165 unsigned long long usage_in_excess;/* Set to the value by which */ 166 /* the soft limit is exceeded*/ 167 bool on_tree; 168 struct mem_cgroup *memcg; /* Back pointer, we cannot */ 169 /* use container_of */ 170 }; 171 172 struct mem_cgroup_per_node { 173 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 174 }; 175 176 /* 177 * Cgroups above their limits are maintained in a RB-Tree, independent of 178 * their hierarchy representation 179 */ 180 181 struct mem_cgroup_tree_per_zone { 182 struct rb_root rb_root; 183 spinlock_t lock; 184 }; 185 186 struct mem_cgroup_tree_per_node { 187 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 188 }; 189 190 struct mem_cgroup_tree { 191 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 192 }; 193 194 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 195 196 struct mem_cgroup_threshold { 197 struct eventfd_ctx *eventfd; 198 u64 threshold; 199 }; 200 201 /* For threshold */ 202 struct mem_cgroup_threshold_ary { 203 /* An array index points to threshold just below or equal to usage. */ 204 int current_threshold; 205 /* Size of entries[] */ 206 unsigned int size; 207 /* Array of thresholds */ 208 struct mem_cgroup_threshold entries[0]; 209 }; 210 211 struct mem_cgroup_thresholds { 212 /* Primary thresholds array */ 213 struct mem_cgroup_threshold_ary *primary; 214 /* 215 * Spare threshold array. 216 * This is needed to make mem_cgroup_unregister_event() "never fail". 217 * It must be able to store at least primary->size - 1 entries. 218 */ 219 struct mem_cgroup_threshold_ary *spare; 220 }; 221 222 /* for OOM */ 223 struct mem_cgroup_eventfd_list { 224 struct list_head list; 225 struct eventfd_ctx *eventfd; 226 }; 227 228 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 229 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 230 231 /* 232 * The memory controller data structure. The memory controller controls both 233 * page cache and RSS per cgroup. We would eventually like to provide 234 * statistics based on the statistics developed by Rik Van Riel for clock-pro, 235 * to help the administrator determine what knobs to tune. 236 * 237 * TODO: Add a water mark for the memory controller. 
Reclaim will begin when 238 * we hit the water mark. May be even add a low water mark, such that 239 * no reclaim occurs from a cgroup at it's low water mark, this is 240 * a feature that will be implemented much later in the future. 241 */ 242 struct mem_cgroup { 243 struct cgroup_subsys_state css; 244 /* 245 * the counter to account for memory usage 246 */ 247 struct res_counter res; 248 249 /* vmpressure notifications */ 250 struct vmpressure vmpressure; 251 252 /* 253 * the counter to account for mem+swap usage. 254 */ 255 struct res_counter memsw; 256 257 /* 258 * the counter to account for kernel memory usage. 259 */ 260 struct res_counter kmem; 261 /* 262 * Should the accounting and control be hierarchical, per subtree? 263 */ 264 bool use_hierarchy; 265 unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */ 266 267 bool oom_lock; 268 atomic_t under_oom; 269 atomic_t oom_wakeups; 270 271 int swappiness; 272 /* OOM-Killer disable */ 273 int oom_kill_disable; 274 275 /* set when res.limit == memsw.limit */ 276 bool memsw_is_minimum; 277 278 /* protect arrays of thresholds */ 279 struct mutex thresholds_lock; 280 281 /* thresholds for memory usage. RCU-protected */ 282 struct mem_cgroup_thresholds thresholds; 283 284 /* thresholds for mem+swap usage. RCU-protected */ 285 struct mem_cgroup_thresholds memsw_thresholds; 286 287 /* For oom notifier event fd */ 288 struct list_head oom_notify; 289 290 /* 291 * Should we move charges of a task when a task is moved into this 292 * mem_cgroup ? And what type of charges should we move ? 293 */ 294 unsigned long move_charge_at_immigrate; 295 /* 296 * set > 0 if pages under this cgroup are moving to other cgroup. 297 */ 298 atomic_t moving_account; 299 /* taken only while moving_account > 0 */ 300 spinlock_t move_lock; 301 /* 302 * percpu counter. 303 */ 304 struct mem_cgroup_stat_cpu __percpu *stat; 305 /* 306 * used when a cpu is offlined or other synchronizations 307 * See mem_cgroup_read_stat(). 308 */ 309 struct mem_cgroup_stat_cpu nocpu_base; 310 spinlock_t pcp_counter_lock; 311 312 atomic_t dead_count; 313 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) 314 struct tcp_memcontrol tcp_mem; 315 #endif 316 #if defined(CONFIG_MEMCG_KMEM) 317 /* analogous to slab_common's slab_caches list. per-memcg */ 318 struct list_head memcg_slab_caches; 319 /* Not a spinlock, we can take a lot of time walking the list */ 320 struct mutex slab_caches_mutex; 321 /* Index in the kmem_cache->memcg_params->memcg_caches array */ 322 int kmemcg_id; 323 #endif 324 325 int last_scanned_node; 326 #if MAX_NUMNODES > 1 327 nodemask_t scan_nodes; 328 atomic_t numainfo_events; 329 atomic_t numainfo_updating; 330 #endif 331 332 struct mem_cgroup_per_node *nodeinfo[0]; 333 /* WARNING: nodeinfo must be the last member here */ 334 }; 335 336 static size_t memcg_size(void) 337 { 338 return sizeof(struct mem_cgroup) + 339 nr_node_ids * sizeof(struct mem_cgroup_per_node); 340 } 341 342 /* internal only representation about the status of kmem accounting. */ 343 enum { 344 KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */ 345 KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. 
 */
        KMEM_ACCOUNTED_DEAD,    /* dead memcg with pending kmem charges */
};

/* We account when limit is on, but only after call sites are patched */
#define KMEM_ACCOUNTED_MASK \
                ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))

#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
        set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
        return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
{
        set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
{
        clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
        /*
         * Our caller must use css_get() first, because memcg_uncharge_kmem()
         * will call css_put() if it sees the memcg is dead.
         */
        smp_wmb();
        if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
                set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}

static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
        return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
                                  &memcg->kmem_account_flags);
}
#endif

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
        MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
        MOVE_CHARGE_TYPE_FILE,  /* file page (including tmpfs) and swap of it */
        NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        spinlock_t lock;        /* for from, to */
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long immigrate_flags;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;        /* a task moving charges */
        wait_queue_head_t waitq;                /* a waitq for other context */
} mc = {
        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
        return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
        return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_ANON,
        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
        MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
        NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
        _MEM,
        _MEMSWAP,
        _OOM_TYPE,
        _KMEM,
};

#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
#define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL             (0)

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
#define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
#define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child
 * cgroups appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
        return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
        if (!memcg)
                memcg = root_mem_cgroup;
        return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
        return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
{
        return &mem_cgroup_from_css(css)->vmpressure;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
        if (mem_cgroup_sockets_enabled) {
                struct mem_cgroup *memcg;
                struct cg_proto *cg_proto;

                BUG_ON(!sk->sk_prot->proto_cgroup);

                /* Socket cloning can throw us here with sk_cgrp already
                 * filled. It won't, however, necessarily happen from
                 * process context. So the test for root memcg given
                 * the current task's memcg won't help us in this case.
                 *
                 * Respecting the original socket's memcg is a better
                 * decision in this case.
519 */ 520 if (sk->sk_cgrp) { 521 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); 522 css_get(&sk->sk_cgrp->memcg->css); 523 return; 524 } 525 526 rcu_read_lock(); 527 memcg = mem_cgroup_from_task(current); 528 cg_proto = sk->sk_prot->proto_cgroup(memcg); 529 if (!mem_cgroup_is_root(memcg) && 530 memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) { 531 sk->sk_cgrp = cg_proto; 532 } 533 rcu_read_unlock(); 534 } 535 } 536 EXPORT_SYMBOL(sock_update_memcg); 537 538 void sock_release_memcg(struct sock *sk) 539 { 540 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 541 struct mem_cgroup *memcg; 542 WARN_ON(!sk->sk_cgrp->memcg); 543 memcg = sk->sk_cgrp->memcg; 544 css_put(&sk->sk_cgrp->memcg->css); 545 } 546 } 547 548 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) 549 { 550 if (!memcg || mem_cgroup_is_root(memcg)) 551 return NULL; 552 553 return &memcg->tcp_mem.cg_proto; 554 } 555 EXPORT_SYMBOL(tcp_proto_cgroup); 556 557 static void disarm_sock_keys(struct mem_cgroup *memcg) 558 { 559 if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto)) 560 return; 561 static_key_slow_dec(&memcg_socket_limit_enabled); 562 } 563 #else 564 static void disarm_sock_keys(struct mem_cgroup *memcg) 565 { 566 } 567 #endif 568 569 #ifdef CONFIG_MEMCG_KMEM 570 /* 571 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches. 572 * There are two main reasons for not using the css_id for this: 573 * 1) this works better in sparse environments, where we have a lot of memcgs, 574 * but only a few kmem-limited. Or also, if we have, for instance, 200 575 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 576 * 200 entry array for that. 577 * 578 * 2) In order not to violate the cgroup API, we would like to do all memory 579 * allocation in ->create(). At that point, we haven't yet allocated the 580 * css_id. Having a separate index prevents us from messing with the cgroup 581 * core for this 582 * 583 * The current size of the caches array is stored in 584 * memcg_limited_groups_array_size. It will double each time we have to 585 * increase it. 586 */ 587 static DEFINE_IDA(kmem_limited_groups); 588 int memcg_limited_groups_array_size; 589 590 /* 591 * MIN_SIZE is different than 1, because we would like to avoid going through 592 * the alloc/free process all the time. In a small machine, 4 kmem-limited 593 * cgroups is a reasonable guess. In the future, it could be a parameter or 594 * tunable, but that is strictly not necessary. 595 * 596 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get 597 * this constant directly from cgroup, but it is understandable that this is 598 * better kept as an internal representation in cgroup.c. In any case, the 599 * css_id space is not getting any smaller, and we don't have to necessarily 600 * increase ours as well if it increases. 601 */ 602 #define MEMCG_CACHES_MIN_SIZE 4 603 #define MEMCG_CACHES_MAX_SIZE 65535 604 605 /* 606 * A lot of the calls to the cache allocation functions are expected to be 607 * inlined by the compiler. 
Since the calls to memcg_kmem_get_cache are 608 * conditional to this static branch, we'll have to allow modules that does 609 * kmem_cache_alloc and the such to see this symbol as well 610 */ 611 struct static_key memcg_kmem_enabled_key; 612 EXPORT_SYMBOL(memcg_kmem_enabled_key); 613 614 static void disarm_kmem_keys(struct mem_cgroup *memcg) 615 { 616 if (memcg_kmem_is_active(memcg)) { 617 static_key_slow_dec(&memcg_kmem_enabled_key); 618 ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id); 619 } 620 /* 621 * This check can't live in kmem destruction function, 622 * since the charges will outlive the cgroup 623 */ 624 WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0); 625 } 626 #else 627 static void disarm_kmem_keys(struct mem_cgroup *memcg) 628 { 629 } 630 #endif /* CONFIG_MEMCG_KMEM */ 631 632 static void disarm_static_keys(struct mem_cgroup *memcg) 633 { 634 disarm_sock_keys(memcg); 635 disarm_kmem_keys(memcg); 636 } 637 638 static void drain_all_stock_async(struct mem_cgroup *memcg); 639 640 static struct mem_cgroup_per_zone * 641 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid) 642 { 643 VM_BUG_ON((unsigned)nid >= nr_node_ids); 644 return &memcg->nodeinfo[nid]->zoneinfo[zid]; 645 } 646 647 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) 648 { 649 return &memcg->css; 650 } 651 652 static struct mem_cgroup_per_zone * 653 page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page) 654 { 655 int nid = page_to_nid(page); 656 int zid = page_zonenum(page); 657 658 return mem_cgroup_zoneinfo(memcg, nid, zid); 659 } 660 661 static struct mem_cgroup_tree_per_zone * 662 soft_limit_tree_node_zone(int nid, int zid) 663 { 664 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 665 } 666 667 static struct mem_cgroup_tree_per_zone * 668 soft_limit_tree_from_page(struct page *page) 669 { 670 int nid = page_to_nid(page); 671 int zid = page_zonenum(page); 672 673 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 674 } 675 676 static void 677 __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg, 678 struct mem_cgroup_per_zone *mz, 679 struct mem_cgroup_tree_per_zone *mctz, 680 unsigned long long new_usage_in_excess) 681 { 682 struct rb_node **p = &mctz->rb_root.rb_node; 683 struct rb_node *parent = NULL; 684 struct mem_cgroup_per_zone *mz_node; 685 686 if (mz->on_tree) 687 return; 688 689 mz->usage_in_excess = new_usage_in_excess; 690 if (!mz->usage_in_excess) 691 return; 692 while (*p) { 693 parent = *p; 694 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 695 tree_node); 696 if (mz->usage_in_excess < mz_node->usage_in_excess) 697 p = &(*p)->rb_left; 698 /* 699 * We can't avoid mem cgroups that are over their soft 700 * limit by the same amount 701 */ 702 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 703 p = &(*p)->rb_right; 704 } 705 rb_link_node(&mz->tree_node, parent, p); 706 rb_insert_color(&mz->tree_node, &mctz->rb_root); 707 mz->on_tree = true; 708 } 709 710 static void 711 __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, 712 struct mem_cgroup_per_zone *mz, 713 struct mem_cgroup_tree_per_zone *mctz) 714 { 715 if (!mz->on_tree) 716 return; 717 rb_erase(&mz->tree_node, &mctz->rb_root); 718 mz->on_tree = false; 719 } 720 721 static void 722 mem_cgroup_remove_exceeded(struct mem_cgroup *memcg, 723 struct mem_cgroup_per_zone *mz, 724 struct mem_cgroup_tree_per_zone *mctz) 725 { 726 spin_lock(&mctz->lock); 727 __mem_cgroup_remove_exceeded(memcg, mz, mctz); 728 
        spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
        unsigned long long excess;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;
        int nid = page_to_nid(page);
        int zid = page_zonenum(page);
        mctz = soft_limit_tree_from_page(page);

        /*
         * Necessary to update all ancestors when hierarchy is used,
         * because their event counter is not touched.
         */
        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                mz = mem_cgroup_zoneinfo(memcg, nid, zid);
                excess = res_counter_soft_limit_excess(&memcg->res);
                /*
                 * We have to update the tree if mz is on RB-tree or
                 * mem is over its softlimit.
                 */
                if (excess || mz->on_tree) {
                        spin_lock(&mctz->lock);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(memcg, mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
                        spin_unlock(&mctz->lock);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
        int node, zone;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;

        for_each_node(node) {
                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                        mz = mem_cgroup_zoneinfo(memcg, node, zone);
                        mctz = soft_limit_tree_node_zone(node, zone);
                        mem_cgroup_remove_exceeded(memcg, mz, mctz);
                }
        }
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct rb_node *rightmost = NULL;
        struct mem_cgroup_per_zone *mz;

retry:
        mz = NULL;
        rightmost = rb_last(&mctz->rb_root);
        if (!rightmost)
                goto done;              /* Nothing to reclaim from */

        mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
        /*
         * Remove the node now but someone else can add it back,
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
        if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
                !css_tryget(&mz->memcg->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct mem_cgroup_per_zone *mz;

        spin_lock(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock(&mctz->lock);
        return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement such periodic synchronization for memcg's counters as well.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because he accounts memory. Even if we provide a quick-and-fuzzy read, we
 * always have to visit all online cpus and sum them up. So, for now,
 * unnecessary synchronization is not implemented. (just implemented for cpu
 * hotplug)
 *
 * If there are kernel-internal actions which can make use of a not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
                                 enum mem_cgroup_stat_index idx)
{
        long val = 0;
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
        spin_lock(&memcg->pcp_counter_lock);
        val += memcg->nocpu_base.count[idx];
        spin_unlock(&memcg->pcp_counter_lock);
#endif
        put_online_cpus();
        return val;
}

/* Adjust the per-cpu swap counter by one page: +1 on charge, -1 on uncharge */
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
                                       bool charge)
{
        int val = (charge) ? 1 : -1;
        this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
                                            enum mem_cgroup_events_index idx)
{
        unsigned long val = 0;
        int cpu;

        for_each_online_cpu(cpu)
                val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
        spin_lock(&memcg->pcp_counter_lock);
        val += memcg->nocpu_base.events[idx];
        spin_unlock(&memcg->pcp_counter_lock);
#endif
        return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         struct page *page,
                                         bool anon, int nr_pages)
{
        preempt_disable();

        /*
         * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
         * counted as CACHE even if it's on ANON LRU.
         */
        if (anon)
                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
                                nr_pages);
        else
                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
                                nr_pages);

        if (PageTransHuge(page))
                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
                                nr_pages);

        /* pagein of a big page is an event.
So, ignore page size */ 901 if (nr_pages > 0) 902 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 903 else { 904 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 905 nr_pages = -nr_pages; /* for event */ 906 } 907 908 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 909 910 preempt_enable(); 911 } 912 913 unsigned long 914 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) 915 { 916 struct mem_cgroup_per_zone *mz; 917 918 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 919 return mz->lru_size[lru]; 920 } 921 922 static unsigned long 923 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, 924 unsigned int lru_mask) 925 { 926 struct mem_cgroup_per_zone *mz; 927 enum lru_list lru; 928 unsigned long ret = 0; 929 930 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 931 932 for_each_lru(lru) { 933 if (BIT(lru) & lru_mask) 934 ret += mz->lru_size[lru]; 935 } 936 return ret; 937 } 938 939 static unsigned long 940 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 941 int nid, unsigned int lru_mask) 942 { 943 u64 total = 0; 944 int zid; 945 946 for (zid = 0; zid < MAX_NR_ZONES; zid++) 947 total += mem_cgroup_zone_nr_lru_pages(memcg, 948 nid, zid, lru_mask); 949 950 return total; 951 } 952 953 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 954 unsigned int lru_mask) 955 { 956 int nid; 957 u64 total = 0; 958 959 for_each_node_state(nid, N_MEMORY) 960 total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); 961 return total; 962 } 963 964 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 965 enum mem_cgroup_events_target target) 966 { 967 unsigned long val, next; 968 969 val = __this_cpu_read(memcg->stat->nr_page_events); 970 next = __this_cpu_read(memcg->stat->targets[target]); 971 /* from time_after() in jiffies.h */ 972 if ((long)next - (long)val < 0) { 973 switch (target) { 974 case MEM_CGROUP_TARGET_THRESH: 975 next = val + THRESHOLDS_EVENTS_TARGET; 976 break; 977 case MEM_CGROUP_TARGET_SOFTLIMIT: 978 next = val + SOFTLIMIT_EVENTS_TARGET; 979 break; 980 case MEM_CGROUP_TARGET_NUMAINFO: 981 next = val + NUMAINFO_EVENTS_TARGET; 982 break; 983 default: 984 break; 985 } 986 __this_cpu_write(memcg->stat->targets[target], next); 987 return true; 988 } 989 return false; 990 } 991 992 /* 993 * Check events in order. 994 * 995 */ 996 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 997 { 998 preempt_disable(); 999 /* threshold event is triggered in finer grain than soft limit */ 1000 if (unlikely(mem_cgroup_event_ratelimit(memcg, 1001 MEM_CGROUP_TARGET_THRESH))) { 1002 bool do_softlimit; 1003 bool do_numainfo __maybe_unused; 1004 1005 do_softlimit = mem_cgroup_event_ratelimit(memcg, 1006 MEM_CGROUP_TARGET_SOFTLIMIT); 1007 #if MAX_NUMNODES > 1 1008 do_numainfo = mem_cgroup_event_ratelimit(memcg, 1009 MEM_CGROUP_TARGET_NUMAINFO); 1010 #endif 1011 preempt_enable(); 1012 1013 mem_cgroup_threshold(memcg); 1014 if (unlikely(do_softlimit)) 1015 mem_cgroup_update_tree(memcg, page); 1016 #if MAX_NUMNODES > 1 1017 if (unlikely(do_numainfo)) 1018 atomic_inc(&memcg->numainfo_events); 1019 #endif 1020 } else 1021 preempt_enable(); 1022 } 1023 1024 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 1025 { 1026 /* 1027 * mm_update_next_owner() may clear mm->owner to NULL 1028 * if it races with swapoff, page migration, etc. 1029 * So this can be called with p == NULL. 
1030 */ 1031 if (unlikely(!p)) 1032 return NULL; 1033 1034 return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id)); 1035 } 1036 1037 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 1038 { 1039 struct mem_cgroup *memcg = NULL; 1040 1041 if (!mm) 1042 return NULL; 1043 /* 1044 * Because we have no locks, mm->owner's may be being moved to other 1045 * cgroup. We use css_tryget() here even if this looks 1046 * pessimistic (rather than adding locks here). 1047 */ 1048 rcu_read_lock(); 1049 do { 1050 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1051 if (unlikely(!memcg)) 1052 break; 1053 } while (!css_tryget(&memcg->css)); 1054 rcu_read_unlock(); 1055 return memcg; 1056 } 1057 1058 /* 1059 * Returns a next (in a pre-order walk) alive memcg (with elevated css 1060 * ref. count) or NULL if the whole root's subtree has been visited. 1061 * 1062 * helper function to be used by mem_cgroup_iter 1063 */ 1064 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, 1065 struct mem_cgroup *last_visited) 1066 { 1067 struct cgroup_subsys_state *prev_css, *next_css; 1068 1069 prev_css = last_visited ? &last_visited->css : NULL; 1070 skip_node: 1071 next_css = css_next_descendant_pre(prev_css, &root->css); 1072 1073 /* 1074 * Even if we found a group we have to make sure it is 1075 * alive. css && !memcg means that the groups should be 1076 * skipped and we should continue the tree walk. 1077 * last_visited css is safe to use because it is 1078 * protected by css_get and the tree walk is rcu safe. 1079 */ 1080 if (next_css) { 1081 struct mem_cgroup *mem = mem_cgroup_from_css(next_css); 1082 1083 if (css_tryget(&mem->css)) 1084 return mem; 1085 else { 1086 prev_css = next_css; 1087 goto skip_node; 1088 } 1089 } 1090 1091 return NULL; 1092 } 1093 1094 static void mem_cgroup_iter_invalidate(struct mem_cgroup *root) 1095 { 1096 /* 1097 * When a group in the hierarchy below root is destroyed, the 1098 * hierarchy iterator can no longer be trusted since it might 1099 * have pointed to the destroyed group. Invalidate it. 1100 */ 1101 atomic_inc(&root->dead_count); 1102 } 1103 1104 static struct mem_cgroup * 1105 mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, 1106 struct mem_cgroup *root, 1107 int *sequence) 1108 { 1109 struct mem_cgroup *position = NULL; 1110 /* 1111 * A cgroup destruction happens in two stages: offlining and 1112 * release. They are separated by a RCU grace period. 1113 * 1114 * If the iterator is valid, we may still race with an 1115 * offlining. The RCU lock ensures the object won't be 1116 * released, tryget will fail if we lost the race. 1117 */ 1118 *sequence = atomic_read(&root->dead_count); 1119 if (iter->last_dead_count == *sequence) { 1120 smp_rmb(); 1121 position = iter->last_visited; 1122 if (position && !css_tryget(&position->css)) 1123 position = NULL; 1124 } 1125 return position; 1126 } 1127 1128 static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, 1129 struct mem_cgroup *last_visited, 1130 struct mem_cgroup *new_position, 1131 int sequence) 1132 { 1133 if (last_visited) 1134 css_put(&last_visited->css); 1135 /* 1136 * We store the sequence count from the time @last_visited was 1137 * loaded successfully instead of rereading it here so that we 1138 * don't lose destruction events in between. We could have 1139 * raced with the destruction of @new_position after all. 
1140 */ 1141 iter->last_visited = new_position; 1142 smp_wmb(); 1143 iter->last_dead_count = sequence; 1144 } 1145 1146 /** 1147 * mem_cgroup_iter - iterate over memory cgroup hierarchy 1148 * @root: hierarchy root 1149 * @prev: previously returned memcg, NULL on first invocation 1150 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1151 * 1152 * Returns references to children of the hierarchy below @root, or 1153 * @root itself, or %NULL after a full round-trip. 1154 * 1155 * Caller must pass the return value in @prev on subsequent 1156 * invocations for reference counting, or use mem_cgroup_iter_break() 1157 * to cancel a hierarchy walk before the round-trip is complete. 1158 * 1159 * Reclaimers can specify a zone and a priority level in @reclaim to 1160 * divide up the memcgs in the hierarchy among all concurrent 1161 * reclaimers operating on the same zone and priority. 1162 */ 1163 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1164 struct mem_cgroup *prev, 1165 struct mem_cgroup_reclaim_cookie *reclaim) 1166 { 1167 struct mem_cgroup *memcg = NULL; 1168 struct mem_cgroup *last_visited = NULL; 1169 1170 if (mem_cgroup_disabled()) 1171 return NULL; 1172 1173 if (!root) 1174 root = root_mem_cgroup; 1175 1176 if (prev && !reclaim) 1177 last_visited = prev; 1178 1179 if (!root->use_hierarchy && root != root_mem_cgroup) { 1180 if (prev) 1181 goto out_css_put; 1182 return root; 1183 } 1184 1185 rcu_read_lock(); 1186 while (!memcg) { 1187 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 1188 int uninitialized_var(seq); 1189 1190 if (reclaim) { 1191 int nid = zone_to_nid(reclaim->zone); 1192 int zid = zone_idx(reclaim->zone); 1193 struct mem_cgroup_per_zone *mz; 1194 1195 mz = mem_cgroup_zoneinfo(root, nid, zid); 1196 iter = &mz->reclaim_iter[reclaim->priority]; 1197 if (prev && reclaim->generation != iter->generation) { 1198 iter->last_visited = NULL; 1199 goto out_unlock; 1200 } 1201 1202 last_visited = mem_cgroup_iter_load(iter, root, &seq); 1203 } 1204 1205 memcg = __mem_cgroup_iter_next(root, last_visited); 1206 1207 if (reclaim) { 1208 mem_cgroup_iter_update(iter, last_visited, memcg, seq); 1209 1210 if (!memcg) 1211 iter->generation++; 1212 else if (!prev && memcg) 1213 reclaim->generation = iter->generation; 1214 } 1215 1216 if (prev && !memcg) 1217 goto out_unlock; 1218 } 1219 out_unlock: 1220 rcu_read_unlock(); 1221 out_css_put: 1222 if (prev && prev != root) 1223 css_put(&prev->css); 1224 1225 return memcg; 1226 } 1227 1228 /** 1229 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1230 * @root: hierarchy root 1231 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1232 */ 1233 void mem_cgroup_iter_break(struct mem_cgroup *root, 1234 struct mem_cgroup *prev) 1235 { 1236 if (!root) 1237 root = root_mem_cgroup; 1238 if (prev && prev != root) 1239 css_put(&prev->css); 1240 } 1241 1242 /* 1243 * Iteration constructs for visiting all cgroups (under a tree). If 1244 * loops are exited prematurely (break), mem_cgroup_iter_break() must 1245 * be used for reference counting. 
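 *
 * A minimal usage sketch (illustrative only; "should_stop" is a stand-in for
 * whatever condition makes the caller bail out early):
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}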
1246 */ 1247 #define for_each_mem_cgroup_tree(iter, root) \ 1248 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 1249 iter != NULL; \ 1250 iter = mem_cgroup_iter(root, iter, NULL)) 1251 1252 #define for_each_mem_cgroup(iter) \ 1253 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 1254 iter != NULL; \ 1255 iter = mem_cgroup_iter(NULL, iter, NULL)) 1256 1257 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) 1258 { 1259 struct mem_cgroup *memcg; 1260 1261 rcu_read_lock(); 1262 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1263 if (unlikely(!memcg)) 1264 goto out; 1265 1266 switch (idx) { 1267 case PGFAULT: 1268 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); 1269 break; 1270 case PGMAJFAULT: 1271 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); 1272 break; 1273 default: 1274 BUG(); 1275 } 1276 out: 1277 rcu_read_unlock(); 1278 } 1279 EXPORT_SYMBOL(__mem_cgroup_count_vm_event); 1280 1281 /** 1282 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg 1283 * @zone: zone of the wanted lruvec 1284 * @memcg: memcg of the wanted lruvec 1285 * 1286 * Returns the lru list vector holding pages for the given @zone and 1287 * @mem. This can be the global zone lruvec, if the memory controller 1288 * is disabled. 1289 */ 1290 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, 1291 struct mem_cgroup *memcg) 1292 { 1293 struct mem_cgroup_per_zone *mz; 1294 struct lruvec *lruvec; 1295 1296 if (mem_cgroup_disabled()) { 1297 lruvec = &zone->lruvec; 1298 goto out; 1299 } 1300 1301 mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone)); 1302 lruvec = &mz->lruvec; 1303 out: 1304 /* 1305 * Since a node can be onlined after the mem_cgroup was created, 1306 * we have to be prepared to initialize lruvec->zone here; 1307 * and if offlined then reonlined, we need to reinitialize it. 1308 */ 1309 if (unlikely(lruvec->zone != zone)) 1310 lruvec->zone = zone; 1311 return lruvec; 1312 } 1313 1314 /* 1315 * Following LRU functions are allowed to be used without PCG_LOCK. 1316 * Operations are called by routine of global LRU independently from memcg. 1317 * What we have to take care of here is validness of pc->mem_cgroup. 1318 * 1319 * Changes to pc->mem_cgroup happens when 1320 * 1. charge 1321 * 2. moving account 1322 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache. 1323 * It is added to LRU before charge. 1324 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU. 1325 * When moving account, the page is not on LRU. It's isolated. 1326 */ 1327 1328 /** 1329 * mem_cgroup_page_lruvec - return lruvec for adding an lru page 1330 * @page: the page 1331 * @zone: zone of the page 1332 */ 1333 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) 1334 { 1335 struct mem_cgroup_per_zone *mz; 1336 struct mem_cgroup *memcg; 1337 struct page_cgroup *pc; 1338 struct lruvec *lruvec; 1339 1340 if (mem_cgroup_disabled()) { 1341 lruvec = &zone->lruvec; 1342 goto out; 1343 } 1344 1345 pc = lookup_page_cgroup(page); 1346 memcg = pc->mem_cgroup; 1347 1348 /* 1349 * Surreptitiously switch any uncharged offlist page to root: 1350 * an uncharged page off lru does nothing to secure 1351 * its former mem_cgroup from sudden removal. 1352 * 1353 * Our caller holds lru_lock, and PageCgroupUsed is updated 1354 * under page_cgroup lock: between them, they make all uses 1355 * of pc->mem_cgroup safe. 
1356 */ 1357 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup) 1358 pc->mem_cgroup = memcg = root_mem_cgroup; 1359 1360 mz = page_cgroup_zoneinfo(memcg, page); 1361 lruvec = &mz->lruvec; 1362 out: 1363 /* 1364 * Since a node can be onlined after the mem_cgroup was created, 1365 * we have to be prepared to initialize lruvec->zone here; 1366 * and if offlined then reonlined, we need to reinitialize it. 1367 */ 1368 if (unlikely(lruvec->zone != zone)) 1369 lruvec->zone = zone; 1370 return lruvec; 1371 } 1372 1373 /** 1374 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1375 * @lruvec: mem_cgroup per zone lru vector 1376 * @lru: index of lru list the page is sitting on 1377 * @nr_pages: positive when adding or negative when removing 1378 * 1379 * This function must be called when a page is added to or removed from an 1380 * lru list. 1381 */ 1382 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1383 int nr_pages) 1384 { 1385 struct mem_cgroup_per_zone *mz; 1386 unsigned long *lru_size; 1387 1388 if (mem_cgroup_disabled()) 1389 return; 1390 1391 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 1392 lru_size = mz->lru_size + lru; 1393 *lru_size += nr_pages; 1394 VM_BUG_ON((long)(*lru_size) < 0); 1395 } 1396 1397 /* 1398 * Checks whether given mem is same or in the root_mem_cgroup's 1399 * hierarchy subtree 1400 */ 1401 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1402 struct mem_cgroup *memcg) 1403 { 1404 if (root_memcg == memcg) 1405 return true; 1406 if (!root_memcg->use_hierarchy || !memcg) 1407 return false; 1408 return css_is_ancestor(&memcg->css, &root_memcg->css); 1409 } 1410 1411 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1412 struct mem_cgroup *memcg) 1413 { 1414 bool ret; 1415 1416 rcu_read_lock(); 1417 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg); 1418 rcu_read_unlock(); 1419 return ret; 1420 } 1421 1422 bool task_in_mem_cgroup(struct task_struct *task, 1423 const struct mem_cgroup *memcg) 1424 { 1425 struct mem_cgroup *curr = NULL; 1426 struct task_struct *p; 1427 bool ret; 1428 1429 p = find_lock_task_mm(task); 1430 if (p) { 1431 curr = try_get_mem_cgroup_from_mm(p->mm); 1432 task_unlock(p); 1433 } else { 1434 /* 1435 * All threads may have already detached their mm's, but the oom 1436 * killer still needs to detect if they have already been oom 1437 * killed to prevent needlessly killing additional tasks. 1438 */ 1439 rcu_read_lock(); 1440 curr = mem_cgroup_from_task(task); 1441 if (curr) 1442 css_get(&curr->css); 1443 rcu_read_unlock(); 1444 } 1445 if (!curr) 1446 return false; 1447 /* 1448 * We should check use_hierarchy of "memcg" not "curr". Because checking 1449 * use_hierarchy of "curr" here make this function true if hierarchy is 1450 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup* 1451 * hierarchy(even if use_hierarchy is disabled in "memcg"). 
 */
        ret = mem_cgroup_same_or_subtree(memcg, curr);
        css_put(&curr->css);
        return ret;
}

int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        unsigned long inactive_ratio;
        unsigned long inactive;
        unsigned long active;
        unsigned long gb;

        inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
        active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);

        gb = (inactive + active) >> (30 - PAGE_SHIFT);
        if (gb)
                inactive_ratio = int_sqrt(10 * gb);
        else
                inactive_ratio = 1;

        return inactive * inactive_ratio < active;
}

#define mem_cgroup_from_res_counter(counter, member)    \
        container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
        unsigned long long margin;

        margin = res_counter_margin(&memcg->res);
        if (do_swap_account)
                margin = min(margin, res_counter_margin(&memcg->memsw));
        return margin >> PAGE_SHIFT;
}

int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* root ? */
        if (!css_parent(&memcg->css))
                return vm_swappiness;

        return memcg->swappiness;
}

/*
 * memcg->moving_account is used for checking the possibility that some thread
 * is calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *         CPU-A                               CPU-B
 *                                             rcu_read_lock()
 *         memcg->moving_account+1             if (memcg->moving_account)
 *                                                     take heavy locks.
 *         synchronize_rcu()                   update something.
 *         rcu_read_unlock()
 *         start move here.
 */

/* for quick checking without looking up memcg */
atomic_t memcg_moving __read_mostly;

static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
        atomic_inc(&memcg_moving);
        atomic_inc(&memcg->moving_account);
        synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
        /*
         * Now, mem_cgroup_clear_mc() may call this function with NULL.
         * We check NULL in callee rather than caller.
         */
        if (memcg) {
                atomic_dec(&memcg_moving);
                atomic_dec(&memcg->moving_account);
        }
}

/*
 * Two routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
 *                        is used for avoiding races in accounting. If true,
 *                        pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
 *                        under hierarchy of moving cgroups. This is for
 *                        waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
{
        VM_BUG_ON(!rcu_read_lock_held());
        return atomic_read(&memcg->moving_account) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        bool ret = false;
        /*
         * Unlike task_move routines, we access mc.to, mc.from not under
         * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1569 */ 1570 spin_lock(&mc.lock); 1571 from = mc.from; 1572 to = mc.to; 1573 if (!from) 1574 goto unlock; 1575 1576 ret = mem_cgroup_same_or_subtree(memcg, from) 1577 || mem_cgroup_same_or_subtree(memcg, to); 1578 unlock: 1579 spin_unlock(&mc.lock); 1580 return ret; 1581 } 1582 1583 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1584 { 1585 if (mc.moving_task && current != mc.moving_task) { 1586 if (mem_cgroup_under_move(memcg)) { 1587 DEFINE_WAIT(wait); 1588 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1589 /* moving charge context might have finished. */ 1590 if (mc.moving_task) 1591 schedule(); 1592 finish_wait(&mc.waitq, &wait); 1593 return true; 1594 } 1595 } 1596 return false; 1597 } 1598 1599 /* 1600 * Take this lock when 1601 * - a code tries to modify page's memcg while it's USED. 1602 * - a code tries to modify page state accounting in a memcg. 1603 * see mem_cgroup_stolen(), too. 1604 */ 1605 static void move_lock_mem_cgroup(struct mem_cgroup *memcg, 1606 unsigned long *flags) 1607 { 1608 spin_lock_irqsave(&memcg->move_lock, *flags); 1609 } 1610 1611 static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, 1612 unsigned long *flags) 1613 { 1614 spin_unlock_irqrestore(&memcg->move_lock, *flags); 1615 } 1616 1617 #define K(x) ((x) << (PAGE_SHIFT-10)) 1618 /** 1619 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 1620 * @memcg: The memory cgroup that went over limit 1621 * @p: Task that is going to be killed 1622 * 1623 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1624 * enabled 1625 */ 1626 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1627 { 1628 struct cgroup *task_cgrp; 1629 struct cgroup *mem_cgrp; 1630 /* 1631 * Need a buffer in BSS, can't rely on allocations. The code relies 1632 * on the assumption that OOM is serialized for memory controller. 1633 * If this assumption is broken, revisit this code. 
1634 */ 1635 static char memcg_name[PATH_MAX]; 1636 int ret; 1637 struct mem_cgroup *iter; 1638 unsigned int i; 1639 1640 if (!p) 1641 return; 1642 1643 rcu_read_lock(); 1644 1645 mem_cgrp = memcg->css.cgroup; 1646 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); 1647 1648 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); 1649 if (ret < 0) { 1650 /* 1651 * Unfortunately, we are unable to convert to a useful name 1652 * But we'll still print out the usage information 1653 */ 1654 rcu_read_unlock(); 1655 goto done; 1656 } 1657 rcu_read_unlock(); 1658 1659 pr_info("Task in %s killed", memcg_name); 1660 1661 rcu_read_lock(); 1662 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX); 1663 if (ret < 0) { 1664 rcu_read_unlock(); 1665 goto done; 1666 } 1667 rcu_read_unlock(); 1668 1669 /* 1670 * Continues from above, so we don't need an KERN_ level 1671 */ 1672 pr_cont(" as a result of limit of %s\n", memcg_name); 1673 done: 1674 1675 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n", 1676 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, 1677 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, 1678 res_counter_read_u64(&memcg->res, RES_FAILCNT)); 1679 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n", 1680 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, 1681 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, 1682 res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); 1683 pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n", 1684 res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10, 1685 res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10, 1686 res_counter_read_u64(&memcg->kmem, RES_FAILCNT)); 1687 1688 for_each_mem_cgroup_tree(iter, memcg) { 1689 pr_info("Memory cgroup stats"); 1690 1691 rcu_read_lock(); 1692 ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX); 1693 if (!ret) 1694 pr_cont(" for %s", memcg_name); 1695 rcu_read_unlock(); 1696 pr_cont(":"); 1697 1698 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1699 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1700 continue; 1701 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], 1702 K(mem_cgroup_read_stat(iter, i))); 1703 } 1704 1705 for (i = 0; i < NR_LRU_LISTS; i++) 1706 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1707 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1708 1709 pr_cont("\n"); 1710 } 1711 } 1712 1713 /* 1714 * This function returns the number of memcg under hierarchy tree. Returns 1715 * 1(self count) if no children. 1716 */ 1717 static int mem_cgroup_count_children(struct mem_cgroup *memcg) 1718 { 1719 int num = 0; 1720 struct mem_cgroup *iter; 1721 1722 for_each_mem_cgroup_tree(iter, memcg) 1723 num++; 1724 return num; 1725 } 1726 1727 /* 1728 * Return the memory (and swap, if configured) limit for a memcg. 1729 */ 1730 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) 1731 { 1732 u64 limit; 1733 1734 limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 1735 1736 /* 1737 * Do not consider swap space if we cannot swap due to swappiness 1738 */ 1739 if (mem_cgroup_swappiness(memcg)) { 1740 u64 memsw; 1741 1742 limit += total_swap_pages << PAGE_SHIFT; 1743 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 1744 1745 /* 1746 * If memsw is finite and limits the amount of swap space 1747 * available to this memcg, return that limit. 
1748 */ 1749 limit = min(limit, memsw); 1750 } 1751 1752 return limit; 1753 } 1754 1755 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1756 int order) 1757 { 1758 struct mem_cgroup *iter; 1759 unsigned long chosen_points = 0; 1760 unsigned long totalpages; 1761 unsigned int points = 0; 1762 struct task_struct *chosen = NULL; 1763 1764 /* 1765 * If current has a pending SIGKILL or is exiting, then automatically 1766 * select it. The goal is to allow it to allocate so that it may 1767 * quickly exit and free its memory. 1768 */ 1769 if (fatal_signal_pending(current) || current->flags & PF_EXITING) { 1770 set_thread_flag(TIF_MEMDIE); 1771 return; 1772 } 1773 1774 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL); 1775 totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1; 1776 for_each_mem_cgroup_tree(iter, memcg) { 1777 struct css_task_iter it; 1778 struct task_struct *task; 1779 1780 css_task_iter_start(&iter->css, &it); 1781 while ((task = css_task_iter_next(&it))) { 1782 switch (oom_scan_process_thread(task, totalpages, NULL, 1783 false)) { 1784 case OOM_SCAN_SELECT: 1785 if (chosen) 1786 put_task_struct(chosen); 1787 chosen = task; 1788 chosen_points = ULONG_MAX; 1789 get_task_struct(chosen); 1790 /* fall through */ 1791 case OOM_SCAN_CONTINUE: 1792 continue; 1793 case OOM_SCAN_ABORT: 1794 css_task_iter_end(&it); 1795 mem_cgroup_iter_break(memcg, iter); 1796 if (chosen) 1797 put_task_struct(chosen); 1798 return; 1799 case OOM_SCAN_OK: 1800 break; 1801 }; 1802 points = oom_badness(task, memcg, NULL, totalpages); 1803 if (points > chosen_points) { 1804 if (chosen) 1805 put_task_struct(chosen); 1806 chosen = task; 1807 chosen_points = points; 1808 get_task_struct(chosen); 1809 } 1810 } 1811 css_task_iter_end(&it); 1812 } 1813 1814 if (!chosen) 1815 return; 1816 points = chosen_points * 1000 / totalpages; 1817 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg, 1818 NULL, "Memory cgroup out of memory"); 1819 } 1820 1821 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, 1822 gfp_t gfp_mask, 1823 unsigned long flags) 1824 { 1825 unsigned long total = 0; 1826 bool noswap = false; 1827 int loop; 1828 1829 if (flags & MEM_CGROUP_RECLAIM_NOSWAP) 1830 noswap = true; 1831 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum) 1832 noswap = true; 1833 1834 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) { 1835 if (loop) 1836 drain_all_stock_async(memcg); 1837 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap); 1838 /* 1839 * Allow limit shrinkers, which are triggered directly 1840 * by userspace, to catch signals and stop reclaim 1841 * after minimal progress, regardless of the margin. 1842 */ 1843 if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK)) 1844 break; 1845 if (mem_cgroup_margin(memcg)) 1846 break; 1847 /* 1848 * If nothing was reclaimed after two attempts, there 1849 * may be no reclaimable pages in this hierarchy. 1850 */ 1851 if (loop && !total) 1852 break; 1853 } 1854 return total; 1855 } 1856 1857 /** 1858 * test_mem_cgroup_node_reclaimable 1859 * @memcg: the target memcg 1860 * @nid: the node ID to be checked. 1861 * @noswap : specify true here if the user wants flle only information. 1862 * 1863 * This function returns whether the specified memcg contains any 1864 * reclaimable pages on a node. Returns true if there are any reclaimable 1865 * pages in the node. 
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
                int nid, bool noswap)
{
        if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
                return true;
        if (noswap || !total_swap_pages)
                return false;
        if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
                return true;
        return false;

}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
        int nid;
        /*
         * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
         * pagein/pageout changes since the last update.
         */
        if (!atomic_read(&memcg->numainfo_events))
                return;
        if (atomic_inc_return(&memcg->numainfo_updating) > 1)
                return;

        /* make a nodemask where this memcg uses memory from */
        memcg->scan_nodes = node_states[N_MEMORY];

        for_each_node_mask(nid, node_states[N_MEMORY]) {

                if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
                        node_clear(nid, memcg->scan_nodes);
        }

        atomic_set(&memcg->numainfo_events, 0);
        atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros. and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
        int node;

        mem_cgroup_may_update_nodemask(memcg);
        node = memcg->last_scanned_node;

        node = next_node(node, memcg->scan_nodes);
        if (node == MAX_NUMNODES)
                node = first_node(memcg->scan_nodes);
        /*
         * We call this when we hit the limit, not when pages are added to the
         * LRU. No LRU may hold pages because all pages are UNEVICTABLE or
         * the memcg is too small and all pages are not on the LRU. In that
         * case, we use the current node.
         */
        if (unlikely(node == MAX_NUMNODES))
                node = numa_node_id();

        memcg->last_scanned_node = node;
        return node;
}

/*
 * Check all nodes whether they contain reclaimable pages or not.
 * For quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to double check.
 */
static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
        int nid;

        /*
         * quick check...making use of scan_node.
         * We can skip unused nodes.
         */
        if (!nodes_empty(memcg->scan_nodes)) {
                for (nid = first_node(memcg->scan_nodes);
                     nid < MAX_NUMNODES;
                     nid = next_node(nid, memcg->scan_nodes)) {

                        if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
                                return true;
                }
        }
        /*
         * Check the rest of the nodes.
1972 */
1973 for_each_node_state(nid, N_MEMORY) {
1974 if (node_isset(nid, memcg->scan_nodes))
1975 continue;
1976 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1977 return true;
1978 }
1979 return false;
1980 }
1981
1982 #else
1983 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1984 {
1985 return 0;
1986 }
1987
1988 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1989 {
1990 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1991 }
1992 #endif
1993
1994 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1995 struct zone *zone,
1996 gfp_t gfp_mask,
1997 unsigned long *total_scanned)
1998 {
1999 struct mem_cgroup *victim = NULL;
2000 int total = 0;
2001 int loop = 0;
2002 unsigned long excess;
2003 unsigned long nr_scanned;
2004 struct mem_cgroup_reclaim_cookie reclaim = {
2005 .zone = zone,
2006 .priority = 0,
2007 };
2008
2009 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2010
2011 while (1) {
2012 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2013 if (!victim) {
2014 loop++;
2015 if (loop >= 2) {
2016 /*
2017 * If we have not been able to reclaim
2018 * anything, it might be because there are
2019 * no reclaimable pages under this hierarchy.
2020 */
2021 if (!total)
2022 break;
2023 /*
2024 * We want to do more targeted reclaim.
2025 * excess >> 2 is not too large, so we won't
2026 * reclaim too much, nor too small, so we won't
2027 * keep coming back to reclaim from this cgroup.
2028 */
2029 if (total >= (excess >> 2) ||
2030 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2031 break;
2032 }
2033 continue;
2034 }
2035 if (!mem_cgroup_reclaimable(victim, false))
2036 continue;
2037 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2038 zone, &nr_scanned);
2039 *total_scanned += nr_scanned;
2040 if (!res_counter_soft_limit_excess(&root_memcg->res))
2041 break;
2042 }
2043 mem_cgroup_iter_break(root_memcg, victim);
2044 return total;
2045 }
2046
2047 static DEFINE_SPINLOCK(memcg_oom_lock);
2048
2049 /*
2050 * Check whether the OOM killer is already running under our hierarchy.
2051 * If someone else is running it, return false.
2052 */
2053 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2054 {
2055 struct mem_cgroup *iter, *failed = NULL;
2056
2057 spin_lock(&memcg_oom_lock);
2058
2059 for_each_mem_cgroup_tree(iter, memcg) {
2060 if (iter->oom_lock) {
2061 /*
2062 * This subtree of our hierarchy is already locked,
2063 * so we cannot grant the lock.
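 *
 * For example (illustrative): with a hierarchy P -> C, if C already
 * has oom_lock set when someone tries to lock P's subtree, the walk
 * stops at C, "failed" is recorded, and the cleanup pass below clears
 * oom_lock on the members locked so far, so the trylock as a whole
 * fails.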
2064 */ 2065 failed = iter; 2066 mem_cgroup_iter_break(memcg, iter); 2067 break; 2068 } else 2069 iter->oom_lock = true; 2070 } 2071 2072 if (failed) { 2073 /* 2074 * OK, we failed to lock the whole subtree so we have 2075 * to clean up what we set up to the failing subtree 2076 */ 2077 for_each_mem_cgroup_tree(iter, memcg) { 2078 if (iter == failed) { 2079 mem_cgroup_iter_break(memcg, iter); 2080 break; 2081 } 2082 iter->oom_lock = false; 2083 } 2084 } 2085 2086 spin_unlock(&memcg_oom_lock); 2087 2088 return !failed; 2089 } 2090 2091 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 2092 { 2093 struct mem_cgroup *iter; 2094 2095 spin_lock(&memcg_oom_lock); 2096 for_each_mem_cgroup_tree(iter, memcg) 2097 iter->oom_lock = false; 2098 spin_unlock(&memcg_oom_lock); 2099 } 2100 2101 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 2102 { 2103 struct mem_cgroup *iter; 2104 2105 for_each_mem_cgroup_tree(iter, memcg) 2106 atomic_inc(&iter->under_oom); 2107 } 2108 2109 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 2110 { 2111 struct mem_cgroup *iter; 2112 2113 /* 2114 * When a new child is created while the hierarchy is under oom, 2115 * mem_cgroup_oom_lock() may not be called. We have to use 2116 * atomic_add_unless() here. 2117 */ 2118 for_each_mem_cgroup_tree(iter, memcg) 2119 atomic_add_unless(&iter->under_oom, -1, 0); 2120 } 2121 2122 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 2123 2124 struct oom_wait_info { 2125 struct mem_cgroup *memcg; 2126 wait_queue_t wait; 2127 }; 2128 2129 static int memcg_oom_wake_function(wait_queue_t *wait, 2130 unsigned mode, int sync, void *arg) 2131 { 2132 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 2133 struct mem_cgroup *oom_wait_memcg; 2134 struct oom_wait_info *oom_wait_info; 2135 2136 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 2137 oom_wait_memcg = oom_wait_info->memcg; 2138 2139 /* 2140 * Both of oom_wait_info->memcg and wake_memcg are stable under us. 2141 * Then we can use css_is_ancestor without taking care of RCU. 2142 */ 2143 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) 2144 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) 2145 return 0; 2146 return autoremove_wake_function(wait, mode, sync, arg); 2147 } 2148 2149 static void memcg_wakeup_oom(struct mem_cgroup *memcg) 2150 { 2151 atomic_inc(&memcg->oom_wakeups); 2152 /* for filtering, pass "memcg" as argument. */ 2153 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 2154 } 2155 2156 static void memcg_oom_recover(struct mem_cgroup *memcg) 2157 { 2158 if (memcg && atomic_read(&memcg->under_oom)) 2159 memcg_wakeup_oom(memcg); 2160 } 2161 2162 /* 2163 * try to call OOM killer 2164 */ 2165 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 2166 { 2167 bool locked; 2168 int wakeups; 2169 2170 if (!current->memcg_oom.may_oom) 2171 return; 2172 2173 current->memcg_oom.in_memcg_oom = 1; 2174 2175 /* 2176 * As with any blocking lock, a contender needs to start 2177 * listening for wakeups before attempting the trylock, 2178 * otherwise it can miss the wakeup from the unlock and sleep 2179 * indefinitely. This is just open-coded because our locking 2180 * is so particular to memcg hierarchies. 
2181 */ 2182 wakeups = atomic_read(&memcg->oom_wakeups); 2183 mem_cgroup_mark_under_oom(memcg); 2184 2185 locked = mem_cgroup_oom_trylock(memcg); 2186 2187 if (locked) 2188 mem_cgroup_oom_notify(memcg); 2189 2190 if (locked && !memcg->oom_kill_disable) { 2191 mem_cgroup_unmark_under_oom(memcg); 2192 mem_cgroup_out_of_memory(memcg, mask, order); 2193 mem_cgroup_oom_unlock(memcg); 2194 /* 2195 * There is no guarantee that an OOM-lock contender 2196 * sees the wakeups triggered by the OOM kill 2197 * uncharges. Wake any sleepers explicitely. 2198 */ 2199 memcg_oom_recover(memcg); 2200 } else { 2201 /* 2202 * A system call can just return -ENOMEM, but if this 2203 * is a page fault and somebody else is handling the 2204 * OOM already, we need to sleep on the OOM waitqueue 2205 * for this memcg until the situation is resolved. 2206 * Which can take some time because it might be 2207 * handled by a userspace task. 2208 * 2209 * However, this is the charge context, which means 2210 * that we may sit on a large call stack and hold 2211 * various filesystem locks, the mmap_sem etc. and we 2212 * don't want the OOM handler to deadlock on them 2213 * while we sit here and wait. Store the current OOM 2214 * context in the task_struct, then return -ENOMEM. 2215 * At the end of the page fault handler, with the 2216 * stack unwound, pagefault_out_of_memory() will check 2217 * back with us by calling 2218 * mem_cgroup_oom_synchronize(), possibly putting the 2219 * task to sleep. 2220 */ 2221 current->memcg_oom.oom_locked = locked; 2222 current->memcg_oom.wakeups = wakeups; 2223 css_get(&memcg->css); 2224 current->memcg_oom.wait_on_memcg = memcg; 2225 } 2226 } 2227 2228 /** 2229 * mem_cgroup_oom_synchronize - complete memcg OOM handling 2230 * 2231 * This has to be called at the end of a page fault if the the memcg 2232 * OOM handler was enabled and the fault is returning %VM_FAULT_OOM. 2233 * 2234 * Memcg supports userspace OOM handling, so failed allocations must 2235 * sleep on a waitqueue until the userspace task resolves the 2236 * situation. Sleeping directly in the charge context with all kinds 2237 * of locks held is not a good idea, instead we remember an OOM state 2238 * in the task and mem_cgroup_oom_synchronize() has to be called at 2239 * the end of the page fault to put the task to sleep and clean up the 2240 * OOM state. 2241 * 2242 * Returns %true if an ongoing memcg OOM situation was detected and 2243 * finalized, %false otherwise. 2244 */ 2245 bool mem_cgroup_oom_synchronize(void) 2246 { 2247 struct oom_wait_info owait; 2248 struct mem_cgroup *memcg; 2249 2250 /* OOM is global, do not handle */ 2251 if (!current->memcg_oom.in_memcg_oom) 2252 return false; 2253 2254 /* 2255 * We invoked the OOM killer but there is a chance that a kill 2256 * did not free up any charges. Everybody else might already 2257 * be sleeping, so restart the fault and keep the rampage 2258 * going until some charges are released. 
2259 */
2260 memcg = current->memcg_oom.wait_on_memcg;
2261 if (!memcg)
2262 goto out;
2263
2264 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
2265 goto out_memcg;
2266
2267 owait.memcg = memcg;
2268 owait.wait.flags = 0;
2269 owait.wait.func = memcg_oom_wake_function;
2270 owait.wait.private = current;
2271 INIT_LIST_HEAD(&owait.wait.task_list);
2272
2273 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2274 /* Only sleep if we didn't miss any wakeups since OOM */
2275 if (atomic_read(&memcg->oom_wakeups) == current->memcg_oom.wakeups)
2276 schedule();
2277 finish_wait(&memcg_oom_waitq, &owait.wait);
2278 out_memcg:
2279 mem_cgroup_unmark_under_oom(memcg);
2280 if (current->memcg_oom.oom_locked) {
2281 mem_cgroup_oom_unlock(memcg);
2282 /*
2283 * There is no guarantee that an OOM-lock contender
2284 * sees the wakeups triggered by the OOM kill
2285 * uncharges. Wake any sleepers explicitly.
2286 */
2287 memcg_oom_recover(memcg);
2288 }
2289 css_put(&memcg->css);
2290 current->memcg_oom.wait_on_memcg = NULL;
2291 out:
2292 current->memcg_oom.in_memcg_oom = 0;
2293 return true;
2294 }
2295
2296 /*
2297 * Currently used to update mapped file statistics, but the routine can be
2298 * generalized to update other statistics as well.
2299 *
2300 * Notes: Race condition
2301 *
2302 * We usually use lock_page_cgroup() for accessing page_cgroup members, but
2303 * it tends to be costly. Under some conditions, however, we don't need
2304 * to do so _always_.
2305 *
2306 * Considering "charge", lock_page_cgroup() is not required because all
2307 * file-stat operations happen after a page is attached to the radix-tree.
2308 * There is no race with "charge".
2309 *
2310 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
2311 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
2312 * if we race with "uncharge". The statistics themselves are properly handled
2313 * via flags.
2314 *
2315 * Considering "move", this is the only case where we see a race. To make the
2316 * race window small, we check moving_account to detect the possibility of a
2317 * race, and take the lock if there is one.
2318 */
2319
2320 void __mem_cgroup_begin_update_page_stat(struct page *page,
2321 bool *locked, unsigned long *flags)
2322 {
2323 struct mem_cgroup *memcg;
2324 struct page_cgroup *pc;
2325
2326 pc = lookup_page_cgroup(page);
2327 again:
2328 memcg = pc->mem_cgroup;
2329 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2330 return;
2331 /*
2332 * If this memory cgroup is not under account moving, we don't
2333 * need to take move_lock_mem_cgroup(). Because we already hold
2334 * rcu_read_lock(), any calls to move_account will be delayed until
2335 * rcu_read_unlock() if mem_cgroup_stolen() == true.
2336 */
2337 if (!mem_cgroup_stolen(memcg))
2338 return;
2339
2340 move_lock_mem_cgroup(memcg, flags);
2341 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2342 move_unlock_mem_cgroup(memcg, flags);
2343 goto again;
2344 }
2345 *locked = true;
2346 }
2347
2348 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2349 {
2350 struct page_cgroup *pc = lookup_page_cgroup(page);
2351
2352 /*
2353 * It's guaranteed that pc->mem_cgroup never changes while the lock
2354 * is held, because any routine that modifies pc->mem_cgroup must
2355 * take move_lock_mem_cgroup().
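 *
 * A minimal usage sketch (illustrative; it assumes the inline
 * mem_cgroup_begin/end_update_page_stat() wrappers declared in
 * memcontrol.h, which fall back to these __ slow paths):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, 1);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);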
2356 */ 2357 move_unlock_mem_cgroup(pc->mem_cgroup, flags); 2358 } 2359 2360 void mem_cgroup_update_page_stat(struct page *page, 2361 enum mem_cgroup_stat_index idx, int val) 2362 { 2363 struct mem_cgroup *memcg; 2364 struct page_cgroup *pc = lookup_page_cgroup(page); 2365 unsigned long uninitialized_var(flags); 2366 2367 if (mem_cgroup_disabled()) 2368 return; 2369 2370 VM_BUG_ON(!rcu_read_lock_held()); 2371 memcg = pc->mem_cgroup; 2372 if (unlikely(!memcg || !PageCgroupUsed(pc))) 2373 return; 2374 2375 this_cpu_add(memcg->stat->count[idx], val); 2376 } 2377 2378 /* 2379 * size of first charge trial. "32" comes from vmscan.c's magic value. 2380 * TODO: maybe necessary to use big numbers in big irons. 2381 */ 2382 #define CHARGE_BATCH 32U 2383 struct memcg_stock_pcp { 2384 struct mem_cgroup *cached; /* this never be root cgroup */ 2385 unsigned int nr_pages; 2386 struct work_struct work; 2387 unsigned long flags; 2388 #define FLUSHING_CACHED_CHARGE 0 2389 }; 2390 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2391 static DEFINE_MUTEX(percpu_charge_mutex); 2392 2393 /** 2394 * consume_stock: Try to consume stocked charge on this cpu. 2395 * @memcg: memcg to consume from. 2396 * @nr_pages: how many pages to charge. 2397 * 2398 * The charges will only happen if @memcg matches the current cpu's memcg 2399 * stock, and at least @nr_pages are available in that stock. Failure to 2400 * service an allocation will refill the stock. 2401 * 2402 * returns true if successful, false otherwise. 2403 */ 2404 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2405 { 2406 struct memcg_stock_pcp *stock; 2407 bool ret = true; 2408 2409 if (nr_pages > CHARGE_BATCH) 2410 return false; 2411 2412 stock = &get_cpu_var(memcg_stock); 2413 if (memcg == stock->cached && stock->nr_pages >= nr_pages) 2414 stock->nr_pages -= nr_pages; 2415 else /* need to call res_counter_charge */ 2416 ret = false; 2417 put_cpu_var(memcg_stock); 2418 return ret; 2419 } 2420 2421 /* 2422 * Returns stocks cached in percpu to res_counter and reset cached information. 2423 */ 2424 static void drain_stock(struct memcg_stock_pcp *stock) 2425 { 2426 struct mem_cgroup *old = stock->cached; 2427 2428 if (stock->nr_pages) { 2429 unsigned long bytes = stock->nr_pages * PAGE_SIZE; 2430 2431 res_counter_uncharge(&old->res, bytes); 2432 if (do_swap_account) 2433 res_counter_uncharge(&old->memsw, bytes); 2434 stock->nr_pages = 0; 2435 } 2436 stock->cached = NULL; 2437 } 2438 2439 /* 2440 * This must be called under preempt disabled or must be called by 2441 * a thread which is pinned to local cpu. 2442 */ 2443 static void drain_local_stock(struct work_struct *dummy) 2444 { 2445 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 2446 drain_stock(stock); 2447 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2448 } 2449 2450 static void __init memcg_stock_init(void) 2451 { 2452 int cpu; 2453 2454 for_each_possible_cpu(cpu) { 2455 struct memcg_stock_pcp *stock = 2456 &per_cpu(memcg_stock, cpu); 2457 INIT_WORK(&stock->work, drain_local_stock); 2458 } 2459 } 2460 2461 /* 2462 * Cache charges(val) which is from res_counter, to local per_cpu area. 2463 * This will be consumed by consume_stock() function, later. 
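 *
 * A worked example (sketch): when __mem_cgroup_try_charge() charges a
 * single page, it actually charges CHARGE_BATCH (32) pages to the
 * res_counter and calls refill_stock() with the 31 pages it did not use.
 * The next 31 single-page charges on this cpu can then be served from
 * the stock by consume_stock() without touching the res_counter.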
2464 */ 2465 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2466 { 2467 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2468 2469 if (stock->cached != memcg) { /* reset if necessary */ 2470 drain_stock(stock); 2471 stock->cached = memcg; 2472 } 2473 stock->nr_pages += nr_pages; 2474 put_cpu_var(memcg_stock); 2475 } 2476 2477 /* 2478 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2479 * of the hierarchy under it. sync flag says whether we should block 2480 * until the work is done. 2481 */ 2482 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) 2483 { 2484 int cpu, curcpu; 2485 2486 /* Notify other cpus that system-wide "drain" is running */ 2487 get_online_cpus(); 2488 curcpu = get_cpu(); 2489 for_each_online_cpu(cpu) { 2490 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2491 struct mem_cgroup *memcg; 2492 2493 memcg = stock->cached; 2494 if (!memcg || !stock->nr_pages) 2495 continue; 2496 if (!mem_cgroup_same_or_subtree(root_memcg, memcg)) 2497 continue; 2498 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2499 if (cpu == curcpu) 2500 drain_local_stock(&stock->work); 2501 else 2502 schedule_work_on(cpu, &stock->work); 2503 } 2504 } 2505 put_cpu(); 2506 2507 if (!sync) 2508 goto out; 2509 2510 for_each_online_cpu(cpu) { 2511 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2512 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) 2513 flush_work(&stock->work); 2514 } 2515 out: 2516 put_online_cpus(); 2517 } 2518 2519 /* 2520 * Tries to drain stocked charges in other cpus. This function is asynchronous 2521 * and just put a work per cpu for draining localy on each cpu. Caller can 2522 * expects some charges will be back to res_counter later but cannot wait for 2523 * it. 2524 */ 2525 static void drain_all_stock_async(struct mem_cgroup *root_memcg) 2526 { 2527 /* 2528 * If someone calls draining, avoid adding more kworker runs. 2529 */ 2530 if (!mutex_trylock(&percpu_charge_mutex)) 2531 return; 2532 drain_all_stock(root_memcg, false); 2533 mutex_unlock(&percpu_charge_mutex); 2534 } 2535 2536 /* This is a synchronous drain interface. */ 2537 static void drain_all_stock_sync(struct mem_cgroup *root_memcg) 2538 { 2539 /* called when force_empty is called */ 2540 mutex_lock(&percpu_charge_mutex); 2541 drain_all_stock(root_memcg, true); 2542 mutex_unlock(&percpu_charge_mutex); 2543 } 2544 2545 /* 2546 * This function drains percpu counter value from DEAD cpu and 2547 * move it to local cpu. Note that this function can be preempted. 
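 *
 * It is invoked from the CPU hotplug callback below: on CPU_DEAD the
 * dead cpu's counters are folded into nocpu_base for every memcg, and
 * the dead cpu's charge stock is drained back to the res_counter.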
2548 */ 2549 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu) 2550 { 2551 int i; 2552 2553 spin_lock(&memcg->pcp_counter_lock); 2554 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 2555 long x = per_cpu(memcg->stat->count[i], cpu); 2556 2557 per_cpu(memcg->stat->count[i], cpu) = 0; 2558 memcg->nocpu_base.count[i] += x; 2559 } 2560 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2561 unsigned long x = per_cpu(memcg->stat->events[i], cpu); 2562 2563 per_cpu(memcg->stat->events[i], cpu) = 0; 2564 memcg->nocpu_base.events[i] += x; 2565 } 2566 spin_unlock(&memcg->pcp_counter_lock); 2567 } 2568 2569 static int memcg_cpu_hotplug_callback(struct notifier_block *nb, 2570 unsigned long action, 2571 void *hcpu) 2572 { 2573 int cpu = (unsigned long)hcpu; 2574 struct memcg_stock_pcp *stock; 2575 struct mem_cgroup *iter; 2576 2577 if (action == CPU_ONLINE) 2578 return NOTIFY_OK; 2579 2580 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 2581 return NOTIFY_OK; 2582 2583 for_each_mem_cgroup(iter) 2584 mem_cgroup_drain_pcp_counter(iter, cpu); 2585 2586 stock = &per_cpu(memcg_stock, cpu); 2587 drain_stock(stock); 2588 return NOTIFY_OK; 2589 } 2590 2591 2592 /* See __mem_cgroup_try_charge() for details */ 2593 enum { 2594 CHARGE_OK, /* success */ 2595 CHARGE_RETRY, /* need to retry but retry is not bad */ 2596 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */ 2597 CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. */ 2598 }; 2599 2600 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2601 unsigned int nr_pages, unsigned int min_pages, 2602 bool invoke_oom) 2603 { 2604 unsigned long csize = nr_pages * PAGE_SIZE; 2605 struct mem_cgroup *mem_over_limit; 2606 struct res_counter *fail_res; 2607 unsigned long flags = 0; 2608 int ret; 2609 2610 ret = res_counter_charge(&memcg->res, csize, &fail_res); 2611 2612 if (likely(!ret)) { 2613 if (!do_swap_account) 2614 return CHARGE_OK; 2615 ret = res_counter_charge(&memcg->memsw, csize, &fail_res); 2616 if (likely(!ret)) 2617 return CHARGE_OK; 2618 2619 res_counter_uncharge(&memcg->res, csize); 2620 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); 2621 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 2622 } else 2623 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); 2624 /* 2625 * Never reclaim on behalf of optional batching, retry with a 2626 * single page instead. 2627 */ 2628 if (nr_pages > min_pages) 2629 return CHARGE_RETRY; 2630 2631 if (!(gfp_mask & __GFP_WAIT)) 2632 return CHARGE_WOULDBLOCK; 2633 2634 if (gfp_mask & __GFP_NORETRY) 2635 return CHARGE_NOMEM; 2636 2637 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags); 2638 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2639 return CHARGE_RETRY; 2640 /* 2641 * Even though the limit is exceeded at this point, reclaim 2642 * may have been able to free some pages. Retry the charge 2643 * before killing the task. 2644 * 2645 * Only for regular pages, though: huge pages are rather 2646 * unlikely to succeed so close to the limit, and we fall back 2647 * to regular pages anyway in case of failure. 2648 */ 2649 if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret) 2650 return CHARGE_RETRY; 2651 2652 /* 2653 * At task move, charge accounts can be doubly counted. So, it's 2654 * better to wait until the end of task_move if something is going on. 
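 *
 * To recap the ladder above (illustrative summary): a batched charge
 * that fails is retried with a single page (CHARGE_RETRY); callers
 * without __GFP_WAIT get CHARGE_WOULDBLOCK; if reclaim opened enough
 * margin we return CHARGE_RETRY; otherwise we may invoke the OOM
 * killer and return CHARGE_NOMEM.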
2655 */
2656 if (mem_cgroup_wait_acct_move(mem_over_limit))
2657 return CHARGE_RETRY;
2658
2659 if (invoke_oom)
2660 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
2661
2662 return CHARGE_NOMEM;
2663 }
2664
2665 /*
2666 * __mem_cgroup_try_charge() does
2667 * 1. detect the memcg to be charged against from the passed *mm and *ptr,
2668 * 2. update the res_counter
2669 * 3. call memory reclaim if necessary.
2670 *
2671 * In some special cases, if the task is dying (fatal_signal_pending() or
2672 * TIF_MEMDIE set), this function returns -EINTR while writing root_mem_cgroup
2673 * to *ptr. There are two reasons for this: 1) dying threads should quit as
2674 * soon as possible without any hazard, and 2) all pages should have a valid
2675 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
2676 * pointer, that is treated as a charge to root_mem_cgroup.
2677 *
2678 * So __mem_cgroup_try_charge() will return
2679 * 0 ... on success, filling *ptr with a valid memcg pointer.
2680 * -ENOMEM ... charge failure because of resource limits.
2681 * -EINTR ... if the thread is dying. *ptr is filled with root_mem_cgroup.
2682 *
2683 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2684 * the oom-killer can be invoked.
2685 */
2686 static int __mem_cgroup_try_charge(struct mm_struct *mm,
2687 gfp_t gfp_mask,
2688 unsigned int nr_pages,
2689 struct mem_cgroup **ptr,
2690 bool oom)
2691 {
2692 unsigned int batch = max(CHARGE_BATCH, nr_pages);
2693 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2694 struct mem_cgroup *memcg = NULL;
2695 int ret;
2696
2697 /*
2698 * Unlike the global VM's OOM kill, we're not in a system-level memory
2699 * shortage. So allow dying processes to proceed, in addition to
2700 * MEMDIE processes.
2701 */
2702 if (unlikely(test_thread_flag(TIF_MEMDIE)
2703 || fatal_signal_pending(current)))
2704 goto bypass;
2705
2706 /*
2707 * We always charge the cgroup the mm_struct belongs to.
2708 * The mm_struct's mem_cgroup changes on task migration if the
2709 * thread group leader migrates. It's possible that mm is not
2710 * set; if so, charge the root memcg (happens for pagecache usage).
2711 */
2712 if (!*ptr && !mm)
2713 *ptr = root_mem_cgroup;
2714 again:
2715 if (*ptr) { /* css should be a valid one */
2716 memcg = *ptr;
2717 if (mem_cgroup_is_root(memcg))
2718 goto done;
2719 if (consume_stock(memcg, nr_pages))
2720 goto done;
2721 css_get(&memcg->css);
2722 } else {
2723 struct task_struct *p;
2724
2725 rcu_read_lock();
2726 p = rcu_dereference(mm->owner);
2727 /*
2728 * Because we don't take task_lock(), "p" can exit.
2729 * In that case, "memcg" can point to root, or p can be NULL due
2730 * to a race with swapoff. Then we have a small risk of
2731 * mis-accounting. But this kind of mis-accounting due to races
2732 * is always possible because we don't take cgroup_mutex();
2733 * that would be overkill, so we allow that small race here.
2734 * (*) swapoff et al. charge against the mm_struct, not against
2735 * the task_struct. So, mm->owner can be NULL.
2736 */
2737 memcg = mem_cgroup_from_task(p);
2738 if (!memcg)
2739 memcg = root_mem_cgroup;
2740 if (mem_cgroup_is_root(memcg)) {
2741 rcu_read_unlock();
2742 goto done;
2743 }
2744 if (consume_stock(memcg, nr_pages)) {
2745 /*
2746 * It seems dangerous to access memcg without css_get().
2747 * But considering how consume_stock() works, it's not
2748 * necessary. If consume_stock() succeeds, some charges
2749 * from this memcg are cached on this cpu.
So, we
2750 * don't need to call css_get()/css_tryget() before
2751 * calling consume_stock().
2752 */
2753 rcu_read_unlock();
2754 goto done;
2755 }
2756 /* after here, we may be blocked. we need to get refcnt */
2757 if (!css_tryget(&memcg->css)) {
2758 rcu_read_unlock();
2759 goto again;
2760 }
2761 rcu_read_unlock();
2762 }
2763
2764 do {
2765 bool invoke_oom = oom && !nr_oom_retries;
2766
2767 /* If killed, bypass charge */
2768 if (fatal_signal_pending(current)) {
2769 css_put(&memcg->css);
2770 goto bypass;
2771 }
2772
2773 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
2774 nr_pages, invoke_oom);
2775 switch (ret) {
2776 case CHARGE_OK:
2777 break;
2778 case CHARGE_RETRY: /* not in OOM situation but retry */
2779 batch = nr_pages;
2780 css_put(&memcg->css);
2781 memcg = NULL;
2782 goto again;
2783 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2784 css_put(&memcg->css);
2785 goto nomem;
2786 case CHARGE_NOMEM: /* OOM routine works */
2787 if (!oom || invoke_oom) {
2788 css_put(&memcg->css);
2789 goto nomem;
2790 }
2791 nr_oom_retries--;
2792 break;
2793 }
2794 } while (ret != CHARGE_OK);
2795
2796 if (batch > nr_pages)
2797 refill_stock(memcg, batch - nr_pages);
2798 css_put(&memcg->css);
2799 done:
2800 *ptr = memcg;
2801 return 0;
2802 nomem:
2803 *ptr = NULL;
2804 return -ENOMEM;
2805 bypass:
2806 *ptr = root_mem_cgroup;
2807 return -EINTR;
2808 }
2809
2810 /*
2811 * Sometimes we have to undo a charge we got by try_charge().
2812 * This function is for that: it does the uncharge and puts the css
2813 * refcnt gotten by try_charge().
2814 */
2815 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2816 unsigned int nr_pages)
2817 {
2818 if (!mem_cgroup_is_root(memcg)) {
2819 unsigned long bytes = nr_pages * PAGE_SIZE;
2820
2821 res_counter_uncharge(&memcg->res, bytes);
2822 if (do_swap_account)
2823 res_counter_uncharge(&memcg->memsw, bytes);
2824 }
2825 }
2826
2827 /*
2828 * Cancel charges in this cgroup; the canceled charges do not propagate to
2829 * the parent cgroup. This is useful when moving usage to the parent cgroup.
2830 */
2831 static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2832 unsigned int nr_pages)
2833 {
2834 unsigned long bytes = nr_pages * PAGE_SIZE;
2835
2836 if (mem_cgroup_is_root(memcg))
2837 return;
2838
2839 res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2840 if (do_swap_account)
2841 res_counter_uncharge_until(&memcg->memsw,
2842 memcg->memsw.parent, bytes);
2843 }
2844
2845 /*
2846 * A helper function to get a mem_cgroup from an ID. Must be called under
2847 * rcu_read_lock(). The caller is responsible for calling css_tryget() if
2848 * the mem_cgroup is used for charging. (Dropping a refcnt from swap can be
2849 * done against a removed memcg.)
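 *
 * Typical use (sketch, mirroring try_get_mem_cgroup_from_page() below):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_lookup(id);
 *	if (memcg && !css_tryget(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();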
2850 */ 2851 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) 2852 { 2853 struct cgroup_subsys_state *css; 2854 2855 /* ID 0 is unused ID */ 2856 if (!id) 2857 return NULL; 2858 css = css_lookup(&mem_cgroup_subsys, id); 2859 if (!css) 2860 return NULL; 2861 return mem_cgroup_from_css(css); 2862 } 2863 2864 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2865 { 2866 struct mem_cgroup *memcg = NULL; 2867 struct page_cgroup *pc; 2868 unsigned short id; 2869 swp_entry_t ent; 2870 2871 VM_BUG_ON(!PageLocked(page)); 2872 2873 pc = lookup_page_cgroup(page); 2874 lock_page_cgroup(pc); 2875 if (PageCgroupUsed(pc)) { 2876 memcg = pc->mem_cgroup; 2877 if (memcg && !css_tryget(&memcg->css)) 2878 memcg = NULL; 2879 } else if (PageSwapCache(page)) { 2880 ent.val = page_private(page); 2881 id = lookup_swap_cgroup_id(ent); 2882 rcu_read_lock(); 2883 memcg = mem_cgroup_lookup(id); 2884 if (memcg && !css_tryget(&memcg->css)) 2885 memcg = NULL; 2886 rcu_read_unlock(); 2887 } 2888 unlock_page_cgroup(pc); 2889 return memcg; 2890 } 2891 2892 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, 2893 struct page *page, 2894 unsigned int nr_pages, 2895 enum charge_type ctype, 2896 bool lrucare) 2897 { 2898 struct page_cgroup *pc = lookup_page_cgroup(page); 2899 struct zone *uninitialized_var(zone); 2900 struct lruvec *lruvec; 2901 bool was_on_lru = false; 2902 bool anon; 2903 2904 lock_page_cgroup(pc); 2905 VM_BUG_ON(PageCgroupUsed(pc)); 2906 /* 2907 * we don't need page_cgroup_lock about tail pages, becase they are not 2908 * accessed by any other context at this point. 2909 */ 2910 2911 /* 2912 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2913 * may already be on some other mem_cgroup's LRU. Take care of it. 2914 */ 2915 if (lrucare) { 2916 zone = page_zone(page); 2917 spin_lock_irq(&zone->lru_lock); 2918 if (PageLRU(page)) { 2919 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); 2920 ClearPageLRU(page); 2921 del_page_from_lru_list(page, lruvec, page_lru(page)); 2922 was_on_lru = true; 2923 } 2924 } 2925 2926 pc->mem_cgroup = memcg; 2927 /* 2928 * We access a page_cgroup asynchronously without lock_page_cgroup(). 2929 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup 2930 * is accessed after testing USED bit. To make pc->mem_cgroup visible 2931 * before USED bit, we need memory barrier here. 2932 * See mem_cgroup_add_lru_list(), etc. 2933 */ 2934 smp_wmb(); 2935 SetPageCgroupUsed(pc); 2936 2937 if (lrucare) { 2938 if (was_on_lru) { 2939 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup); 2940 VM_BUG_ON(PageLRU(page)); 2941 SetPageLRU(page); 2942 add_page_to_lru_list(page, lruvec, page_lru(page)); 2943 } 2944 spin_unlock_irq(&zone->lru_lock); 2945 } 2946 2947 if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON) 2948 anon = true; 2949 else 2950 anon = false; 2951 2952 mem_cgroup_charge_statistics(memcg, page, anon, nr_pages); 2953 unlock_page_cgroup(pc); 2954 2955 /* 2956 * "charge_statistics" updated event counter. Then, check it. 2957 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2958 * if they exceeds softlimit. 
2959 */ 2960 memcg_check_events(memcg, page); 2961 } 2962 2963 static DEFINE_MUTEX(set_limit_mutex); 2964 2965 #ifdef CONFIG_MEMCG_KMEM 2966 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) 2967 { 2968 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && 2969 (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK); 2970 } 2971 2972 /* 2973 * This is a bit cumbersome, but it is rarely used and avoids a backpointer 2974 * in the memcg_cache_params struct. 2975 */ 2976 static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) 2977 { 2978 struct kmem_cache *cachep; 2979 2980 VM_BUG_ON(p->is_root_cache); 2981 cachep = p->root_cache; 2982 return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)]; 2983 } 2984 2985 #ifdef CONFIG_SLABINFO 2986 static int mem_cgroup_slabinfo_read(struct cgroup_subsys_state *css, 2987 struct cftype *cft, struct seq_file *m) 2988 { 2989 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2990 struct memcg_cache_params *params; 2991 2992 if (!memcg_can_account_kmem(memcg)) 2993 return -EIO; 2994 2995 print_slabinfo_header(m); 2996 2997 mutex_lock(&memcg->slab_caches_mutex); 2998 list_for_each_entry(params, &memcg->memcg_slab_caches, list) 2999 cache_show(memcg_params_to_cache(params), m); 3000 mutex_unlock(&memcg->slab_caches_mutex); 3001 3002 return 0; 3003 } 3004 #endif 3005 3006 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) 3007 { 3008 struct res_counter *fail_res; 3009 struct mem_cgroup *_memcg; 3010 int ret = 0; 3011 bool may_oom; 3012 3013 ret = res_counter_charge(&memcg->kmem, size, &fail_res); 3014 if (ret) 3015 return ret; 3016 3017 /* 3018 * Conditions under which we can wait for the oom_killer. Those are 3019 * the same conditions tested by the core page allocator 3020 */ 3021 may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY); 3022 3023 _memcg = memcg; 3024 ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT, 3025 &_memcg, may_oom); 3026 3027 if (ret == -EINTR) { 3028 /* 3029 * __mem_cgroup_try_charge() chosed to bypass to root due to 3030 * OOM kill or fatal signal. Since our only options are to 3031 * either fail the allocation or charge it to this cgroup, do 3032 * it as a temporary condition. But we can't fail. From a 3033 * kmem/slab perspective, the cache has already been selected, 3034 * by mem_cgroup_kmem_get_cache(), so it is too late to change 3035 * our minds. 3036 * 3037 * This condition will only trigger if the task entered 3038 * memcg_charge_kmem in a sane state, but was OOM-killed during 3039 * __mem_cgroup_try_charge() above. Tasks that were already 3040 * dying when the allocation triggers should have been already 3041 * directed to the root cgroup in memcontrol.h 3042 */ 3043 res_counter_charge_nofail(&memcg->res, size, &fail_res); 3044 if (do_swap_account) 3045 res_counter_charge_nofail(&memcg->memsw, size, 3046 &fail_res); 3047 ret = 0; 3048 } else if (ret) 3049 res_counter_uncharge(&memcg->kmem, size); 3050 3051 return ret; 3052 } 3053 3054 static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) 3055 { 3056 res_counter_uncharge(&memcg->res, size); 3057 if (do_swap_account) 3058 res_counter_uncharge(&memcg->memsw, size); 3059 3060 /* Not down to 0 */ 3061 if (res_counter_uncharge(&memcg->kmem, size)) 3062 return; 3063 3064 /* 3065 * Releases a reference taken in kmem_cgroup_css_offline in case 3066 * this last uncharge is racing with the offlining code or it is 3067 * outliving the memcg existence. 
3068 * 3069 * The memory barrier imposed by test&clear is paired with the 3070 * explicit one in memcg_kmem_mark_dead(). 3071 */ 3072 if (memcg_kmem_test_and_clear_dead(memcg)) 3073 css_put(&memcg->css); 3074 } 3075 3076 void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) 3077 { 3078 if (!memcg) 3079 return; 3080 3081 mutex_lock(&memcg->slab_caches_mutex); 3082 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); 3083 mutex_unlock(&memcg->slab_caches_mutex); 3084 } 3085 3086 /* 3087 * helper for acessing a memcg's index. It will be used as an index in the 3088 * child cache array in kmem_cache, and also to derive its name. This function 3089 * will return -1 when this is not a kmem-limited memcg. 3090 */ 3091 int memcg_cache_id(struct mem_cgroup *memcg) 3092 { 3093 return memcg ? memcg->kmemcg_id : -1; 3094 } 3095 3096 /* 3097 * This ends up being protected by the set_limit mutex, during normal 3098 * operation, because that is its main call site. 3099 * 3100 * But when we create a new cache, we can call this as well if its parent 3101 * is kmem-limited. That will have to hold set_limit_mutex as well. 3102 */ 3103 int memcg_update_cache_sizes(struct mem_cgroup *memcg) 3104 { 3105 int num, ret; 3106 3107 num = ida_simple_get(&kmem_limited_groups, 3108 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 3109 if (num < 0) 3110 return num; 3111 /* 3112 * After this point, kmem_accounted (that we test atomically in 3113 * the beginning of this conditional), is no longer 0. This 3114 * guarantees only one process will set the following boolean 3115 * to true. We don't need test_and_set because we're protected 3116 * by the set_limit_mutex anyway. 3117 */ 3118 memcg_kmem_set_activated(memcg); 3119 3120 ret = memcg_update_all_caches(num+1); 3121 if (ret) { 3122 ida_simple_remove(&kmem_limited_groups, num); 3123 memcg_kmem_clear_activated(memcg); 3124 return ret; 3125 } 3126 3127 memcg->kmemcg_id = num; 3128 INIT_LIST_HEAD(&memcg->memcg_slab_caches); 3129 mutex_init(&memcg->slab_caches_mutex); 3130 return 0; 3131 } 3132 3133 static size_t memcg_caches_array_size(int num_groups) 3134 { 3135 ssize_t size; 3136 if (num_groups <= 0) 3137 return 0; 3138 3139 size = 2 * num_groups; 3140 if (size < MEMCG_CACHES_MIN_SIZE) 3141 size = MEMCG_CACHES_MIN_SIZE; 3142 else if (size > MEMCG_CACHES_MAX_SIZE) 3143 size = MEMCG_CACHES_MAX_SIZE; 3144 3145 return size; 3146 } 3147 3148 /* 3149 * We should update the current array size iff all caches updates succeed. This 3150 * can only be done from the slab side. The slab mutex needs to be held when 3151 * calling this. 
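 *
 * For sizing, memcg_caches_array_size() above doubles the requested
 * number of groups and clamps the result between MEMCG_CACHES_MIN_SIZE
 * and MEMCG_CACHES_MAX_SIZE, so a growing number of kmem-active memcgs
 * does not force a reallocation on every new group.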
3152 */ 3153 void memcg_update_array_size(int num) 3154 { 3155 if (num > memcg_limited_groups_array_size) 3156 memcg_limited_groups_array_size = memcg_caches_array_size(num); 3157 } 3158 3159 static void kmem_cache_destroy_work_func(struct work_struct *w); 3160 3161 int memcg_update_cache_size(struct kmem_cache *s, int num_groups) 3162 { 3163 struct memcg_cache_params *cur_params = s->memcg_params; 3164 3165 VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache); 3166 3167 if (num_groups > memcg_limited_groups_array_size) { 3168 int i; 3169 ssize_t size = memcg_caches_array_size(num_groups); 3170 3171 size *= sizeof(void *); 3172 size += offsetof(struct memcg_cache_params, memcg_caches); 3173 3174 s->memcg_params = kzalloc(size, GFP_KERNEL); 3175 if (!s->memcg_params) { 3176 s->memcg_params = cur_params; 3177 return -ENOMEM; 3178 } 3179 3180 s->memcg_params->is_root_cache = true; 3181 3182 /* 3183 * There is the chance it will be bigger than 3184 * memcg_limited_groups_array_size, if we failed an allocation 3185 * in a cache, in which case all caches updated before it, will 3186 * have a bigger array. 3187 * 3188 * But if that is the case, the data after 3189 * memcg_limited_groups_array_size is certainly unused 3190 */ 3191 for (i = 0; i < memcg_limited_groups_array_size; i++) { 3192 if (!cur_params->memcg_caches[i]) 3193 continue; 3194 s->memcg_params->memcg_caches[i] = 3195 cur_params->memcg_caches[i]; 3196 } 3197 3198 /* 3199 * Ideally, we would wait until all caches succeed, and only 3200 * then free the old one. But this is not worth the extra 3201 * pointer per-cache we'd have to have for this. 3202 * 3203 * It is not a big deal if some caches are left with a size 3204 * bigger than the others. And all updates will reset this 3205 * anyway. 3206 */ 3207 kfree(cur_params); 3208 } 3209 return 0; 3210 } 3211 3212 int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, 3213 struct kmem_cache *root_cache) 3214 { 3215 size_t size; 3216 3217 if (!memcg_kmem_enabled()) 3218 return 0; 3219 3220 if (!memcg) { 3221 size = offsetof(struct memcg_cache_params, memcg_caches); 3222 size += memcg_limited_groups_array_size * sizeof(void *); 3223 } else 3224 size = sizeof(struct memcg_cache_params); 3225 3226 s->memcg_params = kzalloc(size, GFP_KERNEL); 3227 if (!s->memcg_params) 3228 return -ENOMEM; 3229 3230 if (memcg) { 3231 s->memcg_params->memcg = memcg; 3232 s->memcg_params->root_cache = root_cache; 3233 INIT_WORK(&s->memcg_params->destroy, 3234 kmem_cache_destroy_work_func); 3235 } else 3236 s->memcg_params->is_root_cache = true; 3237 3238 return 0; 3239 } 3240 3241 void memcg_release_cache(struct kmem_cache *s) 3242 { 3243 struct kmem_cache *root; 3244 struct mem_cgroup *memcg; 3245 int id; 3246 3247 /* 3248 * This happens, for instance, when a root cache goes away before we 3249 * add any memcg. 3250 */ 3251 if (!s->memcg_params) 3252 return; 3253 3254 if (s->memcg_params->is_root_cache) 3255 goto out; 3256 3257 memcg = s->memcg_params->memcg; 3258 id = memcg_cache_id(memcg); 3259 3260 root = s->memcg_params->root_cache; 3261 root->memcg_params->memcg_caches[id] = NULL; 3262 3263 mutex_lock(&memcg->slab_caches_mutex); 3264 list_del(&s->memcg_params->list); 3265 mutex_unlock(&memcg->slab_caches_mutex); 3266 3267 css_put(&memcg->css); 3268 out: 3269 kfree(s->memcg_params); 3270 } 3271 3272 /* 3273 * During the creation a new cache, we need to disable our accounting mechanism 3274 * altogether. 
This is true even if we are not creating, but rather just 3275 * enqueing new caches to be created. 3276 * 3277 * This is because that process will trigger allocations; some visible, like 3278 * explicit kmallocs to auxiliary data structures, name strings and internal 3279 * cache structures; some well concealed, like INIT_WORK() that can allocate 3280 * objects during debug. 3281 * 3282 * If any allocation happens during memcg_kmem_get_cache, we will recurse back 3283 * to it. This may not be a bounded recursion: since the first cache creation 3284 * failed to complete (waiting on the allocation), we'll just try to create the 3285 * cache again, failing at the same point. 3286 * 3287 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of 3288 * memcg_kmem_skip_account. So we enclose anything that might allocate memory 3289 * inside the following two functions. 3290 */ 3291 static inline void memcg_stop_kmem_account(void) 3292 { 3293 VM_BUG_ON(!current->mm); 3294 current->memcg_kmem_skip_account++; 3295 } 3296 3297 static inline void memcg_resume_kmem_account(void) 3298 { 3299 VM_BUG_ON(!current->mm); 3300 current->memcg_kmem_skip_account--; 3301 } 3302 3303 static void kmem_cache_destroy_work_func(struct work_struct *w) 3304 { 3305 struct kmem_cache *cachep; 3306 struct memcg_cache_params *p; 3307 3308 p = container_of(w, struct memcg_cache_params, destroy); 3309 3310 cachep = memcg_params_to_cache(p); 3311 3312 /* 3313 * If we get down to 0 after shrink, we could delete right away. 3314 * However, memcg_release_pages() already puts us back in the workqueue 3315 * in that case. If we proceed deleting, we'll get a dangling 3316 * reference, and removing the object from the workqueue in that case 3317 * is unnecessary complication. We are not a fast path. 3318 * 3319 * Note that this case is fundamentally different from racing with 3320 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in 3321 * kmem_cache_shrink, not only we would be reinserting a dead cache 3322 * into the queue, but doing so from inside the worker racing to 3323 * destroy it. 3324 * 3325 * So if we aren't down to zero, we'll just schedule a worker and try 3326 * again 3327 */ 3328 if (atomic_read(&cachep->memcg_params->nr_pages) != 0) { 3329 kmem_cache_shrink(cachep); 3330 if (atomic_read(&cachep->memcg_params->nr_pages) == 0) 3331 return; 3332 } else 3333 kmem_cache_destroy(cachep); 3334 } 3335 3336 void mem_cgroup_destroy_cache(struct kmem_cache *cachep) 3337 { 3338 if (!cachep->memcg_params->dead) 3339 return; 3340 3341 /* 3342 * There are many ways in which we can get here. 3343 * 3344 * We can get to a memory-pressure situation while the delayed work is 3345 * still pending to run. The vmscan shrinkers can then release all 3346 * cache memory and get us to destruction. If this is the case, we'll 3347 * be executed twice, which is a bug (the second time will execute over 3348 * bogus data). In this case, cancelling the work should be fine. 3349 * 3350 * But we can also get here from the worker itself, if 3351 * kmem_cache_shrink is enough to shake all the remaining objects and 3352 * get the page count to 0. In this case, we'll deadlock if we try to 3353 * cancel the work (the worker runs with an internal lock held, which 3354 * is the same lock we would hold for cancel_work_sync().) 
3355 * 3356 * Since we can't possibly know who got us here, just refrain from 3357 * running if there is already work pending 3358 */ 3359 if (work_pending(&cachep->memcg_params->destroy)) 3360 return; 3361 /* 3362 * We have to defer the actual destroying to a workqueue, because 3363 * we might currently be in a context that cannot sleep. 3364 */ 3365 schedule_work(&cachep->memcg_params->destroy); 3366 } 3367 3368 /* 3369 * This lock protects updaters, not readers. We want readers to be as fast as 3370 * they can, and they will either see NULL or a valid cache value. Our model 3371 * allow them to see NULL, in which case the root memcg will be selected. 3372 * 3373 * We need this lock because multiple allocations to the same cache from a non 3374 * will span more than one worker. Only one of them can create the cache. 3375 */ 3376 static DEFINE_MUTEX(memcg_cache_mutex); 3377 3378 /* 3379 * Called with memcg_cache_mutex held 3380 */ 3381 static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, 3382 struct kmem_cache *s) 3383 { 3384 struct kmem_cache *new; 3385 static char *tmp_name = NULL; 3386 3387 lockdep_assert_held(&memcg_cache_mutex); 3388 3389 /* 3390 * kmem_cache_create_memcg duplicates the given name and 3391 * cgroup_name for this name requires RCU context. 3392 * This static temporary buffer is used to prevent from 3393 * pointless shortliving allocation. 3394 */ 3395 if (!tmp_name) { 3396 tmp_name = kmalloc(PATH_MAX, GFP_KERNEL); 3397 if (!tmp_name) 3398 return NULL; 3399 } 3400 3401 rcu_read_lock(); 3402 snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name, 3403 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup)); 3404 rcu_read_unlock(); 3405 3406 new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align, 3407 (s->flags & ~SLAB_PANIC), s->ctor, s); 3408 3409 if (new) 3410 new->allocflags |= __GFP_KMEMCG; 3411 3412 return new; 3413 } 3414 3415 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, 3416 struct kmem_cache *cachep) 3417 { 3418 struct kmem_cache *new_cachep; 3419 int idx; 3420 3421 BUG_ON(!memcg_can_account_kmem(memcg)); 3422 3423 idx = memcg_cache_id(memcg); 3424 3425 mutex_lock(&memcg_cache_mutex); 3426 new_cachep = cachep->memcg_params->memcg_caches[idx]; 3427 if (new_cachep) { 3428 css_put(&memcg->css); 3429 goto out; 3430 } 3431 3432 new_cachep = kmem_cache_dup(memcg, cachep); 3433 if (new_cachep == NULL) { 3434 new_cachep = cachep; 3435 css_put(&memcg->css); 3436 goto out; 3437 } 3438 3439 atomic_set(&new_cachep->memcg_params->nr_pages , 0); 3440 3441 cachep->memcg_params->memcg_caches[idx] = new_cachep; 3442 /* 3443 * the readers won't lock, make sure everybody sees the updated value, 3444 * so they won't put stuff in the queue again for no reason 3445 */ 3446 wmb(); 3447 out: 3448 mutex_unlock(&memcg_cache_mutex); 3449 return new_cachep; 3450 } 3451 3452 void kmem_cache_destroy_memcg_children(struct kmem_cache *s) 3453 { 3454 struct kmem_cache *c; 3455 int i; 3456 3457 if (!s->memcg_params) 3458 return; 3459 if (!s->memcg_params->is_root_cache) 3460 return; 3461 3462 /* 3463 * If the cache is being destroyed, we trust that there is no one else 3464 * requesting objects from it. Even if there are, the sanity checks in 3465 * kmem_cache_destroy should caught this ill-case. 3466 * 3467 * Still, we don't want anyone else freeing memcg_caches under our 3468 * noses, which can happen if a new memcg comes to life. As usual, 3469 * we'll take the set_limit_mutex to protect ourselves against this. 
3470 */ 3471 mutex_lock(&set_limit_mutex); 3472 for (i = 0; i < memcg_limited_groups_array_size; i++) { 3473 c = s->memcg_params->memcg_caches[i]; 3474 if (!c) 3475 continue; 3476 3477 /* 3478 * We will now manually delete the caches, so to avoid races 3479 * we need to cancel all pending destruction workers and 3480 * proceed with destruction ourselves. 3481 * 3482 * kmem_cache_destroy() will call kmem_cache_shrink internally, 3483 * and that could spawn the workers again: it is likely that 3484 * the cache still have active pages until this very moment. 3485 * This would lead us back to mem_cgroup_destroy_cache. 3486 * 3487 * But that will not execute at all if the "dead" flag is not 3488 * set, so flip it down to guarantee we are in control. 3489 */ 3490 c->memcg_params->dead = false; 3491 cancel_work_sync(&c->memcg_params->destroy); 3492 kmem_cache_destroy(c); 3493 } 3494 mutex_unlock(&set_limit_mutex); 3495 } 3496 3497 struct create_work { 3498 struct mem_cgroup *memcg; 3499 struct kmem_cache *cachep; 3500 struct work_struct work; 3501 }; 3502 3503 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3504 { 3505 struct kmem_cache *cachep; 3506 struct memcg_cache_params *params; 3507 3508 if (!memcg_kmem_is_active(memcg)) 3509 return; 3510 3511 mutex_lock(&memcg->slab_caches_mutex); 3512 list_for_each_entry(params, &memcg->memcg_slab_caches, list) { 3513 cachep = memcg_params_to_cache(params); 3514 cachep->memcg_params->dead = true; 3515 schedule_work(&cachep->memcg_params->destroy); 3516 } 3517 mutex_unlock(&memcg->slab_caches_mutex); 3518 } 3519 3520 static void memcg_create_cache_work_func(struct work_struct *w) 3521 { 3522 struct create_work *cw; 3523 3524 cw = container_of(w, struct create_work, work); 3525 memcg_create_kmem_cache(cw->memcg, cw->cachep); 3526 kfree(cw); 3527 } 3528 3529 /* 3530 * Enqueue the creation of a per-memcg kmem_cache. 3531 */ 3532 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3533 struct kmem_cache *cachep) 3534 { 3535 struct create_work *cw; 3536 3537 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT); 3538 if (cw == NULL) { 3539 css_put(&memcg->css); 3540 return; 3541 } 3542 3543 cw->memcg = memcg; 3544 cw->cachep = cachep; 3545 3546 INIT_WORK(&cw->work, memcg_create_cache_work_func); 3547 schedule_work(&cw->work); 3548 } 3549 3550 static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, 3551 struct kmem_cache *cachep) 3552 { 3553 /* 3554 * We need to stop accounting when we kmalloc, because if the 3555 * corresponding kmalloc cache is not yet created, the first allocation 3556 * in __memcg_create_cache_enqueue will recurse. 3557 * 3558 * However, it is better to enclose the whole function. Depending on 3559 * the debugging options enabled, INIT_WORK(), for instance, can 3560 * trigger an allocation. This too, will make us recurse. Because at 3561 * this point we can't allow ourselves back into memcg_kmem_get_cache, 3562 * the safest choice is to do it like this, wrapping the whole function. 3563 */ 3564 memcg_stop_kmem_account(); 3565 __memcg_create_cache_enqueue(memcg, cachep); 3566 memcg_resume_kmem_account(); 3567 } 3568 /* 3569 * Return the kmem_cache we're supposed to use for a slab allocation. 3570 * We try to use the current memcg's version of the cache. 3571 * 3572 * If the cache does not exist yet, if we are the first user of it, 3573 * we either create it immediately, if possible, or create it asynchronously 3574 * in a workqueue. 
3575 * In the latter case, we will let the current allocation go through with 3576 * the original cache. 3577 * 3578 * Can't be called in interrupt context or from kernel threads. 3579 * This function needs to be called with rcu_read_lock() held. 3580 */ 3581 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, 3582 gfp_t gfp) 3583 { 3584 struct mem_cgroup *memcg; 3585 int idx; 3586 3587 VM_BUG_ON(!cachep->memcg_params); 3588 VM_BUG_ON(!cachep->memcg_params->is_root_cache); 3589 3590 if (!current->mm || current->memcg_kmem_skip_account) 3591 return cachep; 3592 3593 rcu_read_lock(); 3594 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner)); 3595 3596 if (!memcg_can_account_kmem(memcg)) 3597 goto out; 3598 3599 idx = memcg_cache_id(memcg); 3600 3601 /* 3602 * barrier to mare sure we're always seeing the up to date value. The 3603 * code updating memcg_caches will issue a write barrier to match this. 3604 */ 3605 read_barrier_depends(); 3606 if (likely(cachep->memcg_params->memcg_caches[idx])) { 3607 cachep = cachep->memcg_params->memcg_caches[idx]; 3608 goto out; 3609 } 3610 3611 /* The corresponding put will be done in the workqueue. */ 3612 if (!css_tryget(&memcg->css)) 3613 goto out; 3614 rcu_read_unlock(); 3615 3616 /* 3617 * If we are in a safe context (can wait, and not in interrupt 3618 * context), we could be be predictable and return right away. 3619 * This would guarantee that the allocation being performed 3620 * already belongs in the new cache. 3621 * 3622 * However, there are some clashes that can arrive from locking. 3623 * For instance, because we acquire the slab_mutex while doing 3624 * kmem_cache_dup, this means no further allocation could happen 3625 * with the slab_mutex held. 3626 * 3627 * Also, because cache creation issue get_online_cpus(), this 3628 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex, 3629 * that ends up reversed during cpu hotplug. (cpuset allocates 3630 * a bunch of GFP_KERNEL memory during cpuup). Due to all that, 3631 * better to defer everything. 3632 */ 3633 memcg_create_cache_enqueue(memcg, cachep); 3634 return cachep; 3635 out: 3636 rcu_read_unlock(); 3637 return cachep; 3638 } 3639 EXPORT_SYMBOL(__memcg_kmem_get_cache); 3640 3641 /* 3642 * We need to verify if the allocation against current->mm->owner's memcg is 3643 * possible for the given order. But the page is not allocated yet, so we'll 3644 * need a further commit step to do the final arrangements. 3645 * 3646 * It is possible for the task to switch cgroups in this mean time, so at 3647 * commit time, we can't rely on task conversion any longer. We'll then use 3648 * the handle argument to return to the caller which cgroup we should commit 3649 * against. We could also return the memcg directly and avoid the pointer 3650 * passing, but a boolean return value gives better semantics considering 3651 * the compiled-out case as well. 3652 * 3653 * Returning true means the allocation is possible. 3654 */ 3655 bool 3656 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) 3657 { 3658 struct mem_cgroup *memcg; 3659 int ret; 3660 3661 *_memcg = NULL; 3662 3663 /* 3664 * Disabling accounting is only relevant for some specific memcg 3665 * internal allocations. Therefore we would initially not have such 3666 * check here, since direct calls to the page allocator that are marked 3667 * with GFP_KMEMCG only happen outside memcg core. 
We are mostly 3668 * concerned with cache allocations, and by having this test at 3669 * memcg_kmem_get_cache, we are already able to relay the allocation to 3670 * the root cache and bypass the memcg cache altogether. 3671 * 3672 * There is one exception, though: the SLUB allocator does not create 3673 * large order caches, but rather service large kmallocs directly from 3674 * the page allocator. Therefore, the following sequence when backed by 3675 * the SLUB allocator: 3676 * 3677 * memcg_stop_kmem_account(); 3678 * kmalloc(<large_number>) 3679 * memcg_resume_kmem_account(); 3680 * 3681 * would effectively ignore the fact that we should skip accounting, 3682 * since it will drive us directly to this function without passing 3683 * through the cache selector memcg_kmem_get_cache. Such large 3684 * allocations are extremely rare but can happen, for instance, for the 3685 * cache arrays. We bring this test here. 3686 */ 3687 if (!current->mm || current->memcg_kmem_skip_account) 3688 return true; 3689 3690 memcg = try_get_mem_cgroup_from_mm(current->mm); 3691 3692 /* 3693 * very rare case described in mem_cgroup_from_task. Unfortunately there 3694 * isn't much we can do without complicating this too much, and it would 3695 * be gfp-dependent anyway. Just let it go 3696 */ 3697 if (unlikely(!memcg)) 3698 return true; 3699 3700 if (!memcg_can_account_kmem(memcg)) { 3701 css_put(&memcg->css); 3702 return true; 3703 } 3704 3705 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order); 3706 if (!ret) 3707 *_memcg = memcg; 3708 3709 css_put(&memcg->css); 3710 return (ret == 0); 3711 } 3712 3713 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, 3714 int order) 3715 { 3716 struct page_cgroup *pc; 3717 3718 VM_BUG_ON(mem_cgroup_is_root(memcg)); 3719 3720 /* The page allocation failed. Revert */ 3721 if (!page) { 3722 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3723 return; 3724 } 3725 3726 pc = lookup_page_cgroup(page); 3727 lock_page_cgroup(pc); 3728 pc->mem_cgroup = memcg; 3729 SetPageCgroupUsed(pc); 3730 unlock_page_cgroup(pc); 3731 } 3732 3733 void __memcg_kmem_uncharge_pages(struct page *page, int order) 3734 { 3735 struct mem_cgroup *memcg = NULL; 3736 struct page_cgroup *pc; 3737 3738 3739 pc = lookup_page_cgroup(page); 3740 /* 3741 * Fast unlocked return. Theoretically might have changed, have to 3742 * check again after locking. 3743 */ 3744 if (!PageCgroupUsed(pc)) 3745 return; 3746 3747 lock_page_cgroup(pc); 3748 if (PageCgroupUsed(pc)) { 3749 memcg = pc->mem_cgroup; 3750 ClearPageCgroupUsed(pc); 3751 } 3752 unlock_page_cgroup(pc); 3753 3754 /* 3755 * We trust that only if there is a memcg associated with the page, it 3756 * is a valid allocation 3757 */ 3758 if (!memcg) 3759 return; 3760 3761 VM_BUG_ON(mem_cgroup_is_root(memcg)); 3762 memcg_uncharge_kmem(memcg, PAGE_SIZE << order); 3763 } 3764 #else 3765 static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) 3766 { 3767 } 3768 #endif /* CONFIG_MEMCG_KMEM */ 3769 3770 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3771 3772 #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION) 3773 /* 3774 * Because tail pages are not marked as "used", set it. We're under 3775 * zone->lru_lock, 'splitting on pmd' and compound_lock. 3776 * charge/uncharge will be never happen and move_account() is done under 3777 * compound_lock(), so we don't have to take care of races. 
3778 */ 3779 void mem_cgroup_split_huge_fixup(struct page *head) 3780 { 3781 struct page_cgroup *head_pc = lookup_page_cgroup(head); 3782 struct page_cgroup *pc; 3783 struct mem_cgroup *memcg; 3784 int i; 3785 3786 if (mem_cgroup_disabled()) 3787 return; 3788 3789 memcg = head_pc->mem_cgroup; 3790 for (i = 1; i < HPAGE_PMD_NR; i++) { 3791 pc = head_pc + i; 3792 pc->mem_cgroup = memcg; 3793 smp_wmb();/* see __commit_charge() */ 3794 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; 3795 } 3796 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 3797 HPAGE_PMD_NR); 3798 } 3799 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3800 3801 static inline 3802 void mem_cgroup_move_account_page_stat(struct mem_cgroup *from, 3803 struct mem_cgroup *to, 3804 unsigned int nr_pages, 3805 enum mem_cgroup_stat_index idx) 3806 { 3807 /* Update stat data for mem_cgroup */ 3808 preempt_disable(); 3809 WARN_ON_ONCE(from->stat->count[idx] < nr_pages); 3810 __this_cpu_add(from->stat->count[idx], -nr_pages); 3811 __this_cpu_add(to->stat->count[idx], nr_pages); 3812 preempt_enable(); 3813 } 3814 3815 /** 3816 * mem_cgroup_move_account - move account of the page 3817 * @page: the page 3818 * @nr_pages: number of regular pages (>1 for huge pages) 3819 * @pc: page_cgroup of the page. 3820 * @from: mem_cgroup which the page is moved from. 3821 * @to: mem_cgroup which the page is moved to. @from != @to. 3822 * 3823 * The caller must confirm following. 3824 * - page is not on LRU (isolate_page() is useful.) 3825 * - compound_lock is held when nr_pages > 1 3826 * 3827 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 3828 * from old cgroup. 3829 */ 3830 static int mem_cgroup_move_account(struct page *page, 3831 unsigned int nr_pages, 3832 struct page_cgroup *pc, 3833 struct mem_cgroup *from, 3834 struct mem_cgroup *to) 3835 { 3836 unsigned long flags; 3837 int ret; 3838 bool anon = PageAnon(page); 3839 3840 VM_BUG_ON(from == to); 3841 VM_BUG_ON(PageLRU(page)); 3842 /* 3843 * The page is isolated from LRU. So, collapse function 3844 * will not handle this page. But page splitting can happen. 3845 * Do this check under compound_page_lock(). The caller should 3846 * hold it. 3847 */ 3848 ret = -EBUSY; 3849 if (nr_pages > 1 && !PageTransHuge(page)) 3850 goto out; 3851 3852 lock_page_cgroup(pc); 3853 3854 ret = -EINVAL; 3855 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) 3856 goto unlock; 3857 3858 move_lock_mem_cgroup(from, &flags); 3859 3860 if (!anon && page_mapped(page)) 3861 mem_cgroup_move_account_page_stat(from, to, nr_pages, 3862 MEM_CGROUP_STAT_FILE_MAPPED); 3863 3864 if (PageWriteback(page)) 3865 mem_cgroup_move_account_page_stat(from, to, nr_pages, 3866 MEM_CGROUP_STAT_WRITEBACK); 3867 3868 mem_cgroup_charge_statistics(from, page, anon, -nr_pages); 3869 3870 /* caller should have done css_get */ 3871 pc->mem_cgroup = to; 3872 mem_cgroup_charge_statistics(to, page, anon, nr_pages); 3873 move_unlock_mem_cgroup(from, &flags); 3874 ret = 0; 3875 unlock: 3876 unlock_page_cgroup(pc); 3877 /* 3878 * check events 3879 */ 3880 memcg_check_events(to, page); 3881 memcg_check_events(from, page); 3882 out: 3883 return ret; 3884 } 3885 3886 /** 3887 * mem_cgroup_move_parent - moves page to the parent group 3888 * @page: the page to move 3889 * @pc: page_cgroup of the page 3890 * @child: page's cgroup 3891 * 3892 * move charges to its parent or the root cgroup if the group has no 3893 * parent (aka use_hierarchy==0). 
3894 * Although this might fail (get_page_unless_zero, isolate_lru_page or 3895 * mem_cgroup_move_account fails) the failure is always temporary and 3896 * it signals a race with a page removal/uncharge or migration. In the 3897 * first case the page is on the way out and it will vanish from the LRU 3898 * on the next attempt and the call should be retried later. 3899 * Isolation from the LRU fails only if page has been isolated from 3900 * the LRU since we looked at it and that usually means either global 3901 * reclaim or migration going on. The page will either get back to the 3902 * LRU or vanish. 3903 * Finaly mem_cgroup_move_account fails only if the page got uncharged 3904 * (!PageCgroupUsed) or moved to a different group. The page will 3905 * disappear in the next attempt. 3906 */ 3907 static int mem_cgroup_move_parent(struct page *page, 3908 struct page_cgroup *pc, 3909 struct mem_cgroup *child) 3910 { 3911 struct mem_cgroup *parent; 3912 unsigned int nr_pages; 3913 unsigned long uninitialized_var(flags); 3914 int ret; 3915 3916 VM_BUG_ON(mem_cgroup_is_root(child)); 3917 3918 ret = -EBUSY; 3919 if (!get_page_unless_zero(page)) 3920 goto out; 3921 if (isolate_lru_page(page)) 3922 goto put; 3923 3924 nr_pages = hpage_nr_pages(page); 3925 3926 parent = parent_mem_cgroup(child); 3927 /* 3928 * If no parent, move charges to root cgroup. 3929 */ 3930 if (!parent) 3931 parent = root_mem_cgroup; 3932 3933 if (nr_pages > 1) { 3934 VM_BUG_ON(!PageTransHuge(page)); 3935 flags = compound_lock_irqsave(page); 3936 } 3937 3938 ret = mem_cgroup_move_account(page, nr_pages, 3939 pc, child, parent); 3940 if (!ret) 3941 __mem_cgroup_cancel_local_charge(child, nr_pages); 3942 3943 if (nr_pages > 1) 3944 compound_unlock_irqrestore(page, flags); 3945 putback_lru_page(page); 3946 put: 3947 put_page(page); 3948 out: 3949 return ret; 3950 } 3951 3952 /* 3953 * Charge the memory controller for page usage. 3954 * Return 3955 * 0 if the charge was successful 3956 * < 0 if the cgroup is over its limit 3957 */ 3958 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, 3959 gfp_t gfp_mask, enum charge_type ctype) 3960 { 3961 struct mem_cgroup *memcg = NULL; 3962 unsigned int nr_pages = 1; 3963 bool oom = true; 3964 int ret; 3965 3966 if (PageTransHuge(page)) { 3967 nr_pages <<= compound_order(page); 3968 VM_BUG_ON(!PageTransHuge(page)); 3969 /* 3970 * Never OOM-kill a process for a huge page. The 3971 * fault handler will fall back to regular pages. 3972 */ 3973 oom = false; 3974 } 3975 3976 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); 3977 if (ret == -ENOMEM) 3978 return ret; 3979 __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false); 3980 return 0; 3981 } 3982 3983 int mem_cgroup_newpage_charge(struct page *page, 3984 struct mm_struct *mm, gfp_t gfp_mask) 3985 { 3986 if (mem_cgroup_disabled()) 3987 return 0; 3988 VM_BUG_ON(page_mapped(page)); 3989 VM_BUG_ON(page->mapping && !PageAnon(page)); 3990 VM_BUG_ON(!mm); 3991 return mem_cgroup_charge_common(page, mm, gfp_mask, 3992 MEM_CGROUP_CHARGE_TYPE_ANON); 3993 } 3994 3995 /* 3996 * While swap-in, try_charge -> commit or cancel, the page is locked. 3997 * And when try_charge() successfully returns, one refcnt to memcg without 3998 * struct page_cgroup is acquired. 
This refcnt will be consumed by 3999 * "commit()" or removed by "cancel()" 4000 */ 4001 static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm, 4002 struct page *page, 4003 gfp_t mask, 4004 struct mem_cgroup **memcgp) 4005 { 4006 struct mem_cgroup *memcg; 4007 struct page_cgroup *pc; 4008 int ret; 4009 4010 pc = lookup_page_cgroup(page); 4011 /* 4012 * Every swap fault against a single page tries to charge the 4013 * page, bail as early as possible. shmem_unuse() encounters 4014 * already charged pages, too. The USED bit is protected by 4015 * the page lock, which serializes swap cache removal, which 4016 * in turn serializes uncharging. 4017 */ 4018 if (PageCgroupUsed(pc)) 4019 return 0; 4020 if (!do_swap_account) 4021 goto charge_cur_mm; 4022 memcg = try_get_mem_cgroup_from_page(page); 4023 if (!memcg) 4024 goto charge_cur_mm; 4025 *memcgp = memcg; 4026 ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true); 4027 css_put(&memcg->css); 4028 if (ret == -EINTR) 4029 ret = 0; 4030 return ret; 4031 charge_cur_mm: 4032 ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true); 4033 if (ret == -EINTR) 4034 ret = 0; 4035 return ret; 4036 } 4037 4038 int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page, 4039 gfp_t gfp_mask, struct mem_cgroup **memcgp) 4040 { 4041 *memcgp = NULL; 4042 if (mem_cgroup_disabled()) 4043 return 0; 4044 /* 4045 * A racing thread's fault, or swapoff, may have already 4046 * updated the pte, and even removed page from swap cache: in 4047 * those cases unuse_pte()'s pte_same() test will fail; but 4048 * there's also a KSM case which does need to charge the page. 4049 */ 4050 if (!PageSwapCache(page)) { 4051 int ret; 4052 4053 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true); 4054 if (ret == -EINTR) 4055 ret = 0; 4056 return ret; 4057 } 4058 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp); 4059 } 4060 4061 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg) 4062 { 4063 if (mem_cgroup_disabled()) 4064 return; 4065 if (!memcg) 4066 return; 4067 __mem_cgroup_cancel_charge(memcg, 1); 4068 } 4069 4070 static void 4071 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg, 4072 enum charge_type ctype) 4073 { 4074 if (mem_cgroup_disabled()) 4075 return; 4076 if (!memcg) 4077 return; 4078 4079 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true); 4080 /* 4081 * Now swap is on-memory. This means this page may be 4082 * counted both as mem and swap....double count. 4083 * Fix it by uncharging from memsw. Basically, this SwapCache is stable 4084 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() 4085 * may call delete_from_swap_cache() before reach here. 
4086 */
4087 if (do_swap_account && PageSwapCache(page)) {
4088 swp_entry_t ent = {.val = page_private(page)};
4089 mem_cgroup_uncharge_swap(ent);
4090 }
4091 }
4092
4093 void mem_cgroup_commit_charge_swapin(struct page *page,
4094 struct mem_cgroup *memcg)
4095 {
4096 __mem_cgroup_commit_charge_swapin(page, memcg,
4097 MEM_CGROUP_CHARGE_TYPE_ANON);
4098 }
4099
4100 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
4101 gfp_t gfp_mask)
4102 {
4103 struct mem_cgroup *memcg = NULL;
4104 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4105 int ret;
4106
4107 if (mem_cgroup_disabled())
4108 return 0;
4109 if (PageCompound(page))
4110 return 0;
4111
4112 if (!PageSwapCache(page))
4113 ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
4114 else { /* page is swapcache/shmem */
4115 ret = __mem_cgroup_try_charge_swapin(mm, page,
4116 gfp_mask, &memcg);
4117 if (!ret)
4118 __mem_cgroup_commit_charge_swapin(page, memcg, type);
4119 }
4120 return ret;
4121 }
4122
4123 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
4124 unsigned int nr_pages,
4125 const enum charge_type ctype)
4126 {
4127 struct memcg_batch_info *batch = NULL;
4128 bool uncharge_memsw = true;
4129
4130 /* If swapout, usage of swap doesn't decrease */
4131 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
4132 uncharge_memsw = false;
4133
4134 batch = &current->memcg_batch;
4135 /*
4136 * Usually, we do css_get() when we remember the memcg pointer.
4137 * But in this case, we keep res->usage until the end of a series of
4138 * uncharges. Then, it's ok to ignore memcg's refcnt.
4139 */
4140 if (!batch->memcg)
4141 batch->memcg = memcg;
4142 /*
4143 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
4144 * In those cases, all pages freed continuously can be expected to be in
4145 * the same cgroup and we have a chance to coalesce uncharges.
4146 * But we do uncharge one by one if this is killed by OOM (TIF_MEMDIE)
4147 * because we want to do uncharge as soon as possible.
4148 */
4149
4150 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
4151 goto direct_uncharge;
4152
4153 if (nr_pages > 1)
4154 goto direct_uncharge;
4155
4156 /*
4157 * In the typical case, batch->memcg == memcg. This means we can
4158 * merge a series of uncharges into a single res_counter uncharge.
4159 * If not, we uncharge the res_counter one by one.
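 *
 * As an illustrative sketch (not part of the original file), a batching
 * caller such as the truncate or unmap path uses the helpers defined
 * further below in roughly this shape:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being released:
 *		mem_cgroup_uncharge_cache_page(page);  /* or _uncharge_page() */
 *	mem_cgroup_uncharge_end();
 *
 * Between start() and end() the individual uncharges only accumulate in
 * current->memcg_batch; the res_counter is then decremented once in
 * mem_cgroup_uncharge_end().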
4160 */ 4161 if (batch->memcg != memcg) 4162 goto direct_uncharge; 4163 /* remember freed charge and uncharge it later */ 4164 batch->nr_pages++; 4165 if (uncharge_memsw) 4166 batch->memsw_nr_pages++; 4167 return; 4168 direct_uncharge: 4169 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE); 4170 if (uncharge_memsw) 4171 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE); 4172 if (unlikely(batch->memcg != memcg)) 4173 memcg_oom_recover(memcg); 4174 } 4175 4176 /* 4177 * uncharge if !page_mapped(page) 4178 */ 4179 static struct mem_cgroup * 4180 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, 4181 bool end_migration) 4182 { 4183 struct mem_cgroup *memcg = NULL; 4184 unsigned int nr_pages = 1; 4185 struct page_cgroup *pc; 4186 bool anon; 4187 4188 if (mem_cgroup_disabled()) 4189 return NULL; 4190 4191 if (PageTransHuge(page)) { 4192 nr_pages <<= compound_order(page); 4193 VM_BUG_ON(!PageTransHuge(page)); 4194 } 4195 /* 4196 * Check if our page_cgroup is valid 4197 */ 4198 pc = lookup_page_cgroup(page); 4199 if (unlikely(!PageCgroupUsed(pc))) 4200 return NULL; 4201 4202 lock_page_cgroup(pc); 4203 4204 memcg = pc->mem_cgroup; 4205 4206 if (!PageCgroupUsed(pc)) 4207 goto unlock_out; 4208 4209 anon = PageAnon(page); 4210 4211 switch (ctype) { 4212 case MEM_CGROUP_CHARGE_TYPE_ANON: 4213 /* 4214 * Generally PageAnon tells if it's the anon statistics to be 4215 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is 4216 * used before page reached the stage of being marked PageAnon. 4217 */ 4218 anon = true; 4219 /* fallthrough */ 4220 case MEM_CGROUP_CHARGE_TYPE_DROP: 4221 /* See mem_cgroup_prepare_migration() */ 4222 if (page_mapped(page)) 4223 goto unlock_out; 4224 /* 4225 * Pages under migration may not be uncharged. But 4226 * end_migration() /must/ be the one uncharging the 4227 * unused post-migration page and so it has to call 4228 * here with the migration bit still set. See the 4229 * res_counter handling below. 4230 */ 4231 if (!end_migration && PageCgroupMigration(pc)) 4232 goto unlock_out; 4233 break; 4234 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 4235 if (!PageAnon(page)) { /* Shared memory */ 4236 if (page->mapping && !page_is_file_cache(page)) 4237 goto unlock_out; 4238 } else if (page_mapped(page)) /* Anon */ 4239 goto unlock_out; 4240 break; 4241 default: 4242 break; 4243 } 4244 4245 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages); 4246 4247 ClearPageCgroupUsed(pc); 4248 /* 4249 * pc->mem_cgroup is not cleared here. It will be accessed when it's 4250 * freed from LRU. This is safe because uncharged page is expected not 4251 * to be reused (freed soon). Exception is SwapCache, it's handled by 4252 * special functions. 4253 */ 4254 4255 unlock_page_cgroup(pc); 4256 /* 4257 * even after unlock, we have memcg->res.usage here and this memcg 4258 * will never be freed, so it's safe to call css_get(). 4259 */ 4260 memcg_check_events(memcg, page); 4261 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { 4262 mem_cgroup_swap_statistics(memcg, true); 4263 css_get(&memcg->css); 4264 } 4265 /* 4266 * Migration does not charge the res_counter for the 4267 * replacement page, so leave it alone when phasing out the 4268 * page that is unused after the migration. 
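 *
 * As a sketch (not part of the original file), the migration protocol
 * as seen from this file is:
 *
 *	mem_cgroup_prepare_migration(oldpage, newpage, &memcg);
 *	... copy and remap the page ...
 *	mem_cgroup_end_migration(memcg, oldpage, newpage, migration_ok);
 *
 * prepare_migration() commits the charge to the new page without touching
 * the res_counter again, and end_migration() calls back into this function
 * with end_migration == true to uncharge whichever page ended up unused.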
4269 */
4270 if (!end_migration && !mem_cgroup_is_root(memcg))
4271 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
4272
4273 return memcg;
4274
4275 unlock_out:
4276 unlock_page_cgroup(pc);
4277 return NULL;
4278 }
4279
4280 void mem_cgroup_uncharge_page(struct page *page)
4281 {
4282 /* early check. */
4283 if (page_mapped(page))
4284 return;
4285 VM_BUG_ON(page->mapping && !PageAnon(page));
4286 /*
4287 * If the page is in swap cache, uncharge should be deferred
4288 * to the swap path, which also properly accounts swap usage
4289 * and handles memcg lifetime.
4290 *
4291 * Note that this check is not stable and reclaim may add the
4292 * page to swap cache at any time after this. However, if the
4293 * page is not in swap cache by the time page->mapcount hits
4294 * 0, there won't be any page table references to the swap
4295 * slot, and reclaim will free it and not actually write the
4296 * page to disk.
4297 */
4298 if (PageSwapCache(page))
4299 return;
4300 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
4301 }
4302
4303 void mem_cgroup_uncharge_cache_page(struct page *page)
4304 {
4305 VM_BUG_ON(page_mapped(page));
4306 VM_BUG_ON(page->mapping);
4307 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
4308 }
4309
4310 /*
4311 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
4312 * In those cases, pages are freed continuously and we can expect that they
4313 * are in the same memcg. Each of these callers itself limits the number of
4314 * pages freed at once, so uncharge_start/end() is called properly.
4315 * This may be called multiple (nested) times in a context.
4316 */
4317
4318 void mem_cgroup_uncharge_start(void)
4319 {
4320 current->memcg_batch.do_batch++;
4321 /* We can nest. */
4322 if (current->memcg_batch.do_batch == 1) {
4323 current->memcg_batch.memcg = NULL;
4324 current->memcg_batch.nr_pages = 0;
4325 current->memcg_batch.memsw_nr_pages = 0;
4326 }
4327 }
4328
4329 void mem_cgroup_uncharge_end(void)
4330 {
4331 struct memcg_batch_info *batch = &current->memcg_batch;
4332
4333 if (!batch->do_batch)
4334 return;
4335
4336 batch->do_batch--;
4337 if (batch->do_batch) /* If stacked, do nothing. */
4338 return;
4339
4340 if (!batch->memcg)
4341 return;
4342 /*
4343 * This "batch->memcg" is valid without any css_get/put etc...
4344 * because we hide charges behind us.
4345 */
4346 if (batch->nr_pages)
4347 res_counter_uncharge(&batch->memcg->res,
4348 batch->nr_pages * PAGE_SIZE);
4349 if (batch->memsw_nr_pages)
4350 res_counter_uncharge(&batch->memcg->memsw,
4351 batch->memsw_nr_pages * PAGE_SIZE);
4352 memcg_oom_recover(batch->memcg);
4353 /* forget this pointer (for sanity check) */
4354 batch->memcg = NULL;
4355 }
4356
4357 #ifdef CONFIG_SWAP
4358 /*
4359 * Called after __delete_from_swap_cache() to drop the "page" account.
4360 * The memcg information is recorded in the swap_cgroup of "ent".
4361 */
4362 void
4363 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
4364 {
4365 struct mem_cgroup *memcg;
4366 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4367
4368 if (!swapout) /* this was a swap cache but the swap is unused! */
4369 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4370
4371 memcg = __mem_cgroup_uncharge_common(page, ctype, false);
4372
4373 /*
4374 * Record the memcg information; if swapout && memcg != NULL,
4375 * css_get() was called in uncharge().
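 *
 * (Illustrative note, not part of the original file: the id recorded
 * below via swap_cgroup_record() is consumed later by
 * mem_cgroup_uncharge_swap(), which clears the record with
 * swap_cgroup_record(ent, 0), drops the memsw charge and puts the css
 * reference taken here.)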
4376 */ 4377 if (do_swap_account && swapout && memcg) 4378 swap_cgroup_record(ent, css_id(&memcg->css)); 4379 } 4380 #endif 4381 4382 #ifdef CONFIG_MEMCG_SWAP 4383 /* 4384 * called from swap_entry_free(). remove record in swap_cgroup and 4385 * uncharge "memsw" account. 4386 */ 4387 void mem_cgroup_uncharge_swap(swp_entry_t ent) 4388 { 4389 struct mem_cgroup *memcg; 4390 unsigned short id; 4391 4392 if (!do_swap_account) 4393 return; 4394 4395 id = swap_cgroup_record(ent, 0); 4396 rcu_read_lock(); 4397 memcg = mem_cgroup_lookup(id); 4398 if (memcg) { 4399 /* 4400 * We uncharge this because swap is freed. 4401 * This memcg can be obsolete one. We avoid calling css_tryget 4402 */ 4403 if (!mem_cgroup_is_root(memcg)) 4404 res_counter_uncharge(&memcg->memsw, PAGE_SIZE); 4405 mem_cgroup_swap_statistics(memcg, false); 4406 css_put(&memcg->css); 4407 } 4408 rcu_read_unlock(); 4409 } 4410 4411 /** 4412 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 4413 * @entry: swap entry to be moved 4414 * @from: mem_cgroup which the entry is moved from 4415 * @to: mem_cgroup which the entry is moved to 4416 * 4417 * It succeeds only when the swap_cgroup's record for this entry is the same 4418 * as the mem_cgroup's id of @from. 4419 * 4420 * Returns 0 on success, -EINVAL on failure. 4421 * 4422 * The caller must have charged to @to, IOW, called res_counter_charge() about 4423 * both res and memsw, and called css_get(). 4424 */ 4425 static int mem_cgroup_move_swap_account(swp_entry_t entry, 4426 struct mem_cgroup *from, struct mem_cgroup *to) 4427 { 4428 unsigned short old_id, new_id; 4429 4430 old_id = css_id(&from->css); 4431 new_id = css_id(&to->css); 4432 4433 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 4434 mem_cgroup_swap_statistics(from, false); 4435 mem_cgroup_swap_statistics(to, true); 4436 /* 4437 * This function is only called from task migration context now. 4438 * It postpones res_counter and refcount handling till the end 4439 * of task migration(mem_cgroup_clear_mc()) for performance 4440 * improvement. But we cannot postpone css_get(to) because if 4441 * the process that has been moved to @to does swap-in, the 4442 * refcount of @to might be decreased to 0. 4443 * 4444 * We are in attach() phase, so the cgroup is guaranteed to be 4445 * alive, so we can just call css_get(). 4446 */ 4447 css_get(&to->css); 4448 return 0; 4449 } 4450 return -EINVAL; 4451 } 4452 #else 4453 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 4454 struct mem_cgroup *from, struct mem_cgroup *to) 4455 { 4456 return -EINVAL; 4457 } 4458 #endif 4459 4460 /* 4461 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 4462 * page belongs to. 4463 */ 4464 void mem_cgroup_prepare_migration(struct page *page, struct page *newpage, 4465 struct mem_cgroup **memcgp) 4466 { 4467 struct mem_cgroup *memcg = NULL; 4468 unsigned int nr_pages = 1; 4469 struct page_cgroup *pc; 4470 enum charge_type ctype; 4471 4472 *memcgp = NULL; 4473 4474 if (mem_cgroup_disabled()) 4475 return; 4476 4477 if (PageTransHuge(page)) 4478 nr_pages <<= compound_order(page); 4479 4480 pc = lookup_page_cgroup(page); 4481 lock_page_cgroup(pc); 4482 if (PageCgroupUsed(pc)) { 4483 memcg = pc->mem_cgroup; 4484 css_get(&memcg->css); 4485 /* 4486 * At migrating an anonymous page, its mapcount goes down 4487 * to 0 and uncharge() will be called. But, even if it's fully 4488 * unmapped, migration may fail and this page has to be 4489 * charged again. 
We set the MIGRATION flag here and delay the uncharge
4490 * until end_migration() is called.
4491 *
4492 * Corner Case Thinking
4493 * A)
4494 * When the old page was mapped as Anon and it's unmapped and freed
4495 * while migration was ongoing.
4496 * If unmap finds the old page, uncharge() of it will be delayed
4497 * until end_migration(). If unmap finds a new page, it's
4498 * uncharged when its mapcount goes from 1 to 0. If the unmap code
4499 * finds a swap migration entry, the new page will not be mapped
4500 * and end_migration() will find it (mapcount == 0).
4501 *
4502 * B)
4503 * When the old page was mapped but migration fails, the kernel
4504 * remaps it. A charge for it is kept by the MIGRATION flag even
4505 * if its mapcount goes down to 0. We can do the remap successfully
4506 * without charging it again.
4507 *
4508 * C)
4509 * The "old" page is under lock_page() until the end of
4510 * migration, so the old page itself will not be swapped out.
4511 * If the new page is swapped out before end_migration(), our
4512 * hook into the usual swap-out path will catch the event.
4513 */
4514 if (PageAnon(page))
4515 SetPageCgroupMigration(pc);
4516 }
4517 unlock_page_cgroup(pc);
4518 /*
4519 * If the page is not charged at this point,
4520 * we return here.
4521 */
4522 if (!memcg)
4523 return;
4524
4525 *memcgp = memcg;
4526 /*
4527 * We charge the new page before it's used/mapped. So, even if unlock_page()
4528 * is called before end_migration, we can catch all events on this new
4529 * page. In case the new page is migrated but not remapped, its
4530 * mapcount will finally be 0 and we call uncharge in end_migration().
4531 */
4532 if (PageAnon(page))
4533 ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
4534 else
4535 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
4536 /*
4537 * The page is committed to the memcg, but it's not actually
4538 * charged to the res_counter since we plan on replacing the
4539 * old one and only one page is going to be left afterwards.
4540 */
4541 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
4542 }
4543
4544 /* remove the redundant charge if migration failed */
4545 void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4546 struct page *oldpage, struct page *newpage, bool migration_ok)
4547 {
4548 struct page *used, *unused;
4549 struct page_cgroup *pc;
4550 bool anon;
4551
4552 if (!memcg)
4553 return;
4554
4555 if (!migration_ok) {
4556 used = oldpage;
4557 unused = newpage;
4558 } else {
4559 used = newpage;
4560 unused = oldpage;
4561 }
4562 anon = PageAnon(used);
4563 __mem_cgroup_uncharge_common(unused,
4564 anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4565 : MEM_CGROUP_CHARGE_TYPE_CACHE,
4566 true);
4567 css_put(&memcg->css);
4568 /*
4569 * We disallowed uncharge of pages under migration because the mapcount
4570 * of the page goes down to zero, temporarily.
4571 * Clear the flag and check whether the page should still be charged.
4572 */
4573 pc = lookup_page_cgroup(oldpage);
4574 lock_page_cgroup(pc);
4575 ClearPageCgroupMigration(pc);
4576 unlock_page_cgroup(pc);
4577
4578 /*
4579 * If the page is file cache, the radix-tree replacement is atomic
4580 * and we can skip this check. When it was an Anon page, its mapcount
4581 * goes down to 0. But because we added the MIGRATION flag, it's not
4582 * uncharged yet. There are several cases, but the page->mapcount check
4583 * and the USED bit check in mem_cgroup_uncharge_page() will do enough
4584 * checking.
(see prepare_charge() also) 4585 */ 4586 if (anon) 4587 mem_cgroup_uncharge_page(used); 4588 } 4589 4590 /* 4591 * At replace page cache, newpage is not under any memcg but it's on 4592 * LRU. So, this function doesn't touch res_counter but handles LRU 4593 * in correct way. Both pages are locked so we cannot race with uncharge. 4594 */ 4595 void mem_cgroup_replace_page_cache(struct page *oldpage, 4596 struct page *newpage) 4597 { 4598 struct mem_cgroup *memcg = NULL; 4599 struct page_cgroup *pc; 4600 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 4601 4602 if (mem_cgroup_disabled()) 4603 return; 4604 4605 pc = lookup_page_cgroup(oldpage); 4606 /* fix accounting on old pages */ 4607 lock_page_cgroup(pc); 4608 if (PageCgroupUsed(pc)) { 4609 memcg = pc->mem_cgroup; 4610 mem_cgroup_charge_statistics(memcg, oldpage, false, -1); 4611 ClearPageCgroupUsed(pc); 4612 } 4613 unlock_page_cgroup(pc); 4614 4615 /* 4616 * When called from shmem_replace_page(), in some cases the 4617 * oldpage has already been charged, and in some cases not. 4618 */ 4619 if (!memcg) 4620 return; 4621 /* 4622 * Even if newpage->mapping was NULL before starting replacement, 4623 * the newpage may be on LRU(or pagevec for LRU) already. We lock 4624 * LRU while we overwrite pc->mem_cgroup. 4625 */ 4626 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true); 4627 } 4628 4629 #ifdef CONFIG_DEBUG_VM 4630 static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 4631 { 4632 struct page_cgroup *pc; 4633 4634 pc = lookup_page_cgroup(page); 4635 /* 4636 * Can be NULL while feeding pages into the page allocator for 4637 * the first time, i.e. during boot or memory hotplug; 4638 * or when mem_cgroup_disabled(). 4639 */ 4640 if (likely(pc) && PageCgroupUsed(pc)) 4641 return pc; 4642 return NULL; 4643 } 4644 4645 bool mem_cgroup_bad_page_check(struct page *page) 4646 { 4647 if (mem_cgroup_disabled()) 4648 return false; 4649 4650 return lookup_page_cgroup_used(page) != NULL; 4651 } 4652 4653 void mem_cgroup_print_bad_page(struct page *page) 4654 { 4655 struct page_cgroup *pc; 4656 4657 pc = lookup_page_cgroup_used(page); 4658 if (pc) { 4659 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n", 4660 pc, pc->flags, pc->mem_cgroup); 4661 } 4662 } 4663 #endif 4664 4665 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 4666 unsigned long long val) 4667 { 4668 int retry_count; 4669 u64 memswlimit, memlimit; 4670 int ret = 0; 4671 int children = mem_cgroup_count_children(memcg); 4672 u64 curusage, oldusage; 4673 int enlarge; 4674 4675 /* 4676 * For keeping hierarchical_reclaim simple, how long we should retry 4677 * is depends on callers. We set our retry-count to be function 4678 * of # of children which we should visit in this loop. 4679 */ 4680 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; 4681 4682 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 4683 4684 enlarge = 0; 4685 while (retry_count) { 4686 if (signal_pending(current)) { 4687 ret = -EINTR; 4688 break; 4689 } 4690 /* 4691 * Rather than hide all in some function, I do this in 4692 * open coded manner. You see what this really does. 4693 * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 
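 *
 * A worked example (illustrative, not part of the original file): if
 * memsw.limit is currently 512M, a request to raise res.limit to 1G is
 * rejected with -EINVAL below; the administrator has to enlarge
 * memory.memsw.limit_in_bytes first, because memory usage alone must
 * never be allowed to exceed what memory+swap is capped at.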
4694 */ 4695 mutex_lock(&set_limit_mutex); 4696 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4697 if (memswlimit < val) { 4698 ret = -EINVAL; 4699 mutex_unlock(&set_limit_mutex); 4700 break; 4701 } 4702 4703 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 4704 if (memlimit < val) 4705 enlarge = 1; 4706 4707 ret = res_counter_set_limit(&memcg->res, val); 4708 if (!ret) { 4709 if (memswlimit == val) 4710 memcg->memsw_is_minimum = true; 4711 else 4712 memcg->memsw_is_minimum = false; 4713 } 4714 mutex_unlock(&set_limit_mutex); 4715 4716 if (!ret) 4717 break; 4718 4719 mem_cgroup_reclaim(memcg, GFP_KERNEL, 4720 MEM_CGROUP_RECLAIM_SHRINK); 4721 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 4722 /* Usage is reduced ? */ 4723 if (curusage >= oldusage) 4724 retry_count--; 4725 else 4726 oldusage = curusage; 4727 } 4728 if (!ret && enlarge) 4729 memcg_oom_recover(memcg); 4730 4731 return ret; 4732 } 4733 4734 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 4735 unsigned long long val) 4736 { 4737 int retry_count; 4738 u64 memlimit, memswlimit, oldusage, curusage; 4739 int children = mem_cgroup_count_children(memcg); 4740 int ret = -EBUSY; 4741 int enlarge = 0; 4742 4743 /* see mem_cgroup_resize_res_limit */ 4744 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 4745 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 4746 while (retry_count) { 4747 if (signal_pending(current)) { 4748 ret = -EINTR; 4749 break; 4750 } 4751 /* 4752 * Rather than hide all in some function, I do this in 4753 * open coded manner. You see what this really does. 4754 * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 4755 */ 4756 mutex_lock(&set_limit_mutex); 4757 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); 4758 if (memlimit > val) { 4759 ret = -EINVAL; 4760 mutex_unlock(&set_limit_mutex); 4761 break; 4762 } 4763 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 4764 if (memswlimit < val) 4765 enlarge = 1; 4766 ret = res_counter_set_limit(&memcg->memsw, val); 4767 if (!ret) { 4768 if (memlimit == val) 4769 memcg->memsw_is_minimum = true; 4770 else 4771 memcg->memsw_is_minimum = false; 4772 } 4773 mutex_unlock(&set_limit_mutex); 4774 4775 if (!ret) 4776 break; 4777 4778 mem_cgroup_reclaim(memcg, GFP_KERNEL, 4779 MEM_CGROUP_RECLAIM_NOSWAP | 4780 MEM_CGROUP_RECLAIM_SHRINK); 4781 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 4782 /* Usage is reduced ? 
*/ 4783 if (curusage >= oldusage) 4784 retry_count--; 4785 else 4786 oldusage = curusage; 4787 } 4788 if (!ret && enlarge) 4789 memcg_oom_recover(memcg); 4790 return ret; 4791 } 4792 4793 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 4794 gfp_t gfp_mask, 4795 unsigned long *total_scanned) 4796 { 4797 unsigned long nr_reclaimed = 0; 4798 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 4799 unsigned long reclaimed; 4800 int loop = 0; 4801 struct mem_cgroup_tree_per_zone *mctz; 4802 unsigned long long excess; 4803 unsigned long nr_scanned; 4804 4805 if (order > 0) 4806 return 0; 4807 4808 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 4809 /* 4810 * This loop can run a while, specially if mem_cgroup's continuously 4811 * keep exceeding their soft limit and putting the system under 4812 * pressure 4813 */ 4814 do { 4815 if (next_mz) 4816 mz = next_mz; 4817 else 4818 mz = mem_cgroup_largest_soft_limit_node(mctz); 4819 if (!mz) 4820 break; 4821 4822 nr_scanned = 0; 4823 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, 4824 gfp_mask, &nr_scanned); 4825 nr_reclaimed += reclaimed; 4826 *total_scanned += nr_scanned; 4827 spin_lock(&mctz->lock); 4828 4829 /* 4830 * If we failed to reclaim anything from this memory cgroup 4831 * it is time to move on to the next cgroup 4832 */ 4833 next_mz = NULL; 4834 if (!reclaimed) { 4835 do { 4836 /* 4837 * Loop until we find yet another one. 4838 * 4839 * By the time we get the soft_limit lock 4840 * again, someone might have aded the 4841 * group back on the RB tree. Iterate to 4842 * make sure we get a different mem. 4843 * mem_cgroup_largest_soft_limit_node returns 4844 * NULL if no other cgroup is present on 4845 * the tree 4846 */ 4847 next_mz = 4848 __mem_cgroup_largest_soft_limit_node(mctz); 4849 if (next_mz == mz) 4850 css_put(&next_mz->memcg->css); 4851 else /* next_mz == NULL or other memcg */ 4852 break; 4853 } while (1); 4854 } 4855 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz); 4856 excess = res_counter_soft_limit_excess(&mz->memcg->res); 4857 /* 4858 * One school of thought says that we should not add 4859 * back the node to the tree if reclaim returns 0. 4860 * But our reclaim could return 0, simply because due 4861 * to priority we are exposing a smaller subset of 4862 * memory to reclaim from. Consider this as a longer 4863 * term TODO. 4864 */ 4865 /* If excess == 0, no tree ops */ 4866 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess); 4867 spin_unlock(&mctz->lock); 4868 css_put(&mz->memcg->css); 4869 loop++; 4870 /* 4871 * Could not reclaim anything and there are no more 4872 * mem cgroups to try or we seem to be looping without 4873 * reclaiming anything. 4874 */ 4875 if (!nr_reclaimed && 4876 (next_mz == NULL || 4877 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 4878 break; 4879 } while (!nr_reclaimed); 4880 if (next_mz) 4881 css_put(&next_mz->memcg->css); 4882 return nr_reclaimed; 4883 } 4884 4885 /** 4886 * mem_cgroup_force_empty_list - clears LRU of a group 4887 * @memcg: group to clear 4888 * @node: NUMA node 4889 * @zid: zone id 4890 * @lru: lru to to clear 4891 * 4892 * Traverse a specified page_cgroup list and try to drop them all. This doesn't 4893 * reclaim the pages page themselves - pages are moved to the parent (or root) 4894 * group. 
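 *
 * (Illustrative usage, not part of the original file: this is the
 * workhorse behind "echo 0 > memory.force_empty", which reaches it via
 * mem_cgroup_force_empty() -> mem_cgroup_reparent_charges() below; the
 * caller keeps retrying until the group's non-kmem usage drops to zero.)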
4895 */ 4896 static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg, 4897 int node, int zid, enum lru_list lru) 4898 { 4899 struct lruvec *lruvec; 4900 unsigned long flags; 4901 struct list_head *list; 4902 struct page *busy; 4903 struct zone *zone; 4904 4905 zone = &NODE_DATA(node)->node_zones[zid]; 4906 lruvec = mem_cgroup_zone_lruvec(zone, memcg); 4907 list = &lruvec->lists[lru]; 4908 4909 busy = NULL; 4910 do { 4911 struct page_cgroup *pc; 4912 struct page *page; 4913 4914 spin_lock_irqsave(&zone->lru_lock, flags); 4915 if (list_empty(list)) { 4916 spin_unlock_irqrestore(&zone->lru_lock, flags); 4917 break; 4918 } 4919 page = list_entry(list->prev, struct page, lru); 4920 if (busy == page) { 4921 list_move(&page->lru, list); 4922 busy = NULL; 4923 spin_unlock_irqrestore(&zone->lru_lock, flags); 4924 continue; 4925 } 4926 spin_unlock_irqrestore(&zone->lru_lock, flags); 4927 4928 pc = lookup_page_cgroup(page); 4929 4930 if (mem_cgroup_move_parent(page, pc, memcg)) { 4931 /* found lock contention or "pc" is obsolete. */ 4932 busy = page; 4933 cond_resched(); 4934 } else 4935 busy = NULL; 4936 } while (!list_empty(list)); 4937 } 4938 4939 /* 4940 * make mem_cgroup's charge to be 0 if there is no task by moving 4941 * all the charges and pages to the parent. 4942 * This enables deleting this mem_cgroup. 4943 * 4944 * Caller is responsible for holding css reference on the memcg. 4945 */ 4946 static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) 4947 { 4948 int node, zid; 4949 u64 usage; 4950 4951 do { 4952 /* This is for making all *used* pages to be on LRU. */ 4953 lru_add_drain_all(); 4954 drain_all_stock_sync(memcg); 4955 mem_cgroup_start_move(memcg); 4956 for_each_node_state(node, N_MEMORY) { 4957 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4958 enum lru_list lru; 4959 for_each_lru(lru) { 4960 mem_cgroup_force_empty_list(memcg, 4961 node, zid, lru); 4962 } 4963 } 4964 } 4965 mem_cgroup_end_move(memcg); 4966 memcg_oom_recover(memcg); 4967 cond_resched(); 4968 4969 /* 4970 * Kernel memory may not necessarily be trackable to a specific 4971 * process. So they are not migrated, and therefore we can't 4972 * expect their value to drop to 0 here. 4973 * Having res filled up with kmem only is enough. 4974 * 4975 * This is a safety check because mem_cgroup_force_empty_list 4976 * could have raced with mem_cgroup_replace_page_cache callers 4977 * so the lru seemed empty but the page could have been added 4978 * right after the check. RES_USAGE should be safe as we always 4979 * charge before adding to the LRU. 4980 */ 4981 usage = res_counter_read_u64(&memcg->res, RES_USAGE) - 4982 res_counter_read_u64(&memcg->kmem, RES_USAGE); 4983 } while (usage > 0); 4984 } 4985 4986 /* 4987 * This mainly exists for tests during the setting of set of use_hierarchy. 4988 * Since this is the very setting we are changing, the current hierarchy value 4989 * is meaningless 4990 */ 4991 static inline bool __memcg_has_children(struct mem_cgroup *memcg) 4992 { 4993 struct cgroup_subsys_state *pos; 4994 4995 /* bounce at first found */ 4996 css_for_each_child(pos, &memcg->css) 4997 return true; 4998 return false; 4999 } 5000 5001 /* 5002 * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed 5003 * to be already dead (as in mem_cgroup_force_empty, for instance). This is 5004 * from mem_cgroup_count_children(), in the sense that we don't really care how 5005 * many children we have; we only need to know if we have any. 
It also counts 5006 * any memcg without hierarchy as infertile. 5007 */ 5008 static inline bool memcg_has_children(struct mem_cgroup *memcg) 5009 { 5010 return memcg->use_hierarchy && __memcg_has_children(memcg); 5011 } 5012 5013 /* 5014 * Reclaims as many pages from the given memcg as possible and moves 5015 * the rest to the parent. 5016 * 5017 * Caller is responsible for holding css reference for memcg. 5018 */ 5019 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 5020 { 5021 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 5022 struct cgroup *cgrp = memcg->css.cgroup; 5023 5024 /* returns EBUSY if there is a task or if we come here twice. */ 5025 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) 5026 return -EBUSY; 5027 5028 /* we call try-to-free pages for make this cgroup empty */ 5029 lru_add_drain_all(); 5030 /* try to free all pages in this cgroup */ 5031 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) { 5032 int progress; 5033 5034 if (signal_pending(current)) 5035 return -EINTR; 5036 5037 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, 5038 false); 5039 if (!progress) { 5040 nr_retries--; 5041 /* maybe some writeback is necessary */ 5042 congestion_wait(BLK_RW_ASYNC, HZ/10); 5043 } 5044 5045 } 5046 lru_add_drain(); 5047 mem_cgroup_reparent_charges(memcg); 5048 5049 return 0; 5050 } 5051 5052 static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css, 5053 unsigned int event) 5054 { 5055 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5056 5057 if (mem_cgroup_is_root(memcg)) 5058 return -EINVAL; 5059 return mem_cgroup_force_empty(memcg); 5060 } 5061 5062 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 5063 struct cftype *cft) 5064 { 5065 return mem_cgroup_from_css(css)->use_hierarchy; 5066 } 5067 5068 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 5069 struct cftype *cft, u64 val) 5070 { 5071 int retval = 0; 5072 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5073 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css)); 5074 5075 mutex_lock(&memcg_create_mutex); 5076 5077 if (memcg->use_hierarchy == val) 5078 goto out; 5079 5080 /* 5081 * If parent's use_hierarchy is set, we can't make any modifications 5082 * in the child subtrees. If it is unset, then the change can 5083 * occur, provided the current cgroup has no children. 5084 * 5085 * For the root cgroup, parent_mem is NULL, we allow value to be 5086 * set if there are no children. 5087 */ 5088 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 5089 (val == 1 || val == 0)) { 5090 if (!__memcg_has_children(memcg)) 5091 memcg->use_hierarchy = val; 5092 else 5093 retval = -EBUSY; 5094 } else 5095 retval = -EINVAL; 5096 5097 out: 5098 mutex_unlock(&memcg_create_mutex); 5099 5100 return retval; 5101 } 5102 5103 5104 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, 5105 enum mem_cgroup_stat_index idx) 5106 { 5107 struct mem_cgroup *iter; 5108 long val = 0; 5109 5110 /* Per-cpu values can be negative, use a signed accumulator */ 5111 for_each_mem_cgroup_tree(iter, memcg) 5112 val += mem_cgroup_read_stat(iter, idx); 5113 5114 if (val < 0) /* race ? 
*/ 5115 val = 0; 5116 return val; 5117 } 5118 5119 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 5120 { 5121 u64 val; 5122 5123 if (!mem_cgroup_is_root(memcg)) { 5124 if (!swap) 5125 return res_counter_read_u64(&memcg->res, RES_USAGE); 5126 else 5127 return res_counter_read_u64(&memcg->memsw, RES_USAGE); 5128 } 5129 5130 /* 5131 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS 5132 * as well as in MEM_CGROUP_STAT_RSS_HUGE. 5133 */ 5134 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); 5135 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); 5136 5137 if (swap) 5138 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP); 5139 5140 return val << PAGE_SHIFT; 5141 } 5142 5143 static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css, 5144 struct cftype *cft, struct file *file, 5145 char __user *buf, size_t nbytes, loff_t *ppos) 5146 { 5147 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5148 char str[64]; 5149 u64 val; 5150 int name, len; 5151 enum res_type type; 5152 5153 type = MEMFILE_TYPE(cft->private); 5154 name = MEMFILE_ATTR(cft->private); 5155 5156 switch (type) { 5157 case _MEM: 5158 if (name == RES_USAGE) 5159 val = mem_cgroup_usage(memcg, false); 5160 else 5161 val = res_counter_read_u64(&memcg->res, name); 5162 break; 5163 case _MEMSWAP: 5164 if (name == RES_USAGE) 5165 val = mem_cgroup_usage(memcg, true); 5166 else 5167 val = res_counter_read_u64(&memcg->memsw, name); 5168 break; 5169 case _KMEM: 5170 val = res_counter_read_u64(&memcg->kmem, name); 5171 break; 5172 default: 5173 BUG(); 5174 } 5175 5176 len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val); 5177 return simple_read_from_buffer(buf, nbytes, ppos, str, len); 5178 } 5179 5180 static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val) 5181 { 5182 int ret = -EINVAL; 5183 #ifdef CONFIG_MEMCG_KMEM 5184 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5185 /* 5186 * For simplicity, we won't allow this to be disabled. It also can't 5187 * be changed if the cgroup has children already, or if tasks had 5188 * already joined. 5189 * 5190 * If tasks join before we set the limit, a person looking at 5191 * kmem.usage_in_bytes will have no way to determine when it took 5192 * place, which makes the value quite meaningless. 5193 * 5194 * After it first became limited, changes in the value of the limit are 5195 * of course permitted. 
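 *
 * An illustrative example (not part of the original file): from
 * userspace the switch from "unlimited" to "accounted" is simply
 *
 *	echo 512M > memory.kmem.limit_in_bytes
 *
 * which only succeeds while the group has no tasks and no children;
 * afterwards the limit can be raised or lowered through the same file.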
5196 */ 5197 mutex_lock(&memcg_create_mutex); 5198 mutex_lock(&set_limit_mutex); 5199 if (!memcg->kmem_account_flags && val != RES_COUNTER_MAX) { 5200 if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) { 5201 ret = -EBUSY; 5202 goto out; 5203 } 5204 ret = res_counter_set_limit(&memcg->kmem, val); 5205 VM_BUG_ON(ret); 5206 5207 ret = memcg_update_cache_sizes(memcg); 5208 if (ret) { 5209 res_counter_set_limit(&memcg->kmem, RES_COUNTER_MAX); 5210 goto out; 5211 } 5212 static_key_slow_inc(&memcg_kmem_enabled_key); 5213 /* 5214 * setting the active bit after the inc will guarantee no one 5215 * starts accounting before all call sites are patched 5216 */ 5217 memcg_kmem_set_active(memcg); 5218 } else 5219 ret = res_counter_set_limit(&memcg->kmem, val); 5220 out: 5221 mutex_unlock(&set_limit_mutex); 5222 mutex_unlock(&memcg_create_mutex); 5223 #endif 5224 return ret; 5225 } 5226 5227 #ifdef CONFIG_MEMCG_KMEM 5228 static int memcg_propagate_kmem(struct mem_cgroup *memcg) 5229 { 5230 int ret = 0; 5231 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5232 if (!parent) 5233 goto out; 5234 5235 memcg->kmem_account_flags = parent->kmem_account_flags; 5236 /* 5237 * When that happen, we need to disable the static branch only on those 5238 * memcgs that enabled it. To achieve this, we would be forced to 5239 * complicate the code by keeping track of which memcgs were the ones 5240 * that actually enabled limits, and which ones got it from its 5241 * parents. 5242 * 5243 * It is a lot simpler just to do static_key_slow_inc() on every child 5244 * that is accounted. 5245 */ 5246 if (!memcg_kmem_is_active(memcg)) 5247 goto out; 5248 5249 /* 5250 * __mem_cgroup_free() will issue static_key_slow_dec() because this 5251 * memcg is active already. If the later initialization fails then the 5252 * cgroup core triggers the cleanup so we do not have to do it here. 5253 */ 5254 static_key_slow_inc(&memcg_kmem_enabled_key); 5255 5256 mutex_lock(&set_limit_mutex); 5257 memcg_stop_kmem_account(); 5258 ret = memcg_update_cache_sizes(memcg); 5259 memcg_resume_kmem_account(); 5260 mutex_unlock(&set_limit_mutex); 5261 out: 5262 return ret; 5263 } 5264 #endif /* CONFIG_MEMCG_KMEM */ 5265 5266 /* 5267 * The user of this function is... 5268 * RES_LIMIT. 
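 *
 * (Illustrative mapping, not part of the original file: writes to
 * memory.limit_in_bytes, memory.memsw.limit_in_bytes and
 * memory.kmem.limit_in_bytes all arrive here with name == RES_LIMIT,
 * while memory.soft_limit_in_bytes arrives with RES_SOFT_LIMIT; e.g.
 * "echo 1G > memory.limit_in_bytes" is parsed by
 * res_counter_memparse_write_strategy() and applied through
 * mem_cgroup_resize_limit().)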
5269 */ 5270 static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft, 5271 const char *buffer) 5272 { 5273 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5274 enum res_type type; 5275 int name; 5276 unsigned long long val; 5277 int ret; 5278 5279 type = MEMFILE_TYPE(cft->private); 5280 name = MEMFILE_ATTR(cft->private); 5281 5282 switch (name) { 5283 case RES_LIMIT: 5284 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 5285 ret = -EINVAL; 5286 break; 5287 } 5288 /* This function does all necessary parse...reuse it */ 5289 ret = res_counter_memparse_write_strategy(buffer, &val); 5290 if (ret) 5291 break; 5292 if (type == _MEM) 5293 ret = mem_cgroup_resize_limit(memcg, val); 5294 else if (type == _MEMSWAP) 5295 ret = mem_cgroup_resize_memsw_limit(memcg, val); 5296 else if (type == _KMEM) 5297 ret = memcg_update_kmem_limit(css, val); 5298 else 5299 return -EINVAL; 5300 break; 5301 case RES_SOFT_LIMIT: 5302 ret = res_counter_memparse_write_strategy(buffer, &val); 5303 if (ret) 5304 break; 5305 /* 5306 * For memsw, soft limits are hard to implement in terms 5307 * of semantics, for now, we support soft limits for 5308 * control without swap 5309 */ 5310 if (type == _MEM) 5311 ret = res_counter_set_soft_limit(&memcg->res, val); 5312 else 5313 ret = -EINVAL; 5314 break; 5315 default: 5316 ret = -EINVAL; /* should be BUG() ? */ 5317 break; 5318 } 5319 return ret; 5320 } 5321 5322 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, 5323 unsigned long long *mem_limit, unsigned long long *memsw_limit) 5324 { 5325 unsigned long long min_limit, min_memsw_limit, tmp; 5326 5327 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); 5328 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 5329 if (!memcg->use_hierarchy) 5330 goto out; 5331 5332 while (css_parent(&memcg->css)) { 5333 memcg = mem_cgroup_from_css(css_parent(&memcg->css)); 5334 if (!memcg->use_hierarchy) 5335 break; 5336 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); 5337 min_limit = min(min_limit, tmp); 5338 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); 5339 min_memsw_limit = min(min_memsw_limit, tmp); 5340 } 5341 out: 5342 *mem_limit = min_limit; 5343 *memsw_limit = min_memsw_limit; 5344 } 5345 5346 static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event) 5347 { 5348 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5349 int name; 5350 enum res_type type; 5351 5352 type = MEMFILE_TYPE(event); 5353 name = MEMFILE_ATTR(event); 5354 5355 switch (name) { 5356 case RES_MAX_USAGE: 5357 if (type == _MEM) 5358 res_counter_reset_max(&memcg->res); 5359 else if (type == _MEMSWAP) 5360 res_counter_reset_max(&memcg->memsw); 5361 else if (type == _KMEM) 5362 res_counter_reset_max(&memcg->kmem); 5363 else 5364 return -EINVAL; 5365 break; 5366 case RES_FAILCNT: 5367 if (type == _MEM) 5368 res_counter_reset_failcnt(&memcg->res); 5369 else if (type == _MEMSWAP) 5370 res_counter_reset_failcnt(&memcg->memsw); 5371 else if (type == _KMEM) 5372 res_counter_reset_failcnt(&memcg->kmem); 5373 else 5374 return -EINVAL; 5375 break; 5376 } 5377 5378 return 0; 5379 } 5380 5381 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 5382 struct cftype *cft) 5383 { 5384 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 5385 } 5386 5387 #ifdef CONFIG_MMU 5388 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 5389 struct cftype *cft, u64 val) 5390 { 5391 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5392 
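	/*
	 * Illustrative usage (not part of the original file):
	 * "echo 3 > memory.move_charge_at_immigrate" sets bit 0 (move
	 * anonymous pages and their swap) and bit 1 (move file-mapped
	 * pages) so that charges follow a task that is migrated into
	 * this cgroup, as described in the comment below.
	 */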
5393 if (val >= (1 << NR_MOVE_TYPE)) 5394 return -EINVAL; 5395 5396 /* 5397 * No kind of locking is needed in here, because ->can_attach() will 5398 * check this value once in the beginning of the process, and then carry 5399 * on with stale data. This means that changes to this value will only 5400 * affect task migrations starting after the change. 5401 */ 5402 memcg->move_charge_at_immigrate = val; 5403 return 0; 5404 } 5405 #else 5406 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 5407 struct cftype *cft, u64 val) 5408 { 5409 return -ENOSYS; 5410 } 5411 #endif 5412 5413 #ifdef CONFIG_NUMA 5414 static int memcg_numa_stat_show(struct cgroup_subsys_state *css, 5415 struct cftype *cft, struct seq_file *m) 5416 { 5417 int nid; 5418 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 5419 unsigned long node_nr; 5420 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5421 5422 total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL); 5423 seq_printf(m, "total=%lu", total_nr); 5424 for_each_node_state(nid, N_MEMORY) { 5425 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL); 5426 seq_printf(m, " N%d=%lu", nid, node_nr); 5427 } 5428 seq_putc(m, '\n'); 5429 5430 file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE); 5431 seq_printf(m, "file=%lu", file_nr); 5432 for_each_node_state(nid, N_MEMORY) { 5433 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 5434 LRU_ALL_FILE); 5435 seq_printf(m, " N%d=%lu", nid, node_nr); 5436 } 5437 seq_putc(m, '\n'); 5438 5439 anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON); 5440 seq_printf(m, "anon=%lu", anon_nr); 5441 for_each_node_state(nid, N_MEMORY) { 5442 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 5443 LRU_ALL_ANON); 5444 seq_printf(m, " N%d=%lu", nid, node_nr); 5445 } 5446 seq_putc(m, '\n'); 5447 5448 unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE)); 5449 seq_printf(m, "unevictable=%lu", unevictable_nr); 5450 for_each_node_state(nid, N_MEMORY) { 5451 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 5452 BIT(LRU_UNEVICTABLE)); 5453 seq_printf(m, " N%d=%lu", nid, node_nr); 5454 } 5455 seq_putc(m, '\n'); 5456 return 0; 5457 } 5458 #endif /* CONFIG_NUMA */ 5459 5460 static inline void mem_cgroup_lru_names_not_uptodate(void) 5461 { 5462 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 5463 } 5464 5465 static int memcg_stat_show(struct cgroup_subsys_state *css, struct cftype *cft, 5466 struct seq_file *m) 5467 { 5468 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5469 struct mem_cgroup *mi; 5470 unsigned int i; 5471 5472 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 5473 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 5474 continue; 5475 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], 5476 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 5477 } 5478 5479 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 5480 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 5481 mem_cgroup_read_events(memcg, i)); 5482 5483 for (i = 0; i < NR_LRU_LISTS; i++) 5484 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 5485 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 5486 5487 /* Hierarchical information */ 5488 { 5489 unsigned long long limit, memsw_limit; 5490 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit); 5491 seq_printf(m, "hierarchical_memory_limit %llu\n", limit); 5492 if (do_swap_account) 5493 seq_printf(m, "hierarchical_memsw_limit %llu\n", 5494 memsw_limit); 5495 } 5496 5497 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 5498 long long val = 0; 5499 5500 if 
(i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 5501 continue; 5502 for_each_mem_cgroup_tree(mi, memcg) 5503 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 5504 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); 5505 } 5506 5507 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 5508 unsigned long long val = 0; 5509 5510 for_each_mem_cgroup_tree(mi, memcg) 5511 val += mem_cgroup_read_events(mi, i); 5512 seq_printf(m, "total_%s %llu\n", 5513 mem_cgroup_events_names[i], val); 5514 } 5515 5516 for (i = 0; i < NR_LRU_LISTS; i++) { 5517 unsigned long long val = 0; 5518 5519 for_each_mem_cgroup_tree(mi, memcg) 5520 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 5521 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 5522 } 5523 5524 #ifdef CONFIG_DEBUG_VM 5525 { 5526 int nid, zid; 5527 struct mem_cgroup_per_zone *mz; 5528 struct zone_reclaim_stat *rstat; 5529 unsigned long recent_rotated[2] = {0, 0}; 5530 unsigned long recent_scanned[2] = {0, 0}; 5531 5532 for_each_online_node(nid) 5533 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 5534 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 5535 rstat = &mz->lruvec.reclaim_stat; 5536 5537 recent_rotated[0] += rstat->recent_rotated[0]; 5538 recent_rotated[1] += rstat->recent_rotated[1]; 5539 recent_scanned[0] += rstat->recent_scanned[0]; 5540 recent_scanned[1] += rstat->recent_scanned[1]; 5541 } 5542 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 5543 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 5544 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 5545 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 5546 } 5547 #endif 5548 5549 return 0; 5550 } 5551 5552 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 5553 struct cftype *cft) 5554 { 5555 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5556 5557 return mem_cgroup_swappiness(memcg); 5558 } 5559 5560 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 5561 struct cftype *cft, u64 val) 5562 { 5563 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5564 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css)); 5565 5566 if (val > 100 || !parent) 5567 return -EINVAL; 5568 5569 mutex_lock(&memcg_create_mutex); 5570 5571 /* If under hierarchy, only empty-root can set this value */ 5572 if ((parent->use_hierarchy) || memcg_has_children(memcg)) { 5573 mutex_unlock(&memcg_create_mutex); 5574 return -EINVAL; 5575 } 5576 5577 memcg->swappiness = val; 5578 5579 mutex_unlock(&memcg_create_mutex); 5580 5581 return 0; 5582 } 5583 5584 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 5585 { 5586 struct mem_cgroup_threshold_ary *t; 5587 u64 usage; 5588 int i; 5589 5590 rcu_read_lock(); 5591 if (!swap) 5592 t = rcu_dereference(memcg->thresholds.primary); 5593 else 5594 t = rcu_dereference(memcg->memsw_thresholds.primary); 5595 5596 if (!t) 5597 goto unlock; 5598 5599 usage = mem_cgroup_usage(memcg, swap); 5600 5601 /* 5602 * current_threshold points to threshold just below or equal to usage. 5603 * If it's not true, a threshold was crossed after last 5604 * call of __mem_cgroup_threshold(). 5605 */ 5606 i = t->current_threshold; 5607 5608 /* 5609 * Iterate backward over array of thresholds starting from 5610 * current_threshold and check if a threshold is crossed. 5611 * If none of thresholds below usage is crossed, we read 5612 * only one element of the array here. 
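 *
 * A small worked example (illustrative, not part of the original file):
 * with thresholds {4M, 8M, 16M} and current_threshold at the 8M entry
 * (usage was 10M), a drop in usage to 6M makes the backward scan below
 * signal the 8M eventfd and stop at 4M, the forward scan stop
 * immediately, and current_threshold end up at the 4M entry.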
5613 */ 5614 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 5615 eventfd_signal(t->entries[i].eventfd, 1); 5616 5617 /* i = current_threshold + 1 */ 5618 i++; 5619 5620 /* 5621 * Iterate forward over array of thresholds starting from 5622 * current_threshold+1 and check if a threshold is crossed. 5623 * If none of thresholds above usage is crossed, we read 5624 * only one element of the array here. 5625 */ 5626 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 5627 eventfd_signal(t->entries[i].eventfd, 1); 5628 5629 /* Update current_threshold */ 5630 t->current_threshold = i - 1; 5631 unlock: 5632 rcu_read_unlock(); 5633 } 5634 5635 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 5636 { 5637 while (memcg) { 5638 __mem_cgroup_threshold(memcg, false); 5639 if (do_swap_account) 5640 __mem_cgroup_threshold(memcg, true); 5641 5642 memcg = parent_mem_cgroup(memcg); 5643 } 5644 } 5645 5646 static int compare_thresholds(const void *a, const void *b) 5647 { 5648 const struct mem_cgroup_threshold *_a = a; 5649 const struct mem_cgroup_threshold *_b = b; 5650 5651 if (_a->threshold > _b->threshold) 5652 return 1; 5653 5654 if (_a->threshold < _b->threshold) 5655 return -1; 5656 5657 return 0; 5658 } 5659 5660 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 5661 { 5662 struct mem_cgroup_eventfd_list *ev; 5663 5664 list_for_each_entry(ev, &memcg->oom_notify, list) 5665 eventfd_signal(ev->eventfd, 1); 5666 return 0; 5667 } 5668 5669 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 5670 { 5671 struct mem_cgroup *iter; 5672 5673 for_each_mem_cgroup_tree(iter, memcg) 5674 mem_cgroup_oom_notify_cb(iter); 5675 } 5676 5677 static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css, 5678 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 5679 { 5680 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5681 struct mem_cgroup_thresholds *thresholds; 5682 struct mem_cgroup_threshold_ary *new; 5683 enum res_type type = MEMFILE_TYPE(cft->private); 5684 u64 threshold, usage; 5685 int i, size, ret; 5686 5687 ret = res_counter_memparse_write_strategy(args, &threshold); 5688 if (ret) 5689 return ret; 5690 5691 mutex_lock(&memcg->thresholds_lock); 5692 5693 if (type == _MEM) 5694 thresholds = &memcg->thresholds; 5695 else if (type == _MEMSWAP) 5696 thresholds = &memcg->memsw_thresholds; 5697 else 5698 BUG(); 5699 5700 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 5701 5702 /* Check if a threshold crossed before adding a new one */ 5703 if (thresholds->primary) 5704 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 5705 5706 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 5707 5708 /* Allocate memory for new array of thresholds */ 5709 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 5710 GFP_KERNEL); 5711 if (!new) { 5712 ret = -ENOMEM; 5713 goto unlock; 5714 } 5715 new->size = size; 5716 5717 /* Copy thresholds (if any) to new array */ 5718 if (thresholds->primary) { 5719 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 5720 sizeof(struct mem_cgroup_threshold)); 5721 } 5722 5723 /* Add new threshold */ 5724 new->entries[size - 1].eventfd = eventfd; 5725 new->entries[size - 1].threshold = threshold; 5726 5727 /* Sort thresholds. 
Registering of new threshold isn't time-critical */ 5728 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 5729 compare_thresholds, NULL); 5730 5731 /* Find current threshold */ 5732 new->current_threshold = -1; 5733 for (i = 0; i < size; i++) { 5734 if (new->entries[i].threshold <= usage) { 5735 /* 5736 * new->current_threshold will not be used until 5737 * rcu_assign_pointer(), so it's safe to increment 5738 * it here. 5739 */ 5740 ++new->current_threshold; 5741 } else 5742 break; 5743 } 5744 5745 /* Free old spare buffer and save old primary buffer as spare */ 5746 kfree(thresholds->spare); 5747 thresholds->spare = thresholds->primary; 5748 5749 rcu_assign_pointer(thresholds->primary, new); 5750 5751 /* To be sure that nobody uses thresholds */ 5752 synchronize_rcu(); 5753 5754 unlock: 5755 mutex_unlock(&memcg->thresholds_lock); 5756 5757 return ret; 5758 } 5759 5760 static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css, 5761 struct cftype *cft, struct eventfd_ctx *eventfd) 5762 { 5763 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5764 struct mem_cgroup_thresholds *thresholds; 5765 struct mem_cgroup_threshold_ary *new; 5766 enum res_type type = MEMFILE_TYPE(cft->private); 5767 u64 usage; 5768 int i, j, size; 5769 5770 mutex_lock(&memcg->thresholds_lock); 5771 if (type == _MEM) 5772 thresholds = &memcg->thresholds; 5773 else if (type == _MEMSWAP) 5774 thresholds = &memcg->memsw_thresholds; 5775 else 5776 BUG(); 5777 5778 if (!thresholds->primary) 5779 goto unlock; 5780 5781 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 5782 5783 /* Check if a threshold crossed before removing */ 5784 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 5785 5786 /* Calculate new number of threshold */ 5787 size = 0; 5788 for (i = 0; i < thresholds->primary->size; i++) { 5789 if (thresholds->primary->entries[i].eventfd != eventfd) 5790 size++; 5791 } 5792 5793 new = thresholds->spare; 5794 5795 /* Set thresholds array to NULL if we don't have thresholds */ 5796 if (!size) { 5797 kfree(new); 5798 new = NULL; 5799 goto swap_buffers; 5800 } 5801 5802 new->size = size; 5803 5804 /* Copy thresholds and find current threshold */ 5805 new->current_threshold = -1; 5806 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 5807 if (thresholds->primary->entries[i].eventfd == eventfd) 5808 continue; 5809 5810 new->entries[j] = thresholds->primary->entries[i]; 5811 if (new->entries[j].threshold <= usage) { 5812 /* 5813 * new->current_threshold will not be used 5814 * until rcu_assign_pointer(), so it's safe to increment 5815 * it here. 
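Readers only reach the new array through rcu_dereference() of thresholds->primary, which is not published until the rcu_assign_pointer() below, so they can never observe this intermediate state.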
5816 */ 5817 ++new->current_threshold; 5818 } 5819 j++; 5820 } 5821 5822 swap_buffers: 5823 /* Swap primary and spare array */ 5824 thresholds->spare = thresholds->primary; 5825 /* If all events are unregistered, free the spare array */ 5826 if (!new) { 5827 kfree(thresholds->spare); 5828 thresholds->spare = NULL; 5829 } 5830 5831 rcu_assign_pointer(thresholds->primary, new); 5832 5833 /* To be sure that nobody uses thresholds */ 5834 synchronize_rcu(); 5835 unlock: 5836 mutex_unlock(&memcg->thresholds_lock); 5837 } 5838 5839 static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css, 5840 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) 5841 { 5842 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5843 struct mem_cgroup_eventfd_list *event; 5844 enum res_type type = MEMFILE_TYPE(cft->private); 5845 5846 BUG_ON(type != _OOM_TYPE); 5847 event = kmalloc(sizeof(*event), GFP_KERNEL); 5848 if (!event) 5849 return -ENOMEM; 5850 5851 spin_lock(&memcg_oom_lock); 5852 5853 event->eventfd = eventfd; 5854 list_add(&event->list, &memcg->oom_notify); 5855 5856 /* already in OOM ? */ 5857 if (atomic_read(&memcg->under_oom)) 5858 eventfd_signal(eventfd, 1); 5859 spin_unlock(&memcg_oom_lock); 5860 5861 return 0; 5862 } 5863 5864 static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css, 5865 struct cftype *cft, struct eventfd_ctx *eventfd) 5866 { 5867 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5868 struct mem_cgroup_eventfd_list *ev, *tmp; 5869 enum res_type type = MEMFILE_TYPE(cft->private); 5870 5871 BUG_ON(type != _OOM_TYPE); 5872 5873 spin_lock(&memcg_oom_lock); 5874 5875 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 5876 if (ev->eventfd == eventfd) { 5877 list_del(&ev->list); 5878 kfree(ev); 5879 } 5880 } 5881 5882 spin_unlock(&memcg_oom_lock); 5883 } 5884 5885 static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css, 5886 struct cftype *cft, struct cgroup_map_cb *cb) 5887 { 5888 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5889 5890 cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable); 5891 5892 if (atomic_read(&memcg->under_oom)) 5893 cb->fill(cb, "under_oom", 1); 5894 else 5895 cb->fill(cb, "under_oom", 0); 5896 return 0; 5897 } 5898 5899 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 5900 struct cftype *cft, u64 val) 5901 { 5902 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5903 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css)); 5904 5905 /* cannot set to root cgroup and only 0 and 1 are allowed */ 5906 if (!parent || !((val == 0) || (val == 1))) 5907 return -EINVAL; 5908 5909 mutex_lock(&memcg_create_mutex); 5910 /* oom-kill-disable is a flag for subhierarchy. 
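As with swappiness above, it can only be changed while the group is not yet part of an active hierarchy: the parent must not have use_hierarchy set and the group must not have children yet.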
*/ 5911 if ((parent->use_hierarchy) || memcg_has_children(memcg)) { 5912 mutex_unlock(&memcg_create_mutex); 5913 return -EINVAL; 5914 } 5915 memcg->oom_kill_disable = val; 5916 if (!val) 5917 memcg_oom_recover(memcg); 5918 mutex_unlock(&memcg_create_mutex); 5919 return 0; 5920 } 5921 5922 #ifdef CONFIG_MEMCG_KMEM 5923 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 5924 { 5925 int ret; 5926 5927 memcg->kmemcg_id = -1; 5928 ret = memcg_propagate_kmem(memcg); 5929 if (ret) 5930 return ret; 5931 5932 return mem_cgroup_sockets_init(memcg, ss); 5933 } 5934 5935 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 5936 { 5937 mem_cgroup_sockets_destroy(memcg); 5938 } 5939 5940 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) 5941 { 5942 if (!memcg_kmem_is_active(memcg)) 5943 return; 5944 5945 /* 5946 * kmem charges can outlive the cgroup. In the case of slab 5947 * pages, for instance, a page may contain objects from various 5948 * processes. As we do not take a reference for every 5949 * such allocation, we have to be careful when doing uncharge 5950 * (see memcg_uncharge_kmem) and here during offlining. 5951 * 5952 * The idea is that only the _last_ uncharge which sees 5953 * the dead memcg will drop the last reference. An additional 5954 * reference is taken here before the group is marked dead, 5955 * which is then paired with a css_put either during uncharge or here. 5956 * 5957 * Although this might sound strange, as this path is called from 5958 * css_offline() when the reference might have dropped down to 0 5959 * and shouldn't be incremented anymore (css_tryget would fail), 5960 * we do not have other options because of the lifetime of the 5961 * kmem allocations. 5962 */ 5963 css_get(&memcg->css); 5964 5965 memcg_kmem_mark_dead(memcg); 5966 5967 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0) 5968 return; 5969 5970 if (memcg_kmem_test_and_clear_dead(memcg)) 5971 css_put(&memcg->css); 5972 } 5973 #else 5974 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 5975 { 5976 return 0; 5977 } 5978 5979 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 5980 { 5981 } 5982 5983 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) 5984 { 5985 } 5986 #endif 5987 5988 static struct cftype mem_cgroup_files[] = { 5989 { 5990 .name = "usage_in_bytes", 5991 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 5992 .read = mem_cgroup_read, 5993 .register_event = mem_cgroup_usage_register_event, 5994 .unregister_event = mem_cgroup_usage_unregister_event, 5995 }, 5996 { 5997 .name = "max_usage_in_bytes", 5998 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 5999 .trigger = mem_cgroup_reset, 6000 .read = mem_cgroup_read, 6001 }, 6002 { 6003 .name = "limit_in_bytes", 6004 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 6005 .write_string = mem_cgroup_write, 6006 .read = mem_cgroup_read, 6007 }, 6008 { 6009 .name = "soft_limit_in_bytes", 6010 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 6011 .write_string = mem_cgroup_write, 6012 .read = mem_cgroup_read, 6013 }, 6014 { 6015 .name = "failcnt", 6016 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 6017 .trigger = mem_cgroup_reset, 6018 .read = mem_cgroup_read, 6019 }, 6020 { 6021 .name = "stat", 6022 .read_seq_string = memcg_stat_show, 6023 }, 6024 { 6025 .name = "force_empty", 6026 .trigger = mem_cgroup_force_empty_write, 6027 }, 6028 { 6029 .name = "use_hierarchy", 6030 .flags = CFTYPE_INSANE, 6031 .write_u64 = mem_cgroup_hierarchy_write, 6032 .read_u64 =
mem_cgroup_hierarchy_read, 6033 }, 6034 { 6035 .name = "swappiness", 6036 .read_u64 = mem_cgroup_swappiness_read, 6037 .write_u64 = mem_cgroup_swappiness_write, 6038 }, 6039 { 6040 .name = "move_charge_at_immigrate", 6041 .read_u64 = mem_cgroup_move_charge_read, 6042 .write_u64 = mem_cgroup_move_charge_write, 6043 }, 6044 { 6045 .name = "oom_control", 6046 .read_map = mem_cgroup_oom_control_read, 6047 .write_u64 = mem_cgroup_oom_control_write, 6048 .register_event = mem_cgroup_oom_register_event, 6049 .unregister_event = mem_cgroup_oom_unregister_event, 6050 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 6051 }, 6052 { 6053 .name = "pressure_level", 6054 .register_event = vmpressure_register_event, 6055 .unregister_event = vmpressure_unregister_event, 6056 }, 6057 #ifdef CONFIG_NUMA 6058 { 6059 .name = "numa_stat", 6060 .read_seq_string = memcg_numa_stat_show, 6061 }, 6062 #endif 6063 #ifdef CONFIG_MEMCG_KMEM 6064 { 6065 .name = "kmem.limit_in_bytes", 6066 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 6067 .write_string = mem_cgroup_write, 6068 .read = mem_cgroup_read, 6069 }, 6070 { 6071 .name = "kmem.usage_in_bytes", 6072 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 6073 .read = mem_cgroup_read, 6074 }, 6075 { 6076 .name = "kmem.failcnt", 6077 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 6078 .trigger = mem_cgroup_reset, 6079 .read = mem_cgroup_read, 6080 }, 6081 { 6082 .name = "kmem.max_usage_in_bytes", 6083 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 6084 .trigger = mem_cgroup_reset, 6085 .read = mem_cgroup_read, 6086 }, 6087 #ifdef CONFIG_SLABINFO 6088 { 6089 .name = "kmem.slabinfo", 6090 .read_seq_string = mem_cgroup_slabinfo_read, 6091 }, 6092 #endif 6093 #endif 6094 { }, /* terminate */ 6095 }; 6096 6097 #ifdef CONFIG_MEMCG_SWAP 6098 static struct cftype memsw_cgroup_files[] = { 6099 { 6100 .name = "memsw.usage_in_bytes", 6101 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 6102 .read = mem_cgroup_read, 6103 .register_event = mem_cgroup_usage_register_event, 6104 .unregister_event = mem_cgroup_usage_unregister_event, 6105 }, 6106 { 6107 .name = "memsw.max_usage_in_bytes", 6108 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 6109 .trigger = mem_cgroup_reset, 6110 .read = mem_cgroup_read, 6111 }, 6112 { 6113 .name = "memsw.limit_in_bytes", 6114 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 6115 .write_string = mem_cgroup_write, 6116 .read = mem_cgroup_read, 6117 }, 6118 { 6119 .name = "memsw.failcnt", 6120 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 6121 .trigger = mem_cgroup_reset, 6122 .read = mem_cgroup_read, 6123 }, 6124 { }, /* terminate */ 6125 }; 6126 #endif 6127 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 6128 { 6129 struct mem_cgroup_per_node *pn; 6130 struct mem_cgroup_per_zone *mz; 6131 int zone, tmp = node; 6132 /* 6133 * This routine is called against possible nodes. 6134 * But it's BUG to call kmalloc() against offline node. 6135 * 6136 * TODO: this routine can waste much memory for nodes which will 6137 * never be onlined. It's better to use memory hotplug callback 6138 * function. 
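Until then, nodes without normal memory get tmp set to -1 just below, so kzalloc_node() falls back to allocating from any node.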
6139 */ 6140 if (!node_state(node, N_NORMAL_MEMORY)) 6141 tmp = -1; 6142 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 6143 if (!pn) 6144 return 1; 6145 6146 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6147 mz = &pn->zoneinfo[zone]; 6148 lruvec_init(&mz->lruvec); 6149 mz->usage_in_excess = 0; 6150 mz->on_tree = false; 6151 mz->memcg = memcg; 6152 } 6153 memcg->nodeinfo[node] = pn; 6154 return 0; 6155 } 6156 6157 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 6158 { 6159 kfree(memcg->nodeinfo[node]); 6160 } 6161 6162 static struct mem_cgroup *mem_cgroup_alloc(void) 6163 { 6164 struct mem_cgroup *memcg; 6165 size_t size = memcg_size(); 6166 6167 /* Can be very big if nr_node_ids is very big */ 6168 if (size < PAGE_SIZE) 6169 memcg = kzalloc(size, GFP_KERNEL); 6170 else 6171 memcg = vzalloc(size); 6172 6173 if (!memcg) 6174 return NULL; 6175 6176 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 6177 if (!memcg->stat) 6178 goto out_free; 6179 spin_lock_init(&memcg->pcp_counter_lock); 6180 return memcg; 6181 6182 out_free: 6183 if (size < PAGE_SIZE) 6184 kfree(memcg); 6185 else 6186 vfree(memcg); 6187 return NULL; 6188 } 6189 6190 /* 6191 * At destroying mem_cgroup, references from swap_cgroup can remain. 6192 * (scanning all at force_empty is too costly...) 6193 * 6194 * Instead of clearing all references at force_empty, we remember 6195 * the number of reference from swap_cgroup and free mem_cgroup when 6196 * it goes down to 0. 6197 * 6198 * Removal of cgroup itself succeeds regardless of refs from swap. 6199 */ 6200 6201 static void __mem_cgroup_free(struct mem_cgroup *memcg) 6202 { 6203 int node; 6204 size_t size = memcg_size(); 6205 6206 mem_cgroup_remove_from_trees(memcg); 6207 free_css_id(&mem_cgroup_subsys, &memcg->css); 6208 6209 for_each_node(node) 6210 free_mem_cgroup_per_zone_info(memcg, node); 6211 6212 free_percpu(memcg->stat); 6213 6214 /* 6215 * We need to make sure that (at least for now), the jump label 6216 * destruction code runs outside of the cgroup lock. This is because 6217 * get_online_cpus(), which is called from the static_branch update, 6218 * can't be called inside the cgroup_lock. cpusets are the ones 6219 * enforcing this dependency, so if they ever change, we might as well. 6220 * 6221 * schedule_work() will guarantee this happens. Be careful if you need 6222 * to move this code around, and make sure it is outside 6223 * the cgroup_lock. 6224 */ 6225 disarm_static_keys(memcg); 6226 if (size < PAGE_SIZE) 6227 kfree(memcg); 6228 else 6229 vfree(memcg); 6230 } 6231 6232 /* 6233 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 
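Returns NULL for the root memcg and for groups whose res counter has no parent (hierarchy disabled), which is what terminates walks such as the one in mem_cgroup_threshold().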
6234 */ 6235 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) 6236 { 6237 if (!memcg->res.parent) 6238 return NULL; 6239 return mem_cgroup_from_res_counter(memcg->res.parent, res); 6240 } 6241 EXPORT_SYMBOL(parent_mem_cgroup); 6242 6243 static void __init mem_cgroup_soft_limit_tree_init(void) 6244 { 6245 struct mem_cgroup_tree_per_node *rtpn; 6246 struct mem_cgroup_tree_per_zone *rtpz; 6247 int tmp, node, zone; 6248 6249 for_each_node(node) { 6250 tmp = node; 6251 if (!node_state(node, N_NORMAL_MEMORY)) 6252 tmp = -1; 6253 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); 6254 BUG_ON(!rtpn); 6255 6256 soft_limit_tree.rb_tree_per_node[node] = rtpn; 6257 6258 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6259 rtpz = &rtpn->rb_tree_per_zone[zone]; 6260 rtpz->rb_root = RB_ROOT; 6261 spin_lock_init(&rtpz->lock); 6262 } 6263 } 6264 } 6265 6266 static struct cgroup_subsys_state * __ref 6267 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 6268 { 6269 struct mem_cgroup *memcg; 6270 long error = -ENOMEM; 6271 int node; 6272 6273 memcg = mem_cgroup_alloc(); 6274 if (!memcg) 6275 return ERR_PTR(error); 6276 6277 for_each_node(node) 6278 if (alloc_mem_cgroup_per_zone_info(memcg, node)) 6279 goto free_out; 6280 6281 /* root ? */ 6282 if (parent_css == NULL) { 6283 root_mem_cgroup = memcg; 6284 res_counter_init(&memcg->res, NULL); 6285 res_counter_init(&memcg->memsw, NULL); 6286 res_counter_init(&memcg->kmem, NULL); 6287 } 6288 6289 memcg->last_scanned_node = MAX_NUMNODES; 6290 INIT_LIST_HEAD(&memcg->oom_notify); 6291 memcg->move_charge_at_immigrate = 0; 6292 mutex_init(&memcg->thresholds_lock); 6293 spin_lock_init(&memcg->move_lock); 6294 vmpressure_init(&memcg->vmpressure); 6295 6296 return &memcg->css; 6297 6298 free_out: 6299 __mem_cgroup_free(memcg); 6300 return ERR_PTR(error); 6301 } 6302 6303 static int 6304 mem_cgroup_css_online(struct cgroup_subsys_state *css) 6305 { 6306 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6307 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css)); 6308 int error = 0; 6309 6310 if (!parent) 6311 return 0; 6312 6313 mutex_lock(&memcg_create_mutex); 6314 6315 memcg->use_hierarchy = parent->use_hierarchy; 6316 memcg->oom_kill_disable = parent->oom_kill_disable; 6317 memcg->swappiness = mem_cgroup_swappiness(parent); 6318 6319 if (parent->use_hierarchy) { 6320 res_counter_init(&memcg->res, &parent->res); 6321 res_counter_init(&memcg->memsw, &parent->memsw); 6322 res_counter_init(&memcg->kmem, &parent->kmem); 6323 6324 /* 6325 * No need to take a reference to the parent because cgroup 6326 * core guarantees its existence. 6327 */ 6328 } else { 6329 res_counter_init(&memcg->res, NULL); 6330 res_counter_init(&memcg->memsw, NULL); 6331 res_counter_init(&memcg->kmem, NULL); 6332 /* 6333 * Deeper hierachy with use_hierarchy == false doesn't make 6334 * much sense so let cgroup subsystem know about this 6335 * unfortunate state in our controller. 6336 */ 6337 if (parent != root_mem_cgroup) 6338 mem_cgroup_subsys.broken_hierarchy = true; 6339 } 6340 6341 error = memcg_init_kmem(memcg, &mem_cgroup_subsys); 6342 mutex_unlock(&memcg_create_mutex); 6343 return error; 6344 } 6345 6346 /* 6347 * Announce all parents that a group from their hierarchy is gone. 
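Each ancestor then invalidates its cached reclaim iterators, so that no iter->last_visited pointer is trusted once the group disappears.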
6348 */ 6349 static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) 6350 { 6351 struct mem_cgroup *parent = memcg; 6352 6353 while ((parent = parent_mem_cgroup(parent))) 6354 mem_cgroup_iter_invalidate(parent); 6355 6356 /* 6357 * If the root memcg is not hierarchical, we have to check it 6358 * explicitly. 6359 */ 6360 if (!root_mem_cgroup->use_hierarchy) 6361 mem_cgroup_iter_invalidate(root_mem_cgroup); 6362 } 6363 6364 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 6365 { 6366 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6367 6368 kmem_cgroup_css_offline(memcg); 6369 6370 mem_cgroup_invalidate_reclaim_iterators(memcg); 6371 mem_cgroup_reparent_charges(memcg); 6372 mem_cgroup_destroy_all_caches(memcg); 6373 vmpressure_cleanup(&memcg->vmpressure); 6374 } 6375 6376 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 6377 { 6378 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6379 6380 memcg_destroy_kmem(memcg); 6381 __mem_cgroup_free(memcg); 6382 } 6383 6384 #ifdef CONFIG_MMU 6385 /* Handlers for move charge at task migration. */ 6386 #define PRECHARGE_COUNT_AT_ONCE 256 6387 static int mem_cgroup_do_precharge(unsigned long count) 6388 { 6389 int ret = 0; 6390 int batch_count = PRECHARGE_COUNT_AT_ONCE; 6391 struct mem_cgroup *memcg = mc.to; 6392 6393 if (mem_cgroup_is_root(memcg)) { 6394 mc.precharge += count; 6395 /* we don't need css_get for root */ 6396 return ret; 6397 } 6398 /* try to charge at once */ 6399 if (count > 1) { 6400 struct res_counter *dummy; 6401 /* 6402 * "memcg" cannot be under rmdir() because we've already checked 6403 * by cgroup_lock_live_cgroup() that it is not removed and we 6404 * are still under the same cgroup_mutex. So we can postpone 6405 * css_get(). 6406 */ 6407 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy)) 6408 goto one_by_one; 6409 if (do_swap_account && res_counter_charge(&memcg->memsw, 6410 PAGE_SIZE * count, &dummy)) { 6411 res_counter_uncharge(&memcg->res, PAGE_SIZE * count); 6412 goto one_by_one; 6413 } 6414 mc.precharge += count; 6415 return ret; 6416 } 6417 one_by_one: 6418 /* fall back to one by one charge */ 6419 while (count--) { 6420 if (signal_pending(current)) { 6421 ret = -EINTR; 6422 break; 6423 } 6424 if (!batch_count--) { 6425 batch_count = PRECHARGE_COUNT_AT_ONCE; 6426 cond_resched(); 6427 } 6428 ret = __mem_cgroup_try_charge(NULL, 6429 GFP_KERNEL, 1, &memcg, false); 6430 if (ret) 6431 /* mem_cgroup_clear_mc() will do uncharge later */ 6432 return ret; 6433 mc.precharge++; 6434 } 6435 return ret; 6436 } 6437 6438 /** 6439 * get_mctgt_type - get target type of moving charge 6440 * @vma: the vma to which the pte to be checked belongs 6441 * @addr: the address corresponding to the pte to be checked 6442 * @ptent: the pte to be checked 6443 * @target: the pointer where the target page or swap entry will be stored (can be NULL) 6444 * 6445 * Returns 6446 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 6447 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 6448 * move charge. If @target is not NULL, the page is stored in target->page 6449 * with an extra reference taken (callers should handle it). 6450 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 6451 * target for charge migration. If @target is not NULL, the entry is stored 6452 * in target->ent. 6453 * 6454 * Called with the pte lock held.
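The MC_TARGET_* values defined below match the 0/1/2 return codes documented above.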
6455 */ 6456 union mc_target { 6457 struct page *page; 6458 swp_entry_t ent; 6459 }; 6460 6461 enum mc_target_type { 6462 MC_TARGET_NONE = 0, 6463 MC_TARGET_PAGE, 6464 MC_TARGET_SWAP, 6465 }; 6466 6467 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 6468 unsigned long addr, pte_t ptent) 6469 { 6470 struct page *page = vm_normal_page(vma, addr, ptent); 6471 6472 if (!page || !page_mapped(page)) 6473 return NULL; 6474 if (PageAnon(page)) { 6475 /* we don't move shared anon */ 6476 if (!move_anon()) 6477 return NULL; 6478 } else if (!move_file()) 6479 /* we ignore mapcount for file pages */ 6480 return NULL; 6481 if (!get_page_unless_zero(page)) 6482 return NULL; 6483 6484 return page; 6485 } 6486 6487 #ifdef CONFIG_SWAP 6488 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 6489 unsigned long addr, pte_t ptent, swp_entry_t *entry) 6490 { 6491 struct page *page = NULL; 6492 swp_entry_t ent = pte_to_swp_entry(ptent); 6493 6494 if (!move_anon() || non_swap_entry(ent)) 6495 return NULL; 6496 /* 6497 * Because lookup_swap_cache() updates some statistics counter, 6498 * we call find_get_page() with swapper_space directly. 6499 */ 6500 page = find_get_page(swap_address_space(ent), ent.val); 6501 if (do_swap_account) 6502 entry->val = ent.val; 6503 6504 return page; 6505 } 6506 #else 6507 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 6508 unsigned long addr, pte_t ptent, swp_entry_t *entry) 6509 { 6510 return NULL; 6511 } 6512 #endif 6513 6514 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 6515 unsigned long addr, pte_t ptent, swp_entry_t *entry) 6516 { 6517 struct page *page = NULL; 6518 struct address_space *mapping; 6519 pgoff_t pgoff; 6520 6521 if (!vma->vm_file) /* anonymous vma */ 6522 return NULL; 6523 if (!move_file()) 6524 return NULL; 6525 6526 mapping = vma->vm_file->f_mapping; 6527 if (pte_none(ptent)) 6528 pgoff = linear_page_index(vma, addr); 6529 else /* pte_file(ptent) is true */ 6530 pgoff = pte_to_pgoff(ptent); 6531 6532 /* page is moved even if it's not RSS of this task(page-faulted). */ 6533 page = find_get_page(mapping, pgoff); 6534 6535 #ifdef CONFIG_SWAP 6536 /* shmem/tmpfs may report page out on swap: account for that too. */ 6537 if (radix_tree_exceptional_entry(page)) { 6538 swp_entry_t swap = radix_to_swp_entry(page); 6539 if (do_swap_account) 6540 *entry = swap; 6541 page = find_get_page(swap_address_space(swap), swap.val); 6542 } 6543 #endif 6544 return page; 6545 } 6546 6547 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 6548 unsigned long addr, pte_t ptent, union mc_target *target) 6549 { 6550 struct page *page = NULL; 6551 struct page_cgroup *pc; 6552 enum mc_target_type ret = MC_TARGET_NONE; 6553 swp_entry_t ent = { .val = 0 }; 6554 6555 if (pte_present(ptent)) 6556 page = mc_handle_present_pte(vma, addr, ptent); 6557 else if (is_swap_pte(ptent)) 6558 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 6559 else if (pte_none(ptent) || pte_file(ptent)) 6560 page = mc_handle_file_pte(vma, addr, ptent, &ent); 6561 6562 if (!page && !ent.val) 6563 return ret; 6564 if (page) { 6565 pc = lookup_page_cgroup(page); 6566 /* 6567 * Do only loose check w/o page_cgroup lock. 6568 * mem_cgroup_move_account() checks the pc is valid or not under 6569 * the lock. 
6570 */ 6571 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 6572 ret = MC_TARGET_PAGE; 6573 if (target) 6574 target->page = page; 6575 } 6576 if (!ret || !target) 6577 put_page(page); 6578 } 6579 /* There is a swap entry and a page doesn't exist or isn't charged */ 6580 if (ent.val && !ret && 6581 css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) { 6582 ret = MC_TARGET_SWAP; 6583 if (target) 6584 target->ent = ent; 6585 } 6586 return ret; 6587 } 6588 6589 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6590 /* 6591 * We don't consider swapping or file mapped pages because THP does not 6592 * support them for now. 6593 * Caller should make sure that pmd_trans_huge(pmd) is true. 6594 */ 6595 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 6596 unsigned long addr, pmd_t pmd, union mc_target *target) 6597 { 6598 struct page *page = NULL; 6599 struct page_cgroup *pc; 6600 enum mc_target_type ret = MC_TARGET_NONE; 6601 6602 page = pmd_page(pmd); 6603 VM_BUG_ON(!page || !PageHead(page)); 6604 if (!move_anon()) 6605 return ret; 6606 pc = lookup_page_cgroup(page); 6607 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { 6608 ret = MC_TARGET_PAGE; 6609 if (target) { 6610 get_page(page); 6611 target->page = page; 6612 } 6613 } 6614 return ret; 6615 } 6616 #else 6617 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 6618 unsigned long addr, pmd_t pmd, union mc_target *target) 6619 { 6620 return MC_TARGET_NONE; 6621 } 6622 #endif 6623 6624 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 6625 unsigned long addr, unsigned long end, 6626 struct mm_walk *walk) 6627 { 6628 struct vm_area_struct *vma = walk->private; 6629 pte_t *pte; 6630 spinlock_t *ptl; 6631 6632 if (pmd_trans_huge_lock(pmd, vma) == 1) { 6633 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 6634 mc.precharge += HPAGE_PMD_NR; 6635 spin_unlock(&vma->vm_mm->page_table_lock); 6636 return 0; 6637 } 6638 6639 if (pmd_trans_unstable(pmd)) 6640 return 0; 6641 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6642 for (; addr != end; pte++, addr += PAGE_SIZE) 6643 if (get_mctgt_type(vma, addr, *pte, NULL)) 6644 mc.precharge++; /* increment precharge temporarily */ 6645 pte_unmap_unlock(pte - 1, ptl); 6646 cond_resched(); 6647 6648 return 0; 6649 } 6650 6651 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 6652 { 6653 unsigned long precharge; 6654 struct vm_area_struct *vma; 6655 6656 down_read(&mm->mmap_sem); 6657 for (vma = mm->mmap; vma; vma = vma->vm_next) { 6658 struct mm_walk mem_cgroup_count_precharge_walk = { 6659 .pmd_entry = mem_cgroup_count_precharge_pte_range, 6660 .mm = mm, 6661 .private = vma, 6662 }; 6663 if (is_vm_hugetlb_page(vma)) 6664 continue; 6665 walk_page_range(vma->vm_start, vma->vm_end, 6666 &mem_cgroup_count_precharge_walk); 6667 } 6668 up_read(&mm->mmap_sem); 6669 6670 precharge = mc.precharge; 6671 mc.precharge = 0; 6672 6673 return precharge; 6674 } 6675 6676 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 6677 { 6678 unsigned long precharge = mem_cgroup_count_precharge(mm); 6679 6680 VM_BUG_ON(mc.moving_task); 6681 mc.moving_task = current; 6682 return mem_cgroup_do_precharge(precharge); 6683 } 6684 6685 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. 
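Called from mem_cgroup_clear_mc() and also directly from mem_cgroup_move_charge() when the mmap_sem trylock fails.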
*/ 6686 static void __mem_cgroup_clear_mc(void) 6687 { 6688 struct mem_cgroup *from = mc.from; 6689 struct mem_cgroup *to = mc.to; 6690 int i; 6691 6692 /* we must uncharge all the leftover precharges from mc.to */ 6693 if (mc.precharge) { 6694 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 6695 mc.precharge = 0; 6696 } 6697 /* 6698 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 6699 * we must uncharge here. 6700 */ 6701 if (mc.moved_charge) { 6702 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 6703 mc.moved_charge = 0; 6704 } 6705 /* we must fixup refcnts and charges */ 6706 if (mc.moved_swap) { 6707 /* uncharge swap account from the old cgroup */ 6708 if (!mem_cgroup_is_root(mc.from)) 6709 res_counter_uncharge(&mc.from->memsw, 6710 PAGE_SIZE * mc.moved_swap); 6711 6712 for (i = 0; i < mc.moved_swap; i++) 6713 css_put(&mc.from->css); 6714 6715 if (!mem_cgroup_is_root(mc.to)) { 6716 /* 6717 * we charged both to->res and to->memsw, so we should 6718 * uncharge to->res. 6719 */ 6720 res_counter_uncharge(&mc.to->res, 6721 PAGE_SIZE * mc.moved_swap); 6722 } 6723 /* we've already done css_get(mc.to) */ 6724 mc.moved_swap = 0; 6725 } 6726 memcg_oom_recover(from); 6727 memcg_oom_recover(to); 6728 wake_up_all(&mc.waitq); 6729 } 6730 6731 static void mem_cgroup_clear_mc(void) 6732 { 6733 struct mem_cgroup *from = mc.from; 6734 6735 /* 6736 * we must clear moving_task before waking up waiters at the end of 6737 * task migration. 6738 */ 6739 mc.moving_task = NULL; 6740 __mem_cgroup_clear_mc(); 6741 spin_lock(&mc.lock); 6742 mc.from = NULL; 6743 mc.to = NULL; 6744 spin_unlock(&mc.lock); 6745 mem_cgroup_end_move(from); 6746 } 6747 6748 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 6749 struct cgroup_taskset *tset) 6750 { 6751 struct task_struct *p = cgroup_taskset_first(tset); 6752 int ret = 0; 6753 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6754 unsigned long move_charge_at_immigrate; 6755 6756 /* 6757 * We are now committed to this value, whatever it is. Changes in this 6758 * tunable will only affect upcoming migrations, not the current one. 6759 * So we need to save it, and keep it going.
6760 */ 6761 move_charge_at_immigrate = memcg->move_charge_at_immigrate; 6762 if (move_charge_at_immigrate) { 6763 struct mm_struct *mm; 6764 struct mem_cgroup *from = mem_cgroup_from_task(p); 6765 6766 VM_BUG_ON(from == memcg); 6767 6768 mm = get_task_mm(p); 6769 if (!mm) 6770 return 0; 6771 /* We move charges only when we move a owner of the mm */ 6772 if (mm->owner == p) { 6773 VM_BUG_ON(mc.from); 6774 VM_BUG_ON(mc.to); 6775 VM_BUG_ON(mc.precharge); 6776 VM_BUG_ON(mc.moved_charge); 6777 VM_BUG_ON(mc.moved_swap); 6778 mem_cgroup_start_move(from); 6779 spin_lock(&mc.lock); 6780 mc.from = from; 6781 mc.to = memcg; 6782 mc.immigrate_flags = move_charge_at_immigrate; 6783 spin_unlock(&mc.lock); 6784 /* We set mc.moving_task later */ 6785 6786 ret = mem_cgroup_precharge_mc(mm); 6787 if (ret) 6788 mem_cgroup_clear_mc(); 6789 } 6790 mmput(mm); 6791 } 6792 return ret; 6793 } 6794 6795 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 6796 struct cgroup_taskset *tset) 6797 { 6798 mem_cgroup_clear_mc(); 6799 } 6800 6801 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6802 unsigned long addr, unsigned long end, 6803 struct mm_walk *walk) 6804 { 6805 int ret = 0; 6806 struct vm_area_struct *vma = walk->private; 6807 pte_t *pte; 6808 spinlock_t *ptl; 6809 enum mc_target_type target_type; 6810 union mc_target target; 6811 struct page *page; 6812 struct page_cgroup *pc; 6813 6814 /* 6815 * We don't take compound_lock() here but no race with splitting thp 6816 * happens because: 6817 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not 6818 * under splitting, which means there's no concurrent thp split, 6819 * - if another thread runs into split_huge_page() just after we 6820 * entered this if-block, the thread must wait for page table lock 6821 * to be unlocked in __split_huge_page_splitting(), where the main 6822 * part of thp split is not executed yet. 6823 */ 6824 if (pmd_trans_huge_lock(pmd, vma) == 1) { 6825 if (mc.precharge < HPAGE_PMD_NR) { 6826 spin_unlock(&vma->vm_mm->page_table_lock); 6827 return 0; 6828 } 6829 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6830 if (target_type == MC_TARGET_PAGE) { 6831 page = target.page; 6832 if (!isolate_lru_page(page)) { 6833 pc = lookup_page_cgroup(page); 6834 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, 6835 pc, mc.from, mc.to)) { 6836 mc.precharge -= HPAGE_PMD_NR; 6837 mc.moved_charge += HPAGE_PMD_NR; 6838 } 6839 putback_lru_page(page); 6840 } 6841 put_page(page); 6842 } 6843 spin_unlock(&vma->vm_mm->page_table_lock); 6844 return 0; 6845 } 6846 6847 if (pmd_trans_unstable(pmd)) 6848 return 0; 6849 retry: 6850 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6851 for (; addr != end; addr += PAGE_SIZE) { 6852 pte_t ptent = *(pte++); 6853 swp_entry_t ent; 6854 6855 if (!mc.precharge) 6856 break; 6857 6858 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6859 case MC_TARGET_PAGE: 6860 page = target.page; 6861 if (isolate_lru_page(page)) 6862 goto put; 6863 pc = lookup_page_cgroup(page); 6864 if (!mem_cgroup_move_account(page, 1, pc, 6865 mc.from, mc.to)) { 6866 mc.precharge--; 6867 /* we uncharge from mc.from later. */ 6868 mc.moved_charge++; 6869 } 6870 putback_lru_page(page); 6871 put: /* get_mctgt_type() gets the page */ 6872 put_page(page); 6873 break; 6874 case MC_TARGET_SWAP: 6875 ent = target.ent; 6876 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6877 mc.precharge--; 6878 /* we fixup refcnts and charges later. 
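see __mem_cgroup_clear_mc(), which uncharges mc.from->memsw and drops one mc.from css reference for each entry accounted in moved_swap.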
*/ 6879 mc.moved_swap++; 6880 } 6881 break; 6882 default: 6883 break; 6884 } 6885 } 6886 pte_unmap_unlock(pte - 1, ptl); 6887 cond_resched(); 6888 6889 if (addr != end) { 6890 /* 6891 * We have consumed all precharges we got in can_attach(). 6892 * We try to charge one by one, but don't do any additional 6893 * charges to mc.to if we have already failed to charge once in the 6894 * attach() phase. 6895 */ 6896 ret = mem_cgroup_do_precharge(1); 6897 if (!ret) 6898 goto retry; 6899 } 6900 6901 return ret; 6902 } 6903 6904 static void mem_cgroup_move_charge(struct mm_struct *mm) 6905 { 6906 struct vm_area_struct *vma; 6907 6908 lru_add_drain_all(); 6909 retry: 6910 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 6911 /* 6912 * Someone holding the mmap_sem might be waiting on our 6913 * waitq. So we cancel all extra charges, wake up all waiters, 6914 * and retry. Because we cancel precharges, we might not be able 6915 * to move enough charges, but moving charge is a best-effort 6916 * feature anyway, so it wouldn't be a big problem. 6917 */ 6918 __mem_cgroup_clear_mc(); 6919 cond_resched(); 6920 goto retry; 6921 } 6922 for (vma = mm->mmap; vma; vma = vma->vm_next) { 6923 int ret; 6924 struct mm_walk mem_cgroup_move_charge_walk = { 6925 .pmd_entry = mem_cgroup_move_charge_pte_range, 6926 .mm = mm, 6927 .private = vma, 6928 }; 6929 if (is_vm_hugetlb_page(vma)) 6930 continue; 6931 ret = walk_page_range(vma->vm_start, vma->vm_end, 6932 &mem_cgroup_move_charge_walk); 6933 if (ret) 6934 /* 6935 * This means we have consumed all precharges and failed to 6936 * do an additional charge. Just abandon here. 6937 */ 6938 break; 6939 } 6940 up_read(&mm->mmap_sem); 6941 } 6942 6943 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 6944 struct cgroup_taskset *tset) 6945 { 6946 struct task_struct *p = cgroup_taskset_first(tset); 6947 struct mm_struct *mm = get_task_mm(p); 6948 6949 if (mm) { 6950 if (mc.to) 6951 mem_cgroup_move_charge(mm); 6952 mmput(mm); 6953 } 6954 if (mc.to) 6955 mem_cgroup_clear_mc(); 6956 } 6957 #else /* !CONFIG_MMU */ 6958 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 6959 struct cgroup_taskset *tset) 6960 { 6961 return 0; 6962 } 6963 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 6964 struct cgroup_taskset *tset) 6965 { 6966 } 6967 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 6968 struct cgroup_taskset *tset) 6969 { 6970 } 6971 #endif 6972 6973 /* 6974 * Cgroup retains root cgroups across [un]mount cycles, making it necessary 6975 * to verify the sane_behavior flag on each mount attempt. 6976 */ 6977 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 6978 { 6979 /* 6980 * use_hierarchy is forced with sane_behavior. cgroup core 6981 * guarantees that @root doesn't have any children, so turning it 6982 * on for the root memcg is enough.
6983 */ 6984 if (cgroup_sane_behavior(root_css->cgroup)) 6985 mem_cgroup_from_css(root_css)->use_hierarchy = true; 6986 } 6987 6988 struct cgroup_subsys mem_cgroup_subsys = { 6989 .name = "memory", 6990 .subsys_id = mem_cgroup_subsys_id, 6991 .css_alloc = mem_cgroup_css_alloc, 6992 .css_online = mem_cgroup_css_online, 6993 .css_offline = mem_cgroup_css_offline, 6994 .css_free = mem_cgroup_css_free, 6995 .can_attach = mem_cgroup_can_attach, 6996 .cancel_attach = mem_cgroup_cancel_attach, 6997 .attach = mem_cgroup_move_task, 6998 .bind = mem_cgroup_bind, 6999 .base_cftypes = mem_cgroup_files, 7000 .early_init = 0, 7001 .use_id = 1, 7002 }; 7003 7004 #ifdef CONFIG_MEMCG_SWAP 7005 static int __init enable_swap_account(char *s) 7006 { 7007 if (!strcmp(s, "1")) 7008 really_do_swap_account = 1; 7009 else if (!strcmp(s, "0")) 7010 really_do_swap_account = 0; 7011 return 1; 7012 } 7013 __setup("swapaccount=", enable_swap_account); 7014 7015 static void __init memsw_file_init(void) 7016 { 7017 WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files)); 7018 } 7019 7020 static void __init enable_swap_cgroup(void) 7021 { 7022 if (!mem_cgroup_disabled() && really_do_swap_account) { 7023 do_swap_account = 1; 7024 memsw_file_init(); 7025 } 7026 } 7027 7028 #else 7029 static void __init enable_swap_cgroup(void) 7030 { 7031 } 7032 #endif 7033 7034 /* 7035 * subsys_initcall() for memory controller. 7036 * 7037 * Some parts like hotcpu_notifier() have to be initialized from this context 7038 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 7039 * everything that doesn't depend on a specific mem_cgroup structure should 7040 * be initialized from here. 7041 */ 7042 static int __init mem_cgroup_init(void) 7043 { 7044 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 7045 enable_swap_cgroup(); 7046 mem_cgroup_soft_limit_tree_init(); 7047 memcg_stock_init(); 7048 return 0; 7049 } 7050 subsys_initcall(mem_cgroup_init); 7051