/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout; with
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger periodic events, which is simpler and cheaper than using jiffies
 * etc. for that purpose.
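 *
 * Illustrative sketch of the mechanism (see mem_cgroup_event_ratelimit()
 * below): every (un)charge adds to nr_page_events, and an event class fires
 * once that counter passes its per-cpu target, which is then re-armed:
 *
 *	val  = __this_cpu_read(memcg->stat->nr_page_events);
 *	next = __this_cpu_read(memcg->stat->targets[target]);
 *	if ((long)next - (long)val < 0)
 *		... fire, then targets[target] = val + <class>_EVENTS_TARGET ...
 *
 * so thresholds are re-checked roughly every THRESHOLDS_EVENTS_TARGET (128)
 * page events, soft limits every SOFTLIMIT_EVENTS_TARGET (1024), and so on.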
117 */ 118 enum mem_cgroup_events_target { 119 MEM_CGROUP_TARGET_THRESH, 120 MEM_CGROUP_TARGET_SOFTLIMIT, 121 MEM_CGROUP_TARGET_NUMAINFO, 122 MEM_CGROUP_NTARGETS, 123 }; 124 #define THRESHOLDS_EVENTS_TARGET 128 125 #define SOFTLIMIT_EVENTS_TARGET 1024 126 #define NUMAINFO_EVENTS_TARGET 1024 127 128 struct mem_cgroup_stat_cpu { 129 long count[MEM_CGROUP_STAT_NSTATS]; 130 unsigned long events[MEMCG_NR_EVENTS]; 131 unsigned long nr_page_events; 132 unsigned long targets[MEM_CGROUP_NTARGETS]; 133 }; 134 135 struct reclaim_iter { 136 struct mem_cgroup *position; 137 /* scan generation, increased every round-trip */ 138 unsigned int generation; 139 }; 140 141 /* 142 * per-zone information in memory controller. 143 */ 144 struct mem_cgroup_per_zone { 145 struct lruvec lruvec; 146 unsigned long lru_size[NR_LRU_LISTS]; 147 148 struct reclaim_iter iter[DEF_PRIORITY + 1]; 149 150 struct rb_node tree_node; /* RB tree node */ 151 unsigned long usage_in_excess;/* Set to the value by which */ 152 /* the soft limit is exceeded*/ 153 bool on_tree; 154 struct mem_cgroup *memcg; /* Back pointer, we cannot */ 155 /* use container_of */ 156 }; 157 158 struct mem_cgroup_per_node { 159 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 160 }; 161 162 /* 163 * Cgroups above their limits are maintained in a RB-Tree, independent of 164 * their hierarchy representation 165 */ 166 167 struct mem_cgroup_tree_per_zone { 168 struct rb_root rb_root; 169 spinlock_t lock; 170 }; 171 172 struct mem_cgroup_tree_per_node { 173 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 174 }; 175 176 struct mem_cgroup_tree { 177 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 178 }; 179 180 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 181 182 struct mem_cgroup_threshold { 183 struct eventfd_ctx *eventfd; 184 unsigned long threshold; 185 }; 186 187 /* For threshold */ 188 struct mem_cgroup_threshold_ary { 189 /* An array index points to threshold just below or equal to usage. */ 190 int current_threshold; 191 /* Size of entries[] */ 192 unsigned int size; 193 /* Array of thresholds */ 194 struct mem_cgroup_threshold entries[0]; 195 }; 196 197 struct mem_cgroup_thresholds { 198 /* Primary thresholds array */ 199 struct mem_cgroup_threshold_ary *primary; 200 /* 201 * Spare threshold array. 202 * This is needed to make mem_cgroup_unregister_event() "never fail". 203 * It must be able to store at least primary->size - 1 entries. 204 */ 205 struct mem_cgroup_threshold_ary *spare; 206 }; 207 208 /* for OOM */ 209 struct mem_cgroup_eventfd_list { 210 struct list_head list; 211 struct eventfd_ctx *eventfd; 212 }; 213 214 /* 215 * cgroup_event represents events which userspace want to receive. 216 */ 217 struct mem_cgroup_event { 218 /* 219 * memcg which the event belongs to. 220 */ 221 struct mem_cgroup *memcg; 222 /* 223 * eventfd to signal userspace about the event. 224 */ 225 struct eventfd_ctx *eventfd; 226 /* 227 * Each of these stored in a list by the cgroup. 228 */ 229 struct list_head list; 230 /* 231 * register_event() callback will be used to add new userspace 232 * waiter for changes related to this event. Use eventfd_signal() 233 * on eventfd to send notification to userspace. 234 */ 235 int (*register_event)(struct mem_cgroup *memcg, 236 struct eventfd_ctx *eventfd, const char *args); 237 /* 238 * unregister_event() callback will be called when userspace closes 239 * the eventfd or on cgroup removing. This callback must be set, 240 * if you want provide notification functionality. 
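	 * It must never fail: it runs from the eventfd-release and
	 * cgroup-removal paths (via the remove work below), where an
	 * error could not be reported to anyone anyway.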
241 */ 242 void (*unregister_event)(struct mem_cgroup *memcg, 243 struct eventfd_ctx *eventfd); 244 /* 245 * All fields below needed to unregister event when 246 * userspace closes eventfd. 247 */ 248 poll_table pt; 249 wait_queue_head_t *wqh; 250 wait_queue_t wait; 251 struct work_struct remove; 252 }; 253 254 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 255 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 256 257 /* 258 * The memory controller data structure. The memory controller controls both 259 * page cache and RSS per cgroup. We would eventually like to provide 260 * statistics based on the statistics developed by Rik Van Riel for clock-pro, 261 * to help the administrator determine what knobs to tune. 262 */ 263 struct mem_cgroup { 264 struct cgroup_subsys_state css; 265 266 /* Accounted resources */ 267 struct page_counter memory; 268 struct page_counter memsw; 269 struct page_counter kmem; 270 271 /* Normal memory consumption range */ 272 unsigned long low; 273 unsigned long high; 274 275 unsigned long soft_limit; 276 277 /* vmpressure notifications */ 278 struct vmpressure vmpressure; 279 280 /* css_online() has been completed */ 281 int initialized; 282 283 /* 284 * Should the accounting and control be hierarchical, per subtree? 285 */ 286 bool use_hierarchy; 287 288 bool oom_lock; 289 atomic_t under_oom; 290 atomic_t oom_wakeups; 291 292 int swappiness; 293 /* OOM-Killer disable */ 294 int oom_kill_disable; 295 296 /* protect arrays of thresholds */ 297 struct mutex thresholds_lock; 298 299 /* thresholds for memory usage. RCU-protected */ 300 struct mem_cgroup_thresholds thresholds; 301 302 /* thresholds for mem+swap usage. RCU-protected */ 303 struct mem_cgroup_thresholds memsw_thresholds; 304 305 /* For oom notifier event fd */ 306 struct list_head oom_notify; 307 308 /* 309 * Should we move charges of a task when a task is moved into this 310 * mem_cgroup ? And what type of charges should we move ? 311 */ 312 unsigned long move_charge_at_immigrate; 313 /* 314 * set > 0 if pages under this cgroup are moving to other cgroup. 315 */ 316 atomic_t moving_account; 317 /* taken only while moving_account > 0 */ 318 spinlock_t move_lock; 319 struct task_struct *move_lock_task; 320 unsigned long move_lock_flags; 321 /* 322 * percpu counter. 323 */ 324 struct mem_cgroup_stat_cpu __percpu *stat; 325 /* 326 * used when a cpu is offlined or other synchronizations 327 * See mem_cgroup_read_stat(). 328 */ 329 struct mem_cgroup_stat_cpu nocpu_base; 330 spinlock_t pcp_counter_lock; 331 332 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) 333 struct cg_proto tcp_mem; 334 #endif 335 #if defined(CONFIG_MEMCG_KMEM) 336 /* Index in the kmem_cache->memcg_params.memcg_caches array */ 337 int kmemcg_id; 338 bool kmem_acct_activated; 339 bool kmem_acct_active; 340 #endif 341 342 int last_scanned_node; 343 #if MAX_NUMNODES > 1 344 nodemask_t scan_nodes; 345 atomic_t numainfo_events; 346 atomic_t numainfo_updating; 347 #endif 348 349 /* List of events which userspace want to receive */ 350 struct list_head event_list; 351 spinlock_t event_list_lock; 352 353 struct mem_cgroup_per_node *nodeinfo[0]; 354 /* WARNING: nodeinfo must be the last member here */ 355 }; 356 357 #ifdef CONFIG_MEMCG_KMEM 358 bool memcg_kmem_is_active(struct mem_cgroup *memcg) 359 { 360 return memcg->kmem_acct_active; 361 } 362 #endif 363 364 /* Stuffs for move charges at task migration. */ 365 /* 366 * Types of charges to be moved. 
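 *
 * These are the bit values userspace writes to the cgroup v1 file
 * memory.move_charge_at_immigrate; e.g. writing 3 selects
 * MOVE_ANON | MOVE_FILE, so both anonymous and file/shmem charges follow
 * a task that is migrated into the cgroup.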
367 */ 368 #define MOVE_ANON 0x1U 369 #define MOVE_FILE 0x2U 370 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 371 372 /* "mc" and its members are protected by cgroup_mutex */ 373 static struct move_charge_struct { 374 spinlock_t lock; /* for from, to */ 375 struct mem_cgroup *from; 376 struct mem_cgroup *to; 377 unsigned long flags; 378 unsigned long precharge; 379 unsigned long moved_charge; 380 unsigned long moved_swap; 381 struct task_struct *moving_task; /* a task moving charges */ 382 wait_queue_head_t waitq; /* a waitq for other context */ 383 } mc = { 384 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 385 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 386 }; 387 388 /* 389 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 390 * limit reclaim to prevent infinite loops, if they ever occur. 391 */ 392 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 393 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 394 395 enum charge_type { 396 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 397 MEM_CGROUP_CHARGE_TYPE_ANON, 398 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 399 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 400 NR_CHARGE_TYPE, 401 }; 402 403 /* for encoding cft->private value on file */ 404 enum res_type { 405 _MEM, 406 _MEMSWAP, 407 _OOM_TYPE, 408 _KMEM, 409 }; 410 411 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 412 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 413 #define MEMFILE_ATTR(val) ((val) & 0xffff) 414 /* Used for OOM nofiier */ 415 #define OOM_CONTROL (0) 416 417 /* 418 * The memcg_create_mutex will be held whenever a new cgroup is created. 419 * As a consequence, any change that needs to protect against new child cgroups 420 * appearing has to hold it as well. 421 */ 422 static DEFINE_MUTEX(memcg_create_mutex); 423 424 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s) 425 { 426 return s ? container_of(s, struct mem_cgroup, css) : NULL; 427 } 428 429 /* Some nice accessors for the vmpressure. */ 430 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 431 { 432 if (!memcg) 433 memcg = root_mem_cgroup; 434 return &memcg->vmpressure; 435 } 436 437 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 438 { 439 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 440 } 441 442 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) 443 { 444 return (memcg == root_mem_cgroup); 445 } 446 447 /* 448 * We restrict the id in the range of [1, 65535], so it can fit into 449 * an unsigned short. 450 */ 451 #define MEM_CGROUP_ID_MAX USHRT_MAX 452 453 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) 454 { 455 return memcg->css.id; 456 } 457 458 /* 459 * A helper function to get mem_cgroup from ID. must be called under 460 * rcu_read_lock(). The caller is responsible for calling 461 * css_tryget_online() if the mem_cgroup is used for charging. (dropping 462 * refcnt from swap can be called against removed memcg.) 
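 *
 * Typical lookup pattern (illustrative); a NULL result after the tryget
 * means the cgroup is already on its way out:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();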
463 */ 464 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 465 { 466 struct cgroup_subsys_state *css; 467 468 css = css_from_id(id, &memory_cgrp_subsys); 469 return mem_cgroup_from_css(css); 470 } 471 472 /* Writing them here to avoid exposing memcg's inner layout */ 473 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) 474 475 void sock_update_memcg(struct sock *sk) 476 { 477 if (mem_cgroup_sockets_enabled) { 478 struct mem_cgroup *memcg; 479 struct cg_proto *cg_proto; 480 481 BUG_ON(!sk->sk_prot->proto_cgroup); 482 483 /* Socket cloning can throw us here with sk_cgrp already 484 * filled. It won't however, necessarily happen from 485 * process context. So the test for root memcg given 486 * the current task's memcg won't help us in this case. 487 * 488 * Respecting the original socket's memcg is a better 489 * decision in this case. 490 */ 491 if (sk->sk_cgrp) { 492 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); 493 css_get(&sk->sk_cgrp->memcg->css); 494 return; 495 } 496 497 rcu_read_lock(); 498 memcg = mem_cgroup_from_task(current); 499 cg_proto = sk->sk_prot->proto_cgroup(memcg); 500 if (!mem_cgroup_is_root(memcg) && 501 memcg_proto_active(cg_proto) && 502 css_tryget_online(&memcg->css)) { 503 sk->sk_cgrp = cg_proto; 504 } 505 rcu_read_unlock(); 506 } 507 } 508 EXPORT_SYMBOL(sock_update_memcg); 509 510 void sock_release_memcg(struct sock *sk) 511 { 512 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 513 struct mem_cgroup *memcg; 514 WARN_ON(!sk->sk_cgrp->memcg); 515 memcg = sk->sk_cgrp->memcg; 516 css_put(&sk->sk_cgrp->memcg->css); 517 } 518 } 519 520 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) 521 { 522 if (!memcg || mem_cgroup_is_root(memcg)) 523 return NULL; 524 525 return &memcg->tcp_mem; 526 } 527 EXPORT_SYMBOL(tcp_proto_cgroup); 528 529 #endif 530 531 #ifdef CONFIG_MEMCG_KMEM 532 /* 533 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. 534 * The main reason for not using cgroup id for this: 535 * this works better in sparse environments, where we have a lot of memcgs, 536 * but only a few kmem-limited. Or also, if we have, for instance, 200 537 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 538 * 200 entry array for that. 539 * 540 * The current size of the caches array is stored in memcg_nr_cache_ids. It 541 * will double each time we have to increase it. 542 */ 543 static DEFINE_IDA(memcg_cache_ida); 544 int memcg_nr_cache_ids; 545 546 /* Protects memcg_nr_cache_ids */ 547 static DECLARE_RWSEM(memcg_cache_ids_sem); 548 549 void memcg_get_cache_ids(void) 550 { 551 down_read(&memcg_cache_ids_sem); 552 } 553 554 void memcg_put_cache_ids(void) 555 { 556 up_read(&memcg_cache_ids_sem); 557 } 558 559 /* 560 * MIN_SIZE is different than 1, because we would like to avoid going through 561 * the alloc/free process all the time. In a small machine, 4 kmem-limited 562 * cgroups is a reasonable guess. In the future, it could be a parameter or 563 * tunable, but that is strictly not necessary. 564 * 565 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 566 * this constant directly from cgroup, but it is understandable that this is 567 * better kept as an internal representation in cgroup.c. In any case, the 568 * cgrp_id space is not getting any smaller, and we don't have to necessarily 569 * increase ours as well if it increases. 
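 *
 * In practice the array is expected to grow as 4, 8, 16, ... entries,
 * doubling on demand (the current size is memcg_nr_cache_ids above), and
 * is capped at MEMCG_CACHES_MAX_SIZE == MEM_CGROUP_ID_MAX.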
570 */ 571 #define MEMCG_CACHES_MIN_SIZE 4 572 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 573 574 /* 575 * A lot of the calls to the cache allocation functions are expected to be 576 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are 577 * conditional to this static branch, we'll have to allow modules that does 578 * kmem_cache_alloc and the such to see this symbol as well 579 */ 580 struct static_key memcg_kmem_enabled_key; 581 EXPORT_SYMBOL(memcg_kmem_enabled_key); 582 583 #endif /* CONFIG_MEMCG_KMEM */ 584 585 static struct mem_cgroup_per_zone * 586 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) 587 { 588 int nid = zone_to_nid(zone); 589 int zid = zone_idx(zone); 590 591 return &memcg->nodeinfo[nid]->zoneinfo[zid]; 592 } 593 594 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) 595 { 596 return &memcg->css; 597 } 598 599 static struct mem_cgroup_per_zone * 600 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page) 601 { 602 int nid = page_to_nid(page); 603 int zid = page_zonenum(page); 604 605 return &memcg->nodeinfo[nid]->zoneinfo[zid]; 606 } 607 608 static struct mem_cgroup_tree_per_zone * 609 soft_limit_tree_node_zone(int nid, int zid) 610 { 611 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 612 } 613 614 static struct mem_cgroup_tree_per_zone * 615 soft_limit_tree_from_page(struct page *page) 616 { 617 int nid = page_to_nid(page); 618 int zid = page_zonenum(page); 619 620 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 621 } 622 623 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz, 624 struct mem_cgroup_tree_per_zone *mctz, 625 unsigned long new_usage_in_excess) 626 { 627 struct rb_node **p = &mctz->rb_root.rb_node; 628 struct rb_node *parent = NULL; 629 struct mem_cgroup_per_zone *mz_node; 630 631 if (mz->on_tree) 632 return; 633 634 mz->usage_in_excess = new_usage_in_excess; 635 if (!mz->usage_in_excess) 636 return; 637 while (*p) { 638 parent = *p; 639 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 640 tree_node); 641 if (mz->usage_in_excess < mz_node->usage_in_excess) 642 p = &(*p)->rb_left; 643 /* 644 * We can't avoid mem cgroups that are over their soft 645 * limit by the same amount 646 */ 647 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 648 p = &(*p)->rb_right; 649 } 650 rb_link_node(&mz->tree_node, parent, p); 651 rb_insert_color(&mz->tree_node, &mctz->rb_root); 652 mz->on_tree = true; 653 } 654 655 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, 656 struct mem_cgroup_tree_per_zone *mctz) 657 { 658 if (!mz->on_tree) 659 return; 660 rb_erase(&mz->tree_node, &mctz->rb_root); 661 mz->on_tree = false; 662 } 663 664 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, 665 struct mem_cgroup_tree_per_zone *mctz) 666 { 667 unsigned long flags; 668 669 spin_lock_irqsave(&mctz->lock, flags); 670 __mem_cgroup_remove_exceeded(mz, mctz); 671 spin_unlock_irqrestore(&mctz->lock, flags); 672 } 673 674 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 675 { 676 unsigned long nr_pages = page_counter_read(&memcg->memory); 677 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 678 unsigned long excess = 0; 679 680 if (nr_pages > soft_limit) 681 excess = nr_pages - soft_limit; 682 683 return excess; 684 } 685 686 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 687 { 688 unsigned long excess; 689 struct mem_cgroup_per_zone *mz; 690 
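	/* soft limit trees are per (node, zone), keyed by usage excess */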
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * the memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Implementation note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement a "quick" read; this trades precision for
 * reading cost, and something similar could be done for the memcg counters.
 *
 * But this _read() function is currently only used for the user interface.
 * Userspace accounts memory per cgroup and always wants an exact value, so
 * even a quick-and-fuzzy read would still have to visit all online cpus and
 * sum them up. For now, therefore, no such synchronization is implemented
 * (apart from what is needed for cpu hotplug).
 *
 * If kernel-internal users appear that can live with approximate values,
 * and summing over all cpus becomes a bottleneck in some common workload,
 * a threshold-and-synchronization scheme like vmstat's should be added.
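 *
 * As implemented below, one such read costs an addition per online cpu
 * plus a pcp_counter_lock round trip for the hotplug remainder kept in
 * nocpu_base; cheap enough for occasional userspace reads, but not
 * something to put on a hot kernel path.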
791 */ 792 static long mem_cgroup_read_stat(struct mem_cgroup *memcg, 793 enum mem_cgroup_stat_index idx) 794 { 795 long val = 0; 796 int cpu; 797 798 get_online_cpus(); 799 for_each_online_cpu(cpu) 800 val += per_cpu(memcg->stat->count[idx], cpu); 801 #ifdef CONFIG_HOTPLUG_CPU 802 spin_lock(&memcg->pcp_counter_lock); 803 val += memcg->nocpu_base.count[idx]; 804 spin_unlock(&memcg->pcp_counter_lock); 805 #endif 806 put_online_cpus(); 807 return val; 808 } 809 810 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, 811 enum mem_cgroup_events_index idx) 812 { 813 unsigned long val = 0; 814 int cpu; 815 816 get_online_cpus(); 817 for_each_online_cpu(cpu) 818 val += per_cpu(memcg->stat->events[idx], cpu); 819 #ifdef CONFIG_HOTPLUG_CPU 820 spin_lock(&memcg->pcp_counter_lock); 821 val += memcg->nocpu_base.events[idx]; 822 spin_unlock(&memcg->pcp_counter_lock); 823 #endif 824 put_online_cpus(); 825 return val; 826 } 827 828 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 829 struct page *page, 830 int nr_pages) 831 { 832 /* 833 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is 834 * counted as CACHE even if it's on ANON LRU. 835 */ 836 if (PageAnon(page)) 837 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], 838 nr_pages); 839 else 840 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], 841 nr_pages); 842 843 if (PageTransHuge(page)) 844 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 845 nr_pages); 846 847 /* pagein of a big page is an event. So, ignore page size */ 848 if (nr_pages > 0) 849 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 850 else { 851 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 852 nr_pages = -nr_pages; /* for event */ 853 } 854 855 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 856 } 857 858 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) 859 { 860 struct mem_cgroup_per_zone *mz; 861 862 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 863 return mz->lru_size[lru]; 864 } 865 866 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 867 int nid, 868 unsigned int lru_mask) 869 { 870 unsigned long nr = 0; 871 int zid; 872 873 VM_BUG_ON((unsigned)nid >= nr_node_ids); 874 875 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 876 struct mem_cgroup_per_zone *mz; 877 enum lru_list lru; 878 879 for_each_lru(lru) { 880 if (!(BIT(lru) & lru_mask)) 881 continue; 882 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 883 nr += mz->lru_size[lru]; 884 } 885 } 886 return nr; 887 } 888 889 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 890 unsigned int lru_mask) 891 { 892 unsigned long nr = 0; 893 int nid; 894 895 for_each_node_state(nid, N_MEMORY) 896 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); 897 return nr; 898 } 899 900 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 901 enum mem_cgroup_events_target target) 902 { 903 unsigned long val, next; 904 905 val = __this_cpu_read(memcg->stat->nr_page_events); 906 next = __this_cpu_read(memcg->stat->targets[target]); 907 /* from time_after() in jiffies.h */ 908 if ((long)next - (long)val < 0) { 909 switch (target) { 910 case MEM_CGROUP_TARGET_THRESH: 911 next = val + THRESHOLDS_EVENTS_TARGET; 912 break; 913 case MEM_CGROUP_TARGET_SOFTLIMIT: 914 next = val + SOFTLIMIT_EVENTS_TARGET; 915 break; 916 case MEM_CGROUP_TARGET_NUMAINFO: 917 next = val + NUMAINFO_EVENTS_TARGET; 918 break; 919 default: 920 break; 921 } 922 
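		/* re-arm the target so the next event fires one period later */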
__this_cpu_write(memcg->stat->targets[target], next); 923 return true; 924 } 925 return false; 926 } 927 928 /* 929 * Check events in order. 930 * 931 */ 932 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 933 { 934 /* threshold event is triggered in finer grain than soft limit */ 935 if (unlikely(mem_cgroup_event_ratelimit(memcg, 936 MEM_CGROUP_TARGET_THRESH))) { 937 bool do_softlimit; 938 bool do_numainfo __maybe_unused; 939 940 do_softlimit = mem_cgroup_event_ratelimit(memcg, 941 MEM_CGROUP_TARGET_SOFTLIMIT); 942 #if MAX_NUMNODES > 1 943 do_numainfo = mem_cgroup_event_ratelimit(memcg, 944 MEM_CGROUP_TARGET_NUMAINFO); 945 #endif 946 mem_cgroup_threshold(memcg); 947 if (unlikely(do_softlimit)) 948 mem_cgroup_update_tree(memcg, page); 949 #if MAX_NUMNODES > 1 950 if (unlikely(do_numainfo)) 951 atomic_inc(&memcg->numainfo_events); 952 #endif 953 } 954 } 955 956 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 957 { 958 /* 959 * mm_update_next_owner() may clear mm->owner to NULL 960 * if it races with swapoff, page migration, etc. 961 * So this can be called with p == NULL. 962 */ 963 if (unlikely(!p)) 964 return NULL; 965 966 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 967 } 968 969 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 970 { 971 struct mem_cgroup *memcg = NULL; 972 973 rcu_read_lock(); 974 do { 975 /* 976 * Page cache insertions can happen withou an 977 * actual mm context, e.g. during disk probing 978 * on boot, loopback IO, acct() writes etc. 979 */ 980 if (unlikely(!mm)) 981 memcg = root_mem_cgroup; 982 else { 983 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 984 if (unlikely(!memcg)) 985 memcg = root_mem_cgroup; 986 } 987 } while (!css_tryget_online(&memcg->css)); 988 rcu_read_unlock(); 989 return memcg; 990 } 991 992 /** 993 * mem_cgroup_iter - iterate over memory cgroup hierarchy 994 * @root: hierarchy root 995 * @prev: previously returned memcg, NULL on first invocation 996 * @reclaim: cookie for shared reclaim walks, NULL for full walks 997 * 998 * Returns references to children of the hierarchy below @root, or 999 * @root itself, or %NULL after a full round-trip. 1000 * 1001 * Caller must pass the return value in @prev on subsequent 1002 * invocations for reference counting, or use mem_cgroup_iter_break() 1003 * to cancel a hierarchy walk before the round-trip is complete. 1004 * 1005 * Reclaimers can specify a zone and a priority level in @reclaim to 1006 * divide up the memcgs in the hierarchy among all concurrent 1007 * reclaimers operating on the same zone and priority. 
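 *
 * Illustrative use, mirroring the soft limit reclaim loop below:
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))) {
 *		... reclaim from memcg ...
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	}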
1008 */ 1009 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1010 struct mem_cgroup *prev, 1011 struct mem_cgroup_reclaim_cookie *reclaim) 1012 { 1013 struct reclaim_iter *uninitialized_var(iter); 1014 struct cgroup_subsys_state *css = NULL; 1015 struct mem_cgroup *memcg = NULL; 1016 struct mem_cgroup *pos = NULL; 1017 1018 if (mem_cgroup_disabled()) 1019 return NULL; 1020 1021 if (!root) 1022 root = root_mem_cgroup; 1023 1024 if (prev && !reclaim) 1025 pos = prev; 1026 1027 if (!root->use_hierarchy && root != root_mem_cgroup) { 1028 if (prev) 1029 goto out; 1030 return root; 1031 } 1032 1033 rcu_read_lock(); 1034 1035 if (reclaim) { 1036 struct mem_cgroup_per_zone *mz; 1037 1038 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); 1039 iter = &mz->iter[reclaim->priority]; 1040 1041 if (prev && reclaim->generation != iter->generation) 1042 goto out_unlock; 1043 1044 do { 1045 pos = READ_ONCE(iter->position); 1046 /* 1047 * A racing update may change the position and 1048 * put the last reference, hence css_tryget(), 1049 * or retry to see the updated position. 1050 */ 1051 } while (pos && !css_tryget(&pos->css)); 1052 } 1053 1054 if (pos) 1055 css = &pos->css; 1056 1057 for (;;) { 1058 css = css_next_descendant_pre(css, &root->css); 1059 if (!css) { 1060 /* 1061 * Reclaimers share the hierarchy walk, and a 1062 * new one might jump in right at the end of 1063 * the hierarchy - make sure they see at least 1064 * one group and restart from the beginning. 1065 */ 1066 if (!prev) 1067 continue; 1068 break; 1069 } 1070 1071 /* 1072 * Verify the css and acquire a reference. The root 1073 * is provided by the caller, so we know it's alive 1074 * and kicking, and don't take an extra reference. 1075 */ 1076 memcg = mem_cgroup_from_css(css); 1077 1078 if (css == &root->css) 1079 break; 1080 1081 if (css_tryget(css)) { 1082 /* 1083 * Make sure the memcg is initialized: 1084 * mem_cgroup_css_online() orders the the 1085 * initialization against setting the flag. 1086 */ 1087 if (smp_load_acquire(&memcg->initialized)) 1088 break; 1089 1090 css_put(css); 1091 } 1092 1093 memcg = NULL; 1094 } 1095 1096 if (reclaim) { 1097 if (cmpxchg(&iter->position, pos, memcg) == pos) { 1098 if (memcg) 1099 css_get(&memcg->css); 1100 if (pos) 1101 css_put(&pos->css); 1102 } 1103 1104 /* 1105 * pairs with css_tryget when dereferencing iter->position 1106 * above. 1107 */ 1108 if (pos) 1109 css_put(&pos->css); 1110 1111 if (!memcg) 1112 iter->generation++; 1113 else if (!prev) 1114 reclaim->generation = iter->generation; 1115 } 1116 1117 out_unlock: 1118 rcu_read_unlock(); 1119 out: 1120 if (prev && prev != root) 1121 css_put(&prev->css); 1122 1123 return memcg; 1124 } 1125 1126 /** 1127 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1128 * @root: hierarchy root 1129 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1130 */ 1131 void mem_cgroup_iter_break(struct mem_cgroup *root, 1132 struct mem_cgroup *prev) 1133 { 1134 if (!root) 1135 root = root_mem_cgroup; 1136 if (prev && prev != root) 1137 css_put(&prev->css); 1138 } 1139 1140 /* 1141 * Iteration constructs for visiting all cgroups (under a tree). If 1142 * loops are exited prematurely (break), mem_cgroup_iter_break() must 1143 * be used for reference counting. 
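 *
 * E.g. (illustrative, as done in mem_cgroup_oom_trylock() below):
 *
 *	for_each_mem_cgroup_tree(iter, memcg) {
 *		if (need_to_stop_early) {
 *			mem_cgroup_iter_break(memcg, iter);
 *			break;
 *		}
 *	}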
1144 */ 1145 #define for_each_mem_cgroup_tree(iter, root) \ 1146 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 1147 iter != NULL; \ 1148 iter = mem_cgroup_iter(root, iter, NULL)) 1149 1150 #define for_each_mem_cgroup(iter) \ 1151 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 1152 iter != NULL; \ 1153 iter = mem_cgroup_iter(NULL, iter, NULL)) 1154 1155 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) 1156 { 1157 struct mem_cgroup *memcg; 1158 1159 rcu_read_lock(); 1160 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1161 if (unlikely(!memcg)) 1162 goto out; 1163 1164 switch (idx) { 1165 case PGFAULT: 1166 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); 1167 break; 1168 case PGMAJFAULT: 1169 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); 1170 break; 1171 default: 1172 BUG(); 1173 } 1174 out: 1175 rcu_read_unlock(); 1176 } 1177 EXPORT_SYMBOL(__mem_cgroup_count_vm_event); 1178 1179 /** 1180 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg 1181 * @zone: zone of the wanted lruvec 1182 * @memcg: memcg of the wanted lruvec 1183 * 1184 * Returns the lru list vector holding pages for the given @zone and 1185 * @mem. This can be the global zone lruvec, if the memory controller 1186 * is disabled. 1187 */ 1188 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, 1189 struct mem_cgroup *memcg) 1190 { 1191 struct mem_cgroup_per_zone *mz; 1192 struct lruvec *lruvec; 1193 1194 if (mem_cgroup_disabled()) { 1195 lruvec = &zone->lruvec; 1196 goto out; 1197 } 1198 1199 mz = mem_cgroup_zone_zoneinfo(memcg, zone); 1200 lruvec = &mz->lruvec; 1201 out: 1202 /* 1203 * Since a node can be onlined after the mem_cgroup was created, 1204 * we have to be prepared to initialize lruvec->zone here; 1205 * and if offlined then reonlined, we need to reinitialize it. 1206 */ 1207 if (unlikely(lruvec->zone != zone)) 1208 lruvec->zone = zone; 1209 return lruvec; 1210 } 1211 1212 /** 1213 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page 1214 * @page: the page 1215 * @zone: zone of the page 1216 * 1217 * This function is only safe when following the LRU page isolation 1218 * and putback protocol: the LRU lock must be held, and the page must 1219 * either be PageLRU() or the caller must have isolated/allocated it. 1220 */ 1221 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) 1222 { 1223 struct mem_cgroup_per_zone *mz; 1224 struct mem_cgroup *memcg; 1225 struct lruvec *lruvec; 1226 1227 if (mem_cgroup_disabled()) { 1228 lruvec = &zone->lruvec; 1229 goto out; 1230 } 1231 1232 memcg = page->mem_cgroup; 1233 /* 1234 * Swapcache readahead pages are added to the LRU - and 1235 * possibly migrated - before they are charged. 1236 */ 1237 if (!memcg) 1238 memcg = root_mem_cgroup; 1239 1240 mz = mem_cgroup_page_zoneinfo(memcg, page); 1241 lruvec = &mz->lruvec; 1242 out: 1243 /* 1244 * Since a node can be onlined after the mem_cgroup was created, 1245 * we have to be prepared to initialize lruvec->zone here; 1246 * and if offlined then reonlined, we need to reinitialize it. 
1247 */ 1248 if (unlikely(lruvec->zone != zone)) 1249 lruvec->zone = zone; 1250 return lruvec; 1251 } 1252 1253 /** 1254 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1255 * @lruvec: mem_cgroup per zone lru vector 1256 * @lru: index of lru list the page is sitting on 1257 * @nr_pages: positive when adding or negative when removing 1258 * 1259 * This function must be called when a page is added to or removed from an 1260 * lru list. 1261 */ 1262 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1263 int nr_pages) 1264 { 1265 struct mem_cgroup_per_zone *mz; 1266 unsigned long *lru_size; 1267 1268 if (mem_cgroup_disabled()) 1269 return; 1270 1271 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 1272 lru_size = mz->lru_size + lru; 1273 *lru_size += nr_pages; 1274 VM_BUG_ON((long)(*lru_size) < 0); 1275 } 1276 1277 bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) 1278 { 1279 if (root == memcg) 1280 return true; 1281 if (!root->use_hierarchy) 1282 return false; 1283 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); 1284 } 1285 1286 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) 1287 { 1288 struct mem_cgroup *task_memcg; 1289 struct task_struct *p; 1290 bool ret; 1291 1292 p = find_lock_task_mm(task); 1293 if (p) { 1294 task_memcg = get_mem_cgroup_from_mm(p->mm); 1295 task_unlock(p); 1296 } else { 1297 /* 1298 * All threads may have already detached their mm's, but the oom 1299 * killer still needs to detect if they have already been oom 1300 * killed to prevent needlessly killing additional tasks. 1301 */ 1302 rcu_read_lock(); 1303 task_memcg = mem_cgroup_from_task(task); 1304 css_get(&task_memcg->css); 1305 rcu_read_unlock(); 1306 } 1307 ret = mem_cgroup_is_descendant(task_memcg, memcg); 1308 css_put(&task_memcg->css); 1309 return ret; 1310 } 1311 1312 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) 1313 { 1314 unsigned long inactive_ratio; 1315 unsigned long inactive; 1316 unsigned long active; 1317 unsigned long gb; 1318 1319 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON); 1320 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON); 1321 1322 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1323 if (gb) 1324 inactive_ratio = int_sqrt(10 * gb); 1325 else 1326 inactive_ratio = 1; 1327 1328 return inactive * inactive_ratio < active; 1329 } 1330 1331 bool mem_cgroup_lruvec_online(struct lruvec *lruvec) 1332 { 1333 struct mem_cgroup_per_zone *mz; 1334 struct mem_cgroup *memcg; 1335 1336 if (mem_cgroup_disabled()) 1337 return true; 1338 1339 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 1340 memcg = mz->memcg; 1341 1342 return !!(memcg->css.flags & CSS_ONLINE); 1343 } 1344 1345 #define mem_cgroup_from_counter(counter, member) \ 1346 container_of(counter, struct mem_cgroup, member) 1347 1348 /** 1349 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1350 * @memcg: the memory cgroup 1351 * 1352 * Returns the maximum amount of memory @mem can be charged with, in 1353 * pages. 
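 *
 * E.g. with a 1000 page limit and 900 pages charged, the memory margin is
 * 100 pages; if swap accounting is enabled and memsw has a 1200 page limit
 * with 1150 pages charged, the overall margin is min(100, 50) = 50 pages.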
1354 */ 1355 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1356 { 1357 unsigned long margin = 0; 1358 unsigned long count; 1359 unsigned long limit; 1360 1361 count = page_counter_read(&memcg->memory); 1362 limit = READ_ONCE(memcg->memory.limit); 1363 if (count < limit) 1364 margin = limit - count; 1365 1366 if (do_swap_account) { 1367 count = page_counter_read(&memcg->memsw); 1368 limit = READ_ONCE(memcg->memsw.limit); 1369 if (count <= limit) 1370 margin = min(margin, limit - count); 1371 } 1372 1373 return margin; 1374 } 1375 1376 int mem_cgroup_swappiness(struct mem_cgroup *memcg) 1377 { 1378 /* root ? */ 1379 if (mem_cgroup_disabled() || !memcg->css.parent) 1380 return vm_swappiness; 1381 1382 return memcg->swappiness; 1383 } 1384 1385 /* 1386 * A routine for checking "mem" is under move_account() or not. 1387 * 1388 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1389 * moving cgroups. This is for waiting at high-memory pressure 1390 * caused by "move". 1391 */ 1392 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1393 { 1394 struct mem_cgroup *from; 1395 struct mem_cgroup *to; 1396 bool ret = false; 1397 /* 1398 * Unlike task_move routines, we access mc.to, mc.from not under 1399 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1400 */ 1401 spin_lock(&mc.lock); 1402 from = mc.from; 1403 to = mc.to; 1404 if (!from) 1405 goto unlock; 1406 1407 ret = mem_cgroup_is_descendant(from, memcg) || 1408 mem_cgroup_is_descendant(to, memcg); 1409 unlock: 1410 spin_unlock(&mc.lock); 1411 return ret; 1412 } 1413 1414 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1415 { 1416 if (mc.moving_task && current != mc.moving_task) { 1417 if (mem_cgroup_under_move(memcg)) { 1418 DEFINE_WAIT(wait); 1419 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1420 /* moving charge context might have finished. */ 1421 if (mc.moving_task) 1422 schedule(); 1423 finish_wait(&mc.waitq, &wait); 1424 return true; 1425 } 1426 } 1427 return false; 1428 } 1429 1430 #define K(x) ((x) << (PAGE_SHIFT-10)) 1431 /** 1432 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 
1433 * @memcg: The memory cgroup that went over limit 1434 * @p: Task that is going to be killed 1435 * 1436 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1437 * enabled 1438 */ 1439 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1440 { 1441 /* oom_info_lock ensures that parallel ooms do not interleave */ 1442 static DEFINE_MUTEX(oom_info_lock); 1443 struct mem_cgroup *iter; 1444 unsigned int i; 1445 1446 mutex_lock(&oom_info_lock); 1447 rcu_read_lock(); 1448 1449 if (p) { 1450 pr_info("Task in "); 1451 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1452 pr_cont(" killed as a result of limit of "); 1453 } else { 1454 pr_info("Memory limit reached of cgroup "); 1455 } 1456 1457 pr_cont_cgroup_path(memcg->css.cgroup); 1458 pr_cont("\n"); 1459 1460 rcu_read_unlock(); 1461 1462 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1463 K((u64)page_counter_read(&memcg->memory)), 1464 K((u64)memcg->memory.limit), memcg->memory.failcnt); 1465 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1466 K((u64)page_counter_read(&memcg->memsw)), 1467 K((u64)memcg->memsw.limit), memcg->memsw.failcnt); 1468 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1469 K((u64)page_counter_read(&memcg->kmem)), 1470 K((u64)memcg->kmem.limit), memcg->kmem.failcnt); 1471 1472 for_each_mem_cgroup_tree(iter, memcg) { 1473 pr_info("Memory cgroup stats for "); 1474 pr_cont_cgroup_path(iter->css.cgroup); 1475 pr_cont(":"); 1476 1477 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1478 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1479 continue; 1480 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], 1481 K(mem_cgroup_read_stat(iter, i))); 1482 } 1483 1484 for (i = 0; i < NR_LRU_LISTS; i++) 1485 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1486 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1487 1488 pr_cont("\n"); 1489 } 1490 mutex_unlock(&oom_info_lock); 1491 } 1492 1493 /* 1494 * This function returns the number of memcg under hierarchy tree. Returns 1495 * 1(self count) if no children. 1496 */ 1497 static int mem_cgroup_count_children(struct mem_cgroup *memcg) 1498 { 1499 int num = 0; 1500 struct mem_cgroup *iter; 1501 1502 for_each_mem_cgroup_tree(iter, memcg) 1503 num++; 1504 return num; 1505 } 1506 1507 /* 1508 * Return the memory (and swap, if configured) limit for a memcg. 1509 */ 1510 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) 1511 { 1512 unsigned long limit; 1513 1514 limit = memcg->memory.limit; 1515 if (mem_cgroup_swappiness(memcg)) { 1516 unsigned long memsw_limit; 1517 1518 memsw_limit = memcg->memsw.limit; 1519 limit = min(limit + total_swap_pages, memsw_limit); 1520 } 1521 return limit; 1522 } 1523 1524 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1525 int order) 1526 { 1527 struct mem_cgroup *iter; 1528 unsigned long chosen_points = 0; 1529 unsigned long totalpages; 1530 unsigned int points = 0; 1531 struct task_struct *chosen = NULL; 1532 1533 /* 1534 * If current has a pending SIGKILL or is exiting, then automatically 1535 * select it. The goal is to allow it to allocate so that it may 1536 * quickly exit and free its memory. 1537 */ 1538 if (fatal_signal_pending(current) || task_will_free_mem(current)) { 1539 mark_tsk_oom_victim(current); 1540 return; 1541 } 1542 1543 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg); 1544 totalpages = mem_cgroup_get_limit(memcg) ? 
: 1; 1545 for_each_mem_cgroup_tree(iter, memcg) { 1546 struct css_task_iter it; 1547 struct task_struct *task; 1548 1549 css_task_iter_start(&iter->css, &it); 1550 while ((task = css_task_iter_next(&it))) { 1551 switch (oom_scan_process_thread(task, totalpages, NULL, 1552 false)) { 1553 case OOM_SCAN_SELECT: 1554 if (chosen) 1555 put_task_struct(chosen); 1556 chosen = task; 1557 chosen_points = ULONG_MAX; 1558 get_task_struct(chosen); 1559 /* fall through */ 1560 case OOM_SCAN_CONTINUE: 1561 continue; 1562 case OOM_SCAN_ABORT: 1563 css_task_iter_end(&it); 1564 mem_cgroup_iter_break(memcg, iter); 1565 if (chosen) 1566 put_task_struct(chosen); 1567 return; 1568 case OOM_SCAN_OK: 1569 break; 1570 }; 1571 points = oom_badness(task, memcg, NULL, totalpages); 1572 if (!points || points < chosen_points) 1573 continue; 1574 /* Prefer thread group leaders for display purposes */ 1575 if (points == chosen_points && 1576 thread_group_leader(chosen)) 1577 continue; 1578 1579 if (chosen) 1580 put_task_struct(chosen); 1581 chosen = task; 1582 chosen_points = points; 1583 get_task_struct(chosen); 1584 } 1585 css_task_iter_end(&it); 1586 } 1587 1588 if (!chosen) 1589 return; 1590 points = chosen_points * 1000 / totalpages; 1591 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg, 1592 NULL, "Memory cgroup out of memory"); 1593 } 1594 1595 #if MAX_NUMNODES > 1 1596 1597 /** 1598 * test_mem_cgroup_node_reclaimable 1599 * @memcg: the target memcg 1600 * @nid: the node ID to be checked. 1601 * @noswap : specify true here if the user wants flle only information. 1602 * 1603 * This function returns whether the specified memcg contains any 1604 * reclaimable pages on a node. Returns true if there are any reclaimable 1605 * pages in the node. 1606 */ 1607 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, 1608 int nid, bool noswap) 1609 { 1610 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) 1611 return true; 1612 if (noswap || !total_swap_pages) 1613 return false; 1614 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) 1615 return true; 1616 return false; 1617 1618 } 1619 1620 /* 1621 * Always updating the nodemask is not very good - even if we have an empty 1622 * list or the wrong list here, we can start from some node and traverse all 1623 * nodes based on the zonelist. So update the list loosely once per 10 secs. 1624 * 1625 */ 1626 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) 1627 { 1628 int nid; 1629 /* 1630 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET 1631 * pagein/pageout changes since the last update. 1632 */ 1633 if (!atomic_read(&memcg->numainfo_events)) 1634 return; 1635 if (atomic_inc_return(&memcg->numainfo_updating) > 1) 1636 return; 1637 1638 /* make a nodemask where this memcg uses memory from */ 1639 memcg->scan_nodes = node_states[N_MEMORY]; 1640 1641 for_each_node_mask(nid, node_states[N_MEMORY]) { 1642 1643 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) 1644 node_clear(nid, memcg->scan_nodes); 1645 } 1646 1647 atomic_set(&memcg->numainfo_events, 0); 1648 atomic_set(&memcg->numainfo_updating, 0); 1649 } 1650 1651 /* 1652 * Selecting a node where we start reclaim from. Because what we need is just 1653 * reducing usage counter, start from anywhere is O,K. Considering 1654 * memory reclaim from current node, there are pros. and cons. 1655 * 1656 * Freeing memory from current node means freeing memory from a node which 1657 * we'll use or we've used. So, it may make LRU bad. 
And if several threads 1658 * hit limits, it will see a contention on a node. But freeing from remote 1659 * node means more costs for memory reclaim because of memory latency. 1660 * 1661 * Now, we use round-robin. Better algorithm is welcomed. 1662 */ 1663 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1664 { 1665 int node; 1666 1667 mem_cgroup_may_update_nodemask(memcg); 1668 node = memcg->last_scanned_node; 1669 1670 node = next_node(node, memcg->scan_nodes); 1671 if (node == MAX_NUMNODES) 1672 node = first_node(memcg->scan_nodes); 1673 /* 1674 * We call this when we hit limit, not when pages are added to LRU. 1675 * No LRU may hold pages because all pages are UNEVICTABLE or 1676 * memcg is too small and all pages are not on LRU. In that case, 1677 * we use curret node. 1678 */ 1679 if (unlikely(node == MAX_NUMNODES)) 1680 node = numa_node_id(); 1681 1682 memcg->last_scanned_node = node; 1683 return node; 1684 } 1685 #else 1686 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1687 { 1688 return 0; 1689 } 1690 #endif 1691 1692 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1693 struct zone *zone, 1694 gfp_t gfp_mask, 1695 unsigned long *total_scanned) 1696 { 1697 struct mem_cgroup *victim = NULL; 1698 int total = 0; 1699 int loop = 0; 1700 unsigned long excess; 1701 unsigned long nr_scanned; 1702 struct mem_cgroup_reclaim_cookie reclaim = { 1703 .zone = zone, 1704 .priority = 0, 1705 }; 1706 1707 excess = soft_limit_excess(root_memcg); 1708 1709 while (1) { 1710 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1711 if (!victim) { 1712 loop++; 1713 if (loop >= 2) { 1714 /* 1715 * If we have not been able to reclaim 1716 * anything, it might because there are 1717 * no reclaimable pages under this hierarchy 1718 */ 1719 if (!total) 1720 break; 1721 /* 1722 * We want to do more targeted reclaim. 1723 * excess >> 2 is not to excessive so as to 1724 * reclaim too much, nor too less that we keep 1725 * coming back to reclaim from this cgroup 1726 */ 1727 if (total >= (excess >> 2) || 1728 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1729 break; 1730 } 1731 continue; 1732 } 1733 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, 1734 zone, &nr_scanned); 1735 *total_scanned += nr_scanned; 1736 if (!soft_limit_excess(root_memcg)) 1737 break; 1738 } 1739 mem_cgroup_iter_break(root_memcg, victim); 1740 return total; 1741 } 1742 1743 #ifdef CONFIG_LOCKDEP 1744 static struct lockdep_map memcg_oom_lock_dep_map = { 1745 .name = "memcg_oom_lock", 1746 }; 1747 #endif 1748 1749 static DEFINE_SPINLOCK(memcg_oom_lock); 1750 1751 /* 1752 * Check OOM-Killer is already running under our hierarchy. 1753 * If someone is running, return false. 1754 */ 1755 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1756 { 1757 struct mem_cgroup *iter, *failed = NULL; 1758 1759 spin_lock(&memcg_oom_lock); 1760 1761 for_each_mem_cgroup_tree(iter, memcg) { 1762 if (iter->oom_lock) { 1763 /* 1764 * this subtree of our hierarchy is already locked 1765 * so we cannot give a lock. 
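				 * Remember where we stopped so the cleanup
				 * pass below clears exactly the oom_lock
				 * flags that were already set.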
1766 */ 1767 failed = iter; 1768 mem_cgroup_iter_break(memcg, iter); 1769 break; 1770 } else 1771 iter->oom_lock = true; 1772 } 1773 1774 if (failed) { 1775 /* 1776 * OK, we failed to lock the whole subtree so we have 1777 * to clean up what we set up to the failing subtree 1778 */ 1779 for_each_mem_cgroup_tree(iter, memcg) { 1780 if (iter == failed) { 1781 mem_cgroup_iter_break(memcg, iter); 1782 break; 1783 } 1784 iter->oom_lock = false; 1785 } 1786 } else 1787 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1788 1789 spin_unlock(&memcg_oom_lock); 1790 1791 return !failed; 1792 } 1793 1794 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1795 { 1796 struct mem_cgroup *iter; 1797 1798 spin_lock(&memcg_oom_lock); 1799 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1800 for_each_mem_cgroup_tree(iter, memcg) 1801 iter->oom_lock = false; 1802 spin_unlock(&memcg_oom_lock); 1803 } 1804 1805 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1806 { 1807 struct mem_cgroup *iter; 1808 1809 for_each_mem_cgroup_tree(iter, memcg) 1810 atomic_inc(&iter->under_oom); 1811 } 1812 1813 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1814 { 1815 struct mem_cgroup *iter; 1816 1817 /* 1818 * When a new child is created while the hierarchy is under oom, 1819 * mem_cgroup_oom_lock() may not be called. We have to use 1820 * atomic_add_unless() here. 1821 */ 1822 for_each_mem_cgroup_tree(iter, memcg) 1823 atomic_add_unless(&iter->under_oom, -1, 0); 1824 } 1825 1826 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1827 1828 struct oom_wait_info { 1829 struct mem_cgroup *memcg; 1830 wait_queue_t wait; 1831 }; 1832 1833 static int memcg_oom_wake_function(wait_queue_t *wait, 1834 unsigned mode, int sync, void *arg) 1835 { 1836 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1837 struct mem_cgroup *oom_wait_memcg; 1838 struct oom_wait_info *oom_wait_info; 1839 1840 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1841 oom_wait_memcg = oom_wait_info->memcg; 1842 1843 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1844 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1845 return 0; 1846 return autoremove_wake_function(wait, mode, sync, arg); 1847 } 1848 1849 static void memcg_wakeup_oom(struct mem_cgroup *memcg) 1850 { 1851 atomic_inc(&memcg->oom_wakeups); 1852 /* for filtering, pass "memcg" as argument. */ 1853 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1854 } 1855 1856 static void memcg_oom_recover(struct mem_cgroup *memcg) 1857 { 1858 if (memcg && atomic_read(&memcg->under_oom)) 1859 memcg_wakeup_oom(memcg); 1860 } 1861 1862 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1863 { 1864 if (!current->memcg_oom.may_oom) 1865 return; 1866 /* 1867 * We are in the middle of the charge context here, so we 1868 * don't want to block when potentially sitting on a callstack 1869 * that holds all kinds of filesystem and mm locks. 1870 * 1871 * Also, the caller may handle a failed allocation gracefully 1872 * (like optional page cache readahead) and so an OOM killer 1873 * invocation might not even be necessary. 1874 * 1875 * That's why we don't do anything here except remember the 1876 * OOM context and then deal with it at the end of the page 1877 * fault when the stack is unwound, the locks are released, 1878 * and when we know whether the fault was overall successful. 
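	 *
	 * The state recorded here is consumed by mem_cgroup_oom_synchronize()
	 * below, which the page fault path calls once the stack has been
	 * unwound.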
1879 */ 1880 css_get(&memcg->css); 1881 current->memcg_oom.memcg = memcg; 1882 current->memcg_oom.gfp_mask = mask; 1883 current->memcg_oom.order = order; 1884 } 1885 1886 /** 1887 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1888 * @handle: actually kill/wait or just clean up the OOM state 1889 * 1890 * This has to be called at the end of a page fault if the memcg OOM 1891 * handler was enabled. 1892 * 1893 * Memcg supports userspace OOM handling where failed allocations must 1894 * sleep on a waitqueue until the userspace task resolves the 1895 * situation. Sleeping directly in the charge context with all kinds 1896 * of locks held is not a good idea, instead we remember an OOM state 1897 * in the task and mem_cgroup_oom_synchronize() has to be called at 1898 * the end of the page fault to complete the OOM handling. 1899 * 1900 * Returns %true if an ongoing memcg OOM situation was detected and 1901 * completed, %false otherwise. 1902 */ 1903 bool mem_cgroup_oom_synchronize(bool handle) 1904 { 1905 struct mem_cgroup *memcg = current->memcg_oom.memcg; 1906 struct oom_wait_info owait; 1907 bool locked; 1908 1909 /* OOM is global, do not handle */ 1910 if (!memcg) 1911 return false; 1912 1913 if (!handle || oom_killer_disabled) 1914 goto cleanup; 1915 1916 owait.memcg = memcg; 1917 owait.wait.flags = 0; 1918 owait.wait.func = memcg_oom_wake_function; 1919 owait.wait.private = current; 1920 INIT_LIST_HEAD(&owait.wait.task_list); 1921 1922 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1923 mem_cgroup_mark_under_oom(memcg); 1924 1925 locked = mem_cgroup_oom_trylock(memcg); 1926 1927 if (locked) 1928 mem_cgroup_oom_notify(memcg); 1929 1930 if (locked && !memcg->oom_kill_disable) { 1931 mem_cgroup_unmark_under_oom(memcg); 1932 finish_wait(&memcg_oom_waitq, &owait.wait); 1933 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask, 1934 current->memcg_oom.order); 1935 } else { 1936 schedule(); 1937 mem_cgroup_unmark_under_oom(memcg); 1938 finish_wait(&memcg_oom_waitq, &owait.wait); 1939 } 1940 1941 if (locked) { 1942 mem_cgroup_oom_unlock(memcg); 1943 /* 1944 * There is no guarantee that an OOM-lock contender 1945 * sees the wakeups triggered by the OOM kill 1946 * uncharges. Wake any sleepers explicitely. 1947 */ 1948 memcg_oom_recover(memcg); 1949 } 1950 cleanup: 1951 current->memcg_oom.memcg = NULL; 1952 css_put(&memcg->css); 1953 return true; 1954 } 1955 1956 /** 1957 * mem_cgroup_begin_page_stat - begin a page state statistics transaction 1958 * @page: page that is going to change accounted state 1959 * 1960 * This function must mark the beginning of an accounted page state 1961 * change to prevent double accounting when the page is concurrently 1962 * being moved to another memcg: 1963 * 1964 * memcg = mem_cgroup_begin_page_stat(page); 1965 * if (TestClearPageState(page)) 1966 * mem_cgroup_update_page_stat(memcg, state, -1); 1967 * mem_cgroup_end_page_stat(memcg); 1968 */ 1969 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page) 1970 { 1971 struct mem_cgroup *memcg; 1972 unsigned long flags; 1973 1974 /* 1975 * The RCU lock is held throughout the transaction. The fast 1976 * path can get away without acquiring the memcg->move_lock 1977 * because page moving starts with an RCU grace period. 1978 * 1979 * The RCU lock also protects the memcg from being freed when 1980 * the page state that is going to change is the only thing 1981 * preventing the page from being uncharged. 1982 * E.g. 
end-writeback clearing PageWriteback(), which allows 1983 * migration to go ahead and uncharge the page before the 1984 * account transaction might be complete. 1985 */ 1986 rcu_read_lock(); 1987 1988 if (mem_cgroup_disabled()) 1989 return NULL; 1990 again: 1991 memcg = page->mem_cgroup; 1992 if (unlikely(!memcg)) 1993 return NULL; 1994 1995 if (atomic_read(&memcg->moving_account) <= 0) 1996 return memcg; 1997 1998 spin_lock_irqsave(&memcg->move_lock, flags); 1999 if (memcg != page->mem_cgroup) { 2000 spin_unlock_irqrestore(&memcg->move_lock, flags); 2001 goto again; 2002 } 2003 2004 /* 2005 * When charge migration first begins, we can have locked and 2006 * unlocked page stat updates happening concurrently. Track 2007 * the task who has the lock for mem_cgroup_end_page_stat(). 2008 */ 2009 memcg->move_lock_task = current; 2010 memcg->move_lock_flags = flags; 2011 2012 return memcg; 2013 } 2014 2015 /** 2016 * mem_cgroup_end_page_stat - finish a page state statistics transaction 2017 * @memcg: the memcg that was accounted against 2018 */ 2019 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) 2020 { 2021 if (memcg && memcg->move_lock_task == current) { 2022 unsigned long flags = memcg->move_lock_flags; 2023 2024 memcg->move_lock_task = NULL; 2025 memcg->move_lock_flags = 0; 2026 2027 spin_unlock_irqrestore(&memcg->move_lock, flags); 2028 } 2029 2030 rcu_read_unlock(); 2031 } 2032 2033 /** 2034 * mem_cgroup_update_page_stat - update page state statistics 2035 * @memcg: memcg to account against 2036 * @idx: page state item to account 2037 * @val: number of pages (positive or negative) 2038 * 2039 * See mem_cgroup_begin_page_stat() for locking requirements. 2040 */ 2041 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, 2042 enum mem_cgroup_stat_index idx, int val) 2043 { 2044 VM_BUG_ON(!rcu_read_lock_held()); 2045 2046 if (memcg) 2047 this_cpu_add(memcg->stat->count[idx], val); 2048 } 2049 2050 /* 2051 * size of first charge trial. "32" comes from vmscan.c's magic value. 2052 * TODO: maybe necessary to use big numbers in big irons. 2053 */ 2054 #define CHARGE_BATCH 32U 2055 struct memcg_stock_pcp { 2056 struct mem_cgroup *cached; /* this never be root cgroup */ 2057 unsigned int nr_pages; 2058 struct work_struct work; 2059 unsigned long flags; 2060 #define FLUSHING_CACHED_CHARGE 0 2061 }; 2062 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2063 static DEFINE_MUTEX(percpu_charge_mutex); 2064 2065 /** 2066 * consume_stock: Try to consume stocked charge on this cpu. 2067 * @memcg: memcg to consume from. 2068 * @nr_pages: how many pages to charge. 2069 * 2070 * The charges will only happen if @memcg matches the current cpu's memcg 2071 * stock, and at least @nr_pages are available in that stock. Failure to 2072 * service an allocation will refill the stock. 2073 * 2074 * returns true if successful, false otherwise. 2075 */ 2076 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2077 { 2078 struct memcg_stock_pcp *stock; 2079 bool ret = false; 2080 2081 if (nr_pages > CHARGE_BATCH) 2082 return ret; 2083 2084 stock = &get_cpu_var(memcg_stock); 2085 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2086 stock->nr_pages -= nr_pages; 2087 ret = true; 2088 } 2089 put_cpu_var(memcg_stock); 2090 return ret; 2091 } 2092 2093 /* 2094 * Returns stocks cached in percpu and reset cached information. 
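 *
 * The pages returned here were pre-charged to stock->cached by
 * refill_stock(); both the page counter charges and the css references
 * taken at charge time are released again.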
2095 */ 2096 static void drain_stock(struct memcg_stock_pcp *stock) 2097 { 2098 struct mem_cgroup *old = stock->cached; 2099 2100 if (stock->nr_pages) { 2101 page_counter_uncharge(&old->memory, stock->nr_pages); 2102 if (do_swap_account) 2103 page_counter_uncharge(&old->memsw, stock->nr_pages); 2104 css_put_many(&old->css, stock->nr_pages); 2105 stock->nr_pages = 0; 2106 } 2107 stock->cached = NULL; 2108 } 2109 2110 /* 2111 * This must be called with preemption disabled, or by a thread 2112 * which is pinned to the local cpu. 2113 */ 2114 static void drain_local_stock(struct work_struct *dummy) 2115 { 2116 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock); 2117 drain_stock(stock); 2118 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2119 } 2120 2121 /* 2122 * Cache charges (nr_pages) in the local per-cpu area. 2123 * They will be consumed by consume_stock() later. 2124 */ 2125 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2126 { 2127 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 2128 2129 if (stock->cached != memcg) { /* reset if necessary */ 2130 drain_stock(stock); 2131 stock->cached = memcg; 2132 } 2133 stock->nr_pages += nr_pages; 2134 put_cpu_var(memcg_stock); 2135 } 2136 2137 /* 2138 * Drains all per-CPU charge caches for the given root_memcg and the 2139 * whole subtree of the hierarchy under it. 2140 */ 2141 static void drain_all_stock(struct mem_cgroup *root_memcg) 2142 { 2143 int cpu, curcpu; 2144 2145 /* If someone's already draining, avoid adding more workers. */ 2146 if (!mutex_trylock(&percpu_charge_mutex)) 2147 return; 2148 /* Notify other cpus that system-wide "drain" is running */ 2149 get_online_cpus(); 2150 curcpu = get_cpu(); 2151 for_each_online_cpu(cpu) { 2152 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2153 struct mem_cgroup *memcg; 2154 2155 memcg = stock->cached; 2156 if (!memcg || !stock->nr_pages) 2157 continue; 2158 if (!mem_cgroup_is_descendant(memcg, root_memcg)) 2159 continue; 2160 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2161 if (cpu == curcpu) 2162 drain_local_stock(&stock->work); 2163 else 2164 schedule_work_on(cpu, &stock->work); 2165 } 2166 } 2167 put_cpu(); 2168 put_online_cpus(); 2169 mutex_unlock(&percpu_charge_mutex); 2170 } 2171 2172 /* 2173 * This function drains the percpu counter values from a DEAD cpu and 2174 * folds them into memcg->nocpu_base. Note that this function can be preempted.
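 *
 * Statistics readers are expected to add nocpu_base on top of the
 * per-cpu sums, so nothing is lost across cpu hotplug.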
2175 */ 2176 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu) 2177 { 2178 int i; 2179 2180 spin_lock(&memcg->pcp_counter_lock); 2181 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 2182 long x = per_cpu(memcg->stat->count[i], cpu); 2183 2184 per_cpu(memcg->stat->count[i], cpu) = 0; 2185 memcg->nocpu_base.count[i] += x; 2186 } 2187 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 2188 unsigned long x = per_cpu(memcg->stat->events[i], cpu); 2189 2190 per_cpu(memcg->stat->events[i], cpu) = 0; 2191 memcg->nocpu_base.events[i] += x; 2192 } 2193 spin_unlock(&memcg->pcp_counter_lock); 2194 } 2195 2196 static int memcg_cpu_hotplug_callback(struct notifier_block *nb, 2197 unsigned long action, 2198 void *hcpu) 2199 { 2200 int cpu = (unsigned long)hcpu; 2201 struct memcg_stock_pcp *stock; 2202 struct mem_cgroup *iter; 2203 2204 if (action == CPU_ONLINE) 2205 return NOTIFY_OK; 2206 2207 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 2208 return NOTIFY_OK; 2209 2210 for_each_mem_cgroup(iter) 2211 mem_cgroup_drain_pcp_counter(iter, cpu); 2212 2213 stock = &per_cpu(memcg_stock, cpu); 2214 drain_stock(stock); 2215 return NOTIFY_OK; 2216 } 2217 2218 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2219 unsigned int nr_pages) 2220 { 2221 unsigned int batch = max(CHARGE_BATCH, nr_pages); 2222 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2223 struct mem_cgroup *mem_over_limit; 2224 struct page_counter *counter; 2225 unsigned long nr_reclaimed; 2226 bool may_swap = true; 2227 bool drained = false; 2228 int ret = 0; 2229 2230 if (mem_cgroup_is_root(memcg)) 2231 goto done; 2232 retry: 2233 if (consume_stock(memcg, nr_pages)) 2234 goto done; 2235 2236 if (!do_swap_account || 2237 !page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2238 if (!page_counter_try_charge(&memcg->memory, batch, &counter)) 2239 goto done_restock; 2240 if (do_swap_account) 2241 page_counter_uncharge(&memcg->memsw, batch); 2242 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2243 } else { 2244 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2245 may_swap = false; 2246 } 2247 2248 if (batch > nr_pages) { 2249 batch = nr_pages; 2250 goto retry; 2251 } 2252 2253 /* 2254 * Unlike in global OOM situations, memcg is not in a physical 2255 * memory shortage. Allow dying and OOM-killed tasks to 2256 * bypass the last charges so that they can exit quickly and 2257 * free their memory. 2258 */ 2259 if (unlikely(test_thread_flag(TIF_MEMDIE) || 2260 fatal_signal_pending(current) || 2261 current->flags & PF_EXITING)) 2262 goto bypass; 2263 2264 if (unlikely(task_in_memcg_oom(current))) 2265 goto nomem; 2266 2267 if (!(gfp_mask & __GFP_WAIT)) 2268 goto nomem; 2269 2270 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1); 2271 2272 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2273 gfp_mask, may_swap); 2274 2275 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2276 goto retry; 2277 2278 if (!drained) { 2279 drain_all_stock(mem_over_limit); 2280 drained = true; 2281 goto retry; 2282 } 2283 2284 if (gfp_mask & __GFP_NORETRY) 2285 goto nomem; 2286 /* 2287 * Even though the limit is exceeded at this point, reclaim 2288 * may have been able to free some pages. Retry the charge 2289 * before killing the task. 2290 * 2291 * Only for regular pages, though: huge pages are rather 2292 * unlikely to succeed so close to the limit, and we fall back 2293 * to regular pages anyway in case of failure. 
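 *
 * "Regular" here means an order no larger than PAGE_ALLOC_COSTLY_ORDER,
 * the same cutoff the page allocator itself uses when deciding how
 * persistently to retry an allocation.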
2294 */ 2295 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2296 goto retry; 2297 /* 2298 * At task move, charge accounts can be doubly counted. So, it's 2299 * better to wait until the end of task_move if something is going on. 2300 */ 2301 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2302 goto retry; 2303 2304 if (nr_retries--) 2305 goto retry; 2306 2307 if (gfp_mask & __GFP_NOFAIL) 2308 goto bypass; 2309 2310 if (fatal_signal_pending(current)) 2311 goto bypass; 2312 2313 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1); 2314 2315 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages)); 2316 nomem: 2317 if (!(gfp_mask & __GFP_NOFAIL)) 2318 return -ENOMEM; 2319 bypass: 2320 return -EINTR; 2321 2322 done_restock: 2323 css_get_many(&memcg->css, batch); 2324 if (batch > nr_pages) 2325 refill_stock(memcg, batch - nr_pages); 2326 /* 2327 * If the hierarchy is above the normal consumption range, 2328 * make the charging task trim their excess contribution. 2329 */ 2330 do { 2331 if (page_counter_read(&memcg->memory) <= memcg->high) 2332 continue; 2333 mem_cgroup_events(memcg, MEMCG_HIGH, 1); 2334 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); 2335 } while ((memcg = parent_mem_cgroup(memcg))); 2336 done: 2337 return ret; 2338 } 2339 2340 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2341 { 2342 if (mem_cgroup_is_root(memcg)) 2343 return; 2344 2345 page_counter_uncharge(&memcg->memory, nr_pages); 2346 if (do_swap_account) 2347 page_counter_uncharge(&memcg->memsw, nr_pages); 2348 2349 css_put_many(&memcg->css, nr_pages); 2350 } 2351 2352 /* 2353 * try_get_mem_cgroup_from_page - look up page's memcg association 2354 * @page: the page 2355 * 2356 * Look up, get a css reference, and return the memcg that owns @page. 2357 * 2358 * The page must be locked to prevent racing with swap-in and page 2359 * cache charges. If coming from an unlocked page table, the caller 2360 * must ensure the page is on the LRU or this can race with charging. 
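 *
 * When a memcg is returned, the reference obtained on it (via
 * css_tryget_online()) must be dropped with css_put() once the caller
 * is done with it.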
2361 */ 2362 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) 2363 { 2364 struct mem_cgroup *memcg; 2365 unsigned short id; 2366 swp_entry_t ent; 2367 2368 VM_BUG_ON_PAGE(!PageLocked(page), page); 2369 2370 memcg = page->mem_cgroup; 2371 if (memcg) { 2372 if (!css_tryget_online(&memcg->css)) 2373 memcg = NULL; 2374 } else if (PageSwapCache(page)) { 2375 ent.val = page_private(page); 2376 id = lookup_swap_cgroup_id(ent); 2377 rcu_read_lock(); 2378 memcg = mem_cgroup_from_id(id); 2379 if (memcg && !css_tryget_online(&memcg->css)) 2380 memcg = NULL; 2381 rcu_read_unlock(); 2382 } 2383 return memcg; 2384 } 2385 2386 static void lock_page_lru(struct page *page, int *isolated) 2387 { 2388 struct zone *zone = page_zone(page); 2389 2390 spin_lock_irq(&zone->lru_lock); 2391 if (PageLRU(page)) { 2392 struct lruvec *lruvec; 2393 2394 lruvec = mem_cgroup_page_lruvec(page, zone); 2395 ClearPageLRU(page); 2396 del_page_from_lru_list(page, lruvec, page_lru(page)); 2397 *isolated = 1; 2398 } else 2399 *isolated = 0; 2400 } 2401 2402 static void unlock_page_lru(struct page *page, int isolated) 2403 { 2404 struct zone *zone = page_zone(page); 2405 2406 if (isolated) { 2407 struct lruvec *lruvec; 2408 2409 lruvec = mem_cgroup_page_lruvec(page, zone); 2410 VM_BUG_ON_PAGE(PageLRU(page), page); 2411 SetPageLRU(page); 2412 add_page_to_lru_list(page, lruvec, page_lru(page)); 2413 } 2414 spin_unlock_irq(&zone->lru_lock); 2415 } 2416 2417 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2418 bool lrucare) 2419 { 2420 int isolated; 2421 2422 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2423 2424 /* 2425 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2426 * may already be on some other mem_cgroup's LRU. Take care of it. 2427 */ 2428 if (lrucare) 2429 lock_page_lru(page, &isolated); 2430 2431 /* 2432 * Nobody should be changing or seriously looking at 2433 * page->mem_cgroup at this point: 2434 * 2435 * - the page is uncharged 2436 * 2437 * - the page is off-LRU 2438 * 2439 * - an anonymous fault has exclusive page access, except for 2440 * a locked page table 2441 * 2442 * - a page cache insertion, a swapin fault, or a migration 2443 * have the page locked 2444 */ 2445 page->mem_cgroup = memcg; 2446 2447 if (lrucare) 2448 unlock_page_lru(page, isolated); 2449 } 2450 2451 #ifdef CONFIG_MEMCG_KMEM 2452 int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, 2453 unsigned long nr_pages) 2454 { 2455 struct page_counter *counter; 2456 int ret = 0; 2457 2458 ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter); 2459 if (ret < 0) 2460 return ret; 2461 2462 ret = try_charge(memcg, gfp, nr_pages); 2463 if (ret == -EINTR) { 2464 /* 2465 * try_charge() chose to bypass to root due to OOM kill or 2466 * fatal signal. Since our only options are to either fail 2467 * the allocation or charge it to this cgroup, do it as a 2468 * temporary condition. But we can't fail. From a kmem/slab 2469 * perspective, the cache has already been selected, by 2470 * mem_cgroup_kmem_get_cache(), so it is too late to change 2471 * our minds. 2472 * 2473 * This condition will only trigger if the task entered 2474 * memcg_charge_kmem in a sane state, but was OOM-killed 2475 * during try_charge() above. 
Tasks that were already dying 2476 * when the allocation triggers should have been already 2477 * directed to the root cgroup in memcontrol.h 2478 */ 2479 page_counter_charge(&memcg->memory, nr_pages); 2480 if (do_swap_account) 2481 page_counter_charge(&memcg->memsw, nr_pages); 2482 css_get_many(&memcg->css, nr_pages); 2483 ret = 0; 2484 } else if (ret) 2485 page_counter_uncharge(&memcg->kmem, nr_pages); 2486 2487 return ret; 2488 } 2489 2490 void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages) 2491 { 2492 page_counter_uncharge(&memcg->memory, nr_pages); 2493 if (do_swap_account) 2494 page_counter_uncharge(&memcg->memsw, nr_pages); 2495 2496 page_counter_uncharge(&memcg->kmem, nr_pages); 2497 2498 css_put_many(&memcg->css, nr_pages); 2499 } 2500 2501 /* 2502 * helper for acessing a memcg's index. It will be used as an index in the 2503 * child cache array in kmem_cache, and also to derive its name. This function 2504 * will return -1 when this is not a kmem-limited memcg. 2505 */ 2506 int memcg_cache_id(struct mem_cgroup *memcg) 2507 { 2508 return memcg ? memcg->kmemcg_id : -1; 2509 } 2510 2511 static int memcg_alloc_cache_id(void) 2512 { 2513 int id, size; 2514 int err; 2515 2516 id = ida_simple_get(&memcg_cache_ida, 2517 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2518 if (id < 0) 2519 return id; 2520 2521 if (id < memcg_nr_cache_ids) 2522 return id; 2523 2524 /* 2525 * There's no space for the new id in memcg_caches arrays, 2526 * so we have to grow them. 2527 */ 2528 down_write(&memcg_cache_ids_sem); 2529 2530 size = 2 * (id + 1); 2531 if (size < MEMCG_CACHES_MIN_SIZE) 2532 size = MEMCG_CACHES_MIN_SIZE; 2533 else if (size > MEMCG_CACHES_MAX_SIZE) 2534 size = MEMCG_CACHES_MAX_SIZE; 2535 2536 err = memcg_update_all_caches(size); 2537 if (!err) 2538 err = memcg_update_all_list_lrus(size); 2539 if (!err) 2540 memcg_nr_cache_ids = size; 2541 2542 up_write(&memcg_cache_ids_sem); 2543 2544 if (err) { 2545 ida_simple_remove(&memcg_cache_ida, id); 2546 return err; 2547 } 2548 return id; 2549 } 2550 2551 static void memcg_free_cache_id(int id) 2552 { 2553 ida_simple_remove(&memcg_cache_ida, id); 2554 } 2555 2556 struct memcg_kmem_cache_create_work { 2557 struct mem_cgroup *memcg; 2558 struct kmem_cache *cachep; 2559 struct work_struct work; 2560 }; 2561 2562 static void memcg_kmem_cache_create_func(struct work_struct *w) 2563 { 2564 struct memcg_kmem_cache_create_work *cw = 2565 container_of(w, struct memcg_kmem_cache_create_work, work); 2566 struct mem_cgroup *memcg = cw->memcg; 2567 struct kmem_cache *cachep = cw->cachep; 2568 2569 memcg_create_kmem_cache(memcg, cachep); 2570 2571 css_put(&memcg->css); 2572 kfree(cw); 2573 } 2574 2575 /* 2576 * Enqueue the creation of a per-memcg kmem_cache. 2577 */ 2578 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2579 struct kmem_cache *cachep) 2580 { 2581 struct memcg_kmem_cache_create_work *cw; 2582 2583 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2584 if (!cw) 2585 return; 2586 2587 css_get(&memcg->css); 2588 2589 cw->memcg = memcg; 2590 cw->cachep = cachep; 2591 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2592 2593 schedule_work(&cw->work); 2594 } 2595 2596 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2597 struct kmem_cache *cachep) 2598 { 2599 /* 2600 * We need to stop accounting when we kmalloc, because if the 2601 * corresponding kmalloc cache is not yet created, the first allocation 2602 * in __memcg_schedule_kmem_cache_create will recurse. 
2603 * 2604 * However, it is better to enclose the whole function. Depending on 2605 * the debugging options enabled, INIT_WORK(), for instance, can 2606 * trigger an allocation. This, too, will make us recurse. Because at 2607 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2608 * the safest choice is to do it like this, wrapping the whole function. 2609 */ 2610 current->memcg_kmem_skip_account = 1; 2611 __memcg_schedule_kmem_cache_create(memcg, cachep); 2612 current->memcg_kmem_skip_account = 0; 2613 } 2614 2615 /* 2616 * Return the kmem_cache we're supposed to use for a slab allocation. 2617 * We try to use the current memcg's version of the cache. 2618 * 2619 * If the cache does not exist yet and we are its first user, we either 2620 * create it immediately, if possible, or create it asynchronously 2621 * in a workqueue. 2622 * In the latter case, we will let the current allocation go through with 2623 * the original cache. 2624 * 2625 * Can't be called in interrupt context or from kernel threads. 2626 * This function needs to be called with rcu_read_lock() held. 2627 */ 2628 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep) 2629 { 2630 struct mem_cgroup *memcg; 2631 struct kmem_cache *memcg_cachep; 2632 int kmemcg_id; 2633 2634 VM_BUG_ON(!is_root_cache(cachep)); 2635 2636 if (current->memcg_kmem_skip_account) 2637 return cachep; 2638 2639 memcg = get_mem_cgroup_from_mm(current->mm); 2640 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2641 if (kmemcg_id < 0) 2642 goto out; 2643 2644 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2645 if (likely(memcg_cachep)) 2646 return memcg_cachep; 2647 2648 /* 2649 * If we are in a safe context (can wait, and not in interrupt 2650 * context), we could be predictable and return right away. 2651 * This would guarantee that the allocation being performed 2652 * already belongs in the new cache. 2653 * 2654 * However, there are some clashes that can arise from locking. 2655 * For instance, because we acquire the slab_mutex while doing 2656 * memcg_create_kmem_cache, this means no further allocation 2657 * could happen with the slab_mutex held. So it's better to 2658 * defer everything. 2659 */ 2660 memcg_schedule_kmem_cache_create(memcg, cachep); 2661 out: 2662 css_put(&memcg->css); 2663 return cachep; 2664 } 2665 2666 void __memcg_kmem_put_cache(struct kmem_cache *cachep) 2667 { 2668 if (!is_root_cache(cachep)) 2669 css_put(&cachep->memcg_params.memcg->css); 2670 } 2671 2672 /* 2673 * We need to verify if the allocation against current->mm->owner's memcg is 2674 * possible for the given order. But the page is not allocated yet, so we'll 2675 * need a further commit step to do the final arrangements. 2676 * 2677 * It is possible for the task to switch cgroups in the meantime, so at 2678 * commit time, we can't rely on task conversion any longer. We'll then use 2679 * the handle argument to return to the caller which cgroup we should commit 2680 * against. We could also return the memcg directly and avoid the pointer 2681 * passing, but a boolean return value gives better semantics considering 2682 * the compiled-out case as well. 2683 * 2684 * Returning true means the allocation is possible.
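 *
 * As a rough sketch, the newpage_charge/commit_charge pair is meant to
 * be used by the page allocator wrappers (see alloc_kmem_pages()) like:
 *
 *	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp_mask, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	return page;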
2685 */ 2686 bool 2687 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) 2688 { 2689 struct mem_cgroup *memcg; 2690 int ret; 2691 2692 *_memcg = NULL; 2693 2694 memcg = get_mem_cgroup_from_mm(current->mm); 2695 2696 if (!memcg_kmem_is_active(memcg)) { 2697 css_put(&memcg->css); 2698 return true; 2699 } 2700 2701 ret = memcg_charge_kmem(memcg, gfp, 1 << order); 2702 if (!ret) 2703 *_memcg = memcg; 2704 2705 css_put(&memcg->css); 2706 return (ret == 0); 2707 } 2708 2709 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, 2710 int order) 2711 { 2712 VM_BUG_ON(mem_cgroup_is_root(memcg)); 2713 2714 /* The page allocation failed. Revert */ 2715 if (!page) { 2716 memcg_uncharge_kmem(memcg, 1 << order); 2717 return; 2718 } 2719 page->mem_cgroup = memcg; 2720 } 2721 2722 void __memcg_kmem_uncharge_pages(struct page *page, int order) 2723 { 2724 struct mem_cgroup *memcg = page->mem_cgroup; 2725 2726 if (!memcg) 2727 return; 2728 2729 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2730 2731 memcg_uncharge_kmem(memcg, 1 << order); 2732 page->mem_cgroup = NULL; 2733 } 2734 2735 struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr) 2736 { 2737 struct mem_cgroup *memcg = NULL; 2738 struct kmem_cache *cachep; 2739 struct page *page; 2740 2741 page = virt_to_head_page(ptr); 2742 if (PageSlab(page)) { 2743 cachep = page->slab_cache; 2744 if (!is_root_cache(cachep)) 2745 memcg = cachep->memcg_params.memcg; 2746 } else 2747 /* page allocated by alloc_kmem_pages */ 2748 memcg = page->mem_cgroup; 2749 2750 return memcg; 2751 } 2752 #endif /* CONFIG_MEMCG_KMEM */ 2753 2754 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2755 2756 /* 2757 * Because tail pages are not marked as "used", set it. We're under 2758 * zone->lru_lock, 'splitting on pmd' and compound_lock. 2759 * charge/uncharge will be never happen and move_account() is done under 2760 * compound_lock(), so we don't have to take care of races. 2761 */ 2762 void mem_cgroup_split_huge_fixup(struct page *head) 2763 { 2764 int i; 2765 2766 if (mem_cgroup_disabled()) 2767 return; 2768 2769 for (i = 1; i < HPAGE_PMD_NR; i++) 2770 head[i].mem_cgroup = head->mem_cgroup; 2771 2772 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 2773 HPAGE_PMD_NR); 2774 } 2775 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2776 2777 #ifdef CONFIG_MEMCG_SWAP 2778 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2779 bool charge) 2780 { 2781 int val = (charge) ? 1 : -1; 2782 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); 2783 } 2784 2785 /** 2786 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2787 * @entry: swap entry to be moved 2788 * @from: mem_cgroup which the entry is moved from 2789 * @to: mem_cgroup which the entry is moved to 2790 * 2791 * It succeeds only when the swap_cgroup's record for this entry is the same 2792 * as the mem_cgroup's id of @from. 2793 * 2794 * Returns 0 on success, -EINVAL on failure. 2795 * 2796 * The caller must have charged to @to, IOW, called page_counter_charge() about 2797 * both res and memsw, and called css_get(). 
2798 */ 2799 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2800 struct mem_cgroup *from, struct mem_cgroup *to) 2801 { 2802 unsigned short old_id, new_id; 2803 2804 old_id = mem_cgroup_id(from); 2805 new_id = mem_cgroup_id(to); 2806 2807 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2808 mem_cgroup_swap_statistics(from, false); 2809 mem_cgroup_swap_statistics(to, true); 2810 return 0; 2811 } 2812 return -EINVAL; 2813 } 2814 #else 2815 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2816 struct mem_cgroup *from, struct mem_cgroup *to) 2817 { 2818 return -EINVAL; 2819 } 2820 #endif 2821 2822 static DEFINE_MUTEX(memcg_limit_mutex); 2823 2824 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 2825 unsigned long limit) 2826 { 2827 unsigned long curusage; 2828 unsigned long oldusage; 2829 bool enlarge = false; 2830 int retry_count; 2831 int ret; 2832 2833 /* 2834 * For keeping hierarchical_reclaim simple, how long we should retry 2835 * is depends on callers. We set our retry-count to be function 2836 * of # of children which we should visit in this loop. 2837 */ 2838 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2839 mem_cgroup_count_children(memcg); 2840 2841 oldusage = page_counter_read(&memcg->memory); 2842 2843 do { 2844 if (signal_pending(current)) { 2845 ret = -EINTR; 2846 break; 2847 } 2848 2849 mutex_lock(&memcg_limit_mutex); 2850 if (limit > memcg->memsw.limit) { 2851 mutex_unlock(&memcg_limit_mutex); 2852 ret = -EINVAL; 2853 break; 2854 } 2855 if (limit > memcg->memory.limit) 2856 enlarge = true; 2857 ret = page_counter_limit(&memcg->memory, limit); 2858 mutex_unlock(&memcg_limit_mutex); 2859 2860 if (!ret) 2861 break; 2862 2863 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); 2864 2865 curusage = page_counter_read(&memcg->memory); 2866 /* Usage is reduced ? */ 2867 if (curusage >= oldusage) 2868 retry_count--; 2869 else 2870 oldusage = curusage; 2871 } while (retry_count); 2872 2873 if (!ret && enlarge) 2874 memcg_oom_recover(memcg); 2875 2876 return ret; 2877 } 2878 2879 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 2880 unsigned long limit) 2881 { 2882 unsigned long curusage; 2883 unsigned long oldusage; 2884 bool enlarge = false; 2885 int retry_count; 2886 int ret; 2887 2888 /* see mem_cgroup_resize_res_limit */ 2889 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2890 mem_cgroup_count_children(memcg); 2891 2892 oldusage = page_counter_read(&memcg->memsw); 2893 2894 do { 2895 if (signal_pending(current)) { 2896 ret = -EINTR; 2897 break; 2898 } 2899 2900 mutex_lock(&memcg_limit_mutex); 2901 if (limit < memcg->memory.limit) { 2902 mutex_unlock(&memcg_limit_mutex); 2903 ret = -EINVAL; 2904 break; 2905 } 2906 if (limit > memcg->memsw.limit) 2907 enlarge = true; 2908 ret = page_counter_limit(&memcg->memsw, limit); 2909 mutex_unlock(&memcg_limit_mutex); 2910 2911 if (!ret) 2912 break; 2913 2914 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); 2915 2916 curusage = page_counter_read(&memcg->memsw); 2917 /* Usage is reduced ? 
*/ 2918 if (curusage >= oldusage) 2919 retry_count--; 2920 else 2921 oldusage = curusage; 2922 } while (retry_count); 2923 2924 if (!ret && enlarge) 2925 memcg_oom_recover(memcg); 2926 2927 return ret; 2928 } 2929 2930 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 2931 gfp_t gfp_mask, 2932 unsigned long *total_scanned) 2933 { 2934 unsigned long nr_reclaimed = 0; 2935 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 2936 unsigned long reclaimed; 2937 int loop = 0; 2938 struct mem_cgroup_tree_per_zone *mctz; 2939 unsigned long excess; 2940 unsigned long nr_scanned; 2941 2942 if (order > 0) 2943 return 0; 2944 2945 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 2946 /* 2947 * This loop can run for a while, especially if mem_cgroups continuously 2948 * keep exceeding their soft limit and putting the system under 2949 * pressure. 2950 */ 2951 do { 2952 if (next_mz) 2953 mz = next_mz; 2954 else 2955 mz = mem_cgroup_largest_soft_limit_node(mctz); 2956 if (!mz) 2957 break; 2958 2959 nr_scanned = 0; 2960 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, 2961 gfp_mask, &nr_scanned); 2962 nr_reclaimed += reclaimed; 2963 *total_scanned += nr_scanned; 2964 spin_lock_irq(&mctz->lock); 2965 __mem_cgroup_remove_exceeded(mz, mctz); 2966 2967 /* 2968 * If we failed to reclaim anything from this memory cgroup 2969 * it is time to move on to the next cgroup. 2970 */ 2971 next_mz = NULL; 2972 if (!reclaimed) 2973 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2974 2975 excess = soft_limit_excess(mz->memcg); 2976 /* 2977 * One school of thought says that we should not add 2978 * the node back to the tree if reclaim returns 0. 2979 * But our reclaim could return 0 simply because, due 2980 * to priority, we are exposing a smaller subset of 2981 * memory to reclaim from. Consider this as a longer 2982 * term TODO. 2983 */ 2984 /* If excess == 0, no tree ops */ 2985 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2986 spin_unlock_irq(&mctz->lock); 2987 css_put(&mz->memcg->css); 2988 loop++; 2989 /* 2990 * Could not reclaim anything and there are no more 2991 * mem cgroups to try, or we seem to be looping without 2992 * reclaiming anything. 2993 */ 2994 if (!nr_reclaimed && 2995 (next_mz == NULL || 2996 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2997 break; 2998 } while (!nr_reclaimed); 2999 if (next_mz) 3000 css_put(&next_mz->memcg->css); 3001 return nr_reclaimed; 3002 } 3003 3004 /* 3005 * Test whether @memcg has children, dead or alive. Note that this 3006 * function doesn't care whether @memcg has use_hierarchy enabled and 3007 * returns %true if there are child csses according to the cgroup 3008 * hierarchy. Testing use_hierarchy is the caller's responsibility. 3009 */ 3010 static inline bool memcg_has_children(struct mem_cgroup *memcg) 3011 { 3012 bool ret; 3013 3014 /* 3015 * The lock does not prevent addition or deletion of children, but 3016 * it prevents a new child from being initialized based on this 3017 * parent in css_online(), so it's enough to decide whether 3018 * hierarchically inherited attributes can still be changed or not. 3019 */ 3020 lockdep_assert_held(&memcg_create_mutex); 3021 3022 rcu_read_lock(); 3023 ret = css_next_child(NULL, &memcg->css); 3024 rcu_read_unlock(); 3025 return ret; 3026 } 3027 3028 /* 3029 * Reclaims as many pages from the given memcg as possible. 3030 * 3031 * The caller is responsible for holding a css reference on @memcg.
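 *
 * This is what backs the memory.force_empty control file; see
 * mem_cgroup_force_empty_write() below.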
3033 */ 3034 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3035 { 3036 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 3037 3038 /* we call try-to-free pages for make this cgroup empty */ 3039 lru_add_drain_all(); 3040 /* try to free all pages in this cgroup */ 3041 while (nr_retries && page_counter_read(&memcg->memory)) { 3042 int progress; 3043 3044 if (signal_pending(current)) 3045 return -EINTR; 3046 3047 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3048 GFP_KERNEL, true); 3049 if (!progress) { 3050 nr_retries--; 3051 /* maybe some writeback is necessary */ 3052 congestion_wait(BLK_RW_ASYNC, HZ/10); 3053 } 3054 3055 } 3056 3057 return 0; 3058 } 3059 3060 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3061 char *buf, size_t nbytes, 3062 loff_t off) 3063 { 3064 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3065 3066 if (mem_cgroup_is_root(memcg)) 3067 return -EINVAL; 3068 return mem_cgroup_force_empty(memcg) ?: nbytes; 3069 } 3070 3071 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3072 struct cftype *cft) 3073 { 3074 return mem_cgroup_from_css(css)->use_hierarchy; 3075 } 3076 3077 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3078 struct cftype *cft, u64 val) 3079 { 3080 int retval = 0; 3081 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3082 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 3083 3084 mutex_lock(&memcg_create_mutex); 3085 3086 if (memcg->use_hierarchy == val) 3087 goto out; 3088 3089 /* 3090 * If parent's use_hierarchy is set, we can't make any modifications 3091 * in the child subtrees. If it is unset, then the change can 3092 * occur, provided the current cgroup has no children. 3093 * 3094 * For the root cgroup, parent_mem is NULL, we allow value to be 3095 * set if there are no children. 3096 */ 3097 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3098 (val == 1 || val == 0)) { 3099 if (!memcg_has_children(memcg)) 3100 memcg->use_hierarchy = val; 3101 else 3102 retval = -EBUSY; 3103 } else 3104 retval = -EINVAL; 3105 3106 out: 3107 mutex_unlock(&memcg_create_mutex); 3108 3109 return retval; 3110 } 3111 3112 static unsigned long tree_stat(struct mem_cgroup *memcg, 3113 enum mem_cgroup_stat_index idx) 3114 { 3115 struct mem_cgroup *iter; 3116 long val = 0; 3117 3118 /* Per-cpu values can be negative, use a signed accumulator */ 3119 for_each_mem_cgroup_tree(iter, memcg) 3120 val += mem_cgroup_read_stat(iter, idx); 3121 3122 if (val < 0) /* race ? 
*/ 3123 val = 0; 3124 return val; 3125 } 3126 3127 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3128 { 3129 u64 val; 3130 3131 if (mem_cgroup_is_root(memcg)) { 3132 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); 3133 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); 3134 if (swap) 3135 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); 3136 } else { 3137 if (!swap) 3138 val = page_counter_read(&memcg->memory); 3139 else 3140 val = page_counter_read(&memcg->memsw); 3141 } 3142 return val << PAGE_SHIFT; 3143 } 3144 3145 enum { 3146 RES_USAGE, 3147 RES_LIMIT, 3148 RES_MAX_USAGE, 3149 RES_FAILCNT, 3150 RES_SOFT_LIMIT, 3151 }; 3152 3153 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3154 struct cftype *cft) 3155 { 3156 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3157 struct page_counter *counter; 3158 3159 switch (MEMFILE_TYPE(cft->private)) { 3160 case _MEM: 3161 counter = &memcg->memory; 3162 break; 3163 case _MEMSWAP: 3164 counter = &memcg->memsw; 3165 break; 3166 case _KMEM: 3167 counter = &memcg->kmem; 3168 break; 3169 default: 3170 BUG(); 3171 } 3172 3173 switch (MEMFILE_ATTR(cft->private)) { 3174 case RES_USAGE: 3175 if (counter == &memcg->memory) 3176 return mem_cgroup_usage(memcg, false); 3177 if (counter == &memcg->memsw) 3178 return mem_cgroup_usage(memcg, true); 3179 return (u64)page_counter_read(counter) * PAGE_SIZE; 3180 case RES_LIMIT: 3181 return (u64)counter->limit * PAGE_SIZE; 3182 case RES_MAX_USAGE: 3183 return (u64)counter->watermark * PAGE_SIZE; 3184 case RES_FAILCNT: 3185 return counter->failcnt; 3186 case RES_SOFT_LIMIT: 3187 return (u64)memcg->soft_limit * PAGE_SIZE; 3188 default: 3189 BUG(); 3190 } 3191 } 3192 3193 #ifdef CONFIG_MEMCG_KMEM 3194 static int memcg_activate_kmem(struct mem_cgroup *memcg, 3195 unsigned long nr_pages) 3196 { 3197 int err = 0; 3198 int memcg_id; 3199 3200 BUG_ON(memcg->kmemcg_id >= 0); 3201 BUG_ON(memcg->kmem_acct_activated); 3202 BUG_ON(memcg->kmem_acct_active); 3203 3204 /* 3205 * For simplicity, we won't allow this to be disabled. It also can't 3206 * be changed if the cgroup has children already, or if tasks had 3207 * already joined. 3208 * 3209 * If tasks join before we set the limit, a person looking at 3210 * kmem.usage_in_bytes will have no way to determine when it took 3211 * place, which makes the value quite meaningless. 3212 * 3213 * After it first became limited, changes in the value of the limit are 3214 * of course permitted. 3215 */ 3216 mutex_lock(&memcg_create_mutex); 3217 if (cgroup_has_tasks(memcg->css.cgroup) || 3218 (memcg->use_hierarchy && memcg_has_children(memcg))) 3219 err = -EBUSY; 3220 mutex_unlock(&memcg_create_mutex); 3221 if (err) 3222 goto out; 3223 3224 memcg_id = memcg_alloc_cache_id(); 3225 if (memcg_id < 0) { 3226 err = memcg_id; 3227 goto out; 3228 } 3229 3230 /* 3231 * We couldn't have accounted to this cgroup, because it hasn't got 3232 * activated yet, so this should succeed. 3233 */ 3234 err = page_counter_limit(&memcg->kmem, nr_pages); 3235 VM_BUG_ON(err); 3236 3237 static_key_slow_inc(&memcg_kmem_enabled_key); 3238 /* 3239 * A memory cgroup is considered kmem-active as soon as it gets 3240 * kmemcg_id. Setting the id after enabling static branching will 3241 * guarantee no one starts accounting before all call sites are 3242 * patched. 
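 *
 * (For instance, __memcg_kmem_get_cache() above bails out as long as
 * kmemcg_id is still negative.)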
3243 */ 3244 memcg->kmemcg_id = memcg_id; 3245 memcg->kmem_acct_activated = true; 3246 memcg->kmem_acct_active = true; 3247 out: 3248 return err; 3249 } 3250 3251 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 3252 unsigned long limit) 3253 { 3254 int ret; 3255 3256 mutex_lock(&memcg_limit_mutex); 3257 if (!memcg_kmem_is_active(memcg)) 3258 ret = memcg_activate_kmem(memcg, limit); 3259 else 3260 ret = page_counter_limit(&memcg->kmem, limit); 3261 mutex_unlock(&memcg_limit_mutex); 3262 return ret; 3263 } 3264 3265 static int memcg_propagate_kmem(struct mem_cgroup *memcg) 3266 { 3267 int ret = 0; 3268 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 3269 3270 if (!parent) 3271 return 0; 3272 3273 mutex_lock(&memcg_limit_mutex); 3274 /* 3275 * If the parent cgroup is not kmem-active now, it cannot be activated 3276 * after this point, because it has at least one child already. 3277 */ 3278 if (memcg_kmem_is_active(parent)) 3279 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); 3280 mutex_unlock(&memcg_limit_mutex); 3281 return ret; 3282 } 3283 #else 3284 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 3285 unsigned long limit) 3286 { 3287 return -EINVAL; 3288 } 3289 #endif /* CONFIG_MEMCG_KMEM */ 3290 3291 /* 3292 * The user of this function is... 3293 * RES_LIMIT. 3294 */ 3295 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3296 char *buf, size_t nbytes, loff_t off) 3297 { 3298 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3299 unsigned long nr_pages; 3300 int ret; 3301 3302 buf = strstrip(buf); 3303 ret = page_counter_memparse(buf, "-1", &nr_pages); 3304 if (ret) 3305 return ret; 3306 3307 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3308 case RES_LIMIT: 3309 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3310 ret = -EINVAL; 3311 break; 3312 } 3313 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3314 case _MEM: 3315 ret = mem_cgroup_resize_limit(memcg, nr_pages); 3316 break; 3317 case _MEMSWAP: 3318 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); 3319 break; 3320 case _KMEM: 3321 ret = memcg_update_kmem_limit(memcg, nr_pages); 3322 break; 3323 } 3324 break; 3325 case RES_SOFT_LIMIT: 3326 memcg->soft_limit = nr_pages; 3327 ret = 0; 3328 break; 3329 } 3330 return ret ?: nbytes; 3331 } 3332 3333 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3334 size_t nbytes, loff_t off) 3335 { 3336 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3337 struct page_counter *counter; 3338 3339 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3340 case _MEM: 3341 counter = &memcg->memory; 3342 break; 3343 case _MEMSWAP: 3344 counter = &memcg->memsw; 3345 break; 3346 case _KMEM: 3347 counter = &memcg->kmem; 3348 break; 3349 default: 3350 BUG(); 3351 } 3352 3353 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3354 case RES_MAX_USAGE: 3355 page_counter_reset_watermark(counter); 3356 break; 3357 case RES_FAILCNT: 3358 counter->failcnt = 0; 3359 break; 3360 default: 3361 BUG(); 3362 } 3363 3364 return nbytes; 3365 } 3366 3367 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3368 struct cftype *cft) 3369 { 3370 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3371 } 3372 3373 #ifdef CONFIG_MMU 3374 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3375 struct cftype *cft, u64 val) 3376 { 3377 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3378 3379 if (val & ~MOVE_MASK) 3380 return -EINVAL; 3381 3382 /* 3383 * No kind of locking is needed in here, 
because ->can_attach() will 3384 * check this value once in the beginning of the process, and then carry 3385 * on with stale data. This means that changes to this value will only 3386 * affect task migrations starting after the change. 3387 */ 3388 memcg->move_charge_at_immigrate = val; 3389 return 0; 3390 } 3391 #else 3392 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3393 struct cftype *cft, u64 val) 3394 { 3395 return -ENOSYS; 3396 } 3397 #endif 3398 3399 #ifdef CONFIG_NUMA 3400 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3401 { 3402 struct numa_stat { 3403 const char *name; 3404 unsigned int lru_mask; 3405 }; 3406 3407 static const struct numa_stat stats[] = { 3408 { "total", LRU_ALL }, 3409 { "file", LRU_ALL_FILE }, 3410 { "anon", LRU_ALL_ANON }, 3411 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3412 }; 3413 const struct numa_stat *stat; 3414 int nid; 3415 unsigned long nr; 3416 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3417 3418 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3419 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3420 seq_printf(m, "%s=%lu", stat->name, nr); 3421 for_each_node_state(nid, N_MEMORY) { 3422 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3423 stat->lru_mask); 3424 seq_printf(m, " N%d=%lu", nid, nr); 3425 } 3426 seq_putc(m, '\n'); 3427 } 3428 3429 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3430 struct mem_cgroup *iter; 3431 3432 nr = 0; 3433 for_each_mem_cgroup_tree(iter, memcg) 3434 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3435 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3436 for_each_node_state(nid, N_MEMORY) { 3437 nr = 0; 3438 for_each_mem_cgroup_tree(iter, memcg) 3439 nr += mem_cgroup_node_nr_lru_pages( 3440 iter, nid, stat->lru_mask); 3441 seq_printf(m, " N%d=%lu", nid, nr); 3442 } 3443 seq_putc(m, '\n'); 3444 } 3445 3446 return 0; 3447 } 3448 #endif /* CONFIG_NUMA */ 3449 3450 static int memcg_stat_show(struct seq_file *m, void *v) 3451 { 3452 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3453 unsigned long memory, memsw; 3454 struct mem_cgroup *mi; 3455 unsigned int i; 3456 3457 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) != 3458 MEM_CGROUP_STAT_NSTATS); 3459 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) != 3460 MEM_CGROUP_EVENTS_NSTATS); 3461 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3462 3463 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3464 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3465 continue; 3466 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], 3467 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3468 } 3469 3470 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 3471 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 3472 mem_cgroup_read_events(memcg, i)); 3473 3474 for (i = 0; i < NR_LRU_LISTS; i++) 3475 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3476 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3477 3478 /* Hierarchical information */ 3479 memory = memsw = PAGE_COUNTER_MAX; 3480 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3481 memory = min(memory, mi->memory.limit); 3482 memsw = min(memsw, mi->memsw.limit); 3483 } 3484 seq_printf(m, "hierarchical_memory_limit %llu\n", 3485 (u64)memory * PAGE_SIZE); 3486 if (do_swap_account) 3487 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3488 (u64)memsw * PAGE_SIZE); 3489 3490 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3491 long long val = 0; 3492 3493 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 
3494 continue; 3495 for_each_mem_cgroup_tree(mi, memcg) 3496 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3497 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); 3498 } 3499 3500 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { 3501 unsigned long long val = 0; 3502 3503 for_each_mem_cgroup_tree(mi, memcg) 3504 val += mem_cgroup_read_events(mi, i); 3505 seq_printf(m, "total_%s %llu\n", 3506 mem_cgroup_events_names[i], val); 3507 } 3508 3509 for (i = 0; i < NR_LRU_LISTS; i++) { 3510 unsigned long long val = 0; 3511 3512 for_each_mem_cgroup_tree(mi, memcg) 3513 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3514 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3515 } 3516 3517 #ifdef CONFIG_DEBUG_VM 3518 { 3519 int nid, zid; 3520 struct mem_cgroup_per_zone *mz; 3521 struct zone_reclaim_stat *rstat; 3522 unsigned long recent_rotated[2] = {0, 0}; 3523 unsigned long recent_scanned[2] = {0, 0}; 3524 3525 for_each_online_node(nid) 3526 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 3527 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 3528 rstat = &mz->lruvec.reclaim_stat; 3529 3530 recent_rotated[0] += rstat->recent_rotated[0]; 3531 recent_rotated[1] += rstat->recent_rotated[1]; 3532 recent_scanned[0] += rstat->recent_scanned[0]; 3533 recent_scanned[1] += rstat->recent_scanned[1]; 3534 } 3535 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3536 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3537 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3538 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3539 } 3540 #endif 3541 3542 return 0; 3543 } 3544 3545 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3546 struct cftype *cft) 3547 { 3548 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3549 3550 return mem_cgroup_swappiness(memcg); 3551 } 3552 3553 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3554 struct cftype *cft, u64 val) 3555 { 3556 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3557 3558 if (val > 100) 3559 return -EINVAL; 3560 3561 if (css->parent) 3562 memcg->swappiness = val; 3563 else 3564 vm_swappiness = val; 3565 3566 return 0; 3567 } 3568 3569 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3570 { 3571 struct mem_cgroup_threshold_ary *t; 3572 unsigned long usage; 3573 int i; 3574 3575 rcu_read_lock(); 3576 if (!swap) 3577 t = rcu_dereference(memcg->thresholds.primary); 3578 else 3579 t = rcu_dereference(memcg->memsw_thresholds.primary); 3580 3581 if (!t) 3582 goto unlock; 3583 3584 usage = mem_cgroup_usage(memcg, swap); 3585 3586 /* 3587 * current_threshold points to threshold just below or equal to usage. 3588 * If it's not true, a threshold was crossed after last 3589 * call of __mem_cgroup_threshold(). 3590 */ 3591 i = t->current_threshold; 3592 3593 /* 3594 * Iterate backward over array of thresholds starting from 3595 * current_threshold and check if a threshold is crossed. 3596 * If none of thresholds below usage is crossed, we read 3597 * only one element of the array here. 3598 */ 3599 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3600 eventfd_signal(t->entries[i].eventfd, 1); 3601 3602 /* i = current_threshold + 1 */ 3603 i++; 3604 3605 /* 3606 * Iterate forward over array of thresholds starting from 3607 * current_threshold+1 and check if a threshold is crossed. 3608 * If none of thresholds above usage is crossed, we read 3609 * only one element of the array here. 
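 *
 * For example, with thresholds {4M, 8M, 16M} and current_threshold
 * pointing at 8M: if usage has grown to 20M, the loop below signals
 * the 16M eventfd and current_threshold ends up pointing at 16M.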
3610 */ 3611 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3612 eventfd_signal(t->entries[i].eventfd, 1); 3613 3614 /* Update current_threshold */ 3615 t->current_threshold = i - 1; 3616 unlock: 3617 rcu_read_unlock(); 3618 } 3619 3620 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3621 { 3622 while (memcg) { 3623 __mem_cgroup_threshold(memcg, false); 3624 if (do_swap_account) 3625 __mem_cgroup_threshold(memcg, true); 3626 3627 memcg = parent_mem_cgroup(memcg); 3628 } 3629 } 3630 3631 static int compare_thresholds(const void *a, const void *b) 3632 { 3633 const struct mem_cgroup_threshold *_a = a; 3634 const struct mem_cgroup_threshold *_b = b; 3635 3636 if (_a->threshold > _b->threshold) 3637 return 1; 3638 3639 if (_a->threshold < _b->threshold) 3640 return -1; 3641 3642 return 0; 3643 } 3644 3645 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3646 { 3647 struct mem_cgroup_eventfd_list *ev; 3648 3649 spin_lock(&memcg_oom_lock); 3650 3651 list_for_each_entry(ev, &memcg->oom_notify, list) 3652 eventfd_signal(ev->eventfd, 1); 3653 3654 spin_unlock(&memcg_oom_lock); 3655 return 0; 3656 } 3657 3658 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3659 { 3660 struct mem_cgroup *iter; 3661 3662 for_each_mem_cgroup_tree(iter, memcg) 3663 mem_cgroup_oom_notify_cb(iter); 3664 } 3665 3666 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3667 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3668 { 3669 struct mem_cgroup_thresholds *thresholds; 3670 struct mem_cgroup_threshold_ary *new; 3671 unsigned long threshold; 3672 unsigned long usage; 3673 int i, size, ret; 3674 3675 ret = page_counter_memparse(args, "-1", &threshold); 3676 if (ret) 3677 return ret; 3678 3679 mutex_lock(&memcg->thresholds_lock); 3680 3681 if (type == _MEM) { 3682 thresholds = &memcg->thresholds; 3683 usage = mem_cgroup_usage(memcg, false); 3684 } else if (type == _MEMSWAP) { 3685 thresholds = &memcg->memsw_thresholds; 3686 usage = mem_cgroup_usage(memcg, true); 3687 } else 3688 BUG(); 3689 3690 /* Check if a threshold crossed before adding a new one */ 3691 if (thresholds->primary) 3692 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3693 3694 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3695 3696 /* Allocate memory for new array of thresholds */ 3697 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3698 GFP_KERNEL); 3699 if (!new) { 3700 ret = -ENOMEM; 3701 goto unlock; 3702 } 3703 new->size = size; 3704 3705 /* Copy thresholds (if any) to new array */ 3706 if (thresholds->primary) { 3707 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3708 sizeof(struct mem_cgroup_threshold)); 3709 } 3710 3711 /* Add new threshold */ 3712 new->entries[size - 1].eventfd = eventfd; 3713 new->entries[size - 1].threshold = threshold; 3714 3715 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3716 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3717 compare_thresholds, NULL); 3718 3719 /* Find current threshold */ 3720 new->current_threshold = -1; 3721 for (i = 0; i < size; i++) { 3722 if (new->entries[i].threshold <= usage) { 3723 /* 3724 * new->current_threshold will not be used until 3725 * rcu_assign_pointer(), so it's safe to increment 3726 * it here. 
3727 */ 3728 ++new->current_threshold; 3729 } else 3730 break; 3731 } 3732 3733 /* Free old spare buffer and save old primary buffer as spare */ 3734 kfree(thresholds->spare); 3735 thresholds->spare = thresholds->primary; 3736 3737 rcu_assign_pointer(thresholds->primary, new); 3738 3739 /* To be sure that nobody uses thresholds */ 3740 synchronize_rcu(); 3741 3742 unlock: 3743 mutex_unlock(&memcg->thresholds_lock); 3744 3745 return ret; 3746 } 3747 3748 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3749 struct eventfd_ctx *eventfd, const char *args) 3750 { 3751 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3752 } 3753 3754 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3755 struct eventfd_ctx *eventfd, const char *args) 3756 { 3757 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3758 } 3759 3760 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3761 struct eventfd_ctx *eventfd, enum res_type type) 3762 { 3763 struct mem_cgroup_thresholds *thresholds; 3764 struct mem_cgroup_threshold_ary *new; 3765 unsigned long usage; 3766 int i, j, size; 3767 3768 mutex_lock(&memcg->thresholds_lock); 3769 3770 if (type == _MEM) { 3771 thresholds = &memcg->thresholds; 3772 usage = mem_cgroup_usage(memcg, false); 3773 } else if (type == _MEMSWAP) { 3774 thresholds = &memcg->memsw_thresholds; 3775 usage = mem_cgroup_usage(memcg, true); 3776 } else 3777 BUG(); 3778 3779 if (!thresholds->primary) 3780 goto unlock; 3781 3782 /* Check if a threshold crossed before removing */ 3783 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3784 3785 /* Calculate new number of threshold */ 3786 size = 0; 3787 for (i = 0; i < thresholds->primary->size; i++) { 3788 if (thresholds->primary->entries[i].eventfd != eventfd) 3789 size++; 3790 } 3791 3792 new = thresholds->spare; 3793 3794 /* Set thresholds array to NULL if we don't have thresholds */ 3795 if (!size) { 3796 kfree(new); 3797 new = NULL; 3798 goto swap_buffers; 3799 } 3800 3801 new->size = size; 3802 3803 /* Copy thresholds and find current threshold */ 3804 new->current_threshold = -1; 3805 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3806 if (thresholds->primary->entries[i].eventfd == eventfd) 3807 continue; 3808 3809 new->entries[j] = thresholds->primary->entries[i]; 3810 if (new->entries[j].threshold <= usage) { 3811 /* 3812 * new->current_threshold will not be used 3813 * until rcu_assign_pointer(), so it's safe to increment 3814 * it here. 
3815 */ 3816 ++new->current_threshold; 3817 } 3818 j++; 3819 } 3820 3821 swap_buffers: 3822 /* Swap primary and spare array */ 3823 thresholds->spare = thresholds->primary; 3824 /* If all events are unregistered, free the spare array */ 3825 if (!new) { 3826 kfree(thresholds->spare); 3827 thresholds->spare = NULL; 3828 } 3829 3830 rcu_assign_pointer(thresholds->primary, new); 3831 3832 /* To be sure that nobody uses thresholds */ 3833 synchronize_rcu(); 3834 unlock: 3835 mutex_unlock(&memcg->thresholds_lock); 3836 } 3837 3838 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3839 struct eventfd_ctx *eventfd) 3840 { 3841 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3842 } 3843 3844 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3845 struct eventfd_ctx *eventfd) 3846 { 3847 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3848 } 3849 3850 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3851 struct eventfd_ctx *eventfd, const char *args) 3852 { 3853 struct mem_cgroup_eventfd_list *event; 3854 3855 event = kmalloc(sizeof(*event), GFP_KERNEL); 3856 if (!event) 3857 return -ENOMEM; 3858 3859 spin_lock(&memcg_oom_lock); 3860 3861 event->eventfd = eventfd; 3862 list_add(&event->list, &memcg->oom_notify); 3863 3864 /* already in OOM ? */ 3865 if (atomic_read(&memcg->under_oom)) 3866 eventfd_signal(eventfd, 1); 3867 spin_unlock(&memcg_oom_lock); 3868 3869 return 0; 3870 } 3871 3872 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3873 struct eventfd_ctx *eventfd) 3874 { 3875 struct mem_cgroup_eventfd_list *ev, *tmp; 3876 3877 spin_lock(&memcg_oom_lock); 3878 3879 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3880 if (ev->eventfd == eventfd) { 3881 list_del(&ev->list); 3882 kfree(ev); 3883 } 3884 } 3885 3886 spin_unlock(&memcg_oom_lock); 3887 } 3888 3889 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3890 { 3891 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3892 3893 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3894 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom)); 3895 return 0; 3896 } 3897 3898 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3899 struct cftype *cft, u64 val) 3900 { 3901 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3902 3903 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3904 if (!css->parent || !((val == 0) || (val == 1))) 3905 return -EINVAL; 3906 3907 memcg->oom_kill_disable = val; 3908 if (!val) 3909 memcg_oom_recover(memcg); 3910 3911 return 0; 3912 } 3913 3914 #ifdef CONFIG_MEMCG_KMEM 3915 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 3916 { 3917 int ret; 3918 3919 ret = memcg_propagate_kmem(memcg); 3920 if (ret) 3921 return ret; 3922 3923 return mem_cgroup_sockets_init(memcg, ss); 3924 } 3925 3926 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) 3927 { 3928 struct cgroup_subsys_state *css; 3929 struct mem_cgroup *parent, *child; 3930 int kmemcg_id; 3931 3932 if (!memcg->kmem_acct_active) 3933 return; 3934 3935 /* 3936 * Clear the 'active' flag before clearing memcg_caches arrays entries. 3937 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it 3938 * guarantees no cache will be created for this cgroup after we are 3939 * done (see memcg_create_kmem_cache()). 
3940 */ 3941 memcg->kmem_acct_active = false; 3942 3943 memcg_deactivate_kmem_caches(memcg); 3944 3945 kmemcg_id = memcg->kmemcg_id; 3946 BUG_ON(kmemcg_id < 0); 3947 3948 parent = parent_mem_cgroup(memcg); 3949 if (!parent) 3950 parent = root_mem_cgroup; 3951 3952 /* 3953 * Change kmemcg_id of this cgroup and all its descendants to the 3954 * parent's id, and then move all entries from this cgroup's list_lrus 3955 * to ones of the parent. After we have finished, all list_lrus 3956 * corresponding to this cgroup are guaranteed to remain empty. The 3957 * ordering is imposed by list_lru_node->lock taken by 3958 * memcg_drain_all_list_lrus(). 3959 */ 3960 css_for_each_descendant_pre(css, &memcg->css) { 3961 child = mem_cgroup_from_css(css); 3962 BUG_ON(child->kmemcg_id != kmemcg_id); 3963 child->kmemcg_id = parent->kmemcg_id; 3964 if (!memcg->use_hierarchy) 3965 break; 3966 } 3967 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 3968 3969 memcg_free_cache_id(kmemcg_id); 3970 } 3971 3972 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 3973 { 3974 if (memcg->kmem_acct_activated) { 3975 memcg_destroy_kmem_caches(memcg); 3976 static_key_slow_dec(&memcg_kmem_enabled_key); 3977 WARN_ON(page_counter_read(&memcg->kmem)); 3978 } 3979 mem_cgroup_sockets_destroy(memcg); 3980 } 3981 #else 3982 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 3983 { 3984 return 0; 3985 } 3986 3987 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) 3988 { 3989 } 3990 3991 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 3992 { 3993 } 3994 #endif 3995 3996 /* 3997 * DO NOT USE IN NEW FILES. 3998 * 3999 * "cgroup.event_control" implementation. 4000 * 4001 * This is way over-engineered. It tries to support fully configurable 4002 * events for each user. Such level of flexibility is completely 4003 * unnecessary especially in the light of the planned unified hierarchy. 4004 * 4005 * Please deprecate this and replace with something simpler if at all 4006 * possible. 4007 */ 4008 4009 /* 4010 * Unregister event and free resources. 4011 * 4012 * Gets called from workqueue. 4013 */ 4014 static void memcg_event_remove(struct work_struct *work) 4015 { 4016 struct mem_cgroup_event *event = 4017 container_of(work, struct mem_cgroup_event, remove); 4018 struct mem_cgroup *memcg = event->memcg; 4019 4020 remove_wait_queue(event->wqh, &event->wait); 4021 4022 event->unregister_event(memcg, event->eventfd); 4023 4024 /* Notify userspace the event is going away. */ 4025 eventfd_signal(event->eventfd, 1); 4026 4027 eventfd_ctx_put(event->eventfd); 4028 kfree(event); 4029 css_put(&memcg->css); 4030 } 4031 4032 /* 4033 * Gets called on POLLHUP on eventfd when user closes it. 4034 * 4035 * Called with wqh->lock held and interrupts disabled. 4036 */ 4037 static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 4038 int sync, void *key) 4039 { 4040 struct mem_cgroup_event *event = 4041 container_of(wait, struct mem_cgroup_event, wait); 4042 struct mem_cgroup *memcg = event->memcg; 4043 unsigned long flags = (unsigned long)key; 4044 4045 if (flags & POLLHUP) { 4046 /* 4047 * If the event has been detached at cgroup removal, we 4048 * can simply return knowing the other side will cleanup 4049 * for us. 4050 * 4051 * We can't race against event freeing since the other 4052 * side will require wqh->lock via remove_wait_queue(), 4053 * which we hold. 
4054 */ 4055 spin_lock(&memcg->event_list_lock); 4056 if (!list_empty(&event->list)) { 4057 list_del_init(&event->list); 4058 /* 4059 * We are in atomic context, but cgroup_event_remove() 4060 * may sleep, so we have to call it in workqueue. 4061 */ 4062 schedule_work(&event->remove); 4063 } 4064 spin_unlock(&memcg->event_list_lock); 4065 } 4066 4067 return 0; 4068 } 4069 4070 static void memcg_event_ptable_queue_proc(struct file *file, 4071 wait_queue_head_t *wqh, poll_table *pt) 4072 { 4073 struct mem_cgroup_event *event = 4074 container_of(pt, struct mem_cgroup_event, pt); 4075 4076 event->wqh = wqh; 4077 add_wait_queue(wqh, &event->wait); 4078 } 4079 4080 /* 4081 * DO NOT USE IN NEW FILES. 4082 * 4083 * Parse input and register new cgroup event handler. 4084 * 4085 * Input must be in format '<event_fd> <control_fd> <args>'. 4086 * Interpretation of args is defined by control file implementation. 4087 */ 4088 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4089 char *buf, size_t nbytes, loff_t off) 4090 { 4091 struct cgroup_subsys_state *css = of_css(of); 4092 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4093 struct mem_cgroup_event *event; 4094 struct cgroup_subsys_state *cfile_css; 4095 unsigned int efd, cfd; 4096 struct fd efile; 4097 struct fd cfile; 4098 const char *name; 4099 char *endp; 4100 int ret; 4101 4102 buf = strstrip(buf); 4103 4104 efd = simple_strtoul(buf, &endp, 10); 4105 if (*endp != ' ') 4106 return -EINVAL; 4107 buf = endp + 1; 4108 4109 cfd = simple_strtoul(buf, &endp, 10); 4110 if ((*endp != ' ') && (*endp != '\0')) 4111 return -EINVAL; 4112 buf = endp + 1; 4113 4114 event = kzalloc(sizeof(*event), GFP_KERNEL); 4115 if (!event) 4116 return -ENOMEM; 4117 4118 event->memcg = memcg; 4119 INIT_LIST_HEAD(&event->list); 4120 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4121 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4122 INIT_WORK(&event->remove, memcg_event_remove); 4123 4124 efile = fdget(efd); 4125 if (!efile.file) { 4126 ret = -EBADF; 4127 goto out_kfree; 4128 } 4129 4130 event->eventfd = eventfd_ctx_fileget(efile.file); 4131 if (IS_ERR(event->eventfd)) { 4132 ret = PTR_ERR(event->eventfd); 4133 goto out_put_efile; 4134 } 4135 4136 cfile = fdget(cfd); 4137 if (!cfile.file) { 4138 ret = -EBADF; 4139 goto out_put_eventfd; 4140 } 4141 4142 /* the process need read permission on control file */ 4143 /* AV: shouldn't we check that it's been opened for read instead? */ 4144 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4145 if (ret < 0) 4146 goto out_put_cfile; 4147 4148 /* 4149 * Determine the event callbacks and set them in @event. This used 4150 * to be done via struct cftype but cgroup core no longer knows 4151 * about these events. The following is crude but the whole thing 4152 * is for compatibility anyway. 4153 * 4154 * DO NOT ADD NEW FILES. 
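 *
 * As a rough userspace illustration (not part of this file; paths and
 * the 50M threshold are placeholders), registering a usage threshold on
 * the legacy hierarchy looks something like:
 *
 *	efd = eventfd(0, 0);
 *	cfd = open("<memcg>/memory.usage_in_bytes", O_RDONLY);
 *	ecd = open("<memcg>/cgroup.event_control", O_WRONLY);
 *	dprintf(ecd, "%d %d 52428800", efd, cfd);
 *	read(efd, &counter, sizeof(counter));
 *
 * where the read() blocks until the threshold given as <args> is
 * crossed, and <args> is interpreted by the chosen control file.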
4155 */ 4156 name = cfile.file->f_path.dentry->d_name.name; 4157 4158 if (!strcmp(name, "memory.usage_in_bytes")) { 4159 event->register_event = mem_cgroup_usage_register_event; 4160 event->unregister_event = mem_cgroup_usage_unregister_event; 4161 } else if (!strcmp(name, "memory.oom_control")) { 4162 event->register_event = mem_cgroup_oom_register_event; 4163 event->unregister_event = mem_cgroup_oom_unregister_event; 4164 } else if (!strcmp(name, "memory.pressure_level")) { 4165 event->register_event = vmpressure_register_event; 4166 event->unregister_event = vmpressure_unregister_event; 4167 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4168 event->register_event = memsw_cgroup_usage_register_event; 4169 event->unregister_event = memsw_cgroup_usage_unregister_event; 4170 } else { 4171 ret = -EINVAL; 4172 goto out_put_cfile; 4173 } 4174 4175 /* 4176 * Verify @cfile should belong to @css. Also, remaining events are 4177 * automatically removed on cgroup destruction but the removal is 4178 * asynchronous, so take an extra ref on @css. 4179 */ 4180 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4181 &memory_cgrp_subsys); 4182 ret = -EINVAL; 4183 if (IS_ERR(cfile_css)) 4184 goto out_put_cfile; 4185 if (cfile_css != css) { 4186 css_put(cfile_css); 4187 goto out_put_cfile; 4188 } 4189 4190 ret = event->register_event(memcg, event->eventfd, buf); 4191 if (ret) 4192 goto out_put_css; 4193 4194 efile.file->f_op->poll(efile.file, &event->pt); 4195 4196 spin_lock(&memcg->event_list_lock); 4197 list_add(&event->list, &memcg->event_list); 4198 spin_unlock(&memcg->event_list_lock); 4199 4200 fdput(cfile); 4201 fdput(efile); 4202 4203 return nbytes; 4204 4205 out_put_css: 4206 css_put(css); 4207 out_put_cfile: 4208 fdput(cfile); 4209 out_put_eventfd: 4210 eventfd_ctx_put(event->eventfd); 4211 out_put_efile: 4212 fdput(efile); 4213 out_kfree: 4214 kfree(event); 4215 4216 return ret; 4217 } 4218 4219 static struct cftype mem_cgroup_legacy_files[] = { 4220 { 4221 .name = "usage_in_bytes", 4222 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4223 .read_u64 = mem_cgroup_read_u64, 4224 }, 4225 { 4226 .name = "max_usage_in_bytes", 4227 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4228 .write = mem_cgroup_reset, 4229 .read_u64 = mem_cgroup_read_u64, 4230 }, 4231 { 4232 .name = "limit_in_bytes", 4233 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4234 .write = mem_cgroup_write, 4235 .read_u64 = mem_cgroup_read_u64, 4236 }, 4237 { 4238 .name = "soft_limit_in_bytes", 4239 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4240 .write = mem_cgroup_write, 4241 .read_u64 = mem_cgroup_read_u64, 4242 }, 4243 { 4244 .name = "failcnt", 4245 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4246 .write = mem_cgroup_reset, 4247 .read_u64 = mem_cgroup_read_u64, 4248 }, 4249 { 4250 .name = "stat", 4251 .seq_show = memcg_stat_show, 4252 }, 4253 { 4254 .name = "force_empty", 4255 .write = mem_cgroup_force_empty_write, 4256 }, 4257 { 4258 .name = "use_hierarchy", 4259 .write_u64 = mem_cgroup_hierarchy_write, 4260 .read_u64 = mem_cgroup_hierarchy_read, 4261 }, 4262 { 4263 .name = "cgroup.event_control", /* XXX: for compat */ 4264 .write = memcg_write_event_control, 4265 .flags = CFTYPE_NO_PREFIX, 4266 .mode = S_IWUGO, 4267 }, 4268 { 4269 .name = "swappiness", 4270 .read_u64 = mem_cgroup_swappiness_read, 4271 .write_u64 = mem_cgroup_swappiness_write, 4272 }, 4273 { 4274 .name = "move_charge_at_immigrate", 4275 .read_u64 = mem_cgroup_move_charge_read, 4276 .write_u64 = 
mem_cgroup_move_charge_write, 4277 }, 4278 { 4279 .name = "oom_control", 4280 .seq_show = mem_cgroup_oom_control_read, 4281 .write_u64 = mem_cgroup_oom_control_write, 4282 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4283 }, 4284 { 4285 .name = "pressure_level", 4286 }, 4287 #ifdef CONFIG_NUMA 4288 { 4289 .name = "numa_stat", 4290 .seq_show = memcg_numa_stat_show, 4291 }, 4292 #endif 4293 #ifdef CONFIG_MEMCG_KMEM 4294 { 4295 .name = "kmem.limit_in_bytes", 4296 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4297 .write = mem_cgroup_write, 4298 .read_u64 = mem_cgroup_read_u64, 4299 }, 4300 { 4301 .name = "kmem.usage_in_bytes", 4302 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4303 .read_u64 = mem_cgroup_read_u64, 4304 }, 4305 { 4306 .name = "kmem.failcnt", 4307 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4308 .write = mem_cgroup_reset, 4309 .read_u64 = mem_cgroup_read_u64, 4310 }, 4311 { 4312 .name = "kmem.max_usage_in_bytes", 4313 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4314 .write = mem_cgroup_reset, 4315 .read_u64 = mem_cgroup_read_u64, 4316 }, 4317 #ifdef CONFIG_SLABINFO 4318 { 4319 .name = "kmem.slabinfo", 4320 .seq_start = slab_start, 4321 .seq_next = slab_next, 4322 .seq_stop = slab_stop, 4323 .seq_show = memcg_slab_show, 4324 }, 4325 #endif 4326 #endif 4327 { }, /* terminate */ 4328 }; 4329 4330 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4331 { 4332 struct mem_cgroup_per_node *pn; 4333 struct mem_cgroup_per_zone *mz; 4334 int zone, tmp = node; 4335 /* 4336 * This routine is called against possible nodes. 4337 * But it's BUG to call kmalloc() against offline node. 4338 * 4339 * TODO: this routine can waste much memory for nodes which will 4340 * never be onlined. It's better to use memory hotplug callback 4341 * function. 4342 */ 4343 if (!node_state(node, N_NORMAL_MEMORY)) 4344 tmp = -1; 4345 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4346 if (!pn) 4347 return 1; 4348 4349 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4350 mz = &pn->zoneinfo[zone]; 4351 lruvec_init(&mz->lruvec); 4352 mz->usage_in_excess = 0; 4353 mz->on_tree = false; 4354 mz->memcg = memcg; 4355 } 4356 memcg->nodeinfo[node] = pn; 4357 return 0; 4358 } 4359 4360 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4361 { 4362 kfree(memcg->nodeinfo[node]); 4363 } 4364 4365 static struct mem_cgroup *mem_cgroup_alloc(void) 4366 { 4367 struct mem_cgroup *memcg; 4368 size_t size; 4369 4370 size = sizeof(struct mem_cgroup); 4371 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4372 4373 memcg = kzalloc(size, GFP_KERNEL); 4374 if (!memcg) 4375 return NULL; 4376 4377 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4378 if (!memcg->stat) 4379 goto out_free; 4380 spin_lock_init(&memcg->pcp_counter_lock); 4381 return memcg; 4382 4383 out_free: 4384 kfree(memcg); 4385 return NULL; 4386 } 4387 4388 /* 4389 * At destroying mem_cgroup, references from swap_cgroup can remain. 4390 * (scanning all at force_empty is too costly...) 4391 * 4392 * Instead of clearing all references at force_empty, we remember 4393 * the number of reference from swap_cgroup and free mem_cgroup when 4394 * it goes down to 0. 4395 * 4396 * Removal of cgroup itself succeeds regardless of refs from swap. 
4397 */ 4398 4399 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4400 { 4401 int node; 4402 4403 mem_cgroup_remove_from_trees(memcg); 4404 4405 for_each_node(node) 4406 free_mem_cgroup_per_zone_info(memcg, node); 4407 4408 free_percpu(memcg->stat); 4409 kfree(memcg); 4410 } 4411 4412 /* 4413 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 4414 */ 4415 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) 4416 { 4417 if (!memcg->memory.parent) 4418 return NULL; 4419 return mem_cgroup_from_counter(memcg->memory.parent, memory); 4420 } 4421 EXPORT_SYMBOL(parent_mem_cgroup); 4422 4423 static struct cgroup_subsys_state * __ref 4424 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4425 { 4426 struct mem_cgroup *memcg; 4427 long error = -ENOMEM; 4428 int node; 4429 4430 memcg = mem_cgroup_alloc(); 4431 if (!memcg) 4432 return ERR_PTR(error); 4433 4434 for_each_node(node) 4435 if (alloc_mem_cgroup_per_zone_info(memcg, node)) 4436 goto free_out; 4437 4438 /* root ? */ 4439 if (parent_css == NULL) { 4440 root_mem_cgroup = memcg; 4441 page_counter_init(&memcg->memory, NULL); 4442 memcg->high = PAGE_COUNTER_MAX; 4443 memcg->soft_limit = PAGE_COUNTER_MAX; 4444 page_counter_init(&memcg->memsw, NULL); 4445 page_counter_init(&memcg->kmem, NULL); 4446 } 4447 4448 memcg->last_scanned_node = MAX_NUMNODES; 4449 INIT_LIST_HEAD(&memcg->oom_notify); 4450 memcg->move_charge_at_immigrate = 0; 4451 mutex_init(&memcg->thresholds_lock); 4452 spin_lock_init(&memcg->move_lock); 4453 vmpressure_init(&memcg->vmpressure); 4454 INIT_LIST_HEAD(&memcg->event_list); 4455 spin_lock_init(&memcg->event_list_lock); 4456 #ifdef CONFIG_MEMCG_KMEM 4457 memcg->kmemcg_id = -1; 4458 #endif 4459 4460 return &memcg->css; 4461 4462 free_out: 4463 __mem_cgroup_free(memcg); 4464 return ERR_PTR(error); 4465 } 4466 4467 static int 4468 mem_cgroup_css_online(struct cgroup_subsys_state *css) 4469 { 4470 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4471 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); 4472 int ret; 4473 4474 if (css->id > MEM_CGROUP_ID_MAX) 4475 return -ENOSPC; 4476 4477 if (!parent) 4478 return 0; 4479 4480 mutex_lock(&memcg_create_mutex); 4481 4482 memcg->use_hierarchy = parent->use_hierarchy; 4483 memcg->oom_kill_disable = parent->oom_kill_disable; 4484 memcg->swappiness = mem_cgroup_swappiness(parent); 4485 4486 if (parent->use_hierarchy) { 4487 page_counter_init(&memcg->memory, &parent->memory); 4488 memcg->high = PAGE_COUNTER_MAX; 4489 memcg->soft_limit = PAGE_COUNTER_MAX; 4490 page_counter_init(&memcg->memsw, &parent->memsw); 4491 page_counter_init(&memcg->kmem, &parent->kmem); 4492 4493 /* 4494 * No need to take a reference to the parent because cgroup 4495 * core guarantees its existence. 4496 */ 4497 } else { 4498 page_counter_init(&memcg->memory, NULL); 4499 memcg->high = PAGE_COUNTER_MAX; 4500 memcg->soft_limit = PAGE_COUNTER_MAX; 4501 page_counter_init(&memcg->memsw, NULL); 4502 page_counter_init(&memcg->kmem, NULL); 4503 /* 4504 * Deeper hierachy with use_hierarchy == false doesn't make 4505 * much sense so let cgroup subsystem know about this 4506 * unfortunate state in our controller. 
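 * For example, if a/ sets use_hierarchy to false and a child a/b is then
 * created, a/b gets its own root-like counters and none of a's limits
 * apply to it; flagging broken_hierarchy lets cgroup core warn the
 * administrator about such configurations.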
4507 */ 4508 if (parent != root_mem_cgroup) 4509 memory_cgrp_subsys.broken_hierarchy = true; 4510 } 4511 mutex_unlock(&memcg_create_mutex); 4512 4513 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); 4514 if (ret) 4515 return ret; 4516 4517 /* 4518 * Make sure the memcg is initialized: mem_cgroup_iter() 4519 * orders reading memcg->initialized against its callers 4520 * reading the memcg members. 4521 */ 4522 smp_store_release(&memcg->initialized, 1); 4523 4524 return 0; 4525 } 4526 4527 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4528 { 4529 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4530 struct mem_cgroup_event *event, *tmp; 4531 4532 /* 4533 * Unregister events and notify userspace. 4534 * Notify userspace about cgroup removing only after rmdir of cgroup 4535 * directory to avoid race between userspace and kernelspace. 4536 */ 4537 spin_lock(&memcg->event_list_lock); 4538 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4539 list_del_init(&event->list); 4540 schedule_work(&event->remove); 4541 } 4542 spin_unlock(&memcg->event_list_lock); 4543 4544 vmpressure_cleanup(&memcg->vmpressure); 4545 4546 memcg_deactivate_kmem(memcg); 4547 } 4548 4549 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4550 { 4551 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4552 4553 memcg_destroy_kmem(memcg); 4554 __mem_cgroup_free(memcg); 4555 } 4556 4557 /** 4558 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4559 * @css: the target css 4560 * 4561 * Reset the states of the mem_cgroup associated with @css. This is 4562 * invoked when the userland requests disabling on the default hierarchy 4563 * but the memcg is pinned through dependency. The memcg should stop 4564 * applying policies and should revert to the vanilla state as it may be 4565 * made visible again. 4566 * 4567 * The current implementation only resets the essential configurations. 4568 * This needs to be expanded to cover all the visible parts. 4569 */ 4570 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4571 { 4572 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4573 4574 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); 4575 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); 4576 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); 4577 memcg->low = 0; 4578 memcg->high = PAGE_COUNTER_MAX; 4579 memcg->soft_limit = PAGE_COUNTER_MAX; 4580 } 4581 4582 #ifdef CONFIG_MMU 4583 /* Handlers for move charge at task migration. */ 4584 static int mem_cgroup_do_precharge(unsigned long count) 4585 { 4586 int ret; 4587 4588 /* Try a single bulk charge without reclaim first */ 4589 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count); 4590 if (!ret) { 4591 mc.precharge += count; 4592 return ret; 4593 } 4594 if (ret == -EINTR) { 4595 cancel_charge(root_mem_cgroup, count); 4596 return ret; 4597 } 4598 4599 /* Try charges one by one with reclaim */ 4600 while (count--) { 4601 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4602 /* 4603 * In case of failure, any residual charges against 4604 * mc.to will be dropped by mem_cgroup_clear_mc() 4605 * later on. However, cancel any charges that are 4606 * bypassed to root right away or they'll be lost. 
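 * (try_charge() returns -EINTR when the charging task is exiting or has
 * a fatal signal pending; the charge is then bypassed to the root cgroup
 * rather than mc.to, so it has to be unwound against root immediately
 * instead of being left to mem_cgroup_clear_mc().)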
4607 */ 4608 if (ret == -EINTR) 4609 cancel_charge(root_mem_cgroup, 1); 4610 if (ret) 4611 return ret; 4612 mc.precharge++; 4613 cond_resched(); 4614 } 4615 return 0; 4616 } 4617 4618 /** 4619 * get_mctgt_type - get target type of moving charge 4620 * @vma: the vma the pte to be checked belongs 4621 * @addr: the address corresponding to the pte to be checked 4622 * @ptent: the pte to be checked 4623 * @target: the pointer the target page or swap ent will be stored(can be NULL) 4624 * 4625 * Returns 4626 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 4627 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4628 * move charge. if @target is not NULL, the page is stored in target->page 4629 * with extra refcnt got(Callers should handle it). 4630 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4631 * target for charge migration. if @target is not NULL, the entry is stored 4632 * in target->ent. 4633 * 4634 * Called with pte lock held. 4635 */ 4636 union mc_target { 4637 struct page *page; 4638 swp_entry_t ent; 4639 }; 4640 4641 enum mc_target_type { 4642 MC_TARGET_NONE = 0, 4643 MC_TARGET_PAGE, 4644 MC_TARGET_SWAP, 4645 }; 4646 4647 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4648 unsigned long addr, pte_t ptent) 4649 { 4650 struct page *page = vm_normal_page(vma, addr, ptent); 4651 4652 if (!page || !page_mapped(page)) 4653 return NULL; 4654 if (PageAnon(page)) { 4655 if (!(mc.flags & MOVE_ANON)) 4656 return NULL; 4657 } else { 4658 if (!(mc.flags & MOVE_FILE)) 4659 return NULL; 4660 } 4661 if (!get_page_unless_zero(page)) 4662 return NULL; 4663 4664 return page; 4665 } 4666 4667 #ifdef CONFIG_SWAP 4668 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4669 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4670 { 4671 struct page *page = NULL; 4672 swp_entry_t ent = pte_to_swp_entry(ptent); 4673 4674 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4675 return NULL; 4676 /* 4677 * Because lookup_swap_cache() updates some statistics counter, 4678 * we call find_get_page() with swapper_space directly. 4679 */ 4680 page = find_get_page(swap_address_space(ent), ent.val); 4681 if (do_swap_account) 4682 entry->val = ent.val; 4683 4684 return page; 4685 } 4686 #else 4687 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4688 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4689 { 4690 return NULL; 4691 } 4692 #endif 4693 4694 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4695 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4696 { 4697 struct page *page = NULL; 4698 struct address_space *mapping; 4699 pgoff_t pgoff; 4700 4701 if (!vma->vm_file) /* anonymous vma */ 4702 return NULL; 4703 if (!(mc.flags & MOVE_FILE)) 4704 return NULL; 4705 4706 mapping = vma->vm_file->f_mapping; 4707 pgoff = linear_page_index(vma, addr); 4708 4709 /* page is moved even if it's not RSS of this task(page-faulted). */ 4710 #ifdef CONFIG_SWAP 4711 /* shmem/tmpfs may report page out on swap: account for that too. 
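 * For such pages find_get_entry() does not return a page pointer but an
 * exceptional radix tree entry encoding the swap slot; it is decoded
 * with radix_to_swp_entry() below and the page, if still in swapcache,
 * is then looked up in the swap address space.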
*/ 4712 if (shmem_mapping(mapping)) { 4713 page = find_get_entry(mapping, pgoff); 4714 if (radix_tree_exceptional_entry(page)) { 4715 swp_entry_t swp = radix_to_swp_entry(page); 4716 if (do_swap_account) 4717 *entry = swp; 4718 page = find_get_page(swap_address_space(swp), swp.val); 4719 } 4720 } else 4721 page = find_get_page(mapping, pgoff); 4722 #else 4723 page = find_get_page(mapping, pgoff); 4724 #endif 4725 return page; 4726 } 4727 4728 /** 4729 * mem_cgroup_move_account - move account of the page 4730 * @page: the page 4731 * @nr_pages: number of regular pages (>1 for huge pages) 4732 * @from: mem_cgroup which the page is moved from. 4733 * @to: mem_cgroup which the page is moved to. @from != @to. 4734 * 4735 * The caller must confirm following. 4736 * - page is not on LRU (isolate_page() is useful.) 4737 * - compound_lock is held when nr_pages > 1 4738 * 4739 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4740 * from old cgroup. 4741 */ 4742 static int mem_cgroup_move_account(struct page *page, 4743 unsigned int nr_pages, 4744 struct mem_cgroup *from, 4745 struct mem_cgroup *to) 4746 { 4747 unsigned long flags; 4748 int ret; 4749 4750 VM_BUG_ON(from == to); 4751 VM_BUG_ON_PAGE(PageLRU(page), page); 4752 /* 4753 * The page is isolated from LRU. So, collapse function 4754 * will not handle this page. But page splitting can happen. 4755 * Do this check under compound_page_lock(). The caller should 4756 * hold it. 4757 */ 4758 ret = -EBUSY; 4759 if (nr_pages > 1 && !PageTransHuge(page)) 4760 goto out; 4761 4762 /* 4763 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup 4764 * of its source page while we change it: page migration takes 4765 * both pages off the LRU, but page cache replacement doesn't. 4766 */ 4767 if (!trylock_page(page)) 4768 goto out; 4769 4770 ret = -EINVAL; 4771 if (page->mem_cgroup != from) 4772 goto out_unlock; 4773 4774 spin_lock_irqsave(&from->move_lock, flags); 4775 4776 if (!PageAnon(page) && page_mapped(page)) { 4777 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4778 nr_pages); 4779 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4780 nr_pages); 4781 } 4782 4783 if (PageWriteback(page)) { 4784 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4785 nr_pages); 4786 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4787 nr_pages); 4788 } 4789 4790 /* 4791 * It is safe to change page->mem_cgroup here because the page 4792 * is referenced, charged, and isolated - we can't race with 4793 * uncharging, charging, migration, or LRU putback. 
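 * The remaining readers are the page statistics updaters, and those are
 * kept away by from->move_lock (held here), which
 * mem_cgroup_begin_page_stat() takes on the reader side while
 * from->moving_account is raised.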
4794 */ 4795 4796 /* caller should have done css_get */ 4797 page->mem_cgroup = to; 4798 spin_unlock_irqrestore(&from->move_lock, flags); 4799 4800 ret = 0; 4801 4802 local_irq_disable(); 4803 mem_cgroup_charge_statistics(to, page, nr_pages); 4804 memcg_check_events(to, page); 4805 mem_cgroup_charge_statistics(from, page, -nr_pages); 4806 memcg_check_events(from, page); 4807 local_irq_enable(); 4808 out_unlock: 4809 unlock_page(page); 4810 out: 4811 return ret; 4812 } 4813 4814 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4815 unsigned long addr, pte_t ptent, union mc_target *target) 4816 { 4817 struct page *page = NULL; 4818 enum mc_target_type ret = MC_TARGET_NONE; 4819 swp_entry_t ent = { .val = 0 }; 4820 4821 if (pte_present(ptent)) 4822 page = mc_handle_present_pte(vma, addr, ptent); 4823 else if (is_swap_pte(ptent)) 4824 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 4825 else if (pte_none(ptent)) 4826 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4827 4828 if (!page && !ent.val) 4829 return ret; 4830 if (page) { 4831 /* 4832 * Do only loose check w/o serialization. 4833 * mem_cgroup_move_account() checks the page is valid or 4834 * not under LRU exclusion. 4835 */ 4836 if (page->mem_cgroup == mc.from) { 4837 ret = MC_TARGET_PAGE; 4838 if (target) 4839 target->page = page; 4840 } 4841 if (!ret || !target) 4842 put_page(page); 4843 } 4844 /* There is a swap entry and a page doesn't exist or isn't charged */ 4845 if (ent.val && !ret && 4846 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4847 ret = MC_TARGET_SWAP; 4848 if (target) 4849 target->ent = ent; 4850 } 4851 return ret; 4852 } 4853 4854 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4855 /* 4856 * We don't consider swapping or file mapped pages because THP does not 4857 * support them for now. 4858 * Caller should make sure that pmd_trans_huge(pmd) is true. 
4859 */ 4860 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4861 unsigned long addr, pmd_t pmd, union mc_target *target) 4862 { 4863 struct page *page = NULL; 4864 enum mc_target_type ret = MC_TARGET_NONE; 4865 4866 page = pmd_page(pmd); 4867 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4868 if (!(mc.flags & MOVE_ANON)) 4869 return ret; 4870 if (page->mem_cgroup == mc.from) { 4871 ret = MC_TARGET_PAGE; 4872 if (target) { 4873 get_page(page); 4874 target->page = page; 4875 } 4876 } 4877 return ret; 4878 } 4879 #else 4880 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4881 unsigned long addr, pmd_t pmd, union mc_target *target) 4882 { 4883 return MC_TARGET_NONE; 4884 } 4885 #endif 4886 4887 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4888 unsigned long addr, unsigned long end, 4889 struct mm_walk *walk) 4890 { 4891 struct vm_area_struct *vma = walk->vma; 4892 pte_t *pte; 4893 spinlock_t *ptl; 4894 4895 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 4896 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4897 mc.precharge += HPAGE_PMD_NR; 4898 spin_unlock(ptl); 4899 return 0; 4900 } 4901 4902 if (pmd_trans_unstable(pmd)) 4903 return 0; 4904 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4905 for (; addr != end; pte++, addr += PAGE_SIZE) 4906 if (get_mctgt_type(vma, addr, *pte, NULL)) 4907 mc.precharge++; /* increment precharge temporarily */ 4908 pte_unmap_unlock(pte - 1, ptl); 4909 cond_resched(); 4910 4911 return 0; 4912 } 4913 4914 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4915 { 4916 unsigned long precharge; 4917 4918 struct mm_walk mem_cgroup_count_precharge_walk = { 4919 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4920 .mm = mm, 4921 }; 4922 down_read(&mm->mmap_sem); 4923 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk); 4924 up_read(&mm->mmap_sem); 4925 4926 precharge = mc.precharge; 4927 mc.precharge = 0; 4928 4929 return precharge; 4930 } 4931 4932 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4933 { 4934 unsigned long precharge = mem_cgroup_count_precharge(mm); 4935 4936 VM_BUG_ON(mc.moving_task); 4937 mc.moving_task = current; 4938 return mem_cgroup_do_precharge(precharge); 4939 } 4940 4941 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4942 static void __mem_cgroup_clear_mc(void) 4943 { 4944 struct mem_cgroup *from = mc.from; 4945 struct mem_cgroup *to = mc.to; 4946 4947 /* we must uncharge all the leftover precharges from mc.to */ 4948 if (mc.precharge) { 4949 cancel_charge(mc.to, mc.precharge); 4950 mc.precharge = 0; 4951 } 4952 /* 4953 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4954 * we must uncharge here. 4955 */ 4956 if (mc.moved_charge) { 4957 cancel_charge(mc.from, mc.moved_charge); 4958 mc.moved_charge = 0; 4959 } 4960 /* we must fixup refcnts and charges */ 4961 if (mc.moved_swap) { 4962 /* uncharge swap account from the old cgroup */ 4963 if (!mem_cgroup_is_root(mc.from)) 4964 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4965 4966 /* 4967 * we charged both to->memory and to->memsw, so we 4968 * should uncharge to->memory. 
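 * (Each moved swap entry was precharged to mc.to as memory+memsw, but
 * the data still sits in swap, so only the memsw half of that charge
 * should remain on mc.to; the memory half is given back here, while
 * mc.from's memsw charge was dropped just above.)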
4969 */ 4970 if (!mem_cgroup_is_root(mc.to)) 4971 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4972 4973 css_put_many(&mc.from->css, mc.moved_swap); 4974 4975 /* we've already done css_get(mc.to) */ 4976 mc.moved_swap = 0; 4977 } 4978 memcg_oom_recover(from); 4979 memcg_oom_recover(to); 4980 wake_up_all(&mc.waitq); 4981 } 4982 4983 static void mem_cgroup_clear_mc(void) 4984 { 4985 /* 4986 * we must clear moving_task before waking up waiters at the end of 4987 * task migration. 4988 */ 4989 mc.moving_task = NULL; 4990 __mem_cgroup_clear_mc(); 4991 spin_lock(&mc.lock); 4992 mc.from = NULL; 4993 mc.to = NULL; 4994 spin_unlock(&mc.lock); 4995 } 4996 4997 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 4998 struct cgroup_taskset *tset) 4999 { 5000 struct task_struct *p = cgroup_taskset_first(tset); 5001 int ret = 0; 5002 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5003 unsigned long move_flags; 5004 5005 /* 5006 * We are now commited to this value whatever it is. Changes in this 5007 * tunable will only affect upcoming migrations, not the current one. 5008 * So we need to save it, and keep it going. 5009 */ 5010 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5011 if (move_flags) { 5012 struct mm_struct *mm; 5013 struct mem_cgroup *from = mem_cgroup_from_task(p); 5014 5015 VM_BUG_ON(from == memcg); 5016 5017 mm = get_task_mm(p); 5018 if (!mm) 5019 return 0; 5020 /* We move charges only when we move a owner of the mm */ 5021 if (mm->owner == p) { 5022 VM_BUG_ON(mc.from); 5023 VM_BUG_ON(mc.to); 5024 VM_BUG_ON(mc.precharge); 5025 VM_BUG_ON(mc.moved_charge); 5026 VM_BUG_ON(mc.moved_swap); 5027 5028 spin_lock(&mc.lock); 5029 mc.from = from; 5030 mc.to = memcg; 5031 mc.flags = move_flags; 5032 spin_unlock(&mc.lock); 5033 /* We set mc.moving_task later */ 5034 5035 ret = mem_cgroup_precharge_mc(mm); 5036 if (ret) 5037 mem_cgroup_clear_mc(); 5038 } 5039 mmput(mm); 5040 } 5041 return ret; 5042 } 5043 5044 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 5045 struct cgroup_taskset *tset) 5046 { 5047 if (mc.to) 5048 mem_cgroup_clear_mc(); 5049 } 5050 5051 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5052 unsigned long addr, unsigned long end, 5053 struct mm_walk *walk) 5054 { 5055 int ret = 0; 5056 struct vm_area_struct *vma = walk->vma; 5057 pte_t *pte; 5058 spinlock_t *ptl; 5059 enum mc_target_type target_type; 5060 union mc_target target; 5061 struct page *page; 5062 5063 /* 5064 * We don't take compound_lock() here but no race with splitting thp 5065 * happens because: 5066 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not 5067 * under splitting, which means there's no concurrent thp split, 5068 * - if another thread runs into split_huge_page() just after we 5069 * entered this if-block, the thread must wait for page table lock 5070 * to be unlocked in __split_huge_page_splitting(), where the main 5071 * part of thp split is not executed yet. 
5072 */ 5073 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 5074 if (mc.precharge < HPAGE_PMD_NR) { 5075 spin_unlock(ptl); 5076 return 0; 5077 } 5078 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5079 if (target_type == MC_TARGET_PAGE) { 5080 page = target.page; 5081 if (!isolate_lru_page(page)) { 5082 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, 5083 mc.from, mc.to)) { 5084 mc.precharge -= HPAGE_PMD_NR; 5085 mc.moved_charge += HPAGE_PMD_NR; 5086 } 5087 putback_lru_page(page); 5088 } 5089 put_page(page); 5090 } 5091 spin_unlock(ptl); 5092 return 0; 5093 } 5094 5095 if (pmd_trans_unstable(pmd)) 5096 return 0; 5097 retry: 5098 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5099 for (; addr != end; addr += PAGE_SIZE) { 5100 pte_t ptent = *(pte++); 5101 swp_entry_t ent; 5102 5103 if (!mc.precharge) 5104 break; 5105 5106 switch (get_mctgt_type(vma, addr, ptent, &target)) { 5107 case MC_TARGET_PAGE: 5108 page = target.page; 5109 if (isolate_lru_page(page)) 5110 goto put; 5111 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) { 5112 mc.precharge--; 5113 /* we uncharge from mc.from later. */ 5114 mc.moved_charge++; 5115 } 5116 putback_lru_page(page); 5117 put: /* get_mctgt_type() gets the page */ 5118 put_page(page); 5119 break; 5120 case MC_TARGET_SWAP: 5121 ent = target.ent; 5122 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 5123 mc.precharge--; 5124 /* we fixup refcnts and charges later. */ 5125 mc.moved_swap++; 5126 } 5127 break; 5128 default: 5129 break; 5130 } 5131 } 5132 pte_unmap_unlock(pte - 1, ptl); 5133 cond_resched(); 5134 5135 if (addr != end) { 5136 /* 5137 * We have consumed all precharges we got in can_attach(). 5138 * We try charge one by one, but don't do any additional 5139 * charges to mc.to if we have failed in charge once in attach() 5140 * phase. 5141 */ 5142 ret = mem_cgroup_do_precharge(1); 5143 if (!ret) 5144 goto retry; 5145 } 5146 5147 return ret; 5148 } 5149 5150 static void mem_cgroup_move_charge(struct mm_struct *mm) 5151 { 5152 struct mm_walk mem_cgroup_move_charge_walk = { 5153 .pmd_entry = mem_cgroup_move_charge_pte_range, 5154 .mm = mm, 5155 }; 5156 5157 lru_add_drain_all(); 5158 /* 5159 * Signal mem_cgroup_begin_page_stat() to take the memcg's 5160 * move_lock while we're moving its pages to another memcg. 5161 * Then wait for already started RCU-only updates to finish. 5162 */ 5163 atomic_inc(&mc.from->moving_account); 5164 synchronize_rcu(); 5165 retry: 5166 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 5167 /* 5168 * Someone who are holding the mmap_sem might be waiting in 5169 * waitq. So we cancel all extra charges, wake up all waiters, 5170 * and retry. Because we cancel precharges, we might not be able 5171 * to move enough charges, but moving charge is a best-effort 5172 * feature anyway, so it wouldn't be a big problem. 5173 */ 5174 __mem_cgroup_clear_mc(); 5175 cond_resched(); 5176 goto retry; 5177 } 5178 /* 5179 * When we have consumed all precharges and failed in doing 5180 * additional charge, the page walk just aborts. 
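 * (In that case mem_cgroup_move_charge_pte_range() returns the error it
 * got from mem_cgroup_do_precharge(1), and walk_page_range() stops the
 * walk as soon as a callback returns non-zero.)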
5181 */ 5182 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 5183 up_read(&mm->mmap_sem); 5184 atomic_dec(&mc.from->moving_account); 5185 } 5186 5187 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 5188 struct cgroup_taskset *tset) 5189 { 5190 struct task_struct *p = cgroup_taskset_first(tset); 5191 struct mm_struct *mm = get_task_mm(p); 5192 5193 if (mm) { 5194 if (mc.to) 5195 mem_cgroup_move_charge(mm); 5196 mmput(mm); 5197 } 5198 if (mc.to) 5199 mem_cgroup_clear_mc(); 5200 } 5201 #else /* !CONFIG_MMU */ 5202 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 5203 struct cgroup_taskset *tset) 5204 { 5205 return 0; 5206 } 5207 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 5208 struct cgroup_taskset *tset) 5209 { 5210 } 5211 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 5212 struct cgroup_taskset *tset) 5213 { 5214 } 5215 #endif 5216 5217 /* 5218 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5219 * to verify whether we're attached to the default hierarchy on each mount 5220 * attempt. 5221 */ 5222 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5223 { 5224 /* 5225 * use_hierarchy is forced on the default hierarchy. cgroup core 5226 * guarantees that @root doesn't have any children, so turning it 5227 * on for the root memcg is enough. 5228 */ 5229 if (cgroup_on_dfl(root_css->cgroup)) 5230 root_mem_cgroup->use_hierarchy = true; 5231 else 5232 root_mem_cgroup->use_hierarchy = false; 5233 } 5234 5235 static u64 memory_current_read(struct cgroup_subsys_state *css, 5236 struct cftype *cft) 5237 { 5238 return mem_cgroup_usage(mem_cgroup_from_css(css), false); 5239 } 5240 5241 static int memory_low_show(struct seq_file *m, void *v) 5242 { 5243 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5244 unsigned long low = READ_ONCE(memcg->low); 5245 5246 if (low == PAGE_COUNTER_MAX) 5247 seq_puts(m, "max\n"); 5248 else 5249 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5250 5251 return 0; 5252 } 5253 5254 static ssize_t memory_low_write(struct kernfs_open_file *of, 5255 char *buf, size_t nbytes, loff_t off) 5256 { 5257 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5258 unsigned long low; 5259 int err; 5260 5261 buf = strstrip(buf); 5262 err = page_counter_memparse(buf, "max", &low); 5263 if (err) 5264 return err; 5265 5266 memcg->low = low; 5267 5268 return nbytes; 5269 } 5270 5271 static int memory_high_show(struct seq_file *m, void *v) 5272 { 5273 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5274 unsigned long high = READ_ONCE(memcg->high); 5275 5276 if (high == PAGE_COUNTER_MAX) 5277 seq_puts(m, "max\n"); 5278 else 5279 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5280 5281 return 0; 5282 } 5283 5284 static ssize_t memory_high_write(struct kernfs_open_file *of, 5285 char *buf, size_t nbytes, loff_t off) 5286 { 5287 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5288 unsigned long high; 5289 int err; 5290 5291 buf = strstrip(buf); 5292 err = page_counter_memparse(buf, "max", &high); 5293 if (err) 5294 return err; 5295 5296 memcg->high = high; 5297 5298 return nbytes; 5299 } 5300 5301 static int memory_max_show(struct seq_file *m, void *v) 5302 { 5303 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5304 unsigned long max = READ_ONCE(memcg->memory.limit); 5305 5306 if (max == PAGE_COUNTER_MAX) 5307 seq_puts(m, "max\n"); 5308 else 5309 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5310 5311 return 0; 5312 
} 5313 5314 static ssize_t memory_max_write(struct kernfs_open_file *of, 5315 char *buf, size_t nbytes, loff_t off) 5316 { 5317 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5318 unsigned long max; 5319 int err; 5320 5321 buf = strstrip(buf); 5322 err = page_counter_memparse(buf, "max", &max); 5323 if (err) 5324 return err; 5325 5326 err = mem_cgroup_resize_limit(memcg, max); 5327 if (err) 5328 return err; 5329 5330 return nbytes; 5331 } 5332 5333 static int memory_events_show(struct seq_file *m, void *v) 5334 { 5335 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5336 5337 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); 5338 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); 5339 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); 5340 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); 5341 5342 return 0; 5343 } 5344 5345 static struct cftype memory_files[] = { 5346 { 5347 .name = "current", 5348 .read_u64 = memory_current_read, 5349 }, 5350 { 5351 .name = "low", 5352 .flags = CFTYPE_NOT_ON_ROOT, 5353 .seq_show = memory_low_show, 5354 .write = memory_low_write, 5355 }, 5356 { 5357 .name = "high", 5358 .flags = CFTYPE_NOT_ON_ROOT, 5359 .seq_show = memory_high_show, 5360 .write = memory_high_write, 5361 }, 5362 { 5363 .name = "max", 5364 .flags = CFTYPE_NOT_ON_ROOT, 5365 .seq_show = memory_max_show, 5366 .write = memory_max_write, 5367 }, 5368 { 5369 .name = "events", 5370 .flags = CFTYPE_NOT_ON_ROOT, 5371 .seq_show = memory_events_show, 5372 }, 5373 { } /* terminate */ 5374 }; 5375 5376 struct cgroup_subsys memory_cgrp_subsys = { 5377 .css_alloc = mem_cgroup_css_alloc, 5378 .css_online = mem_cgroup_css_online, 5379 .css_offline = mem_cgroup_css_offline, 5380 .css_free = mem_cgroup_css_free, 5381 .css_reset = mem_cgroup_css_reset, 5382 .can_attach = mem_cgroup_can_attach, 5383 .cancel_attach = mem_cgroup_cancel_attach, 5384 .attach = mem_cgroup_move_task, 5385 .bind = mem_cgroup_bind, 5386 .dfl_cftypes = memory_files, 5387 .legacy_cftypes = mem_cgroup_legacy_files, 5388 .early_init = 0, 5389 }; 5390 5391 /** 5392 * mem_cgroup_events - count memory events against a cgroup 5393 * @memcg: the memory cgroup 5394 * @idx: the event index 5395 * @nr: the number of events to account for 5396 */ 5397 void mem_cgroup_events(struct mem_cgroup *memcg, 5398 enum mem_cgroup_events_index idx, 5399 unsigned int nr) 5400 { 5401 this_cpu_add(memcg->stat->events[idx], nr); 5402 } 5403 5404 /** 5405 * mem_cgroup_low - check if memory consumption is below the normal range 5406 * @root: the highest ancestor to consider 5407 * @memcg: the memory cgroup to check 5408 * 5409 * Returns %true if memory consumption of @memcg, and that of all 5410 * configurable ancestors up to @root, is below the normal range. 5411 */ 5412 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5413 { 5414 if (mem_cgroup_disabled()) 5415 return false; 5416 5417 /* 5418 * The toplevel group doesn't have a configurable range, so 5419 * it's never low when looked at directly, and it is not 5420 * considered an ancestor when assessing the hierarchy. 
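 * For example, with root -> A (memory.low = 1G) -> B (memory.low = 512M),
 * mem_cgroup_low(root_mem_cgroup, B) is true only while B's usage is
 * below 512M and A's usage is below 1G; root itself contributes nothing
 * to the decision.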
5421 */ 5422 5423 if (memcg == root_mem_cgroup) 5424 return false; 5425 5426 if (page_counter_read(&memcg->memory) >= memcg->low) 5427 return false; 5428 5429 while (memcg != root) { 5430 memcg = parent_mem_cgroup(memcg); 5431 5432 if (memcg == root_mem_cgroup) 5433 break; 5434 5435 if (page_counter_read(&memcg->memory) >= memcg->low) 5436 return false; 5437 } 5438 return true; 5439 } 5440 5441 /** 5442 * mem_cgroup_try_charge - try charging a page 5443 * @page: page to charge 5444 * @mm: mm context of the victim 5445 * @gfp_mask: reclaim mode 5446 * @memcgp: charged memcg return 5447 * 5448 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5449 * pages according to @gfp_mask if necessary. 5450 * 5451 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5452 * Otherwise, an error code is returned. 5453 * 5454 * After page->mapping has been set up, the caller must finalize the 5455 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5456 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5457 */ 5458 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5459 gfp_t gfp_mask, struct mem_cgroup **memcgp) 5460 { 5461 struct mem_cgroup *memcg = NULL; 5462 unsigned int nr_pages = 1; 5463 int ret = 0; 5464 5465 if (mem_cgroup_disabled()) 5466 goto out; 5467 5468 if (PageSwapCache(page)) { 5469 /* 5470 * Every swap fault against a single page tries to charge the 5471 * page, bail as early as possible. shmem_unuse() encounters 5472 * already charged pages, too. The USED bit is protected by 5473 * the page lock, which serializes swap cache removal, which 5474 * in turn serializes uncharging. 5475 */ 5476 if (page->mem_cgroup) 5477 goto out; 5478 } 5479 5480 if (PageTransHuge(page)) { 5481 nr_pages <<= compound_order(page); 5482 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5483 } 5484 5485 if (do_swap_account && PageSwapCache(page)) 5486 memcg = try_get_mem_cgroup_from_page(page); 5487 if (!memcg) 5488 memcg = get_mem_cgroup_from_mm(mm); 5489 5490 ret = try_charge(memcg, gfp_mask, nr_pages); 5491 5492 css_put(&memcg->css); 5493 5494 if (ret == -EINTR) { 5495 memcg = root_mem_cgroup; 5496 ret = 0; 5497 } 5498 out: 5499 *memcgp = memcg; 5500 return ret; 5501 } 5502 5503 /** 5504 * mem_cgroup_commit_charge - commit a page charge 5505 * @page: page to charge 5506 * @memcg: memcg to charge the page to 5507 * @lrucare: page might be on LRU already 5508 * 5509 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5510 * after page->mapping has been set up. This must happen atomically 5511 * as part of the page instantiation, i.e. under the page table lock 5512 * for anonymous pages, under the page lock for page and swap cache. 5513 * 5514 * In addition, the page must not be on the LRU during the commit, to 5515 * prevent racing with task migration. If it might be, use @lrucare. 5516 * 5517 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5518 */ 5519 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5520 bool lrucare) 5521 { 5522 unsigned int nr_pages = 1; 5523 5524 VM_BUG_ON_PAGE(!page->mapping, page); 5525 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5526 5527 if (mem_cgroup_disabled()) 5528 return; 5529 /* 5530 * Swap faults will attempt to charge the same page multiple 5531 * times. But reuse_swap_page() might have removed the page 5532 * from swapcache already, so we can't check PageSwapCache(). 
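 * A NULL @memcg is how mem_cgroup_try_charge() reports that it found the
 * page already charged, so in that case the commit below simply becomes
 * a no-op.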
5533 */ 5534 if (!memcg) 5535 return; 5536 5537 commit_charge(page, memcg, lrucare); 5538 5539 if (PageTransHuge(page)) { 5540 nr_pages <<= compound_order(page); 5541 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5542 } 5543 5544 local_irq_disable(); 5545 mem_cgroup_charge_statistics(memcg, page, nr_pages); 5546 memcg_check_events(memcg, page); 5547 local_irq_enable(); 5548 5549 if (do_swap_account && PageSwapCache(page)) { 5550 swp_entry_t entry = { .val = page_private(page) }; 5551 /* 5552 * The swap entry might not get freed for a long time, 5553 * let's not wait for it. The page already received a 5554 * memory+swap charge, drop the swap entry duplicate. 5555 */ 5556 mem_cgroup_uncharge_swap(entry); 5557 } 5558 } 5559 5560 /** 5561 * mem_cgroup_cancel_charge - cancel a page charge 5562 * @page: page to charge 5563 * @memcg: memcg to charge the page to 5564 * 5565 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5566 */ 5567 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) 5568 { 5569 unsigned int nr_pages = 1; 5570 5571 if (mem_cgroup_disabled()) 5572 return; 5573 /* 5574 * Swap faults will attempt to charge the same page multiple 5575 * times. But reuse_swap_page() might have removed the page 5576 * from swapcache already, so we can't check PageSwapCache(). 5577 */ 5578 if (!memcg) 5579 return; 5580 5581 if (PageTransHuge(page)) { 5582 nr_pages <<= compound_order(page); 5583 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5584 } 5585 5586 cancel_charge(memcg, nr_pages); 5587 } 5588 5589 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5590 unsigned long nr_anon, unsigned long nr_file, 5591 unsigned long nr_huge, struct page *dummy_page) 5592 { 5593 unsigned long nr_pages = nr_anon + nr_file; 5594 unsigned long flags; 5595 5596 if (!mem_cgroup_is_root(memcg)) { 5597 page_counter_uncharge(&memcg->memory, nr_pages); 5598 if (do_swap_account) 5599 page_counter_uncharge(&memcg->memsw, nr_pages); 5600 memcg_oom_recover(memcg); 5601 } 5602 5603 local_irq_save(flags); 5604 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); 5605 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); 5606 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); 5607 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); 5608 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5609 memcg_check_events(memcg, dummy_page); 5610 local_irq_restore(flags); 5611 5612 if (!mem_cgroup_is_root(memcg)) 5613 css_put_many(&memcg->css, nr_pages); 5614 } 5615 5616 static void uncharge_list(struct list_head *page_list) 5617 { 5618 struct mem_cgroup *memcg = NULL; 5619 unsigned long nr_anon = 0; 5620 unsigned long nr_file = 0; 5621 unsigned long nr_huge = 0; 5622 unsigned long pgpgout = 0; 5623 struct list_head *next; 5624 struct page *page; 5625 5626 next = page_list->next; 5627 do { 5628 unsigned int nr_pages = 1; 5629 5630 page = list_entry(next, struct page, lru); 5631 next = page->lru.next; 5632 5633 VM_BUG_ON_PAGE(PageLRU(page), page); 5634 VM_BUG_ON_PAGE(page_count(page), page); 5635 5636 if (!page->mem_cgroup) 5637 continue; 5638 5639 /* 5640 * Nobody should be changing or seriously looking at 5641 * page->mem_cgroup at this point, we have fully 5642 * exclusive access to the page. 
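 * (The VM_BUG_ON_PAGE(page_count(page)) check above is the reason: pages
 * only reach this uncharge path once their refcount has dropped to zero,
 * so there is no remaining reference through which page->mem_cgroup
 * could be observed or modified.)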
5643 */ 5644 5645 if (memcg != page->mem_cgroup) { 5646 if (memcg) { 5647 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5648 nr_huge, page); 5649 pgpgout = nr_anon = nr_file = nr_huge = 0; 5650 } 5651 memcg = page->mem_cgroup; 5652 } 5653 5654 if (PageTransHuge(page)) { 5655 nr_pages <<= compound_order(page); 5656 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5657 nr_huge += nr_pages; 5658 } 5659 5660 if (PageAnon(page)) 5661 nr_anon += nr_pages; 5662 else 5663 nr_file += nr_pages; 5664 5665 page->mem_cgroup = NULL; 5666 5667 pgpgout++; 5668 } while (next != page_list); 5669 5670 if (memcg) 5671 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5672 nr_huge, page); 5673 } 5674 5675 /** 5676 * mem_cgroup_uncharge - uncharge a page 5677 * @page: page to uncharge 5678 * 5679 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5680 * mem_cgroup_commit_charge(). 5681 */ 5682 void mem_cgroup_uncharge(struct page *page) 5683 { 5684 if (mem_cgroup_disabled()) 5685 return; 5686 5687 /* Don't touch page->lru of any random page, pre-check: */ 5688 if (!page->mem_cgroup) 5689 return; 5690 5691 INIT_LIST_HEAD(&page->lru); 5692 uncharge_list(&page->lru); 5693 } 5694 5695 /** 5696 * mem_cgroup_uncharge_list - uncharge a list of page 5697 * @page_list: list of pages to uncharge 5698 * 5699 * Uncharge a list of pages previously charged with 5700 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5701 */ 5702 void mem_cgroup_uncharge_list(struct list_head *page_list) 5703 { 5704 if (mem_cgroup_disabled()) 5705 return; 5706 5707 if (!list_empty(page_list)) 5708 uncharge_list(page_list); 5709 } 5710 5711 /** 5712 * mem_cgroup_migrate - migrate a charge to another page 5713 * @oldpage: currently charged page 5714 * @newpage: page to transfer the charge to 5715 * @lrucare: either or both pages might be on the LRU already 5716 * 5717 * Migrate the charge from @oldpage to @newpage. 5718 * 5719 * Both pages must be locked, @newpage->mapping must be set up. 5720 */ 5721 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, 5722 bool lrucare) 5723 { 5724 struct mem_cgroup *memcg; 5725 int isolated; 5726 5727 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5728 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5729 VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage); 5730 VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage); 5731 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5732 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5733 newpage); 5734 5735 if (mem_cgroup_disabled()) 5736 return; 5737 5738 /* Page cache replacement: new page already charged? */ 5739 if (newpage->mem_cgroup) 5740 return; 5741 5742 /* 5743 * Swapcache readahead pages can get migrated before being 5744 * charged, and migration from compaction can happen to an 5745 * uncharged page when the PFN walker finds a page that 5746 * reclaim just put back on the LRU but has not released yet. 5747 */ 5748 memcg = oldpage->mem_cgroup; 5749 if (!memcg) 5750 return; 5751 5752 if (lrucare) 5753 lock_page_lru(oldpage, &isolated); 5754 5755 oldpage->mem_cgroup = NULL; 5756 5757 if (lrucare) 5758 unlock_page_lru(oldpage, isolated); 5759 5760 commit_charge(newpage, memcg, lrucare); 5761 } 5762 5763 /* 5764 * subsys_initcall() for memory controller. 
5765 * 5766 * Some parts like hotcpu_notifier() have to be initialized from this context 5767 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 5768 * everything that doesn't depend on a specific mem_cgroup structure should 5769 * be initialized from here. 5770 */ 5771 static int __init mem_cgroup_init(void) 5772 { 5773 int cpu, node; 5774 5775 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5776 5777 for_each_possible_cpu(cpu) 5778 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5779 drain_local_stock); 5780 5781 for_each_node(node) { 5782 struct mem_cgroup_tree_per_node *rtpn; 5783 int zone; 5784 5785 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5786 node_online(node) ? node : NUMA_NO_NODE); 5787 5788 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 5789 struct mem_cgroup_tree_per_zone *rtpz; 5790 5791 rtpz = &rtpn->rb_tree_per_zone[zone]; 5792 rtpz->rb_root = RB_ROOT; 5793 spin_lock_init(&rtpz->lock); 5794 } 5795 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5796 } 5797 5798 return 0; 5799 } 5800 subsys_initcall(mem_cgroup_init); 5801 5802 #ifdef CONFIG_MEMCG_SWAP 5803 /** 5804 * mem_cgroup_swapout - transfer a memsw charge to swap 5805 * @page: page whose memsw charge to transfer 5806 * @entry: swap entry to move the charge to 5807 * 5808 * Transfer the memsw charge of @page to @entry. 5809 */ 5810 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5811 { 5812 struct mem_cgroup *memcg; 5813 unsigned short oldid; 5814 5815 VM_BUG_ON_PAGE(PageLRU(page), page); 5816 VM_BUG_ON_PAGE(page_count(page), page); 5817 5818 if (!do_swap_account) 5819 return; 5820 5821 memcg = page->mem_cgroup; 5822 5823 /* Readahead page, never charged */ 5824 if (!memcg) 5825 return; 5826 5827 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5828 VM_BUG_ON_PAGE(oldid, page); 5829 mem_cgroup_swap_statistics(memcg, true); 5830 5831 page->mem_cgroup = NULL; 5832 5833 if (!mem_cgroup_is_root(memcg)) 5834 page_counter_uncharge(&memcg->memory, 1); 5835 5836 /* XXX: caller holds IRQ-safe mapping->tree_lock */ 5837 VM_BUG_ON(!irqs_disabled()); 5838 5839 mem_cgroup_charge_statistics(memcg, page, -1); 5840 memcg_check_events(memcg, page); 5841 } 5842 5843 /** 5844 * mem_cgroup_uncharge_swap - uncharge a swap entry 5845 * @entry: swap entry to uncharge 5846 * 5847 * Drop the memsw charge associated with @entry. 
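 * This pairs with mem_cgroup_swapout() above.  Besides being called when
 * the swap entry itself is released, it is also used by
 * mem_cgroup_commit_charge() to drop the duplicate swap charge of a
 * swapcache page that has faulted back in, rather than waiting for the
 * entry to be freed.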
5848 */ 5849 void mem_cgroup_uncharge_swap(swp_entry_t entry) 5850 { 5851 struct mem_cgroup *memcg; 5852 unsigned short id; 5853 5854 if (!do_swap_account) 5855 return; 5856 5857 id = swap_cgroup_record(entry, 0); 5858 rcu_read_lock(); 5859 memcg = mem_cgroup_from_id(id); 5860 if (memcg) { 5861 if (!mem_cgroup_is_root(memcg)) 5862 page_counter_uncharge(&memcg->memsw, 1); 5863 mem_cgroup_swap_statistics(memcg, false); 5864 css_put(&memcg->css); 5865 } 5866 rcu_read_unlock(); 5867 } 5868 5869 /* for remember boot option*/ 5870 #ifdef CONFIG_MEMCG_SWAP_ENABLED 5871 static int really_do_swap_account __initdata = 1; 5872 #else 5873 static int really_do_swap_account __initdata; 5874 #endif 5875 5876 static int __init enable_swap_account(char *s) 5877 { 5878 if (!strcmp(s, "1")) 5879 really_do_swap_account = 1; 5880 else if (!strcmp(s, "0")) 5881 really_do_swap_account = 0; 5882 return 1; 5883 } 5884 __setup("swapaccount=", enable_swap_account); 5885 5886 static struct cftype memsw_cgroup_files[] = { 5887 { 5888 .name = "memsw.usage_in_bytes", 5889 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 5890 .read_u64 = mem_cgroup_read_u64, 5891 }, 5892 { 5893 .name = "memsw.max_usage_in_bytes", 5894 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 5895 .write = mem_cgroup_reset, 5896 .read_u64 = mem_cgroup_read_u64, 5897 }, 5898 { 5899 .name = "memsw.limit_in_bytes", 5900 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 5901 .write = mem_cgroup_write, 5902 .read_u64 = mem_cgroup_read_u64, 5903 }, 5904 { 5905 .name = "memsw.failcnt", 5906 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 5907 .write = mem_cgroup_reset, 5908 .read_u64 = mem_cgroup_read_u64, 5909 }, 5910 { }, /* terminate */ 5911 }; 5912 5913 static int __init mem_cgroup_swap_init(void) 5914 { 5915 if (!mem_cgroup_disabled() && really_do_swap_account) { 5916 do_swap_account = 1; 5917 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 5918 memsw_cgroup_files)); 5919 } 5920 return 0; 5921 } 5922 subsys_initcall(mem_cgroup_swap_init); 5923 5924 #endif /* CONFIG_MEMCG_SWAP */ 5925
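/*
 * Illustrative example of the legacy memory+swap interface defined above
 * (the mount point and group name are assumptions about a typical cgroup
 * v1 setup, not taken from this file):
 *
 *	# echo 512M > /sys/fs/cgroup/memory/grp/memory.memsw.limit_in_bytes
 *	# cat /sys/fs/cgroup/memory/grp/memory.memsw.usage_in_bytes
 *
 * memsw limits cap memory plus swap together, so memsw.limit_in_bytes
 * cannot be set below memory.limit_in_bytes.
 */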