1 /* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * Kernel Memory Controller 14 * Copyright (C) 2012 Parallels Inc. and Google Inc. 15 * Authors: Glauber Costa and Suleiman Souhlal 16 * 17 * Native page reclaim 18 * Charge lifetime sanitation 19 * Lockless page tracking & accounting 20 * Unified hierarchy configuration model 21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 22 * 23 * This program is free software; you can redistribute it and/or modify 24 * it under the terms of the GNU General Public License as published by 25 * the Free Software Foundation; either version 2 of the License, or 26 * (at your option) any later version. 27 * 28 * This program is distributed in the hope that it will be useful, 29 * but WITHOUT ANY WARRANTY; without even the implied warranty of 30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 * GNU General Public License for more details. 32 */ 33 34 #include <linux/page_counter.h> 35 #include <linux/memcontrol.h> 36 #include <linux/cgroup.h> 37 #include <linux/mm.h> 38 #include <linux/hugetlb.h> 39 #include <linux/pagemap.h> 40 #include <linux/smp.h> 41 #include <linux/page-flags.h> 42 #include <linux/backing-dev.h> 43 #include <linux/bit_spinlock.h> 44 #include <linux/rcupdate.h> 45 #include <linux/limits.h> 46 #include <linux/export.h> 47 #include <linux/mutex.h> 48 #include <linux/rbtree.h> 49 #include <linux/slab.h> 50 #include <linux/swap.h> 51 #include <linux/swapops.h> 52 #include <linux/spinlock.h> 53 #include <linux/eventfd.h> 54 #include <linux/poll.h> 55 #include <linux/sort.h> 56 #include <linux/fs.h> 57 #include <linux/seq_file.h> 58 #include <linux/vmpressure.h> 59 #include <linux/mm_inline.h> 60 #include <linux/swap_cgroup.h> 61 #include <linux/cpu.h> 62 #include <linux/oom.h> 63 #include <linux/lockdep.h> 64 #include <linux/file.h> 65 #include <linux/tracehook.h> 66 #include "internal.h" 67 #include <net/sock.h> 68 #include <net/ip.h> 69 #include <net/tcp_memcontrol.h> 70 #include "slab.h" 71 72 #include <asm/uaccess.h> 73 74 #include <trace/events/vmscan.h> 75 76 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 77 EXPORT_SYMBOL(memory_cgrp_subsys); 78 79 #define MEM_CGROUP_RECLAIM_RETRIES 5 80 static struct mem_cgroup *root_mem_cgroup __read_mostly; 81 struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly; 82 83 /* Whether the swap controller is active */ 84 #ifdef CONFIG_MEMCG_SWAP 85 int do_swap_account __read_mostly; 86 #else 87 #define do_swap_account 0 88 #endif 89 90 static const char * const mem_cgroup_stat_names[] = { 91 "cache", 92 "rss", 93 "rss_huge", 94 "mapped_file", 95 "dirty", 96 "writeback", 97 "swap", 98 }; 99 100 static const char * const mem_cgroup_events_names[] = { 101 "pgpgin", 102 "pgpgout", 103 "pgfault", 104 "pgmajfault", 105 }; 106 107 static const char * const mem_cgroup_lru_names[] = { 108 "inactive_anon", 109 "active_anon", 110 "inactive_file", 111 "active_file", 112 "unevictable", 113 }; 114 115 #define THRESHOLDS_EVENTS_TARGET 128 116 #define SOFTLIMIT_EVENTS_TARGET 1024 117 #define NUMAINFO_EVENTS_TARGET 1024 118 119 /* 120 * Cgroups above their limits are maintained in a RB-Tree, independent of 121 * their hierarchy representation 122 */ 123 124 struct mem_cgroup_tree_per_zone { 125 struct rb_root 
rb_root; 126 spinlock_t lock; 127 }; 128 129 struct mem_cgroup_tree_per_node { 130 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES]; 131 }; 132 133 struct mem_cgroup_tree { 134 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 135 }; 136 137 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 138 139 /* for OOM */ 140 struct mem_cgroup_eventfd_list { 141 struct list_head list; 142 struct eventfd_ctx *eventfd; 143 }; 144 145 /* 146 * cgroup_event represents events which userspace wants to receive. 147 */ 148 struct mem_cgroup_event { 149 /* 150 * memcg which the event belongs to. 151 */ 152 struct mem_cgroup *memcg; 153 /* 154 * eventfd to signal userspace about the event. 155 */ 156 struct eventfd_ctx *eventfd; 157 /* 158 * Each of these is stored in a list by the cgroup. 159 */ 160 struct list_head list; 161 /* 162 * register_event() callback will be used to add a new userspace 163 * waiter for changes related to this event. Use eventfd_signal() 164 * on eventfd to send notification to userspace. 165 */ 166 int (*register_event)(struct mem_cgroup *memcg, 167 struct eventfd_ctx *eventfd, const char *args); 168 /* 169 * unregister_event() callback will be called when userspace closes 170 * the eventfd or on cgroup removal. This callback must be set 171 * if you want to provide notification functionality. 172 */ 173 void (*unregister_event)(struct mem_cgroup *memcg, 174 struct eventfd_ctx *eventfd); 175 /* 176 * All fields below are needed to unregister the event when 177 * userspace closes the eventfd. 178 */ 179 poll_table pt; 180 wait_queue_head_t *wqh; 181 wait_queue_t wait; 182 struct work_struct remove; 183 }; 184 185 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 186 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 187 188 /* Stuff for moving charges at task migration. */ 189 /* 190 * Types of charges to be moved. 191 */ 192 #define MOVE_ANON 0x1U 193 #define MOVE_FILE 0x2U 194 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 195 196 /* "mc" and its members are protected by cgroup_mutex */ 197 static struct move_charge_struct { 198 spinlock_t lock; /* for from, to */ 199 struct mem_cgroup *from; 200 struct mem_cgroup *to; 201 unsigned long flags; 202 unsigned long precharge; 203 unsigned long moved_charge; 204 unsigned long moved_swap; 205 struct task_struct *moving_task; /* a task moving charges */ 206 wait_queue_head_t waitq; /* a waitq for other context */ 207 } mc = { 208 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 209 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 210 }; 211 212 /* 213 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 214 * limit reclaim to prevent infinite loops, if they ever occur. 215 */ 216 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 217 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 218 219 enum charge_type { 220 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 221 MEM_CGROUP_CHARGE_TYPE_ANON, 222 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ 223 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ 224 NR_CHARGE_TYPE, 225 }; 226 227 /* for encoding cft->private value on file */ 228 enum res_type { 229 _MEM, 230 _MEMSWAP, 231 _OOM_TYPE, 232 _KMEM, 233 }; 234 235 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 236 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 237 #define MEMFILE_ATTR(val) ((val) & 0xffff) 238 /* Used for OOM notifier */ 239 #define OOM_CONTROL (0) 240 241 /* 242 * The memcg_create_mutex will be held whenever a new cgroup is created.
243 * As a consequence, any change that needs to protect against new child cgroups 244 * appearing has to hold it as well. 245 */ 246 static DEFINE_MUTEX(memcg_create_mutex); 247 248 /* Some nice accessors for the vmpressure. */ 249 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 250 { 251 if (!memcg) 252 memcg = root_mem_cgroup; 253 return &memcg->vmpressure; 254 } 255 256 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 257 { 258 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 259 } 260 261 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) 262 { 263 return (memcg == root_mem_cgroup); 264 } 265 266 /* 267 * We restrict the id in the range of [1, 65535], so it can fit into 268 * an unsigned short. 269 */ 270 #define MEM_CGROUP_ID_MAX USHRT_MAX 271 272 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) 273 { 274 return memcg->css.id; 275 } 276 277 /* 278 * A helper function to get mem_cgroup from ID. must be called under 279 * rcu_read_lock(). The caller is responsible for calling 280 * css_tryget_online() if the mem_cgroup is used for charging. (dropping 281 * refcnt from swap can be called against removed memcg.) 282 */ 283 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 284 { 285 struct cgroup_subsys_state *css; 286 287 css = css_from_id(id, &memory_cgrp_subsys); 288 return mem_cgroup_from_css(css); 289 } 290 291 /* Writing them here to avoid exposing memcg's inner layout */ 292 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) 293 294 void sock_update_memcg(struct sock *sk) 295 { 296 if (mem_cgroup_sockets_enabled) { 297 struct mem_cgroup *memcg; 298 struct cg_proto *cg_proto; 299 300 BUG_ON(!sk->sk_prot->proto_cgroup); 301 302 /* Socket cloning can throw us here with sk_cgrp already 303 * filled. It won't however, necessarily happen from 304 * process context. So the test for root memcg given 305 * the current task's memcg won't help us in this case. 306 * 307 * Respecting the original socket's memcg is a better 308 * decision in this case. 309 */ 310 if (sk->sk_cgrp) { 311 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); 312 css_get(&sk->sk_cgrp->memcg->css); 313 return; 314 } 315 316 rcu_read_lock(); 317 memcg = mem_cgroup_from_task(current); 318 cg_proto = sk->sk_prot->proto_cgroup(memcg); 319 if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) && 320 css_tryget_online(&memcg->css)) { 321 sk->sk_cgrp = cg_proto; 322 } 323 rcu_read_unlock(); 324 } 325 } 326 EXPORT_SYMBOL(sock_update_memcg); 327 328 void sock_release_memcg(struct sock *sk) 329 { 330 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 331 struct mem_cgroup *memcg; 332 WARN_ON(!sk->sk_cgrp->memcg); 333 memcg = sk->sk_cgrp->memcg; 334 css_put(&sk->sk_cgrp->memcg->css); 335 } 336 } 337 338 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) 339 { 340 if (!memcg || mem_cgroup_is_root(memcg)) 341 return NULL; 342 343 return &memcg->tcp_mem; 344 } 345 EXPORT_SYMBOL(tcp_proto_cgroup); 346 347 #endif 348 349 #ifdef CONFIG_MEMCG_KMEM 350 /* 351 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. 352 * The main reason for not using cgroup id for this: 353 * this works better in sparse environments, where we have a lot of memcgs, 354 * but only a few kmem-limited. Or also, if we have, for instance, 200 355 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 356 * 200 entry array for that. 
357 * 358 * The current size of the caches array is stored in memcg_nr_cache_ids. It 359 * will double each time we have to increase it. 360 */ 361 static DEFINE_IDA(memcg_cache_ida); 362 int memcg_nr_cache_ids; 363 364 /* Protects memcg_nr_cache_ids */ 365 static DECLARE_RWSEM(memcg_cache_ids_sem); 366 367 void memcg_get_cache_ids(void) 368 { 369 down_read(&memcg_cache_ids_sem); 370 } 371 372 void memcg_put_cache_ids(void) 373 { 374 up_read(&memcg_cache_ids_sem); 375 } 376 377 /* 378 * MIN_SIZE is different from 1, because we would like to avoid going through 379 * the alloc/free process all the time. On a small machine, 4 kmem-limited 380 * cgroups is a reasonable guess. In the future, it could be a parameter or 381 * tunable, but that is strictly not necessary. 382 * 383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 384 * this constant directly from cgroup, but it is understandable that this is 385 * better kept as an internal representation in cgroup.c. In any case, the 386 * cgrp_id space is not getting any smaller, and we don't have to necessarily 387 * increase ours as well if it increases. 388 */ 389 #define MEMCG_CACHES_MIN_SIZE 4 390 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 391 392 /* 393 * A lot of the calls to the cache allocation functions are expected to be 394 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are 395 * conditional to this static branch, we'll have to allow modules that do 396 * kmem_cache_alloc and the like to see this symbol as well. 397 */ 398 struct static_key memcg_kmem_enabled_key; 399 EXPORT_SYMBOL(memcg_kmem_enabled_key); 400 401 #endif /* CONFIG_MEMCG_KMEM */ 402 403 static struct mem_cgroup_per_zone * 404 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) 405 { 406 int nid = zone_to_nid(zone); 407 int zid = zone_idx(zone); 408 409 return &memcg->nodeinfo[nid]->zoneinfo[zid]; 410 } 411 412 /** 413 * mem_cgroup_css_from_page - css of the memcg associated with a page 414 * @page: page of interest 415 * 416 * If memcg is bound to the default hierarchy, css of the memcg associated 417 * with @page is returned. The returned css remains associated with @page 418 * until it is released. 419 * 420 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 421 * is returned. 422 * 423 * XXX: The above description of behavior on the default hierarchy isn't 424 * strictly true yet as replace_page_cache_page() can modify the 425 * association before @page is released even on the default hierarchy; 426 * however, the current and planned usages don't mix the two functions 427 * and replace_page_cache_page() will soon be updated to make the invariant 428 * actually true. 429 */ 430 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 431 { 432 struct mem_cgroup *memcg; 433 434 rcu_read_lock(); 435 436 memcg = page->mem_cgroup; 437 438 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 439 memcg = root_mem_cgroup; 440 441 rcu_read_unlock(); 442 return &memcg->css; 443 } 444 445 /** 446 * page_cgroup_ino - return inode number of the memcg a page is charged to 447 * @page: the page 448 * 449 * Look up the closest online ancestor of the memory cgroup @page is charged to 450 * and return its inode number or 0 if @page is not charged to any cgroup. It 451 * is safe to call this function without holding a reference to @page.
452 * 453 * Note, this function is inherently racy, because there is nothing to prevent 454 * the cgroup inode from getting torn down and potentially reallocated a moment 455 * after page_cgroup_ino() returns, so it only should be used by callers that 456 * do not care (such as procfs interfaces). 457 */ 458 ino_t page_cgroup_ino(struct page *page) 459 { 460 struct mem_cgroup *memcg; 461 unsigned long ino = 0; 462 463 rcu_read_lock(); 464 memcg = READ_ONCE(page->mem_cgroup); 465 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 466 memcg = parent_mem_cgroup(memcg); 467 if (memcg) 468 ino = cgroup_ino(memcg->css.cgroup); 469 rcu_read_unlock(); 470 return ino; 471 } 472 473 static struct mem_cgroup_per_zone * 474 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page) 475 { 476 int nid = page_to_nid(page); 477 int zid = page_zonenum(page); 478 479 return &memcg->nodeinfo[nid]->zoneinfo[zid]; 480 } 481 482 static struct mem_cgroup_tree_per_zone * 483 soft_limit_tree_node_zone(int nid, int zid) 484 { 485 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 486 } 487 488 static struct mem_cgroup_tree_per_zone * 489 soft_limit_tree_from_page(struct page *page) 490 { 491 int nid = page_to_nid(page); 492 int zid = page_zonenum(page); 493 494 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 495 } 496 497 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz, 498 struct mem_cgroup_tree_per_zone *mctz, 499 unsigned long new_usage_in_excess) 500 { 501 struct rb_node **p = &mctz->rb_root.rb_node; 502 struct rb_node *parent = NULL; 503 struct mem_cgroup_per_zone *mz_node; 504 505 if (mz->on_tree) 506 return; 507 508 mz->usage_in_excess = new_usage_in_excess; 509 if (!mz->usage_in_excess) 510 return; 511 while (*p) { 512 parent = *p; 513 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 514 tree_node); 515 if (mz->usage_in_excess < mz_node->usage_in_excess) 516 p = &(*p)->rb_left; 517 /* 518 * We can't avoid mem cgroups that are over their soft 519 * limit by the same amount 520 */ 521 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 522 p = &(*p)->rb_right; 523 } 524 rb_link_node(&mz->tree_node, parent, p); 525 rb_insert_color(&mz->tree_node, &mctz->rb_root); 526 mz->on_tree = true; 527 } 528 529 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, 530 struct mem_cgroup_tree_per_zone *mctz) 531 { 532 if (!mz->on_tree) 533 return; 534 rb_erase(&mz->tree_node, &mctz->rb_root); 535 mz->on_tree = false; 536 } 537 538 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, 539 struct mem_cgroup_tree_per_zone *mctz) 540 { 541 unsigned long flags; 542 543 spin_lock_irqsave(&mctz->lock, flags); 544 __mem_cgroup_remove_exceeded(mz, mctz); 545 spin_unlock_irqrestore(&mctz->lock, flags); 546 } 547 548 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 549 { 550 unsigned long nr_pages = page_counter_read(&memcg->memory); 551 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 552 unsigned long excess = 0; 553 554 if (nr_pages > soft_limit) 555 excess = nr_pages - soft_limit; 556 557 return excess; 558 } 559 560 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 561 { 562 unsigned long excess; 563 struct mem_cgroup_per_zone *mz; 564 struct mem_cgroup_tree_per_zone *mctz; 565 566 mctz = soft_limit_tree_from_page(page); 567 /* 568 * Necessary to update all ancestors when hierarchy is used. 569 * because their event counter is not touched. 
570 */ 571 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 572 mz = mem_cgroup_page_zoneinfo(memcg, page); 573 excess = soft_limit_excess(memcg); 574 /* 575 * We have to update the tree if mz is on the RB-tree or 576 * the memcg is over its soft limit. 577 */ 578 if (excess || mz->on_tree) { 579 unsigned long flags; 580 581 spin_lock_irqsave(&mctz->lock, flags); 582 /* if on-tree, remove it */ 583 if (mz->on_tree) 584 __mem_cgroup_remove_exceeded(mz, mctz); 585 /* 586 * Insert again. mz->usage_in_excess will be updated. 587 * If excess is 0, no tree ops. 588 */ 589 __mem_cgroup_insert_exceeded(mz, mctz, excess); 590 spin_unlock_irqrestore(&mctz->lock, flags); 591 } 592 } 593 } 594 595 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 596 { 597 struct mem_cgroup_tree_per_zone *mctz; 598 struct mem_cgroup_per_zone *mz; 599 int nid, zid; 600 601 for_each_node(nid) { 602 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 603 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 604 mctz = soft_limit_tree_node_zone(nid, zid); 605 mem_cgroup_remove_exceeded(mz, mctz); 606 } 607 } 608 } 609 610 static struct mem_cgroup_per_zone * 611 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 612 { 613 struct rb_node *rightmost = NULL; 614 struct mem_cgroup_per_zone *mz; 615 616 retry: 617 mz = NULL; 618 rightmost = rb_last(&mctz->rb_root); 619 if (!rightmost) 620 goto done; /* Nothing to reclaim from */ 621 622 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node); 623 /* 624 * Remove the node now, but someone else can add it back; 625 * we will add it back at the end of reclaim to its correct 626 * position in the tree. 627 */ 628 __mem_cgroup_remove_exceeded(mz, mctz); 629 if (!soft_limit_excess(mz->memcg) || 630 !css_tryget_online(&mz->memcg->css)) 631 goto retry; 632 done: 633 return mz; 634 } 635 636 static struct mem_cgroup_per_zone * 637 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 638 { 639 struct mem_cgroup_per_zone *mz; 640 641 spin_lock_irq(&mctz->lock); 642 mz = __mem_cgroup_largest_soft_limit_node(mctz); 643 spin_unlock_irq(&mctz->lock); 644 return mz; 645 } 646 647 /* 648 * Return the page count for a single (non-recursive) @memcg. 649 * 650 * Implementation note: reading per-cpu statistics for a memcg. 651 * 652 * Both vmstat[] and percpu_counter use thresholds and periodic 653 * synchronization to implement a "quick" read, trading precision for 654 * lower read cost. We could similarly synchronize the memcg counters 655 * periodically. 656 * 657 * But this _read() function is currently used for the user interface, and 658 * users account memory per cgroup and always expect an exact value. 659 * Even with a quick-and-fuzzy read we would still have to visit all online 660 * cpus and sum their values, so for now no extra synchronization is 661 * implemented (it is only done for cpu hotplug). 662 * 663 * If kernel-internal users could make do with an inexact value, and summing 664 * over all cpus became a performance bottleneck in some common workload, 665 * thresholds and synchronization as in vmstat[] should be 666 * implemented.
667 */ 668 static unsigned long 669 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) 670 { 671 long val = 0; 672 int cpu; 673 674 /* Per-cpu values can be negative, use a signed accumulator */ 675 for_each_possible_cpu(cpu) 676 val += per_cpu(memcg->stat->count[idx], cpu); 677 /* 678 * Summing races with updates, so val may be negative. Avoid exposing 679 * transient negative values. 680 */ 681 if (val < 0) 682 val = 0; 683 return val; 684 } 685 686 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, 687 enum mem_cgroup_events_index idx) 688 { 689 unsigned long val = 0; 690 int cpu; 691 692 for_each_possible_cpu(cpu) 693 val += per_cpu(memcg->stat->events[idx], cpu); 694 return val; 695 } 696 697 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 698 struct page *page, 699 int nr_pages) 700 { 701 /* 702 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is 703 * counted as CACHE even if it's on ANON LRU. 704 */ 705 if (PageAnon(page)) 706 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], 707 nr_pages); 708 else 709 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], 710 nr_pages); 711 712 if (PageTransHuge(page)) 713 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 714 nr_pages); 715 716 /* pagein of a big page is an event. So, ignore page size */ 717 if (nr_pages > 0) 718 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 719 else { 720 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 721 nr_pages = -nr_pages; /* for event */ 722 } 723 724 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 725 } 726 727 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 728 int nid, 729 unsigned int lru_mask) 730 { 731 unsigned long nr = 0; 732 int zid; 733 734 VM_BUG_ON((unsigned)nid >= nr_node_ids); 735 736 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 737 struct mem_cgroup_per_zone *mz; 738 enum lru_list lru; 739 740 for_each_lru(lru) { 741 if (!(BIT(lru) & lru_mask)) 742 continue; 743 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 744 nr += mz->lru_size[lru]; 745 } 746 } 747 return nr; 748 } 749 750 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 751 unsigned int lru_mask) 752 { 753 unsigned long nr = 0; 754 int nid; 755 756 for_each_node_state(nid, N_MEMORY) 757 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); 758 return nr; 759 } 760 761 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 762 enum mem_cgroup_events_target target) 763 { 764 unsigned long val, next; 765 766 val = __this_cpu_read(memcg->stat->nr_page_events); 767 next = __this_cpu_read(memcg->stat->targets[target]); 768 /* from time_after() in jiffies.h */ 769 if ((long)next - (long)val < 0) { 770 switch (target) { 771 case MEM_CGROUP_TARGET_THRESH: 772 next = val + THRESHOLDS_EVENTS_TARGET; 773 break; 774 case MEM_CGROUP_TARGET_SOFTLIMIT: 775 next = val + SOFTLIMIT_EVENTS_TARGET; 776 break; 777 case MEM_CGROUP_TARGET_NUMAINFO: 778 next = val + NUMAINFO_EVENTS_TARGET; 779 break; 780 default: 781 break; 782 } 783 __this_cpu_write(memcg->stat->targets[target], next); 784 return true; 785 } 786 return false; 787 } 788 789 /* 790 * Check events in order. 
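 * Thresholds are checked on the finest interval (every THRESHOLDS_EVENTS_TARGET
 * page events); the soft limit tree and the NUMA scan info are refreshed on the
 * coarser SOFTLIMIT_EVENTS_TARGET and NUMAINFO_EVENTS_TARGET intervals.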
791 * 792 */ 793 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 794 { 795 /* threshold event is triggered in finer grain than soft limit */ 796 if (unlikely(mem_cgroup_event_ratelimit(memcg, 797 MEM_CGROUP_TARGET_THRESH))) { 798 bool do_softlimit; 799 bool do_numainfo __maybe_unused; 800 801 do_softlimit = mem_cgroup_event_ratelimit(memcg, 802 MEM_CGROUP_TARGET_SOFTLIMIT); 803 #if MAX_NUMNODES > 1 804 do_numainfo = mem_cgroup_event_ratelimit(memcg, 805 MEM_CGROUP_TARGET_NUMAINFO); 806 #endif 807 mem_cgroup_threshold(memcg); 808 if (unlikely(do_softlimit)) 809 mem_cgroup_update_tree(memcg, page); 810 #if MAX_NUMNODES > 1 811 if (unlikely(do_numainfo)) 812 atomic_inc(&memcg->numainfo_events); 813 #endif 814 } 815 } 816 817 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 818 { 819 /* 820 * mm_update_next_owner() may clear mm->owner to NULL 821 * if it races with swapoff, page migration, etc. 822 * So this can be called with p == NULL. 823 */ 824 if (unlikely(!p)) 825 return NULL; 826 827 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 828 } 829 EXPORT_SYMBOL(mem_cgroup_from_task); 830 831 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 832 { 833 struct mem_cgroup *memcg = NULL; 834 835 rcu_read_lock(); 836 do { 837 /* 838 * Page cache insertions can happen withou an 839 * actual mm context, e.g. during disk probing 840 * on boot, loopback IO, acct() writes etc. 841 */ 842 if (unlikely(!mm)) 843 memcg = root_mem_cgroup; 844 else { 845 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 846 if (unlikely(!memcg)) 847 memcg = root_mem_cgroup; 848 } 849 } while (!css_tryget_online(&memcg->css)); 850 rcu_read_unlock(); 851 return memcg; 852 } 853 854 /** 855 * mem_cgroup_iter - iterate over memory cgroup hierarchy 856 * @root: hierarchy root 857 * @prev: previously returned memcg, NULL on first invocation 858 * @reclaim: cookie for shared reclaim walks, NULL for full walks 859 * 860 * Returns references to children of the hierarchy below @root, or 861 * @root itself, or %NULL after a full round-trip. 862 * 863 * Caller must pass the return value in @prev on subsequent 864 * invocations for reference counting, or use mem_cgroup_iter_break() 865 * to cancel a hierarchy walk before the round-trip is complete. 866 * 867 * Reclaimers can specify a zone and a priority level in @reclaim to 868 * divide up the memcgs in the hierarchy among all concurrent 869 * reclaimers operating on the same zone and priority. 
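 * For example, a full walk with no reclaim cookie follows the pattern used by
 * the for_each_mem_cgroup_tree() helper below: start with
 * mem_cgroup_iter(root, NULL, NULL) and keep feeding the previous result back
 * in until %NULL is returned.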
870 */ 871 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 872 struct mem_cgroup *prev, 873 struct mem_cgroup_reclaim_cookie *reclaim) 874 { 875 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); 876 struct cgroup_subsys_state *css = NULL; 877 struct mem_cgroup *memcg = NULL; 878 struct mem_cgroup *pos = NULL; 879 880 if (mem_cgroup_disabled()) 881 return NULL; 882 883 if (!root) 884 root = root_mem_cgroup; 885 886 if (prev && !reclaim) 887 pos = prev; 888 889 if (!root->use_hierarchy && root != root_mem_cgroup) { 890 if (prev) 891 goto out; 892 return root; 893 } 894 895 rcu_read_lock(); 896 897 if (reclaim) { 898 struct mem_cgroup_per_zone *mz; 899 900 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); 901 iter = &mz->iter[reclaim->priority]; 902 903 if (prev && reclaim->generation != iter->generation) 904 goto out_unlock; 905 906 do { 907 pos = READ_ONCE(iter->position); 908 /* 909 * A racing update may change the position and 910 * put the last reference, hence css_tryget(), 911 * or retry to see the updated position. 912 */ 913 } while (pos && !css_tryget(&pos->css)); 914 } 915 916 if (pos) 917 css = &pos->css; 918 919 for (;;) { 920 css = css_next_descendant_pre(css, &root->css); 921 if (!css) { 922 /* 923 * Reclaimers share the hierarchy walk, and a 924 * new one might jump in right at the end of 925 * the hierarchy - make sure they see at least 926 * one group and restart from the beginning. 927 */ 928 if (!prev) 929 continue; 930 break; 931 } 932 933 /* 934 * Verify the css and acquire a reference. The root 935 * is provided by the caller, so we know it's alive 936 * and kicking, and don't take an extra reference. 937 */ 938 memcg = mem_cgroup_from_css(css); 939 940 if (css == &root->css) 941 break; 942 943 if (css_tryget(css)) { 944 /* 945 * Make sure the memcg is initialized: 946 * mem_cgroup_css_online() orders the the 947 * initialization against setting the flag. 948 */ 949 if (smp_load_acquire(&memcg->initialized)) 950 break; 951 952 css_put(css); 953 } 954 955 memcg = NULL; 956 } 957 958 if (reclaim) { 959 if (cmpxchg(&iter->position, pos, memcg) == pos) { 960 if (memcg) 961 css_get(&memcg->css); 962 if (pos) 963 css_put(&pos->css); 964 } 965 966 /* 967 * pairs with css_tryget when dereferencing iter->position 968 * above. 969 */ 970 if (pos) 971 css_put(&pos->css); 972 973 if (!memcg) 974 iter->generation++; 975 else if (!prev) 976 reclaim->generation = iter->generation; 977 } 978 979 out_unlock: 980 rcu_read_unlock(); 981 out: 982 if (prev && prev != root) 983 css_put(&prev->css); 984 985 return memcg; 986 } 987 988 /** 989 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 990 * @root: hierarchy root 991 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 992 */ 993 void mem_cgroup_iter_break(struct mem_cgroup *root, 994 struct mem_cgroup *prev) 995 { 996 if (!root) 997 root = root_mem_cgroup; 998 if (prev && prev != root) 999 css_put(&prev->css); 1000 } 1001 1002 /* 1003 * Iteration constructs for visiting all cgroups (under a tree). If 1004 * loops are exited prematurely (break), mem_cgroup_iter_break() must 1005 * be used for reference counting. 
1006 */ 1007 #define for_each_mem_cgroup_tree(iter, root) \ 1008 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 1009 iter != NULL; \ 1010 iter = mem_cgroup_iter(root, iter, NULL)) 1011 1012 #define for_each_mem_cgroup(iter) \ 1013 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 1014 iter != NULL; \ 1015 iter = mem_cgroup_iter(NULL, iter, NULL)) 1016 1017 /** 1018 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg 1019 * @zone: zone of the wanted lruvec 1020 * @memcg: memcg of the wanted lruvec 1021 * 1022 * Returns the lru list vector holding pages for the given @zone and 1023 * @mem. This can be the global zone lruvec, if the memory controller 1024 * is disabled. 1025 */ 1026 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, 1027 struct mem_cgroup *memcg) 1028 { 1029 struct mem_cgroup_per_zone *mz; 1030 struct lruvec *lruvec; 1031 1032 if (mem_cgroup_disabled()) { 1033 lruvec = &zone->lruvec; 1034 goto out; 1035 } 1036 1037 mz = mem_cgroup_zone_zoneinfo(memcg, zone); 1038 lruvec = &mz->lruvec; 1039 out: 1040 /* 1041 * Since a node can be onlined after the mem_cgroup was created, 1042 * we have to be prepared to initialize lruvec->zone here; 1043 * and if offlined then reonlined, we need to reinitialize it. 1044 */ 1045 if (unlikely(lruvec->zone != zone)) 1046 lruvec->zone = zone; 1047 return lruvec; 1048 } 1049 1050 /** 1051 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page 1052 * @page: the page 1053 * @zone: zone of the page 1054 * 1055 * This function is only safe when following the LRU page isolation 1056 * and putback protocol: the LRU lock must be held, and the page must 1057 * either be PageLRU() or the caller must have isolated/allocated it. 1058 */ 1059 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) 1060 { 1061 struct mem_cgroup_per_zone *mz; 1062 struct mem_cgroup *memcg; 1063 struct lruvec *lruvec; 1064 1065 if (mem_cgroup_disabled()) { 1066 lruvec = &zone->lruvec; 1067 goto out; 1068 } 1069 1070 memcg = page->mem_cgroup; 1071 /* 1072 * Swapcache readahead pages are added to the LRU - and 1073 * possibly migrated - before they are charged. 1074 */ 1075 if (!memcg) 1076 memcg = root_mem_cgroup; 1077 1078 mz = mem_cgroup_page_zoneinfo(memcg, page); 1079 lruvec = &mz->lruvec; 1080 out: 1081 /* 1082 * Since a node can be onlined after the mem_cgroup was created, 1083 * we have to be prepared to initialize lruvec->zone here; 1084 * and if offlined then reonlined, we need to reinitialize it. 1085 */ 1086 if (unlikely(lruvec->zone != zone)) 1087 lruvec->zone = zone; 1088 return lruvec; 1089 } 1090 1091 /** 1092 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1093 * @lruvec: mem_cgroup per zone lru vector 1094 * @lru: index of lru list the page is sitting on 1095 * @nr_pages: positive when adding or negative when removing 1096 * 1097 * This function must be called when a page is added to or removed from an 1098 * lru list. 
1099 */ 1100 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1101 int nr_pages) 1102 { 1103 struct mem_cgroup_per_zone *mz; 1104 unsigned long *lru_size; 1105 1106 if (mem_cgroup_disabled()) 1107 return; 1108 1109 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 1110 lru_size = mz->lru_size + lru; 1111 *lru_size += nr_pages; 1112 VM_BUG_ON((long)(*lru_size) < 0); 1113 } 1114 1115 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) 1116 { 1117 struct mem_cgroup *task_memcg; 1118 struct task_struct *p; 1119 bool ret; 1120 1121 p = find_lock_task_mm(task); 1122 if (p) { 1123 task_memcg = get_mem_cgroup_from_mm(p->mm); 1124 task_unlock(p); 1125 } else { 1126 /* 1127 * All threads may have already detached their mm's, but the oom 1128 * killer still needs to detect if they have already been oom 1129 * killed to prevent needlessly killing additional tasks. 1130 */ 1131 rcu_read_lock(); 1132 task_memcg = mem_cgroup_from_task(task); 1133 css_get(&task_memcg->css); 1134 rcu_read_unlock(); 1135 } 1136 ret = mem_cgroup_is_descendant(task_memcg, memcg); 1137 css_put(&task_memcg->css); 1138 return ret; 1139 } 1140 1141 #define mem_cgroup_from_counter(counter, member) \ 1142 container_of(counter, struct mem_cgroup, member) 1143 1144 /** 1145 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1146 * @memcg: the memory cgroup 1147 * 1148 * Returns the maximum amount of memory @mem can be charged with, in 1149 * pages. 1150 */ 1151 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1152 { 1153 unsigned long margin = 0; 1154 unsigned long count; 1155 unsigned long limit; 1156 1157 count = page_counter_read(&memcg->memory); 1158 limit = READ_ONCE(memcg->memory.limit); 1159 if (count < limit) 1160 margin = limit - count; 1161 1162 if (do_swap_account) { 1163 count = page_counter_read(&memcg->memsw); 1164 limit = READ_ONCE(memcg->memsw.limit); 1165 if (count <= limit) 1166 margin = min(margin, limit - count); 1167 } 1168 1169 return margin; 1170 } 1171 1172 /* 1173 * A routine for checking "mem" is under move_account() or not. 1174 * 1175 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1176 * moving cgroups. This is for waiting at high-memory pressure 1177 * caused by "move". 1178 */ 1179 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1180 { 1181 struct mem_cgroup *from; 1182 struct mem_cgroup *to; 1183 bool ret = false; 1184 /* 1185 * Unlike task_move routines, we access mc.to, mc.from not under 1186 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1187 */ 1188 spin_lock(&mc.lock); 1189 from = mc.from; 1190 to = mc.to; 1191 if (!from) 1192 goto unlock; 1193 1194 ret = mem_cgroup_is_descendant(from, memcg) || 1195 mem_cgroup_is_descendant(to, memcg); 1196 unlock: 1197 spin_unlock(&mc.lock); 1198 return ret; 1199 } 1200 1201 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1202 { 1203 if (mc.moving_task && current != mc.moving_task) { 1204 if (mem_cgroup_under_move(memcg)) { 1205 DEFINE_WAIT(wait); 1206 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1207 /* moving charge context might have finished. */ 1208 if (mc.moving_task) 1209 schedule(); 1210 finish_wait(&mc.waitq, &wait); 1211 return true; 1212 } 1213 } 1214 return false; 1215 } 1216 1217 #define K(x) ((x) << (PAGE_SHIFT-10)) 1218 /** 1219 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 
1220 * @memcg: The memory cgroup that went over limit 1221 * @p: Task that is going to be killed 1222 * 1223 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1224 * enabled 1225 */ 1226 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1227 { 1228 /* oom_info_lock ensures that parallel ooms do not interleave */ 1229 static DEFINE_MUTEX(oom_info_lock); 1230 struct mem_cgroup *iter; 1231 unsigned int i; 1232 1233 mutex_lock(&oom_info_lock); 1234 rcu_read_lock(); 1235 1236 if (p) { 1237 pr_info("Task in "); 1238 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1239 pr_cont(" killed as a result of limit of "); 1240 } else { 1241 pr_info("Memory limit reached of cgroup "); 1242 } 1243 1244 pr_cont_cgroup_path(memcg->css.cgroup); 1245 pr_cont("\n"); 1246 1247 rcu_read_unlock(); 1248 1249 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1250 K((u64)page_counter_read(&memcg->memory)), 1251 K((u64)memcg->memory.limit), memcg->memory.failcnt); 1252 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1253 K((u64)page_counter_read(&memcg->memsw)), 1254 K((u64)memcg->memsw.limit), memcg->memsw.failcnt); 1255 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1256 K((u64)page_counter_read(&memcg->kmem)), 1257 K((u64)memcg->kmem.limit), memcg->kmem.failcnt); 1258 1259 for_each_mem_cgroup_tree(iter, memcg) { 1260 pr_info("Memory cgroup stats for "); 1261 pr_cont_cgroup_path(iter->css.cgroup); 1262 pr_cont(":"); 1263 1264 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1265 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1266 continue; 1267 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], 1268 K(mem_cgroup_read_stat(iter, i))); 1269 } 1270 1271 for (i = 0; i < NR_LRU_LISTS; i++) 1272 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1273 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1274 1275 pr_cont("\n"); 1276 } 1277 mutex_unlock(&oom_info_lock); 1278 } 1279 1280 /* 1281 * This function returns the number of memcg under hierarchy tree. Returns 1282 * 1(self count) if no children. 1283 */ 1284 static int mem_cgroup_count_children(struct mem_cgroup *memcg) 1285 { 1286 int num = 0; 1287 struct mem_cgroup *iter; 1288 1289 for_each_mem_cgroup_tree(iter, memcg) 1290 num++; 1291 return num; 1292 } 1293 1294 /* 1295 * Return the memory (and swap, if configured) limit for a memcg. 1296 */ 1297 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) 1298 { 1299 unsigned long limit; 1300 1301 limit = memcg->memory.limit; 1302 if (mem_cgroup_swappiness(memcg)) { 1303 unsigned long memsw_limit; 1304 1305 memsw_limit = memcg->memsw.limit; 1306 limit = min(limit + total_swap_pages, memsw_limit); 1307 } 1308 return limit; 1309 } 1310 1311 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1312 int order) 1313 { 1314 struct oom_control oc = { 1315 .zonelist = NULL, 1316 .nodemask = NULL, 1317 .gfp_mask = gfp_mask, 1318 .order = order, 1319 }; 1320 struct mem_cgroup *iter; 1321 unsigned long chosen_points = 0; 1322 unsigned long totalpages; 1323 unsigned int points = 0; 1324 struct task_struct *chosen = NULL; 1325 1326 mutex_lock(&oom_lock); 1327 1328 /* 1329 * If current has a pending SIGKILL or is exiting, then automatically 1330 * select it. The goal is to allow it to allocate so that it may 1331 * quickly exit and free its memory. 
1332 */ 1333 if (fatal_signal_pending(current) || task_will_free_mem(current)) { 1334 mark_oom_victim(current); 1335 goto unlock; 1336 } 1337 1338 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg); 1339 totalpages = mem_cgroup_get_limit(memcg) ? : 1; 1340 for_each_mem_cgroup_tree(iter, memcg) { 1341 struct css_task_iter it; 1342 struct task_struct *task; 1343 1344 css_task_iter_start(&iter->css, &it); 1345 while ((task = css_task_iter_next(&it))) { 1346 switch (oom_scan_process_thread(&oc, task, totalpages)) { 1347 case OOM_SCAN_SELECT: 1348 if (chosen) 1349 put_task_struct(chosen); 1350 chosen = task; 1351 chosen_points = ULONG_MAX; 1352 get_task_struct(chosen); 1353 /* fall through */ 1354 case OOM_SCAN_CONTINUE: 1355 continue; 1356 case OOM_SCAN_ABORT: 1357 css_task_iter_end(&it); 1358 mem_cgroup_iter_break(memcg, iter); 1359 if (chosen) 1360 put_task_struct(chosen); 1361 goto unlock; 1362 case OOM_SCAN_OK: 1363 break; 1364 }; 1365 points = oom_badness(task, memcg, NULL, totalpages); 1366 if (!points || points < chosen_points) 1367 continue; 1368 /* Prefer thread group leaders for display purposes */ 1369 if (points == chosen_points && 1370 thread_group_leader(chosen)) 1371 continue; 1372 1373 if (chosen) 1374 put_task_struct(chosen); 1375 chosen = task; 1376 chosen_points = points; 1377 get_task_struct(chosen); 1378 } 1379 css_task_iter_end(&it); 1380 } 1381 1382 if (chosen) { 1383 points = chosen_points * 1000 / totalpages; 1384 oom_kill_process(&oc, chosen, points, totalpages, memcg, 1385 "Memory cgroup out of memory"); 1386 } 1387 unlock: 1388 mutex_unlock(&oom_lock); 1389 } 1390 1391 #if MAX_NUMNODES > 1 1392 1393 /** 1394 * test_mem_cgroup_node_reclaimable 1395 * @memcg: the target memcg 1396 * @nid: the node ID to be checked. 1397 * @noswap : specify true here if the user wants flle only information. 1398 * 1399 * This function returns whether the specified memcg contains any 1400 * reclaimable pages on a node. Returns true if there are any reclaimable 1401 * pages in the node. 1402 */ 1403 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, 1404 int nid, bool noswap) 1405 { 1406 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) 1407 return true; 1408 if (noswap || !total_swap_pages) 1409 return false; 1410 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) 1411 return true; 1412 return false; 1413 1414 } 1415 1416 /* 1417 * Always updating the nodemask is not very good - even if we have an empty 1418 * list or the wrong list here, we can start from some node and traverse all 1419 * nodes based on the zonelist. So update the list loosely once per 10 secs. 1420 * 1421 */ 1422 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) 1423 { 1424 int nid; 1425 /* 1426 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET 1427 * pagein/pageout changes since the last update. 1428 */ 1429 if (!atomic_read(&memcg->numainfo_events)) 1430 return; 1431 if (atomic_inc_return(&memcg->numainfo_updating) > 1) 1432 return; 1433 1434 /* make a nodemask where this memcg uses memory from */ 1435 memcg->scan_nodes = node_states[N_MEMORY]; 1436 1437 for_each_node_mask(nid, node_states[N_MEMORY]) { 1438 1439 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) 1440 node_clear(nid, memcg->scan_nodes); 1441 } 1442 1443 atomic_set(&memcg->numainfo_events, 0); 1444 atomic_set(&memcg->numainfo_updating, 0); 1445 } 1446 1447 /* 1448 * Selecting a node where we start reclaim from. 
Because what we need is just to 1449 * reduce the usage counter, starting from anywhere is OK. Reclaiming 1450 * memory from the current node has pros and cons. 1451 * 1452 * Freeing memory from the current node means freeing memory from a node which 1453 * we'll use or have used, so it may disturb the LRU. And if several threads 1454 * hit their limits, they will contend on that node. But freeing from a remote 1455 * node means higher memory reclaim costs because of memory latency. 1456 * 1457 * For now, we use round-robin. A better algorithm is welcome. 1458 */ 1459 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1460 { 1461 int node; 1462 1463 mem_cgroup_may_update_nodemask(memcg); 1464 node = memcg->last_scanned_node; 1465 1466 node = next_node(node, memcg->scan_nodes); 1467 if (node == MAX_NUMNODES) 1468 node = first_node(memcg->scan_nodes); 1469 /* 1470 * We call this when we hit the limit, not when pages are added to the 1471 * LRU. The LRUs may hold no pages because all pages are UNEVICTABLE, or 1472 * the memcg is too small and none of its pages are on an LRU. In that 1473 * case, we use the current node. 1474 */ 1475 if (unlikely(node == MAX_NUMNODES)) 1476 node = numa_node_id(); 1477 1478 memcg->last_scanned_node = node; 1479 return node; 1480 } 1481 #else 1482 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1483 { 1484 return 0; 1485 } 1486 #endif 1487 1488 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1489 struct zone *zone, 1490 gfp_t gfp_mask, 1491 unsigned long *total_scanned) 1492 { 1493 struct mem_cgroup *victim = NULL; 1494 int total = 0; 1495 int loop = 0; 1496 unsigned long excess; 1497 unsigned long nr_scanned; 1498 struct mem_cgroup_reclaim_cookie reclaim = { 1499 .zone = zone, 1500 .priority = 0, 1501 }; 1502 1503 excess = soft_limit_excess(root_memcg); 1504 1505 while (1) { 1506 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1507 if (!victim) { 1508 loop++; 1509 if (loop >= 2) { 1510 /* 1511 * If we have not been able to reclaim 1512 * anything, it might be because there are 1513 * no reclaimable pages under this hierarchy. 1514 */ 1515 if (!total) 1516 break; 1517 /* 1518 * We want to do more targeted reclaim. 1519 * excess >> 2 is not so large that we reclaim 1520 * too much, nor so small that we keep 1521 * coming back to reclaim from this cgroup. 1522 */ 1523 if (total >= (excess >> 2) || 1524 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1525 break; 1526 } 1527 continue; 1528 } 1529 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, 1530 zone, &nr_scanned); 1531 *total_scanned += nr_scanned; 1532 if (!soft_limit_excess(root_memcg)) 1533 break; 1534 } 1535 mem_cgroup_iter_break(root_memcg, victim); 1536 return total; 1537 } 1538 1539 #ifdef CONFIG_LOCKDEP 1540 static struct lockdep_map memcg_oom_lock_dep_map = { 1541 .name = "memcg_oom_lock", 1542 }; 1543 #endif 1544 1545 static DEFINE_SPINLOCK(memcg_oom_lock); 1546 1547 /* 1548 * Check whether the OOM killer is already running under our hierarchy. 1549 * If someone is running it, return false. 1550 */ 1551 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1552 { 1553 struct mem_cgroup *iter, *failed = NULL; 1554 1555 spin_lock(&memcg_oom_lock); 1556 1557 for_each_mem_cgroup_tree(iter, memcg) { 1558 if (iter->oom_lock) { 1559 /* 1560 * this subtree of our hierarchy is already locked, 1561 * so we cannot acquire the lock.
1562 */ 1563 failed = iter; 1564 mem_cgroup_iter_break(memcg, iter); 1565 break; 1566 } else 1567 iter->oom_lock = true; 1568 } 1569 1570 if (failed) { 1571 /* 1572 * OK, we failed to lock the whole subtree so we have 1573 * to clean up what we set up to the failing subtree 1574 */ 1575 for_each_mem_cgroup_tree(iter, memcg) { 1576 if (iter == failed) { 1577 mem_cgroup_iter_break(memcg, iter); 1578 break; 1579 } 1580 iter->oom_lock = false; 1581 } 1582 } else 1583 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1584 1585 spin_unlock(&memcg_oom_lock); 1586 1587 return !failed; 1588 } 1589 1590 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1591 { 1592 struct mem_cgroup *iter; 1593 1594 spin_lock(&memcg_oom_lock); 1595 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1596 for_each_mem_cgroup_tree(iter, memcg) 1597 iter->oom_lock = false; 1598 spin_unlock(&memcg_oom_lock); 1599 } 1600 1601 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1602 { 1603 struct mem_cgroup *iter; 1604 1605 spin_lock(&memcg_oom_lock); 1606 for_each_mem_cgroup_tree(iter, memcg) 1607 iter->under_oom++; 1608 spin_unlock(&memcg_oom_lock); 1609 } 1610 1611 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1612 { 1613 struct mem_cgroup *iter; 1614 1615 /* 1616 * When a new child is created while the hierarchy is under oom, 1617 * mem_cgroup_oom_lock() may not be called. Watch for underflow. 1618 */ 1619 spin_lock(&memcg_oom_lock); 1620 for_each_mem_cgroup_tree(iter, memcg) 1621 if (iter->under_oom > 0) 1622 iter->under_oom--; 1623 spin_unlock(&memcg_oom_lock); 1624 } 1625 1626 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1627 1628 struct oom_wait_info { 1629 struct mem_cgroup *memcg; 1630 wait_queue_t wait; 1631 }; 1632 1633 static int memcg_oom_wake_function(wait_queue_t *wait, 1634 unsigned mode, int sync, void *arg) 1635 { 1636 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1637 struct mem_cgroup *oom_wait_memcg; 1638 struct oom_wait_info *oom_wait_info; 1639 1640 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1641 oom_wait_memcg = oom_wait_info->memcg; 1642 1643 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1644 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1645 return 0; 1646 return autoremove_wake_function(wait, mode, sync, arg); 1647 } 1648 1649 static void memcg_oom_recover(struct mem_cgroup *memcg) 1650 { 1651 /* 1652 * For the following lockless ->under_oom test, the only required 1653 * guarantee is that it must see the state asserted by an OOM when 1654 * this function is called as a result of userland actions 1655 * triggered by the notification of the OOM. This is trivially 1656 * achieved by invoking mem_cgroup_mark_under_oom() before 1657 * triggering notification. 1658 */ 1659 if (memcg && memcg->under_oom) 1660 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1661 } 1662 1663 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1664 { 1665 if (!current->memcg_may_oom) 1666 return; 1667 /* 1668 * We are in the middle of the charge context here, so we 1669 * don't want to block when potentially sitting on a callstack 1670 * that holds all kinds of filesystem and mm locks. 1671 * 1672 * Also, the caller may handle a failed allocation gracefully 1673 * (like optional page cache readahead) and so an OOM killer 1674 * invocation might not even be necessary. 
1675 * 1676 * That's why we don't do anything here except remember the 1677 * OOM context and then deal with it at the end of the page 1678 * fault when the stack is unwound, the locks are released, 1679 * and when we know whether the fault was overall successful. 1680 */ 1681 css_get(&memcg->css); 1682 current->memcg_in_oom = memcg; 1683 current->memcg_oom_gfp_mask = mask; 1684 current->memcg_oom_order = order; 1685 } 1686 1687 /** 1688 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1689 * @handle: actually kill/wait or just clean up the OOM state 1690 * 1691 * This has to be called at the end of a page fault if the memcg OOM 1692 * handler was enabled. 1693 * 1694 * Memcg supports userspace OOM handling where failed allocations must 1695 * sleep on a waitqueue until the userspace task resolves the 1696 * situation. Sleeping directly in the charge context with all kinds 1697 * of locks held is not a good idea, instead we remember an OOM state 1698 * in the task and mem_cgroup_oom_synchronize() has to be called at 1699 * the end of the page fault to complete the OOM handling. 1700 * 1701 * Returns %true if an ongoing memcg OOM situation was detected and 1702 * completed, %false otherwise. 1703 */ 1704 bool mem_cgroup_oom_synchronize(bool handle) 1705 { 1706 struct mem_cgroup *memcg = current->memcg_in_oom; 1707 struct oom_wait_info owait; 1708 bool locked; 1709 1710 /* OOM is global, do not handle */ 1711 if (!memcg) 1712 return false; 1713 1714 if (!handle || oom_killer_disabled) 1715 goto cleanup; 1716 1717 owait.memcg = memcg; 1718 owait.wait.flags = 0; 1719 owait.wait.func = memcg_oom_wake_function; 1720 owait.wait.private = current; 1721 INIT_LIST_HEAD(&owait.wait.task_list); 1722 1723 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1724 mem_cgroup_mark_under_oom(memcg); 1725 1726 locked = mem_cgroup_oom_trylock(memcg); 1727 1728 if (locked) 1729 mem_cgroup_oom_notify(memcg); 1730 1731 if (locked && !memcg->oom_kill_disable) { 1732 mem_cgroup_unmark_under_oom(memcg); 1733 finish_wait(&memcg_oom_waitq, &owait.wait); 1734 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1735 current->memcg_oom_order); 1736 } else { 1737 schedule(); 1738 mem_cgroup_unmark_under_oom(memcg); 1739 finish_wait(&memcg_oom_waitq, &owait.wait); 1740 } 1741 1742 if (locked) { 1743 mem_cgroup_oom_unlock(memcg); 1744 /* 1745 * There is no guarantee that an OOM-lock contender 1746 * sees the wakeups triggered by the OOM kill 1747 * uncharges. Wake any sleepers explicitely. 1748 */ 1749 memcg_oom_recover(memcg); 1750 } 1751 cleanup: 1752 current->memcg_in_oom = NULL; 1753 css_put(&memcg->css); 1754 return true; 1755 } 1756 1757 /** 1758 * mem_cgroup_begin_page_stat - begin a page state statistics transaction 1759 * @page: page that is going to change accounted state 1760 * 1761 * This function must mark the beginning of an accounted page state 1762 * change to prevent double accounting when the page is concurrently 1763 * being moved to another memcg: 1764 * 1765 * memcg = mem_cgroup_begin_page_stat(page); 1766 * if (TestClearPageState(page)) 1767 * mem_cgroup_update_page_stat(memcg, state, -1); 1768 * mem_cgroup_end_page_stat(memcg); 1769 */ 1770 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page) 1771 { 1772 struct mem_cgroup *memcg; 1773 unsigned long flags; 1774 1775 /* 1776 * The RCU lock is held throughout the transaction. The fast 1777 * path can get away without acquiring the memcg->move_lock 1778 * because page moving starts with an RCU grace period. 
1779 * 1780 * The RCU lock also protects the memcg from being freed when 1781 * the page state that is going to change is the only thing 1782 * preventing the page from being uncharged. 1783 * E.g. end-writeback clearing PageWriteback(), which allows 1784 * migration to go ahead and uncharge the page before the 1785 * account transaction might be complete. 1786 */ 1787 rcu_read_lock(); 1788 1789 if (mem_cgroup_disabled()) 1790 return NULL; 1791 again: 1792 memcg = page->mem_cgroup; 1793 if (unlikely(!memcg)) 1794 return NULL; 1795 1796 if (atomic_read(&memcg->moving_account) <= 0) 1797 return memcg; 1798 1799 spin_lock_irqsave(&memcg->move_lock, flags); 1800 if (memcg != page->mem_cgroup) { 1801 spin_unlock_irqrestore(&memcg->move_lock, flags); 1802 goto again; 1803 } 1804 1805 /* 1806 * When charge migration first begins, we can have locked and 1807 * unlocked page stat updates happening concurrently. Track 1808 * the task who has the lock for mem_cgroup_end_page_stat(). 1809 */ 1810 memcg->move_lock_task = current; 1811 memcg->move_lock_flags = flags; 1812 1813 return memcg; 1814 } 1815 EXPORT_SYMBOL(mem_cgroup_begin_page_stat); 1816 1817 /** 1818 * mem_cgroup_end_page_stat - finish a page state statistics transaction 1819 * @memcg: the memcg that was accounted against 1820 */ 1821 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) 1822 { 1823 if (memcg && memcg->move_lock_task == current) { 1824 unsigned long flags = memcg->move_lock_flags; 1825 1826 memcg->move_lock_task = NULL; 1827 memcg->move_lock_flags = 0; 1828 1829 spin_unlock_irqrestore(&memcg->move_lock, flags); 1830 } 1831 1832 rcu_read_unlock(); 1833 } 1834 EXPORT_SYMBOL(mem_cgroup_end_page_stat); 1835 1836 /* 1837 * size of first charge trial. "32" comes from vmscan.c's magic value. 1838 * TODO: maybe necessary to use big numbers in big irons. 1839 */ 1840 #define CHARGE_BATCH 32U 1841 struct memcg_stock_pcp { 1842 struct mem_cgroup *cached; /* this never be root cgroup */ 1843 unsigned int nr_pages; 1844 struct work_struct work; 1845 unsigned long flags; 1846 #define FLUSHING_CACHED_CHARGE 0 1847 }; 1848 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1849 static DEFINE_MUTEX(percpu_charge_mutex); 1850 1851 /** 1852 * consume_stock: Try to consume stocked charge on this cpu. 1853 * @memcg: memcg to consume from. 1854 * @nr_pages: how many pages to charge. 1855 * 1856 * The charges will only happen if @memcg matches the current cpu's memcg 1857 * stock, and at least @nr_pages are available in that stock. Failure to 1858 * service an allocation will refill the stock. 1859 * 1860 * returns true if successful, false otherwise. 1861 */ 1862 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1863 { 1864 struct memcg_stock_pcp *stock; 1865 bool ret = false; 1866 1867 if (nr_pages > CHARGE_BATCH) 1868 return ret; 1869 1870 stock = &get_cpu_var(memcg_stock); 1871 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 1872 stock->nr_pages -= nr_pages; 1873 ret = true; 1874 } 1875 put_cpu_var(memcg_stock); 1876 return ret; 1877 } 1878 1879 /* 1880 * Returns stocks cached in percpu and reset cached information. 
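 * The uncharged pages go back to the cached memcg's memory (and, with swap
 * accounting enabled, memsw) page counters, and the css references taken for
 * them are dropped.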
1881 */ 1882 static void drain_stock(struct memcg_stock_pcp *stock) 1883 { 1884 struct mem_cgroup *old = stock->cached; 1885 1886 if (stock->nr_pages) { 1887 page_counter_uncharge(&old->memory, stock->nr_pages); 1888 if (do_swap_account) 1889 page_counter_uncharge(&old->memsw, stock->nr_pages); 1890 css_put_many(&old->css, stock->nr_pages); 1891 stock->nr_pages = 0; 1892 } 1893 stock->cached = NULL; 1894 } 1895 1896 /* 1897 * This must be called under preempt disabled or must be called by 1898 * a thread which is pinned to local cpu. 1899 */ 1900 static void drain_local_stock(struct work_struct *dummy) 1901 { 1902 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock); 1903 drain_stock(stock); 1904 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1905 } 1906 1907 /* 1908 * Cache charges(val) to local per_cpu area. 1909 * This will be consumed by consume_stock() function, later. 1910 */ 1911 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1912 { 1913 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 1914 1915 if (stock->cached != memcg) { /* reset if necessary */ 1916 drain_stock(stock); 1917 stock->cached = memcg; 1918 } 1919 stock->nr_pages += nr_pages; 1920 put_cpu_var(memcg_stock); 1921 } 1922 1923 /* 1924 * Drains all per-CPU charge caches for given root_memcg resp. subtree 1925 * of the hierarchy under it. 1926 */ 1927 static void drain_all_stock(struct mem_cgroup *root_memcg) 1928 { 1929 int cpu, curcpu; 1930 1931 /* If someone's already draining, avoid adding running more workers. */ 1932 if (!mutex_trylock(&percpu_charge_mutex)) 1933 return; 1934 /* Notify other cpus that system-wide "drain" is running */ 1935 get_online_cpus(); 1936 curcpu = get_cpu(); 1937 for_each_online_cpu(cpu) { 1938 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 1939 struct mem_cgroup *memcg; 1940 1941 memcg = stock->cached; 1942 if (!memcg || !stock->nr_pages) 1943 continue; 1944 if (!mem_cgroup_is_descendant(memcg, root_memcg)) 1945 continue; 1946 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 1947 if (cpu == curcpu) 1948 drain_local_stock(&stock->work); 1949 else 1950 schedule_work_on(cpu, &stock->work); 1951 } 1952 } 1953 put_cpu(); 1954 put_online_cpus(); 1955 mutex_unlock(&percpu_charge_mutex); 1956 } 1957 1958 static int memcg_cpu_hotplug_callback(struct notifier_block *nb, 1959 unsigned long action, 1960 void *hcpu) 1961 { 1962 int cpu = (unsigned long)hcpu; 1963 struct memcg_stock_pcp *stock; 1964 1965 if (action == CPU_ONLINE) 1966 return NOTIFY_OK; 1967 1968 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 1969 return NOTIFY_OK; 1970 1971 stock = &per_cpu(memcg_stock, cpu); 1972 drain_stock(stock); 1973 return NOTIFY_OK; 1974 } 1975 1976 /* 1977 * Scheduled by try_charge() to be executed from the userland return path 1978 * and reclaims memory over the high limit. 
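 * The number of pages to reclaim is stashed by try_charge() in
 * current->memcg_nr_pages_over_high and is cleared here after reclaim has
 * been attempted for every ancestor that is above its high limit.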
1979 */ 1980 void mem_cgroup_handle_over_high(void) 1981 { 1982 unsigned int nr_pages = current->memcg_nr_pages_over_high; 1983 struct mem_cgroup *memcg, *pos; 1984 1985 if (likely(!nr_pages)) 1986 return; 1987 1988 pos = memcg = get_mem_cgroup_from_mm(current->mm); 1989 1990 do { 1991 if (page_counter_read(&pos->memory) <= pos->high) 1992 continue; 1993 mem_cgroup_events(pos, MEMCG_HIGH, 1); 1994 try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true); 1995 } while ((pos = parent_mem_cgroup(pos))); 1996 1997 css_put(&memcg->css); 1998 current->memcg_nr_pages_over_high = 0; 1999 } 2000 2001 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2002 unsigned int nr_pages) 2003 { 2004 unsigned int batch = max(CHARGE_BATCH, nr_pages); 2005 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2006 struct mem_cgroup *mem_over_limit; 2007 struct page_counter *counter; 2008 unsigned long nr_reclaimed; 2009 bool may_swap = true; 2010 bool drained = false; 2011 2012 if (mem_cgroup_is_root(memcg)) 2013 return 0; 2014 retry: 2015 if (consume_stock(memcg, nr_pages)) 2016 return 0; 2017 2018 if (!do_swap_account || 2019 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2020 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2021 goto done_restock; 2022 if (do_swap_account) 2023 page_counter_uncharge(&memcg->memsw, batch); 2024 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2025 } else { 2026 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2027 may_swap = false; 2028 } 2029 2030 if (batch > nr_pages) { 2031 batch = nr_pages; 2032 goto retry; 2033 } 2034 2035 /* 2036 * Unlike in global OOM situations, memcg is not in a physical 2037 * memory shortage. Allow dying and OOM-killed tasks to 2038 * bypass the last charges so that they can exit quickly and 2039 * free their memory. 2040 */ 2041 if (unlikely(test_thread_flag(TIF_MEMDIE) || 2042 fatal_signal_pending(current) || 2043 current->flags & PF_EXITING)) 2044 goto force; 2045 2046 if (unlikely(task_in_memcg_oom(current))) 2047 goto nomem; 2048 2049 if (!gfpflags_allow_blocking(gfp_mask)) 2050 goto nomem; 2051 2052 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1); 2053 2054 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2055 gfp_mask, may_swap); 2056 2057 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2058 goto retry; 2059 2060 if (!drained) { 2061 drain_all_stock(mem_over_limit); 2062 drained = true; 2063 goto retry; 2064 } 2065 2066 if (gfp_mask & __GFP_NORETRY) 2067 goto nomem; 2068 /* 2069 * Even though the limit is exceeded at this point, reclaim 2070 * may have been able to free some pages. Retry the charge 2071 * before killing the task. 2072 * 2073 * Only for regular pages, though: huge pages are rather 2074 * unlikely to succeed so close to the limit, and we fall back 2075 * to regular pages anyway in case of failure. 2076 */ 2077 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2078 goto retry; 2079 /* 2080 * At task move, charge accounts can be doubly counted. So, it's 2081 * better to wait until the end of task_move if something is going on. 
2082 */ 2083 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2084 goto retry; 2085 2086 if (nr_retries--) 2087 goto retry; 2088 2089 if (gfp_mask & __GFP_NOFAIL) 2090 goto force; 2091 2092 if (fatal_signal_pending(current)) 2093 goto force; 2094 2095 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1); 2096 2097 mem_cgroup_oom(mem_over_limit, gfp_mask, 2098 get_order(nr_pages * PAGE_SIZE)); 2099 nomem: 2100 if (!(gfp_mask & __GFP_NOFAIL)) 2101 return -ENOMEM; 2102 force: 2103 /* 2104 * The allocation either can't fail or will lead to more memory 2105 * being freed very soon. Allow memory usage go over the limit 2106 * temporarily by force charging it. 2107 */ 2108 page_counter_charge(&memcg->memory, nr_pages); 2109 if (do_swap_account) 2110 page_counter_charge(&memcg->memsw, nr_pages); 2111 css_get_many(&memcg->css, nr_pages); 2112 2113 return 0; 2114 2115 done_restock: 2116 css_get_many(&memcg->css, batch); 2117 if (batch > nr_pages) 2118 refill_stock(memcg, batch - nr_pages); 2119 2120 /* 2121 * If the hierarchy is above the normal consumption range, schedule 2122 * reclaim on returning to userland. We can perform reclaim here 2123 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2124 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2125 * not recorded as it most likely matches current's and won't 2126 * change in the meantime. As high limit is checked again before 2127 * reclaim, the cost of mismatch is negligible. 2128 */ 2129 do { 2130 if (page_counter_read(&memcg->memory) > memcg->high) { 2131 current->memcg_nr_pages_over_high += nr_pages; 2132 set_notify_resume(current); 2133 break; 2134 } 2135 } while ((memcg = parent_mem_cgroup(memcg))); 2136 2137 return 0; 2138 } 2139 2140 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2141 { 2142 if (mem_cgroup_is_root(memcg)) 2143 return; 2144 2145 page_counter_uncharge(&memcg->memory, nr_pages); 2146 if (do_swap_account) 2147 page_counter_uncharge(&memcg->memsw, nr_pages); 2148 2149 css_put_many(&memcg->css, nr_pages); 2150 } 2151 2152 static void lock_page_lru(struct page *page, int *isolated) 2153 { 2154 struct zone *zone = page_zone(page); 2155 2156 spin_lock_irq(&zone->lru_lock); 2157 if (PageLRU(page)) { 2158 struct lruvec *lruvec; 2159 2160 lruvec = mem_cgroup_page_lruvec(page, zone); 2161 ClearPageLRU(page); 2162 del_page_from_lru_list(page, lruvec, page_lru(page)); 2163 *isolated = 1; 2164 } else 2165 *isolated = 0; 2166 } 2167 2168 static void unlock_page_lru(struct page *page, int isolated) 2169 { 2170 struct zone *zone = page_zone(page); 2171 2172 if (isolated) { 2173 struct lruvec *lruvec; 2174 2175 lruvec = mem_cgroup_page_lruvec(page, zone); 2176 VM_BUG_ON_PAGE(PageLRU(page), page); 2177 SetPageLRU(page); 2178 add_page_to_lru_list(page, lruvec, page_lru(page)); 2179 } 2180 spin_unlock_irq(&zone->lru_lock); 2181 } 2182 2183 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2184 bool lrucare) 2185 { 2186 int isolated; 2187 2188 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2189 2190 /* 2191 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2192 * may already be on some other mem_cgroup's LRU. Take care of it. 
2193 */ 2194 if (lrucare) 2195 lock_page_lru(page, &isolated); 2196 2197 /* 2198 * Nobody should be changing or seriously looking at 2199 * page->mem_cgroup at this point: 2200 * 2201 * - the page is uncharged 2202 * 2203 * - the page is off-LRU 2204 * 2205 * - an anonymous fault has exclusive page access, except for 2206 * a locked page table 2207 * 2208 * - a page cache insertion, a swapin fault, or a migration 2209 * have the page locked 2210 */ 2211 page->mem_cgroup = memcg; 2212 2213 if (lrucare) 2214 unlock_page_lru(page, isolated); 2215 } 2216 2217 #ifdef CONFIG_MEMCG_KMEM 2218 static int memcg_alloc_cache_id(void) 2219 { 2220 int id, size; 2221 int err; 2222 2223 id = ida_simple_get(&memcg_cache_ida, 2224 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2225 if (id < 0) 2226 return id; 2227 2228 if (id < memcg_nr_cache_ids) 2229 return id; 2230 2231 /* 2232 * There's no space for the new id in memcg_caches arrays, 2233 * so we have to grow them. 2234 */ 2235 down_write(&memcg_cache_ids_sem); 2236 2237 size = 2 * (id + 1); 2238 if (size < MEMCG_CACHES_MIN_SIZE) 2239 size = MEMCG_CACHES_MIN_SIZE; 2240 else if (size > MEMCG_CACHES_MAX_SIZE) 2241 size = MEMCG_CACHES_MAX_SIZE; 2242 2243 err = memcg_update_all_caches(size); 2244 if (!err) 2245 err = memcg_update_all_list_lrus(size); 2246 if (!err) 2247 memcg_nr_cache_ids = size; 2248 2249 up_write(&memcg_cache_ids_sem); 2250 2251 if (err) { 2252 ida_simple_remove(&memcg_cache_ida, id); 2253 return err; 2254 } 2255 return id; 2256 } 2257 2258 static void memcg_free_cache_id(int id) 2259 { 2260 ida_simple_remove(&memcg_cache_ida, id); 2261 } 2262 2263 struct memcg_kmem_cache_create_work { 2264 struct mem_cgroup *memcg; 2265 struct kmem_cache *cachep; 2266 struct work_struct work; 2267 }; 2268 2269 static void memcg_kmem_cache_create_func(struct work_struct *w) 2270 { 2271 struct memcg_kmem_cache_create_work *cw = 2272 container_of(w, struct memcg_kmem_cache_create_work, work); 2273 struct mem_cgroup *memcg = cw->memcg; 2274 struct kmem_cache *cachep = cw->cachep; 2275 2276 memcg_create_kmem_cache(memcg, cachep); 2277 2278 css_put(&memcg->css); 2279 kfree(cw); 2280 } 2281 2282 /* 2283 * Enqueue the creation of a per-memcg kmem_cache. 2284 */ 2285 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2286 struct kmem_cache *cachep) 2287 { 2288 struct memcg_kmem_cache_create_work *cw; 2289 2290 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2291 if (!cw) 2292 return; 2293 2294 css_get(&memcg->css); 2295 2296 cw->memcg = memcg; 2297 cw->cachep = cachep; 2298 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2299 2300 schedule_work(&cw->work); 2301 } 2302 2303 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2304 struct kmem_cache *cachep) 2305 { 2306 /* 2307 * We need to stop accounting when we kmalloc, because if the 2308 * corresponding kmalloc cache is not yet created, the first allocation 2309 * in __memcg_schedule_kmem_cache_create will recurse. 2310 * 2311 * However, it is better to enclose the whole function. Depending on 2312 * the debugging options enabled, INIT_WORK(), for instance, can 2313 * trigger an allocation. This too, will make us recurse. Because at 2314 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2315 * the safest choice is to do it like this, wrapping the whole function. 
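 *
 * In other words, the skip flag below guards against a recursion of the
 * following shape (hypothetical trace, for illustration only):
 *
 *	kmalloc()
 *	  memcg_kmem_get_cache()
 *	    memcg_schedule_kmem_cache_create()
 *	      kmalloc(sizeof(*cw))	- would re-enter the memcg cache lookup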
2316 */
2317 current->memcg_kmem_skip_account = 1;
2318 __memcg_schedule_kmem_cache_create(memcg, cachep);
2319 current->memcg_kmem_skip_account = 0;
2320 }
2321
2322 /*
2323 * Return the kmem_cache we're supposed to use for a slab allocation.
2324 * We try to use the current memcg's version of the cache.
2325 *
2326 * If the cache does not exist yet, i.e. we are the first user of it,
2327 * we either create it immediately, if possible, or create it asynchronously
2328 * in a workqueue.
2329 * In the latter case, we will let the current allocation go through with
2330 * the original cache.
2331 *
2332 * Can't be called in interrupt context or from kernel threads.
2333 * This function needs to be called with rcu_read_lock() held.
2334 */
2335 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
2336 {
2337 struct mem_cgroup *memcg;
2338 struct kmem_cache *memcg_cachep;
2339 int kmemcg_id;
2340
2341 VM_BUG_ON(!is_root_cache(cachep));
2342
2343 if (current->memcg_kmem_skip_account)
2344 return cachep;
2345
2346 memcg = get_mem_cgroup_from_mm(current->mm);
2347 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2348 if (kmemcg_id < 0)
2349 goto out;
2350
2351 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2352 if (likely(memcg_cachep))
2353 return memcg_cachep;
2354
2355 /*
2356 * If we are in a safe context (can wait, and not in interrupt
2357 * context), we could be predictable and return right away.
2358 * This would guarantee that the allocation being performed
2359 * already belongs in the new cache.
2360 *
2361 * However, there are some clashes that can arise from locking.
2362 * For instance, because we acquire the slab_mutex while doing
2363 * memcg_create_kmem_cache, this means no further allocation
2364 * could happen with the slab_mutex held. So it's better to
2365 * defer everything.
2366 */ 2367 memcg_schedule_kmem_cache_create(memcg, cachep); 2368 out: 2369 css_put(&memcg->css); 2370 return cachep; 2371 } 2372 2373 void __memcg_kmem_put_cache(struct kmem_cache *cachep) 2374 { 2375 if (!is_root_cache(cachep)) 2376 css_put(&cachep->memcg_params.memcg->css); 2377 } 2378 2379 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2380 struct mem_cgroup *memcg) 2381 { 2382 unsigned int nr_pages = 1 << order; 2383 struct page_counter *counter; 2384 int ret; 2385 2386 if (!memcg_kmem_is_active(memcg)) 2387 return 0; 2388 2389 if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) 2390 return -ENOMEM; 2391 2392 ret = try_charge(memcg, gfp, nr_pages); 2393 if (ret) { 2394 page_counter_uncharge(&memcg->kmem, nr_pages); 2395 return ret; 2396 } 2397 2398 page->mem_cgroup = memcg; 2399 2400 return 0; 2401 } 2402 2403 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2404 { 2405 struct mem_cgroup *memcg; 2406 int ret; 2407 2408 memcg = get_mem_cgroup_from_mm(current->mm); 2409 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); 2410 css_put(&memcg->css); 2411 return ret; 2412 } 2413 2414 void __memcg_kmem_uncharge(struct page *page, int order) 2415 { 2416 struct mem_cgroup *memcg = page->mem_cgroup; 2417 unsigned int nr_pages = 1 << order; 2418 2419 if (!memcg) 2420 return; 2421 2422 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2423 2424 page_counter_uncharge(&memcg->kmem, nr_pages); 2425 page_counter_uncharge(&memcg->memory, nr_pages); 2426 if (do_swap_account) 2427 page_counter_uncharge(&memcg->memsw, nr_pages); 2428 2429 page->mem_cgroup = NULL; 2430 css_put_many(&memcg->css, nr_pages); 2431 } 2432 #endif /* CONFIG_MEMCG_KMEM */ 2433 2434 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2435 2436 /* 2437 * Because tail pages are not marked as "used", set it. We're under 2438 * zone->lru_lock, 'splitting on pmd' and compound_lock. 2439 * charge/uncharge will be never happen and move_account() is done under 2440 * compound_lock(), so we don't have to take care of races. 2441 */ 2442 void mem_cgroup_split_huge_fixup(struct page *head) 2443 { 2444 int i; 2445 2446 if (mem_cgroup_disabled()) 2447 return; 2448 2449 for (i = 1; i < HPAGE_PMD_NR; i++) 2450 head[i].mem_cgroup = head->mem_cgroup; 2451 2452 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 2453 HPAGE_PMD_NR); 2454 } 2455 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2456 2457 #ifdef CONFIG_MEMCG_SWAP 2458 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2459 bool charge) 2460 { 2461 int val = (charge) ? 1 : -1; 2462 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); 2463 } 2464 2465 /** 2466 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2467 * @entry: swap entry to be moved 2468 * @from: mem_cgroup which the entry is moved from 2469 * @to: mem_cgroup which the entry is moved to 2470 * 2471 * It succeeds only when the swap_cgroup's record for this entry is the same 2472 * as the mem_cgroup's id of @from. 2473 * 2474 * Returns 0 on success, -EINVAL on failure. 2475 * 2476 * The caller must have charged to @to, IOW, called page_counter_charge() about 2477 * both res and memsw, and called css_get(). 
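 *
 * The record update is a single compare-and-exchange on the swap_cgroup
 * entry; in rough outline (mirroring the body below):
 *
 *	if (swap_cgroup_cmpxchg(entry, mem_cgroup_id(from),
 *				mem_cgroup_id(to)) == mem_cgroup_id(from))
 *		adjust the per-memcg swap statistics and return 0;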
2478 */
2479 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2480 struct mem_cgroup *from, struct mem_cgroup *to)
2481 {
2482 unsigned short old_id, new_id;
2483
2484 old_id = mem_cgroup_id(from);
2485 new_id = mem_cgroup_id(to);
2486
2487 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2488 mem_cgroup_swap_statistics(from, false);
2489 mem_cgroup_swap_statistics(to, true);
2490 return 0;
2491 }
2492 return -EINVAL;
2493 }
2494 #else
2495 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2496 struct mem_cgroup *from, struct mem_cgroup *to)
2497 {
2498 return -EINVAL;
2499 }
2500 #endif
2501
2502 static DEFINE_MUTEX(memcg_limit_mutex);
2503
2504 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2505 unsigned long limit)
2506 {
2507 unsigned long curusage;
2508 unsigned long oldusage;
2509 bool enlarge = false;
2510 int retry_count;
2511 int ret;
2512
2513 /*
2514 * To keep hierarchical_reclaim simple, how long we should retry
2515 * depends on the caller. We set our retry count to be a function
2516 * of the number of children we should visit in this loop.
2517 */
2518 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2519 mem_cgroup_count_children(memcg);
2520
2521 oldusage = page_counter_read(&memcg->memory);
2522
2523 do {
2524 if (signal_pending(current)) {
2525 ret = -EINTR;
2526 break;
2527 }
2528
2529 mutex_lock(&memcg_limit_mutex);
2530 if (limit > memcg->memsw.limit) {
2531 mutex_unlock(&memcg_limit_mutex);
2532 ret = -EINVAL;
2533 break;
2534 }
2535 if (limit > memcg->memory.limit)
2536 enlarge = true;
2537 ret = page_counter_limit(&memcg->memory, limit);
2538 mutex_unlock(&memcg_limit_mutex);
2539
2540 if (!ret)
2541 break;
2542
2543 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2544
2545 curusage = page_counter_read(&memcg->memory);
2546 /* Usage is reduced ? */
2547 if (curusage >= oldusage)
2548 retry_count--;
2549 else
2550 oldusage = curusage;
2551 } while (retry_count);
2552
2553 if (!ret && enlarge)
2554 memcg_oom_recover(memcg);
2555
2556 return ret;
2557 }
2558
2559 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2560 unsigned long limit)
2561 {
2562 unsigned long curusage;
2563 unsigned long oldusage;
2564 bool enlarge = false;
2565 int retry_count;
2566 int ret;
2567
2568 /* see mem_cgroup_resize_limit() */
2569 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2570 mem_cgroup_count_children(memcg);
2571
2572 oldusage = page_counter_read(&memcg->memsw);
2573
2574 do {
2575 if (signal_pending(current)) {
2576 ret = -EINTR;
2577 break;
2578 }
2579
2580 mutex_lock(&memcg_limit_mutex);
2581 if (limit < memcg->memory.limit) {
2582 mutex_unlock(&memcg_limit_mutex);
2583 ret = -EINVAL;
2584 break;
2585 }
2586 if (limit > memcg->memsw.limit)
2587 enlarge = true;
2588 ret = page_counter_limit(&memcg->memsw, limit);
2589 mutex_unlock(&memcg_limit_mutex);
2590
2591 if (!ret)
2592 break;
2593
2594 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2595
2596 curusage = page_counter_read(&memcg->memsw);
2597 /* Usage is reduced ? */
2598 if (curusage >= oldusage)
2599 retry_count--;
2600 else
2601 oldusage = curusage;
2602 } while (retry_count);
2603
2604 if (!ret && enlarge)
2605 memcg_oom_recover(memcg);
2606
2607 return ret;
2608 }
2609
2610 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2611 gfp_t gfp_mask,
2612 unsigned long *total_scanned)
2613 {
2614 unsigned long nr_reclaimed = 0;
2615 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2616 unsigned long reclaimed;
2617 int loop = 0;
2618 struct mem_cgroup_tree_per_zone *mctz;
2619 unsigned long excess;
2620 unsigned long nr_scanned;
2621
2622 if (order > 0)
2623 return 0;
2624
2625 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2626 /*
2627 * This loop can run for a while, especially if mem_cgroups continuously
2628 * keep exceeding their soft limit and putting the system under
2629 * pressure.
2630 */
2631 do {
2632 if (next_mz)
2633 mz = next_mz;
2634 else
2635 mz = mem_cgroup_largest_soft_limit_node(mctz);
2636 if (!mz)
2637 break;
2638
2639 nr_scanned = 0;
2640 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2641 gfp_mask, &nr_scanned);
2642 nr_reclaimed += reclaimed;
2643 *total_scanned += nr_scanned;
2644 spin_lock_irq(&mctz->lock);
2645 __mem_cgroup_remove_exceeded(mz, mctz);
2646
2647 /*
2648 * If we failed to reclaim anything from this memory cgroup
2649 * it is time to move on to the next cgroup
2650 */
2651 next_mz = NULL;
2652 if (!reclaimed)
2653 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2654
2655 excess = soft_limit_excess(mz->memcg);
2656 /*
2657 * One school of thought says that we should not add
2658 * back the node to the tree if reclaim returns 0.
2659 * But our reclaim could return 0 simply because, due
2660 * to priority, we are exposing a smaller subset of
2661 * memory to reclaim from. Consider this as a longer
2662 * term TODO.
2663 */
2664 /* If excess == 0, no tree ops */
2665 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2666 spin_unlock_irq(&mctz->lock);
2667 css_put(&mz->memcg->css);
2668 loop++;
2669 /*
2670 * Could not reclaim anything and there are no more
2671 * mem cgroups to try or we seem to be looping without
2672 * reclaiming anything.
2673 */
2674 if (!nr_reclaimed &&
2675 (next_mz == NULL ||
2676 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2677 break;
2678 } while (!nr_reclaimed);
2679 if (next_mz)
2680 css_put(&next_mz->memcg->css);
2681 return nr_reclaimed;
2682 }
2683
2684 /*
2685 * Test whether @memcg has children, dead or alive. Note that this
2686 * function doesn't care whether @memcg has use_hierarchy enabled and
2687 * returns %true if there are child csses according to the cgroup
2688 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2689 */
2690 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2691 {
2692 bool ret;
2693
2694 /*
2695 * The lock does not prevent addition or deletion of children, but
2696 * it prevents a new child from being initialized based on this
2697 * parent in css_online(), so it's enough to decide whether
2698 * hierarchically inherited attributes can still be changed or not.
2699 */
2700 lockdep_assert_held(&memcg_create_mutex);
2701
2702 rcu_read_lock();
2703 ret = css_next_child(NULL, &memcg->css);
2704 rcu_read_unlock();
2705 return ret;
2706 }
2707
2708 /*
2709 * Reclaims as many pages from the given memcg as possible and moves
2710 * the rest to the parent.
2711 *
2712 * The caller is responsible for holding a css reference on @memcg.
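 *
 * This backs the legacy memory.force_empty knob, so (illustratively,
 * assuming the usual cgroup v1 mount point) something like
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * ends up here via mem_cgroup_force_empty_write() below.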
2713 */
2714 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2715 {
2716 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2717
2718 /* we call try-to-free pages to make this cgroup empty */
2719 lru_add_drain_all();
2720 /* try to free all pages in this cgroup */
2721 while (nr_retries && page_counter_read(&memcg->memory)) {
2722 int progress;
2723
2724 if (signal_pending(current))
2725 return -EINTR;
2726
2727 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2728 GFP_KERNEL, true);
2729 if (!progress) {
2730 nr_retries--;
2731 /* maybe some writeback is necessary */
2732 congestion_wait(BLK_RW_ASYNC, HZ/10);
2733 }
2734
2735 }
2736
2737 return 0;
2738 }
2739
2740 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2741 char *buf, size_t nbytes,
2742 loff_t off)
2743 {
2744 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2745
2746 if (mem_cgroup_is_root(memcg))
2747 return -EINVAL;
2748 return mem_cgroup_force_empty(memcg) ?: nbytes;
2749 }
2750
2751 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2752 struct cftype *cft)
2753 {
2754 return mem_cgroup_from_css(css)->use_hierarchy;
2755 }
2756
2757 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2758 struct cftype *cft, u64 val)
2759 {
2760 int retval = 0;
2761 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2762 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2763
2764 mutex_lock(&memcg_create_mutex);
2765
2766 if (memcg->use_hierarchy == val)
2767 goto out;
2768
2769 /*
2770 * If parent's use_hierarchy is set, we can't make any modifications
2771 * in the child subtrees. If it is unset, then the change can
2772 * occur, provided the current cgroup has no children.
2773 *
2774 * For the root cgroup, parent_memcg is NULL; we allow the value to be
2775 * set if there are no children.
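 *
 * For example (illustrative): writing 1 to memory.use_hierarchy succeeds
 * only while this group has no children (otherwise -EBUSY), and only if
 * the parent does not itself have use_hierarchy set (otherwise -EINVAL).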
2776 */ 2777 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2778 (val == 1 || val == 0)) { 2779 if (!memcg_has_children(memcg)) 2780 memcg->use_hierarchy = val; 2781 else 2782 retval = -EBUSY; 2783 } else 2784 retval = -EINVAL; 2785 2786 out: 2787 mutex_unlock(&memcg_create_mutex); 2788 2789 return retval; 2790 } 2791 2792 static unsigned long tree_stat(struct mem_cgroup *memcg, 2793 enum mem_cgroup_stat_index idx) 2794 { 2795 struct mem_cgroup *iter; 2796 unsigned long val = 0; 2797 2798 for_each_mem_cgroup_tree(iter, memcg) 2799 val += mem_cgroup_read_stat(iter, idx); 2800 2801 return val; 2802 } 2803 2804 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2805 { 2806 unsigned long val; 2807 2808 if (mem_cgroup_is_root(memcg)) { 2809 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); 2810 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); 2811 if (swap) 2812 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); 2813 } else { 2814 if (!swap) 2815 val = page_counter_read(&memcg->memory); 2816 else 2817 val = page_counter_read(&memcg->memsw); 2818 } 2819 return val; 2820 } 2821 2822 enum { 2823 RES_USAGE, 2824 RES_LIMIT, 2825 RES_MAX_USAGE, 2826 RES_FAILCNT, 2827 RES_SOFT_LIMIT, 2828 }; 2829 2830 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 2831 struct cftype *cft) 2832 { 2833 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2834 struct page_counter *counter; 2835 2836 switch (MEMFILE_TYPE(cft->private)) { 2837 case _MEM: 2838 counter = &memcg->memory; 2839 break; 2840 case _MEMSWAP: 2841 counter = &memcg->memsw; 2842 break; 2843 case _KMEM: 2844 counter = &memcg->kmem; 2845 break; 2846 default: 2847 BUG(); 2848 } 2849 2850 switch (MEMFILE_ATTR(cft->private)) { 2851 case RES_USAGE: 2852 if (counter == &memcg->memory) 2853 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 2854 if (counter == &memcg->memsw) 2855 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 2856 return (u64)page_counter_read(counter) * PAGE_SIZE; 2857 case RES_LIMIT: 2858 return (u64)counter->limit * PAGE_SIZE; 2859 case RES_MAX_USAGE: 2860 return (u64)counter->watermark * PAGE_SIZE; 2861 case RES_FAILCNT: 2862 return counter->failcnt; 2863 case RES_SOFT_LIMIT: 2864 return (u64)memcg->soft_limit * PAGE_SIZE; 2865 default: 2866 BUG(); 2867 } 2868 } 2869 2870 #ifdef CONFIG_MEMCG_KMEM 2871 static int memcg_activate_kmem(struct mem_cgroup *memcg, 2872 unsigned long nr_pages) 2873 { 2874 int err = 0; 2875 int memcg_id; 2876 2877 BUG_ON(memcg->kmemcg_id >= 0); 2878 BUG_ON(memcg->kmem_acct_activated); 2879 BUG_ON(memcg->kmem_acct_active); 2880 2881 /* 2882 * For simplicity, we won't allow this to be disabled. It also can't 2883 * be changed if the cgroup has children already, or if tasks had 2884 * already joined. 2885 * 2886 * If tasks join before we set the limit, a person looking at 2887 * kmem.usage_in_bytes will have no way to determine when it took 2888 * place, which makes the value quite meaningless. 2889 * 2890 * After it first became limited, changes in the value of the limit are 2891 * of course permitted. 
2892 */ 2893 mutex_lock(&memcg_create_mutex); 2894 if (cgroup_is_populated(memcg->css.cgroup) || 2895 (memcg->use_hierarchy && memcg_has_children(memcg))) 2896 err = -EBUSY; 2897 mutex_unlock(&memcg_create_mutex); 2898 if (err) 2899 goto out; 2900 2901 memcg_id = memcg_alloc_cache_id(); 2902 if (memcg_id < 0) { 2903 err = memcg_id; 2904 goto out; 2905 } 2906 2907 /* 2908 * We couldn't have accounted to this cgroup, because it hasn't got 2909 * activated yet, so this should succeed. 2910 */ 2911 err = page_counter_limit(&memcg->kmem, nr_pages); 2912 VM_BUG_ON(err); 2913 2914 static_key_slow_inc(&memcg_kmem_enabled_key); 2915 /* 2916 * A memory cgroup is considered kmem-active as soon as it gets 2917 * kmemcg_id. Setting the id after enabling static branching will 2918 * guarantee no one starts accounting before all call sites are 2919 * patched. 2920 */ 2921 memcg->kmemcg_id = memcg_id; 2922 memcg->kmem_acct_activated = true; 2923 memcg->kmem_acct_active = true; 2924 out: 2925 return err; 2926 } 2927 2928 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2929 unsigned long limit) 2930 { 2931 int ret; 2932 2933 mutex_lock(&memcg_limit_mutex); 2934 if (!memcg_kmem_is_active(memcg)) 2935 ret = memcg_activate_kmem(memcg, limit); 2936 else 2937 ret = page_counter_limit(&memcg->kmem, limit); 2938 mutex_unlock(&memcg_limit_mutex); 2939 return ret; 2940 } 2941 2942 static int memcg_propagate_kmem(struct mem_cgroup *memcg) 2943 { 2944 int ret = 0; 2945 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 2946 2947 if (!parent) 2948 return 0; 2949 2950 mutex_lock(&memcg_limit_mutex); 2951 /* 2952 * If the parent cgroup is not kmem-active now, it cannot be activated 2953 * after this point, because it has at least one child already. 2954 */ 2955 if (memcg_kmem_is_active(parent)) 2956 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); 2957 mutex_unlock(&memcg_limit_mutex); 2958 return ret; 2959 } 2960 #else 2961 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2962 unsigned long limit) 2963 { 2964 return -EINVAL; 2965 } 2966 #endif /* CONFIG_MEMCG_KMEM */ 2967 2968 /* 2969 * The user of this function is... 2970 * RES_LIMIT. 
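 *
 * For instance (illustrative), "echo 512M > memory.limit_in_bytes" arrives
 * here with MEMFILE_ATTR() == RES_LIMIT; the buffer is parsed by
 * page_counter_memparse(), which also accepts "-1" to mean "no limit".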
2971 */ 2972 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 2973 char *buf, size_t nbytes, loff_t off) 2974 { 2975 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2976 unsigned long nr_pages; 2977 int ret; 2978 2979 buf = strstrip(buf); 2980 ret = page_counter_memparse(buf, "-1", &nr_pages); 2981 if (ret) 2982 return ret; 2983 2984 switch (MEMFILE_ATTR(of_cft(of)->private)) { 2985 case RES_LIMIT: 2986 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 2987 ret = -EINVAL; 2988 break; 2989 } 2990 switch (MEMFILE_TYPE(of_cft(of)->private)) { 2991 case _MEM: 2992 ret = mem_cgroup_resize_limit(memcg, nr_pages); 2993 break; 2994 case _MEMSWAP: 2995 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); 2996 break; 2997 case _KMEM: 2998 ret = memcg_update_kmem_limit(memcg, nr_pages); 2999 break; 3000 } 3001 break; 3002 case RES_SOFT_LIMIT: 3003 memcg->soft_limit = nr_pages; 3004 ret = 0; 3005 break; 3006 } 3007 return ret ?: nbytes; 3008 } 3009 3010 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3011 size_t nbytes, loff_t off) 3012 { 3013 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3014 struct page_counter *counter; 3015 3016 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3017 case _MEM: 3018 counter = &memcg->memory; 3019 break; 3020 case _MEMSWAP: 3021 counter = &memcg->memsw; 3022 break; 3023 case _KMEM: 3024 counter = &memcg->kmem; 3025 break; 3026 default: 3027 BUG(); 3028 } 3029 3030 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3031 case RES_MAX_USAGE: 3032 page_counter_reset_watermark(counter); 3033 break; 3034 case RES_FAILCNT: 3035 counter->failcnt = 0; 3036 break; 3037 default: 3038 BUG(); 3039 } 3040 3041 return nbytes; 3042 } 3043 3044 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3045 struct cftype *cft) 3046 { 3047 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3048 } 3049 3050 #ifdef CONFIG_MMU 3051 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3052 struct cftype *cft, u64 val) 3053 { 3054 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3055 3056 if (val & ~MOVE_MASK) 3057 return -EINVAL; 3058 3059 /* 3060 * No kind of locking is needed in here, because ->can_attach() will 3061 * check this value once in the beginning of the process, and then carry 3062 * on with stale data. This means that changes to this value will only 3063 * affect task migrations starting after the change. 
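 *
 * @val is a bitmask of the MOVE_* flags defined near the top of this file,
 * so e.g. (illustrative) "echo 3 > memory.move_charge_at_immigrate" asks
 * for both anonymous and file charges to follow tasks into this group.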
3064 */ 3065 memcg->move_charge_at_immigrate = val; 3066 return 0; 3067 } 3068 #else 3069 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3070 struct cftype *cft, u64 val) 3071 { 3072 return -ENOSYS; 3073 } 3074 #endif 3075 3076 #ifdef CONFIG_NUMA 3077 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3078 { 3079 struct numa_stat { 3080 const char *name; 3081 unsigned int lru_mask; 3082 }; 3083 3084 static const struct numa_stat stats[] = { 3085 { "total", LRU_ALL }, 3086 { "file", LRU_ALL_FILE }, 3087 { "anon", LRU_ALL_ANON }, 3088 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3089 }; 3090 const struct numa_stat *stat; 3091 int nid; 3092 unsigned long nr; 3093 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3094 3095 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3096 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3097 seq_printf(m, "%s=%lu", stat->name, nr); 3098 for_each_node_state(nid, N_MEMORY) { 3099 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3100 stat->lru_mask); 3101 seq_printf(m, " N%d=%lu", nid, nr); 3102 } 3103 seq_putc(m, '\n'); 3104 } 3105 3106 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3107 struct mem_cgroup *iter; 3108 3109 nr = 0; 3110 for_each_mem_cgroup_tree(iter, memcg) 3111 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3112 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3113 for_each_node_state(nid, N_MEMORY) { 3114 nr = 0; 3115 for_each_mem_cgroup_tree(iter, memcg) 3116 nr += mem_cgroup_node_nr_lru_pages( 3117 iter, nid, stat->lru_mask); 3118 seq_printf(m, " N%d=%lu", nid, nr); 3119 } 3120 seq_putc(m, '\n'); 3121 } 3122 3123 return 0; 3124 } 3125 #endif /* CONFIG_NUMA */ 3126 3127 static int memcg_stat_show(struct seq_file *m, void *v) 3128 { 3129 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3130 unsigned long memory, memsw; 3131 struct mem_cgroup *mi; 3132 unsigned int i; 3133 3134 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) != 3135 MEM_CGROUP_STAT_NSTATS); 3136 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) != 3137 MEM_CGROUP_EVENTS_NSTATS); 3138 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3139 3140 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3141 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3142 continue; 3143 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], 3144 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3145 } 3146 3147 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 3148 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 3149 mem_cgroup_read_events(memcg, i)); 3150 3151 for (i = 0; i < NR_LRU_LISTS; i++) 3152 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3153 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3154 3155 /* Hierarchical information */ 3156 memory = memsw = PAGE_COUNTER_MAX; 3157 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3158 memory = min(memory, mi->memory.limit); 3159 memsw = min(memsw, mi->memsw.limit); 3160 } 3161 seq_printf(m, "hierarchical_memory_limit %llu\n", 3162 (u64)memory * PAGE_SIZE); 3163 if (do_swap_account) 3164 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3165 (u64)memsw * PAGE_SIZE); 3166 3167 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3168 unsigned long long val = 0; 3169 3170 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3171 continue; 3172 for_each_mem_cgroup_tree(mi, memcg) 3173 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3174 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); 3175 } 3176 3177 for (i = 0; i < 
MEM_CGROUP_EVENTS_NSTATS; i++) { 3178 unsigned long long val = 0; 3179 3180 for_each_mem_cgroup_tree(mi, memcg) 3181 val += mem_cgroup_read_events(mi, i); 3182 seq_printf(m, "total_%s %llu\n", 3183 mem_cgroup_events_names[i], val); 3184 } 3185 3186 for (i = 0; i < NR_LRU_LISTS; i++) { 3187 unsigned long long val = 0; 3188 3189 for_each_mem_cgroup_tree(mi, memcg) 3190 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3191 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3192 } 3193 3194 #ifdef CONFIG_DEBUG_VM 3195 { 3196 int nid, zid; 3197 struct mem_cgroup_per_zone *mz; 3198 struct zone_reclaim_stat *rstat; 3199 unsigned long recent_rotated[2] = {0, 0}; 3200 unsigned long recent_scanned[2] = {0, 0}; 3201 3202 for_each_online_node(nid) 3203 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 3204 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 3205 rstat = &mz->lruvec.reclaim_stat; 3206 3207 recent_rotated[0] += rstat->recent_rotated[0]; 3208 recent_rotated[1] += rstat->recent_rotated[1]; 3209 recent_scanned[0] += rstat->recent_scanned[0]; 3210 recent_scanned[1] += rstat->recent_scanned[1]; 3211 } 3212 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3213 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3214 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3215 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3216 } 3217 #endif 3218 3219 return 0; 3220 } 3221 3222 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3223 struct cftype *cft) 3224 { 3225 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3226 3227 return mem_cgroup_swappiness(memcg); 3228 } 3229 3230 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3231 struct cftype *cft, u64 val) 3232 { 3233 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3234 3235 if (val > 100) 3236 return -EINVAL; 3237 3238 if (css->parent) 3239 memcg->swappiness = val; 3240 else 3241 vm_swappiness = val; 3242 3243 return 0; 3244 } 3245 3246 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3247 { 3248 struct mem_cgroup_threshold_ary *t; 3249 unsigned long usage; 3250 int i; 3251 3252 rcu_read_lock(); 3253 if (!swap) 3254 t = rcu_dereference(memcg->thresholds.primary); 3255 else 3256 t = rcu_dereference(memcg->memsw_thresholds.primary); 3257 3258 if (!t) 3259 goto unlock; 3260 3261 usage = mem_cgroup_usage(memcg, swap); 3262 3263 /* 3264 * current_threshold points to threshold just below or equal to usage. 3265 * If it's not true, a threshold was crossed after last 3266 * call of __mem_cgroup_threshold(). 3267 */ 3268 i = t->current_threshold; 3269 3270 /* 3271 * Iterate backward over array of thresholds starting from 3272 * current_threshold and check if a threshold is crossed. 3273 * If none of thresholds below usage is crossed, we read 3274 * only one element of the array here. 3275 */ 3276 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3277 eventfd_signal(t->entries[i].eventfd, 1); 3278 3279 /* i = current_threshold + 1 */ 3280 i++; 3281 3282 /* 3283 * Iterate forward over array of thresholds starting from 3284 * current_threshold+1 and check if a threshold is crossed. 3285 * If none of thresholds above usage is crossed, we read 3286 * only one element of the array here. 
3287 */ 3288 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3289 eventfd_signal(t->entries[i].eventfd, 1); 3290 3291 /* Update current_threshold */ 3292 t->current_threshold = i - 1; 3293 unlock: 3294 rcu_read_unlock(); 3295 } 3296 3297 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3298 { 3299 while (memcg) { 3300 __mem_cgroup_threshold(memcg, false); 3301 if (do_swap_account) 3302 __mem_cgroup_threshold(memcg, true); 3303 3304 memcg = parent_mem_cgroup(memcg); 3305 } 3306 } 3307 3308 static int compare_thresholds(const void *a, const void *b) 3309 { 3310 const struct mem_cgroup_threshold *_a = a; 3311 const struct mem_cgroup_threshold *_b = b; 3312 3313 if (_a->threshold > _b->threshold) 3314 return 1; 3315 3316 if (_a->threshold < _b->threshold) 3317 return -1; 3318 3319 return 0; 3320 } 3321 3322 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3323 { 3324 struct mem_cgroup_eventfd_list *ev; 3325 3326 spin_lock(&memcg_oom_lock); 3327 3328 list_for_each_entry(ev, &memcg->oom_notify, list) 3329 eventfd_signal(ev->eventfd, 1); 3330 3331 spin_unlock(&memcg_oom_lock); 3332 return 0; 3333 } 3334 3335 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3336 { 3337 struct mem_cgroup *iter; 3338 3339 for_each_mem_cgroup_tree(iter, memcg) 3340 mem_cgroup_oom_notify_cb(iter); 3341 } 3342 3343 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3344 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3345 { 3346 struct mem_cgroup_thresholds *thresholds; 3347 struct mem_cgroup_threshold_ary *new; 3348 unsigned long threshold; 3349 unsigned long usage; 3350 int i, size, ret; 3351 3352 ret = page_counter_memparse(args, "-1", &threshold); 3353 if (ret) 3354 return ret; 3355 3356 mutex_lock(&memcg->thresholds_lock); 3357 3358 if (type == _MEM) { 3359 thresholds = &memcg->thresholds; 3360 usage = mem_cgroup_usage(memcg, false); 3361 } else if (type == _MEMSWAP) { 3362 thresholds = &memcg->memsw_thresholds; 3363 usage = mem_cgroup_usage(memcg, true); 3364 } else 3365 BUG(); 3366 3367 /* Check if a threshold crossed before adding a new one */ 3368 if (thresholds->primary) 3369 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3370 3371 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3372 3373 /* Allocate memory for new array of thresholds */ 3374 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3375 GFP_KERNEL); 3376 if (!new) { 3377 ret = -ENOMEM; 3378 goto unlock; 3379 } 3380 new->size = size; 3381 3382 /* Copy thresholds (if any) to new array */ 3383 if (thresholds->primary) { 3384 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3385 sizeof(struct mem_cgroup_threshold)); 3386 } 3387 3388 /* Add new threshold */ 3389 new->entries[size - 1].eventfd = eventfd; 3390 new->entries[size - 1].threshold = threshold; 3391 3392 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3393 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3394 compare_thresholds, NULL); 3395 3396 /* Find current threshold */ 3397 new->current_threshold = -1; 3398 for (i = 0; i < size; i++) { 3399 if (new->entries[i].threshold <= usage) { 3400 /* 3401 * new->current_threshold will not be used until 3402 * rcu_assign_pointer(), so it's safe to increment 3403 * it here. 
3404 */ 3405 ++new->current_threshold; 3406 } else 3407 break; 3408 } 3409 3410 /* Free old spare buffer and save old primary buffer as spare */ 3411 kfree(thresholds->spare); 3412 thresholds->spare = thresholds->primary; 3413 3414 rcu_assign_pointer(thresholds->primary, new); 3415 3416 /* To be sure that nobody uses thresholds */ 3417 synchronize_rcu(); 3418 3419 unlock: 3420 mutex_unlock(&memcg->thresholds_lock); 3421 3422 return ret; 3423 } 3424 3425 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3426 struct eventfd_ctx *eventfd, const char *args) 3427 { 3428 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3429 } 3430 3431 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3432 struct eventfd_ctx *eventfd, const char *args) 3433 { 3434 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3435 } 3436 3437 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3438 struct eventfd_ctx *eventfd, enum res_type type) 3439 { 3440 struct mem_cgroup_thresholds *thresholds; 3441 struct mem_cgroup_threshold_ary *new; 3442 unsigned long usage; 3443 int i, j, size; 3444 3445 mutex_lock(&memcg->thresholds_lock); 3446 3447 if (type == _MEM) { 3448 thresholds = &memcg->thresholds; 3449 usage = mem_cgroup_usage(memcg, false); 3450 } else if (type == _MEMSWAP) { 3451 thresholds = &memcg->memsw_thresholds; 3452 usage = mem_cgroup_usage(memcg, true); 3453 } else 3454 BUG(); 3455 3456 if (!thresholds->primary) 3457 goto unlock; 3458 3459 /* Check if a threshold crossed before removing */ 3460 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3461 3462 /* Calculate new number of threshold */ 3463 size = 0; 3464 for (i = 0; i < thresholds->primary->size; i++) { 3465 if (thresholds->primary->entries[i].eventfd != eventfd) 3466 size++; 3467 } 3468 3469 new = thresholds->spare; 3470 3471 /* Set thresholds array to NULL if we don't have thresholds */ 3472 if (!size) { 3473 kfree(new); 3474 new = NULL; 3475 goto swap_buffers; 3476 } 3477 3478 new->size = size; 3479 3480 /* Copy thresholds and find current threshold */ 3481 new->current_threshold = -1; 3482 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3483 if (thresholds->primary->entries[i].eventfd == eventfd) 3484 continue; 3485 3486 new->entries[j] = thresholds->primary->entries[i]; 3487 if (new->entries[j].threshold <= usage) { 3488 /* 3489 * new->current_threshold will not be used 3490 * until rcu_assign_pointer(), so it's safe to increment 3491 * it here. 
3492 */ 3493 ++new->current_threshold; 3494 } 3495 j++; 3496 } 3497 3498 swap_buffers: 3499 /* Swap primary and spare array */ 3500 thresholds->spare = thresholds->primary; 3501 /* If all events are unregistered, free the spare array */ 3502 if (!new) { 3503 kfree(thresholds->spare); 3504 thresholds->spare = NULL; 3505 } 3506 3507 rcu_assign_pointer(thresholds->primary, new); 3508 3509 /* To be sure that nobody uses thresholds */ 3510 synchronize_rcu(); 3511 unlock: 3512 mutex_unlock(&memcg->thresholds_lock); 3513 } 3514 3515 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3516 struct eventfd_ctx *eventfd) 3517 { 3518 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3519 } 3520 3521 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3522 struct eventfd_ctx *eventfd) 3523 { 3524 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3525 } 3526 3527 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3528 struct eventfd_ctx *eventfd, const char *args) 3529 { 3530 struct mem_cgroup_eventfd_list *event; 3531 3532 event = kmalloc(sizeof(*event), GFP_KERNEL); 3533 if (!event) 3534 return -ENOMEM; 3535 3536 spin_lock(&memcg_oom_lock); 3537 3538 event->eventfd = eventfd; 3539 list_add(&event->list, &memcg->oom_notify); 3540 3541 /* already in OOM ? */ 3542 if (memcg->under_oom) 3543 eventfd_signal(eventfd, 1); 3544 spin_unlock(&memcg_oom_lock); 3545 3546 return 0; 3547 } 3548 3549 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3550 struct eventfd_ctx *eventfd) 3551 { 3552 struct mem_cgroup_eventfd_list *ev, *tmp; 3553 3554 spin_lock(&memcg_oom_lock); 3555 3556 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3557 if (ev->eventfd == eventfd) { 3558 list_del(&ev->list); 3559 kfree(ev); 3560 } 3561 } 3562 3563 spin_unlock(&memcg_oom_lock); 3564 } 3565 3566 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3567 { 3568 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3569 3570 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3571 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3572 return 0; 3573 } 3574 3575 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3576 struct cftype *cft, u64 val) 3577 { 3578 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3579 3580 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3581 if (!css->parent || !((val == 0) || (val == 1))) 3582 return -EINVAL; 3583 3584 memcg->oom_kill_disable = val; 3585 if (!val) 3586 memcg_oom_recover(memcg); 3587 3588 return 0; 3589 } 3590 3591 #ifdef CONFIG_MEMCG_KMEM 3592 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 3593 { 3594 int ret; 3595 3596 ret = memcg_propagate_kmem(memcg); 3597 if (ret) 3598 return ret; 3599 3600 return mem_cgroup_sockets_init(memcg, ss); 3601 } 3602 3603 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) 3604 { 3605 struct cgroup_subsys_state *css; 3606 struct mem_cgroup *parent, *child; 3607 int kmemcg_id; 3608 3609 if (!memcg->kmem_acct_active) 3610 return; 3611 3612 /* 3613 * Clear the 'active' flag before clearing memcg_caches arrays entries. 3614 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it 3615 * guarantees no cache will be created for this cgroup after we are 3616 * done (see memcg_create_kmem_cache()). 
3617 */ 3618 memcg->kmem_acct_active = false; 3619 3620 memcg_deactivate_kmem_caches(memcg); 3621 3622 kmemcg_id = memcg->kmemcg_id; 3623 BUG_ON(kmemcg_id < 0); 3624 3625 parent = parent_mem_cgroup(memcg); 3626 if (!parent) 3627 parent = root_mem_cgroup; 3628 3629 /* 3630 * Change kmemcg_id of this cgroup and all its descendants to the 3631 * parent's id, and then move all entries from this cgroup's list_lrus 3632 * to ones of the parent. After we have finished, all list_lrus 3633 * corresponding to this cgroup are guaranteed to remain empty. The 3634 * ordering is imposed by list_lru_node->lock taken by 3635 * memcg_drain_all_list_lrus(). 3636 */ 3637 css_for_each_descendant_pre(css, &memcg->css) { 3638 child = mem_cgroup_from_css(css); 3639 BUG_ON(child->kmemcg_id != kmemcg_id); 3640 child->kmemcg_id = parent->kmemcg_id; 3641 if (!memcg->use_hierarchy) 3642 break; 3643 } 3644 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 3645 3646 memcg_free_cache_id(kmemcg_id); 3647 } 3648 3649 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 3650 { 3651 if (memcg->kmem_acct_activated) { 3652 memcg_destroy_kmem_caches(memcg); 3653 static_key_slow_dec(&memcg_kmem_enabled_key); 3654 WARN_ON(page_counter_read(&memcg->kmem)); 3655 } 3656 mem_cgroup_sockets_destroy(memcg); 3657 } 3658 #else 3659 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 3660 { 3661 return 0; 3662 } 3663 3664 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) 3665 { 3666 } 3667 3668 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 3669 { 3670 } 3671 #endif 3672 3673 #ifdef CONFIG_CGROUP_WRITEBACK 3674 3675 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) 3676 { 3677 return &memcg->cgwb_list; 3678 } 3679 3680 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3681 { 3682 return wb_domain_init(&memcg->cgwb_domain, gfp); 3683 } 3684 3685 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3686 { 3687 wb_domain_exit(&memcg->cgwb_domain); 3688 } 3689 3690 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3691 { 3692 wb_domain_size_changed(&memcg->cgwb_domain); 3693 } 3694 3695 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3696 { 3697 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3698 3699 if (!memcg->css.parent) 3700 return NULL; 3701 3702 return &memcg->cgwb_domain; 3703 } 3704 3705 /** 3706 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3707 * @wb: bdi_writeback in question 3708 * @pfilepages: out parameter for number of file pages 3709 * @pheadroom: out parameter for number of allocatable pages according to memcg 3710 * @pdirty: out parameter for number of dirty pages 3711 * @pwriteback: out parameter for number of pages under writeback 3712 * 3713 * Determine the numbers of file, headroom, dirty, and writeback pages in 3714 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3715 * is a bit more involved. 3716 * 3717 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3718 * headroom is calculated as the lowest headroom of itself and the 3719 * ancestors. Note that this doesn't consider the actual amount of 3720 * available memory in the system. The caller should further cap 3721 * *@pheadroom accordingly. 
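 *
 * A small worked example (numbers made up): if this memcg has
 * limit = 200, high = 150 and used = 120 pages, its own headroom is
 * min(200, 150) - 120 = 30 pages; if an ancestor's headroom works out
 * to 20 pages, *@pheadroom ends up as min(30, 20) = 20.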
3722 */ 3723 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3724 unsigned long *pheadroom, unsigned long *pdirty, 3725 unsigned long *pwriteback) 3726 { 3727 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3728 struct mem_cgroup *parent; 3729 3730 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3731 3732 /* this should eventually include NR_UNSTABLE_NFS */ 3733 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3734 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3735 (1 << LRU_ACTIVE_FILE)); 3736 *pheadroom = PAGE_COUNTER_MAX; 3737 3738 while ((parent = parent_mem_cgroup(memcg))) { 3739 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3740 unsigned long used = page_counter_read(&memcg->memory); 3741 3742 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3743 memcg = parent; 3744 } 3745 } 3746 3747 #else /* CONFIG_CGROUP_WRITEBACK */ 3748 3749 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3750 { 3751 return 0; 3752 } 3753 3754 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3755 { 3756 } 3757 3758 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3759 { 3760 } 3761 3762 #endif /* CONFIG_CGROUP_WRITEBACK */ 3763 3764 /* 3765 * DO NOT USE IN NEW FILES. 3766 * 3767 * "cgroup.event_control" implementation. 3768 * 3769 * This is way over-engineered. It tries to support fully configurable 3770 * events for each user. Such level of flexibility is completely 3771 * unnecessary especially in the light of the planned unified hierarchy. 3772 * 3773 * Please deprecate this and replace with something simpler if at all 3774 * possible. 3775 */ 3776 3777 /* 3778 * Unregister event and free resources. 3779 * 3780 * Gets called from workqueue. 3781 */ 3782 static void memcg_event_remove(struct work_struct *work) 3783 { 3784 struct mem_cgroup_event *event = 3785 container_of(work, struct mem_cgroup_event, remove); 3786 struct mem_cgroup *memcg = event->memcg; 3787 3788 remove_wait_queue(event->wqh, &event->wait); 3789 3790 event->unregister_event(memcg, event->eventfd); 3791 3792 /* Notify userspace the event is going away. */ 3793 eventfd_signal(event->eventfd, 1); 3794 3795 eventfd_ctx_put(event->eventfd); 3796 kfree(event); 3797 css_put(&memcg->css); 3798 } 3799 3800 /* 3801 * Gets called on POLLHUP on eventfd when user closes it. 3802 * 3803 * Called with wqh->lock held and interrupts disabled. 3804 */ 3805 static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 3806 int sync, void *key) 3807 { 3808 struct mem_cgroup_event *event = 3809 container_of(wait, struct mem_cgroup_event, wait); 3810 struct mem_cgroup *memcg = event->memcg; 3811 unsigned long flags = (unsigned long)key; 3812 3813 if (flags & POLLHUP) { 3814 /* 3815 * If the event has been detached at cgroup removal, we 3816 * can simply return knowing the other side will cleanup 3817 * for us. 3818 * 3819 * We can't race against event freeing since the other 3820 * side will require wqh->lock via remove_wait_queue(), 3821 * which we hold. 3822 */ 3823 spin_lock(&memcg->event_list_lock); 3824 if (!list_empty(&event->list)) { 3825 list_del_init(&event->list); 3826 /* 3827 * We are in atomic context, but cgroup_event_remove() 3828 * may sleep, so we have to call it in workqueue. 
3829 */ 3830 schedule_work(&event->remove); 3831 } 3832 spin_unlock(&memcg->event_list_lock); 3833 } 3834 3835 return 0; 3836 } 3837 3838 static void memcg_event_ptable_queue_proc(struct file *file, 3839 wait_queue_head_t *wqh, poll_table *pt) 3840 { 3841 struct mem_cgroup_event *event = 3842 container_of(pt, struct mem_cgroup_event, pt); 3843 3844 event->wqh = wqh; 3845 add_wait_queue(wqh, &event->wait); 3846 } 3847 3848 /* 3849 * DO NOT USE IN NEW FILES. 3850 * 3851 * Parse input and register new cgroup event handler. 3852 * 3853 * Input must be in format '<event_fd> <control_fd> <args>'. 3854 * Interpretation of args is defined by control file implementation. 3855 */ 3856 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3857 char *buf, size_t nbytes, loff_t off) 3858 { 3859 struct cgroup_subsys_state *css = of_css(of); 3860 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3861 struct mem_cgroup_event *event; 3862 struct cgroup_subsys_state *cfile_css; 3863 unsigned int efd, cfd; 3864 struct fd efile; 3865 struct fd cfile; 3866 const char *name; 3867 char *endp; 3868 int ret; 3869 3870 buf = strstrip(buf); 3871 3872 efd = simple_strtoul(buf, &endp, 10); 3873 if (*endp != ' ') 3874 return -EINVAL; 3875 buf = endp + 1; 3876 3877 cfd = simple_strtoul(buf, &endp, 10); 3878 if ((*endp != ' ') && (*endp != '\0')) 3879 return -EINVAL; 3880 buf = endp + 1; 3881 3882 event = kzalloc(sizeof(*event), GFP_KERNEL); 3883 if (!event) 3884 return -ENOMEM; 3885 3886 event->memcg = memcg; 3887 INIT_LIST_HEAD(&event->list); 3888 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3889 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3890 INIT_WORK(&event->remove, memcg_event_remove); 3891 3892 efile = fdget(efd); 3893 if (!efile.file) { 3894 ret = -EBADF; 3895 goto out_kfree; 3896 } 3897 3898 event->eventfd = eventfd_ctx_fileget(efile.file); 3899 if (IS_ERR(event->eventfd)) { 3900 ret = PTR_ERR(event->eventfd); 3901 goto out_put_efile; 3902 } 3903 3904 cfile = fdget(cfd); 3905 if (!cfile.file) { 3906 ret = -EBADF; 3907 goto out_put_eventfd; 3908 } 3909 3910 /* the process need read permission on control file */ 3911 /* AV: shouldn't we check that it's been opened for read instead? */ 3912 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3913 if (ret < 0) 3914 goto out_put_cfile; 3915 3916 /* 3917 * Determine the event callbacks and set them in @event. This used 3918 * to be done via struct cftype but cgroup core no longer knows 3919 * about these events. The following is crude but the whole thing 3920 * is for compatibility anyway. 3921 * 3922 * DO NOT ADD NEW FILES. 
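 *
 * For example (illustrative), a registration string of "4 7 10485760",
 * where fd 4 is an eventfd and fd 7 is an open memory.usage_in_bytes,
 * ends up arming a 10M usage threshold via
 * mem_cgroup_usage_register_event().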
3923 */ 3924 name = cfile.file->f_path.dentry->d_name.name; 3925 3926 if (!strcmp(name, "memory.usage_in_bytes")) { 3927 event->register_event = mem_cgroup_usage_register_event; 3928 event->unregister_event = mem_cgroup_usage_unregister_event; 3929 } else if (!strcmp(name, "memory.oom_control")) { 3930 event->register_event = mem_cgroup_oom_register_event; 3931 event->unregister_event = mem_cgroup_oom_unregister_event; 3932 } else if (!strcmp(name, "memory.pressure_level")) { 3933 event->register_event = vmpressure_register_event; 3934 event->unregister_event = vmpressure_unregister_event; 3935 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3936 event->register_event = memsw_cgroup_usage_register_event; 3937 event->unregister_event = memsw_cgroup_usage_unregister_event; 3938 } else { 3939 ret = -EINVAL; 3940 goto out_put_cfile; 3941 } 3942 3943 /* 3944 * Verify @cfile should belong to @css. Also, remaining events are 3945 * automatically removed on cgroup destruction but the removal is 3946 * asynchronous, so take an extra ref on @css. 3947 */ 3948 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3949 &memory_cgrp_subsys); 3950 ret = -EINVAL; 3951 if (IS_ERR(cfile_css)) 3952 goto out_put_cfile; 3953 if (cfile_css != css) { 3954 css_put(cfile_css); 3955 goto out_put_cfile; 3956 } 3957 3958 ret = event->register_event(memcg, event->eventfd, buf); 3959 if (ret) 3960 goto out_put_css; 3961 3962 efile.file->f_op->poll(efile.file, &event->pt); 3963 3964 spin_lock(&memcg->event_list_lock); 3965 list_add(&event->list, &memcg->event_list); 3966 spin_unlock(&memcg->event_list_lock); 3967 3968 fdput(cfile); 3969 fdput(efile); 3970 3971 return nbytes; 3972 3973 out_put_css: 3974 css_put(css); 3975 out_put_cfile: 3976 fdput(cfile); 3977 out_put_eventfd: 3978 eventfd_ctx_put(event->eventfd); 3979 out_put_efile: 3980 fdput(efile); 3981 out_kfree: 3982 kfree(event); 3983 3984 return ret; 3985 } 3986 3987 static struct cftype mem_cgroup_legacy_files[] = { 3988 { 3989 .name = "usage_in_bytes", 3990 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 3991 .read_u64 = mem_cgroup_read_u64, 3992 }, 3993 { 3994 .name = "max_usage_in_bytes", 3995 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 3996 .write = mem_cgroup_reset, 3997 .read_u64 = mem_cgroup_read_u64, 3998 }, 3999 { 4000 .name = "limit_in_bytes", 4001 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4002 .write = mem_cgroup_write, 4003 .read_u64 = mem_cgroup_read_u64, 4004 }, 4005 { 4006 .name = "soft_limit_in_bytes", 4007 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4008 .write = mem_cgroup_write, 4009 .read_u64 = mem_cgroup_read_u64, 4010 }, 4011 { 4012 .name = "failcnt", 4013 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4014 .write = mem_cgroup_reset, 4015 .read_u64 = mem_cgroup_read_u64, 4016 }, 4017 { 4018 .name = "stat", 4019 .seq_show = memcg_stat_show, 4020 }, 4021 { 4022 .name = "force_empty", 4023 .write = mem_cgroup_force_empty_write, 4024 }, 4025 { 4026 .name = "use_hierarchy", 4027 .write_u64 = mem_cgroup_hierarchy_write, 4028 .read_u64 = mem_cgroup_hierarchy_read, 4029 }, 4030 { 4031 .name = "cgroup.event_control", /* XXX: for compat */ 4032 .write = memcg_write_event_control, 4033 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4034 }, 4035 { 4036 .name = "swappiness", 4037 .read_u64 = mem_cgroup_swappiness_read, 4038 .write_u64 = mem_cgroup_swappiness_write, 4039 }, 4040 { 4041 .name = "move_charge_at_immigrate", 4042 .read_u64 = mem_cgroup_move_charge_read, 4043 .write_u64 = 
mem_cgroup_move_charge_write, 4044 }, 4045 { 4046 .name = "oom_control", 4047 .seq_show = mem_cgroup_oom_control_read, 4048 .write_u64 = mem_cgroup_oom_control_write, 4049 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4050 }, 4051 { 4052 .name = "pressure_level", 4053 }, 4054 #ifdef CONFIG_NUMA 4055 { 4056 .name = "numa_stat", 4057 .seq_show = memcg_numa_stat_show, 4058 }, 4059 #endif 4060 #ifdef CONFIG_MEMCG_KMEM 4061 { 4062 .name = "kmem.limit_in_bytes", 4063 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4064 .write = mem_cgroup_write, 4065 .read_u64 = mem_cgroup_read_u64, 4066 }, 4067 { 4068 .name = "kmem.usage_in_bytes", 4069 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4070 .read_u64 = mem_cgroup_read_u64, 4071 }, 4072 { 4073 .name = "kmem.failcnt", 4074 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4075 .write = mem_cgroup_reset, 4076 .read_u64 = mem_cgroup_read_u64, 4077 }, 4078 { 4079 .name = "kmem.max_usage_in_bytes", 4080 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4081 .write = mem_cgroup_reset, 4082 .read_u64 = mem_cgroup_read_u64, 4083 }, 4084 #ifdef CONFIG_SLABINFO 4085 { 4086 .name = "kmem.slabinfo", 4087 .seq_start = slab_start, 4088 .seq_next = slab_next, 4089 .seq_stop = slab_stop, 4090 .seq_show = memcg_slab_show, 4091 }, 4092 #endif 4093 #endif 4094 { }, /* terminate */ 4095 }; 4096 4097 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4098 { 4099 struct mem_cgroup_per_node *pn; 4100 struct mem_cgroup_per_zone *mz; 4101 int zone, tmp = node; 4102 /* 4103 * This routine is called against possible nodes. 4104 * But it's BUG to call kmalloc() against offline node. 4105 * 4106 * TODO: this routine can waste much memory for nodes which will 4107 * never be onlined. It's better to use memory hotplug callback 4108 * function. 4109 */ 4110 if (!node_state(node, N_NORMAL_MEMORY)) 4111 tmp = -1; 4112 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4113 if (!pn) 4114 return 1; 4115 4116 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4117 mz = &pn->zoneinfo[zone]; 4118 lruvec_init(&mz->lruvec); 4119 mz->usage_in_excess = 0; 4120 mz->on_tree = false; 4121 mz->memcg = memcg; 4122 } 4123 memcg->nodeinfo[node] = pn; 4124 return 0; 4125 } 4126 4127 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4128 { 4129 kfree(memcg->nodeinfo[node]); 4130 } 4131 4132 static struct mem_cgroup *mem_cgroup_alloc(void) 4133 { 4134 struct mem_cgroup *memcg; 4135 size_t size; 4136 4137 size = sizeof(struct mem_cgroup); 4138 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4139 4140 memcg = kzalloc(size, GFP_KERNEL); 4141 if (!memcg) 4142 return NULL; 4143 4144 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4145 if (!memcg->stat) 4146 goto out_free; 4147 4148 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4149 goto out_free_stat; 4150 4151 return memcg; 4152 4153 out_free_stat: 4154 free_percpu(memcg->stat); 4155 out_free: 4156 kfree(memcg); 4157 return NULL; 4158 } 4159 4160 /* 4161 * At destroying mem_cgroup, references from swap_cgroup can remain. 4162 * (scanning all at force_empty is too costly...) 4163 * 4164 * Instead of clearing all references at force_empty, we remember 4165 * the number of reference from swap_cgroup and free mem_cgroup when 4166 * it goes down to 0. 4167 * 4168 * Removal of cgroup itself succeeds regardless of refs from swap. 
4169 */ 4170 4171 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4172 { 4173 int node; 4174 4175 mem_cgroup_remove_from_trees(memcg); 4176 4177 for_each_node(node) 4178 free_mem_cgroup_per_zone_info(memcg, node); 4179 4180 free_percpu(memcg->stat); 4181 memcg_wb_domain_exit(memcg); 4182 kfree(memcg); 4183 } 4184 4185 /* 4186 * Returns the parent mem_cgroup in the memory cgroup hierarchy when hierarchy is enabled. 4187 */ 4188 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) 4189 { 4190 if (!memcg->memory.parent) 4191 return NULL; 4192 return mem_cgroup_from_counter(memcg->memory.parent, memory); 4193 } 4194 EXPORT_SYMBOL(parent_mem_cgroup); 4195 4196 static struct cgroup_subsys_state * __ref 4197 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4198 { 4199 struct mem_cgroup *memcg; 4200 long error = -ENOMEM; 4201 int node; 4202 4203 memcg = mem_cgroup_alloc(); 4204 if (!memcg) 4205 return ERR_PTR(error); 4206 4207 for_each_node(node) 4208 if (alloc_mem_cgroup_per_zone_info(memcg, node)) 4209 goto free_out; 4210 4211 /* root ? */ 4212 if (parent_css == NULL) { 4213 root_mem_cgroup = memcg; 4214 mem_cgroup_root_css = &memcg->css; 4215 page_counter_init(&memcg->memory, NULL); 4216 memcg->high = PAGE_COUNTER_MAX; 4217 memcg->soft_limit = PAGE_COUNTER_MAX; 4218 page_counter_init(&memcg->memsw, NULL); 4219 page_counter_init(&memcg->kmem, NULL); 4220 } 4221 4222 memcg->last_scanned_node = MAX_NUMNODES; 4223 INIT_LIST_HEAD(&memcg->oom_notify); 4224 memcg->move_charge_at_immigrate = 0; 4225 mutex_init(&memcg->thresholds_lock); 4226 spin_lock_init(&memcg->move_lock); 4227 vmpressure_init(&memcg->vmpressure); 4228 INIT_LIST_HEAD(&memcg->event_list); 4229 spin_lock_init(&memcg->event_list_lock); 4230 #ifdef CONFIG_MEMCG_KMEM 4231 memcg->kmemcg_id = -1; 4232 #endif 4233 #ifdef CONFIG_CGROUP_WRITEBACK 4234 INIT_LIST_HEAD(&memcg->cgwb_list); 4235 #endif 4236 return &memcg->css; 4237 4238 free_out: 4239 __mem_cgroup_free(memcg); 4240 return ERR_PTR(error); 4241 } 4242 4243 static int 4244 mem_cgroup_css_online(struct cgroup_subsys_state *css) 4245 { 4246 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4247 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); 4248 int ret; 4249 4250 if (css->id > MEM_CGROUP_ID_MAX) 4251 return -ENOSPC; 4252 4253 if (!parent) 4254 return 0; 4255 4256 mutex_lock(&memcg_create_mutex); 4257 4258 memcg->use_hierarchy = parent->use_hierarchy; 4259 memcg->oom_kill_disable = parent->oom_kill_disable; 4260 memcg->swappiness = mem_cgroup_swappiness(parent); 4261 4262 if (parent->use_hierarchy) { 4263 page_counter_init(&memcg->memory, &parent->memory); 4264 memcg->high = PAGE_COUNTER_MAX; 4265 memcg->soft_limit = PAGE_COUNTER_MAX; 4266 page_counter_init(&memcg->memsw, &parent->memsw); 4267 page_counter_init(&memcg->kmem, &parent->kmem); 4268 4269 /* 4270 * No need to take a reference to the parent because cgroup 4271 * core guarantees its existence. 4272 */ 4273 } else { 4274 page_counter_init(&memcg->memory, NULL); 4275 memcg->high = PAGE_COUNTER_MAX; 4276 memcg->soft_limit = PAGE_COUNTER_MAX; 4277 page_counter_init(&memcg->memsw, NULL); 4278 page_counter_init(&memcg->kmem, NULL); 4279 /* 4280 * Deeper hierarchy with use_hierarchy == false doesn't make 4281 * much sense, so let the cgroup subsystem know about this 4282 * unfortunate state in our controller.
4283 */ 4284 if (parent != root_mem_cgroup) 4285 memory_cgrp_subsys.broken_hierarchy = true; 4286 } 4287 mutex_unlock(&memcg_create_mutex); 4288 4289 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); 4290 if (ret) 4291 return ret; 4292 4293 /* 4294 * Make sure the memcg is initialized: mem_cgroup_iter() 4295 * orders reading memcg->initialized against its callers 4296 * reading the memcg members. 4297 */ 4298 smp_store_release(&memcg->initialized, 1); 4299 4300 return 0; 4301 } 4302 4303 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4304 { 4305 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4306 struct mem_cgroup_event *event, *tmp; 4307 4308 /* 4309 * Unregister events and notify userspace. 4310 * Notify userspace about cgroup removing only after rmdir of cgroup 4311 * directory to avoid race between userspace and kernelspace. 4312 */ 4313 spin_lock(&memcg->event_list_lock); 4314 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4315 list_del_init(&event->list); 4316 schedule_work(&event->remove); 4317 } 4318 spin_unlock(&memcg->event_list_lock); 4319 4320 vmpressure_cleanup(&memcg->vmpressure); 4321 4322 memcg_deactivate_kmem(memcg); 4323 4324 wb_memcg_offline(memcg); 4325 } 4326 4327 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4328 { 4329 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4330 4331 memcg_destroy_kmem(memcg); 4332 __mem_cgroup_free(memcg); 4333 } 4334 4335 /** 4336 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4337 * @css: the target css 4338 * 4339 * Reset the states of the mem_cgroup associated with @css. This is 4340 * invoked when the userland requests disabling on the default hierarchy 4341 * but the memcg is pinned through dependency. The memcg should stop 4342 * applying policies and should revert to the vanilla state as it may be 4343 * made visible again. 4344 * 4345 * The current implementation only resets the essential configurations. 4346 * This needs to be expanded to cover all the visible parts. 4347 */ 4348 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4349 { 4350 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4351 4352 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); 4353 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); 4354 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); 4355 memcg->low = 0; 4356 memcg->high = PAGE_COUNTER_MAX; 4357 memcg->soft_limit = PAGE_COUNTER_MAX; 4358 memcg_wb_domain_size_changed(memcg); 4359 } 4360 4361 #ifdef CONFIG_MMU 4362 /* Handlers for move charge at task migration. */ 4363 static int mem_cgroup_do_precharge(unsigned long count) 4364 { 4365 int ret; 4366 4367 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4368 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4369 if (!ret) { 4370 mc.precharge += count; 4371 return ret; 4372 } 4373 4374 /* Try charges one by one with reclaim */ 4375 while (count--) { 4376 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4377 if (ret) 4378 return ret; 4379 mc.precharge++; 4380 cond_resched(); 4381 } 4382 return 0; 4383 } 4384 4385 /** 4386 * get_mctgt_type - get target type of moving charge 4387 * @vma: the vma the pte to be checked belongs 4388 * @addr: the address corresponding to the pte to be checked 4389 * @ptent: the pte to be checked 4390 * @target: the pointer the target page or swap ent will be stored(can be NULL) 4391 * 4392 * Returns 4393 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 
4394 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4395 * move charge. if @target is not NULL, the page is stored in target->page 4396 * with extra refcnt got(Callers should handle it). 4397 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4398 * target for charge migration. if @target is not NULL, the entry is stored 4399 * in target->ent. 4400 * 4401 * Called with pte lock held. 4402 */ 4403 union mc_target { 4404 struct page *page; 4405 swp_entry_t ent; 4406 }; 4407 4408 enum mc_target_type { 4409 MC_TARGET_NONE = 0, 4410 MC_TARGET_PAGE, 4411 MC_TARGET_SWAP, 4412 }; 4413 4414 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4415 unsigned long addr, pte_t ptent) 4416 { 4417 struct page *page = vm_normal_page(vma, addr, ptent); 4418 4419 if (!page || !page_mapped(page)) 4420 return NULL; 4421 if (PageAnon(page)) { 4422 if (!(mc.flags & MOVE_ANON)) 4423 return NULL; 4424 } else { 4425 if (!(mc.flags & MOVE_FILE)) 4426 return NULL; 4427 } 4428 if (!get_page_unless_zero(page)) 4429 return NULL; 4430 4431 return page; 4432 } 4433 4434 #ifdef CONFIG_SWAP 4435 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4436 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4437 { 4438 struct page *page = NULL; 4439 swp_entry_t ent = pte_to_swp_entry(ptent); 4440 4441 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4442 return NULL; 4443 /* 4444 * Because lookup_swap_cache() updates some statistics counter, 4445 * we call find_get_page() with swapper_space directly. 4446 */ 4447 page = find_get_page(swap_address_space(ent), ent.val); 4448 if (do_swap_account) 4449 entry->val = ent.val; 4450 4451 return page; 4452 } 4453 #else 4454 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4455 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4456 { 4457 return NULL; 4458 } 4459 #endif 4460 4461 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4462 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4463 { 4464 struct page *page = NULL; 4465 struct address_space *mapping; 4466 pgoff_t pgoff; 4467 4468 if (!vma->vm_file) /* anonymous vma */ 4469 return NULL; 4470 if (!(mc.flags & MOVE_FILE)) 4471 return NULL; 4472 4473 mapping = vma->vm_file->f_mapping; 4474 pgoff = linear_page_index(vma, addr); 4475 4476 /* page is moved even if it's not RSS of this task(page-faulted). */ 4477 #ifdef CONFIG_SWAP 4478 /* shmem/tmpfs may report page out on swap: account for that too. */ 4479 if (shmem_mapping(mapping)) { 4480 page = find_get_entry(mapping, pgoff); 4481 if (radix_tree_exceptional_entry(page)) { 4482 swp_entry_t swp = radix_to_swp_entry(page); 4483 if (do_swap_account) 4484 *entry = swp; 4485 page = find_get_page(swap_address_space(swp), swp.val); 4486 } 4487 } else 4488 page = find_get_page(mapping, pgoff); 4489 #else 4490 page = find_get_page(mapping, pgoff); 4491 #endif 4492 return page; 4493 } 4494 4495 /** 4496 * mem_cgroup_move_account - move account of the page 4497 * @page: the page 4498 * @nr_pages: number of regular pages (>1 for huge pages) 4499 * @from: mem_cgroup which the page is moved from. 4500 * @to: mem_cgroup which the page is moved to. @from != @to. 4501 * 4502 * The caller must confirm following. 4503 * - page is not on LRU (isolate_page() is useful.) 4504 * - compound_lock is held when nr_pages > 1 4505 * 4506 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4507 * from old cgroup. 
4508 */ 4509 static int mem_cgroup_move_account(struct page *page, 4510 unsigned int nr_pages, 4511 struct mem_cgroup *from, 4512 struct mem_cgroup *to) 4513 { 4514 unsigned long flags; 4515 int ret; 4516 bool anon; 4517 4518 VM_BUG_ON(from == to); 4519 VM_BUG_ON_PAGE(PageLRU(page), page); 4520 /* 4521 * The page is isolated from LRU. So, collapse function 4522 * will not handle this page. But page splitting can happen. 4523 * Do this check under compound_page_lock(). The caller should 4524 * hold it. 4525 */ 4526 ret = -EBUSY; 4527 if (nr_pages > 1 && !PageTransHuge(page)) 4528 goto out; 4529 4530 /* 4531 * Prevent mem_cgroup_replace_page() from looking at 4532 * page->mem_cgroup of its source page while we change it. 4533 */ 4534 if (!trylock_page(page)) 4535 goto out; 4536 4537 ret = -EINVAL; 4538 if (page->mem_cgroup != from) 4539 goto out_unlock; 4540 4541 anon = PageAnon(page); 4542 4543 spin_lock_irqsave(&from->move_lock, flags); 4544 4545 if (!anon && page_mapped(page)) { 4546 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4547 nr_pages); 4548 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4549 nr_pages); 4550 } 4551 4552 /* 4553 * move_lock grabbed above and caller set from->moving_account, so 4554 * mem_cgroup_update_page_stat() will serialize updates to PageDirty. 4555 * So mapping should be stable for dirty pages. 4556 */ 4557 if (!anon && PageDirty(page)) { 4558 struct address_space *mapping = page_mapping(page); 4559 4560 if (mapping_cap_account_dirty(mapping)) { 4561 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY], 4562 nr_pages); 4563 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY], 4564 nr_pages); 4565 } 4566 } 4567 4568 if (PageWriteback(page)) { 4569 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4570 nr_pages); 4571 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4572 nr_pages); 4573 } 4574 4575 /* 4576 * It is safe to change page->mem_cgroup here because the page 4577 * is referenced, charged, and isolated - we can't race with 4578 * uncharging, charging, migration, or LRU putback. 4579 */ 4580 4581 /* caller should have done css_get */ 4582 page->mem_cgroup = to; 4583 spin_unlock_irqrestore(&from->move_lock, flags); 4584 4585 ret = 0; 4586 4587 local_irq_disable(); 4588 mem_cgroup_charge_statistics(to, page, nr_pages); 4589 memcg_check_events(to, page); 4590 mem_cgroup_charge_statistics(from, page, -nr_pages); 4591 memcg_check_events(from, page); 4592 local_irq_enable(); 4593 out_unlock: 4594 unlock_page(page); 4595 out: 4596 return ret; 4597 } 4598 4599 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4600 unsigned long addr, pte_t ptent, union mc_target *target) 4601 { 4602 struct page *page = NULL; 4603 enum mc_target_type ret = MC_TARGET_NONE; 4604 swp_entry_t ent = { .val = 0 }; 4605 4606 if (pte_present(ptent)) 4607 page = mc_handle_present_pte(vma, addr, ptent); 4608 else if (is_swap_pte(ptent)) 4609 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 4610 else if (pte_none(ptent)) 4611 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4612 4613 if (!page && !ent.val) 4614 return ret; 4615 if (page) { 4616 /* 4617 * Do only loose check w/o serialization. 4618 * mem_cgroup_move_account() checks the page is valid or 4619 * not under LRU exclusion. 
4620 */ 4621 if (page->mem_cgroup == mc.from) { 4622 ret = MC_TARGET_PAGE; 4623 if (target) 4624 target->page = page; 4625 } 4626 if (!ret || !target) 4627 put_page(page); 4628 } 4629 /* There is a swap entry and a page doesn't exist or isn't charged */ 4630 if (ent.val && !ret && 4631 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4632 ret = MC_TARGET_SWAP; 4633 if (target) 4634 target->ent = ent; 4635 } 4636 return ret; 4637 } 4638 4639 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4640 /* 4641 * We don't consider swapping or file mapped pages because THP does not 4642 * support them for now. 4643 * Caller should make sure that pmd_trans_huge(pmd) is true. 4644 */ 4645 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4646 unsigned long addr, pmd_t pmd, union mc_target *target) 4647 { 4648 struct page *page = NULL; 4649 enum mc_target_type ret = MC_TARGET_NONE; 4650 4651 page = pmd_page(pmd); 4652 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4653 if (!(mc.flags & MOVE_ANON)) 4654 return ret; 4655 if (page->mem_cgroup == mc.from) { 4656 ret = MC_TARGET_PAGE; 4657 if (target) { 4658 get_page(page); 4659 target->page = page; 4660 } 4661 } 4662 return ret; 4663 } 4664 #else 4665 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4666 unsigned long addr, pmd_t pmd, union mc_target *target) 4667 { 4668 return MC_TARGET_NONE; 4669 } 4670 #endif 4671 4672 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4673 unsigned long addr, unsigned long end, 4674 struct mm_walk *walk) 4675 { 4676 struct vm_area_struct *vma = walk->vma; 4677 pte_t *pte; 4678 spinlock_t *ptl; 4679 4680 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 4681 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4682 mc.precharge += HPAGE_PMD_NR; 4683 spin_unlock(ptl); 4684 return 0; 4685 } 4686 4687 if (pmd_trans_unstable(pmd)) 4688 return 0; 4689 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4690 for (; addr != end; pte++, addr += PAGE_SIZE) 4691 if (get_mctgt_type(vma, addr, *pte, NULL)) 4692 mc.precharge++; /* increment precharge temporarily */ 4693 pte_unmap_unlock(pte - 1, ptl); 4694 cond_resched(); 4695 4696 return 0; 4697 } 4698 4699 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4700 { 4701 unsigned long precharge; 4702 4703 struct mm_walk mem_cgroup_count_precharge_walk = { 4704 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4705 .mm = mm, 4706 }; 4707 down_read(&mm->mmap_sem); 4708 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk); 4709 up_read(&mm->mmap_sem); 4710 4711 precharge = mc.precharge; 4712 mc.precharge = 0; 4713 4714 return precharge; 4715 } 4716 4717 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4718 { 4719 unsigned long precharge = mem_cgroup_count_precharge(mm); 4720 4721 VM_BUG_ON(mc.moving_task); 4722 mc.moving_task = current; 4723 return mem_cgroup_do_precharge(precharge); 4724 } 4725 4726 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4727 static void __mem_cgroup_clear_mc(void) 4728 { 4729 struct mem_cgroup *from = mc.from; 4730 struct mem_cgroup *to = mc.to; 4731 4732 /* we must uncharge all the leftover precharges from mc.to */ 4733 if (mc.precharge) { 4734 cancel_charge(mc.to, mc.precharge); 4735 mc.precharge = 0; 4736 } 4737 /* 4738 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4739 * we must uncharge here. 
4740 */ 4741 if (mc.moved_charge) { 4742 cancel_charge(mc.from, mc.moved_charge); 4743 mc.moved_charge = 0; 4744 } 4745 /* we must fix up refcnts and charges */ 4746 if (mc.moved_swap) { 4747 /* uncharge swap account from the old cgroup */ 4748 if (!mem_cgroup_is_root(mc.from)) 4749 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4750 4751 /* 4752 * we charged both to->memory and to->memsw, so we 4753 * should uncharge to->memory. 4754 */ 4755 if (!mem_cgroup_is_root(mc.to)) 4756 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4757 4758 css_put_many(&mc.from->css, mc.moved_swap); 4759 4760 /* we've already done css_get(mc.to) */ 4761 mc.moved_swap = 0; 4762 } 4763 memcg_oom_recover(from); 4764 memcg_oom_recover(to); 4765 wake_up_all(&mc.waitq); 4766 } 4767 4768 static void mem_cgroup_clear_mc(void) 4769 { 4770 /* 4771 * we must clear moving_task before waking up waiters at the end of 4772 * task migration. 4773 */ 4774 mc.moving_task = NULL; 4775 __mem_cgroup_clear_mc(); 4776 spin_lock(&mc.lock); 4777 mc.from = NULL; 4778 mc.to = NULL; 4779 spin_unlock(&mc.lock); 4780 } 4781 4782 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 4783 struct cgroup_taskset *tset) 4784 { 4785 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4786 struct mem_cgroup *from; 4787 struct task_struct *leader, *p; 4788 struct mm_struct *mm; 4789 unsigned long move_flags; 4790 int ret = 0; 4791 4792 /* 4793 * We are now committed to this value, whatever it is. Changes in this 4794 * tunable will only affect upcoming migrations, not the current one. 4795 * So we need to save it, and keep it going. 4796 */ 4797 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 4798 if (!move_flags) 4799 return 0; 4800 4801 /* 4802 * Multi-process migrations only happen on the default hierarchy 4803 * where charge immigration is not used. Perform charge 4804 * immigration if @tset contains a leader and whine if there are 4805 * multiple.
4806 */ 4807 p = NULL; 4808 cgroup_taskset_for_each_leader(leader, tset) { 4809 WARN_ON_ONCE(p); 4810 p = leader; 4811 } 4812 if (!p) 4813 return 0; 4814 4815 from = mem_cgroup_from_task(p); 4816 4817 VM_BUG_ON(from == memcg); 4818 4819 mm = get_task_mm(p); 4820 if (!mm) 4821 return 0; 4822 /* We move charges only when we move a owner of the mm */ 4823 if (mm->owner == p) { 4824 VM_BUG_ON(mc.from); 4825 VM_BUG_ON(mc.to); 4826 VM_BUG_ON(mc.precharge); 4827 VM_BUG_ON(mc.moved_charge); 4828 VM_BUG_ON(mc.moved_swap); 4829 4830 spin_lock(&mc.lock); 4831 mc.from = from; 4832 mc.to = memcg; 4833 mc.flags = move_flags; 4834 spin_unlock(&mc.lock); 4835 /* We set mc.moving_task later */ 4836 4837 ret = mem_cgroup_precharge_mc(mm); 4838 if (ret) 4839 mem_cgroup_clear_mc(); 4840 } 4841 mmput(mm); 4842 return ret; 4843 } 4844 4845 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 4846 struct cgroup_taskset *tset) 4847 { 4848 if (mc.to) 4849 mem_cgroup_clear_mc(); 4850 } 4851 4852 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 4853 unsigned long addr, unsigned long end, 4854 struct mm_walk *walk) 4855 { 4856 int ret = 0; 4857 struct vm_area_struct *vma = walk->vma; 4858 pte_t *pte; 4859 spinlock_t *ptl; 4860 enum mc_target_type target_type; 4861 union mc_target target; 4862 struct page *page; 4863 4864 /* 4865 * We don't take compound_lock() here but no race with splitting thp 4866 * happens because: 4867 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not 4868 * under splitting, which means there's no concurrent thp split, 4869 * - if another thread runs into split_huge_page() just after we 4870 * entered this if-block, the thread must wait for page table lock 4871 * to be unlocked in __split_huge_page_splitting(), where the main 4872 * part of thp split is not executed yet. 4873 */ 4874 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 4875 if (mc.precharge < HPAGE_PMD_NR) { 4876 spin_unlock(ptl); 4877 return 0; 4878 } 4879 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 4880 if (target_type == MC_TARGET_PAGE) { 4881 page = target.page; 4882 if (!isolate_lru_page(page)) { 4883 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, 4884 mc.from, mc.to)) { 4885 mc.precharge -= HPAGE_PMD_NR; 4886 mc.moved_charge += HPAGE_PMD_NR; 4887 } 4888 putback_lru_page(page); 4889 } 4890 put_page(page); 4891 } 4892 spin_unlock(ptl); 4893 return 0; 4894 } 4895 4896 if (pmd_trans_unstable(pmd)) 4897 return 0; 4898 retry: 4899 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4900 for (; addr != end; addr += PAGE_SIZE) { 4901 pte_t ptent = *(pte++); 4902 swp_entry_t ent; 4903 4904 if (!mc.precharge) 4905 break; 4906 4907 switch (get_mctgt_type(vma, addr, ptent, &target)) { 4908 case MC_TARGET_PAGE: 4909 page = target.page; 4910 if (isolate_lru_page(page)) 4911 goto put; 4912 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) { 4913 mc.precharge--; 4914 /* we uncharge from mc.from later. */ 4915 mc.moved_charge++; 4916 } 4917 putback_lru_page(page); 4918 put: /* get_mctgt_type() gets the page */ 4919 put_page(page); 4920 break; 4921 case MC_TARGET_SWAP: 4922 ent = target.ent; 4923 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 4924 mc.precharge--; 4925 /* we fixup refcnts and charges later. */ 4926 mc.moved_swap++; 4927 } 4928 break; 4929 default: 4930 break; 4931 } 4932 } 4933 pte_unmap_unlock(pte - 1, ptl); 4934 cond_resched(); 4935 4936 if (addr != end) { 4937 /* 4938 * We have consumed all precharges we got in can_attach(). 
4939 * We try charging one by one, but don't do any additional 4940 * charges to mc.to if we already failed a charge once in the 4941 * attach() phase. 4942 */ 4943 ret = mem_cgroup_do_precharge(1); 4944 if (!ret) 4945 goto retry; 4946 } 4947 4948 return ret; 4949 } 4950 4951 static void mem_cgroup_move_charge(struct mm_struct *mm) 4952 { 4953 struct mm_walk mem_cgroup_move_charge_walk = { 4954 .pmd_entry = mem_cgroup_move_charge_pte_range, 4955 .mm = mm, 4956 }; 4957 4958 lru_add_drain_all(); 4959 /* 4960 * Signal mem_cgroup_begin_page_stat() to take the memcg's 4961 * move_lock while we're moving its pages to another memcg. 4962 * Then wait for already started RCU-only updates to finish. 4963 */ 4964 atomic_inc(&mc.from->moving_account); 4965 synchronize_rcu(); 4966 retry: 4967 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 4968 /* 4969 * Someone holding the mmap_sem might be waiting on our 4970 * waitq. So we cancel all extra charges, wake up all waiters, 4971 * and retry. Because we cancel precharges, we might not be able 4972 * to move enough charges, but moving charge is a best-effort 4973 * feature anyway, so it wouldn't be a big problem. 4974 */ 4975 __mem_cgroup_clear_mc(); 4976 cond_resched(); 4977 goto retry; 4978 } 4979 /* 4980 * Once we have consumed all precharges and fail to get an 4981 * additional charge, the page walk simply aborts. 4982 */ 4983 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 4984 up_read(&mm->mmap_sem); 4985 atomic_dec(&mc.from->moving_account); 4986 } 4987 4988 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 4989 struct cgroup_taskset *tset) 4990 { 4991 struct task_struct *p = cgroup_taskset_first(tset); 4992 struct mm_struct *mm = get_task_mm(p); 4993 4994 if (mm) { 4995 if (mc.to) 4996 mem_cgroup_move_charge(mm); 4997 mmput(mm); 4998 } 4999 if (mc.to) 5000 mem_cgroup_clear_mc(); 5001 } 5002 #else /* !CONFIG_MMU */ 5003 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 5004 struct cgroup_taskset *tset) 5005 { 5006 return 0; 5007 } 5008 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 5009 struct cgroup_taskset *tset) 5010 { 5011 } 5012 static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 5013 struct cgroup_taskset *tset) 5014 { 5015 } 5016 #endif 5017 5018 /* 5019 * Cgroup retains root cgroups across [un]mount cycles, making it necessary 5020 * to verify whether we're attached to the default hierarchy on each mount 5021 * attempt. 5022 */ 5023 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5024 { 5025 /* 5026 * use_hierarchy is forced on the default hierarchy. cgroup core 5027 * guarantees that @root doesn't have any children, so turning it 5028 * on for the root memcg is enough.
5029 */ 5030 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5031 root_mem_cgroup->use_hierarchy = true; 5032 else 5033 root_mem_cgroup->use_hierarchy = false; 5034 } 5035 5036 static u64 memory_current_read(struct cgroup_subsys_state *css, 5037 struct cftype *cft) 5038 { 5039 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5040 5041 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5042 } 5043 5044 static int memory_low_show(struct seq_file *m, void *v) 5045 { 5046 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5047 unsigned long low = READ_ONCE(memcg->low); 5048 5049 if (low == PAGE_COUNTER_MAX) 5050 seq_puts(m, "max\n"); 5051 else 5052 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5053 5054 return 0; 5055 } 5056 5057 static ssize_t memory_low_write(struct kernfs_open_file *of, 5058 char *buf, size_t nbytes, loff_t off) 5059 { 5060 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5061 unsigned long low; 5062 int err; 5063 5064 buf = strstrip(buf); 5065 err = page_counter_memparse(buf, "max", &low); 5066 if (err) 5067 return err; 5068 5069 memcg->low = low; 5070 5071 return nbytes; 5072 } 5073 5074 static int memory_high_show(struct seq_file *m, void *v) 5075 { 5076 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5077 unsigned long high = READ_ONCE(memcg->high); 5078 5079 if (high == PAGE_COUNTER_MAX) 5080 seq_puts(m, "max\n"); 5081 else 5082 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5083 5084 return 0; 5085 } 5086 5087 static ssize_t memory_high_write(struct kernfs_open_file *of, 5088 char *buf, size_t nbytes, loff_t off) 5089 { 5090 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5091 unsigned long high; 5092 int err; 5093 5094 buf = strstrip(buf); 5095 err = page_counter_memparse(buf, "max", &high); 5096 if (err) 5097 return err; 5098 5099 memcg->high = high; 5100 5101 memcg_wb_domain_size_changed(memcg); 5102 return nbytes; 5103 } 5104 5105 static int memory_max_show(struct seq_file *m, void *v) 5106 { 5107 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5108 unsigned long max = READ_ONCE(memcg->memory.limit); 5109 5110 if (max == PAGE_COUNTER_MAX) 5111 seq_puts(m, "max\n"); 5112 else 5113 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5114 5115 return 0; 5116 } 5117 5118 static ssize_t memory_max_write(struct kernfs_open_file *of, 5119 char *buf, size_t nbytes, loff_t off) 5120 { 5121 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5122 unsigned long max; 5123 int err; 5124 5125 buf = strstrip(buf); 5126 err = page_counter_memparse(buf, "max", &max); 5127 if (err) 5128 return err; 5129 5130 err = mem_cgroup_resize_limit(memcg, max); 5131 if (err) 5132 return err; 5133 5134 memcg_wb_domain_size_changed(memcg); 5135 return nbytes; 5136 } 5137 5138 static int memory_events_show(struct seq_file *m, void *v) 5139 { 5140 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5141 5142 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); 5143 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); 5144 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); 5145 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); 5146 5147 return 0; 5148 } 5149 5150 static struct cftype memory_files[] = { 5151 { 5152 .name = "current", 5153 .flags = CFTYPE_NOT_ON_ROOT, 5154 .read_u64 = memory_current_read, 5155 }, 5156 { 5157 .name = "low", 5158 .flags = CFTYPE_NOT_ON_ROOT, 5159 .seq_show = memory_low_show, 5160 .write = memory_low_write, 5161 }, 
5162 { 5163 .name = "high", 5164 .flags = CFTYPE_NOT_ON_ROOT, 5165 .seq_show = memory_high_show, 5166 .write = memory_high_write, 5167 }, 5168 { 5169 .name = "max", 5170 .flags = CFTYPE_NOT_ON_ROOT, 5171 .seq_show = memory_max_show, 5172 .write = memory_max_write, 5173 }, 5174 { 5175 .name = "events", 5176 .flags = CFTYPE_NOT_ON_ROOT, 5177 .file_offset = offsetof(struct mem_cgroup, events_file), 5178 .seq_show = memory_events_show, 5179 }, 5180 { } /* terminate */ 5181 }; 5182 5183 struct cgroup_subsys memory_cgrp_subsys = { 5184 .css_alloc = mem_cgroup_css_alloc, 5185 .css_online = mem_cgroup_css_online, 5186 .css_offline = mem_cgroup_css_offline, 5187 .css_free = mem_cgroup_css_free, 5188 .css_reset = mem_cgroup_css_reset, 5189 .can_attach = mem_cgroup_can_attach, 5190 .cancel_attach = mem_cgroup_cancel_attach, 5191 .attach = mem_cgroup_move_task, 5192 .bind = mem_cgroup_bind, 5193 .dfl_cftypes = memory_files, 5194 .legacy_cftypes = mem_cgroup_legacy_files, 5195 .early_init = 0, 5196 }; 5197 5198 /** 5199 * mem_cgroup_low - check if memory consumption is below the normal range 5200 * @root: the highest ancestor to consider 5201 * @memcg: the memory cgroup to check 5202 * 5203 * Returns %true if memory consumption of @memcg, and that of all 5204 * configurable ancestors up to @root, is below the normal range. 5205 */ 5206 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5207 { 5208 if (mem_cgroup_disabled()) 5209 return false; 5210 5211 /* 5212 * The toplevel group doesn't have a configurable range, so 5213 * it's never low when looked at directly, and it is not 5214 * considered an ancestor when assessing the hierarchy. 5215 */ 5216 5217 if (memcg == root_mem_cgroup) 5218 return false; 5219 5220 if (page_counter_read(&memcg->memory) >= memcg->low) 5221 return false; 5222 5223 while (memcg != root) { 5224 memcg = parent_mem_cgroup(memcg); 5225 5226 if (memcg == root_mem_cgroup) 5227 break; 5228 5229 if (page_counter_read(&memcg->memory) >= memcg->low) 5230 return false; 5231 } 5232 return true; 5233 } 5234 5235 /** 5236 * mem_cgroup_try_charge - try charging a page 5237 * @page: page to charge 5238 * @mm: mm context of the victim 5239 * @gfp_mask: reclaim mode 5240 * @memcgp: charged memcg return 5241 * 5242 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5243 * pages according to @gfp_mask if necessary. 5244 * 5245 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5246 * Otherwise, an error code is returned. 5247 * 5248 * After page->mapping has been set up, the caller must finalize the 5249 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5250 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5251 */ 5252 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5253 gfp_t gfp_mask, struct mem_cgroup **memcgp) 5254 { 5255 struct mem_cgroup *memcg = NULL; 5256 unsigned int nr_pages = 1; 5257 int ret = 0; 5258 5259 if (mem_cgroup_disabled()) 5260 goto out; 5261 5262 if (PageSwapCache(page)) { 5263 /* 5264 * Every swap fault against a single page tries to charge the 5265 * page, bail as early as possible. shmem_unuse() encounters 5266 * already charged pages, too. The USED bit is protected by 5267 * the page lock, which serializes swap cache removal, which 5268 * in turn serializes uncharging. 
5269 */ 5270 VM_BUG_ON_PAGE(!PageLocked(page), page); 5271 if (page->mem_cgroup) 5272 goto out; 5273 5274 if (do_swap_account) { 5275 swp_entry_t ent = { .val = page_private(page), }; 5276 unsigned short id = lookup_swap_cgroup_id(ent); 5277 5278 rcu_read_lock(); 5279 memcg = mem_cgroup_from_id(id); 5280 if (memcg && !css_tryget_online(&memcg->css)) 5281 memcg = NULL; 5282 rcu_read_unlock(); 5283 } 5284 } 5285 5286 if (PageTransHuge(page)) { 5287 nr_pages <<= compound_order(page); 5288 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5289 } 5290 5291 if (!memcg) 5292 memcg = get_mem_cgroup_from_mm(mm); 5293 5294 ret = try_charge(memcg, gfp_mask, nr_pages); 5295 5296 css_put(&memcg->css); 5297 out: 5298 *memcgp = memcg; 5299 return ret; 5300 } 5301 5302 /** 5303 * mem_cgroup_commit_charge - commit a page charge 5304 * @page: page to charge 5305 * @memcg: memcg to charge the page to 5306 * @lrucare: page might be on LRU already 5307 * 5308 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5309 * after page->mapping has been set up. This must happen atomically 5310 * as part of the page instantiation, i.e. under the page table lock 5311 * for anonymous pages, under the page lock for page and swap cache. 5312 * 5313 * In addition, the page must not be on the LRU during the commit, to 5314 * prevent racing with task migration. If it might be, use @lrucare. 5315 * 5316 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5317 */ 5318 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5319 bool lrucare) 5320 { 5321 unsigned int nr_pages = 1; 5322 5323 VM_BUG_ON_PAGE(!page->mapping, page); 5324 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5325 5326 if (mem_cgroup_disabled()) 5327 return; 5328 /* 5329 * Swap faults will attempt to charge the same page multiple 5330 * times. But reuse_swap_page() might have removed the page 5331 * from swapcache already, so we can't check PageSwapCache(). 5332 */ 5333 if (!memcg) 5334 return; 5335 5336 commit_charge(page, memcg, lrucare); 5337 5338 if (PageTransHuge(page)) { 5339 nr_pages <<= compound_order(page); 5340 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5341 } 5342 5343 local_irq_disable(); 5344 mem_cgroup_charge_statistics(memcg, page, nr_pages); 5345 memcg_check_events(memcg, page); 5346 local_irq_enable(); 5347 5348 if (do_swap_account && PageSwapCache(page)) { 5349 swp_entry_t entry = { .val = page_private(page) }; 5350 /* 5351 * The swap entry might not get freed for a long time, 5352 * let's not wait for it. The page already received a 5353 * memory+swap charge, drop the swap entry duplicate. 5354 */ 5355 mem_cgroup_uncharge_swap(entry); 5356 } 5357 } 5358 5359 /** 5360 * mem_cgroup_cancel_charge - cancel a page charge 5361 * @page: page to charge 5362 * @memcg: memcg to charge the page to 5363 * 5364 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5365 */ 5366 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) 5367 { 5368 unsigned int nr_pages = 1; 5369 5370 if (mem_cgroup_disabled()) 5371 return; 5372 /* 5373 * Swap faults will attempt to charge the same page multiple 5374 * times. But reuse_swap_page() might have removed the page 5375 * from swapcache already, so we can't check PageSwapCache(). 
5376 */ 5377 if (!memcg) 5378 return; 5379 5380 if (PageTransHuge(page)) { 5381 nr_pages <<= compound_order(page); 5382 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5383 } 5384 5385 cancel_charge(memcg, nr_pages); 5386 } 5387 5388 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5389 unsigned long nr_anon, unsigned long nr_file, 5390 unsigned long nr_huge, struct page *dummy_page) 5391 { 5392 unsigned long nr_pages = nr_anon + nr_file; 5393 unsigned long flags; 5394 5395 if (!mem_cgroup_is_root(memcg)) { 5396 page_counter_uncharge(&memcg->memory, nr_pages); 5397 if (do_swap_account) 5398 page_counter_uncharge(&memcg->memsw, nr_pages); 5399 memcg_oom_recover(memcg); 5400 } 5401 5402 local_irq_save(flags); 5403 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); 5404 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); 5405 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); 5406 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); 5407 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5408 memcg_check_events(memcg, dummy_page); 5409 local_irq_restore(flags); 5410 5411 if (!mem_cgroup_is_root(memcg)) 5412 css_put_many(&memcg->css, nr_pages); 5413 } 5414 5415 static void uncharge_list(struct list_head *page_list) 5416 { 5417 struct mem_cgroup *memcg = NULL; 5418 unsigned long nr_anon = 0; 5419 unsigned long nr_file = 0; 5420 unsigned long nr_huge = 0; 5421 unsigned long pgpgout = 0; 5422 struct list_head *next; 5423 struct page *page; 5424 5425 next = page_list->next; 5426 do { 5427 unsigned int nr_pages = 1; 5428 5429 page = list_entry(next, struct page, lru); 5430 next = page->lru.next; 5431 5432 VM_BUG_ON_PAGE(PageLRU(page), page); 5433 VM_BUG_ON_PAGE(page_count(page), page); 5434 5435 if (!page->mem_cgroup) 5436 continue; 5437 5438 /* 5439 * Nobody should be changing or seriously looking at 5440 * page->mem_cgroup at this point, we have fully 5441 * exclusive access to the page. 5442 */ 5443 5444 if (memcg != page->mem_cgroup) { 5445 if (memcg) { 5446 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5447 nr_huge, page); 5448 pgpgout = nr_anon = nr_file = nr_huge = 0; 5449 } 5450 memcg = page->mem_cgroup; 5451 } 5452 5453 if (PageTransHuge(page)) { 5454 nr_pages <<= compound_order(page); 5455 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5456 nr_huge += nr_pages; 5457 } 5458 5459 if (PageAnon(page)) 5460 nr_anon += nr_pages; 5461 else 5462 nr_file += nr_pages; 5463 5464 page->mem_cgroup = NULL; 5465 5466 pgpgout++; 5467 } while (next != page_list); 5468 5469 if (memcg) 5470 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5471 nr_huge, page); 5472 } 5473 5474 /** 5475 * mem_cgroup_uncharge - uncharge a page 5476 * @page: page to uncharge 5477 * 5478 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5479 * mem_cgroup_commit_charge(). 5480 */ 5481 void mem_cgroup_uncharge(struct page *page) 5482 { 5483 if (mem_cgroup_disabled()) 5484 return; 5485 5486 /* Don't touch page->lru of any random page, pre-check: */ 5487 if (!page->mem_cgroup) 5488 return; 5489 5490 INIT_LIST_HEAD(&page->lru); 5491 uncharge_list(&page->lru); 5492 } 5493 5494 /** 5495 * mem_cgroup_uncharge_list - uncharge a list of page 5496 * @page_list: list of pages to uncharge 5497 * 5498 * Uncharge a list of pages previously charged with 5499 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 
5500 */ 5501 void mem_cgroup_uncharge_list(struct list_head *page_list) 5502 { 5503 if (mem_cgroup_disabled()) 5504 return; 5505 5506 if (!list_empty(page_list)) 5507 uncharge_list(page_list); 5508 } 5509 5510 /** 5511 * mem_cgroup_replace_page - migrate a charge to another page 5512 * @oldpage: currently charged page 5513 * @newpage: page to transfer the charge to 5514 * @lrucare: either or both pages might be on the LRU already 5515 * 5516 * Migrate the charge from @oldpage to @newpage. 5517 * 5518 * Both pages must be locked, @newpage->mapping must be set up. 5519 */ 5520 void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) 5521 { 5522 struct mem_cgroup *memcg; 5523 int isolated; 5524 5525 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5526 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5527 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5528 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5529 newpage); 5530 5531 if (mem_cgroup_disabled()) 5532 return; 5533 5534 /* Page cache replacement: new page already charged? */ 5535 if (newpage->mem_cgroup) 5536 return; 5537 5538 /* Swapcache readahead pages can get replaced before being charged */ 5539 memcg = oldpage->mem_cgroup; 5540 if (!memcg) 5541 return; 5542 5543 lock_page_lru(oldpage, &isolated); 5544 oldpage->mem_cgroup = NULL; 5545 unlock_page_lru(oldpage, isolated); 5546 5547 commit_charge(newpage, memcg, true); 5548 } 5549 5550 /* 5551 * subsys_initcall() for memory controller. 5552 * 5553 * Some parts like hotcpu_notifier() have to be initialized from this context 5554 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 5555 * everything that doesn't depend on a specific mem_cgroup structure should 5556 * be initialized from here. 5557 */ 5558 static int __init mem_cgroup_init(void) 5559 { 5560 int cpu, node; 5561 5562 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5563 5564 for_each_possible_cpu(cpu) 5565 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5566 drain_local_stock); 5567 5568 for_each_node(node) { 5569 struct mem_cgroup_tree_per_node *rtpn; 5570 int zone; 5571 5572 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5573 node_online(node) ? node : NUMA_NO_NODE); 5574 5575 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 5576 struct mem_cgroup_tree_per_zone *rtpz; 5577 5578 rtpz = &rtpn->rb_tree_per_zone[zone]; 5579 rtpz->rb_root = RB_ROOT; 5580 spin_lock_init(&rtpz->lock); 5581 } 5582 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5583 } 5584 5585 return 0; 5586 } 5587 subsys_initcall(mem_cgroup_init); 5588 5589 #ifdef CONFIG_MEMCG_SWAP 5590 /** 5591 * mem_cgroup_swapout - transfer a memsw charge to swap 5592 * @page: page whose memsw charge to transfer 5593 * @entry: swap entry to move the charge to 5594 * 5595 * Transfer the memsw charge of @page to @entry. 
5596 */ 5597 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5598 { 5599 struct mem_cgroup *memcg; 5600 unsigned short oldid; 5601 5602 VM_BUG_ON_PAGE(PageLRU(page), page); 5603 VM_BUG_ON_PAGE(page_count(page), page); 5604 5605 if (!do_swap_account) 5606 return; 5607 5608 memcg = page->mem_cgroup; 5609 5610 /* Readahead page, never charged */ 5611 if (!memcg) 5612 return; 5613 5614 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5615 VM_BUG_ON_PAGE(oldid, page); 5616 mem_cgroup_swap_statistics(memcg, true); 5617 5618 page->mem_cgroup = NULL; 5619 5620 if (!mem_cgroup_is_root(memcg)) 5621 page_counter_uncharge(&memcg->memory, 1); 5622 5623 /* 5624 * Interrupts should be disabled here because the caller holds the 5625 * mapping->tree_lock, which is taken with interrupts off. Keeping 5626 * interrupts disabled is important because it is the only 5627 * synchronisation we have for updating the per-CPU variables. 5628 */ 5629 VM_BUG_ON(!irqs_disabled()); 5630 mem_cgroup_charge_statistics(memcg, page, -1); 5631 memcg_check_events(memcg, page); 5632 } 5633 5634 /** 5635 * mem_cgroup_uncharge_swap - uncharge a swap entry 5636 * @entry: swap entry to uncharge 5637 * 5638 * Drop the memsw charge associated with @entry. 5639 */ 5640 void mem_cgroup_uncharge_swap(swp_entry_t entry) 5641 { 5642 struct mem_cgroup *memcg; 5643 unsigned short id; 5644 5645 if (!do_swap_account) 5646 return; 5647 5648 id = swap_cgroup_record(entry, 0); 5649 rcu_read_lock(); 5650 memcg = mem_cgroup_from_id(id); 5651 if (memcg) { 5652 if (!mem_cgroup_is_root(memcg)) 5653 page_counter_uncharge(&memcg->memsw, 1); 5654 mem_cgroup_swap_statistics(memcg, false); 5655 css_put(&memcg->css); 5656 } 5657 rcu_read_unlock(); 5658 } 5659 5660 /* to remember the boot option */ 5661 #ifdef CONFIG_MEMCG_SWAP_ENABLED 5662 static int really_do_swap_account __initdata = 1; 5663 #else 5664 static int really_do_swap_account __initdata; 5665 #endif 5666 5667 static int __init enable_swap_account(char *s) 5668 { 5669 if (!strcmp(s, "1")) 5670 really_do_swap_account = 1; 5671 else if (!strcmp(s, "0")) 5672 really_do_swap_account = 0; 5673 return 1; 5674 } 5675 __setup("swapaccount=", enable_swap_account); 5676 5677 static struct cftype memsw_cgroup_files[] = { 5678 { 5679 .name = "memsw.usage_in_bytes", 5680 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 5681 .read_u64 = mem_cgroup_read_u64, 5682 }, 5683 { 5684 .name = "memsw.max_usage_in_bytes", 5685 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 5686 .write = mem_cgroup_reset, 5687 .read_u64 = mem_cgroup_read_u64, 5688 }, 5689 { 5690 .name = "memsw.limit_in_bytes", 5691 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 5692 .write = mem_cgroup_write, 5693 .read_u64 = mem_cgroup_read_u64, 5694 }, 5695 { 5696 .name = "memsw.failcnt", 5697 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 5698 .write = mem_cgroup_reset, 5699 .read_u64 = mem_cgroup_read_u64, 5700 }, 5701 { }, /* terminate */ 5702 }; 5703 5704 static int __init mem_cgroup_swap_init(void) 5705 { 5706 if (!mem_cgroup_disabled() && really_do_swap_account) { 5707 do_swap_account = 1; 5708 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 5709 memsw_cgroup_files)); 5710 } 5711 return 0; 5712 } 5713 subsys_initcall(mem_cgroup_swap_init); 5714 5715 #endif /* CONFIG_MEMCG_SWAP */ 5716
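/*
 * Illustrative sketch of the charge protocol described in the kernel-doc
 * for mem_cgroup_try_charge(), mem_cgroup_commit_charge() and
 * mem_cgroup_cancel_charge() above. This is a hypothetical caller, not a
 * real call site; insert_into_mapping() and the surrounding variables are
 * placeholders for whatever page instantiation the real caller performs:
 *
 *	struct mem_cgroup *memcg;
 *	int err;
 *
 *	err = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg);
 *	if (err)
 *		return err;
 *	err = insert_into_mapping(page);	(hypothetical: sets up page->mapping)
 *	if (err) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return err;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 *	...
 *	mem_cgroup_uncharge(page);		(later, when the page is finally freed)
 */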