1 /* memcontrol.c - Memory Controller 2 * 3 * Copyright IBM Corporation, 2007 4 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 5 * 6 * Copyright 2007 OpenVZ SWsoft Inc 7 * Author: Pavel Emelianov <xemul@openvz.org> 8 * 9 * Memory thresholds 10 * Copyright (C) 2009 Nokia Corporation 11 * Author: Kirill A. Shutemov 12 * 13 * Kernel Memory Controller 14 * Copyright (C) 2012 Parallels Inc. and Google Inc. 15 * Authors: Glauber Costa and Suleiman Souhlal 16 * 17 * Native page reclaim 18 * Charge lifetime sanitation 19 * Lockless page tracking & accounting 20 * Unified hierarchy configuration model 21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 22 * 23 * This program is free software; you can redistribute it and/or modify 24 * it under the terms of the GNU General Public License as published by 25 * the Free Software Foundation; either version 2 of the License, or 26 * (at your option) any later version. 27 * 28 * This program is distributed in the hope that it will be useful, 29 * but WITHOUT ANY WARRANTY; without even the implied warranty of 30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 * GNU General Public License for more details. 32 */ 33 34 #include <linux/page_counter.h> 35 #include <linux/memcontrol.h> 36 #include <linux/cgroup.h> 37 #include <linux/mm.h> 38 #include <linux/hugetlb.h> 39 #include <linux/pagemap.h> 40 #include <linux/smp.h> 41 #include <linux/page-flags.h> 42 #include <linux/backing-dev.h> 43 #include <linux/bit_spinlock.h> 44 #include <linux/rcupdate.h> 45 #include <linux/limits.h> 46 #include <linux/export.h> 47 #include <linux/mutex.h> 48 #include <linux/rbtree.h> 49 #include <linux/slab.h> 50 #include <linux/swap.h> 51 #include <linux/swapops.h> 52 #include <linux/spinlock.h> 53 #include <linux/eventfd.h> 54 #include <linux/poll.h> 55 #include <linux/sort.h> 56 #include <linux/fs.h> 57 #include <linux/seq_file.h> 58 #include <linux/vmpressure.h> 59 #include <linux/mm_inline.h> 60 #include <linux/swap_cgroup.h> 61 #include <linux/cpu.h> 62 #include <linux/oom.h> 63 #include <linux/lockdep.h> 64 #include <linux/file.h> 65 #include <linux/tracehook.h> 66 #include "internal.h" 67 #include <net/sock.h> 68 #include <net/ip.h> 69 #include <net/tcp_memcontrol.h> 70 #include "slab.h" 71 72 #include <asm/uaccess.h> 73 74 #include <trace/events/vmscan.h> 75 76 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 77 EXPORT_SYMBOL(memory_cgrp_subsys); 78 79 #define MEM_CGROUP_RECLAIM_RETRIES 5 80 static struct mem_cgroup *root_mem_cgroup __read_mostly; 81 struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly; 82 83 /* Whether the swap controller is active */ 84 #ifdef CONFIG_MEMCG_SWAP 85 int do_swap_account __read_mostly; 86 #else 87 #define do_swap_account 0 88 #endif 89 90 static const char * const mem_cgroup_stat_names[] = { 91 "cache", 92 "rss", 93 "rss_huge", 94 "mapped_file", 95 "dirty", 96 "writeback", 97 "swap", 98 }; 99 100 static const char * const mem_cgroup_events_names[] = { 101 "pgpgin", 102 "pgpgout", 103 "pgfault", 104 "pgmajfault", 105 }; 106 107 static const char * const mem_cgroup_lru_names[] = { 108 "inactive_anon", 109 "active_anon", 110 "inactive_file", 111 "active_file", 112 "unevictable", 113 }; 114 115 #define THRESHOLDS_EVENTS_TARGET 128 116 #define SOFTLIMIT_EVENTS_TARGET 1024 117 #define NUMAINFO_EVENTS_TARGET 1024 118 119 /* 120 * Cgroups above their limits are maintained in a RB-Tree, independent of 121 * their hierarchy representation 122 */ 123 124 struct mem_cgroup_tree_per_zone { 125 struct rb_root 
rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
243 * As a consequence, any change that needs to protect against new child cgroups 244 * appearing has to hold it as well. 245 */ 246 static DEFINE_MUTEX(memcg_create_mutex); 247 248 /* Some nice accessors for the vmpressure. */ 249 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 250 { 251 if (!memcg) 252 memcg = root_mem_cgroup; 253 return &memcg->vmpressure; 254 } 255 256 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 257 { 258 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 259 } 260 261 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) 262 { 263 return (memcg == root_mem_cgroup); 264 } 265 266 /* 267 * We restrict the id in the range of [1, 65535], so it can fit into 268 * an unsigned short. 269 */ 270 #define MEM_CGROUP_ID_MAX USHRT_MAX 271 272 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) 273 { 274 return memcg->css.id; 275 } 276 277 /* 278 * A helper function to get mem_cgroup from ID. must be called under 279 * rcu_read_lock(). The caller is responsible for calling 280 * css_tryget_online() if the mem_cgroup is used for charging. (dropping 281 * refcnt from swap can be called against removed memcg.) 282 */ 283 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 284 { 285 struct cgroup_subsys_state *css; 286 287 css = css_from_id(id, &memory_cgrp_subsys); 288 return mem_cgroup_from_css(css); 289 } 290 291 /* Writing them here to avoid exposing memcg's inner layout */ 292 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) 293 294 void sock_update_memcg(struct sock *sk) 295 { 296 if (mem_cgroup_sockets_enabled) { 297 struct mem_cgroup *memcg; 298 struct cg_proto *cg_proto; 299 300 BUG_ON(!sk->sk_prot->proto_cgroup); 301 302 /* Socket cloning can throw us here with sk_cgrp already 303 * filled. It won't however, necessarily happen from 304 * process context. So the test for root memcg given 305 * the current task's memcg won't help us in this case. 306 * 307 * Respecting the original socket's memcg is a better 308 * decision in this case. 309 */ 310 if (sk->sk_cgrp) { 311 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); 312 css_get(&sk->sk_cgrp->memcg->css); 313 return; 314 } 315 316 rcu_read_lock(); 317 memcg = mem_cgroup_from_task(current); 318 cg_proto = sk->sk_prot->proto_cgroup(memcg); 319 if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) && 320 css_tryget_online(&memcg->css)) { 321 sk->sk_cgrp = cg_proto; 322 } 323 rcu_read_unlock(); 324 } 325 } 326 EXPORT_SYMBOL(sock_update_memcg); 327 328 void sock_release_memcg(struct sock *sk) 329 { 330 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 331 struct mem_cgroup *memcg; 332 WARN_ON(!sk->sk_cgrp->memcg); 333 memcg = sk->sk_cgrp->memcg; 334 css_put(&sk->sk_cgrp->memcg->css); 335 } 336 } 337 338 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) 339 { 340 if (!memcg || mem_cgroup_is_root(memcg)) 341 return NULL; 342 343 return &memcg->tcp_mem; 344 } 345 EXPORT_SYMBOL(tcp_proto_cgroup); 346 347 #endif 348 349 #ifdef CONFIG_MEMCG_KMEM 350 /* 351 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. 352 * The main reason for not using cgroup id for this: 353 * this works better in sparse environments, where we have a lot of memcgs, 354 * but only a few kmem-limited. Or also, if we have, for instance, 200 355 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 356 * 200 entry array for that. 
357 * 358 * The current size of the caches array is stored in memcg_nr_cache_ids. It 359 * will double each time we have to increase it. 360 */ 361 static DEFINE_IDA(memcg_cache_ida); 362 int memcg_nr_cache_ids; 363 364 /* Protects memcg_nr_cache_ids */ 365 static DECLARE_RWSEM(memcg_cache_ids_sem); 366 367 void memcg_get_cache_ids(void) 368 { 369 down_read(&memcg_cache_ids_sem); 370 } 371 372 void memcg_put_cache_ids(void) 373 { 374 up_read(&memcg_cache_ids_sem); 375 } 376 377 /* 378 * MIN_SIZE is different than 1, because we would like to avoid going through 379 * the alloc/free process all the time. In a small machine, 4 kmem-limited 380 * cgroups is a reasonable guess. In the future, it could be a parameter or 381 * tunable, but that is strictly not necessary. 382 * 383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 384 * this constant directly from cgroup, but it is understandable that this is 385 * better kept as an internal representation in cgroup.c. In any case, the 386 * cgrp_id space is not getting any smaller, and we don't have to necessarily 387 * increase ours as well if it increases. 388 */ 389 #define MEMCG_CACHES_MIN_SIZE 4 390 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 391 392 /* 393 * A lot of the calls to the cache allocation functions are expected to be 394 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are 395 * conditional to this static branch, we'll have to allow modules that does 396 * kmem_cache_alloc and the such to see this symbol as well 397 */ 398 struct static_key memcg_kmem_enabled_key; 399 EXPORT_SYMBOL(memcg_kmem_enabled_key); 400 401 #endif /* CONFIG_MEMCG_KMEM */ 402 403 static struct mem_cgroup_per_zone * 404 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) 405 { 406 int nid = zone_to_nid(zone); 407 int zid = zone_idx(zone); 408 409 return &memcg->nodeinfo[nid]->zoneinfo[zid]; 410 } 411 412 /** 413 * mem_cgroup_css_from_page - css of the memcg associated with a page 414 * @page: page of interest 415 * 416 * If memcg is bound to the default hierarchy, css of the memcg associated 417 * with @page is returned. The returned css remains associated with @page 418 * until it is released. 419 * 420 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 421 * is returned. 422 * 423 * XXX: The above description of behavior on the default hierarchy isn't 424 * strictly true yet as replace_page_cache_page() can modify the 425 * association before @page is released even on the default hierarchy; 426 * however, the current and planned usages don't mix the the two functions 427 * and replace_page_cache_page() will soon be updated to make the invariant 428 * actually true. 429 */ 430 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 431 { 432 struct mem_cgroup *memcg; 433 434 rcu_read_lock(); 435 436 memcg = page->mem_cgroup; 437 438 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 439 memcg = root_mem_cgroup; 440 441 rcu_read_unlock(); 442 return &memcg->css; 443 } 444 445 /** 446 * page_cgroup_ino - return inode number of the memcg a page is charged to 447 * @page: the page 448 * 449 * Look up the closest online ancestor of the memory cgroup @page is charged to 450 * and return its inode number or 0 if @page is not charged to any cgroup. It 451 * is safe to call this function without holding a reference to @page. 
452 * 453 * Note, this function is inherently racy, because there is nothing to prevent 454 * the cgroup inode from getting torn down and potentially reallocated a moment 455 * after page_cgroup_ino() returns, so it only should be used by callers that 456 * do not care (such as procfs interfaces). 457 */ 458 ino_t page_cgroup_ino(struct page *page) 459 { 460 struct mem_cgroup *memcg; 461 unsigned long ino = 0; 462 463 rcu_read_lock(); 464 memcg = READ_ONCE(page->mem_cgroup); 465 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 466 memcg = parent_mem_cgroup(memcg); 467 if (memcg) 468 ino = cgroup_ino(memcg->css.cgroup); 469 rcu_read_unlock(); 470 return ino; 471 } 472 473 static struct mem_cgroup_per_zone * 474 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page) 475 { 476 int nid = page_to_nid(page); 477 int zid = page_zonenum(page); 478 479 return &memcg->nodeinfo[nid]->zoneinfo[zid]; 480 } 481 482 static struct mem_cgroup_tree_per_zone * 483 soft_limit_tree_node_zone(int nid, int zid) 484 { 485 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 486 } 487 488 static struct mem_cgroup_tree_per_zone * 489 soft_limit_tree_from_page(struct page *page) 490 { 491 int nid = page_to_nid(page); 492 int zid = page_zonenum(page); 493 494 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid]; 495 } 496 497 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz, 498 struct mem_cgroup_tree_per_zone *mctz, 499 unsigned long new_usage_in_excess) 500 { 501 struct rb_node **p = &mctz->rb_root.rb_node; 502 struct rb_node *parent = NULL; 503 struct mem_cgroup_per_zone *mz_node; 504 505 if (mz->on_tree) 506 return; 507 508 mz->usage_in_excess = new_usage_in_excess; 509 if (!mz->usage_in_excess) 510 return; 511 while (*p) { 512 parent = *p; 513 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 514 tree_node); 515 if (mz->usage_in_excess < mz_node->usage_in_excess) 516 p = &(*p)->rb_left; 517 /* 518 * We can't avoid mem cgroups that are over their soft 519 * limit by the same amount 520 */ 521 else if (mz->usage_in_excess >= mz_node->usage_in_excess) 522 p = &(*p)->rb_right; 523 } 524 rb_link_node(&mz->tree_node, parent, p); 525 rb_insert_color(&mz->tree_node, &mctz->rb_root); 526 mz->on_tree = true; 527 } 528 529 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, 530 struct mem_cgroup_tree_per_zone *mctz) 531 { 532 if (!mz->on_tree) 533 return; 534 rb_erase(&mz->tree_node, &mctz->rb_root); 535 mz->on_tree = false; 536 } 537 538 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, 539 struct mem_cgroup_tree_per_zone *mctz) 540 { 541 unsigned long flags; 542 543 spin_lock_irqsave(&mctz->lock, flags); 544 __mem_cgroup_remove_exceeded(mz, mctz); 545 spin_unlock_irqrestore(&mctz->lock, flags); 546 } 547 548 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 549 { 550 unsigned long nr_pages = page_counter_read(&memcg->memory); 551 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 552 unsigned long excess = 0; 553 554 if (nr_pages > soft_limit) 555 excess = nr_pages - soft_limit; 556 557 return excess; 558 } 559 560 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 561 { 562 unsigned long excess; 563 struct mem_cgroup_per_zone *mz; 564 struct mem_cgroup_tree_per_zone *mctz; 565 566 mctz = soft_limit_tree_from_page(page); 567 /* 568 * Necessary to update all ancestors when hierarchy is used. 569 * because their event counter is not touched. 
 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return page count for single (non recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value, and we may get a chance to
 * implement a similar periodic synchronization for memcg's counters.
 *
 * But this _read() function is currently used for the user interface. The
 * user accounts memory usage by memory cgroup and _always_ requires an exact
 * value, because the numbers are used for accounting. Even if we provided a
 * quick-and-fuzzy read, we would still have to visit all online cpus and sum
 * them up. So, for now, unnecessary synchronization is not implemented
 * (it is only done for cpu hotplug).
 *
 * If there are kernel internal actions which can make use of a not-exact
 * value, and reading all cpu values becomes a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
667 */ 668 static unsigned long 669 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) 670 { 671 long val = 0; 672 int cpu; 673 674 /* Per-cpu values can be negative, use a signed accumulator */ 675 for_each_possible_cpu(cpu) 676 val += per_cpu(memcg->stat->count[idx], cpu); 677 /* 678 * Summing races with updates, so val may be negative. Avoid exposing 679 * transient negative values. 680 */ 681 if (val < 0) 682 val = 0; 683 return val; 684 } 685 686 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, 687 enum mem_cgroup_events_index idx) 688 { 689 unsigned long val = 0; 690 int cpu; 691 692 for_each_possible_cpu(cpu) 693 val += per_cpu(memcg->stat->events[idx], cpu); 694 return val; 695 } 696 697 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 698 struct page *page, 699 int nr_pages) 700 { 701 /* 702 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is 703 * counted as CACHE even if it's on ANON LRU. 704 */ 705 if (PageAnon(page)) 706 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], 707 nr_pages); 708 else 709 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], 710 nr_pages); 711 712 if (PageTransHuge(page)) 713 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 714 nr_pages); 715 716 /* pagein of a big page is an event. So, ignore page size */ 717 if (nr_pages > 0) 718 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); 719 else { 720 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); 721 nr_pages = -nr_pages; /* for event */ 722 } 723 724 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 725 } 726 727 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 728 int nid, 729 unsigned int lru_mask) 730 { 731 unsigned long nr = 0; 732 int zid; 733 734 VM_BUG_ON((unsigned)nid >= nr_node_ids); 735 736 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 737 struct mem_cgroup_per_zone *mz; 738 enum lru_list lru; 739 740 for_each_lru(lru) { 741 if (!(BIT(lru) & lru_mask)) 742 continue; 743 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 744 nr += mz->lru_size[lru]; 745 } 746 } 747 return nr; 748 } 749 750 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 751 unsigned int lru_mask) 752 { 753 unsigned long nr = 0; 754 int nid; 755 756 for_each_node_state(nid, N_MEMORY) 757 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); 758 return nr; 759 } 760 761 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 762 enum mem_cgroup_events_target target) 763 { 764 unsigned long val, next; 765 766 val = __this_cpu_read(memcg->stat->nr_page_events); 767 next = __this_cpu_read(memcg->stat->targets[target]); 768 /* from time_after() in jiffies.h */ 769 if ((long)next - (long)val < 0) { 770 switch (target) { 771 case MEM_CGROUP_TARGET_THRESH: 772 next = val + THRESHOLDS_EVENTS_TARGET; 773 break; 774 case MEM_CGROUP_TARGET_SOFTLIMIT: 775 next = val + SOFTLIMIT_EVENTS_TARGET; 776 break; 777 case MEM_CGROUP_TARGET_NUMAINFO: 778 next = val + NUMAINFO_EVENTS_TARGET; 779 break; 780 default: 781 break; 782 } 783 __this_cpu_write(memcg->stat->targets[target], next); 784 return true; 785 } 786 return false; 787 } 788 789 /* 790 * Check events in order. 
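 *
 * (Rough cadence, per the targets defined above: mem_cgroup_threshold() runs
 * every THRESHOLDS_EVENTS_TARGET (128) page events, the soft limit tree is
 * updated every SOFTLIMIT_EVENTS_TARGET (1024) events, and the NUMA scan
 * info every NUMAINFO_EVENTS_TARGET (1024) events.)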
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
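 *
 * As a usage sketch (this is what the for_each_mem_cgroup_tree() helper
 * below expands to), a full walk without a reclaim cookie looks like:
 *
 *	iter = mem_cgroup_iter(root, NULL, NULL);
 *	while (iter) {
 *		... visit iter ...
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 *
 * and a walk that is cut short must drop the reference it still holds via
 * mem_cgroup_iter_break(root, iter).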
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css)) {
			/*
			 * Make sure the memcg is initialized:
			 * mem_cgroup_css_online() orders the
			 * initialization against setting the flag.
			 */
			if (smp_load_acquire(&memcg->initialized))
				break;

			css_put(css);
		}

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
969 */ 970 (void)cmpxchg(&iter->position, pos, memcg); 971 972 if (pos) 973 css_put(&pos->css); 974 975 if (!memcg) 976 iter->generation++; 977 else if (!prev) 978 reclaim->generation = iter->generation; 979 } 980 981 out_unlock: 982 rcu_read_unlock(); 983 out: 984 if (prev && prev != root) 985 css_put(&prev->css); 986 987 return memcg; 988 } 989 990 /** 991 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 992 * @root: hierarchy root 993 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 994 */ 995 void mem_cgroup_iter_break(struct mem_cgroup *root, 996 struct mem_cgroup *prev) 997 { 998 if (!root) 999 root = root_mem_cgroup; 1000 if (prev && prev != root) 1001 css_put(&prev->css); 1002 } 1003 1004 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1005 { 1006 struct mem_cgroup *memcg = dead_memcg; 1007 struct mem_cgroup_reclaim_iter *iter; 1008 struct mem_cgroup_per_zone *mz; 1009 int nid, zid; 1010 int i; 1011 1012 while ((memcg = parent_mem_cgroup(memcg))) { 1013 for_each_node(nid) { 1014 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1015 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 1016 for (i = 0; i <= DEF_PRIORITY; i++) { 1017 iter = &mz->iter[i]; 1018 cmpxchg(&iter->position, 1019 dead_memcg, NULL); 1020 } 1021 } 1022 } 1023 } 1024 } 1025 1026 /* 1027 * Iteration constructs for visiting all cgroups (under a tree). If 1028 * loops are exited prematurely (break), mem_cgroup_iter_break() must 1029 * be used for reference counting. 1030 */ 1031 #define for_each_mem_cgroup_tree(iter, root) \ 1032 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 1033 iter != NULL; \ 1034 iter = mem_cgroup_iter(root, iter, NULL)) 1035 1036 #define for_each_mem_cgroup(iter) \ 1037 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 1038 iter != NULL; \ 1039 iter = mem_cgroup_iter(NULL, iter, NULL)) 1040 1041 /** 1042 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg 1043 * @zone: zone of the wanted lruvec 1044 * @memcg: memcg of the wanted lruvec 1045 * 1046 * Returns the lru list vector holding pages for the given @zone and 1047 * @mem. This can be the global zone lruvec, if the memory controller 1048 * is disabled. 1049 */ 1050 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, 1051 struct mem_cgroup *memcg) 1052 { 1053 struct mem_cgroup_per_zone *mz; 1054 struct lruvec *lruvec; 1055 1056 if (mem_cgroup_disabled()) { 1057 lruvec = &zone->lruvec; 1058 goto out; 1059 } 1060 1061 mz = mem_cgroup_zone_zoneinfo(memcg, zone); 1062 lruvec = &mz->lruvec; 1063 out: 1064 /* 1065 * Since a node can be onlined after the mem_cgroup was created, 1066 * we have to be prepared to initialize lruvec->zone here; 1067 * and if offlined then reonlined, we need to reinitialize it. 1068 */ 1069 if (unlikely(lruvec->zone != zone)) 1070 lruvec->zone = zone; 1071 return lruvec; 1072 } 1073 1074 /** 1075 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page 1076 * @page: the page 1077 * @zone: zone of the page 1078 * 1079 * This function is only safe when following the LRU page isolation 1080 * and putback protocol: the LRU lock must be held, and the page must 1081 * either be PageLRU() or the caller must have isolated/allocated it. 
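 *
 * A minimal usage sketch, assuming the caller already holds the zone's LRU
 * lock (zone->lru_lock) as required above:
 *
 *	spin_lock_irq(&zone->lru_lock);
 *	lruvec = mem_cgroup_page_lruvec(page, zone);
 *	... add the page to or remove it from lruvec->lists[lru] ...
 *	spin_unlock_irq(&zone->lru_lock);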
1082 */ 1083 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) 1084 { 1085 struct mem_cgroup_per_zone *mz; 1086 struct mem_cgroup *memcg; 1087 struct lruvec *lruvec; 1088 1089 if (mem_cgroup_disabled()) { 1090 lruvec = &zone->lruvec; 1091 goto out; 1092 } 1093 1094 memcg = page->mem_cgroup; 1095 /* 1096 * Swapcache readahead pages are added to the LRU - and 1097 * possibly migrated - before they are charged. 1098 */ 1099 if (!memcg) 1100 memcg = root_mem_cgroup; 1101 1102 mz = mem_cgroup_page_zoneinfo(memcg, page); 1103 lruvec = &mz->lruvec; 1104 out: 1105 /* 1106 * Since a node can be onlined after the mem_cgroup was created, 1107 * we have to be prepared to initialize lruvec->zone here; 1108 * and if offlined then reonlined, we need to reinitialize it. 1109 */ 1110 if (unlikely(lruvec->zone != zone)) 1111 lruvec->zone = zone; 1112 return lruvec; 1113 } 1114 1115 /** 1116 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1117 * @lruvec: mem_cgroup per zone lru vector 1118 * @lru: index of lru list the page is sitting on 1119 * @nr_pages: positive when adding or negative when removing 1120 * 1121 * This function must be called when a page is added to or removed from an 1122 * lru list. 1123 */ 1124 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1125 int nr_pages) 1126 { 1127 struct mem_cgroup_per_zone *mz; 1128 unsigned long *lru_size; 1129 1130 if (mem_cgroup_disabled()) 1131 return; 1132 1133 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec); 1134 lru_size = mz->lru_size + lru; 1135 *lru_size += nr_pages; 1136 VM_BUG_ON((long)(*lru_size) < 0); 1137 } 1138 1139 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) 1140 { 1141 struct mem_cgroup *task_memcg; 1142 struct task_struct *p; 1143 bool ret; 1144 1145 p = find_lock_task_mm(task); 1146 if (p) { 1147 task_memcg = get_mem_cgroup_from_mm(p->mm); 1148 task_unlock(p); 1149 } else { 1150 /* 1151 * All threads may have already detached their mm's, but the oom 1152 * killer still needs to detect if they have already been oom 1153 * killed to prevent needlessly killing additional tasks. 1154 */ 1155 rcu_read_lock(); 1156 task_memcg = mem_cgroup_from_task(task); 1157 css_get(&task_memcg->css); 1158 rcu_read_unlock(); 1159 } 1160 ret = mem_cgroup_is_descendant(task_memcg, memcg); 1161 css_put(&task_memcg->css); 1162 return ret; 1163 } 1164 1165 #define mem_cgroup_from_counter(counter, member) \ 1166 container_of(counter, struct mem_cgroup, member) 1167 1168 /** 1169 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1170 * @memcg: the memory cgroup 1171 * 1172 * Returns the maximum amount of memory @mem can be charged with, in 1173 * pages. 1174 */ 1175 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1176 { 1177 unsigned long margin = 0; 1178 unsigned long count; 1179 unsigned long limit; 1180 1181 count = page_counter_read(&memcg->memory); 1182 limit = READ_ONCE(memcg->memory.limit); 1183 if (count < limit) 1184 margin = limit - count; 1185 1186 if (do_swap_account) { 1187 count = page_counter_read(&memcg->memsw); 1188 limit = READ_ONCE(memcg->memsw.limit); 1189 if (count <= limit) 1190 margin = min(margin, limit - count); 1191 } 1192 1193 return margin; 1194 } 1195 1196 /* 1197 * A routine for checking "mem" is under move_account() or not. 1198 * 1199 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1200 * moving cgroups. 
This is for waiting at high-memory pressure 1201 * caused by "move". 1202 */ 1203 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1204 { 1205 struct mem_cgroup *from; 1206 struct mem_cgroup *to; 1207 bool ret = false; 1208 /* 1209 * Unlike task_move routines, we access mc.to, mc.from not under 1210 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1211 */ 1212 spin_lock(&mc.lock); 1213 from = mc.from; 1214 to = mc.to; 1215 if (!from) 1216 goto unlock; 1217 1218 ret = mem_cgroup_is_descendant(from, memcg) || 1219 mem_cgroup_is_descendant(to, memcg); 1220 unlock: 1221 spin_unlock(&mc.lock); 1222 return ret; 1223 } 1224 1225 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1226 { 1227 if (mc.moving_task && current != mc.moving_task) { 1228 if (mem_cgroup_under_move(memcg)) { 1229 DEFINE_WAIT(wait); 1230 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1231 /* moving charge context might have finished. */ 1232 if (mc.moving_task) 1233 schedule(); 1234 finish_wait(&mc.waitq, &wait); 1235 return true; 1236 } 1237 } 1238 return false; 1239 } 1240 1241 #define K(x) ((x) << (PAGE_SHIFT-10)) 1242 /** 1243 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. 1244 * @memcg: The memory cgroup that went over limit 1245 * @p: Task that is going to be killed 1246 * 1247 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1248 * enabled 1249 */ 1250 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1251 { 1252 /* oom_info_lock ensures that parallel ooms do not interleave */ 1253 static DEFINE_MUTEX(oom_info_lock); 1254 struct mem_cgroup *iter; 1255 unsigned int i; 1256 1257 mutex_lock(&oom_info_lock); 1258 rcu_read_lock(); 1259 1260 if (p) { 1261 pr_info("Task in "); 1262 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1263 pr_cont(" killed as a result of limit of "); 1264 } else { 1265 pr_info("Memory limit reached of cgroup "); 1266 } 1267 1268 pr_cont_cgroup_path(memcg->css.cgroup); 1269 pr_cont("\n"); 1270 1271 rcu_read_unlock(); 1272 1273 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1274 K((u64)page_counter_read(&memcg->memory)), 1275 K((u64)memcg->memory.limit), memcg->memory.failcnt); 1276 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1277 K((u64)page_counter_read(&memcg->memsw)), 1278 K((u64)memcg->memsw.limit), memcg->memsw.failcnt); 1279 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1280 K((u64)page_counter_read(&memcg->kmem)), 1281 K((u64)memcg->kmem.limit), memcg->kmem.failcnt); 1282 1283 for_each_mem_cgroup_tree(iter, memcg) { 1284 pr_info("Memory cgroup stats for "); 1285 pr_cont_cgroup_path(iter->css.cgroup); 1286 pr_cont(":"); 1287 1288 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1289 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1290 continue; 1291 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], 1292 K(mem_cgroup_read_stat(iter, i))); 1293 } 1294 1295 for (i = 0; i < NR_LRU_LISTS; i++) 1296 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i], 1297 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); 1298 1299 pr_cont("\n"); 1300 } 1301 mutex_unlock(&oom_info_lock); 1302 } 1303 1304 /* 1305 * This function returns the number of memcg under hierarchy tree. Returns 1306 * 1(self count) if no children. 
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;

		memsw_limit = memcg->memsw.limit;
		limit = min(limit + total_swap_pages, memsw_limit);
	}
	return limit;
}

static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	mutex_lock(&oom_lock);

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
		mark_oom_victim(current);
		goto unlock;
	}

	check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(&oc, task, totalpages)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				goto unlock;
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
			if (!points || points < chosen_points)
				continue;
			/* Prefer thread group leaders for display purposes */
			if (points == chosen_points &&
			    thread_group_leader(chosen))
				continue;

			if (chosen)
				put_task_struct(chosen);
			chosen = task;
			chosen_points = points;
			get_task_struct(chosen);
		}
		css_task_iter_end(&it);
	}

	if (chosen) {
		points = chosen_points * 1000 / totalpages;
		oom_kill_process(&oc, chosen, points, totalpages, memcg,
				 "Memory cgroup out of memory");
	}
unlock:
	mutex_unlock(&oom_lock);
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
					     int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Select a node to start reclaim from. Because all we need is to reduce the
 * usage counter, starting from anywhere is OK. Reclaiming from the current
 * node has both pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or have used, so it may hurt that node's LRU. And if several
 * threads hit their limits, they will contend on one node. But freeing from a
 * remote node costs more for reclaim because of memory latency.
 *
 * For now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to the
	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE, or
	 * the memcg is too small and none of its pages are on an LRU. In that
	 * case, we use the current node.
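	 *
	 * (For example, if scan_nodes ends up empty because every charged
	 * page is unevictable, next_node() and first_node() both return
	 * MAX_NUMNODES and we fall back to numa_node_id() below.)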
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * Reclaiming excess >> 2 at a time is not so
				 * excessive that we reclaim too much, nor so
				 * little that we keep coming back to reclaim
				 * from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
1586 */ 1587 failed = iter; 1588 mem_cgroup_iter_break(memcg, iter); 1589 break; 1590 } else 1591 iter->oom_lock = true; 1592 } 1593 1594 if (failed) { 1595 /* 1596 * OK, we failed to lock the whole subtree so we have 1597 * to clean up what we set up to the failing subtree 1598 */ 1599 for_each_mem_cgroup_tree(iter, memcg) { 1600 if (iter == failed) { 1601 mem_cgroup_iter_break(memcg, iter); 1602 break; 1603 } 1604 iter->oom_lock = false; 1605 } 1606 } else 1607 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1608 1609 spin_unlock(&memcg_oom_lock); 1610 1611 return !failed; 1612 } 1613 1614 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1615 { 1616 struct mem_cgroup *iter; 1617 1618 spin_lock(&memcg_oom_lock); 1619 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1620 for_each_mem_cgroup_tree(iter, memcg) 1621 iter->oom_lock = false; 1622 spin_unlock(&memcg_oom_lock); 1623 } 1624 1625 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1626 { 1627 struct mem_cgroup *iter; 1628 1629 spin_lock(&memcg_oom_lock); 1630 for_each_mem_cgroup_tree(iter, memcg) 1631 iter->under_oom++; 1632 spin_unlock(&memcg_oom_lock); 1633 } 1634 1635 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1636 { 1637 struct mem_cgroup *iter; 1638 1639 /* 1640 * When a new child is created while the hierarchy is under oom, 1641 * mem_cgroup_oom_lock() may not be called. Watch for underflow. 1642 */ 1643 spin_lock(&memcg_oom_lock); 1644 for_each_mem_cgroup_tree(iter, memcg) 1645 if (iter->under_oom > 0) 1646 iter->under_oom--; 1647 spin_unlock(&memcg_oom_lock); 1648 } 1649 1650 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1651 1652 struct oom_wait_info { 1653 struct mem_cgroup *memcg; 1654 wait_queue_t wait; 1655 }; 1656 1657 static int memcg_oom_wake_function(wait_queue_t *wait, 1658 unsigned mode, int sync, void *arg) 1659 { 1660 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1661 struct mem_cgroup *oom_wait_memcg; 1662 struct oom_wait_info *oom_wait_info; 1663 1664 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1665 oom_wait_memcg = oom_wait_info->memcg; 1666 1667 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1668 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1669 return 0; 1670 return autoremove_wake_function(wait, mode, sync, arg); 1671 } 1672 1673 static void memcg_oom_recover(struct mem_cgroup *memcg) 1674 { 1675 /* 1676 * For the following lockless ->under_oom test, the only required 1677 * guarantee is that it must see the state asserted by an OOM when 1678 * this function is called as a result of userland actions 1679 * triggered by the notification of the OOM. This is trivially 1680 * achieved by invoking mem_cgroup_mark_under_oom() before 1681 * triggering notification. 1682 */ 1683 if (memcg && memcg->under_oom) 1684 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1685 } 1686 1687 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1688 { 1689 if (!current->memcg_may_oom) 1690 return; 1691 /* 1692 * We are in the middle of the charge context here, so we 1693 * don't want to block when potentially sitting on a callstack 1694 * that holds all kinds of filesystem and mm locks. 1695 * 1696 * Also, the caller may handle a failed allocation gracefully 1697 * (like optional page cache readahead) and so an OOM killer 1698 * invocation might not even be necessary. 
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation. Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle || oom_killer_disabled)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges. Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
 * @page: page that is going to change accounted state
 *
 * This function must mark the beginning of an accounted page state
 * change to prevent double accounting when the page is concurrently
 * being moved to another memcg:
 *
 *   memcg = mem_cgroup_begin_page_stat(page);
 *   if (TestClearPageState(page))
 *     mem_cgroup_update_page_stat(memcg, state, -1);
 *   mem_cgroup_end_page_stat(memcg);
 */
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction. The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
1803 * 1804 * The RCU lock also protects the memcg from being freed when 1805 * the page state that is going to change is the only thing 1806 * preventing the page from being uncharged. 1807 * E.g. end-writeback clearing PageWriteback(), which allows 1808 * migration to go ahead and uncharge the page before the 1809 * account transaction might be complete. 1810 */ 1811 rcu_read_lock(); 1812 1813 if (mem_cgroup_disabled()) 1814 return NULL; 1815 again: 1816 memcg = page->mem_cgroup; 1817 if (unlikely(!memcg)) 1818 return NULL; 1819 1820 if (atomic_read(&memcg->moving_account) <= 0) 1821 return memcg; 1822 1823 spin_lock_irqsave(&memcg->move_lock, flags); 1824 if (memcg != page->mem_cgroup) { 1825 spin_unlock_irqrestore(&memcg->move_lock, flags); 1826 goto again; 1827 } 1828 1829 /* 1830 * When charge migration first begins, we can have locked and 1831 * unlocked page stat updates happening concurrently. Track 1832 * the task who has the lock for mem_cgroup_end_page_stat(). 1833 */ 1834 memcg->move_lock_task = current; 1835 memcg->move_lock_flags = flags; 1836 1837 return memcg; 1838 } 1839 EXPORT_SYMBOL(mem_cgroup_begin_page_stat); 1840 1841 /** 1842 * mem_cgroup_end_page_stat - finish a page state statistics transaction 1843 * @memcg: the memcg that was accounted against 1844 */ 1845 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) 1846 { 1847 if (memcg && memcg->move_lock_task == current) { 1848 unsigned long flags = memcg->move_lock_flags; 1849 1850 memcg->move_lock_task = NULL; 1851 memcg->move_lock_flags = 0; 1852 1853 spin_unlock_irqrestore(&memcg->move_lock, flags); 1854 } 1855 1856 rcu_read_unlock(); 1857 } 1858 EXPORT_SYMBOL(mem_cgroup_end_page_stat); 1859 1860 /* 1861 * size of first charge trial. "32" comes from vmscan.c's magic value. 1862 * TODO: maybe necessary to use big numbers in big irons. 1863 */ 1864 #define CHARGE_BATCH 32U 1865 struct memcg_stock_pcp { 1866 struct mem_cgroup *cached; /* this never be root cgroup */ 1867 unsigned int nr_pages; 1868 struct work_struct work; 1869 unsigned long flags; 1870 #define FLUSHING_CACHED_CHARGE 0 1871 }; 1872 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1873 static DEFINE_MUTEX(percpu_charge_mutex); 1874 1875 /** 1876 * consume_stock: Try to consume stocked charge on this cpu. 1877 * @memcg: memcg to consume from. 1878 * @nr_pages: how many pages to charge. 1879 * 1880 * The charges will only happen if @memcg matches the current cpu's memcg 1881 * stock, and at least @nr_pages are available in that stock. Failure to 1882 * service an allocation will refill the stock. 1883 * 1884 * returns true if successful, false otherwise. 1885 */ 1886 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1887 { 1888 struct memcg_stock_pcp *stock; 1889 bool ret = false; 1890 1891 if (nr_pages > CHARGE_BATCH) 1892 return ret; 1893 1894 stock = &get_cpu_var(memcg_stock); 1895 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 1896 stock->nr_pages -= nr_pages; 1897 ret = true; 1898 } 1899 put_cpu_var(memcg_stock); 1900 return ret; 1901 } 1902 1903 /* 1904 * Returns stocks cached in percpu and reset cached information. 
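 * (That is: uncharge whatever is still stocked back to the cached memcg's
 * page counters, drop the corresponding css references, and clear ->cached.)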
1905 */ 1906 static void drain_stock(struct memcg_stock_pcp *stock) 1907 { 1908 struct mem_cgroup *old = stock->cached; 1909 1910 if (stock->nr_pages) { 1911 page_counter_uncharge(&old->memory, stock->nr_pages); 1912 if (do_swap_account) 1913 page_counter_uncharge(&old->memsw, stock->nr_pages); 1914 css_put_many(&old->css, stock->nr_pages); 1915 stock->nr_pages = 0; 1916 } 1917 stock->cached = NULL; 1918 } 1919 1920 /* 1921 * This must be called under preempt disabled or must be called by 1922 * a thread which is pinned to local cpu. 1923 */ 1924 static void drain_local_stock(struct work_struct *dummy) 1925 { 1926 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock); 1927 drain_stock(stock); 1928 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1929 } 1930 1931 /* 1932 * Cache charges(val) to local per_cpu area. 1933 * This will be consumed by consume_stock() function, later. 1934 */ 1935 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1936 { 1937 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); 1938 1939 if (stock->cached != memcg) { /* reset if necessary */ 1940 drain_stock(stock); 1941 stock->cached = memcg; 1942 } 1943 stock->nr_pages += nr_pages; 1944 put_cpu_var(memcg_stock); 1945 } 1946 1947 /* 1948 * Drains all per-CPU charge caches for given root_memcg resp. subtree 1949 * of the hierarchy under it. 1950 */ 1951 static void drain_all_stock(struct mem_cgroup *root_memcg) 1952 { 1953 int cpu, curcpu; 1954 1955 /* If someone's already draining, avoid adding running more workers. */ 1956 if (!mutex_trylock(&percpu_charge_mutex)) 1957 return; 1958 /* Notify other cpus that system-wide "drain" is running */ 1959 get_online_cpus(); 1960 curcpu = get_cpu(); 1961 for_each_online_cpu(cpu) { 1962 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 1963 struct mem_cgroup *memcg; 1964 1965 memcg = stock->cached; 1966 if (!memcg || !stock->nr_pages) 1967 continue; 1968 if (!mem_cgroup_is_descendant(memcg, root_memcg)) 1969 continue; 1970 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 1971 if (cpu == curcpu) 1972 drain_local_stock(&stock->work); 1973 else 1974 schedule_work_on(cpu, &stock->work); 1975 } 1976 } 1977 put_cpu(); 1978 put_online_cpus(); 1979 mutex_unlock(&percpu_charge_mutex); 1980 } 1981 1982 static int memcg_cpu_hotplug_callback(struct notifier_block *nb, 1983 unsigned long action, 1984 void *hcpu) 1985 { 1986 int cpu = (unsigned long)hcpu; 1987 struct memcg_stock_pcp *stock; 1988 1989 if (action == CPU_ONLINE) 1990 return NOTIFY_OK; 1991 1992 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) 1993 return NOTIFY_OK; 1994 1995 stock = &per_cpu(memcg_stock, cpu); 1996 drain_stock(stock); 1997 return NOTIFY_OK; 1998 } 1999 2000 /* 2001 * Scheduled by try_charge() to be executed from the userland return path 2002 * and reclaims memory over the high limit. 
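 *
 * A minimal sketch of the hand-off; the return-to-userspace hook itself
 * lives outside this file:
 *
 *	// in try_charge(), once usage exceeds memcg->high:
 *	current->memcg_nr_pages_over_high += batch;
 *	set_notify_resume(current);
 *	// later, on the way back to userspace:
 *	mem_cgroup_handle_over_high();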
2003 */ 2004 void mem_cgroup_handle_over_high(void) 2005 { 2006 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2007 struct mem_cgroup *memcg, *pos; 2008 2009 if (likely(!nr_pages)) 2010 return; 2011 2012 pos = memcg = get_mem_cgroup_from_mm(current->mm); 2013 2014 do { 2015 if (page_counter_read(&pos->memory) <= pos->high) 2016 continue; 2017 mem_cgroup_events(pos, MEMCG_HIGH, 1); 2018 try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true); 2019 } while ((pos = parent_mem_cgroup(pos))); 2020 2021 css_put(&memcg->css); 2022 current->memcg_nr_pages_over_high = 0; 2023 } 2024 2025 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2026 unsigned int nr_pages) 2027 { 2028 unsigned int batch = max(CHARGE_BATCH, nr_pages); 2029 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2030 struct mem_cgroup *mem_over_limit; 2031 struct page_counter *counter; 2032 unsigned long nr_reclaimed; 2033 bool may_swap = true; 2034 bool drained = false; 2035 2036 if (mem_cgroup_is_root(memcg)) 2037 return 0; 2038 retry: 2039 if (consume_stock(memcg, nr_pages)) 2040 return 0; 2041 2042 if (!do_swap_account || 2043 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2044 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2045 goto done_restock; 2046 if (do_swap_account) 2047 page_counter_uncharge(&memcg->memsw, batch); 2048 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2049 } else { 2050 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2051 may_swap = false; 2052 } 2053 2054 if (batch > nr_pages) { 2055 batch = nr_pages; 2056 goto retry; 2057 } 2058 2059 /* 2060 * Unlike in global OOM situations, memcg is not in a physical 2061 * memory shortage. Allow dying and OOM-killed tasks to 2062 * bypass the last charges so that they can exit quickly and 2063 * free their memory. 2064 */ 2065 if (unlikely(test_thread_flag(TIF_MEMDIE) || 2066 fatal_signal_pending(current) || 2067 current->flags & PF_EXITING)) 2068 goto force; 2069 2070 if (unlikely(task_in_memcg_oom(current))) 2071 goto nomem; 2072 2073 if (!gfpflags_allow_blocking(gfp_mask)) 2074 goto nomem; 2075 2076 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1); 2077 2078 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2079 gfp_mask, may_swap); 2080 2081 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2082 goto retry; 2083 2084 if (!drained) { 2085 drain_all_stock(mem_over_limit); 2086 drained = true; 2087 goto retry; 2088 } 2089 2090 if (gfp_mask & __GFP_NORETRY) 2091 goto nomem; 2092 /* 2093 * Even though the limit is exceeded at this point, reclaim 2094 * may have been able to free some pages. Retry the charge 2095 * before killing the task. 2096 * 2097 * Only for regular pages, though: huge pages are rather 2098 * unlikely to succeed so close to the limit, and we fall back 2099 * to regular pages anyway in case of failure. 2100 */ 2101 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2102 goto retry; 2103 /* 2104 * At task move, charge accounts can be doubly counted. So, it's 2105 * better to wait until the end of task_move if something is going on. 
2106 */ 2107 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2108 goto retry; 2109 2110 if (nr_retries--) 2111 goto retry; 2112 2113 if (gfp_mask & __GFP_NOFAIL) 2114 goto force; 2115 2116 if (fatal_signal_pending(current)) 2117 goto force; 2118 2119 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1); 2120 2121 mem_cgroup_oom(mem_over_limit, gfp_mask, 2122 get_order(nr_pages * PAGE_SIZE)); 2123 nomem: 2124 if (!(gfp_mask & __GFP_NOFAIL)) 2125 return -ENOMEM; 2126 force: 2127 /* 2128 * The allocation either can't fail or will lead to more memory 2129 * being freed very soon. Allow memory usage go over the limit 2130 * temporarily by force charging it. 2131 */ 2132 page_counter_charge(&memcg->memory, nr_pages); 2133 if (do_swap_account) 2134 page_counter_charge(&memcg->memsw, nr_pages); 2135 css_get_many(&memcg->css, nr_pages); 2136 2137 return 0; 2138 2139 done_restock: 2140 css_get_many(&memcg->css, batch); 2141 if (batch > nr_pages) 2142 refill_stock(memcg, batch - nr_pages); 2143 2144 /* 2145 * If the hierarchy is above the normal consumption range, schedule 2146 * reclaim on returning to userland. We can perform reclaim here 2147 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2148 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2149 * not recorded as it most likely matches current's and won't 2150 * change in the meantime. As high limit is checked again before 2151 * reclaim, the cost of mismatch is negligible. 2152 */ 2153 do { 2154 if (page_counter_read(&memcg->memory) > memcg->high) { 2155 current->memcg_nr_pages_over_high += batch; 2156 set_notify_resume(current); 2157 break; 2158 } 2159 } while ((memcg = parent_mem_cgroup(memcg))); 2160 2161 return 0; 2162 } 2163 2164 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2165 { 2166 if (mem_cgroup_is_root(memcg)) 2167 return; 2168 2169 page_counter_uncharge(&memcg->memory, nr_pages); 2170 if (do_swap_account) 2171 page_counter_uncharge(&memcg->memsw, nr_pages); 2172 2173 css_put_many(&memcg->css, nr_pages); 2174 } 2175 2176 static void lock_page_lru(struct page *page, int *isolated) 2177 { 2178 struct zone *zone = page_zone(page); 2179 2180 spin_lock_irq(&zone->lru_lock); 2181 if (PageLRU(page)) { 2182 struct lruvec *lruvec; 2183 2184 lruvec = mem_cgroup_page_lruvec(page, zone); 2185 ClearPageLRU(page); 2186 del_page_from_lru_list(page, lruvec, page_lru(page)); 2187 *isolated = 1; 2188 } else 2189 *isolated = 0; 2190 } 2191 2192 static void unlock_page_lru(struct page *page, int isolated) 2193 { 2194 struct zone *zone = page_zone(page); 2195 2196 if (isolated) { 2197 struct lruvec *lruvec; 2198 2199 lruvec = mem_cgroup_page_lruvec(page, zone); 2200 VM_BUG_ON_PAGE(PageLRU(page), page); 2201 SetPageLRU(page); 2202 add_page_to_lru_list(page, lruvec, page_lru(page)); 2203 } 2204 spin_unlock_irq(&zone->lru_lock); 2205 } 2206 2207 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2208 bool lrucare) 2209 { 2210 int isolated; 2211 2212 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2213 2214 /* 2215 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2216 * may already be on some other mem_cgroup's LRU. Take care of it. 
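 *
 * For orientation, a rough sketch of the usual caller sequence; the
 * exported wrappers appear later in this file and their arguments are
 * abridged here:
 *
 *	mem_cgroup_try_charge(page, mm, gfp_mask, &memcg);	// reserves via try_charge()
 *	... insert the page into the page cache or page tables ...
 *	mem_cgroup_commit_charge(page, memcg, lrucare);		// ends up here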
2217 */ 2218 if (lrucare) 2219 lock_page_lru(page, &isolated); 2220 2221 /* 2222 * Nobody should be changing or seriously looking at 2223 * page->mem_cgroup at this point: 2224 * 2225 * - the page is uncharged 2226 * 2227 * - the page is off-LRU 2228 * 2229 * - an anonymous fault has exclusive page access, except for 2230 * a locked page table 2231 * 2232 * - a page cache insertion, a swapin fault, or a migration 2233 * have the page locked 2234 */ 2235 page->mem_cgroup = memcg; 2236 2237 if (lrucare) 2238 unlock_page_lru(page, isolated); 2239 } 2240 2241 #ifdef CONFIG_MEMCG_KMEM 2242 static int memcg_alloc_cache_id(void) 2243 { 2244 int id, size; 2245 int err; 2246 2247 id = ida_simple_get(&memcg_cache_ida, 2248 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2249 if (id < 0) 2250 return id; 2251 2252 if (id < memcg_nr_cache_ids) 2253 return id; 2254 2255 /* 2256 * There's no space for the new id in memcg_caches arrays, 2257 * so we have to grow them. 2258 */ 2259 down_write(&memcg_cache_ids_sem); 2260 2261 size = 2 * (id + 1); 2262 if (size < MEMCG_CACHES_MIN_SIZE) 2263 size = MEMCG_CACHES_MIN_SIZE; 2264 else if (size > MEMCG_CACHES_MAX_SIZE) 2265 size = MEMCG_CACHES_MAX_SIZE; 2266 2267 err = memcg_update_all_caches(size); 2268 if (!err) 2269 err = memcg_update_all_list_lrus(size); 2270 if (!err) 2271 memcg_nr_cache_ids = size; 2272 2273 up_write(&memcg_cache_ids_sem); 2274 2275 if (err) { 2276 ida_simple_remove(&memcg_cache_ida, id); 2277 return err; 2278 } 2279 return id; 2280 } 2281 2282 static void memcg_free_cache_id(int id) 2283 { 2284 ida_simple_remove(&memcg_cache_ida, id); 2285 } 2286 2287 struct memcg_kmem_cache_create_work { 2288 struct mem_cgroup *memcg; 2289 struct kmem_cache *cachep; 2290 struct work_struct work; 2291 }; 2292 2293 static void memcg_kmem_cache_create_func(struct work_struct *w) 2294 { 2295 struct memcg_kmem_cache_create_work *cw = 2296 container_of(w, struct memcg_kmem_cache_create_work, work); 2297 struct mem_cgroup *memcg = cw->memcg; 2298 struct kmem_cache *cachep = cw->cachep; 2299 2300 memcg_create_kmem_cache(memcg, cachep); 2301 2302 css_put(&memcg->css); 2303 kfree(cw); 2304 } 2305 2306 /* 2307 * Enqueue the creation of a per-memcg kmem_cache. 2308 */ 2309 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2310 struct kmem_cache *cachep) 2311 { 2312 struct memcg_kmem_cache_create_work *cw; 2313 2314 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2315 if (!cw) 2316 return; 2317 2318 css_get(&memcg->css); 2319 2320 cw->memcg = memcg; 2321 cw->cachep = cachep; 2322 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2323 2324 schedule_work(&cw->work); 2325 } 2326 2327 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2328 struct kmem_cache *cachep) 2329 { 2330 /* 2331 * We need to stop accounting when we kmalloc, because if the 2332 * corresponding kmalloc cache is not yet created, the first allocation 2333 * in __memcg_schedule_kmem_cache_create will recurse. 2334 * 2335 * However, it is better to enclose the whole function. Depending on 2336 * the debugging options enabled, INIT_WORK(), for instance, can 2337 * trigger an allocation. This too, will make us recurse. Because at 2338 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2339 * the safest choice is to do it like this, wrapping the whole function. 
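 *
 * Roughly, the recursion being broken by the flag looks like:
 *
 *	kmalloc(cw)				// in __memcg_schedule_kmem_cache_create()
 *	  -> __memcg_kmem_get_cache(cachep)	// cache still missing
 *	    -> memcg_schedule_kmem_cache_create()
 *	      -> kmalloc(cw) -> ...		// and so on, without the flag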
2340 */ 2341 current->memcg_kmem_skip_account = 1; 2342 __memcg_schedule_kmem_cache_create(memcg, cachep); 2343 current->memcg_kmem_skip_account = 0; 2344 } 2345 2346 /* 2347 * Return the kmem_cache we're supposed to use for a slab allocation. 2348 * We try to use the current memcg's version of the cache. 2349 * 2350 * If the cache does not exist yet, i.e. we are the first user of it, 2351 * we either create it immediately, if possible, or create it asynchronously 2352 * in a workqueue. 2353 * In the latter case, we will let the current allocation go through with 2354 * the original cache. 2355 * 2356 * Can't be called in interrupt context or from kernel threads. 2357 * This function needs to be called with rcu_read_lock() held. 2358 */ 2359 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep) 2360 { 2361 struct mem_cgroup *memcg; 2362 struct kmem_cache *memcg_cachep; 2363 int kmemcg_id; 2364 2365 VM_BUG_ON(!is_root_cache(cachep)); 2366 2367 if (current->memcg_kmem_skip_account) 2368 return cachep; 2369 2370 memcg = get_mem_cgroup_from_mm(current->mm); 2371 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2372 if (kmemcg_id < 0) 2373 goto out; 2374 2375 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2376 if (likely(memcg_cachep)) 2377 return memcg_cachep; 2378 2379 /* 2380 * If we are in a safe context (can wait, and not in interrupt 2381 * context), we could be predictable and return right away. 2382 * This would guarantee that the allocation being performed 2383 * already belongs in the new cache. 2384 * 2385 * However, there are some clashes that can arise from locking. 2386 * For instance, because we acquire the slab_mutex while doing 2387 * memcg_create_kmem_cache, this means no further allocation 2388 * could happen with the slab_mutex held. So it's better to 2389 * defer everything.
2390 */ 2391 memcg_schedule_kmem_cache_create(memcg, cachep); 2392 out: 2393 css_put(&memcg->css); 2394 return cachep; 2395 } 2396 2397 void __memcg_kmem_put_cache(struct kmem_cache *cachep) 2398 { 2399 if (!is_root_cache(cachep)) 2400 css_put(&cachep->memcg_params.memcg->css); 2401 } 2402 2403 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2404 struct mem_cgroup *memcg) 2405 { 2406 unsigned int nr_pages = 1 << order; 2407 struct page_counter *counter; 2408 int ret; 2409 2410 if (!memcg_kmem_is_active(memcg)) 2411 return 0; 2412 2413 if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) 2414 return -ENOMEM; 2415 2416 ret = try_charge(memcg, gfp, nr_pages); 2417 if (ret) { 2418 page_counter_uncharge(&memcg->kmem, nr_pages); 2419 return ret; 2420 } 2421 2422 page->mem_cgroup = memcg; 2423 2424 return 0; 2425 } 2426 2427 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2428 { 2429 struct mem_cgroup *memcg; 2430 int ret; 2431 2432 memcg = get_mem_cgroup_from_mm(current->mm); 2433 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); 2434 css_put(&memcg->css); 2435 return ret; 2436 } 2437 2438 void __memcg_kmem_uncharge(struct page *page, int order) 2439 { 2440 struct mem_cgroup *memcg = page->mem_cgroup; 2441 unsigned int nr_pages = 1 << order; 2442 2443 if (!memcg) 2444 return; 2445 2446 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2447 2448 page_counter_uncharge(&memcg->kmem, nr_pages); 2449 page_counter_uncharge(&memcg->memory, nr_pages); 2450 if (do_swap_account) 2451 page_counter_uncharge(&memcg->memsw, nr_pages); 2452 2453 page->mem_cgroup = NULL; 2454 css_put_many(&memcg->css, nr_pages); 2455 } 2456 #endif /* CONFIG_MEMCG_KMEM */ 2457 2458 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2459 2460 /* 2461 * Because tail pages are not marked as "used", set it. We're under 2462 * zone->lru_lock, 'splitting on pmd' and compound_lock. 2463 * charge/uncharge will be never happen and move_account() is done under 2464 * compound_lock(), so we don't have to take care of races. 2465 */ 2466 void mem_cgroup_split_huge_fixup(struct page *head) 2467 { 2468 int i; 2469 2470 if (mem_cgroup_disabled()) 2471 return; 2472 2473 for (i = 1; i < HPAGE_PMD_NR; i++) 2474 head[i].mem_cgroup = head->mem_cgroup; 2475 2476 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], 2477 HPAGE_PMD_NR); 2478 } 2479 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2480 2481 #ifdef CONFIG_MEMCG_SWAP 2482 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2483 bool charge) 2484 { 2485 int val = (charge) ? 1 : -1; 2486 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); 2487 } 2488 2489 /** 2490 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2491 * @entry: swap entry to be moved 2492 * @from: mem_cgroup which the entry is moved from 2493 * @to: mem_cgroup which the entry is moved to 2494 * 2495 * It succeeds only when the swap_cgroup's record for this entry is the same 2496 * as the mem_cgroup's id of @from. 2497 * 2498 * Returns 0 on success, -EINVAL on failure. 2499 * 2500 * The caller must have charged to @to, IOW, called page_counter_charge() about 2501 * both res and memsw, and called css_get(). 
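 *
 * Illustration with made-up ids: if the swap_cgroup record for @entry
 * currently reads mem_cgroup_id(@from) == 12, then
 * swap_cgroup_cmpxchg(entry, 12, 34) returns 12, the record now points
 * at @to (id 34), and the swap statistics are shifted between the two
 * groups; any other return value means the record changed under us and
 * we report -EINVAL.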
2502 */ 2503 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2504 struct mem_cgroup *from, struct mem_cgroup *to) 2505 { 2506 unsigned short old_id, new_id; 2507 2508 old_id = mem_cgroup_id(from); 2509 new_id = mem_cgroup_id(to); 2510 2511 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2512 mem_cgroup_swap_statistics(from, false); 2513 mem_cgroup_swap_statistics(to, true); 2514 return 0; 2515 } 2516 return -EINVAL; 2517 } 2518 #else 2519 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2520 struct mem_cgroup *from, struct mem_cgroup *to) 2521 { 2522 return -EINVAL; 2523 } 2524 #endif 2525 2526 static DEFINE_MUTEX(memcg_limit_mutex); 2527 2528 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 2529 unsigned long limit) 2530 { 2531 unsigned long curusage; 2532 unsigned long oldusage; 2533 bool enlarge = false; 2534 int retry_count; 2535 int ret; 2536 2537 /* 2538 * For keeping hierarchical_reclaim simple, how long we should retry 2539 * is depends on callers. We set our retry-count to be function 2540 * of # of children which we should visit in this loop. 2541 */ 2542 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2543 mem_cgroup_count_children(memcg); 2544 2545 oldusage = page_counter_read(&memcg->memory); 2546 2547 do { 2548 if (signal_pending(current)) { 2549 ret = -EINTR; 2550 break; 2551 } 2552 2553 mutex_lock(&memcg_limit_mutex); 2554 if (limit > memcg->memsw.limit) { 2555 mutex_unlock(&memcg_limit_mutex); 2556 ret = -EINVAL; 2557 break; 2558 } 2559 if (limit > memcg->memory.limit) 2560 enlarge = true; 2561 ret = page_counter_limit(&memcg->memory, limit); 2562 mutex_unlock(&memcg_limit_mutex); 2563 2564 if (!ret) 2565 break; 2566 2567 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); 2568 2569 curusage = page_counter_read(&memcg->memory); 2570 /* Usage is reduced ? */ 2571 if (curusage >= oldusage) 2572 retry_count--; 2573 else 2574 oldusage = curusage; 2575 } while (retry_count); 2576 2577 if (!ret && enlarge) 2578 memcg_oom_recover(memcg); 2579 2580 return ret; 2581 } 2582 2583 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 2584 unsigned long limit) 2585 { 2586 unsigned long curusage; 2587 unsigned long oldusage; 2588 bool enlarge = false; 2589 int retry_count; 2590 int ret; 2591 2592 /* see mem_cgroup_resize_res_limit */ 2593 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2594 mem_cgroup_count_children(memcg); 2595 2596 oldusage = page_counter_read(&memcg->memsw); 2597 2598 do { 2599 if (signal_pending(current)) { 2600 ret = -EINTR; 2601 break; 2602 } 2603 2604 mutex_lock(&memcg_limit_mutex); 2605 if (limit < memcg->memory.limit) { 2606 mutex_unlock(&memcg_limit_mutex); 2607 ret = -EINVAL; 2608 break; 2609 } 2610 if (limit > memcg->memsw.limit) 2611 enlarge = true; 2612 ret = page_counter_limit(&memcg->memsw, limit); 2613 mutex_unlock(&memcg_limit_mutex); 2614 2615 if (!ret) 2616 break; 2617 2618 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); 2619 2620 curusage = page_counter_read(&memcg->memsw); 2621 /* Usage is reduced ? 
*/ 2622 if (curusage >= oldusage) 2623 retry_count--; 2624 else 2625 oldusage = curusage; 2626 } while (retry_count); 2627 2628 if (!ret && enlarge) 2629 memcg_oom_recover(memcg); 2630 2631 return ret; 2632 } 2633 2634 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 2635 gfp_t gfp_mask, 2636 unsigned long *total_scanned) 2637 { 2638 unsigned long nr_reclaimed = 0; 2639 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 2640 unsigned long reclaimed; 2641 int loop = 0; 2642 struct mem_cgroup_tree_per_zone *mctz; 2643 unsigned long excess; 2644 unsigned long nr_scanned; 2645 2646 if (order > 0) 2647 return 0; 2648 2649 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); 2650 /* 2651 * This loop can run a while, specially if mem_cgroup's continuously 2652 * keep exceeding their soft limit and putting the system under 2653 * pressure 2654 */ 2655 do { 2656 if (next_mz) 2657 mz = next_mz; 2658 else 2659 mz = mem_cgroup_largest_soft_limit_node(mctz); 2660 if (!mz) 2661 break; 2662 2663 nr_scanned = 0; 2664 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, 2665 gfp_mask, &nr_scanned); 2666 nr_reclaimed += reclaimed; 2667 *total_scanned += nr_scanned; 2668 spin_lock_irq(&mctz->lock); 2669 __mem_cgroup_remove_exceeded(mz, mctz); 2670 2671 /* 2672 * If we failed to reclaim anything from this memory cgroup 2673 * it is time to move on to the next cgroup 2674 */ 2675 next_mz = NULL; 2676 if (!reclaimed) 2677 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2678 2679 excess = soft_limit_excess(mz->memcg); 2680 /* 2681 * One school of thought says that we should not add 2682 * back the node to the tree if reclaim returns 0. 2683 * But our reclaim could return 0, simply because due 2684 * to priority we are exposing a smaller subset of 2685 * memory to reclaim from. Consider this as a longer 2686 * term TODO. 2687 */ 2688 /* If excess == 0, no tree ops */ 2689 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2690 spin_unlock_irq(&mctz->lock); 2691 css_put(&mz->memcg->css); 2692 loop++; 2693 /* 2694 * Could not reclaim anything and there are no more 2695 * mem cgroups to try or we seem to be looping without 2696 * reclaiming anything. 2697 */ 2698 if (!nr_reclaimed && 2699 (next_mz == NULL || 2700 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2701 break; 2702 } while (!nr_reclaimed); 2703 if (next_mz) 2704 css_put(&next_mz->memcg->css); 2705 return nr_reclaimed; 2706 } 2707 2708 /* 2709 * Test whether @memcg has children, dead or alive. Note that this 2710 * function doesn't care whether @memcg has use_hierarchy enabled and 2711 * returns %true if there are child csses according to the cgroup 2712 * hierarchy. Testing use_hierarchy is the caller's responsiblity. 2713 */ 2714 static inline bool memcg_has_children(struct mem_cgroup *memcg) 2715 { 2716 bool ret; 2717 2718 /* 2719 * The lock does not prevent addition or deletion of children, but 2720 * it prevents a new child from being initialized based on this 2721 * parent in css_online(), so it's enough to decide whether 2722 * hierarchically inherited attributes can still be changed or not. 2723 */ 2724 lockdep_assert_held(&memcg_create_mutex); 2725 2726 rcu_read_lock(); 2727 ret = css_next_child(NULL, &memcg->css); 2728 rcu_read_unlock(); 2729 return ret; 2730 } 2731 2732 /* 2733 * Reclaims as many pages from the given memcg as possible and moves 2734 * the rest to the parent. 2735 * 2736 * Caller is responsible for holding css reference for memcg. 
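 *
 * This backs the legacy "memory.force_empty" knob wired up just below;
 * e.g. (illustrative) "echo 0 > memory.force_empty" keeps reclaiming,
 * with short writeback waits, until usage reaches zero, several rounds
 * make no progress, or a signal arrives.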
2737 */ 2738 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 2739 { 2740 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2741 2742 /* we call try-to-free pages to make this cgroup empty */ 2743 lru_add_drain_all(); 2744 /* try to free all pages in this cgroup */ 2745 while (nr_retries && page_counter_read(&memcg->memory)) { 2746 int progress; 2747 2748 if (signal_pending(current)) 2749 return -EINTR; 2750 2751 progress = try_to_free_mem_cgroup_pages(memcg, 1, 2752 GFP_KERNEL, true); 2753 if (!progress) { 2754 nr_retries--; 2755 /* maybe some writeback is necessary */ 2756 congestion_wait(BLK_RW_ASYNC, HZ/10); 2757 } 2758 2759 } 2760 2761 return 0; 2762 } 2763 2764 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 2765 char *buf, size_t nbytes, 2766 loff_t off) 2767 { 2768 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2769 2770 if (mem_cgroup_is_root(memcg)) 2771 return -EINVAL; 2772 return mem_cgroup_force_empty(memcg) ?: nbytes; 2773 } 2774 2775 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 2776 struct cftype *cft) 2777 { 2778 return mem_cgroup_from_css(css)->use_hierarchy; 2779 } 2780 2781 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 2782 struct cftype *cft, u64 val) 2783 { 2784 int retval = 0; 2785 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2786 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 2787 2788 mutex_lock(&memcg_create_mutex); 2789 2790 if (memcg->use_hierarchy == val) 2791 goto out; 2792 2793 /* 2794 * If the parent's use_hierarchy is set, we can't make any modifications 2795 * in the child subtrees. If it is unset, then the change can 2796 * occur, provided the current cgroup has no children. 2797 * 2798 * For the root cgroup, parent_memcg is NULL; we allow the value to be 2799 * set if there are no children.
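 *
 * Illustrative outcomes of the checks below: changing use_hierarchy
 * succeeds only while this group has no children; with children present
 * the write fails with -EBUSY, and changing it under a parent that
 * already has use_hierarchy set fails with -EINVAL.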
2800 */ 2801 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2802 (val == 1 || val == 0)) { 2803 if (!memcg_has_children(memcg)) 2804 memcg->use_hierarchy = val; 2805 else 2806 retval = -EBUSY; 2807 } else 2808 retval = -EINVAL; 2809 2810 out: 2811 mutex_unlock(&memcg_create_mutex); 2812 2813 return retval; 2814 } 2815 2816 static unsigned long tree_stat(struct mem_cgroup *memcg, 2817 enum mem_cgroup_stat_index idx) 2818 { 2819 struct mem_cgroup *iter; 2820 unsigned long val = 0; 2821 2822 for_each_mem_cgroup_tree(iter, memcg) 2823 val += mem_cgroup_read_stat(iter, idx); 2824 2825 return val; 2826 } 2827 2828 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2829 { 2830 unsigned long val; 2831 2832 if (mem_cgroup_is_root(memcg)) { 2833 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); 2834 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); 2835 if (swap) 2836 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); 2837 } else { 2838 if (!swap) 2839 val = page_counter_read(&memcg->memory); 2840 else 2841 val = page_counter_read(&memcg->memsw); 2842 } 2843 return val; 2844 } 2845 2846 enum { 2847 RES_USAGE, 2848 RES_LIMIT, 2849 RES_MAX_USAGE, 2850 RES_FAILCNT, 2851 RES_SOFT_LIMIT, 2852 }; 2853 2854 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 2855 struct cftype *cft) 2856 { 2857 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2858 struct page_counter *counter; 2859 2860 switch (MEMFILE_TYPE(cft->private)) { 2861 case _MEM: 2862 counter = &memcg->memory; 2863 break; 2864 case _MEMSWAP: 2865 counter = &memcg->memsw; 2866 break; 2867 case _KMEM: 2868 counter = &memcg->kmem; 2869 break; 2870 default: 2871 BUG(); 2872 } 2873 2874 switch (MEMFILE_ATTR(cft->private)) { 2875 case RES_USAGE: 2876 if (counter == &memcg->memory) 2877 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 2878 if (counter == &memcg->memsw) 2879 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 2880 return (u64)page_counter_read(counter) * PAGE_SIZE; 2881 case RES_LIMIT: 2882 return (u64)counter->limit * PAGE_SIZE; 2883 case RES_MAX_USAGE: 2884 return (u64)counter->watermark * PAGE_SIZE; 2885 case RES_FAILCNT: 2886 return counter->failcnt; 2887 case RES_SOFT_LIMIT: 2888 return (u64)memcg->soft_limit * PAGE_SIZE; 2889 default: 2890 BUG(); 2891 } 2892 } 2893 2894 #ifdef CONFIG_MEMCG_KMEM 2895 static int memcg_activate_kmem(struct mem_cgroup *memcg, 2896 unsigned long nr_pages) 2897 { 2898 int err = 0; 2899 int memcg_id; 2900 2901 BUG_ON(memcg->kmemcg_id >= 0); 2902 BUG_ON(memcg->kmem_acct_activated); 2903 BUG_ON(memcg->kmem_acct_active); 2904 2905 /* 2906 * For simplicity, we won't allow this to be disabled. It also can't 2907 * be changed if the cgroup has children already, or if tasks had 2908 * already joined. 2909 * 2910 * If tasks join before we set the limit, a person looking at 2911 * kmem.usage_in_bytes will have no way to determine when it took 2912 * place, which makes the value quite meaningless. 2913 * 2914 * After it first became limited, changes in the value of the limit are 2915 * of course permitted. 
2916 */ 2917 mutex_lock(&memcg_create_mutex); 2918 if (cgroup_is_populated(memcg->css.cgroup) || 2919 (memcg->use_hierarchy && memcg_has_children(memcg))) 2920 err = -EBUSY; 2921 mutex_unlock(&memcg_create_mutex); 2922 if (err) 2923 goto out; 2924 2925 memcg_id = memcg_alloc_cache_id(); 2926 if (memcg_id < 0) { 2927 err = memcg_id; 2928 goto out; 2929 } 2930 2931 /* 2932 * We couldn't have accounted to this cgroup, because it hasn't got 2933 * activated yet, so this should succeed. 2934 */ 2935 err = page_counter_limit(&memcg->kmem, nr_pages); 2936 VM_BUG_ON(err); 2937 2938 static_key_slow_inc(&memcg_kmem_enabled_key); 2939 /* 2940 * A memory cgroup is considered kmem-active as soon as it gets 2941 * kmemcg_id. Setting the id after enabling static branching will 2942 * guarantee no one starts accounting before all call sites are 2943 * patched. 2944 */ 2945 memcg->kmemcg_id = memcg_id; 2946 memcg->kmem_acct_activated = true; 2947 memcg->kmem_acct_active = true; 2948 out: 2949 return err; 2950 } 2951 2952 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2953 unsigned long limit) 2954 { 2955 int ret; 2956 2957 mutex_lock(&memcg_limit_mutex); 2958 if (!memcg_kmem_is_active(memcg)) 2959 ret = memcg_activate_kmem(memcg, limit); 2960 else 2961 ret = page_counter_limit(&memcg->kmem, limit); 2962 mutex_unlock(&memcg_limit_mutex); 2963 return ret; 2964 } 2965 2966 static int memcg_propagate_kmem(struct mem_cgroup *memcg) 2967 { 2968 int ret = 0; 2969 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 2970 2971 if (!parent) 2972 return 0; 2973 2974 mutex_lock(&memcg_limit_mutex); 2975 /* 2976 * If the parent cgroup is not kmem-active now, it cannot be activated 2977 * after this point, because it has at least one child already. 2978 */ 2979 if (memcg_kmem_is_active(parent)) 2980 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); 2981 mutex_unlock(&memcg_limit_mutex); 2982 return ret; 2983 } 2984 #else 2985 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2986 unsigned long limit) 2987 { 2988 return -EINVAL; 2989 } 2990 #endif /* CONFIG_MEMCG_KMEM */ 2991 2992 /* 2993 * The user of this function is... 2994 * RES_LIMIT. 
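 *
 * Illustrative example: "echo 512M > memory.limit_in_bytes" arrives here
 * with buf == "512M"; page_counter_memparse(buf, "-1", &nr_pages) turns
 * that into nr_pages == 131072 with 4K pages, while writing "-1" selects
 * PAGE_COUNTER_MAX, i.e. no limit.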
2995 */ 2996 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 2997 char *buf, size_t nbytes, loff_t off) 2998 { 2999 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3000 unsigned long nr_pages; 3001 int ret; 3002 3003 buf = strstrip(buf); 3004 ret = page_counter_memparse(buf, "-1", &nr_pages); 3005 if (ret) 3006 return ret; 3007 3008 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3009 case RES_LIMIT: 3010 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3011 ret = -EINVAL; 3012 break; 3013 } 3014 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3015 case _MEM: 3016 ret = mem_cgroup_resize_limit(memcg, nr_pages); 3017 break; 3018 case _MEMSWAP: 3019 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); 3020 break; 3021 case _KMEM: 3022 ret = memcg_update_kmem_limit(memcg, nr_pages); 3023 break; 3024 } 3025 break; 3026 case RES_SOFT_LIMIT: 3027 memcg->soft_limit = nr_pages; 3028 ret = 0; 3029 break; 3030 } 3031 return ret ?: nbytes; 3032 } 3033 3034 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3035 size_t nbytes, loff_t off) 3036 { 3037 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3038 struct page_counter *counter; 3039 3040 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3041 case _MEM: 3042 counter = &memcg->memory; 3043 break; 3044 case _MEMSWAP: 3045 counter = &memcg->memsw; 3046 break; 3047 case _KMEM: 3048 counter = &memcg->kmem; 3049 break; 3050 default: 3051 BUG(); 3052 } 3053 3054 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3055 case RES_MAX_USAGE: 3056 page_counter_reset_watermark(counter); 3057 break; 3058 case RES_FAILCNT: 3059 counter->failcnt = 0; 3060 break; 3061 default: 3062 BUG(); 3063 } 3064 3065 return nbytes; 3066 } 3067 3068 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3069 struct cftype *cft) 3070 { 3071 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3072 } 3073 3074 #ifdef CONFIG_MMU 3075 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3076 struct cftype *cft, u64 val) 3077 { 3078 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3079 3080 if (val & ~MOVE_MASK) 3081 return -EINVAL; 3082 3083 /* 3084 * No kind of locking is needed in here, because ->can_attach() will 3085 * check this value once in the beginning of the process, and then carry 3086 * on with stale data. This means that changes to this value will only 3087 * affect task migrations starting after the change. 
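 *
 * Illustrative example: "echo 3 > memory.move_charge_at_immigrate" sets
 * val == MOVE_ANON | MOVE_FILE, so both anonymous and file pages charged
 * to a task follow it on migration; any bit outside MOVE_MASK is rejected
 * with -EINVAL above.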
3088 */ 3089 memcg->move_charge_at_immigrate = val; 3090 return 0; 3091 } 3092 #else 3093 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3094 struct cftype *cft, u64 val) 3095 { 3096 return -ENOSYS; 3097 } 3098 #endif 3099 3100 #ifdef CONFIG_NUMA 3101 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3102 { 3103 struct numa_stat { 3104 const char *name; 3105 unsigned int lru_mask; 3106 }; 3107 3108 static const struct numa_stat stats[] = { 3109 { "total", LRU_ALL }, 3110 { "file", LRU_ALL_FILE }, 3111 { "anon", LRU_ALL_ANON }, 3112 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3113 }; 3114 const struct numa_stat *stat; 3115 int nid; 3116 unsigned long nr; 3117 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3118 3119 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3120 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3121 seq_printf(m, "%s=%lu", stat->name, nr); 3122 for_each_node_state(nid, N_MEMORY) { 3123 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3124 stat->lru_mask); 3125 seq_printf(m, " N%d=%lu", nid, nr); 3126 } 3127 seq_putc(m, '\n'); 3128 } 3129 3130 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3131 struct mem_cgroup *iter; 3132 3133 nr = 0; 3134 for_each_mem_cgroup_tree(iter, memcg) 3135 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3136 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3137 for_each_node_state(nid, N_MEMORY) { 3138 nr = 0; 3139 for_each_mem_cgroup_tree(iter, memcg) 3140 nr += mem_cgroup_node_nr_lru_pages( 3141 iter, nid, stat->lru_mask); 3142 seq_printf(m, " N%d=%lu", nid, nr); 3143 } 3144 seq_putc(m, '\n'); 3145 } 3146 3147 return 0; 3148 } 3149 #endif /* CONFIG_NUMA */ 3150 3151 static int memcg_stat_show(struct seq_file *m, void *v) 3152 { 3153 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3154 unsigned long memory, memsw; 3155 struct mem_cgroup *mi; 3156 unsigned int i; 3157 3158 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) != 3159 MEM_CGROUP_STAT_NSTATS); 3160 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) != 3161 MEM_CGROUP_EVENTS_NSTATS); 3162 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3163 3164 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3165 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3166 continue; 3167 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], 3168 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3169 } 3170 3171 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) 3172 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i], 3173 mem_cgroup_read_events(memcg, i)); 3174 3175 for (i = 0; i < NR_LRU_LISTS; i++) 3176 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3177 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3178 3179 /* Hierarchical information */ 3180 memory = memsw = PAGE_COUNTER_MAX; 3181 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3182 memory = min(memory, mi->memory.limit); 3183 memsw = min(memsw, mi->memsw.limit); 3184 } 3185 seq_printf(m, "hierarchical_memory_limit %llu\n", 3186 (u64)memory * PAGE_SIZE); 3187 if (do_swap_account) 3188 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3189 (u64)memsw * PAGE_SIZE); 3190 3191 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3192 unsigned long long val = 0; 3193 3194 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3195 continue; 3196 for_each_mem_cgroup_tree(mi, memcg) 3197 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3198 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); 3199 } 3200 3201 for (i = 0; i < 
MEM_CGROUP_EVENTS_NSTATS; i++) { 3202 unsigned long long val = 0; 3203 3204 for_each_mem_cgroup_tree(mi, memcg) 3205 val += mem_cgroup_read_events(mi, i); 3206 seq_printf(m, "total_%s %llu\n", 3207 mem_cgroup_events_names[i], val); 3208 } 3209 3210 for (i = 0; i < NR_LRU_LISTS; i++) { 3211 unsigned long long val = 0; 3212 3213 for_each_mem_cgroup_tree(mi, memcg) 3214 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3215 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3216 } 3217 3218 #ifdef CONFIG_DEBUG_VM 3219 { 3220 int nid, zid; 3221 struct mem_cgroup_per_zone *mz; 3222 struct zone_reclaim_stat *rstat; 3223 unsigned long recent_rotated[2] = {0, 0}; 3224 unsigned long recent_scanned[2] = {0, 0}; 3225 3226 for_each_online_node(nid) 3227 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 3228 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; 3229 rstat = &mz->lruvec.reclaim_stat; 3230 3231 recent_rotated[0] += rstat->recent_rotated[0]; 3232 recent_rotated[1] += rstat->recent_rotated[1]; 3233 recent_scanned[0] += rstat->recent_scanned[0]; 3234 recent_scanned[1] += rstat->recent_scanned[1]; 3235 } 3236 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3237 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3238 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3239 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3240 } 3241 #endif 3242 3243 return 0; 3244 } 3245 3246 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3247 struct cftype *cft) 3248 { 3249 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3250 3251 return mem_cgroup_swappiness(memcg); 3252 } 3253 3254 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3255 struct cftype *cft, u64 val) 3256 { 3257 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3258 3259 if (val > 100) 3260 return -EINVAL; 3261 3262 if (css->parent) 3263 memcg->swappiness = val; 3264 else 3265 vm_swappiness = val; 3266 3267 return 0; 3268 } 3269 3270 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3271 { 3272 struct mem_cgroup_threshold_ary *t; 3273 unsigned long usage; 3274 int i; 3275 3276 rcu_read_lock(); 3277 if (!swap) 3278 t = rcu_dereference(memcg->thresholds.primary); 3279 else 3280 t = rcu_dereference(memcg->memsw_thresholds.primary); 3281 3282 if (!t) 3283 goto unlock; 3284 3285 usage = mem_cgroup_usage(memcg, swap); 3286 3287 /* 3288 * current_threshold points to threshold just below or equal to usage. 3289 * If it's not true, a threshold was crossed after last 3290 * call of __mem_cgroup_threshold(). 3291 */ 3292 i = t->current_threshold; 3293 3294 /* 3295 * Iterate backward over array of thresholds starting from 3296 * current_threshold and check if a threshold is crossed. 3297 * If none of thresholds below usage is crossed, we read 3298 * only one element of the array here. 3299 */ 3300 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3301 eventfd_signal(t->entries[i].eventfd, 1); 3302 3303 /* i = current_threshold + 1 */ 3304 i++; 3305 3306 /* 3307 * Iterate forward over array of thresholds starting from 3308 * current_threshold+1 and check if a threshold is crossed. 3309 * If none of thresholds above usage is crossed, we read 3310 * only one element of the array here. 
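 *
 * Worked example (numbers made up): with thresholds {4M, 8M, 16M},
 * current_threshold == 1 (the 8M entry) and usage now at 18M, the
 * backward scan above signals nothing, the forward scan below signals
 * the 16M eventfd, and current_threshold ends up at index 2.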
3311 */ 3312 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3313 eventfd_signal(t->entries[i].eventfd, 1); 3314 3315 /* Update current_threshold */ 3316 t->current_threshold = i - 1; 3317 unlock: 3318 rcu_read_unlock(); 3319 } 3320 3321 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3322 { 3323 while (memcg) { 3324 __mem_cgroup_threshold(memcg, false); 3325 if (do_swap_account) 3326 __mem_cgroup_threshold(memcg, true); 3327 3328 memcg = parent_mem_cgroup(memcg); 3329 } 3330 } 3331 3332 static int compare_thresholds(const void *a, const void *b) 3333 { 3334 const struct mem_cgroup_threshold *_a = a; 3335 const struct mem_cgroup_threshold *_b = b; 3336 3337 if (_a->threshold > _b->threshold) 3338 return 1; 3339 3340 if (_a->threshold < _b->threshold) 3341 return -1; 3342 3343 return 0; 3344 } 3345 3346 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3347 { 3348 struct mem_cgroup_eventfd_list *ev; 3349 3350 spin_lock(&memcg_oom_lock); 3351 3352 list_for_each_entry(ev, &memcg->oom_notify, list) 3353 eventfd_signal(ev->eventfd, 1); 3354 3355 spin_unlock(&memcg_oom_lock); 3356 return 0; 3357 } 3358 3359 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3360 { 3361 struct mem_cgroup *iter; 3362 3363 for_each_mem_cgroup_tree(iter, memcg) 3364 mem_cgroup_oom_notify_cb(iter); 3365 } 3366 3367 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3368 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3369 { 3370 struct mem_cgroup_thresholds *thresholds; 3371 struct mem_cgroup_threshold_ary *new; 3372 unsigned long threshold; 3373 unsigned long usage; 3374 int i, size, ret; 3375 3376 ret = page_counter_memparse(args, "-1", &threshold); 3377 if (ret) 3378 return ret; 3379 3380 mutex_lock(&memcg->thresholds_lock); 3381 3382 if (type == _MEM) { 3383 thresholds = &memcg->thresholds; 3384 usage = mem_cgroup_usage(memcg, false); 3385 } else if (type == _MEMSWAP) { 3386 thresholds = &memcg->memsw_thresholds; 3387 usage = mem_cgroup_usage(memcg, true); 3388 } else 3389 BUG(); 3390 3391 /* Check if a threshold crossed before adding a new one */ 3392 if (thresholds->primary) 3393 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3394 3395 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3396 3397 /* Allocate memory for new array of thresholds */ 3398 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3399 GFP_KERNEL); 3400 if (!new) { 3401 ret = -ENOMEM; 3402 goto unlock; 3403 } 3404 new->size = size; 3405 3406 /* Copy thresholds (if any) to new array */ 3407 if (thresholds->primary) { 3408 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3409 sizeof(struct mem_cgroup_threshold)); 3410 } 3411 3412 /* Add new threshold */ 3413 new->entries[size - 1].eventfd = eventfd; 3414 new->entries[size - 1].threshold = threshold; 3415 3416 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3417 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3418 compare_thresholds, NULL); 3419 3420 /* Find current threshold */ 3421 new->current_threshold = -1; 3422 for (i = 0; i < size; i++) { 3423 if (new->entries[i].threshold <= usage) { 3424 /* 3425 * new->current_threshold will not be used until 3426 * rcu_assign_pointer(), so it's safe to increment 3427 * it here. 
3428 */ 3429 ++new->current_threshold; 3430 } else 3431 break; 3432 } 3433 3434 /* Free old spare buffer and save old primary buffer as spare */ 3435 kfree(thresholds->spare); 3436 thresholds->spare = thresholds->primary; 3437 3438 rcu_assign_pointer(thresholds->primary, new); 3439 3440 /* To be sure that nobody uses thresholds */ 3441 synchronize_rcu(); 3442 3443 unlock: 3444 mutex_unlock(&memcg->thresholds_lock); 3445 3446 return ret; 3447 } 3448 3449 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3450 struct eventfd_ctx *eventfd, const char *args) 3451 { 3452 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3453 } 3454 3455 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3456 struct eventfd_ctx *eventfd, const char *args) 3457 { 3458 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3459 } 3460 3461 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3462 struct eventfd_ctx *eventfd, enum res_type type) 3463 { 3464 struct mem_cgroup_thresholds *thresholds; 3465 struct mem_cgroup_threshold_ary *new; 3466 unsigned long usage; 3467 int i, j, size; 3468 3469 mutex_lock(&memcg->thresholds_lock); 3470 3471 if (type == _MEM) { 3472 thresholds = &memcg->thresholds; 3473 usage = mem_cgroup_usage(memcg, false); 3474 } else if (type == _MEMSWAP) { 3475 thresholds = &memcg->memsw_thresholds; 3476 usage = mem_cgroup_usage(memcg, true); 3477 } else 3478 BUG(); 3479 3480 if (!thresholds->primary) 3481 goto unlock; 3482 3483 /* Check if a threshold crossed before removing */ 3484 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3485 3486 /* Calculate new number of threshold */ 3487 size = 0; 3488 for (i = 0; i < thresholds->primary->size; i++) { 3489 if (thresholds->primary->entries[i].eventfd != eventfd) 3490 size++; 3491 } 3492 3493 new = thresholds->spare; 3494 3495 /* Set thresholds array to NULL if we don't have thresholds */ 3496 if (!size) { 3497 kfree(new); 3498 new = NULL; 3499 goto swap_buffers; 3500 } 3501 3502 new->size = size; 3503 3504 /* Copy thresholds and find current threshold */ 3505 new->current_threshold = -1; 3506 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3507 if (thresholds->primary->entries[i].eventfd == eventfd) 3508 continue; 3509 3510 new->entries[j] = thresholds->primary->entries[i]; 3511 if (new->entries[j].threshold <= usage) { 3512 /* 3513 * new->current_threshold will not be used 3514 * until rcu_assign_pointer(), so it's safe to increment 3515 * it here. 
3516 */ 3517 ++new->current_threshold; 3518 } 3519 j++; 3520 } 3521 3522 swap_buffers: 3523 /* Swap primary and spare array */ 3524 thresholds->spare = thresholds->primary; 3525 /* If all events are unregistered, free the spare array */ 3526 if (!new) { 3527 kfree(thresholds->spare); 3528 thresholds->spare = NULL; 3529 } 3530 3531 rcu_assign_pointer(thresholds->primary, new); 3532 3533 /* To be sure that nobody uses thresholds */ 3534 synchronize_rcu(); 3535 unlock: 3536 mutex_unlock(&memcg->thresholds_lock); 3537 } 3538 3539 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3540 struct eventfd_ctx *eventfd) 3541 { 3542 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3543 } 3544 3545 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3546 struct eventfd_ctx *eventfd) 3547 { 3548 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3549 } 3550 3551 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3552 struct eventfd_ctx *eventfd, const char *args) 3553 { 3554 struct mem_cgroup_eventfd_list *event; 3555 3556 event = kmalloc(sizeof(*event), GFP_KERNEL); 3557 if (!event) 3558 return -ENOMEM; 3559 3560 spin_lock(&memcg_oom_lock); 3561 3562 event->eventfd = eventfd; 3563 list_add(&event->list, &memcg->oom_notify); 3564 3565 /* already in OOM ? */ 3566 if (memcg->under_oom) 3567 eventfd_signal(eventfd, 1); 3568 spin_unlock(&memcg_oom_lock); 3569 3570 return 0; 3571 } 3572 3573 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3574 struct eventfd_ctx *eventfd) 3575 { 3576 struct mem_cgroup_eventfd_list *ev, *tmp; 3577 3578 spin_lock(&memcg_oom_lock); 3579 3580 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3581 if (ev->eventfd == eventfd) { 3582 list_del(&ev->list); 3583 kfree(ev); 3584 } 3585 } 3586 3587 spin_unlock(&memcg_oom_lock); 3588 } 3589 3590 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3591 { 3592 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3593 3594 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3595 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3596 return 0; 3597 } 3598 3599 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3600 struct cftype *cft, u64 val) 3601 { 3602 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3603 3604 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3605 if (!css->parent || !((val == 0) || (val == 1))) 3606 return -EINVAL; 3607 3608 memcg->oom_kill_disable = val; 3609 if (!val) 3610 memcg_oom_recover(memcg); 3611 3612 return 0; 3613 } 3614 3615 #ifdef CONFIG_MEMCG_KMEM 3616 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 3617 { 3618 int ret; 3619 3620 ret = memcg_propagate_kmem(memcg); 3621 if (ret) 3622 return ret; 3623 3624 return mem_cgroup_sockets_init(memcg, ss); 3625 } 3626 3627 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) 3628 { 3629 struct cgroup_subsys_state *css; 3630 struct mem_cgroup *parent, *child; 3631 int kmemcg_id; 3632 3633 if (!memcg->kmem_acct_active) 3634 return; 3635 3636 /* 3637 * Clear the 'active' flag before clearing memcg_caches arrays entries. 3638 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it 3639 * guarantees no cache will be created for this cgroup after we are 3640 * done (see memcg_create_kmem_cache()). 
3641 */ 3642 memcg->kmem_acct_active = false; 3643 3644 memcg_deactivate_kmem_caches(memcg); 3645 3646 kmemcg_id = memcg->kmemcg_id; 3647 BUG_ON(kmemcg_id < 0); 3648 3649 parent = parent_mem_cgroup(memcg); 3650 if (!parent) 3651 parent = root_mem_cgroup; 3652 3653 /* 3654 * Change kmemcg_id of this cgroup and all its descendants to the 3655 * parent's id, and then move all entries from this cgroup's list_lrus 3656 * to ones of the parent. After we have finished, all list_lrus 3657 * corresponding to this cgroup are guaranteed to remain empty. The 3658 * ordering is imposed by list_lru_node->lock taken by 3659 * memcg_drain_all_list_lrus(). 3660 */ 3661 css_for_each_descendant_pre(css, &memcg->css) { 3662 child = mem_cgroup_from_css(css); 3663 BUG_ON(child->kmemcg_id != kmemcg_id); 3664 child->kmemcg_id = parent->kmemcg_id; 3665 if (!memcg->use_hierarchy) 3666 break; 3667 } 3668 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 3669 3670 memcg_free_cache_id(kmemcg_id); 3671 } 3672 3673 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 3674 { 3675 if (memcg->kmem_acct_activated) { 3676 memcg_destroy_kmem_caches(memcg); 3677 static_key_slow_dec(&memcg_kmem_enabled_key); 3678 WARN_ON(page_counter_read(&memcg->kmem)); 3679 } 3680 mem_cgroup_sockets_destroy(memcg); 3681 } 3682 #else 3683 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 3684 { 3685 return 0; 3686 } 3687 3688 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) 3689 { 3690 } 3691 3692 static void memcg_destroy_kmem(struct mem_cgroup *memcg) 3693 { 3694 } 3695 #endif 3696 3697 #ifdef CONFIG_CGROUP_WRITEBACK 3698 3699 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) 3700 { 3701 return &memcg->cgwb_list; 3702 } 3703 3704 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3705 { 3706 return wb_domain_init(&memcg->cgwb_domain, gfp); 3707 } 3708 3709 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3710 { 3711 wb_domain_exit(&memcg->cgwb_domain); 3712 } 3713 3714 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3715 { 3716 wb_domain_size_changed(&memcg->cgwb_domain); 3717 } 3718 3719 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3720 { 3721 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3722 3723 if (!memcg->css.parent) 3724 return NULL; 3725 3726 return &memcg->cgwb_domain; 3727 } 3728 3729 /** 3730 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3731 * @wb: bdi_writeback in question 3732 * @pfilepages: out parameter for number of file pages 3733 * @pheadroom: out parameter for number of allocatable pages according to memcg 3734 * @pdirty: out parameter for number of dirty pages 3735 * @pwriteback: out parameter for number of pages under writeback 3736 * 3737 * Determine the numbers of file, headroom, dirty, and writeback pages in 3738 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3739 * is a bit more involved. 3740 * 3741 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3742 * headroom is calculated as the lowest headroom of itself and the 3743 * ancestors. Note that this doesn't consider the actual amount of 3744 * available memory in the system. The caller should further cap 3745 * *@pheadroom accordingly. 
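 *
 * Worked example (numbers made up): a memcg with limit == 200M,
 * high == 150M and usage == 120M contributes min(200M, 150M) - 120M ==
 * 30M of headroom; if an ancestor only has 10M to spare, *@pheadroom
 * comes back as 10M, the minimum over the hierarchy.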
3746 */ 3747 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3748 unsigned long *pheadroom, unsigned long *pdirty, 3749 unsigned long *pwriteback) 3750 { 3751 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3752 struct mem_cgroup *parent; 3753 3754 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3755 3756 /* this should eventually include NR_UNSTABLE_NFS */ 3757 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3758 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3759 (1 << LRU_ACTIVE_FILE)); 3760 *pheadroom = PAGE_COUNTER_MAX; 3761 3762 while ((parent = parent_mem_cgroup(memcg))) { 3763 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3764 unsigned long used = page_counter_read(&memcg->memory); 3765 3766 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3767 memcg = parent; 3768 } 3769 } 3770 3771 #else /* CONFIG_CGROUP_WRITEBACK */ 3772 3773 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3774 { 3775 return 0; 3776 } 3777 3778 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3779 { 3780 } 3781 3782 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3783 { 3784 } 3785 3786 #endif /* CONFIG_CGROUP_WRITEBACK */ 3787 3788 /* 3789 * DO NOT USE IN NEW FILES. 3790 * 3791 * "cgroup.event_control" implementation. 3792 * 3793 * This is way over-engineered. It tries to support fully configurable 3794 * events for each user. Such level of flexibility is completely 3795 * unnecessary especially in the light of the planned unified hierarchy. 3796 * 3797 * Please deprecate this and replace with something simpler if at all 3798 * possible. 3799 */ 3800 3801 /* 3802 * Unregister event and free resources. 3803 * 3804 * Gets called from workqueue. 3805 */ 3806 static void memcg_event_remove(struct work_struct *work) 3807 { 3808 struct mem_cgroup_event *event = 3809 container_of(work, struct mem_cgroup_event, remove); 3810 struct mem_cgroup *memcg = event->memcg; 3811 3812 remove_wait_queue(event->wqh, &event->wait); 3813 3814 event->unregister_event(memcg, event->eventfd); 3815 3816 /* Notify userspace the event is going away. */ 3817 eventfd_signal(event->eventfd, 1); 3818 3819 eventfd_ctx_put(event->eventfd); 3820 kfree(event); 3821 css_put(&memcg->css); 3822 } 3823 3824 /* 3825 * Gets called on POLLHUP on eventfd when user closes it. 3826 * 3827 * Called with wqh->lock held and interrupts disabled. 3828 */ 3829 static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 3830 int sync, void *key) 3831 { 3832 struct mem_cgroup_event *event = 3833 container_of(wait, struct mem_cgroup_event, wait); 3834 struct mem_cgroup *memcg = event->memcg; 3835 unsigned long flags = (unsigned long)key; 3836 3837 if (flags & POLLHUP) { 3838 /* 3839 * If the event has been detached at cgroup removal, we 3840 * can simply return knowing the other side will cleanup 3841 * for us. 3842 * 3843 * We can't race against event freeing since the other 3844 * side will require wqh->lock via remove_wait_queue(), 3845 * which we hold. 3846 */ 3847 spin_lock(&memcg->event_list_lock); 3848 if (!list_empty(&event->list)) { 3849 list_del_init(&event->list); 3850 /* 3851 * We are in atomic context, but cgroup_event_remove() 3852 * may sleep, so we have to call it in workqueue. 
3853 */ 3854 schedule_work(&event->remove); 3855 } 3856 spin_unlock(&memcg->event_list_lock); 3857 } 3858 3859 return 0; 3860 } 3861 3862 static void memcg_event_ptable_queue_proc(struct file *file, 3863 wait_queue_head_t *wqh, poll_table *pt) 3864 { 3865 struct mem_cgroup_event *event = 3866 container_of(pt, struct mem_cgroup_event, pt); 3867 3868 event->wqh = wqh; 3869 add_wait_queue(wqh, &event->wait); 3870 } 3871 3872 /* 3873 * DO NOT USE IN NEW FILES. 3874 * 3875 * Parse input and register new cgroup event handler. 3876 * 3877 * Input must be in format '<event_fd> <control_fd> <args>'. 3878 * Interpretation of args is defined by control file implementation. 3879 */ 3880 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3881 char *buf, size_t nbytes, loff_t off) 3882 { 3883 struct cgroup_subsys_state *css = of_css(of); 3884 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3885 struct mem_cgroup_event *event; 3886 struct cgroup_subsys_state *cfile_css; 3887 unsigned int efd, cfd; 3888 struct fd efile; 3889 struct fd cfile; 3890 const char *name; 3891 char *endp; 3892 int ret; 3893 3894 buf = strstrip(buf); 3895 3896 efd = simple_strtoul(buf, &endp, 10); 3897 if (*endp != ' ') 3898 return -EINVAL; 3899 buf = endp + 1; 3900 3901 cfd = simple_strtoul(buf, &endp, 10); 3902 if ((*endp != ' ') && (*endp != '\0')) 3903 return -EINVAL; 3904 buf = endp + 1; 3905 3906 event = kzalloc(sizeof(*event), GFP_KERNEL); 3907 if (!event) 3908 return -ENOMEM; 3909 3910 event->memcg = memcg; 3911 INIT_LIST_HEAD(&event->list); 3912 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3913 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3914 INIT_WORK(&event->remove, memcg_event_remove); 3915 3916 efile = fdget(efd); 3917 if (!efile.file) { 3918 ret = -EBADF; 3919 goto out_kfree; 3920 } 3921 3922 event->eventfd = eventfd_ctx_fileget(efile.file); 3923 if (IS_ERR(event->eventfd)) { 3924 ret = PTR_ERR(event->eventfd); 3925 goto out_put_efile; 3926 } 3927 3928 cfile = fdget(cfd); 3929 if (!cfile.file) { 3930 ret = -EBADF; 3931 goto out_put_eventfd; 3932 } 3933 3934 /* the process need read permission on control file */ 3935 /* AV: shouldn't we check that it's been opened for read instead? */ 3936 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3937 if (ret < 0) 3938 goto out_put_cfile; 3939 3940 /* 3941 * Determine the event callbacks and set them in @event. This used 3942 * to be done via struct cftype but cgroup core no longer knows 3943 * about these events. The following is crude but the whole thing 3944 * is for compatibility anyway. 3945 * 3946 * DO NOT ADD NEW FILES. 
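 *
 * For reference, a rough sketch of a legacy registration from userspace;
 * the paths and the 4M threshold are made up for illustration:
 *
 *	efd = eventfd(0, 0);
 *	cfd = open(".../memory.usage_in_bytes", O_RDONLY);
 *	ecfd = open(".../cgroup.event_control", O_WRONLY);
 *	dprintf(ecfd, "%d %d 4194304", efd, cfd);
 *	read(efd, &cnt, sizeof(cnt));	// returns once the 4M threshold is crossed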
3947 */ 3948 name = cfile.file->f_path.dentry->d_name.name; 3949 3950 if (!strcmp(name, "memory.usage_in_bytes")) { 3951 event->register_event = mem_cgroup_usage_register_event; 3952 event->unregister_event = mem_cgroup_usage_unregister_event; 3953 } else if (!strcmp(name, "memory.oom_control")) { 3954 event->register_event = mem_cgroup_oom_register_event; 3955 event->unregister_event = mem_cgroup_oom_unregister_event; 3956 } else if (!strcmp(name, "memory.pressure_level")) { 3957 event->register_event = vmpressure_register_event; 3958 event->unregister_event = vmpressure_unregister_event; 3959 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3960 event->register_event = memsw_cgroup_usage_register_event; 3961 event->unregister_event = memsw_cgroup_usage_unregister_event; 3962 } else { 3963 ret = -EINVAL; 3964 goto out_put_cfile; 3965 } 3966 3967 /* 3968 * Verify @cfile should belong to @css. Also, remaining events are 3969 * automatically removed on cgroup destruction but the removal is 3970 * asynchronous, so take an extra ref on @css. 3971 */ 3972 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3973 &memory_cgrp_subsys); 3974 ret = -EINVAL; 3975 if (IS_ERR(cfile_css)) 3976 goto out_put_cfile; 3977 if (cfile_css != css) { 3978 css_put(cfile_css); 3979 goto out_put_cfile; 3980 } 3981 3982 ret = event->register_event(memcg, event->eventfd, buf); 3983 if (ret) 3984 goto out_put_css; 3985 3986 efile.file->f_op->poll(efile.file, &event->pt); 3987 3988 spin_lock(&memcg->event_list_lock); 3989 list_add(&event->list, &memcg->event_list); 3990 spin_unlock(&memcg->event_list_lock); 3991 3992 fdput(cfile); 3993 fdput(efile); 3994 3995 return nbytes; 3996 3997 out_put_css: 3998 css_put(css); 3999 out_put_cfile: 4000 fdput(cfile); 4001 out_put_eventfd: 4002 eventfd_ctx_put(event->eventfd); 4003 out_put_efile: 4004 fdput(efile); 4005 out_kfree: 4006 kfree(event); 4007 4008 return ret; 4009 } 4010 4011 static struct cftype mem_cgroup_legacy_files[] = { 4012 { 4013 .name = "usage_in_bytes", 4014 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4015 .read_u64 = mem_cgroup_read_u64, 4016 }, 4017 { 4018 .name = "max_usage_in_bytes", 4019 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4020 .write = mem_cgroup_reset, 4021 .read_u64 = mem_cgroup_read_u64, 4022 }, 4023 { 4024 .name = "limit_in_bytes", 4025 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4026 .write = mem_cgroup_write, 4027 .read_u64 = mem_cgroup_read_u64, 4028 }, 4029 { 4030 .name = "soft_limit_in_bytes", 4031 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4032 .write = mem_cgroup_write, 4033 .read_u64 = mem_cgroup_read_u64, 4034 }, 4035 { 4036 .name = "failcnt", 4037 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4038 .write = mem_cgroup_reset, 4039 .read_u64 = mem_cgroup_read_u64, 4040 }, 4041 { 4042 .name = "stat", 4043 .seq_show = memcg_stat_show, 4044 }, 4045 { 4046 .name = "force_empty", 4047 .write = mem_cgroup_force_empty_write, 4048 }, 4049 { 4050 .name = "use_hierarchy", 4051 .write_u64 = mem_cgroup_hierarchy_write, 4052 .read_u64 = mem_cgroup_hierarchy_read, 4053 }, 4054 { 4055 .name = "cgroup.event_control", /* XXX: for compat */ 4056 .write = memcg_write_event_control, 4057 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4058 }, 4059 { 4060 .name = "swappiness", 4061 .read_u64 = mem_cgroup_swappiness_read, 4062 .write_u64 = mem_cgroup_swappiness_write, 4063 }, 4064 { 4065 .name = "move_charge_at_immigrate", 4066 .read_u64 = mem_cgroup_move_charge_read, 4067 .write_u64 = 
mem_cgroup_move_charge_write, 4068 }, 4069 { 4070 .name = "oom_control", 4071 .seq_show = mem_cgroup_oom_control_read, 4072 .write_u64 = mem_cgroup_oom_control_write, 4073 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4074 }, 4075 { 4076 .name = "pressure_level", 4077 }, 4078 #ifdef CONFIG_NUMA 4079 { 4080 .name = "numa_stat", 4081 .seq_show = memcg_numa_stat_show, 4082 }, 4083 #endif 4084 #ifdef CONFIG_MEMCG_KMEM 4085 { 4086 .name = "kmem.limit_in_bytes", 4087 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4088 .write = mem_cgroup_write, 4089 .read_u64 = mem_cgroup_read_u64, 4090 }, 4091 { 4092 .name = "kmem.usage_in_bytes", 4093 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4094 .read_u64 = mem_cgroup_read_u64, 4095 }, 4096 { 4097 .name = "kmem.failcnt", 4098 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4099 .write = mem_cgroup_reset, 4100 .read_u64 = mem_cgroup_read_u64, 4101 }, 4102 { 4103 .name = "kmem.max_usage_in_bytes", 4104 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4105 .write = mem_cgroup_reset, 4106 .read_u64 = mem_cgroup_read_u64, 4107 }, 4108 #ifdef CONFIG_SLABINFO 4109 { 4110 .name = "kmem.slabinfo", 4111 .seq_start = slab_start, 4112 .seq_next = slab_next, 4113 .seq_stop = slab_stop, 4114 .seq_show = memcg_slab_show, 4115 }, 4116 #endif 4117 #endif 4118 { }, /* terminate */ 4119 }; 4120 4121 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4122 { 4123 struct mem_cgroup_per_node *pn; 4124 struct mem_cgroup_per_zone *mz; 4125 int zone, tmp = node; 4126 /* 4127 * This routine is called against possible nodes. 4128 * But it's BUG to call kmalloc() against offline node. 4129 * 4130 * TODO: this routine can waste much memory for nodes which will 4131 * never be onlined. It's better to use memory hotplug callback 4132 * function. 4133 */ 4134 if (!node_state(node, N_NORMAL_MEMORY)) 4135 tmp = -1; 4136 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4137 if (!pn) 4138 return 1; 4139 4140 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4141 mz = &pn->zoneinfo[zone]; 4142 lruvec_init(&mz->lruvec); 4143 mz->usage_in_excess = 0; 4144 mz->on_tree = false; 4145 mz->memcg = memcg; 4146 } 4147 memcg->nodeinfo[node] = pn; 4148 return 0; 4149 } 4150 4151 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4152 { 4153 kfree(memcg->nodeinfo[node]); 4154 } 4155 4156 static struct mem_cgroup *mem_cgroup_alloc(void) 4157 { 4158 struct mem_cgroup *memcg; 4159 size_t size; 4160 4161 size = sizeof(struct mem_cgroup); 4162 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4163 4164 memcg = kzalloc(size, GFP_KERNEL); 4165 if (!memcg) 4166 return NULL; 4167 4168 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4169 if (!memcg->stat) 4170 goto out_free; 4171 4172 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4173 goto out_free_stat; 4174 4175 return memcg; 4176 4177 out_free_stat: 4178 free_percpu(memcg->stat); 4179 out_free: 4180 kfree(memcg); 4181 return NULL; 4182 } 4183 4184 /* 4185 * At destroying mem_cgroup, references from swap_cgroup can remain. 4186 * (scanning all at force_empty is too costly...) 4187 * 4188 * Instead of clearing all references at force_empty, we remember 4189 * the number of reference from swap_cgroup and free mem_cgroup when 4190 * it goes down to 0. 4191 * 4192 * Removal of cgroup itself succeeds regardless of refs from swap. 
4193 */ 4194 4195 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4196 { 4197 int node; 4198 4199 mem_cgroup_remove_from_trees(memcg); 4200 4201 for_each_node(node) 4202 free_mem_cgroup_per_zone_info(memcg, node); 4203 4204 free_percpu(memcg->stat); 4205 memcg_wb_domain_exit(memcg); 4206 kfree(memcg); 4207 } 4208 4209 /* 4210 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 4211 */ 4212 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) 4213 { 4214 if (!memcg->memory.parent) 4215 return NULL; 4216 return mem_cgroup_from_counter(memcg->memory.parent, memory); 4217 } 4218 EXPORT_SYMBOL(parent_mem_cgroup); 4219 4220 static struct cgroup_subsys_state * __ref 4221 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4222 { 4223 struct mem_cgroup *memcg; 4224 long error = -ENOMEM; 4225 int node; 4226 4227 memcg = mem_cgroup_alloc(); 4228 if (!memcg) 4229 return ERR_PTR(error); 4230 4231 for_each_node(node) 4232 if (alloc_mem_cgroup_per_zone_info(memcg, node)) 4233 goto free_out; 4234 4235 /* root ? */ 4236 if (parent_css == NULL) { 4237 root_mem_cgroup = memcg; 4238 mem_cgroup_root_css = &memcg->css; 4239 page_counter_init(&memcg->memory, NULL); 4240 memcg->high = PAGE_COUNTER_MAX; 4241 memcg->soft_limit = PAGE_COUNTER_MAX; 4242 page_counter_init(&memcg->memsw, NULL); 4243 page_counter_init(&memcg->kmem, NULL); 4244 } 4245 4246 memcg->last_scanned_node = MAX_NUMNODES; 4247 INIT_LIST_HEAD(&memcg->oom_notify); 4248 memcg->move_charge_at_immigrate = 0; 4249 mutex_init(&memcg->thresholds_lock); 4250 spin_lock_init(&memcg->move_lock); 4251 vmpressure_init(&memcg->vmpressure); 4252 INIT_LIST_HEAD(&memcg->event_list); 4253 spin_lock_init(&memcg->event_list_lock); 4254 #ifdef CONFIG_MEMCG_KMEM 4255 memcg->kmemcg_id = -1; 4256 #endif 4257 #ifdef CONFIG_CGROUP_WRITEBACK 4258 INIT_LIST_HEAD(&memcg->cgwb_list); 4259 #endif 4260 return &memcg->css; 4261 4262 free_out: 4263 __mem_cgroup_free(memcg); 4264 return ERR_PTR(error); 4265 } 4266 4267 static int 4268 mem_cgroup_css_online(struct cgroup_subsys_state *css) 4269 { 4270 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4271 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); 4272 int ret; 4273 4274 if (css->id > MEM_CGROUP_ID_MAX) 4275 return -ENOSPC; 4276 4277 if (!parent) 4278 return 0; 4279 4280 mutex_lock(&memcg_create_mutex); 4281 4282 memcg->use_hierarchy = parent->use_hierarchy; 4283 memcg->oom_kill_disable = parent->oom_kill_disable; 4284 memcg->swappiness = mem_cgroup_swappiness(parent); 4285 4286 if (parent->use_hierarchy) { 4287 page_counter_init(&memcg->memory, &parent->memory); 4288 memcg->high = PAGE_COUNTER_MAX; 4289 memcg->soft_limit = PAGE_COUNTER_MAX; 4290 page_counter_init(&memcg->memsw, &parent->memsw); 4291 page_counter_init(&memcg->kmem, &parent->kmem); 4292 4293 /* 4294 * No need to take a reference to the parent because cgroup 4295 * core guarantees its existence. 4296 */ 4297 } else { 4298 page_counter_init(&memcg->memory, NULL); 4299 memcg->high = PAGE_COUNTER_MAX; 4300 memcg->soft_limit = PAGE_COUNTER_MAX; 4301 page_counter_init(&memcg->memsw, NULL); 4302 page_counter_init(&memcg->kmem, NULL); 4303 /* 4304 * Deeper hierachy with use_hierarchy == false doesn't make 4305 * much sense so let cgroup subsystem know about this 4306 * unfortunate state in our controller. 
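 *
 * When use_hierarchy is enabled above, each child counter is chained to
 * its parent's via page_counter_init(&memcg->memory, &parent->memory),
 * so a charge has to fit at every level up to the root or it fails and
 * is unwound.  A stripped-down userspace model of that chaining (this
 * is not the kernel's page_counter; names and numbers are invented):
 *
 *	#include <stdio.h>
 *
 *	struct counter {
 *		unsigned long usage;
 *		unsigned long limit;
 *		struct counter *parent;
 *	};
 *
 *	// Walk up the chain; if any ancestor would exceed its limit,
 *	// roll back the partial charges and fail.
 *	static int try_charge(struct counter *c, unsigned long nr)
 *	{
 *		struct counter *it, *fail = NULL;
 *
 *		for (it = c; it; it = it->parent) {
 *			if (it->usage + nr > it->limit) {
 *				fail = it;
 *				break;
 *			}
 *			it->usage += nr;
 *		}
 *		if (!fail)
 *			return 0;
 *		for (it = c; it != fail; it = it->parent)
 *			it->usage -= nr;
 *		return -1;
 *	}
 *
 *	int main(void)
 *	{
 *		struct counter root   = { 0, 1000, NULL };
 *		struct counter parent = { 0,  300, &root };
 *		struct counter child  = { 0, 1000, &parent };
 *
 *		printf("first charge:  %d\n", try_charge(&child, 200));
 *		printf("second charge: %d\n", try_charge(&child, 200));
 *		printf("usage: child=%lu parent=%lu root=%lu\n",
 *		       child.usage, parent.usage, root.usage);
 *		return 0;
 *	}
 *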
4307 */ 4308 if (parent != root_mem_cgroup) 4309 memory_cgrp_subsys.broken_hierarchy = true; 4310 } 4311 mutex_unlock(&memcg_create_mutex); 4312 4313 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); 4314 if (ret) 4315 return ret; 4316 4317 /* 4318 * Make sure the memcg is initialized: mem_cgroup_iter() 4319 * orders reading memcg->initialized against its callers 4320 * reading the memcg members. 4321 */ 4322 smp_store_release(&memcg->initialized, 1); 4323 4324 return 0; 4325 } 4326 4327 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4328 { 4329 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4330 struct mem_cgroup_event *event, *tmp; 4331 4332 /* 4333 * Unregister events and notify userspace. 4334 * Notify userspace about cgroup removing only after rmdir of cgroup 4335 * directory to avoid race between userspace and kernelspace. 4336 */ 4337 spin_lock(&memcg->event_list_lock); 4338 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4339 list_del_init(&event->list); 4340 schedule_work(&event->remove); 4341 } 4342 spin_unlock(&memcg->event_list_lock); 4343 4344 vmpressure_cleanup(&memcg->vmpressure); 4345 4346 memcg_deactivate_kmem(memcg); 4347 4348 wb_memcg_offline(memcg); 4349 } 4350 4351 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4352 { 4353 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4354 4355 invalidate_reclaim_iterators(memcg); 4356 } 4357 4358 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4359 { 4360 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4361 4362 memcg_destroy_kmem(memcg); 4363 __mem_cgroup_free(memcg); 4364 } 4365 4366 /** 4367 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4368 * @css: the target css 4369 * 4370 * Reset the states of the mem_cgroup associated with @css. This is 4371 * invoked when the userland requests disabling on the default hierarchy 4372 * but the memcg is pinned through dependency. The memcg should stop 4373 * applying policies and should revert to the vanilla state as it may be 4374 * made visible again. 4375 * 4376 * The current implementation only resets the essential configurations. 4377 * This needs to be expanded to cover all the visible parts. 4378 */ 4379 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4380 { 4381 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4382 4383 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); 4384 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); 4385 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); 4386 memcg->low = 0; 4387 memcg->high = PAGE_COUNTER_MAX; 4388 memcg->soft_limit = PAGE_COUNTER_MAX; 4389 memcg_wb_domain_size_changed(memcg); 4390 } 4391 4392 #ifdef CONFIG_MMU 4393 /* Handlers for move charge at task migration. 
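 *
 * From userspace on a v1 hierarchy, this machinery is driven by writing
 * the wanted MOVE_* bits to memory.move_charge_at_immigrate in the
 * destination group (bit 0 = anon, bit 1 = file) and then migrating the
 * task; charges follow the owner of the mm.  A sketch, with the mount
 * point, group name and pid as placeholders:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int put(const char *path, const char *val)
 *	{
 *		int fd = open(path, O_WRONLY);
 *		int ret = -1;
 *
 *		if (fd >= 0 && write(fd, val, strlen(val)) == (ssize_t)strlen(val))
 *			ret = 0;
 *		if (fd >= 0)
 *			close(fd);
 *		return ret;
 *	}
 *
 *	int main(void)
 *	{
 *		const char *dst = "/sys/fs/cgroup/memory/dst";
 *		char path[256];
 *
 *		// MOVE_ANON | MOVE_FILE
 *		snprintf(path, sizeof(path),
 *			 "%s/memory.move_charge_at_immigrate", dst);
 *		if (put(path, "3"))
 *			return 1;
 *
 *		// moving the task triggers can_attach()/attach() below
 *		snprintf(path, sizeof(path), "%s/cgroup.procs", dst);
 *		return put(path, "1234") ? 1 : 0;
 *	}
 *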
*/ 4394 static int mem_cgroup_do_precharge(unsigned long count) 4395 { 4396 int ret; 4397 4398 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4399 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4400 if (!ret) { 4401 mc.precharge += count; 4402 return ret; 4403 } 4404 4405 /* Try charges one by one with reclaim */ 4406 while (count--) { 4407 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4408 if (ret) 4409 return ret; 4410 mc.precharge++; 4411 cond_resched(); 4412 } 4413 return 0; 4414 } 4415 4416 /** 4417 * get_mctgt_type - get target type of moving charge 4418 * @vma: the vma the pte to be checked belongs 4419 * @addr: the address corresponding to the pte to be checked 4420 * @ptent: the pte to be checked 4421 * @target: the pointer the target page or swap ent will be stored(can be NULL) 4422 * 4423 * Returns 4424 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 4425 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4426 * move charge. if @target is not NULL, the page is stored in target->page 4427 * with extra refcnt got(Callers should handle it). 4428 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4429 * target for charge migration. if @target is not NULL, the entry is stored 4430 * in target->ent. 4431 * 4432 * Called with pte lock held. 4433 */ 4434 union mc_target { 4435 struct page *page; 4436 swp_entry_t ent; 4437 }; 4438 4439 enum mc_target_type { 4440 MC_TARGET_NONE = 0, 4441 MC_TARGET_PAGE, 4442 MC_TARGET_SWAP, 4443 }; 4444 4445 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4446 unsigned long addr, pte_t ptent) 4447 { 4448 struct page *page = vm_normal_page(vma, addr, ptent); 4449 4450 if (!page || !page_mapped(page)) 4451 return NULL; 4452 if (PageAnon(page)) { 4453 if (!(mc.flags & MOVE_ANON)) 4454 return NULL; 4455 } else { 4456 if (!(mc.flags & MOVE_FILE)) 4457 return NULL; 4458 } 4459 if (!get_page_unless_zero(page)) 4460 return NULL; 4461 4462 return page; 4463 } 4464 4465 #ifdef CONFIG_SWAP 4466 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4467 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4468 { 4469 struct page *page = NULL; 4470 swp_entry_t ent = pte_to_swp_entry(ptent); 4471 4472 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4473 return NULL; 4474 /* 4475 * Because lookup_swap_cache() updates some statistics counter, 4476 * we call find_get_page() with swapper_space directly. 4477 */ 4478 page = find_get_page(swap_address_space(ent), ent.val); 4479 if (do_swap_account) 4480 entry->val = ent.val; 4481 4482 return page; 4483 } 4484 #else 4485 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4486 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4487 { 4488 return NULL; 4489 } 4490 #endif 4491 4492 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4493 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4494 { 4495 struct page *page = NULL; 4496 struct address_space *mapping; 4497 pgoff_t pgoff; 4498 4499 if (!vma->vm_file) /* anonymous vma */ 4500 return NULL; 4501 if (!(mc.flags & MOVE_FILE)) 4502 return NULL; 4503 4504 mapping = vma->vm_file->f_mapping; 4505 pgoff = linear_page_index(vma, addr); 4506 4507 /* page is moved even if it's not RSS of this task(page-faulted). */ 4508 #ifdef CONFIG_SWAP 4509 /* shmem/tmpfs may report page out on swap: account for that too. 
*/ 4510 if (shmem_mapping(mapping)) { 4511 page = find_get_entry(mapping, pgoff); 4512 if (radix_tree_exceptional_entry(page)) { 4513 swp_entry_t swp = radix_to_swp_entry(page); 4514 if (do_swap_account) 4515 *entry = swp; 4516 page = find_get_page(swap_address_space(swp), swp.val); 4517 } 4518 } else 4519 page = find_get_page(mapping, pgoff); 4520 #else 4521 page = find_get_page(mapping, pgoff); 4522 #endif 4523 return page; 4524 } 4525 4526 /** 4527 * mem_cgroup_move_account - move account of the page 4528 * @page: the page 4529 * @nr_pages: number of regular pages (>1 for huge pages) 4530 * @from: mem_cgroup which the page is moved from. 4531 * @to: mem_cgroup which the page is moved to. @from != @to. 4532 * 4533 * The caller must confirm following. 4534 * - page is not on LRU (isolate_page() is useful.) 4535 * - compound_lock is held when nr_pages > 1 4536 * 4537 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4538 * from old cgroup. 4539 */ 4540 static int mem_cgroup_move_account(struct page *page, 4541 unsigned int nr_pages, 4542 struct mem_cgroup *from, 4543 struct mem_cgroup *to) 4544 { 4545 unsigned long flags; 4546 int ret; 4547 bool anon; 4548 4549 VM_BUG_ON(from == to); 4550 VM_BUG_ON_PAGE(PageLRU(page), page); 4551 /* 4552 * The page is isolated from LRU. So, collapse function 4553 * will not handle this page. But page splitting can happen. 4554 * Do this check under compound_page_lock(). The caller should 4555 * hold it. 4556 */ 4557 ret = -EBUSY; 4558 if (nr_pages > 1 && !PageTransHuge(page)) 4559 goto out; 4560 4561 /* 4562 * Prevent mem_cgroup_replace_page() from looking at 4563 * page->mem_cgroup of its source page while we change it. 4564 */ 4565 if (!trylock_page(page)) 4566 goto out; 4567 4568 ret = -EINVAL; 4569 if (page->mem_cgroup != from) 4570 goto out_unlock; 4571 4572 anon = PageAnon(page); 4573 4574 spin_lock_irqsave(&from->move_lock, flags); 4575 4576 if (!anon && page_mapped(page)) { 4577 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4578 nr_pages); 4579 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4580 nr_pages); 4581 } 4582 4583 /* 4584 * move_lock grabbed above and caller set from->moving_account, so 4585 * mem_cgroup_update_page_stat() will serialize updates to PageDirty. 4586 * So mapping should be stable for dirty pages. 4587 */ 4588 if (!anon && PageDirty(page)) { 4589 struct address_space *mapping = page_mapping(page); 4590 4591 if (mapping_cap_account_dirty(mapping)) { 4592 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY], 4593 nr_pages); 4594 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY], 4595 nr_pages); 4596 } 4597 } 4598 4599 if (PageWriteback(page)) { 4600 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4601 nr_pages); 4602 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4603 nr_pages); 4604 } 4605 4606 /* 4607 * It is safe to change page->mem_cgroup here because the page 4608 * is referenced, charged, and isolated - we can't race with 4609 * uncharging, charging, migration, or LRU putback. 
4610 */ 4611 4612 /* caller should have done css_get */ 4613 page->mem_cgroup = to; 4614 spin_unlock_irqrestore(&from->move_lock, flags); 4615 4616 ret = 0; 4617 4618 local_irq_disable(); 4619 mem_cgroup_charge_statistics(to, page, nr_pages); 4620 memcg_check_events(to, page); 4621 mem_cgroup_charge_statistics(from, page, -nr_pages); 4622 memcg_check_events(from, page); 4623 local_irq_enable(); 4624 out_unlock: 4625 unlock_page(page); 4626 out: 4627 return ret; 4628 } 4629 4630 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4631 unsigned long addr, pte_t ptent, union mc_target *target) 4632 { 4633 struct page *page = NULL; 4634 enum mc_target_type ret = MC_TARGET_NONE; 4635 swp_entry_t ent = { .val = 0 }; 4636 4637 if (pte_present(ptent)) 4638 page = mc_handle_present_pte(vma, addr, ptent); 4639 else if (is_swap_pte(ptent)) 4640 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 4641 else if (pte_none(ptent)) 4642 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4643 4644 if (!page && !ent.val) 4645 return ret; 4646 if (page) { 4647 /* 4648 * Do only loose check w/o serialization. 4649 * mem_cgroup_move_account() checks the page is valid or 4650 * not under LRU exclusion. 4651 */ 4652 if (page->mem_cgroup == mc.from) { 4653 ret = MC_TARGET_PAGE; 4654 if (target) 4655 target->page = page; 4656 } 4657 if (!ret || !target) 4658 put_page(page); 4659 } 4660 /* There is a swap entry and a page doesn't exist or isn't charged */ 4661 if (ent.val && !ret && 4662 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4663 ret = MC_TARGET_SWAP; 4664 if (target) 4665 target->ent = ent; 4666 } 4667 return ret; 4668 } 4669 4670 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4671 /* 4672 * We don't consider swapping or file mapped pages because THP does not 4673 * support them for now. 4674 * Caller should make sure that pmd_trans_huge(pmd) is true. 
4675 */ 4676 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4677 unsigned long addr, pmd_t pmd, union mc_target *target) 4678 { 4679 struct page *page = NULL; 4680 enum mc_target_type ret = MC_TARGET_NONE; 4681 4682 page = pmd_page(pmd); 4683 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4684 if (!(mc.flags & MOVE_ANON)) 4685 return ret; 4686 if (page->mem_cgroup == mc.from) { 4687 ret = MC_TARGET_PAGE; 4688 if (target) { 4689 get_page(page); 4690 target->page = page; 4691 } 4692 } 4693 return ret; 4694 } 4695 #else 4696 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4697 unsigned long addr, pmd_t pmd, union mc_target *target) 4698 { 4699 return MC_TARGET_NONE; 4700 } 4701 #endif 4702 4703 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4704 unsigned long addr, unsigned long end, 4705 struct mm_walk *walk) 4706 { 4707 struct vm_area_struct *vma = walk->vma; 4708 pte_t *pte; 4709 spinlock_t *ptl; 4710 4711 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 4712 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4713 mc.precharge += HPAGE_PMD_NR; 4714 spin_unlock(ptl); 4715 return 0; 4716 } 4717 4718 if (pmd_trans_unstable(pmd)) 4719 return 0; 4720 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4721 for (; addr != end; pte++, addr += PAGE_SIZE) 4722 if (get_mctgt_type(vma, addr, *pte, NULL)) 4723 mc.precharge++; /* increment precharge temporarily */ 4724 pte_unmap_unlock(pte - 1, ptl); 4725 cond_resched(); 4726 4727 return 0; 4728 } 4729 4730 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4731 { 4732 unsigned long precharge; 4733 4734 struct mm_walk mem_cgroup_count_precharge_walk = { 4735 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4736 .mm = mm, 4737 }; 4738 down_read(&mm->mmap_sem); 4739 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk); 4740 up_read(&mm->mmap_sem); 4741 4742 precharge = mc.precharge; 4743 mc.precharge = 0; 4744 4745 return precharge; 4746 } 4747 4748 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4749 { 4750 unsigned long precharge = mem_cgroup_count_precharge(mm); 4751 4752 VM_BUG_ON(mc.moving_task); 4753 mc.moving_task = current; 4754 return mem_cgroup_do_precharge(precharge); 4755 } 4756 4757 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4758 static void __mem_cgroup_clear_mc(void) 4759 { 4760 struct mem_cgroup *from = mc.from; 4761 struct mem_cgroup *to = mc.to; 4762 4763 /* we must uncharge all the leftover precharges from mc.to */ 4764 if (mc.precharge) { 4765 cancel_charge(mc.to, mc.precharge); 4766 mc.precharge = 0; 4767 } 4768 /* 4769 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4770 * we must uncharge here. 4771 */ 4772 if (mc.moved_charge) { 4773 cancel_charge(mc.from, mc.moved_charge); 4774 mc.moved_charge = 0; 4775 } 4776 /* we must fixup refcnts and charges */ 4777 if (mc.moved_swap) { 4778 /* uncharge swap account from the old cgroup */ 4779 if (!mem_cgroup_is_root(mc.from)) 4780 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4781 4782 /* 4783 * we charged both to->memory and to->memsw, so we 4784 * should uncharge to->memory. 
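 *
 * The invariant being repaired here: every resident page is charged to
 * both "memory" and "memsw" (memory+swap); swapping the page out gives
 * back only the memory half, and only freeing the swap entry gives back
 * the memsw half.  A toy model of that bookkeeping (illustrative only):
 *
 *	#include <stdio.h>
 *
 *	struct acct { unsigned long memory, memsw; };
 *
 *	static void charge(struct acct *a, unsigned long nr)
 *	{
 *		a->memory += nr;
 *		a->memsw += nr;		// RAM charges always count in memsw
 *	}
 *
 *	static void swap_out(struct acct *a, unsigned long nr)
 *	{
 *		a->memory -= nr;	// leaves RAM, still charged in memsw
 *	}
 *
 *	static void free_swap(struct acct *a, unsigned long nr)
 *	{
 *		a->memsw -= nr;
 *	}
 *
 *	int main(void)
 *	{
 *		struct acct a = { 0, 0 };
 *
 *		charge(&a, 100);
 *		swap_out(&a, 30);
 *		printf("memory=%lu memsw=%lu\n", a.memory, a.memsw);	// 70 100
 *		free_swap(&a, 30);
 *		printf("memory=%lu memsw=%lu\n", a.memory, a.memsw);	// 70 70
 *		return 0;
 *	}
 *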
4785 */ 4786 if (!mem_cgroup_is_root(mc.to)) 4787 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4788 4789 css_put_many(&mc.from->css, mc.moved_swap); 4790 4791 /* we've already done css_get(mc.to) */ 4792 mc.moved_swap = 0; 4793 } 4794 memcg_oom_recover(from); 4795 memcg_oom_recover(to); 4796 wake_up_all(&mc.waitq); 4797 } 4798 4799 static void mem_cgroup_clear_mc(void) 4800 { 4801 /* 4802 * we must clear moving_task before waking up waiters at the end of 4803 * task migration. 4804 */ 4805 mc.moving_task = NULL; 4806 __mem_cgroup_clear_mc(); 4807 spin_lock(&mc.lock); 4808 mc.from = NULL; 4809 mc.to = NULL; 4810 spin_unlock(&mc.lock); 4811 } 4812 4813 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4814 { 4815 struct cgroup_subsys_state *css; 4816 struct mem_cgroup *memcg; 4817 struct mem_cgroup *from; 4818 struct task_struct *leader, *p; 4819 struct mm_struct *mm; 4820 unsigned long move_flags; 4821 int ret = 0; 4822 4823 /* charge immigration isn't supported on the default hierarchy */ 4824 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4825 return 0; 4826 4827 /* 4828 * Multi-process migrations only happen on the default hierarchy 4829 * where charge immigration is not used. Perform charge 4830 * immigration if @tset contains a leader and whine if there are 4831 * multiple. 4832 */ 4833 p = NULL; 4834 cgroup_taskset_for_each_leader(leader, css, tset) { 4835 WARN_ON_ONCE(p); 4836 p = leader; 4837 memcg = mem_cgroup_from_css(css); 4838 } 4839 if (!p) 4840 return 0; 4841 4842 /* 4843 * We are now commited to this value whatever it is. Changes in this 4844 * tunable will only affect upcoming migrations, not the current one. 4845 * So we need to save it, and keep it going. 4846 */ 4847 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 4848 if (!move_flags) 4849 return 0; 4850 4851 from = mem_cgroup_from_task(p); 4852 4853 VM_BUG_ON(from == memcg); 4854 4855 mm = get_task_mm(p); 4856 if (!mm) 4857 return 0; 4858 /* We move charges only when we move a owner of the mm */ 4859 if (mm->owner == p) { 4860 VM_BUG_ON(mc.from); 4861 VM_BUG_ON(mc.to); 4862 VM_BUG_ON(mc.precharge); 4863 VM_BUG_ON(mc.moved_charge); 4864 VM_BUG_ON(mc.moved_swap); 4865 4866 spin_lock(&mc.lock); 4867 mc.from = from; 4868 mc.to = memcg; 4869 mc.flags = move_flags; 4870 spin_unlock(&mc.lock); 4871 /* We set mc.moving_task later */ 4872 4873 ret = mem_cgroup_precharge_mc(mm); 4874 if (ret) 4875 mem_cgroup_clear_mc(); 4876 } 4877 mmput(mm); 4878 return ret; 4879 } 4880 4881 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4882 { 4883 if (mc.to) 4884 mem_cgroup_clear_mc(); 4885 } 4886 4887 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 4888 unsigned long addr, unsigned long end, 4889 struct mm_walk *walk) 4890 { 4891 int ret = 0; 4892 struct vm_area_struct *vma = walk->vma; 4893 pte_t *pte; 4894 spinlock_t *ptl; 4895 enum mc_target_type target_type; 4896 union mc_target target; 4897 struct page *page; 4898 4899 /* 4900 * We don't take compound_lock() here but no race with splitting thp 4901 * happens because: 4902 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not 4903 * under splitting, which means there's no concurrent thp split, 4904 * - if another thread runs into split_huge_page() just after we 4905 * entered this if-block, the thread must wait for page table lock 4906 * to be unlocked in __split_huge_page_splitting(), where the main 4907 * part of thp split is not executed yet. 
4908 */ 4909 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 4910 if (mc.precharge < HPAGE_PMD_NR) { 4911 spin_unlock(ptl); 4912 return 0; 4913 } 4914 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 4915 if (target_type == MC_TARGET_PAGE) { 4916 page = target.page; 4917 if (!isolate_lru_page(page)) { 4918 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, 4919 mc.from, mc.to)) { 4920 mc.precharge -= HPAGE_PMD_NR; 4921 mc.moved_charge += HPAGE_PMD_NR; 4922 } 4923 putback_lru_page(page); 4924 } 4925 put_page(page); 4926 } 4927 spin_unlock(ptl); 4928 return 0; 4929 } 4930 4931 if (pmd_trans_unstable(pmd)) 4932 return 0; 4933 retry: 4934 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4935 for (; addr != end; addr += PAGE_SIZE) { 4936 pte_t ptent = *(pte++); 4937 swp_entry_t ent; 4938 4939 if (!mc.precharge) 4940 break; 4941 4942 switch (get_mctgt_type(vma, addr, ptent, &target)) { 4943 case MC_TARGET_PAGE: 4944 page = target.page; 4945 if (isolate_lru_page(page)) 4946 goto put; 4947 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) { 4948 mc.precharge--; 4949 /* we uncharge from mc.from later. */ 4950 mc.moved_charge++; 4951 } 4952 putback_lru_page(page); 4953 put: /* get_mctgt_type() gets the page */ 4954 put_page(page); 4955 break; 4956 case MC_TARGET_SWAP: 4957 ent = target.ent; 4958 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 4959 mc.precharge--; 4960 /* we fixup refcnts and charges later. */ 4961 mc.moved_swap++; 4962 } 4963 break; 4964 default: 4965 break; 4966 } 4967 } 4968 pte_unmap_unlock(pte - 1, ptl); 4969 cond_resched(); 4970 4971 if (addr != end) { 4972 /* 4973 * We have consumed all precharges we got in can_attach(). 4974 * We try charge one by one, but don't do any additional 4975 * charges to mc.to if we have failed in charge once in attach() 4976 * phase. 4977 */ 4978 ret = mem_cgroup_do_precharge(1); 4979 if (!ret) 4980 goto retry; 4981 } 4982 4983 return ret; 4984 } 4985 4986 static void mem_cgroup_move_charge(struct mm_struct *mm) 4987 { 4988 struct mm_walk mem_cgroup_move_charge_walk = { 4989 .pmd_entry = mem_cgroup_move_charge_pte_range, 4990 .mm = mm, 4991 }; 4992 4993 lru_add_drain_all(); 4994 /* 4995 * Signal mem_cgroup_begin_page_stat() to take the memcg's 4996 * move_lock while we're moving its pages to another memcg. 4997 * Then wait for already started RCU-only updates to finish. 4998 */ 4999 atomic_inc(&mc.from->moving_account); 5000 synchronize_rcu(); 5001 retry: 5002 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 5003 /* 5004 * Someone who are holding the mmap_sem might be waiting in 5005 * waitq. So we cancel all extra charges, wake up all waiters, 5006 * and retry. Because we cancel precharges, we might not be able 5007 * to move enough charges, but moving charge is a best-effort 5008 * feature anyway, so it wouldn't be a big problem. 5009 */ 5010 __mem_cgroup_clear_mc(); 5011 cond_resched(); 5012 goto retry; 5013 } 5014 /* 5015 * When we have consumed all precharges and failed in doing 5016 * additional charge, the page walk just aborts. 
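 *
 * The precharge scheme used by this walk, in isolation: reserve as much
 * as possible up front (a cheap, no-reclaim bulk charge, then one unit
 * at a time as in mem_cgroup_do_precharge()), consume one unit per page
 * moved, and hand any leftover back at the end.  A simplified userspace
 * model (the "pool" stands in for what the target memcg will accept):
 *
 *	#include <stdio.h>
 *
 *	static unsigned long pool = 8;	// charges the target still accepts
 *	static unsigned long reserve;	// our precharge
 *
 *	static int precharge(unsigned long want)
 *	{
 *		if (pool >= want) {	// bulk attempt, no "reclaim"
 *			pool -= want;
 *			reserve += want;
 *			return 0;
 *		}
 *		while (want--) {	// fall back to one at a time
 *			if (!pool)
 *				return -1;
 *			pool--;
 *			reserve++;
 *		}
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long moved = 0, i;
 *
 *		if (precharge(12))
 *			printf("reserved %lu of 12\n", reserve);
 *
 *		for (i = 0; i < 12 && reserve; i++) {	// the "page walk"
 *			reserve--;
 *			moved++;
 *		}
 *
 *		pool += reserve;	// leftovers go back, as in clear_mc
 *		reserve = 0;
 *		printf("moved %lu pages\n", moved);
 *		return 0;
 *	}
 *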
5017 */ 5018 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 5019 up_read(&mm->mmap_sem); 5020 atomic_dec(&mc.from->moving_account); 5021 } 5022 5023 static void mem_cgroup_move_task(struct cgroup_taskset *tset) 5024 { 5025 struct cgroup_subsys_state *css; 5026 struct task_struct *p = cgroup_taskset_first(tset, &css); 5027 struct mm_struct *mm = get_task_mm(p); 5028 5029 if (mm) { 5030 if (mc.to) 5031 mem_cgroup_move_charge(mm); 5032 mmput(mm); 5033 } 5034 if (mc.to) 5035 mem_cgroup_clear_mc(); 5036 } 5037 #else /* !CONFIG_MMU */ 5038 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5039 { 5040 return 0; 5041 } 5042 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5043 { 5044 } 5045 static void mem_cgroup_move_task(struct cgroup_taskset *tset) 5046 { 5047 } 5048 #endif 5049 5050 /* 5051 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5052 * to verify whether we're attached to the default hierarchy on each mount 5053 * attempt. 5054 */ 5055 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5056 { 5057 /* 5058 * use_hierarchy is forced on the default hierarchy. cgroup core 5059 * guarantees that @root doesn't have any children, so turning it 5060 * on for the root memcg is enough. 5061 */ 5062 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5063 root_mem_cgroup->use_hierarchy = true; 5064 else 5065 root_mem_cgroup->use_hierarchy = false; 5066 } 5067 5068 static u64 memory_current_read(struct cgroup_subsys_state *css, 5069 struct cftype *cft) 5070 { 5071 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5072 5073 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5074 } 5075 5076 static int memory_low_show(struct seq_file *m, void *v) 5077 { 5078 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5079 unsigned long low = READ_ONCE(memcg->low); 5080 5081 if (low == PAGE_COUNTER_MAX) 5082 seq_puts(m, "max\n"); 5083 else 5084 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5085 5086 return 0; 5087 } 5088 5089 static ssize_t memory_low_write(struct kernfs_open_file *of, 5090 char *buf, size_t nbytes, loff_t off) 5091 { 5092 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5093 unsigned long low; 5094 int err; 5095 5096 buf = strstrip(buf); 5097 err = page_counter_memparse(buf, "max", &low); 5098 if (err) 5099 return err; 5100 5101 memcg->low = low; 5102 5103 return nbytes; 5104 } 5105 5106 static int memory_high_show(struct seq_file *m, void *v) 5107 { 5108 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5109 unsigned long high = READ_ONCE(memcg->high); 5110 5111 if (high == PAGE_COUNTER_MAX) 5112 seq_puts(m, "max\n"); 5113 else 5114 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5115 5116 return 0; 5117 } 5118 5119 static ssize_t memory_high_write(struct kernfs_open_file *of, 5120 char *buf, size_t nbytes, loff_t off) 5121 { 5122 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5123 unsigned long high; 5124 int err; 5125 5126 buf = strstrip(buf); 5127 err = page_counter_memparse(buf, "max", &high); 5128 if (err) 5129 return err; 5130 5131 memcg->high = high; 5132 5133 memcg_wb_domain_size_changed(memcg); 5134 return nbytes; 5135 } 5136 5137 static int memory_max_show(struct seq_file *m, void *v) 5138 { 5139 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5140 unsigned long max = READ_ONCE(memcg->memory.limit); 5141 5142 if (max == PAGE_COUNTER_MAX) 5143 seq_puts(m, "max\n"); 5144 else 5145 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5146 5147 return 
0; 5148 } 5149 5150 static ssize_t memory_max_write(struct kernfs_open_file *of, 5151 char *buf, size_t nbytes, loff_t off) 5152 { 5153 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5154 unsigned long max; 5155 int err; 5156 5157 buf = strstrip(buf); 5158 err = page_counter_memparse(buf, "max", &max); 5159 if (err) 5160 return err; 5161 5162 err = mem_cgroup_resize_limit(memcg, max); 5163 if (err) 5164 return err; 5165 5166 memcg_wb_domain_size_changed(memcg); 5167 return nbytes; 5168 } 5169 5170 static int memory_events_show(struct seq_file *m, void *v) 5171 { 5172 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5173 5174 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); 5175 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); 5176 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); 5177 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); 5178 5179 return 0; 5180 } 5181 5182 static struct cftype memory_files[] = { 5183 { 5184 .name = "current", 5185 .flags = CFTYPE_NOT_ON_ROOT, 5186 .read_u64 = memory_current_read, 5187 }, 5188 { 5189 .name = "low", 5190 .flags = CFTYPE_NOT_ON_ROOT, 5191 .seq_show = memory_low_show, 5192 .write = memory_low_write, 5193 }, 5194 { 5195 .name = "high", 5196 .flags = CFTYPE_NOT_ON_ROOT, 5197 .seq_show = memory_high_show, 5198 .write = memory_high_write, 5199 }, 5200 { 5201 .name = "max", 5202 .flags = CFTYPE_NOT_ON_ROOT, 5203 .seq_show = memory_max_show, 5204 .write = memory_max_write, 5205 }, 5206 { 5207 .name = "events", 5208 .flags = CFTYPE_NOT_ON_ROOT, 5209 .file_offset = offsetof(struct mem_cgroup, events_file), 5210 .seq_show = memory_events_show, 5211 }, 5212 { } /* terminate */ 5213 }; 5214 5215 struct cgroup_subsys memory_cgrp_subsys = { 5216 .css_alloc = mem_cgroup_css_alloc, 5217 .css_online = mem_cgroup_css_online, 5218 .css_offline = mem_cgroup_css_offline, 5219 .css_released = mem_cgroup_css_released, 5220 .css_free = mem_cgroup_css_free, 5221 .css_reset = mem_cgroup_css_reset, 5222 .can_attach = mem_cgroup_can_attach, 5223 .cancel_attach = mem_cgroup_cancel_attach, 5224 .attach = mem_cgroup_move_task, 5225 .bind = mem_cgroup_bind, 5226 .dfl_cftypes = memory_files, 5227 .legacy_cftypes = mem_cgroup_legacy_files, 5228 .early_init = 0, 5229 }; 5230 5231 /** 5232 * mem_cgroup_low - check if memory consumption is below the normal range 5233 * @root: the highest ancestor to consider 5234 * @memcg: the memory cgroup to check 5235 * 5236 * Returns %true if memory consumption of @memcg, and that of all 5237 * configurable ancestors up to @root, is below the normal range. 5238 */ 5239 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5240 { 5241 if (mem_cgroup_disabled()) 5242 return false; 5243 5244 /* 5245 * The toplevel group doesn't have a configurable range, so 5246 * it's never low when looked at directly, and it is not 5247 * considered an ancestor when assessing the hierarchy. 
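 *
 * On the unified hierarchy these protections and limits are set through
 * the memory.low/high/max files implemented above, and breaches show up
 * in memory.events.  A userspace sketch (the v2 mount point, group name
 * and values below are made up):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void put(const char *grp, const char *file, const char *val)
 *	{
 *		char path[256];
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "%s/%s", grp, file);
 *		fd = open(path, O_WRONLY);
 *		if (fd >= 0) {
 *			write(fd, val, strlen(val));
 *			close(fd);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		const char *grp = "/sys/fs/cgroup/mygroup";
 *		char path[256], buf[256];
 *		ssize_t n;
 *		int fd;
 *
 *		put(grp, "memory.low", "67108864");	// protect ~64M
 *		put(grp, "memory.high", "134217728");	// throttle above 128M
 *		put(grp, "memory.max", "max");		// no hard limit
 *
 *		// one "name count" line each for low, high, max and oom
 *		snprintf(path, sizeof(path), "%s/memory.events", grp);
 *		fd = open(path, O_RDONLY);
 *		if (fd >= 0 && (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
 *			buf[n] = '\0';
 *			fputs(buf, stdout);
 *		}
 *		return 0;
 *	}
 *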
5248 */ 5249 5250 if (memcg == root_mem_cgroup) 5251 return false; 5252 5253 if (page_counter_read(&memcg->memory) >= memcg->low) 5254 return false; 5255 5256 while (memcg != root) { 5257 memcg = parent_mem_cgroup(memcg); 5258 5259 if (memcg == root_mem_cgroup) 5260 break; 5261 5262 if (page_counter_read(&memcg->memory) >= memcg->low) 5263 return false; 5264 } 5265 return true; 5266 } 5267 5268 /** 5269 * mem_cgroup_try_charge - try charging a page 5270 * @page: page to charge 5271 * @mm: mm context of the victim 5272 * @gfp_mask: reclaim mode 5273 * @memcgp: charged memcg return 5274 * 5275 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5276 * pages according to @gfp_mask if necessary. 5277 * 5278 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5279 * Otherwise, an error code is returned. 5280 * 5281 * After page->mapping has been set up, the caller must finalize the 5282 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5283 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5284 */ 5285 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5286 gfp_t gfp_mask, struct mem_cgroup **memcgp) 5287 { 5288 struct mem_cgroup *memcg = NULL; 5289 unsigned int nr_pages = 1; 5290 int ret = 0; 5291 5292 if (mem_cgroup_disabled()) 5293 goto out; 5294 5295 if (PageSwapCache(page)) { 5296 /* 5297 * Every swap fault against a single page tries to charge the 5298 * page, bail as early as possible. shmem_unuse() encounters 5299 * already charged pages, too. The USED bit is protected by 5300 * the page lock, which serializes swap cache removal, which 5301 * in turn serializes uncharging. 5302 */ 5303 VM_BUG_ON_PAGE(!PageLocked(page), page); 5304 if (page->mem_cgroup) 5305 goto out; 5306 5307 if (do_swap_account) { 5308 swp_entry_t ent = { .val = page_private(page), }; 5309 unsigned short id = lookup_swap_cgroup_id(ent); 5310 5311 rcu_read_lock(); 5312 memcg = mem_cgroup_from_id(id); 5313 if (memcg && !css_tryget_online(&memcg->css)) 5314 memcg = NULL; 5315 rcu_read_unlock(); 5316 } 5317 } 5318 5319 if (PageTransHuge(page)) { 5320 nr_pages <<= compound_order(page); 5321 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5322 } 5323 5324 if (!memcg) 5325 memcg = get_mem_cgroup_from_mm(mm); 5326 5327 ret = try_charge(memcg, gfp_mask, nr_pages); 5328 5329 css_put(&memcg->css); 5330 out: 5331 *memcgp = memcg; 5332 return ret; 5333 } 5334 5335 /** 5336 * mem_cgroup_commit_charge - commit a page charge 5337 * @page: page to charge 5338 * @memcg: memcg to charge the page to 5339 * @lrucare: page might be on LRU already 5340 * 5341 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5342 * after page->mapping has been set up. This must happen atomically 5343 * as part of the page instantiation, i.e. under the page table lock 5344 * for anonymous pages, under the page lock for page and swap cache. 5345 * 5346 * In addition, the page must not be on the LRU during the commit, to 5347 * prevent racing with task migration. If it might be, use @lrucare. 5348 * 5349 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5350 */ 5351 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5352 bool lrucare) 5353 { 5354 unsigned int nr_pages = 1; 5355 5356 VM_BUG_ON_PAGE(!page->mapping, page); 5357 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5358 5359 if (mem_cgroup_disabled()) 5360 return; 5361 /* 5362 * Swap faults will attempt to charge the same page multiple 5363 * times. 
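 *
 * (A schematic caller, for orientation: the try/commit/cancel API above
 * is meant to bracket page instantiation roughly as below.  This is in
 * the spirit of an anonymous fault path; the allocation and LRU calls
 * are stand-ins, not a verbatim copy of any caller.)
 *
 *	struct mem_cgroup *memcg;
 *	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);
 *
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) {
 *		put_page(page);
 *		return VM_FAULT_OOM;
 *	}
 *	// ... set up page->mapping / install the pte; if that fails,
 *	// mem_cgroup_cancel_charge(page, memcg) unwinds the charge ...
 *	mem_cgroup_commit_charge(page, memcg, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 *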
But reuse_swap_page() might have removed the page 5364 * from swapcache already, so we can't check PageSwapCache(). 5365 */ 5366 if (!memcg) 5367 return; 5368 5369 commit_charge(page, memcg, lrucare); 5370 5371 if (PageTransHuge(page)) { 5372 nr_pages <<= compound_order(page); 5373 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5374 } 5375 5376 local_irq_disable(); 5377 mem_cgroup_charge_statistics(memcg, page, nr_pages); 5378 memcg_check_events(memcg, page); 5379 local_irq_enable(); 5380 5381 if (do_swap_account && PageSwapCache(page)) { 5382 swp_entry_t entry = { .val = page_private(page) }; 5383 /* 5384 * The swap entry might not get freed for a long time, 5385 * let's not wait for it. The page already received a 5386 * memory+swap charge, drop the swap entry duplicate. 5387 */ 5388 mem_cgroup_uncharge_swap(entry); 5389 } 5390 } 5391 5392 /** 5393 * mem_cgroup_cancel_charge - cancel a page charge 5394 * @page: page to charge 5395 * @memcg: memcg to charge the page to 5396 * 5397 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5398 */ 5399 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) 5400 { 5401 unsigned int nr_pages = 1; 5402 5403 if (mem_cgroup_disabled()) 5404 return; 5405 /* 5406 * Swap faults will attempt to charge the same page multiple 5407 * times. But reuse_swap_page() might have removed the page 5408 * from swapcache already, so we can't check PageSwapCache(). 5409 */ 5410 if (!memcg) 5411 return; 5412 5413 if (PageTransHuge(page)) { 5414 nr_pages <<= compound_order(page); 5415 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5416 } 5417 5418 cancel_charge(memcg, nr_pages); 5419 } 5420 5421 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5422 unsigned long nr_anon, unsigned long nr_file, 5423 unsigned long nr_huge, struct page *dummy_page) 5424 { 5425 unsigned long nr_pages = nr_anon + nr_file; 5426 unsigned long flags; 5427 5428 if (!mem_cgroup_is_root(memcg)) { 5429 page_counter_uncharge(&memcg->memory, nr_pages); 5430 if (do_swap_account) 5431 page_counter_uncharge(&memcg->memsw, nr_pages); 5432 memcg_oom_recover(memcg); 5433 } 5434 5435 local_irq_save(flags); 5436 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); 5437 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); 5438 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); 5439 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); 5440 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5441 memcg_check_events(memcg, dummy_page); 5442 local_irq_restore(flags); 5443 5444 if (!mem_cgroup_is_root(memcg)) 5445 css_put_many(&memcg->css, nr_pages); 5446 } 5447 5448 static void uncharge_list(struct list_head *page_list) 5449 { 5450 struct mem_cgroup *memcg = NULL; 5451 unsigned long nr_anon = 0; 5452 unsigned long nr_file = 0; 5453 unsigned long nr_huge = 0; 5454 unsigned long pgpgout = 0; 5455 struct list_head *next; 5456 struct page *page; 5457 5458 next = page_list->next; 5459 do { 5460 unsigned int nr_pages = 1; 5461 5462 page = list_entry(next, struct page, lru); 5463 next = page->lru.next; 5464 5465 VM_BUG_ON_PAGE(PageLRU(page), page); 5466 VM_BUG_ON_PAGE(page_count(page), page); 5467 5468 if (!page->mem_cgroup) 5469 continue; 5470 5471 /* 5472 * Nobody should be changing or seriously looking at 5473 * page->mem_cgroup at this point, we have fully 5474 * exclusive access to the page. 
5475 */ 5476 5477 if (memcg != page->mem_cgroup) { 5478 if (memcg) { 5479 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5480 nr_huge, page); 5481 pgpgout = nr_anon = nr_file = nr_huge = 0; 5482 } 5483 memcg = page->mem_cgroup; 5484 } 5485 5486 if (PageTransHuge(page)) { 5487 nr_pages <<= compound_order(page); 5488 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5489 nr_huge += nr_pages; 5490 } 5491 5492 if (PageAnon(page)) 5493 nr_anon += nr_pages; 5494 else 5495 nr_file += nr_pages; 5496 5497 page->mem_cgroup = NULL; 5498 5499 pgpgout++; 5500 } while (next != page_list); 5501 5502 if (memcg) 5503 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5504 nr_huge, page); 5505 } 5506 5507 /** 5508 * mem_cgroup_uncharge - uncharge a page 5509 * @page: page to uncharge 5510 * 5511 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5512 * mem_cgroup_commit_charge(). 5513 */ 5514 void mem_cgroup_uncharge(struct page *page) 5515 { 5516 if (mem_cgroup_disabled()) 5517 return; 5518 5519 /* Don't touch page->lru of any random page, pre-check: */ 5520 if (!page->mem_cgroup) 5521 return; 5522 5523 INIT_LIST_HEAD(&page->lru); 5524 uncharge_list(&page->lru); 5525 } 5526 5527 /** 5528 * mem_cgroup_uncharge_list - uncharge a list of page 5529 * @page_list: list of pages to uncharge 5530 * 5531 * Uncharge a list of pages previously charged with 5532 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5533 */ 5534 void mem_cgroup_uncharge_list(struct list_head *page_list) 5535 { 5536 if (mem_cgroup_disabled()) 5537 return; 5538 5539 if (!list_empty(page_list)) 5540 uncharge_list(page_list); 5541 } 5542 5543 /** 5544 * mem_cgroup_replace_page - migrate a charge to another page 5545 * @oldpage: currently charged page 5546 * @newpage: page to transfer the charge to 5547 * 5548 * Migrate the charge from @oldpage to @newpage. 5549 * 5550 * Both pages must be locked, @newpage->mapping must be set up. 5551 * Either or both pages might be on the LRU already. 5552 */ 5553 void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) 5554 { 5555 struct mem_cgroup *memcg; 5556 int isolated; 5557 5558 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5559 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5560 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5561 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5562 newpage); 5563 5564 if (mem_cgroup_disabled()) 5565 return; 5566 5567 /* Page cache replacement: new page already charged? */ 5568 if (newpage->mem_cgroup) 5569 return; 5570 5571 /* Swapcache readahead pages can get replaced before being charged */ 5572 memcg = oldpage->mem_cgroup; 5573 if (!memcg) 5574 return; 5575 5576 lock_page_lru(oldpage, &isolated); 5577 oldpage->mem_cgroup = NULL; 5578 unlock_page_lru(oldpage, isolated); 5579 5580 commit_charge(newpage, memcg, true); 5581 } 5582 5583 /* 5584 * subsys_initcall() for memory controller. 5585 * 5586 * Some parts like hotcpu_notifier() have to be initialized from this context 5587 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 5588 * everything that doesn't depend on a specific mem_cgroup structure should 5589 * be initialized from here. 
5590 */ 5591 static int __init mem_cgroup_init(void) 5592 { 5593 int cpu, node; 5594 5595 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5596 5597 for_each_possible_cpu(cpu) 5598 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5599 drain_local_stock); 5600 5601 for_each_node(node) { 5602 struct mem_cgroup_tree_per_node *rtpn; 5603 int zone; 5604 5605 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5606 node_online(node) ? node : NUMA_NO_NODE); 5607 5608 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 5609 struct mem_cgroup_tree_per_zone *rtpz; 5610 5611 rtpz = &rtpn->rb_tree_per_zone[zone]; 5612 rtpz->rb_root = RB_ROOT; 5613 spin_lock_init(&rtpz->lock); 5614 } 5615 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5616 } 5617 5618 return 0; 5619 } 5620 subsys_initcall(mem_cgroup_init); 5621 5622 #ifdef CONFIG_MEMCG_SWAP 5623 /** 5624 * mem_cgroup_swapout - transfer a memsw charge to swap 5625 * @page: page whose memsw charge to transfer 5626 * @entry: swap entry to move the charge to 5627 * 5628 * Transfer the memsw charge of @page to @entry. 5629 */ 5630 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5631 { 5632 struct mem_cgroup *memcg; 5633 unsigned short oldid; 5634 5635 VM_BUG_ON_PAGE(PageLRU(page), page); 5636 VM_BUG_ON_PAGE(page_count(page), page); 5637 5638 if (!do_swap_account) 5639 return; 5640 5641 memcg = page->mem_cgroup; 5642 5643 /* Readahead page, never charged */ 5644 if (!memcg) 5645 return; 5646 5647 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5648 VM_BUG_ON_PAGE(oldid, page); 5649 mem_cgroup_swap_statistics(memcg, true); 5650 5651 page->mem_cgroup = NULL; 5652 5653 if (!mem_cgroup_is_root(memcg)) 5654 page_counter_uncharge(&memcg->memory, 1); 5655 5656 /* 5657 * Interrupts should be disabled here because the caller holds the 5658 * mapping->tree_lock, which is taken with interrupts off. It is 5659 * important here to have the interrupts disabled because it is the 5660 * only synchronisation we have for updating the per-CPU variables. 5661 */ 5662 VM_BUG_ON(!irqs_disabled()); 5663 mem_cgroup_charge_statistics(memcg, page, -1); 5664 memcg_check_events(memcg, page); 5665 } 5666 5667 /** 5668 * mem_cgroup_uncharge_swap - uncharge a swap entry 5669 * @entry: swap entry to uncharge 5670 * 5671 * Drop the memsw charge associated with @entry.
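 *
 * The swap_cgroup side of this can be pictured as one owner id per swap
 * slot: mem_cgroup_swapout() records the owning memcg's id when a page
 * goes to swap, and this function fetches-and-clears the id to find the
 * group whose memsw counter to release.  A toy model (illustrative):
 *
 *	#include <stdio.h>
 *
 *	#define NR_SLOTS 1024
 *
 *	// one owner id per swap slot, 0 meaning "unowned"
 *	static unsigned short owner[NR_SLOTS];
 *
 *	// store a new owner, return the previous one (swap_cgroup_record)
 *	static unsigned short record(unsigned int slot, unsigned short id)
 *	{
 *		unsigned short old = owner[slot];
 *
 *		owner[slot] = id;
 *		return old;
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned short id;
 *
 *		record(42, 7);		// swapout: slot 42 owned by memcg 7
 *		id = record(42, 0);	// slot freed: fetch and clear
 *		printf("uncharge memsw of memcg id %u\n", id);
 *		return 0;
 *	}
 *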
5672 */ 5673 void mem_cgroup_uncharge_swap(swp_entry_t entry) 5674 { 5675 struct mem_cgroup *memcg; 5676 unsigned short id; 5677 5678 if (!do_swap_account) 5679 return; 5680 5681 id = swap_cgroup_record(entry, 0); 5682 rcu_read_lock(); 5683 memcg = mem_cgroup_from_id(id); 5684 if (memcg) { 5685 if (!mem_cgroup_is_root(memcg)) 5686 page_counter_uncharge(&memcg->memsw, 1); 5687 mem_cgroup_swap_statistics(memcg, false); 5688 css_put(&memcg->css); 5689 } 5690 rcu_read_unlock(); 5691 } 5692 5693 /* to remember the boot option */ 5694 #ifdef CONFIG_MEMCG_SWAP_ENABLED 5695 static int really_do_swap_account __initdata = 1; 5696 #else 5697 static int really_do_swap_account __initdata; 5698 #endif 5699 5700 static int __init enable_swap_account(char *s) 5701 { 5702 if (!strcmp(s, "1")) 5703 really_do_swap_account = 1; 5704 else if (!strcmp(s, "0")) 5705 really_do_swap_account = 0; 5706 return 1; 5707 } 5708 __setup("swapaccount=", enable_swap_account); 5709 5710 static struct cftype memsw_cgroup_files[] = { 5711 { 5712 .name = "memsw.usage_in_bytes", 5713 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 5714 .read_u64 = mem_cgroup_read_u64, 5715 }, 5716 { 5717 .name = "memsw.max_usage_in_bytes", 5718 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 5719 .write = mem_cgroup_reset, 5720 .read_u64 = mem_cgroup_read_u64, 5721 }, 5722 { 5723 .name = "memsw.limit_in_bytes", 5724 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 5725 .write = mem_cgroup_write, 5726 .read_u64 = mem_cgroup_read_u64, 5727 }, 5728 { 5729 .name = "memsw.failcnt", 5730 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 5731 .write = mem_cgroup_reset, 5732 .read_u64 = mem_cgroup_read_u64, 5733 }, 5734 { }, /* terminate */ 5735 }; 5736 5737 static int __init mem_cgroup_swap_init(void) 5738 { 5739 if (!mem_cgroup_disabled() && really_do_swap_account) { 5740 do_swap_account = 1; 5741 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, 5742 memsw_cgroup_files)); 5743 } 5744 return 0; 5745 } 5746 subsys_initcall(mem_cgroup_swap_init); 5747 5748 #endif /* CONFIG_MEMCG_SWAP */ 5749
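/*
 * For illustration: with the legacy memsw files registered above (and
 * swap accounting active, e.g. booted with swapaccount=1 when
 * CONFIG_MEMCG_SWAP_ENABLED is not set), userspace can derive a group's
 * swap consumption from the two usage counters, since memsw counts
 * memory plus swap.  A minimal sketch; the v1 mount point and group
 * name are assumptions:
 *
 *	#include <stdio.h>
 *
 *	static unsigned long long get(const char *file)
 *	{
 *		char path[256];
 *		unsigned long long val = 0;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/fs/cgroup/memory/mygroup/%s", file);
 *		f = fopen(path, "r");
 *		if (f) {
 *			fscanf(f, "%llu", &val);
 *			fclose(f);
 *		}
 *		return val;
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long long mem = get("memory.usage_in_bytes");
 *		unsigned long long memsw = get("memory.memsw.usage_in_bytes");
 *
 *		// memsw = memory + swap, so the difference is swap in use
 *		printf("swap in use: %llu bytes\n", memsw - mem);
 *		return 0;
 *	}
 */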