/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char *const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * The register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on the eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * The unregister_event() callback will be called when userspace
	 * closes the eventfd or on cgroup removal. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Support for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
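/*
 * Editor's note (usage sketch, cgroup v1 behavior): charge moving is
 * opted into per cgroup by writing a MOVE_* mask to the
 * memory.move_charge_at_immigrate file, e.g.:
 *
 *	echo 3 > memory.move_charge_at_immigrate
 *
 * enables both MOVE_ANON and MOVE_FILE, so anonymous and file charges
 * follow a task when it is migrated into the cgroup.
 */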
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this is that it works
 * better in sparse environments, where we have a lot of memcgs but only a
 * few that are kmem-limited. If we had, for instance, 200 memcgs and only
 * the 200th were kmem-limited, we would need a 200-entry array for it.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is not 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could
 * get this constant directly from cgroup, but it is understandable that this
 * is better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
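/*
 * Editor's note (worked example): memcg_alloc_cache_id() further down
 * grows the arrays with size = 2 * (id + 1), clamped to the
 * MEMCG_CACHES_MIN_SIZE..MEMCG_CACHES_MAX_SIZE range. So when
 * memcg_nr_cache_ids is 4 and id 4 is handed out, the arrays grow to
 * 2 * (4 + 1) == 10 entries.
 */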
/*
 * Many of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned. The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged
 * to and return its inode number or 0 if @page is not charged to any cgroup.
 * It is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a
 * moment after page_cgroup_ino() returns, so it only should be used by
 * callers that do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}
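/*
 * Editor's note: the per-node tree maintained below is keyed by
 * usage_in_excess; entries with an equal excess are inserted to the
 * right, so mctz->rb_rightmost always points at the worst offender,
 * which soft limit reclaim picks first.
 */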
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
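/*
 * Editor's note (worked example): with usage at 1536 pages and a soft
 * limit of 1024 pages, soft_limit_excess() returns 512 pages, i.e.
 * 2 MiB with 4 KiB pages.
 */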
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * the memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
				      int event)
{
	return atomic_long_read(&memcg->events[event]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on the ANON LRU.
	 */
	if (PageAnon(page))
		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
	else {
		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
		if (PageSwapBacked(page))
			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat_cpu->targets[target], next);
		return true;
	}
	return false;
}
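/*
 * Editor's note: the (long)(next - val) < 0 test above is the
 * time_after() idiom and stays correct across counter wraparound. E.g.
 * with next == 99 after wrapping and val == ULONG_MAX - 2, the signed
 * difference is a small positive number (not yet due); once val wraps
 * past 99 the difference turns negative and the target fires.
 */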
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
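/*
 * Editor's note (usage sketch): get_mem_cgroup_from_mm() returns with a
 * css reference held, which the caller must drop when done:
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	... operate on memcg ...
 *	css_put(&memcg->css);
 */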
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
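/*
 * Editor's note (usage sketch): leaving a walk early must go through
 * mem_cgroup_iter_break() so the reference on the last returned memcg
 * is dropped (stop_condition() is a hypothetical predicate):
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (stop_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * mem_cgroup_scan_tasks() below follows exactly this pattern.
 */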
/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, 0, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the
		 * oom killer still needs to detect if they have already been
		 * oom killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
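/*
 * Editor's note (worked example): with usage at 900 pages against a
 * 1000-page limit the margin is 100 pages; if memsw usage is 990 pages
 * against a 1050-page memsw limit, the margin shrinks to
 * min(100, 60) = 60 pages.
 */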
/*
 * A routine for checking whether "memcg" is under move_account() or not.
 *
 * Check whether the given cgroup is mc.from, mc.to, or under the hierarchy
 * of a moving cgroup. This is used for waiting out the high memory pressure
 * caused by a "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike the task_move routines, we access mc.to and mc.from not
	 * under mutual exclusion by cgroup_mutex. Here, we take the spinlock
	 * instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

static const unsigned int memcg1_stats[] = {
	MEMCG_CACHE,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

#define K(x) ((x) << (PAGE_SHIFT-10))
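/*
 * Editor's note (worked example): K() scales pages to kilobytes; with
 * 4 KiB pages (PAGE_SHIFT == 12), K(25) == 25 << 2 == 100.
 */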
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to the memory
 * controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached for cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.max), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.max), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.max), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", memcg1_stat_names[i],
				K(memcg_page_state(iter, memcg1_stats[i])));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max;

	max = memcg->memory.max;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_max;
		unsigned long swap_max;

		memsw_max = memcg->memsw.max;
		swap_max = memcg->swap.max;
		swap_max = min(swap_max, (unsigned long)total_swap_pages);
		max = min(max + swap_max, memsw_max);
	}
	return max;
}
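/*
 * Editor's note (worked example, assuming a non-zero swappiness and
 * ample total_swap_pages): with memory.max = 1000 pages,
 * swap.max = 500 pages and memsw.max = 1200 pages,
 * mem_cgroup_get_max() returns min(1000 + 500, 1200) = 1200 pages.
 */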
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	mutex_lock(&oom_lock);
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the caller wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So, update the list loosely once in a while.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}
/*
 * Selecting a node where we start reclaim from. Because all we need is to
 * reduce the usage counter, starting from anywhere is OK. Reclaiming from
 * the current node has pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node
 * which we'll use or we've used, so it may hurt LRU behavior. And if several
 * threads hit their limits, they will contend on one node. But freeing from
 * a remote node costs more in reclaim because of memory latency.
 *
 * For now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fall back to the current node in that case for simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor too little,
				 * so that we don't keep coming back to
				 * reclaim from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}
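/*
 * Editor's note (worked example): with an excess of 400 pages, the loop
 * above stops once at least excess >> 2 = 100 pages have been reclaimed
 * (checked after the second full hierarchy pass), or after
 * MEM_CGROUP_MAX_RECLAIM_LOOPS passes.
 */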
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked,
			 * so we cannot take the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree, so we have
		 * to clean up what we already set up before reaching
		 * the failing subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}
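/*
 * Editor's note (worked example): for a hierarchy A/B, an OOM in B
 * wakes waiters on A (B is a descendant of A), and an OOM in A wakes
 * waiters on B (the waiter's memcg is a descendant of the OOMing one);
 * waiters in unrelated sibling cgroups are left alone.
 */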
1498 * 1499 * That's why we don't do anything here except remember the 1500 * OOM context and then deal with it at the end of the page 1501 * fault when the stack is unwound, the locks are released, 1502 * and when we know whether the fault was overall successful. 1503 */ 1504 css_get(&memcg->css); 1505 current->memcg_in_oom = memcg; 1506 current->memcg_oom_gfp_mask = mask; 1507 current->memcg_oom_order = order; 1508 } 1509 1510 /** 1511 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1512 * @handle: actually kill/wait or just clean up the OOM state 1513 * 1514 * This has to be called at the end of a page fault if the memcg OOM 1515 * handler was enabled. 1516 * 1517 * Memcg supports userspace OOM handling where failed allocations must 1518 * sleep on a waitqueue until the userspace task resolves the 1519 * situation. Sleeping directly in the charge context with all kinds 1520 * of locks held is not a good idea, instead we remember an OOM state 1521 * in the task and mem_cgroup_oom_synchronize() has to be called at 1522 * the end of the page fault to complete the OOM handling. 1523 * 1524 * Returns %true if an ongoing memcg OOM situation was detected and 1525 * completed, %false otherwise. 1526 */ 1527 bool mem_cgroup_oom_synchronize(bool handle) 1528 { 1529 struct mem_cgroup *memcg = current->memcg_in_oom; 1530 struct oom_wait_info owait; 1531 bool locked; 1532 1533 /* OOM is global, do not handle */ 1534 if (!memcg) 1535 return false; 1536 1537 if (!handle) 1538 goto cleanup; 1539 1540 owait.memcg = memcg; 1541 owait.wait.flags = 0; 1542 owait.wait.func = memcg_oom_wake_function; 1543 owait.wait.private = current; 1544 INIT_LIST_HEAD(&owait.wait.entry); 1545 1546 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1547 mem_cgroup_mark_under_oom(memcg); 1548 1549 locked = mem_cgroup_oom_trylock(memcg); 1550 1551 if (locked) 1552 mem_cgroup_oom_notify(memcg); 1553 1554 if (locked && !memcg->oom_kill_disable) { 1555 mem_cgroup_unmark_under_oom(memcg); 1556 finish_wait(&memcg_oom_waitq, &owait.wait); 1557 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1558 current->memcg_oom_order); 1559 } else { 1560 schedule(); 1561 mem_cgroup_unmark_under_oom(memcg); 1562 finish_wait(&memcg_oom_waitq, &owait.wait); 1563 } 1564 1565 if (locked) { 1566 mem_cgroup_oom_unlock(memcg); 1567 /* 1568 * There is no guarantee that an OOM-lock contender 1569 * sees the wakeups triggered by the OOM kill 1570 * uncharges. Wake any sleepers explicitely. 1571 */ 1572 memcg_oom_recover(memcg); 1573 } 1574 cleanup: 1575 current->memcg_in_oom = NULL; 1576 css_put(&memcg->css); 1577 return true; 1578 } 1579 1580 /** 1581 * lock_page_memcg - lock a page->mem_cgroup binding 1582 * @page: the page 1583 * 1584 * This function protects unlocked LRU pages from being moved to 1585 * another cgroup. 1586 * 1587 * It ensures lifetime of the returned memcg. Caller is responsible 1588 * for the lifetime of the page; __unlock_page_memcg() is available 1589 * when @page might get freed inside the locked section. 1590 */ 1591 struct mem_cgroup *lock_page_memcg(struct page *page) 1592 { 1593 struct mem_cgroup *memcg; 1594 unsigned long flags; 1595 1596 /* 1597 * The RCU lock is held throughout the transaction. The fast 1598 * path can get away without acquiring the memcg->move_lock 1599 * because page moving starts with an RCU grace period. 
1600 * 1601 * The RCU lock also protects the memcg from being freed when 1602 * the page state that is going to change is the only thing 1603 * preventing the page itself from being freed. E.g. writeback 1604 * doesn't hold a page reference and relies on PG_writeback to 1605 * keep off truncation, migration and so forth. 1606 */ 1607 rcu_read_lock(); 1608 1609 if (mem_cgroup_disabled()) 1610 return NULL; 1611 again: 1612 memcg = page->mem_cgroup; 1613 if (unlikely(!memcg)) 1614 return NULL; 1615 1616 if (atomic_read(&memcg->moving_account) <= 0) 1617 return memcg; 1618 1619 spin_lock_irqsave(&memcg->move_lock, flags); 1620 if (memcg != page->mem_cgroup) { 1621 spin_unlock_irqrestore(&memcg->move_lock, flags); 1622 goto again; 1623 } 1624 1625 /* 1626 * When charge migration first begins, we can have locked and 1627 * unlocked page stat updates happening concurrently. Track 1628 * the task who has the lock for unlock_page_memcg(). 1629 */ 1630 memcg->move_lock_task = current; 1631 memcg->move_lock_flags = flags; 1632 1633 return memcg; 1634 } 1635 EXPORT_SYMBOL(lock_page_memcg); 1636 1637 /** 1638 * __unlock_page_memcg - unlock and unpin a memcg 1639 * @memcg: the memcg 1640 * 1641 * Unlock and unpin a memcg returned by lock_page_memcg(). 1642 */ 1643 void __unlock_page_memcg(struct mem_cgroup *memcg) 1644 { 1645 if (memcg && memcg->move_lock_task == current) { 1646 unsigned long flags = memcg->move_lock_flags; 1647 1648 memcg->move_lock_task = NULL; 1649 memcg->move_lock_flags = 0; 1650 1651 spin_unlock_irqrestore(&memcg->move_lock, flags); 1652 } 1653 1654 rcu_read_unlock(); 1655 } 1656 1657 /** 1658 * unlock_page_memcg - unlock a page->mem_cgroup binding 1659 * @page: the page 1660 */ 1661 void unlock_page_memcg(struct page *page) 1662 { 1663 __unlock_page_memcg(page->mem_cgroup); 1664 } 1665 EXPORT_SYMBOL(unlock_page_memcg); 1666 1667 struct memcg_stock_pcp { 1668 struct mem_cgroup *cached; /* this never be root cgroup */ 1669 unsigned int nr_pages; 1670 struct work_struct work; 1671 unsigned long flags; 1672 #define FLUSHING_CACHED_CHARGE 0 1673 }; 1674 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1675 static DEFINE_MUTEX(percpu_charge_mutex); 1676 1677 /** 1678 * consume_stock: Try to consume stocked charge on this cpu. 1679 * @memcg: memcg to consume from. 1680 * @nr_pages: how many pages to charge. 1681 * 1682 * The charges will only happen if @memcg matches the current cpu's memcg 1683 * stock, and at least @nr_pages are available in that stock. Failure to 1684 * service an allocation will refill the stock. 1685 * 1686 * returns true if successful, false otherwise. 1687 */ 1688 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 1689 { 1690 struct memcg_stock_pcp *stock; 1691 unsigned long flags; 1692 bool ret = false; 1693 1694 if (nr_pages > MEMCG_CHARGE_BATCH) 1695 return ret; 1696 1697 local_irq_save(flags); 1698 1699 stock = this_cpu_ptr(&memcg_stock); 1700 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 1701 stock->nr_pages -= nr_pages; 1702 ret = true; 1703 } 1704 1705 local_irq_restore(flags); 1706 1707 return ret; 1708 } 1709 1710 /* 1711 * Returns stocks cached in percpu and reset cached information. 
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock. Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Return pages cached in the percpu stock to the page counters and reset
 * the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on local CPU stock here with IRQ disabled.
	 */
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache charges to the local per-CPU area, to be consumed by
 * consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);

	local_irq_restore(flags);
}
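/*
 * Editor's note (worked example): try_charge() charges in batches of
 * MEMCG_CHARGE_BATCH pages. With a batch of 32, charging a single page
 * leaves 31 pages in the local stock via refill_stock(), so the next
 * 31 single-page charges on this CPU can be served by consume_stock()
 * without touching the shared page counters.
 */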
/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that a system-wide "drain" is running.
	 * We do not care about races with CPU hotplug because CPU down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
			css_put(&memcg->css);
			continue;
		}
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
		css_put(&memcg->css);
	}
	put_cpu();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *memcg;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	for_each_mem_cgroup(memcg) {
		int i;

		for (i = 0; i < MEMCG_NR_STAT; i++) {
			int nid;
			long x;

			x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
			if (x)
				atomic_long_add(x, &memcg->stat[i]);

			if (i >= NR_VM_NODE_STAT_ITEMS)
				continue;

			for_each_node(nid) {
				struct mem_cgroup_per_node *pn;

				pn = mem_cgroup_nodeinfo(memcg, nid);
				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
				if (x)
					atomic_long_add(x, &pn->lruvec_stat[i]);
			}
		}

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
			long x;

			x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
			if (x)
				atomic_long_add(x, &memcg->events[i]);
		}
	}

	return 0;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		memcg_memory_event(memcg, MEMCG_HIGH);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}
/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage. Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(tsk_is_oom_victim(current) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	memcg_memory_event(mem_over_limit, MEMCG_MAX);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages. Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * During a task move, charges can be double-counted, so it's
	 * better to wait until the end of the move if one is in progress.
	 */
1982 */ 1983 if (mem_cgroup_wait_acct_move(mem_over_limit)) 1984 goto retry; 1985 1986 if (nr_retries--) 1987 goto retry; 1988 1989 if (gfp_mask & __GFP_NOFAIL) 1990 goto force; 1991 1992 if (fatal_signal_pending(current)) 1993 goto force; 1994 1995 memcg_memory_event(mem_over_limit, MEMCG_OOM); 1996 1997 mem_cgroup_oom(mem_over_limit, gfp_mask, 1998 get_order(nr_pages * PAGE_SIZE)); 1999 nomem: 2000 if (!(gfp_mask & __GFP_NOFAIL)) 2001 return -ENOMEM; 2002 force: 2003 /* 2004 * The allocation either can't fail or will lead to more memory 2005 * being freed very soon. Allow memory usage go over the limit 2006 * temporarily by force charging it. 2007 */ 2008 page_counter_charge(&memcg->memory, nr_pages); 2009 if (do_memsw_account()) 2010 page_counter_charge(&memcg->memsw, nr_pages); 2011 css_get_many(&memcg->css, nr_pages); 2012 2013 return 0; 2014 2015 done_restock: 2016 css_get_many(&memcg->css, batch); 2017 if (batch > nr_pages) 2018 refill_stock(memcg, batch - nr_pages); 2019 2020 /* 2021 * If the hierarchy is above the normal consumption range, schedule 2022 * reclaim on returning to userland. We can perform reclaim here 2023 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2024 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2025 * not recorded as it most likely matches current's and won't 2026 * change in the meantime. As high limit is checked again before 2027 * reclaim, the cost of mismatch is negligible. 2028 */ 2029 do { 2030 if (page_counter_read(&memcg->memory) > memcg->high) { 2031 /* Don't bother a random interrupted task */ 2032 if (in_interrupt()) { 2033 schedule_work(&memcg->high_work); 2034 break; 2035 } 2036 current->memcg_nr_pages_over_high += batch; 2037 set_notify_resume(current); 2038 break; 2039 } 2040 } while ((memcg = parent_mem_cgroup(memcg))); 2041 2042 return 0; 2043 } 2044 2045 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2046 { 2047 if (mem_cgroup_is_root(memcg)) 2048 return; 2049 2050 page_counter_uncharge(&memcg->memory, nr_pages); 2051 if (do_memsw_account()) 2052 page_counter_uncharge(&memcg->memsw, nr_pages); 2053 2054 css_put_many(&memcg->css, nr_pages); 2055 } 2056 2057 static void lock_page_lru(struct page *page, int *isolated) 2058 { 2059 struct zone *zone = page_zone(page); 2060 2061 spin_lock_irq(zone_lru_lock(zone)); 2062 if (PageLRU(page)) { 2063 struct lruvec *lruvec; 2064 2065 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2066 ClearPageLRU(page); 2067 del_page_from_lru_list(page, lruvec, page_lru(page)); 2068 *isolated = 1; 2069 } else 2070 *isolated = 0; 2071 } 2072 2073 static void unlock_page_lru(struct page *page, int isolated) 2074 { 2075 struct zone *zone = page_zone(page); 2076 2077 if (isolated) { 2078 struct lruvec *lruvec; 2079 2080 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2081 VM_BUG_ON_PAGE(PageLRU(page), page); 2082 SetPageLRU(page); 2083 add_page_to_lru_list(page, lruvec, page_lru(page)); 2084 } 2085 spin_unlock_irq(zone_lru_lock(zone)); 2086 } 2087 2088 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2089 bool lrucare) 2090 { 2091 int isolated; 2092 2093 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2094 2095 /* 2096 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2097 * may already be on some other mem_cgroup's LRU. Take care of it. 
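 *
 * A minimal illustrative caller (hypothetical, not taken from this
 * file) would pair this with try_charge():
 *
 *	if (!try_charge(memcg, gfp_mask, 1))
 *		commit_charge(page, memcg, PageSwapCache(page));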
2098 */ 2099 if (lrucare) 2100 lock_page_lru(page, &isolated); 2101 2102 /* 2103 * Nobody should be changing or seriously looking at 2104 * page->mem_cgroup at this point: 2105 * 2106 * - the page is uncharged 2107 * 2108 * - the page is off-LRU 2109 * 2110 * - an anonymous fault has exclusive page access, except for 2111 * a locked page table 2112 * 2113 * - a page cache insertion, a swapin fault, or a migration 2114 * have the page locked 2115 */ 2116 page->mem_cgroup = memcg; 2117 2118 if (lrucare) 2119 unlock_page_lru(page, isolated); 2120 } 2121 2122 #ifndef CONFIG_SLOB 2123 static int memcg_alloc_cache_id(void) 2124 { 2125 int id, size; 2126 int err; 2127 2128 id = ida_simple_get(&memcg_cache_ida, 2129 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2130 if (id < 0) 2131 return id; 2132 2133 if (id < memcg_nr_cache_ids) 2134 return id; 2135 2136 /* 2137 * There's no space for the new id in memcg_caches arrays, 2138 * so we have to grow them. 2139 */ 2140 down_write(&memcg_cache_ids_sem); 2141 2142 size = 2 * (id + 1); 2143 if (size < MEMCG_CACHES_MIN_SIZE) 2144 size = MEMCG_CACHES_MIN_SIZE; 2145 else if (size > MEMCG_CACHES_MAX_SIZE) 2146 size = MEMCG_CACHES_MAX_SIZE; 2147 2148 err = memcg_update_all_caches(size); 2149 if (!err) 2150 err = memcg_update_all_list_lrus(size); 2151 if (!err) 2152 memcg_nr_cache_ids = size; 2153 2154 up_write(&memcg_cache_ids_sem); 2155 2156 if (err) { 2157 ida_simple_remove(&memcg_cache_ida, id); 2158 return err; 2159 } 2160 return id; 2161 } 2162 2163 static void memcg_free_cache_id(int id) 2164 { 2165 ida_simple_remove(&memcg_cache_ida, id); 2166 } 2167 2168 struct memcg_kmem_cache_create_work { 2169 struct mem_cgroup *memcg; 2170 struct kmem_cache *cachep; 2171 struct work_struct work; 2172 }; 2173 2174 static void memcg_kmem_cache_create_func(struct work_struct *w) 2175 { 2176 struct memcg_kmem_cache_create_work *cw = 2177 container_of(w, struct memcg_kmem_cache_create_work, work); 2178 struct mem_cgroup *memcg = cw->memcg; 2179 struct kmem_cache *cachep = cw->cachep; 2180 2181 memcg_create_kmem_cache(memcg, cachep); 2182 2183 css_put(&memcg->css); 2184 kfree(cw); 2185 } 2186 2187 /* 2188 * Enqueue the creation of a per-memcg kmem_cache. 2189 */ 2190 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2191 struct kmem_cache *cachep) 2192 { 2193 struct memcg_kmem_cache_create_work *cw; 2194 2195 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); 2196 if (!cw) 2197 return; 2198 2199 css_get(&memcg->css); 2200 2201 cw->memcg = memcg; 2202 cw->cachep = cachep; 2203 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2204 2205 queue_work(memcg_kmem_cache_wq, &cw->work); 2206 } 2207 2208 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2209 struct kmem_cache *cachep) 2210 { 2211 /* 2212 * We need to stop accounting when we kmalloc, because if the 2213 * corresponding kmalloc cache is not yet created, the first allocation 2214 * in __memcg_schedule_kmem_cache_create will recurse. 2215 * 2216 * However, it is better to enclose the whole function. Depending on 2217 * the debugging options enabled, INIT_WORK(), for instance, can 2218 * trigger an allocation. This too, will make us recurse. Because at 2219 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2220 * the safest choice is to do it like this, wrapping the whole function. 
2221 */ 2222 current->memcg_kmem_skip_account = 1; 2223 __memcg_schedule_kmem_cache_create(memcg, cachep); 2224 current->memcg_kmem_skip_account = 0; 2225 } 2226 2227 static inline bool memcg_kmem_bypass(void) 2228 { 2229 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2230 return true; 2231 return false; 2232 } 2233 2234 /** 2235 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2236 * @cachep: the original global kmem cache 2237 * 2238 * Return the kmem_cache we're supposed to use for a slab allocation. 2239 * We try to use the current memcg's version of the cache. 2240 * 2241 * If the cache does not exist yet and we are the first user of it, we 2242 * create it asynchronously in a workqueue and let the current allocation 2243 * go through with the original cache. 2244 * 2245 * This function takes a reference to the cache it returns to assure it 2246 * won't get destroyed while we are working with it. Once the caller is 2247 * done with it, memcg_kmem_put_cache() must be called to release the 2248 * reference. 2249 */ 2250 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2251 { 2252 struct mem_cgroup *memcg; 2253 struct kmem_cache *memcg_cachep; 2254 int kmemcg_id; 2255 2256 VM_BUG_ON(!is_root_cache(cachep)); 2257 2258 if (memcg_kmem_bypass()) 2259 return cachep; 2260 2261 if (current->memcg_kmem_skip_account) 2262 return cachep; 2263 2264 memcg = get_mem_cgroup_from_mm(current->mm); 2265 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2266 if (kmemcg_id < 0) 2267 goto out; 2268 2269 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2270 if (likely(memcg_cachep)) 2271 return memcg_cachep; 2272 2273 /* 2274 * If we are in a safe context (can wait, and not in interrupt 2275 * context), we could be predictable and return right away. 2276 * This would guarantee that the allocation being performed 2277 * already belongs in the new cache. 2278 * 2279 * However, there are some clashes that can arise from locking. 2280 * For instance, because we acquire the slab_mutex while doing 2281 * memcg_create_kmem_cache, this means no further allocation 2282 * could happen with the slab_mutex held. So it's better to 2283 * defer everything. 2284 */ 2285 memcg_schedule_kmem_cache_create(memcg, cachep); 2286 out: 2287 css_put(&memcg->css); 2288 return cachep; 2289 } 2290 2291 /** 2292 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2293 * @cachep: the cache returned by memcg_kmem_get_cache 2294 */ 2295 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2296 { 2297 if (!is_root_cache(cachep)) 2298 css_put(&cachep->memcg_params.memcg->css); 2299 } 2300 2301 /** 2302 * memcg_kmem_charge_memcg: charge a kmem page 2303 * @page: page to charge 2304 * @gfp: reclaim mode 2305 * @order: allocation order 2306 * @memcg: memory cgroup to charge 2307 * 2308 * Returns 0 on success, an error code on failure.
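 *
 * Illustrative pairing (a sketch, not a caller from this file):
 *
 *	if (!memcg_kmem_charge_memcg(page, gfp, order, memcg)) {
 *		...
 *		memcg_kmem_uncharge(page, order);
 *	}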
2309 */ 2310 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2311 struct mem_cgroup *memcg) 2312 { 2313 unsigned int nr_pages = 1 << order; 2314 struct page_counter *counter; 2315 int ret; 2316 2317 ret = try_charge(memcg, gfp, nr_pages); 2318 if (ret) 2319 return ret; 2320 2321 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2322 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2323 cancel_charge(memcg, nr_pages); 2324 return -ENOMEM; 2325 } 2326 2327 page->mem_cgroup = memcg; 2328 2329 return 0; 2330 } 2331 2332 /** 2333 * memcg_kmem_charge: charge a kmem page to the current memory cgroup 2334 * @page: page to charge 2335 * @gfp: reclaim mode 2336 * @order: allocation order 2337 * 2338 * Returns 0 on success, an error code on failure. 2339 */ 2340 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2341 { 2342 struct mem_cgroup *memcg; 2343 int ret = 0; 2344 2345 if (memcg_kmem_bypass()) 2346 return 0; 2347 2348 memcg = get_mem_cgroup_from_mm(current->mm); 2349 if (!mem_cgroup_is_root(memcg)) { 2350 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2351 if (!ret) 2352 __SetPageKmemcg(page); 2353 } 2354 css_put(&memcg->css); 2355 return ret; 2356 } 2357 /** 2358 * memcg_kmem_uncharge: uncharge a kmem page 2359 * @page: page to uncharge 2360 * @order: allocation order 2361 */ 2362 void memcg_kmem_uncharge(struct page *page, int order) 2363 { 2364 struct mem_cgroup *memcg = page->mem_cgroup; 2365 unsigned int nr_pages = 1 << order; 2366 2367 if (!memcg) 2368 return; 2369 2370 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2371 2372 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2373 page_counter_uncharge(&memcg->kmem, nr_pages); 2374 2375 page_counter_uncharge(&memcg->memory, nr_pages); 2376 if (do_memsw_account()) 2377 page_counter_uncharge(&memcg->memsw, nr_pages); 2378 2379 page->mem_cgroup = NULL; 2380 2381 /* slab pages do not have PageKmemcg flag set */ 2382 if (PageKmemcg(page)) 2383 __ClearPageKmemcg(page); 2384 2385 css_put_many(&memcg->css, nr_pages); 2386 } 2387 #endif /* !CONFIG_SLOB */ 2388 2389 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2390 2391 /* 2392 * Because tail pages are not marked as "used", set it. We're under 2393 * zone_lru_lock and migration entries setup in all page mappings. 2394 */ 2395 void mem_cgroup_split_huge_fixup(struct page *head) 2396 { 2397 int i; 2398 2399 if (mem_cgroup_disabled()) 2400 return; 2401 2402 for (i = 1; i < HPAGE_PMD_NR; i++) 2403 head[i].mem_cgroup = head->mem_cgroup; 2404 2405 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); 2406 } 2407 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2408 2409 #ifdef CONFIG_MEMCG_SWAP 2410 /** 2411 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2412 * @entry: swap entry to be moved 2413 * @from: mem_cgroup which the entry is moved from 2414 * @to: mem_cgroup which the entry is moved to 2415 * 2416 * It succeeds only when the swap_cgroup's record for this entry is the same 2417 * as the mem_cgroup's id of @from. 2418 * 2419 * Returns 0 on success, -EINVAL on failure. 2420 * 2421 * The caller must have charged to @to, IOW, called page_counter_charge() about 2422 * both res and memsw, and called css_get(). 
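 *
 * Example (illustrative): if the swap_cgroup record for @entry still
 * holds mem_cgroup_id(@from), swap_cgroup_cmpxchg() rewrites it to
 * mem_cgroup_id(@to) and the MEMCG_SWAP counts are shifted between
 * the two groups; if another mover got there first, the cmpxchg fails
 * and -EINVAL is returned with no state touched.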
2423 */ 2424 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2425 struct mem_cgroup *from, struct mem_cgroup *to) 2426 { 2427 unsigned short old_id, new_id; 2428 2429 old_id = mem_cgroup_id(from); 2430 new_id = mem_cgroup_id(to); 2431 2432 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2433 mod_memcg_state(from, MEMCG_SWAP, -1); 2434 mod_memcg_state(to, MEMCG_SWAP, 1); 2435 return 0; 2436 } 2437 return -EINVAL; 2438 } 2439 #else 2440 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2441 struct mem_cgroup *from, struct mem_cgroup *to) 2442 { 2443 return -EINVAL; 2444 } 2445 #endif 2446 2447 static DEFINE_MUTEX(memcg_max_mutex); 2448 2449 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 2450 unsigned long max, bool memsw) 2451 { 2452 bool enlarge = false; 2453 bool drained = false; 2454 int ret; 2455 bool limits_invariant; 2456 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 2457 2458 do { 2459 if (signal_pending(current)) { 2460 ret = -EINTR; 2461 break; 2462 } 2463 2464 mutex_lock(&memcg_max_mutex); 2465 /* 2466 * Make sure that the new limit (memsw or memory limit) doesn't 2467 * break our basic invariant rule memory.max <= memsw.max. 2468 */ 2469 limits_invariant = memsw ? max >= memcg->memory.max : 2470 max <= memcg->memsw.max; 2471 if (!limits_invariant) { 2472 mutex_unlock(&memcg_max_mutex); 2473 ret = -EINVAL; 2474 break; 2475 } 2476 if (max > counter->max) 2477 enlarge = true; 2478 ret = page_counter_set_max(counter, max); 2479 mutex_unlock(&memcg_max_mutex); 2480 2481 if (!ret) 2482 break; 2483 2484 if (!drained) { 2485 drain_all_stock(memcg); 2486 drained = true; 2487 continue; 2488 } 2489 2490 if (!try_to_free_mem_cgroup_pages(memcg, 1, 2491 GFP_KERNEL, !memsw)) { 2492 ret = -EBUSY; 2493 break; 2494 } 2495 } while (true); 2496 2497 if (!ret && enlarge) 2498 memcg_oom_recover(memcg); 2499 2500 return ret; 2501 } 2502 2503 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 2504 gfp_t gfp_mask, 2505 unsigned long *total_scanned) 2506 { 2507 unsigned long nr_reclaimed = 0; 2508 struct mem_cgroup_per_node *mz, *next_mz = NULL; 2509 unsigned long reclaimed; 2510 int loop = 0; 2511 struct mem_cgroup_tree_per_node *mctz; 2512 unsigned long excess; 2513 unsigned long nr_scanned; 2514 2515 if (order > 0) 2516 return 0; 2517 2518 mctz = soft_limit_tree_node(pgdat->node_id); 2519 2520 /* 2521 * Do not even bother to check the largest node if the root 2522 * is empty. Do it lockless to prevent lock bouncing. Races 2523 * are acceptable as soft limit is best effort anyway. 
2524 */ 2525 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 2526 return 0; 2527 2528 /* 2529 * This loop can run for a while, especially if memory cgroups 2530 * continuously keep exceeding their soft limit and putting the 2531 * system under pressure. 2532 */ 2533 do { 2534 if (next_mz) 2535 mz = next_mz; 2536 else 2537 mz = mem_cgroup_largest_soft_limit_node(mctz); 2538 if (!mz) 2539 break; 2540 2541 nr_scanned = 0; 2542 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 2543 gfp_mask, &nr_scanned); 2544 nr_reclaimed += reclaimed; 2545 *total_scanned += nr_scanned; 2546 spin_lock_irq(&mctz->lock); 2547 __mem_cgroup_remove_exceeded(mz, mctz); 2548 2549 /* 2550 * If we failed to reclaim anything from this memory cgroup, 2551 * it is time to move on to the next cgroup 2552 */ 2553 next_mz = NULL; 2554 if (!reclaimed) 2555 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2556 2557 excess = soft_limit_excess(mz->memcg); 2558 /* 2559 * One school of thought says that we should not add 2560 * back the node to the tree if reclaim returns 0. 2561 * But our reclaim could return 0 simply because, due 2562 * to priority, we are exposing a smaller subset of 2563 * memory to reclaim from. Consider this a longer-term 2564 * TODO. 2565 */ 2566 /* If excess == 0, no tree ops */ 2567 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2568 spin_unlock_irq(&mctz->lock); 2569 css_put(&mz->memcg->css); 2570 loop++; 2571 /* 2572 * Could not reclaim anything and there are no more 2573 * mem cgroups to try or we seem to be looping without 2574 * reclaiming anything. 2575 */ 2576 if (!nr_reclaimed && 2577 (next_mz == NULL || 2578 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2579 break; 2580 } while (!nr_reclaimed); 2581 if (next_mz) 2582 css_put(&next_mz->memcg->css); 2583 return nr_reclaimed; 2584 } 2585 2586 /* 2587 * Test whether @memcg has children, dead or alive. Note that this 2588 * function doesn't care whether @memcg has use_hierarchy enabled and 2589 * returns %true if there are child csses according to the cgroup 2590 * hierarchy. Testing use_hierarchy is the caller's responsibility. 2591 */ 2592 static inline bool memcg_has_children(struct mem_cgroup *memcg) 2593 { 2594 bool ret; 2595 2596 rcu_read_lock(); 2597 ret = css_next_child(NULL, &memcg->css); 2598 rcu_read_unlock(); 2599 return ret; 2600 } 2601 2602 /* 2603 * Reclaims as many pages from the given memcg as possible. 2604 * 2605 * Caller is responsible for holding css reference for memcg.
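 *
 * Hypothetical call pattern (sketch):
 *
 *	css_get(&memcg->css);
 *	ret = mem_cgroup_force_empty(memcg);
 *	css_put(&memcg->css);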
2606 */ 2607 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 2608 { 2609 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2610 2611 /* we call try-to-free pages to make this cgroup empty */ 2612 lru_add_drain_all(); 2613 2614 drain_all_stock(memcg); 2615 2616 /* try to free all pages in this cgroup */ 2617 while (nr_retries && page_counter_read(&memcg->memory)) { 2618 int progress; 2619 2620 if (signal_pending(current)) 2621 return -EINTR; 2622 2623 progress = try_to_free_mem_cgroup_pages(memcg, 1, 2624 GFP_KERNEL, true); 2625 if (!progress) { 2626 nr_retries--; 2627 /* maybe some writeback is necessary */ 2628 congestion_wait(BLK_RW_ASYNC, HZ/10); 2629 } 2630 2631 } 2632 2633 return 0; 2634 } 2635 2636 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 2637 char *buf, size_t nbytes, 2638 loff_t off) 2639 { 2640 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2641 2642 if (mem_cgroup_is_root(memcg)) 2643 return -EINVAL; 2644 return mem_cgroup_force_empty(memcg) ?: nbytes; 2645 } 2646 2647 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 2648 struct cftype *cft) 2649 { 2650 return mem_cgroup_from_css(css)->use_hierarchy; 2651 } 2652 2653 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 2654 struct cftype *cft, u64 val) 2655 { 2656 int retval = 0; 2657 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2658 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 2659 2660 if (memcg->use_hierarchy == val) 2661 return 0; 2662 2663 /* 2664 * If parent's use_hierarchy is set, we can't make any modifications 2665 * in the child subtrees. If it is unset, then the change can 2666 * occur, provided the current cgroup has no children. 2667 * 2668 * For the root cgroup, parent_memcg is NULL; we allow the value to be 2669 * set if there are no children.
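 *
 * Worked example (illustrative): for a layout root -> A -> B where A
 * has use_hierarchy=1, writing either value to B fails with -EINVAL;
 * with A's use_hierarchy=0 and B childless, the write succeeds; if B
 * already has children, it fails with -EBUSY.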
2670 */ 2671 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2672 (val == 1 || val == 0)) { 2673 if (!memcg_has_children(memcg)) 2674 memcg->use_hierarchy = val; 2675 else 2676 retval = -EBUSY; 2677 } else 2678 retval = -EINVAL; 2679 2680 return retval; 2681 } 2682 2683 static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat) 2684 { 2685 struct mem_cgroup *iter; 2686 int i; 2687 2688 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT); 2689 2690 for_each_mem_cgroup_tree(iter, memcg) { 2691 for (i = 0; i < MEMCG_NR_STAT; i++) 2692 stat[i] += memcg_page_state(iter, i); 2693 } 2694 } 2695 2696 static void tree_events(struct mem_cgroup *memcg, unsigned long *events) 2697 { 2698 struct mem_cgroup *iter; 2699 int i; 2700 2701 memset(events, 0, sizeof(*events) * NR_VM_EVENT_ITEMS); 2702 2703 for_each_mem_cgroup_tree(iter, memcg) { 2704 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 2705 events[i] += memcg_sum_events(iter, i); 2706 } 2707 } 2708 2709 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2710 { 2711 unsigned long val = 0; 2712 2713 if (mem_cgroup_is_root(memcg)) { 2714 struct mem_cgroup *iter; 2715 2716 for_each_mem_cgroup_tree(iter, memcg) { 2717 val += memcg_page_state(iter, MEMCG_CACHE); 2718 val += memcg_page_state(iter, MEMCG_RSS); 2719 if (swap) 2720 val += memcg_page_state(iter, MEMCG_SWAP); 2721 } 2722 } else { 2723 if (!swap) 2724 val = page_counter_read(&memcg->memory); 2725 else 2726 val = page_counter_read(&memcg->memsw); 2727 } 2728 return val; 2729 } 2730 2731 enum { 2732 RES_USAGE, 2733 RES_LIMIT, 2734 RES_MAX_USAGE, 2735 RES_FAILCNT, 2736 RES_SOFT_LIMIT, 2737 }; 2738 2739 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 2740 struct cftype *cft) 2741 { 2742 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2743 struct page_counter *counter; 2744 2745 switch (MEMFILE_TYPE(cft->private)) { 2746 case _MEM: 2747 counter = &memcg->memory; 2748 break; 2749 case _MEMSWAP: 2750 counter = &memcg->memsw; 2751 break; 2752 case _KMEM: 2753 counter = &memcg->kmem; 2754 break; 2755 case _TCP: 2756 counter = &memcg->tcpmem; 2757 break; 2758 default: 2759 BUG(); 2760 } 2761 2762 switch (MEMFILE_ATTR(cft->private)) { 2763 case RES_USAGE: 2764 if (counter == &memcg->memory) 2765 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 2766 if (counter == &memcg->memsw) 2767 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 2768 return (u64)page_counter_read(counter) * PAGE_SIZE; 2769 case RES_LIMIT: 2770 return (u64)counter->max * PAGE_SIZE; 2771 case RES_MAX_USAGE: 2772 return (u64)counter->watermark * PAGE_SIZE; 2773 case RES_FAILCNT: 2774 return counter->failcnt; 2775 case RES_SOFT_LIMIT: 2776 return (u64)memcg->soft_limit * PAGE_SIZE; 2777 default: 2778 BUG(); 2779 } 2780 } 2781 2782 #ifndef CONFIG_SLOB 2783 static int memcg_online_kmem(struct mem_cgroup *memcg) 2784 { 2785 int memcg_id; 2786 2787 if (cgroup_memory_nokmem) 2788 return 0; 2789 2790 BUG_ON(memcg->kmemcg_id >= 0); 2791 BUG_ON(memcg->kmem_state); 2792 2793 memcg_id = memcg_alloc_cache_id(); 2794 if (memcg_id < 0) 2795 return memcg_id; 2796 2797 static_branch_inc(&memcg_kmem_enabled_key); 2798 /* 2799 * A memory cgroup is considered kmem-online as soon as it gets 2800 * kmemcg_id. Setting the id after enabling static branching will 2801 * guarantee no one starts accounting before all call sites are 2802 * patched. 
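 *
 * Ordering sketch (illustrative): an allocator racing with this
 * function that observes memcg->kmemcg_id >= 0 (set just below) must
 * also observe memcg_kmem_enabled() returning true, because the
 * static branch is incremented first.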
2803 */ 2804 memcg->kmemcg_id = memcg_id; 2805 memcg->kmem_state = KMEM_ONLINE; 2806 INIT_LIST_HEAD(&memcg->kmem_caches); 2807 2808 return 0; 2809 } 2810 2811 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2812 { 2813 struct cgroup_subsys_state *css; 2814 struct mem_cgroup *parent, *child; 2815 int kmemcg_id; 2816 2817 if (memcg->kmem_state != KMEM_ONLINE) 2818 return; 2819 /* 2820 * Clear the online state before clearing memcg_caches array 2821 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 2822 * guarantees that no cache will be created for this cgroup 2823 * after we are done (see memcg_create_kmem_cache()). 2824 */ 2825 memcg->kmem_state = KMEM_ALLOCATED; 2826 2827 memcg_deactivate_kmem_caches(memcg); 2828 2829 kmemcg_id = memcg->kmemcg_id; 2830 BUG_ON(kmemcg_id < 0); 2831 2832 parent = parent_mem_cgroup(memcg); 2833 if (!parent) 2834 parent = root_mem_cgroup; 2835 2836 /* 2837 * Change kmemcg_id of this cgroup and all its descendants to the 2838 * parent's id, and then move all entries from this cgroup's list_lrus 2839 * to ones of the parent. After we have finished, all list_lrus 2840 * corresponding to this cgroup are guaranteed to remain empty. The 2841 * ordering is imposed by list_lru_node->lock taken by 2842 * memcg_drain_all_list_lrus(). 2843 */ 2844 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 2845 css_for_each_descendant_pre(css, &memcg->css) { 2846 child = mem_cgroup_from_css(css); 2847 BUG_ON(child->kmemcg_id != kmemcg_id); 2848 child->kmemcg_id = parent->kmemcg_id; 2849 if (!memcg->use_hierarchy) 2850 break; 2851 } 2852 rcu_read_unlock(); 2853 2854 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 2855 2856 memcg_free_cache_id(kmemcg_id); 2857 } 2858 2859 static void memcg_free_kmem(struct mem_cgroup *memcg) 2860 { 2861 /* css_alloc() failed, offlining didn't happen */ 2862 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 2863 memcg_offline_kmem(memcg); 2864 2865 if (memcg->kmem_state == KMEM_ALLOCATED) { 2866 memcg_destroy_kmem_caches(memcg); 2867 static_branch_dec(&memcg_kmem_enabled_key); 2868 WARN_ON(page_counter_read(&memcg->kmem)); 2869 } 2870 } 2871 #else 2872 static int memcg_online_kmem(struct mem_cgroup *memcg) 2873 { 2874 return 0; 2875 } 2876 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2877 { 2878 } 2879 static void memcg_free_kmem(struct mem_cgroup *memcg) 2880 { 2881 } 2882 #endif /* !CONFIG_SLOB */ 2883 2884 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 2885 unsigned long max) 2886 { 2887 int ret; 2888 2889 mutex_lock(&memcg_max_mutex); 2890 ret = page_counter_set_max(&memcg->kmem, max); 2891 mutex_unlock(&memcg_max_mutex); 2892 return ret; 2893 } 2894 2895 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 2896 { 2897 int ret; 2898 2899 mutex_lock(&memcg_max_mutex); 2900 2901 ret = page_counter_set_max(&memcg->tcpmem, max); 2902 if (ret) 2903 goto out; 2904 2905 if (!memcg->tcpmem_active) { 2906 /* 2907 * The active flag needs to be written after the static_key 2908 * update. This is what guarantees that the socket activation 2909 * function is the last one to run. See mem_cgroup_sk_alloc() 2910 * for details, and note that we don't mark any socket as 2911 * belonging to this memcg until that flag is up. 2912 * 2913 * We need to do this, because static_keys will span multiple 2914 * sites, but we can't control their order. If we mark a socket 2915 * as accounted, but the accounting functions are not patched in 2916 * yet, we'll lose accounting. 
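 *
 * Concrete interleaving this ordering prevents (illustrative):
 * mem_cgroup_sk_alloc() reads memcg->tcpmem_active and, seeing it
 * set, marks the socket as accounted; had the flag been written
 * before static_branch_inc(), the charge paths guarded by
 * mem_cgroup_sockets_enabled could still be unpatched and that
 * socket's usage would go untracked.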
2917 * 2918 * We never race with the readers in mem_cgroup_sk_alloc(), 2919 * because when this value changes, the code to process it is not 2920 * patched in yet. 2921 */ 2922 static_branch_inc(&memcg_sockets_enabled_key); 2923 memcg->tcpmem_active = true; 2924 } 2925 out: 2926 mutex_unlock(&memcg_max_mutex); 2927 return ret; 2928 } 2929 2930 /* 2931 * The user of this function is... 2932 * RES_LIMIT. 2933 */ 2934 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 2935 char *buf, size_t nbytes, loff_t off) 2936 { 2937 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2938 unsigned long nr_pages; 2939 int ret; 2940 2941 buf = strstrip(buf); 2942 ret = page_counter_memparse(buf, "-1", &nr_pages); 2943 if (ret) 2944 return ret; 2945 2946 switch (MEMFILE_ATTR(of_cft(of)->private)) { 2947 case RES_LIMIT: 2948 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 2949 ret = -EINVAL; 2950 break; 2951 } 2952 switch (MEMFILE_TYPE(of_cft(of)->private)) { 2953 case _MEM: 2954 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 2955 break; 2956 case _MEMSWAP: 2957 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 2958 break; 2959 case _KMEM: 2960 ret = memcg_update_kmem_max(memcg, nr_pages); 2961 break; 2962 case _TCP: 2963 ret = memcg_update_tcp_max(memcg, nr_pages); 2964 break; 2965 } 2966 break; 2967 case RES_SOFT_LIMIT: 2968 memcg->soft_limit = nr_pages; 2969 ret = 0; 2970 break; 2971 } 2972 return ret ?: nbytes; 2973 } 2974 2975 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 2976 size_t nbytes, loff_t off) 2977 { 2978 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2979 struct page_counter *counter; 2980 2981 switch (MEMFILE_TYPE(of_cft(of)->private)) { 2982 case _MEM: 2983 counter = &memcg->memory; 2984 break; 2985 case _MEMSWAP: 2986 counter = &memcg->memsw; 2987 break; 2988 case _KMEM: 2989 counter = &memcg->kmem; 2990 break; 2991 case _TCP: 2992 counter = &memcg->tcpmem; 2993 break; 2994 default: 2995 BUG(); 2996 } 2997 2998 switch (MEMFILE_ATTR(of_cft(of)->private)) { 2999 case RES_MAX_USAGE: 3000 page_counter_reset_watermark(counter); 3001 break; 3002 case RES_FAILCNT: 3003 counter->failcnt = 0; 3004 break; 3005 default: 3006 BUG(); 3007 } 3008 3009 return nbytes; 3010 } 3011 3012 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3013 struct cftype *cft) 3014 { 3015 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3016 } 3017 3018 #ifdef CONFIG_MMU 3019 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3020 struct cftype *cft, u64 val) 3021 { 3022 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3023 3024 if (val & ~MOVE_MASK) 3025 return -EINVAL; 3026 3027 /* 3028 * No kind of locking is needed in here, because ->can_attach() will 3029 * check this value once at the beginning of the process, and then carry 3030 * on with stale data. This means that changes to this value will only 3031 * affect task migrations starting after the change.
3032 */ 3033 memcg->move_charge_at_immigrate = val; 3034 return 0; 3035 } 3036 #else 3037 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3038 struct cftype *cft, u64 val) 3039 { 3040 return -ENOSYS; 3041 } 3042 #endif 3043 3044 #ifdef CONFIG_NUMA 3045 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3046 { 3047 struct numa_stat { 3048 const char *name; 3049 unsigned int lru_mask; 3050 }; 3051 3052 static const struct numa_stat stats[] = { 3053 { "total", LRU_ALL }, 3054 { "file", LRU_ALL_FILE }, 3055 { "anon", LRU_ALL_ANON }, 3056 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3057 }; 3058 const struct numa_stat *stat; 3059 int nid; 3060 unsigned long nr; 3061 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3062 3063 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3064 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3065 seq_printf(m, "%s=%lu", stat->name, nr); 3066 for_each_node_state(nid, N_MEMORY) { 3067 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3068 stat->lru_mask); 3069 seq_printf(m, " N%d=%lu", nid, nr); 3070 } 3071 seq_putc(m, '\n'); 3072 } 3073 3074 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3075 struct mem_cgroup *iter; 3076 3077 nr = 0; 3078 for_each_mem_cgroup_tree(iter, memcg) 3079 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3080 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3081 for_each_node_state(nid, N_MEMORY) { 3082 nr = 0; 3083 for_each_mem_cgroup_tree(iter, memcg) 3084 nr += mem_cgroup_node_nr_lru_pages( 3085 iter, nid, stat->lru_mask); 3086 seq_printf(m, " N%d=%lu", nid, nr); 3087 } 3088 seq_putc(m, '\n'); 3089 } 3090 3091 return 0; 3092 } 3093 #endif /* CONFIG_NUMA */ 3094 3095 /* Universal VM events cgroup1 shows, original sort order */ 3096 static const unsigned int memcg1_events[] = { 3097 PGPGIN, 3098 PGPGOUT, 3099 PGFAULT, 3100 PGMAJFAULT, 3101 }; 3102 3103 static const char *const memcg1_event_names[] = { 3104 "pgpgin", 3105 "pgpgout", 3106 "pgfault", 3107 "pgmajfault", 3108 }; 3109 3110 static int memcg_stat_show(struct seq_file *m, void *v) 3111 { 3112 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3113 unsigned long memory, memsw; 3114 struct mem_cgroup *mi; 3115 unsigned int i; 3116 3117 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3118 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3119 3120 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3121 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3122 continue; 3123 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 3124 memcg_page_state(memcg, memcg1_stats[i]) * 3125 PAGE_SIZE); 3126 } 3127 3128 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3129 seq_printf(m, "%s %lu\n", memcg1_event_names[i], 3130 memcg_sum_events(memcg, memcg1_events[i])); 3131 3132 for (i = 0; i < NR_LRU_LISTS; i++) 3133 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3134 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3135 3136 /* Hierarchical information */ 3137 memory = memsw = PAGE_COUNTER_MAX; 3138 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3139 memory = min(memory, mi->memory.max); 3140 memsw = min(memsw, mi->memsw.max); 3141 } 3142 seq_printf(m, "hierarchical_memory_limit %llu\n", 3143 (u64)memory * PAGE_SIZE); 3144 if (do_memsw_account()) 3145 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3146 (u64)memsw * PAGE_SIZE); 3147 3148 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3149 unsigned long long val = 0; 3150 3151 if (memcg1_stats[i] == 
MEMCG_SWAP && !do_memsw_account()) 3152 continue; 3153 for_each_mem_cgroup_tree(mi, memcg) 3154 val += memcg_page_state(mi, memcg1_stats[i]) * 3155 PAGE_SIZE; 3156 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val); 3157 } 3158 3159 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) { 3160 unsigned long long val = 0; 3161 3162 for_each_mem_cgroup_tree(mi, memcg) 3163 val += memcg_sum_events(mi, memcg1_events[i]); 3164 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val); 3165 } 3166 3167 for (i = 0; i < NR_LRU_LISTS; i++) { 3168 unsigned long long val = 0; 3169 3170 for_each_mem_cgroup_tree(mi, memcg) 3171 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3172 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3173 } 3174 3175 #ifdef CONFIG_DEBUG_VM 3176 { 3177 pg_data_t *pgdat; 3178 struct mem_cgroup_per_node *mz; 3179 struct zone_reclaim_stat *rstat; 3180 unsigned long recent_rotated[2] = {0, 0}; 3181 unsigned long recent_scanned[2] = {0, 0}; 3182 3183 for_each_online_pgdat(pgdat) { 3184 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3185 rstat = &mz->lruvec.reclaim_stat; 3186 3187 recent_rotated[0] += rstat->recent_rotated[0]; 3188 recent_rotated[1] += rstat->recent_rotated[1]; 3189 recent_scanned[0] += rstat->recent_scanned[0]; 3190 recent_scanned[1] += rstat->recent_scanned[1]; 3191 } 3192 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3193 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3194 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3195 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3196 } 3197 #endif 3198 3199 return 0; 3200 } 3201 3202 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3203 struct cftype *cft) 3204 { 3205 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3206 3207 return mem_cgroup_swappiness(memcg); 3208 } 3209 3210 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3211 struct cftype *cft, u64 val) 3212 { 3213 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3214 3215 if (val > 100) 3216 return -EINVAL; 3217 3218 if (css->parent) 3219 memcg->swappiness = val; 3220 else 3221 vm_swappiness = val; 3222 3223 return 0; 3224 } 3225 3226 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3227 { 3228 struct mem_cgroup_threshold_ary *t; 3229 unsigned long usage; 3230 int i; 3231 3232 rcu_read_lock(); 3233 if (!swap) 3234 t = rcu_dereference(memcg->thresholds.primary); 3235 else 3236 t = rcu_dereference(memcg->memsw_thresholds.primary); 3237 3238 if (!t) 3239 goto unlock; 3240 3241 usage = mem_cgroup_usage(memcg, swap); 3242 3243 /* 3244 * current_threshold points to threshold just below or equal to usage. 3245 * If it's not true, a threshold was crossed after last 3246 * call of __mem_cgroup_threshold(). 3247 */ 3248 i = t->current_threshold; 3249 3250 /* 3251 * Iterate backward over array of thresholds starting from 3252 * current_threshold and check if a threshold is crossed. 3253 * If none of thresholds below usage is crossed, we read 3254 * only one element of the array here. 3255 */ 3256 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3257 eventfd_signal(t->entries[i].eventfd, 1); 3258 3259 /* i = current_threshold + 1 */ 3260 i++; 3261 3262 /* 3263 * Iterate forward over array of thresholds starting from 3264 * current_threshold+1 and check if a threshold is crossed. 3265 * If none of thresholds above usage is crossed, we read 3266 * only one element of the array here. 
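 *
 * Worked example (illustrative): with thresholds {4M, 8M, 12M} and
 * current_threshold at 8M, a usage of 13M makes this forward walk
 * signal the 12M eventfd and leave current_threshold at 12M; a usage
 * of 5M would instead be handled by the backward walk above, which
 * signals 8M and moves current_threshold back to 4M.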
3267 */ 3268 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3269 eventfd_signal(t->entries[i].eventfd, 1); 3270 3271 /* Update current_threshold */ 3272 t->current_threshold = i - 1; 3273 unlock: 3274 rcu_read_unlock(); 3275 } 3276 3277 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3278 { 3279 while (memcg) { 3280 __mem_cgroup_threshold(memcg, false); 3281 if (do_memsw_account()) 3282 __mem_cgroup_threshold(memcg, true); 3283 3284 memcg = parent_mem_cgroup(memcg); 3285 } 3286 } 3287 3288 static int compare_thresholds(const void *a, const void *b) 3289 { 3290 const struct mem_cgroup_threshold *_a = a; 3291 const struct mem_cgroup_threshold *_b = b; 3292 3293 if (_a->threshold > _b->threshold) 3294 return 1; 3295 3296 if (_a->threshold < _b->threshold) 3297 return -1; 3298 3299 return 0; 3300 } 3301 3302 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3303 { 3304 struct mem_cgroup_eventfd_list *ev; 3305 3306 spin_lock(&memcg_oom_lock); 3307 3308 list_for_each_entry(ev, &memcg->oom_notify, list) 3309 eventfd_signal(ev->eventfd, 1); 3310 3311 spin_unlock(&memcg_oom_lock); 3312 return 0; 3313 } 3314 3315 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3316 { 3317 struct mem_cgroup *iter; 3318 3319 for_each_mem_cgroup_tree(iter, memcg) 3320 mem_cgroup_oom_notify_cb(iter); 3321 } 3322 3323 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3324 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3325 { 3326 struct mem_cgroup_thresholds *thresholds; 3327 struct mem_cgroup_threshold_ary *new; 3328 unsigned long threshold; 3329 unsigned long usage; 3330 int i, size, ret; 3331 3332 ret = page_counter_memparse(args, "-1", &threshold); 3333 if (ret) 3334 return ret; 3335 3336 mutex_lock(&memcg->thresholds_lock); 3337 3338 if (type == _MEM) { 3339 thresholds = &memcg->thresholds; 3340 usage = mem_cgroup_usage(memcg, false); 3341 } else if (type == _MEMSWAP) { 3342 thresholds = &memcg->memsw_thresholds; 3343 usage = mem_cgroup_usage(memcg, true); 3344 } else 3345 BUG(); 3346 3347 /* Check if a threshold crossed before adding a new one */ 3348 if (thresholds->primary) 3349 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3350 3351 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3352 3353 /* Allocate memory for new array of thresholds */ 3354 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3355 GFP_KERNEL); 3356 if (!new) { 3357 ret = -ENOMEM; 3358 goto unlock; 3359 } 3360 new->size = size; 3361 3362 /* Copy thresholds (if any) to new array */ 3363 if (thresholds->primary) { 3364 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3365 sizeof(struct mem_cgroup_threshold)); 3366 } 3367 3368 /* Add new threshold */ 3369 new->entries[size - 1].eventfd = eventfd; 3370 new->entries[size - 1].threshold = threshold; 3371 3372 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3373 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3374 compare_thresholds, NULL); 3375 3376 /* Find current threshold */ 3377 new->current_threshold = -1; 3378 for (i = 0; i < size; i++) { 3379 if (new->entries[i].threshold <= usage) { 3380 /* 3381 * new->current_threshold will not be used until 3382 * rcu_assign_pointer(), so it's safe to increment 3383 * it here. 
3384 */ 3385 ++new->current_threshold; 3386 } else 3387 break; 3388 } 3389 3390 /* Free old spare buffer and save old primary buffer as spare */ 3391 kfree(thresholds->spare); 3392 thresholds->spare = thresholds->primary; 3393 3394 rcu_assign_pointer(thresholds->primary, new); 3395 3396 /* To be sure that nobody uses thresholds */ 3397 synchronize_rcu(); 3398 3399 unlock: 3400 mutex_unlock(&memcg->thresholds_lock); 3401 3402 return ret; 3403 } 3404 3405 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3406 struct eventfd_ctx *eventfd, const char *args) 3407 { 3408 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3409 } 3410 3411 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3412 struct eventfd_ctx *eventfd, const char *args) 3413 { 3414 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3415 } 3416 3417 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3418 struct eventfd_ctx *eventfd, enum res_type type) 3419 { 3420 struct mem_cgroup_thresholds *thresholds; 3421 struct mem_cgroup_threshold_ary *new; 3422 unsigned long usage; 3423 int i, j, size; 3424 3425 mutex_lock(&memcg->thresholds_lock); 3426 3427 if (type == _MEM) { 3428 thresholds = &memcg->thresholds; 3429 usage = mem_cgroup_usage(memcg, false); 3430 } else if (type == _MEMSWAP) { 3431 thresholds = &memcg->memsw_thresholds; 3432 usage = mem_cgroup_usage(memcg, true); 3433 } else 3434 BUG(); 3435 3436 if (!thresholds->primary) 3437 goto unlock; 3438 3439 /* Check if a threshold crossed before removing */ 3440 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3441 3442 /* Calculate new number of threshold */ 3443 size = 0; 3444 for (i = 0; i < thresholds->primary->size; i++) { 3445 if (thresholds->primary->entries[i].eventfd != eventfd) 3446 size++; 3447 } 3448 3449 new = thresholds->spare; 3450 3451 /* Set thresholds array to NULL if we don't have thresholds */ 3452 if (!size) { 3453 kfree(new); 3454 new = NULL; 3455 goto swap_buffers; 3456 } 3457 3458 new->size = size; 3459 3460 /* Copy thresholds and find current threshold */ 3461 new->current_threshold = -1; 3462 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3463 if (thresholds->primary->entries[i].eventfd == eventfd) 3464 continue; 3465 3466 new->entries[j] = thresholds->primary->entries[i]; 3467 if (new->entries[j].threshold <= usage) { 3468 /* 3469 * new->current_threshold will not be used 3470 * until rcu_assign_pointer(), so it's safe to increment 3471 * it here. 
3472 */ 3473 ++new->current_threshold; 3474 } 3475 j++; 3476 } 3477 3478 swap_buffers: 3479 /* Swap primary and spare array */ 3480 thresholds->spare = thresholds->primary; 3481 3482 rcu_assign_pointer(thresholds->primary, new); 3483 3484 /* To be sure that nobody uses thresholds */ 3485 synchronize_rcu(); 3486 3487 /* If all events are unregistered, free the spare array */ 3488 if (!new) { 3489 kfree(thresholds->spare); 3490 thresholds->spare = NULL; 3491 } 3492 unlock: 3493 mutex_unlock(&memcg->thresholds_lock); 3494 } 3495 3496 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3497 struct eventfd_ctx *eventfd) 3498 { 3499 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3500 } 3501 3502 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3503 struct eventfd_ctx *eventfd) 3504 { 3505 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3506 } 3507 3508 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3509 struct eventfd_ctx *eventfd, const char *args) 3510 { 3511 struct mem_cgroup_eventfd_list *event; 3512 3513 event = kmalloc(sizeof(*event), GFP_KERNEL); 3514 if (!event) 3515 return -ENOMEM; 3516 3517 spin_lock(&memcg_oom_lock); 3518 3519 event->eventfd = eventfd; 3520 list_add(&event->list, &memcg->oom_notify); 3521 3522 /* already in OOM ? */ 3523 if (memcg->under_oom) 3524 eventfd_signal(eventfd, 1); 3525 spin_unlock(&memcg_oom_lock); 3526 3527 return 0; 3528 } 3529 3530 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3531 struct eventfd_ctx *eventfd) 3532 { 3533 struct mem_cgroup_eventfd_list *ev, *tmp; 3534 3535 spin_lock(&memcg_oom_lock); 3536 3537 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3538 if (ev->eventfd == eventfd) { 3539 list_del(&ev->list); 3540 kfree(ev); 3541 } 3542 } 3543 3544 spin_unlock(&memcg_oom_lock); 3545 } 3546 3547 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3548 { 3549 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3550 3551 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3552 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3553 seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL)); 3554 return 0; 3555 } 3556 3557 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3558 struct cftype *cft, u64 val) 3559 { 3560 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3561 3562 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3563 if (!css->parent || !((val == 0) || (val == 1))) 3564 return -EINVAL; 3565 3566 memcg->oom_kill_disable = val; 3567 if (!val) 3568 memcg_oom_recover(memcg); 3569 3570 return 0; 3571 } 3572 3573 #ifdef CONFIG_CGROUP_WRITEBACK 3574 3575 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3576 { 3577 return wb_domain_init(&memcg->cgwb_domain, gfp); 3578 } 3579 3580 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3581 { 3582 wb_domain_exit(&memcg->cgwb_domain); 3583 } 3584 3585 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3586 { 3587 wb_domain_size_changed(&memcg->cgwb_domain); 3588 } 3589 3590 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3591 { 3592 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3593 3594 if (!memcg->css.parent) 3595 return NULL; 3596 3597 return &memcg->cgwb_domain; 3598 } 3599 3600 /** 3601 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3602 * @wb: bdi_writeback in 
question 3603 * @pfilepages: out parameter for number of file pages 3604 * @pheadroom: out parameter for number of allocatable pages according to memcg 3605 * @pdirty: out parameter for number of dirty pages 3606 * @pwriteback: out parameter for number of pages under writeback 3607 * 3608 * Determine the numbers of file, headroom, dirty, and writeback pages in 3609 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3610 * is a bit more involved. 3611 * 3612 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3613 * headroom is calculated as the lowest headroom of itself and the 3614 * ancestors. Note that this doesn't consider the actual amount of 3615 * available memory in the system. The caller should further cap 3616 * *@pheadroom accordingly. 3617 */ 3618 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3619 unsigned long *pheadroom, unsigned long *pdirty, 3620 unsigned long *pwriteback) 3621 { 3622 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3623 struct mem_cgroup *parent; 3624 3625 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 3626 3627 /* this should eventually include NR_UNSTABLE_NFS */ 3628 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 3629 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3630 (1 << LRU_ACTIVE_FILE)); 3631 *pheadroom = PAGE_COUNTER_MAX; 3632 3633 while ((parent = parent_mem_cgroup(memcg))) { 3634 unsigned long ceiling = min(memcg->memory.max, memcg->high); 3635 unsigned long used = page_counter_read(&memcg->memory); 3636 3637 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3638 memcg = parent; 3639 } 3640 } 3641 3642 #else /* CONFIG_CGROUP_WRITEBACK */ 3643 3644 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3645 { 3646 return 0; 3647 } 3648 3649 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3650 { 3651 } 3652 3653 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3654 { 3655 } 3656 3657 #endif /* CONFIG_CGROUP_WRITEBACK */ 3658 3659 /* 3660 * DO NOT USE IN NEW FILES. 3661 * 3662 * "cgroup.event_control" implementation. 3663 * 3664 * This is way over-engineered. It tries to support fully configurable 3665 * events for each user. Such level of flexibility is completely 3666 * unnecessary especially in the light of the planned unified hierarchy. 3667 * 3668 * Please deprecate this and replace with something simpler if at all 3669 * possible. 3670 */ 3671 3672 /* 3673 * Unregister event and free resources. 3674 * 3675 * Gets called from workqueue. 3676 */ 3677 static void memcg_event_remove(struct work_struct *work) 3678 { 3679 struct mem_cgroup_event *event = 3680 container_of(work, struct mem_cgroup_event, remove); 3681 struct mem_cgroup *memcg = event->memcg; 3682 3683 remove_wait_queue(event->wqh, &event->wait); 3684 3685 event->unregister_event(memcg, event->eventfd); 3686 3687 /* Notify userspace the event is going away. */ 3688 eventfd_signal(event->eventfd, 1); 3689 3690 eventfd_ctx_put(event->eventfd); 3691 kfree(event); 3692 css_put(&memcg->css); 3693 } 3694 3695 /* 3696 * Gets called on EPOLLHUP on eventfd when user closes it. 3697 * 3698 * Called with wqh->lock held and interrupts disabled. 
3699 */ 3700 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 3701 int sync, void *key) 3702 { 3703 struct mem_cgroup_event *event = 3704 container_of(wait, struct mem_cgroup_event, wait); 3705 struct mem_cgroup *memcg = event->memcg; 3706 __poll_t flags = key_to_poll(key); 3707 3708 if (flags & EPOLLHUP) { 3709 /* 3710 * If the event has been detached at cgroup removal, we 3711 * can simply return knowing the other side will clean up 3712 * for us. 3713 * 3714 * We can't race against event freeing since the other 3715 * side will require wqh->lock via remove_wait_queue(), 3716 * which we hold. 3717 */ 3718 spin_lock(&memcg->event_list_lock); 3719 if (!list_empty(&event->list)) { 3720 list_del_init(&event->list); 3721 /* 3722 * We are in atomic context, but cgroup_event_remove() 3723 * may sleep, so we have to call it in workqueue. 3724 */ 3725 schedule_work(&event->remove); 3726 } 3727 spin_unlock(&memcg->event_list_lock); 3728 } 3729 3730 return 0; 3731 } 3732 3733 static void memcg_event_ptable_queue_proc(struct file *file, 3734 wait_queue_head_t *wqh, poll_table *pt) 3735 { 3736 struct mem_cgroup_event *event = 3737 container_of(pt, struct mem_cgroup_event, pt); 3738 3739 event->wqh = wqh; 3740 add_wait_queue(wqh, &event->wait); 3741 } 3742 3743 /* 3744 * DO NOT USE IN NEW FILES. 3745 * 3746 * Parse input and register new cgroup event handler. 3747 * 3748 * Input must be in format '<event_fd> <control_fd> <args>'. 3749 * Interpretation of args is defined by control file implementation. 3750 */ 3751 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3752 char *buf, size_t nbytes, loff_t off) 3753 { 3754 struct cgroup_subsys_state *css = of_css(of); 3755 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3756 struct mem_cgroup_event *event; 3757 struct cgroup_subsys_state *cfile_css; 3758 unsigned int efd, cfd; 3759 struct fd efile; 3760 struct fd cfile; 3761 const char *name; 3762 char *endp; 3763 int ret; 3764 3765 buf = strstrip(buf); 3766 3767 efd = simple_strtoul(buf, &endp, 10); 3768 if (*endp != ' ') 3769 return -EINVAL; 3770 buf = endp + 1; 3771 3772 cfd = simple_strtoul(buf, &endp, 10); 3773 if ((*endp != ' ') && (*endp != '\0')) 3774 return -EINVAL; 3775 buf = endp + 1; 3776 3777 event = kzalloc(sizeof(*event), GFP_KERNEL); 3778 if (!event) 3779 return -ENOMEM; 3780 3781 event->memcg = memcg; 3782 INIT_LIST_HEAD(&event->list); 3783 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3784 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3785 INIT_WORK(&event->remove, memcg_event_remove); 3786 3787 efile = fdget(efd); 3788 if (!efile.file) { 3789 ret = -EBADF; 3790 goto out_kfree; 3791 } 3792 3793 event->eventfd = eventfd_ctx_fileget(efile.file); 3794 if (IS_ERR(event->eventfd)) { 3795 ret = PTR_ERR(event->eventfd); 3796 goto out_put_efile; 3797 } 3798 3799 cfile = fdget(cfd); 3800 if (!cfile.file) { 3801 ret = -EBADF; 3802 goto out_put_eventfd; 3803 } 3804 3805 /* the process needs read permission on the control file */ 3806 /* AV: shouldn't we check that it's been opened for read instead? */ 3807 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3808 if (ret < 0) 3809 goto out_put_cfile; 3810 3811 /* 3812 * Determine the event callbacks and set them in @event. This used 3813 * to be done via struct cftype but cgroup core no longer knows 3814 * about these events. The following is crude but the whole thing 3815 * is for compatibility anyway. 3816 * 3817 * DO NOT ADD NEW FILES.
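 *
 * Usage example (illustrative): to be notified when usage crosses
 * 8M, userspace opens an eventfd and memory.usage_in_bytes, then
 * writes "<efd> <cfd> 8388608" to cgroup.event_control; everything
 * after the two descriptors is handed to the register_event()
 * callback chosen below as @args.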
3818 */ 3819 name = cfile.file->f_path.dentry->d_name.name; 3820 3821 if (!strcmp(name, "memory.usage_in_bytes")) { 3822 event->register_event = mem_cgroup_usage_register_event; 3823 event->unregister_event = mem_cgroup_usage_unregister_event; 3824 } else if (!strcmp(name, "memory.oom_control")) { 3825 event->register_event = mem_cgroup_oom_register_event; 3826 event->unregister_event = mem_cgroup_oom_unregister_event; 3827 } else if (!strcmp(name, "memory.pressure_level")) { 3828 event->register_event = vmpressure_register_event; 3829 event->unregister_event = vmpressure_unregister_event; 3830 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3831 event->register_event = memsw_cgroup_usage_register_event; 3832 event->unregister_event = memsw_cgroup_usage_unregister_event; 3833 } else { 3834 ret = -EINVAL; 3835 goto out_put_cfile; 3836 } 3837 3838 /* 3839 * Verify @cfile should belong to @css. Also, remaining events are 3840 * automatically removed on cgroup destruction but the removal is 3841 * asynchronous, so take an extra ref on @css. 3842 */ 3843 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3844 &memory_cgrp_subsys); 3845 ret = -EINVAL; 3846 if (IS_ERR(cfile_css)) 3847 goto out_put_cfile; 3848 if (cfile_css != css) { 3849 css_put(cfile_css); 3850 goto out_put_cfile; 3851 } 3852 3853 ret = event->register_event(memcg, event->eventfd, buf); 3854 if (ret) 3855 goto out_put_css; 3856 3857 vfs_poll(efile.file, &event->pt); 3858 3859 spin_lock(&memcg->event_list_lock); 3860 list_add(&event->list, &memcg->event_list); 3861 spin_unlock(&memcg->event_list_lock); 3862 3863 fdput(cfile); 3864 fdput(efile); 3865 3866 return nbytes; 3867 3868 out_put_css: 3869 css_put(css); 3870 out_put_cfile: 3871 fdput(cfile); 3872 out_put_eventfd: 3873 eventfd_ctx_put(event->eventfd); 3874 out_put_efile: 3875 fdput(efile); 3876 out_kfree: 3877 kfree(event); 3878 3879 return ret; 3880 } 3881 3882 static struct cftype mem_cgroup_legacy_files[] = { 3883 { 3884 .name = "usage_in_bytes", 3885 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 3886 .read_u64 = mem_cgroup_read_u64, 3887 }, 3888 { 3889 .name = "max_usage_in_bytes", 3890 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 3891 .write = mem_cgroup_reset, 3892 .read_u64 = mem_cgroup_read_u64, 3893 }, 3894 { 3895 .name = "limit_in_bytes", 3896 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 3897 .write = mem_cgroup_write, 3898 .read_u64 = mem_cgroup_read_u64, 3899 }, 3900 { 3901 .name = "soft_limit_in_bytes", 3902 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 3903 .write = mem_cgroup_write, 3904 .read_u64 = mem_cgroup_read_u64, 3905 }, 3906 { 3907 .name = "failcnt", 3908 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 3909 .write = mem_cgroup_reset, 3910 .read_u64 = mem_cgroup_read_u64, 3911 }, 3912 { 3913 .name = "stat", 3914 .seq_show = memcg_stat_show, 3915 }, 3916 { 3917 .name = "force_empty", 3918 .write = mem_cgroup_force_empty_write, 3919 }, 3920 { 3921 .name = "use_hierarchy", 3922 .write_u64 = mem_cgroup_hierarchy_write, 3923 .read_u64 = mem_cgroup_hierarchy_read, 3924 }, 3925 { 3926 .name = "cgroup.event_control", /* XXX: for compat */ 3927 .write = memcg_write_event_control, 3928 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 3929 }, 3930 { 3931 .name = "swappiness", 3932 .read_u64 = mem_cgroup_swappiness_read, 3933 .write_u64 = mem_cgroup_swappiness_write, 3934 }, 3935 { 3936 .name = "move_charge_at_immigrate", 3937 .read_u64 = mem_cgroup_move_charge_read, 3938 .write_u64 = mem_cgroup_move_charge_write, 3939 
}, 3940 { 3941 .name = "oom_control", 3942 .seq_show = mem_cgroup_oom_control_read, 3943 .write_u64 = mem_cgroup_oom_control_write, 3944 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 3945 }, 3946 { 3947 .name = "pressure_level", 3948 }, 3949 #ifdef CONFIG_NUMA 3950 { 3951 .name = "numa_stat", 3952 .seq_show = memcg_numa_stat_show, 3953 }, 3954 #endif 3955 { 3956 .name = "kmem.limit_in_bytes", 3957 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 3958 .write = mem_cgroup_write, 3959 .read_u64 = mem_cgroup_read_u64, 3960 }, 3961 { 3962 .name = "kmem.usage_in_bytes", 3963 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 3964 .read_u64 = mem_cgroup_read_u64, 3965 }, 3966 { 3967 .name = "kmem.failcnt", 3968 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 3969 .write = mem_cgroup_reset, 3970 .read_u64 = mem_cgroup_read_u64, 3971 }, 3972 { 3973 .name = "kmem.max_usage_in_bytes", 3974 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 3975 .write = mem_cgroup_reset, 3976 .read_u64 = mem_cgroup_read_u64, 3977 }, 3978 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) 3979 { 3980 .name = "kmem.slabinfo", 3981 .seq_start = memcg_slab_start, 3982 .seq_next = memcg_slab_next, 3983 .seq_stop = memcg_slab_stop, 3984 .seq_show = memcg_slab_show, 3985 }, 3986 #endif 3987 { 3988 .name = "kmem.tcp.limit_in_bytes", 3989 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 3990 .write = mem_cgroup_write, 3991 .read_u64 = mem_cgroup_read_u64, 3992 }, 3993 { 3994 .name = "kmem.tcp.usage_in_bytes", 3995 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 3996 .read_u64 = mem_cgroup_read_u64, 3997 }, 3998 { 3999 .name = "kmem.tcp.failcnt", 4000 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4001 .write = mem_cgroup_reset, 4002 .read_u64 = mem_cgroup_read_u64, 4003 }, 4004 { 4005 .name = "kmem.tcp.max_usage_in_bytes", 4006 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4007 .write = mem_cgroup_reset, 4008 .read_u64 = mem_cgroup_read_u64, 4009 }, 4010 { }, /* terminate */ 4011 }; 4012 4013 /* 4014 * Private memory cgroup IDR 4015 * 4016 * Swap-out records and page cache shadow entries need to store memcg 4017 * references in constrained space, so we maintain an ID space that is 4018 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4019 * memory-controlled cgroups to 64k. 4020 * 4021 * However, there usually are many references to the offline CSS after 4022 * the cgroup has been destroyed, such as page cache or reclaimable 4023 * slab objects, that don't need to hang on to the ID. We want to keep 4024 * those dead CSSes from occupying IDs, or we might quickly exhaust the 4025 * relatively small ID space and prevent the creation of new cgroups 4026 * even when there are far fewer than 64k cgroups - possibly none. 4027 * 4028 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4029 * be freed and recycled when it's no longer needed, which is usually 4030 * when the CSS is offlined. 4031 * 4032 * The only exception to that are records of swapped out tmpfs/shmem 4033 * pages that need to be attributed to live ancestors on swapin. But 4034 * those references are manageable from userspace.
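 *
 * Illustrative lifetime (sketch): a swap-out record takes a reference
 * with mem_cgroup_id_get_many(); once the last such reference is
 * dropped, mem_cgroup_id_put_many() removes the ID from the IDR and
 * puts the CSS reference the ID was pinning, allowing the ID to be
 * recycled by a new cgroup.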
4035 */ 4036 4037 static DEFINE_IDR(mem_cgroup_idr); 4038 4039 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4040 { 4041 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); 4042 atomic_add(n, &memcg->id.ref); 4043 } 4044 4045 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4046 { 4047 VM_BUG_ON(atomic_read(&memcg->id.ref) < n); 4048 if (atomic_sub_and_test(n, &memcg->id.ref)) { 4049 idr_remove(&mem_cgroup_idr, memcg->id.id); 4050 memcg->id.id = 0; 4051 4052 /* Memcg ID pins CSS */ 4053 css_put(&memcg->css); 4054 } 4055 } 4056 4057 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4058 { 4059 mem_cgroup_id_get_many(memcg, 1); 4060 } 4061 4062 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4063 { 4064 mem_cgroup_id_put_many(memcg, 1); 4065 } 4066 4067 /** 4068 * mem_cgroup_from_id - look up a memcg from a memcg id 4069 * @id: the memcg id to look up 4070 * 4071 * Caller must hold rcu_read_lock(). 4072 */ 4073 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4074 { 4075 WARN_ON_ONCE(!rcu_read_lock_held()); 4076 return idr_find(&mem_cgroup_idr, id); 4077 } 4078 4079 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4080 { 4081 struct mem_cgroup_per_node *pn; 4082 int tmp = node; 4083 /* 4084 * This routine is called against possible nodes. 4085 * But it's BUG to call kmalloc() against offline node. 4086 * 4087 * TODO: this routine can waste much memory for nodes which will 4088 * never be onlined. It's better to use memory hotplug callback 4089 * function. 4090 */ 4091 if (!node_state(node, N_NORMAL_MEMORY)) 4092 tmp = -1; 4093 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4094 if (!pn) 4095 return 1; 4096 4097 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat); 4098 if (!pn->lruvec_stat_cpu) { 4099 kfree(pn); 4100 return 1; 4101 } 4102 4103 lruvec_init(&pn->lruvec); 4104 pn->usage_in_excess = 0; 4105 pn->on_tree = false; 4106 pn->memcg = memcg; 4107 4108 memcg->nodeinfo[node] = pn; 4109 return 0; 4110 } 4111 4112 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4113 { 4114 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 4115 4116 if (!pn) 4117 return; 4118 4119 free_percpu(pn->lruvec_stat_cpu); 4120 kfree(pn); 4121 } 4122 4123 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4124 { 4125 int node; 4126 4127 for_each_node(node) 4128 free_mem_cgroup_per_node_info(memcg, node); 4129 free_percpu(memcg->stat_cpu); 4130 kfree(memcg); 4131 } 4132 4133 static void mem_cgroup_free(struct mem_cgroup *memcg) 4134 { 4135 memcg_wb_domain_exit(memcg); 4136 __mem_cgroup_free(memcg); 4137 } 4138 4139 static struct mem_cgroup *mem_cgroup_alloc(void) 4140 { 4141 struct mem_cgroup *memcg; 4142 size_t size; 4143 int node; 4144 4145 size = sizeof(struct mem_cgroup); 4146 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4147 4148 memcg = kzalloc(size, GFP_KERNEL); 4149 if (!memcg) 4150 return NULL; 4151 4152 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4153 1, MEM_CGROUP_ID_MAX, 4154 GFP_KERNEL); 4155 if (memcg->id.id < 0) 4156 goto fail; 4157 4158 memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu); 4159 if (!memcg->stat_cpu) 4160 goto fail; 4161 4162 for_each_node(node) 4163 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4164 goto fail; 4165 4166 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4167 goto fail; 4168 4169 INIT_WORK(&memcg->high_work, high_work_func); 4170 memcg->last_scanned_node = MAX_NUMNODES; 4171 
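/*
 * Set up the locks, thresholds and event lists before idr_replace()
 * below publishes the memcg for mem_cgroup_from_id() lookups.
 */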
INIT_LIST_HEAD(&memcg->oom_notify); 4172 mutex_init(&memcg->thresholds_lock); 4173 spin_lock_init(&memcg->move_lock); 4174 vmpressure_init(&memcg->vmpressure); 4175 INIT_LIST_HEAD(&memcg->event_list); 4176 spin_lock_init(&memcg->event_list_lock); 4177 memcg->socket_pressure = jiffies; 4178 #ifndef CONFIG_SLOB 4179 memcg->kmemcg_id = -1; 4180 #endif 4181 #ifdef CONFIG_CGROUP_WRITEBACK 4182 INIT_LIST_HEAD(&memcg->cgwb_list); 4183 #endif 4184 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4185 return memcg; 4186 fail: 4187 if (memcg->id.id > 0) 4188 idr_remove(&mem_cgroup_idr, memcg->id.id); 4189 __mem_cgroup_free(memcg); 4190 return NULL; 4191 } 4192 4193 static struct cgroup_subsys_state * __ref 4194 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4195 { 4196 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4197 struct mem_cgroup *memcg; 4198 long error = -ENOMEM; 4199 4200 memcg = mem_cgroup_alloc(); 4201 if (!memcg) 4202 return ERR_PTR(error); 4203 4204 memcg->high = PAGE_COUNTER_MAX; 4205 memcg->soft_limit = PAGE_COUNTER_MAX; 4206 if (parent) { 4207 memcg->swappiness = mem_cgroup_swappiness(parent); 4208 memcg->oom_kill_disable = parent->oom_kill_disable; 4209 } 4210 if (parent && parent->use_hierarchy) { 4211 memcg->use_hierarchy = true; 4212 page_counter_init(&memcg->memory, &parent->memory); 4213 page_counter_init(&memcg->swap, &parent->swap); 4214 page_counter_init(&memcg->memsw, &parent->memsw); 4215 page_counter_init(&memcg->kmem, &parent->kmem); 4216 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4217 } else { 4218 page_counter_init(&memcg->memory, NULL); 4219 page_counter_init(&memcg->swap, NULL); 4220 page_counter_init(&memcg->memsw, NULL); 4221 page_counter_init(&memcg->kmem, NULL); 4222 page_counter_init(&memcg->tcpmem, NULL); 4223 /* 4224 * A deeper hierarchy with use_hierarchy == false doesn't make 4225 * much sense, so let the cgroup subsystem know about this 4226 * unfortunate state in our controller. 4227 */ 4228 if (parent != root_mem_cgroup) 4229 memory_cgrp_subsys.broken_hierarchy = true; 4230 } 4231 4232 /* The following stuff does not apply to the root */ 4233 if (!parent) { 4234 root_mem_cgroup = memcg; 4235 return &memcg->css; 4236 } 4237 4238 error = memcg_online_kmem(memcg); 4239 if (error) 4240 goto fail; 4241 4242 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4243 static_branch_inc(&memcg_sockets_enabled_key); 4244 4245 return &memcg->css; 4246 fail: 4247 mem_cgroup_free(memcg); 4248 return ERR_PTR(-ENOMEM); 4249 } 4250 4251 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4252 { 4253 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4254 4255 /* Online state pins memcg ID, memcg ID pins CSS */ 4256 atomic_set(&memcg->id.ref, 1); 4257 css_get(css); 4258 return 0; 4259 } 4260 4261 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4262 { 4263 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4264 struct mem_cgroup_event *event, *tmp; 4265 4266 /* 4267 * Unregister events and notify userspace. 4268 * Notify userspace about cgroup removal only after rmdir of the 4269 * cgroup directory, to avoid a race between userspace and kernelspace.
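 * (The actual teardown of each event runs asynchronously via its
 * remove work item; see the schedule_work() call below.)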
4270 */ 4271 spin_lock(&memcg->event_list_lock); 4272 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4273 list_del_init(&event->list); 4274 schedule_work(&event->remove); 4275 } 4276 spin_unlock(&memcg->event_list_lock); 4277 4278 page_counter_set_min(&memcg->memory, 0); 4279 page_counter_set_low(&memcg->memory, 0); 4280 4281 memcg_offline_kmem(memcg); 4282 wb_memcg_offline(memcg); 4283 4284 mem_cgroup_id_put(memcg); 4285 } 4286 4287 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4288 { 4289 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4290 4291 invalidate_reclaim_iterators(memcg); 4292 } 4293 4294 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4295 { 4296 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4297 4298 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4299 static_branch_dec(&memcg_sockets_enabled_key); 4300 4301 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4302 static_branch_dec(&memcg_sockets_enabled_key); 4303 4304 vmpressure_cleanup(&memcg->vmpressure); 4305 cancel_work_sync(&memcg->high_work); 4306 mem_cgroup_remove_from_trees(memcg); 4307 memcg_free_kmem(memcg); 4308 mem_cgroup_free(memcg); 4309 } 4310 4311 /** 4312 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4313 * @css: the target css 4314 * 4315 * Reset the states of the mem_cgroup associated with @css. This is 4316 * invoked when the userland requests disabling on the default hierarchy 4317 * but the memcg is pinned through dependency. The memcg should stop 4318 * applying policies and should revert to the vanilla state as it may be 4319 * made visible again. 4320 * 4321 * The current implementation only resets the essential configurations. 4322 * This needs to be expanded to cover all the visible parts. 4323 */ 4324 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4325 { 4326 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4327 4328 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 4329 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 4330 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX); 4331 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 4332 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 4333 page_counter_set_min(&memcg->memory, 0); 4334 page_counter_set_low(&memcg->memory, 0); 4335 memcg->high = PAGE_COUNTER_MAX; 4336 memcg->soft_limit = PAGE_COUNTER_MAX; 4337 memcg_wb_domain_size_changed(memcg); 4338 } 4339 4340 #ifdef CONFIG_MMU 4341 /* Handlers for move charge at task migration. 
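 * The overall flow: can_attach() precharges mc.to for every movable
 * page in the task's mm, post_attach() walks the page tables again
 * and moves the charges one page at a time, and clear_mc() cancels
 * whatever precharge is left over.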
*/ 4342 static int mem_cgroup_do_precharge(unsigned long count) 4343 { 4344 int ret; 4345 4346 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4347 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4348 if (!ret) { 4349 mc.precharge += count; 4350 return ret; 4351 } 4352 4353 /* Try charges one by one with reclaim, but do not retry */ 4354 while (count--) { 4355 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 4356 if (ret) 4357 return ret; 4358 mc.precharge++; 4359 cond_resched(); 4360 } 4361 return 0; 4362 } 4363 4364 union mc_target { 4365 struct page *page; 4366 swp_entry_t ent; 4367 }; 4368 4369 enum mc_target_type { 4370 MC_TARGET_NONE = 0, 4371 MC_TARGET_PAGE, 4372 MC_TARGET_SWAP, 4373 MC_TARGET_DEVICE, 4374 }; 4375 4376 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4377 unsigned long addr, pte_t ptent) 4378 { 4379 struct page *page = _vm_normal_page(vma, addr, ptent, true); 4380 4381 if (!page || !page_mapped(page)) 4382 return NULL; 4383 if (PageAnon(page)) { 4384 if (!(mc.flags & MOVE_ANON)) 4385 return NULL; 4386 } else { 4387 if (!(mc.flags & MOVE_FILE)) 4388 return NULL; 4389 } 4390 if (!get_page_unless_zero(page)) 4391 return NULL; 4392 4393 return page; 4394 } 4395 4396 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 4397 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4398 pte_t ptent, swp_entry_t *entry) 4399 { 4400 struct page *page = NULL; 4401 swp_entry_t ent = pte_to_swp_entry(ptent); 4402 4403 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4404 return NULL; 4405 4406 /* 4407 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages 4408 * belonging to a device. Because they are not accessible by the CPU, 4409 * they are stored as special swap entries in the CPU page table. 4410 */ 4411 if (is_device_private_entry(ent)) { 4412 page = device_private_entry_to_page(ent); 4413 /* 4414 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has 4415 * a refcount of 1 when free (unlike a normal page) 4416 */ 4417 if (!page_ref_add_unless(page, 1, 1)) 4418 return NULL; 4419 return page; 4420 } 4421 4422 /* 4423 * Because lookup_swap_cache() updates some statistics counter, 4424 * we call find_get_page() with swapper_space directly. 4425 */ 4426 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 4427 if (do_memsw_account()) 4428 entry->val = ent.val; 4429 4430 return page; 4431 } 4432 #else 4433 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4434 pte_t ptent, swp_entry_t *entry) 4435 { 4436 return NULL; 4437 } 4438 #endif 4439 4440 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4441 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4442 { 4443 struct page *page = NULL; 4444 struct address_space *mapping; 4445 pgoff_t pgoff; 4446 4447 if (!vma->vm_file) /* anonymous vma */ 4448 return NULL; 4449 if (!(mc.flags & MOVE_FILE)) 4450 return NULL; 4451 4452 mapping = vma->vm_file->f_mapping; 4453 pgoff = linear_page_index(vma, addr); 4454 4455 /* the page is moved even if it's not RSS of this task (page-faulted). */ 4456 #ifdef CONFIG_SWAP 4457 /* shmem/tmpfs may report page out on swap: account for that too.
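 * (find_get_entry() may return an exceptional entry for such a page;
 * it is converted back to a swp_entry_t below before the page is
 * looked up in the swap address space.)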
*/ 4458 if (shmem_mapping(mapping)) { 4459 page = find_get_entry(mapping, pgoff); 4460 if (radix_tree_exceptional_entry(page)) { 4461 swp_entry_t swp = radix_to_swp_entry(page); 4462 if (do_memsw_account()) 4463 *entry = swp; 4464 page = find_get_page(swap_address_space(swp), 4465 swp_offset(swp)); 4466 } 4467 } else 4468 page = find_get_page(mapping, pgoff); 4469 #else 4470 page = find_get_page(mapping, pgoff); 4471 #endif 4472 return page; 4473 } 4474 4475 /** 4476 * mem_cgroup_move_account - move account of the page 4477 * @page: the page 4478 * @compound: charge the page as compound or small page 4479 * @from: mem_cgroup which the page is moved from. 4480 * @to: mem_cgroup which the page is moved to. @from != @to. 4481 * 4482 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 4483 * 4484 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4485 * from old cgroup. 4486 */ 4487 static int mem_cgroup_move_account(struct page *page, 4488 bool compound, 4489 struct mem_cgroup *from, 4490 struct mem_cgroup *to) 4491 { 4492 unsigned long flags; 4493 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4494 int ret; 4495 bool anon; 4496 4497 VM_BUG_ON(from == to); 4498 VM_BUG_ON_PAGE(PageLRU(page), page); 4499 VM_BUG_ON(compound && !PageTransHuge(page)); 4500 4501 /* 4502 * Prevent mem_cgroup_migrate() from looking at 4503 * page->mem_cgroup of its source page while we change it. 4504 */ 4505 ret = -EBUSY; 4506 if (!trylock_page(page)) 4507 goto out; 4508 4509 ret = -EINVAL; 4510 if (page->mem_cgroup != from) 4511 goto out_unlock; 4512 4513 anon = PageAnon(page); 4514 4515 spin_lock_irqsave(&from->move_lock, flags); 4516 4517 if (!anon && page_mapped(page)) { 4518 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages); 4519 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages); 4520 } 4521 4522 /* 4523 * move_lock grabbed above and caller set from->moving_account, so 4524 * mod_memcg_page_state will serialize updates to PageDirty. 4525 * So mapping should be stable for dirty pages. 4526 */ 4527 if (!anon && PageDirty(page)) { 4528 struct address_space *mapping = page_mapping(page); 4529 4530 if (mapping_cap_account_dirty(mapping)) { 4531 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages); 4532 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages); 4533 } 4534 } 4535 4536 if (PageWriteback(page)) { 4537 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages); 4538 __mod_memcg_state(to, NR_WRITEBACK, nr_pages); 4539 } 4540 4541 /* 4542 * It is safe to change page->mem_cgroup here because the page 4543 * is referenced, charged, and isolated - we can't race with 4544 * uncharging, charging, migration, or LRU putback. 
4545 */ 4546 4547 /* caller should have done css_get */ 4548 page->mem_cgroup = to; 4549 spin_unlock_irqrestore(&from->move_lock, flags); 4550 4551 ret = 0; 4552 4553 local_irq_disable(); 4554 mem_cgroup_charge_statistics(to, page, compound, nr_pages); 4555 memcg_check_events(to, page); 4556 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); 4557 memcg_check_events(from, page); 4558 local_irq_enable(); 4559 out_unlock: 4560 unlock_page(page); 4561 out: 4562 return ret; 4563 } 4564 4565 /** 4566 * get_mctgt_type - get target type of moving charge 4567 * @vma: the vma to which the pte to be checked belongs 4568 * @addr: the address corresponding to the pte to be checked 4569 * @ptent: the pte to be checked 4570 * @target: the pointer where the target page or swap entry will be stored (can be NULL) 4571 * 4572 * Returns 4573 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 4574 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4575 * move charge. If @target is not NULL, the page is stored in target->page 4576 * with an extra refcount taken (callers should handle it). 4577 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4578 * target for charge migration. If @target is not NULL, the entry is stored 4579 * in target->ent. 4580 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PUBLIC 4581 * or MEMORY_DEVICE_PRIVATE (so ZONE_DEVICE page and thus not on the lru). 4582 * For now such pages are charged like regular pages would be, as for all 4583 * intents and purposes they are just special memory taking the place of 4584 * regular pages. 4585 * 4586 * See Documentation/vm/hmm.txt and include/linux/hmm.h 4587 * 4588 * Called with pte lock held. 4589 */ 4590 4591 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4592 unsigned long addr, pte_t ptent, union mc_target *target) 4593 { 4594 struct page *page = NULL; 4595 enum mc_target_type ret = MC_TARGET_NONE; 4596 swp_entry_t ent = { .val = 0 }; 4597 4598 if (pte_present(ptent)) 4599 page = mc_handle_present_pte(vma, addr, ptent); 4600 else if (is_swap_pte(ptent)) 4601 page = mc_handle_swap_pte(vma, ptent, &ent); 4602 else if (pte_none(ptent)) 4603 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4604 4605 if (!page && !ent.val) 4606 return ret; 4607 if (page) { 4608 /* 4609 * Do only a loose check w/o serialization. 4610 * mem_cgroup_move_account() checks the page is valid or 4611 * not under LRU exclusion. 4612 */ 4613 if (page->mem_cgroup == mc.from) { 4614 ret = MC_TARGET_PAGE; 4615 if (is_device_private_page(page) || 4616 is_device_public_page(page)) 4617 ret = MC_TARGET_DEVICE; 4618 if (target) 4619 target->page = page; 4620 } 4621 if (!ret || !target) 4622 put_page(page); 4623 } 4624 /* 4625 * There is a swap entry and a page doesn't exist or isn't charged. 4626 * But we cannot move a tail page of a THP. 4627 */ 4628 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 4629 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4630 ret = MC_TARGET_SWAP; 4631 if (target) 4632 target->ent = ent; 4633 } 4634 return ret; 4635 } 4636 4637 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4638 /* 4639 * We don't consider PMD mapped swapping or file mapped pages because THP does 4640 * not support them for now. 4641 * Caller should make sure that pmd_trans_huge(pmd) is true.
4642 */ 4643 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4644 unsigned long addr, pmd_t pmd, union mc_target *target) 4645 { 4646 struct page *page = NULL; 4647 enum mc_target_type ret = MC_TARGET_NONE; 4648 4649 if (unlikely(is_swap_pmd(pmd))) { 4650 VM_BUG_ON(thp_migration_supported() && 4651 !is_pmd_migration_entry(pmd)); 4652 return ret; 4653 } 4654 page = pmd_page(pmd); 4655 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4656 if (!(mc.flags & MOVE_ANON)) 4657 return ret; 4658 if (page->mem_cgroup == mc.from) { 4659 ret = MC_TARGET_PAGE; 4660 if (target) { 4661 get_page(page); 4662 target->page = page; 4663 } 4664 } 4665 return ret; 4666 } 4667 #else 4668 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4669 unsigned long addr, pmd_t pmd, union mc_target *target) 4670 { 4671 return MC_TARGET_NONE; 4672 } 4673 #endif 4674 4675 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4676 unsigned long addr, unsigned long end, 4677 struct mm_walk *walk) 4678 { 4679 struct vm_area_struct *vma = walk->vma; 4680 pte_t *pte; 4681 spinlock_t *ptl; 4682 4683 ptl = pmd_trans_huge_lock(pmd, vma); 4684 if (ptl) { 4685 /* 4686 * Note their can not be MC_TARGET_DEVICE for now as we do not 4687 * support transparent huge page with MEMORY_DEVICE_PUBLIC or 4688 * MEMORY_DEVICE_PRIVATE but this might change. 4689 */ 4690 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4691 mc.precharge += HPAGE_PMD_NR; 4692 spin_unlock(ptl); 4693 return 0; 4694 } 4695 4696 if (pmd_trans_unstable(pmd)) 4697 return 0; 4698 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4699 for (; addr != end; pte++, addr += PAGE_SIZE) 4700 if (get_mctgt_type(vma, addr, *pte, NULL)) 4701 mc.precharge++; /* increment precharge temporarily */ 4702 pte_unmap_unlock(pte - 1, ptl); 4703 cond_resched(); 4704 4705 return 0; 4706 } 4707 4708 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4709 { 4710 unsigned long precharge; 4711 4712 struct mm_walk mem_cgroup_count_precharge_walk = { 4713 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4714 .mm = mm, 4715 }; 4716 down_read(&mm->mmap_sem); 4717 walk_page_range(0, mm->highest_vm_end, 4718 &mem_cgroup_count_precharge_walk); 4719 up_read(&mm->mmap_sem); 4720 4721 precharge = mc.precharge; 4722 mc.precharge = 0; 4723 4724 return precharge; 4725 } 4726 4727 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4728 { 4729 unsigned long precharge = mem_cgroup_count_precharge(mm); 4730 4731 VM_BUG_ON(mc.moving_task); 4732 mc.moving_task = current; 4733 return mem_cgroup_do_precharge(precharge); 4734 } 4735 4736 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4737 static void __mem_cgroup_clear_mc(void) 4738 { 4739 struct mem_cgroup *from = mc.from; 4740 struct mem_cgroup *to = mc.to; 4741 4742 /* we must uncharge all the leftover precharges from mc.to */ 4743 if (mc.precharge) { 4744 cancel_charge(mc.to, mc.precharge); 4745 mc.precharge = 0; 4746 } 4747 /* 4748 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4749 * we must uncharge here. 
4750 */ 4751 if (mc.moved_charge) { 4752 cancel_charge(mc.from, mc.moved_charge); 4753 mc.moved_charge = 0; 4754 } 4755 /* we must fixup refcnts and charges */ 4756 if (mc.moved_swap) { 4757 /* uncharge swap account from the old cgroup */ 4758 if (!mem_cgroup_is_root(mc.from)) 4759 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4760 4761 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 4762 4763 /* 4764 * we charged both to->memory and to->memsw, so we 4765 * should uncharge to->memory. 4766 */ 4767 if (!mem_cgroup_is_root(mc.to)) 4768 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4769 4770 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 4771 css_put_many(&mc.to->css, mc.moved_swap); 4772 4773 mc.moved_swap = 0; 4774 } 4775 memcg_oom_recover(from); 4776 memcg_oom_recover(to); 4777 wake_up_all(&mc.waitq); 4778 } 4779 4780 static void mem_cgroup_clear_mc(void) 4781 { 4782 struct mm_struct *mm = mc.mm; 4783 4784 /* 4785 * we must clear moving_task before waking up waiters at the end of 4786 * task migration. 4787 */ 4788 mc.moving_task = NULL; 4789 __mem_cgroup_clear_mc(); 4790 spin_lock(&mc.lock); 4791 mc.from = NULL; 4792 mc.to = NULL; 4793 mc.mm = NULL; 4794 spin_unlock(&mc.lock); 4795 4796 mmput(mm); 4797 } 4798 4799 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4800 { 4801 struct cgroup_subsys_state *css; 4802 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 4803 struct mem_cgroup *from; 4804 struct task_struct *leader, *p; 4805 struct mm_struct *mm; 4806 unsigned long move_flags; 4807 int ret = 0; 4808 4809 /* charge immigration isn't supported on the default hierarchy */ 4810 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4811 return 0; 4812 4813 /* 4814 * Multi-process migrations only happen on the default hierarchy 4815 * where charge immigration is not used. Perform charge 4816 * immigration if @tset contains a leader and whine if there are 4817 * multiple. 4818 */ 4819 p = NULL; 4820 cgroup_taskset_for_each_leader(leader, css, tset) { 4821 WARN_ON_ONCE(p); 4822 p = leader; 4823 memcg = mem_cgroup_from_css(css); 4824 } 4825 if (!p) 4826 return 0; 4827 4828 /* 4829 * We are now commited to this value whatever it is. Changes in this 4830 * tunable will only affect upcoming migrations, not the current one. 4831 * So we need to save it, and keep it going. 
4832 */ 4833 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 4834 if (!move_flags) 4835 return 0; 4836 4837 from = mem_cgroup_from_task(p); 4838 4839 VM_BUG_ON(from == memcg); 4840 4841 mm = get_task_mm(p); 4842 if (!mm) 4843 return 0; 4844 /* We move charges only when we move a owner of the mm */ 4845 if (mm->owner == p) { 4846 VM_BUG_ON(mc.from); 4847 VM_BUG_ON(mc.to); 4848 VM_BUG_ON(mc.precharge); 4849 VM_BUG_ON(mc.moved_charge); 4850 VM_BUG_ON(mc.moved_swap); 4851 4852 spin_lock(&mc.lock); 4853 mc.mm = mm; 4854 mc.from = from; 4855 mc.to = memcg; 4856 mc.flags = move_flags; 4857 spin_unlock(&mc.lock); 4858 /* We set mc.moving_task later */ 4859 4860 ret = mem_cgroup_precharge_mc(mm); 4861 if (ret) 4862 mem_cgroup_clear_mc(); 4863 } else { 4864 mmput(mm); 4865 } 4866 return ret; 4867 } 4868 4869 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4870 { 4871 if (mc.to) 4872 mem_cgroup_clear_mc(); 4873 } 4874 4875 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 4876 unsigned long addr, unsigned long end, 4877 struct mm_walk *walk) 4878 { 4879 int ret = 0; 4880 struct vm_area_struct *vma = walk->vma; 4881 pte_t *pte; 4882 spinlock_t *ptl; 4883 enum mc_target_type target_type; 4884 union mc_target target; 4885 struct page *page; 4886 4887 ptl = pmd_trans_huge_lock(pmd, vma); 4888 if (ptl) { 4889 if (mc.precharge < HPAGE_PMD_NR) { 4890 spin_unlock(ptl); 4891 return 0; 4892 } 4893 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 4894 if (target_type == MC_TARGET_PAGE) { 4895 page = target.page; 4896 if (!isolate_lru_page(page)) { 4897 if (!mem_cgroup_move_account(page, true, 4898 mc.from, mc.to)) { 4899 mc.precharge -= HPAGE_PMD_NR; 4900 mc.moved_charge += HPAGE_PMD_NR; 4901 } 4902 putback_lru_page(page); 4903 } 4904 put_page(page); 4905 } else if (target_type == MC_TARGET_DEVICE) { 4906 page = target.page; 4907 if (!mem_cgroup_move_account(page, true, 4908 mc.from, mc.to)) { 4909 mc.precharge -= HPAGE_PMD_NR; 4910 mc.moved_charge += HPAGE_PMD_NR; 4911 } 4912 put_page(page); 4913 } 4914 spin_unlock(ptl); 4915 return 0; 4916 } 4917 4918 if (pmd_trans_unstable(pmd)) 4919 return 0; 4920 retry: 4921 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4922 for (; addr != end; addr += PAGE_SIZE) { 4923 pte_t ptent = *(pte++); 4924 bool device = false; 4925 swp_entry_t ent; 4926 4927 if (!mc.precharge) 4928 break; 4929 4930 switch (get_mctgt_type(vma, addr, ptent, &target)) { 4931 case MC_TARGET_DEVICE: 4932 device = true; 4933 /* fall through */ 4934 case MC_TARGET_PAGE: 4935 page = target.page; 4936 /* 4937 * We can have a part of the split pmd here. Moving it 4938 * can be done but it would be too convoluted so simply 4939 * ignore such a partial THP and keep it in original 4940 * memcg. There should be somebody mapping the head. 4941 */ 4942 if (PageTransCompound(page)) 4943 goto put; 4944 if (!device && isolate_lru_page(page)) 4945 goto put; 4946 if (!mem_cgroup_move_account(page, false, 4947 mc.from, mc.to)) { 4948 mc.precharge--; 4949 /* we uncharge from mc.from later. */ 4950 mc.moved_charge++; 4951 } 4952 if (!device) 4953 putback_lru_page(page); 4954 put: /* get_mctgt_type() gets the page */ 4955 put_page(page); 4956 break; 4957 case MC_TARGET_SWAP: 4958 ent = target.ent; 4959 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 4960 mc.precharge--; 4961 /* we fixup refcnts and charges later. 
*/ 4962 mc.moved_swap++; 4963 } 4964 break; 4965 default: 4966 break; 4967 } 4968 } 4969 pte_unmap_unlock(pte - 1, ptl); 4970 cond_resched(); 4971 4972 if (addr != end) { 4973 /* 4974 * We have consumed all precharges we got in can_attach(). 4975 * We try to charge one by one, but don't do any additional 4976 * charges to mc.to if we have already failed to charge once in 4977 * the attach() phase. 4978 */ 4979 ret = mem_cgroup_do_precharge(1); 4980 if (!ret) 4981 goto retry; 4982 } 4983 4984 return ret; 4985 } 4986 4987 static void mem_cgroup_move_charge(void) 4988 { 4989 struct mm_walk mem_cgroup_move_charge_walk = { 4990 .pmd_entry = mem_cgroup_move_charge_pte_range, 4991 .mm = mc.mm, 4992 }; 4993 4994 lru_add_drain_all(); 4995 /* 4996 * Signal lock_page_memcg() to take the memcg's move_lock 4997 * while we're moving its pages to another memcg. Then wait 4998 * for already started RCU-only updates to finish. 4999 */ 5000 atomic_inc(&mc.from->moving_account); 5001 synchronize_rcu(); 5002 retry: 5003 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 5004 /* 5005 * Someone holding the mmap_sem might be waiting in the 5006 * waitq. So we cancel all extra charges, wake up all waiters, 5007 * and retry. Because we cancel precharges, we might not be able 5008 * to move enough charges, but moving charge is a best-effort 5009 * feature anyway, so it wouldn't be a big problem. 5010 */ 5011 __mem_cgroup_clear_mc(); 5012 cond_resched(); 5013 goto retry; 5014 } 5015 /* 5016 * When we have consumed all precharges and failed in doing 5017 * additional charge, the page walk just aborts. 5018 */ 5019 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); 5020 5021 up_read(&mc.mm->mmap_sem); 5022 atomic_dec(&mc.from->moving_account); 5023 } 5024 5025 static void mem_cgroup_move_task(void) 5026 { 5027 if (mc.to) { 5028 mem_cgroup_move_charge(); 5029 mem_cgroup_clear_mc(); 5030 } 5031 } 5032 #else /* !CONFIG_MMU */ 5033 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5034 { 5035 return 0; 5036 } 5037 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5038 { 5039 } 5040 static void mem_cgroup_move_task(void) 5041 { 5042 } 5043 #endif 5044 5045 /* 5046 * Cgroup retains root cgroups across [un]mount cycles, making it necessary 5047 * to verify whether we're attached to the default hierarchy on each mount 5048 * attempt. 5049 */ 5050 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5051 { 5052 /* 5053 * use_hierarchy is forced on the default hierarchy. cgroup core 5054 * guarantees that @root doesn't have any children, so turning it 5055 * on for the root memcg is enough.
5056 */ 5057 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5058 root_mem_cgroup->use_hierarchy = true; 5059 else 5060 root_mem_cgroup->use_hierarchy = false; 5061 } 5062 5063 static u64 memory_current_read(struct cgroup_subsys_state *css, 5064 struct cftype *cft) 5065 { 5066 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5067 5068 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5069 } 5070 5071 static int memory_min_show(struct seq_file *m, void *v) 5072 { 5073 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5074 unsigned long min = READ_ONCE(memcg->memory.min); 5075 5076 if (min == PAGE_COUNTER_MAX) 5077 seq_puts(m, "max\n"); 5078 else 5079 seq_printf(m, "%llu\n", (u64)min * PAGE_SIZE); 5080 5081 return 0; 5082 } 5083 5084 static ssize_t memory_min_write(struct kernfs_open_file *of, 5085 char *buf, size_t nbytes, loff_t off) 5086 { 5087 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5088 unsigned long min; 5089 int err; 5090 5091 buf = strstrip(buf); 5092 err = page_counter_memparse(buf, "max", &min); 5093 if (err) 5094 return err; 5095 5096 page_counter_set_min(&memcg->memory, min); 5097 5098 return nbytes; 5099 } 5100 5101 static int memory_low_show(struct seq_file *m, void *v) 5102 { 5103 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5104 unsigned long low = READ_ONCE(memcg->memory.low); 5105 5106 if (low == PAGE_COUNTER_MAX) 5107 seq_puts(m, "max\n"); 5108 else 5109 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5110 5111 return 0; 5112 } 5113 5114 static ssize_t memory_low_write(struct kernfs_open_file *of, 5115 char *buf, size_t nbytes, loff_t off) 5116 { 5117 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5118 unsigned long low; 5119 int err; 5120 5121 buf = strstrip(buf); 5122 err = page_counter_memparse(buf, "max", &low); 5123 if (err) 5124 return err; 5125 5126 page_counter_set_low(&memcg->memory, low); 5127 5128 return nbytes; 5129 } 5130 5131 static int memory_high_show(struct seq_file *m, void *v) 5132 { 5133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5134 unsigned long high = READ_ONCE(memcg->high); 5135 5136 if (high == PAGE_COUNTER_MAX) 5137 seq_puts(m, "max\n"); 5138 else 5139 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5140 5141 return 0; 5142 } 5143 5144 static ssize_t memory_high_write(struct kernfs_open_file *of, 5145 char *buf, size_t nbytes, loff_t off) 5146 { 5147 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5148 unsigned long nr_pages; 5149 unsigned long high; 5150 int err; 5151 5152 buf = strstrip(buf); 5153 err = page_counter_memparse(buf, "max", &high); 5154 if (err) 5155 return err; 5156 5157 memcg->high = high; 5158 5159 nr_pages = page_counter_read(&memcg->memory); 5160 if (nr_pages > high) 5161 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5162 GFP_KERNEL, true); 5163 5164 memcg_wb_domain_size_changed(memcg); 5165 return nbytes; 5166 } 5167 5168 static int memory_max_show(struct seq_file *m, void *v) 5169 { 5170 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5171 unsigned long max = READ_ONCE(memcg->memory.max); 5172 5173 if (max == PAGE_COUNTER_MAX) 5174 seq_puts(m, "max\n"); 5175 else 5176 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5177 5178 return 0; 5179 } 5180 5181 static ssize_t memory_max_write(struct kernfs_open_file *of, 5182 char *buf, size_t nbytes, loff_t off) 5183 { 5184 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5185 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5186 bool drained = false; 
5187 unsigned long max; 5188 int err; 5189 5190 buf = strstrip(buf); 5191 err = page_counter_memparse(buf, "max", &max); 5192 if (err) 5193 return err; 5194 5195 xchg(&memcg->memory.max, max); 5196 5197 for (;;) { 5198 unsigned long nr_pages = page_counter_read(&memcg->memory); 5199 5200 if (nr_pages <= max) 5201 break; 5202 5203 if (signal_pending(current)) { 5204 err = -EINTR; 5205 break; 5206 } 5207 5208 if (!drained) { 5209 drain_all_stock(memcg); 5210 drained = true; 5211 continue; 5212 } 5213 5214 if (nr_reclaims) { 5215 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5216 GFP_KERNEL, true)) 5217 nr_reclaims--; 5218 continue; 5219 } 5220 5221 memcg_memory_event(memcg, MEMCG_OOM); 5222 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5223 break; 5224 } 5225 5226 memcg_wb_domain_size_changed(memcg); 5227 return nbytes; 5228 } 5229 5230 static int memory_events_show(struct seq_file *m, void *v) 5231 { 5232 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5233 5234 seq_printf(m, "low %lu\n", 5235 atomic_long_read(&memcg->memory_events[MEMCG_LOW])); 5236 seq_printf(m, "high %lu\n", 5237 atomic_long_read(&memcg->memory_events[MEMCG_HIGH])); 5238 seq_printf(m, "max %lu\n", 5239 atomic_long_read(&memcg->memory_events[MEMCG_MAX])); 5240 seq_printf(m, "oom %lu\n", 5241 atomic_long_read(&memcg->memory_events[MEMCG_OOM])); 5242 seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL)); 5243 5244 return 0; 5245 } 5246 5247 static int memory_stat_show(struct seq_file *m, void *v) 5248 { 5249 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5250 unsigned long stat[MEMCG_NR_STAT]; 5251 unsigned long events[NR_VM_EVENT_ITEMS]; 5252 int i; 5253 5254 /* 5255 * Provide statistics on the state of the memory subsystem as 5256 * well as cumulative event counters that show past behavior. 
5257 * 5258 * This list is ordered following a combination of these gradients: 5259 * 1) generic big picture -> specifics and details 5260 * 2) reflecting userspace activity -> reflecting kernel heuristics 5261 * 5262 * Current memory state: 5263 */ 5264 5265 tree_stat(memcg, stat); 5266 tree_events(memcg, events); 5267 5268 seq_printf(m, "anon %llu\n", 5269 (u64)stat[MEMCG_RSS] * PAGE_SIZE); 5270 seq_printf(m, "file %llu\n", 5271 (u64)stat[MEMCG_CACHE] * PAGE_SIZE); 5272 seq_printf(m, "kernel_stack %llu\n", 5273 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024); 5274 seq_printf(m, "slab %llu\n", 5275 (u64)(stat[NR_SLAB_RECLAIMABLE] + 5276 stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5277 seq_printf(m, "sock %llu\n", 5278 (u64)stat[MEMCG_SOCK] * PAGE_SIZE); 5279 5280 seq_printf(m, "shmem %llu\n", 5281 (u64)stat[NR_SHMEM] * PAGE_SIZE); 5282 seq_printf(m, "file_mapped %llu\n", 5283 (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE); 5284 seq_printf(m, "file_dirty %llu\n", 5285 (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE); 5286 seq_printf(m, "file_writeback %llu\n", 5287 (u64)stat[NR_WRITEBACK] * PAGE_SIZE); 5288 5289 for (i = 0; i < NR_LRU_LISTS; i++) { 5290 struct mem_cgroup *mi; 5291 unsigned long val = 0; 5292 5293 for_each_mem_cgroup_tree(mi, memcg) 5294 val += mem_cgroup_nr_lru_pages(mi, BIT(i)); 5295 seq_printf(m, "%s %llu\n", 5296 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); 5297 } 5298 5299 seq_printf(m, "slab_reclaimable %llu\n", 5300 (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE); 5301 seq_printf(m, "slab_unreclaimable %llu\n", 5302 (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5303 5304 /* Accumulated memory events */ 5305 5306 seq_printf(m, "pgfault %lu\n", events[PGFAULT]); 5307 seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]); 5308 5309 seq_printf(m, "pgrefill %lu\n", events[PGREFILL]); 5310 seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] + 5311 events[PGSCAN_DIRECT]); 5312 seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] + 5313 events[PGSTEAL_DIRECT]); 5314 seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]); 5315 seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]); 5316 seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]); 5317 seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]); 5318 5319 seq_printf(m, "workingset_refault %lu\n", 5320 stat[WORKINGSET_REFAULT]); 5321 seq_printf(m, "workingset_activate %lu\n", 5322 stat[WORKINGSET_ACTIVATE]); 5323 seq_printf(m, "workingset_nodereclaim %lu\n", 5324 stat[WORKINGSET_NODERECLAIM]); 5325 5326 return 0; 5327 } 5328 5329 static struct cftype memory_files[] = { 5330 { 5331 .name = "current", 5332 .flags = CFTYPE_NOT_ON_ROOT, 5333 .read_u64 = memory_current_read, 5334 }, 5335 { 5336 .name = "min", 5337 .flags = CFTYPE_NOT_ON_ROOT, 5338 .seq_show = memory_min_show, 5339 .write = memory_min_write, 5340 }, 5341 { 5342 .name = "low", 5343 .flags = CFTYPE_NOT_ON_ROOT, 5344 .seq_show = memory_low_show, 5345 .write = memory_low_write, 5346 }, 5347 { 5348 .name = "high", 5349 .flags = CFTYPE_NOT_ON_ROOT, 5350 .seq_show = memory_high_show, 5351 .write = memory_high_write, 5352 }, 5353 { 5354 .name = "max", 5355 .flags = CFTYPE_NOT_ON_ROOT, 5356 .seq_show = memory_max_show, 5357 .write = memory_max_write, 5358 }, 5359 { 5360 .name = "events", 5361 .flags = CFTYPE_NOT_ON_ROOT, 5362 .file_offset = offsetof(struct mem_cgroup, events_file), 5363 .seq_show = memory_events_show, 5364 }, 5365 { 5366 .name = "stat", 5367 .flags = CFTYPE_NOT_ON_ROOT, 5368 .seq_show = memory_stat_show, 5369 }, 5370 { } /* terminate */ 5371 }; 5372 5373 struct cgroup_subsys 
memory_cgrp_subsys = { 5374 .css_alloc = mem_cgroup_css_alloc, 5375 .css_online = mem_cgroup_css_online, 5376 .css_offline = mem_cgroup_css_offline, 5377 .css_released = mem_cgroup_css_released, 5378 .css_free = mem_cgroup_css_free, 5379 .css_reset = mem_cgroup_css_reset, 5380 .can_attach = mem_cgroup_can_attach, 5381 .cancel_attach = mem_cgroup_cancel_attach, 5382 .post_attach = mem_cgroup_move_task, 5383 .bind = mem_cgroup_bind, 5384 .dfl_cftypes = memory_files, 5385 .legacy_cftypes = mem_cgroup_legacy_files, 5386 .early_init = 0, 5387 }; 5388 5389 /** 5390 * mem_cgroup_protected - check if memory consumption is in the normal range 5391 * @root: the top ancestor of the sub-tree being checked 5392 * @memcg: the memory cgroup to check 5393 * 5394 * WARNING: This function is not stateless! It can only be used as part 5395 * of a top-down tree iteration, not for isolated queries. 5396 * 5397 * Returns one of the following: 5398 * MEMCG_PROT_NONE: cgroup memory is not protected 5399 * MEMCG_PROT_LOW: cgroup memory is protected as long as there is 5400 * an unprotected supply of reclaimable memory from other cgroups. 5401 * MEMCG_PROT_MIN: cgroup memory is protected 5402 * 5403 * @root is exclusive; it is never protected when looked at directly 5404 * 5405 * To provide a proper hierarchical behavior, effective memory.min/low values 5406 * are used. Below is the description of how effective memory.low is calculated. 5407 * The effective memory.min value is calculated in the same way. 5408 * 5409 * Effective memory.low is always equal to or less than the original memory.low. 5410 * If there is no memory.low overcommitment (which is always true for 5411 * top-level memory cgroups), these two values are equal. 5412 * Otherwise, it's a part of the parent's effective memory.low, 5413 * calculated as the cgroup's memory.low usage divided by the sum of the 5414 * siblings' memory.low usages, where memory.low usage is the size of actually 5415 * protected memory. 5416 * 5417 * low_usage 5418 * elow = min( memory.low, parent->elow * ------------------ ), 5419 * siblings_low_usage 5420 * 5421 * | memory.current, if memory.current < memory.low 5422 * low_usage = | 5423 * | 0, otherwise. 5424 * 5425 * 5426 * Such a definition of the effective memory.low provides the expected 5427 * hierarchical behavior: the parent's memory.low value is limiting 5428 * children, unprotected memory is reclaimed first, and cgroups 5429 * which are not using their guarantee do not affect the actual memory 5430 * distribution. 5431 * 5432 * For example, if there are memcgs A, A/B, A/C, A/D and A/E: 5433 * 5434 * A A/memory.low = 2G, A/memory.current = 6G 5435 * //\\ 5436 * BC DE B/memory.low = 3G B/memory.current = 2G 5437 * C/memory.low = 1G C/memory.current = 2G 5438 * D/memory.low = 0 D/memory.current = 2G 5439 * E/memory.low = 10G E/memory.current = 0 5440 * 5441 * and the memory pressure is applied, the following memory distribution 5442 * is expected (approximately): 5443 * 5444 * A/memory.current = 2G 5445 * 5446 * B/memory.current = 1.3G 5447 * C/memory.current = 0.6G 5448 * D/memory.current = 0 5449 * E/memory.current = 0 5450 * 5451 * These calculations require constant tracking of the actual low usages 5452 * (see propagate_protected_usage()), as well as recursive calculation of 5453 * effective memory.low values. But as we do call mem_cgroup_protected() 5454 * for each memory cgroup top-down from the reclaim path, 5455 * it's possible to optimize this part, and save the calculated elow 5456 * for next usage.
This part is intentionally racy, but it's ok, 5457 * as memory.low is a best-effort mechanism. 5458 */ 5459 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, 5460 struct mem_cgroup *memcg) 5461 { 5462 struct mem_cgroup *parent; 5463 unsigned long emin, parent_emin; 5464 unsigned long elow, parent_elow; 5465 unsigned long usage; 5466 5467 if (mem_cgroup_disabled()) 5468 return MEMCG_PROT_NONE; 5469 5470 if (!root) 5471 root = root_mem_cgroup; 5472 if (memcg == root) 5473 return MEMCG_PROT_NONE; 5474 5475 usage = page_counter_read(&memcg->memory); 5476 if (!usage) 5477 return MEMCG_PROT_NONE; 5478 5479 emin = memcg->memory.min; 5480 elow = memcg->memory.low; 5481 5482 parent = parent_mem_cgroup(memcg); 5483 if (parent == root) 5484 goto exit; 5485 5486 parent_emin = READ_ONCE(parent->memory.emin); 5487 emin = min(emin, parent_emin); 5488 if (emin && parent_emin) { 5489 unsigned long min_usage, siblings_min_usage; 5490 5491 min_usage = min(usage, memcg->memory.min); 5492 siblings_min_usage = atomic_long_read( 5493 &parent->memory.children_min_usage); 5494 5495 if (min_usage && siblings_min_usage) 5496 emin = min(emin, parent_emin * min_usage / 5497 siblings_min_usage); 5498 } 5499 5500 parent_elow = READ_ONCE(parent->memory.elow); 5501 elow = min(elow, parent_elow); 5502 if (elow && parent_elow) { 5503 unsigned long low_usage, siblings_low_usage; 5504 5505 low_usage = min(usage, memcg->memory.low); 5506 siblings_low_usage = atomic_long_read( 5507 &parent->memory.children_low_usage); 5508 5509 if (low_usage && siblings_low_usage) 5510 elow = min(elow, parent_elow * low_usage / 5511 siblings_low_usage); 5512 } 5513 5514 exit: 5515 memcg->memory.emin = emin; 5516 memcg->memory.elow = elow; 5517 5518 if (usage <= emin) 5519 return MEMCG_PROT_MIN; 5520 else if (usage <= elow) 5521 return MEMCG_PROT_LOW; 5522 else 5523 return MEMCG_PROT_NONE; 5524 } 5525 5526 /** 5527 * mem_cgroup_try_charge - try charging a page 5528 * @page: page to charge 5529 * @mm: mm context of the victim 5530 * @gfp_mask: reclaim mode 5531 * @memcgp: charged memcg return 5532 * @compound: charge the page as compound or small page 5533 * 5534 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5535 * pages according to @gfp_mask if necessary. 5536 * 5537 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5538 * Otherwise, an error code is returned. 5539 * 5540 * After page->mapping has been set up, the caller must finalize the 5541 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5542 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5543 */ 5544 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5545 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5546 bool compound) 5547 { 5548 struct mem_cgroup *memcg = NULL; 5549 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5550 int ret = 0; 5551 5552 if (mem_cgroup_disabled()) 5553 goto out; 5554 5555 if (PageSwapCache(page)) { 5556 /* 5557 * Every swap fault against a single page tries to charge the 5558 * page, bail as early as possible. shmem_unuse() encounters 5559 * already charged pages, too. The USED bit is protected by 5560 * the page lock, which serializes swap cache removal, which 5561 * in turn serializes uncharging. 
5562 */ 5563 VM_BUG_ON_PAGE(!PageLocked(page), page); 5564 if (compound_head(page)->mem_cgroup) 5565 goto out; 5566 5567 if (do_swap_account) { 5568 swp_entry_t ent = { .val = page_private(page), }; 5569 unsigned short id = lookup_swap_cgroup_id(ent); 5570 5571 rcu_read_lock(); 5572 memcg = mem_cgroup_from_id(id); 5573 if (memcg && !css_tryget_online(&memcg->css)) 5574 memcg = NULL; 5575 rcu_read_unlock(); 5576 } 5577 } 5578 5579 if (!memcg) 5580 memcg = get_mem_cgroup_from_mm(mm); 5581 5582 ret = try_charge(memcg, gfp_mask, nr_pages); 5583 5584 css_put(&memcg->css); 5585 out: 5586 *memcgp = memcg; 5587 return ret; 5588 } 5589 5590 /** 5591 * mem_cgroup_commit_charge - commit a page charge 5592 * @page: page to charge 5593 * @memcg: memcg to charge the page to 5594 * @lrucare: page might be on LRU already 5595 * @compound: charge the page as compound or small page 5596 * 5597 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5598 * after page->mapping has been set up. This must happen atomically 5599 * as part of the page instantiation, i.e. under the page table lock 5600 * for anonymous pages, under the page lock for page and swap cache. 5601 * 5602 * In addition, the page must not be on the LRU during the commit, to 5603 * prevent racing with task migration. If it might be, use @lrucare. 5604 * 5605 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5606 */ 5607 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5608 bool lrucare, bool compound) 5609 { 5610 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5611 5612 VM_BUG_ON_PAGE(!page->mapping, page); 5613 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5614 5615 if (mem_cgroup_disabled()) 5616 return; 5617 /* 5618 * Swap faults will attempt to charge the same page multiple 5619 * times. But reuse_swap_page() might have removed the page 5620 * from swapcache already, so we can't check PageSwapCache(). 5621 */ 5622 if (!memcg) 5623 return; 5624 5625 commit_charge(page, memcg, lrucare); 5626 5627 local_irq_disable(); 5628 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); 5629 memcg_check_events(memcg, page); 5630 local_irq_enable(); 5631 5632 if (do_memsw_account() && PageSwapCache(page)) { 5633 swp_entry_t entry = { .val = page_private(page) }; 5634 /* 5635 * The swap entry might not get freed for a long time, 5636 * let's not wait for it. The page already received a 5637 * memory+swap charge, drop the swap entry duplicate. 5638 */ 5639 mem_cgroup_uncharge_swap(entry, nr_pages); 5640 } 5641 } 5642 5643 /** 5644 * mem_cgroup_cancel_charge - cancel a page charge 5645 * @page: page to charge 5646 * @memcg: memcg to charge the page to 5647 * @compound: charge the page as compound or small page 5648 * 5649 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5650 */ 5651 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, 5652 bool compound) 5653 { 5654 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5655 5656 if (mem_cgroup_disabled()) 5657 return; 5658 /* 5659 * Swap faults will attempt to charge the same page multiple 5660 * times. But reuse_swap_page() might have removed the page 5661 * from swapcache already, so we can't check PageSwapCache(). 
5662 */ 5663 if (!memcg) 5664 return; 5665 5666 cancel_charge(memcg, nr_pages); 5667 } 5668 5669 struct uncharge_gather { 5670 struct mem_cgroup *memcg; 5671 unsigned long pgpgout; 5672 unsigned long nr_anon; 5673 unsigned long nr_file; 5674 unsigned long nr_kmem; 5675 unsigned long nr_huge; 5676 unsigned long nr_shmem; 5677 struct page *dummy_page; 5678 }; 5679 5680 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 5681 { 5682 memset(ug, 0, sizeof(*ug)); 5683 } 5684 5685 static void uncharge_batch(const struct uncharge_gather *ug) 5686 { 5687 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem; 5688 unsigned long flags; 5689 5690 if (!mem_cgroup_is_root(ug->memcg)) { 5691 page_counter_uncharge(&ug->memcg->memory, nr_pages); 5692 if (do_memsw_account()) 5693 page_counter_uncharge(&ug->memcg->memsw, nr_pages); 5694 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 5695 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 5696 memcg_oom_recover(ug->memcg); 5697 } 5698 5699 local_irq_save(flags); 5700 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); 5701 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); 5702 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); 5703 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem); 5704 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 5705 __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); 5706 memcg_check_events(ug->memcg, ug->dummy_page); 5707 local_irq_restore(flags); 5708 5709 if (!mem_cgroup_is_root(ug->memcg)) 5710 css_put_many(&ug->memcg->css, nr_pages); 5711 } 5712 5713 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 5714 { 5715 VM_BUG_ON_PAGE(PageLRU(page), page); 5716 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) && 5717 !PageHWPoison(page) , page); 5718 5719 if (!page->mem_cgroup) 5720 return; 5721 5722 /* 5723 * Nobody should be changing or seriously looking at 5724 * page->mem_cgroup at this point, we have fully 5725 * exclusive access to the page. 5726 */ 5727 5728 if (ug->memcg != page->mem_cgroup) { 5729 if (ug->memcg) { 5730 uncharge_batch(ug); 5731 uncharge_gather_clear(ug); 5732 } 5733 ug->memcg = page->mem_cgroup; 5734 } 5735 5736 if (!PageKmemcg(page)) { 5737 unsigned int nr_pages = 1; 5738 5739 if (PageTransHuge(page)) { 5740 nr_pages <<= compound_order(page); 5741 ug->nr_huge += nr_pages; 5742 } 5743 if (PageAnon(page)) 5744 ug->nr_anon += nr_pages; 5745 else { 5746 ug->nr_file += nr_pages; 5747 if (PageSwapBacked(page)) 5748 ug->nr_shmem += nr_pages; 5749 } 5750 ug->pgpgout++; 5751 } else { 5752 ug->nr_kmem += 1 << compound_order(page); 5753 __ClearPageKmemcg(page); 5754 } 5755 5756 ug->dummy_page = page; 5757 page->mem_cgroup = NULL; 5758 } 5759 5760 static void uncharge_list(struct list_head *page_list) 5761 { 5762 struct uncharge_gather ug; 5763 struct list_head *next; 5764 5765 uncharge_gather_clear(&ug); 5766 5767 /* 5768 * Note that the list can be a single page->lru; hence the 5769 * do-while loop instead of a simple list_for_each_entry(). 
5770 */ 5771 next = page_list->next; 5772 do { 5773 struct page *page; 5774 5775 page = list_entry(next, struct page, lru); 5776 next = page->lru.next; 5777 5778 uncharge_page(page, &ug); 5779 } while (next != page_list); 5780 5781 if (ug.memcg) 5782 uncharge_batch(&ug); 5783 } 5784 5785 /** 5786 * mem_cgroup_uncharge - uncharge a page 5787 * @page: page to uncharge 5788 * 5789 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5790 * mem_cgroup_commit_charge(). 5791 */ 5792 void mem_cgroup_uncharge(struct page *page) 5793 { 5794 struct uncharge_gather ug; 5795 5796 if (mem_cgroup_disabled()) 5797 return; 5798 5799 /* Don't touch page->lru of any random page, pre-check: */ 5800 if (!page->mem_cgroup) 5801 return; 5802 5803 uncharge_gather_clear(&ug); 5804 uncharge_page(page, &ug); 5805 uncharge_batch(&ug); 5806 } 5807 5808 /** 5809 * mem_cgroup_uncharge_list - uncharge a list of page 5810 * @page_list: list of pages to uncharge 5811 * 5812 * Uncharge a list of pages previously charged with 5813 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5814 */ 5815 void mem_cgroup_uncharge_list(struct list_head *page_list) 5816 { 5817 if (mem_cgroup_disabled()) 5818 return; 5819 5820 if (!list_empty(page_list)) 5821 uncharge_list(page_list); 5822 } 5823 5824 /** 5825 * mem_cgroup_migrate - charge a page's replacement 5826 * @oldpage: currently circulating page 5827 * @newpage: replacement page 5828 * 5829 * Charge @newpage as a replacement page for @oldpage. @oldpage will 5830 * be uncharged upon free. 5831 * 5832 * Both pages must be locked, @newpage->mapping must be set up. 5833 */ 5834 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 5835 { 5836 struct mem_cgroup *memcg; 5837 unsigned int nr_pages; 5838 bool compound; 5839 unsigned long flags; 5840 5841 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5842 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5843 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5844 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5845 newpage); 5846 5847 if (mem_cgroup_disabled()) 5848 return; 5849 5850 /* Page cache replacement: new page already charged? */ 5851 if (newpage->mem_cgroup) 5852 return; 5853 5854 /* Swapcache readahead pages can get replaced before being charged */ 5855 memcg = oldpage->mem_cgroup; 5856 if (!memcg) 5857 return; 5858 5859 /* Force-charge the new page. The old one will be freed soon */ 5860 compound = PageTransHuge(newpage); 5861 nr_pages = compound ? hpage_nr_pages(newpage) : 1; 5862 5863 page_counter_charge(&memcg->memory, nr_pages); 5864 if (do_memsw_account()) 5865 page_counter_charge(&memcg->memsw, nr_pages); 5866 css_get_many(&memcg->css, nr_pages); 5867 5868 commit_charge(newpage, memcg, false); 5869 5870 local_irq_save(flags); 5871 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 5872 memcg_check_events(memcg, newpage); 5873 local_irq_restore(flags); 5874 } 5875 5876 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5877 EXPORT_SYMBOL(memcg_sockets_enabled_key); 5878 5879 void mem_cgroup_sk_alloc(struct sock *sk) 5880 { 5881 struct mem_cgroup *memcg; 5882 5883 if (!mem_cgroup_sockets_enabled) 5884 return; 5885 5886 /* 5887 * Socket cloning can throw us here with sk_memcg already 5888 * filled. It won't however, necessarily happen from 5889 * process context. So the test for root memcg given 5890 * the current task's memcg won't help us in this case. 
5891 * 5892 * Respecting the original socket's memcg is a better 5893 * decision in this case. 5894 */ 5895 if (sk->sk_memcg) { 5896 css_get(&sk->sk_memcg->css); 5897 return; 5898 } 5899 5900 rcu_read_lock(); 5901 memcg = mem_cgroup_from_task(current); 5902 if (memcg == root_mem_cgroup) 5903 goto out; 5904 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 5905 goto out; 5906 if (css_tryget_online(&memcg->css)) 5907 sk->sk_memcg = memcg; 5908 out: 5909 rcu_read_unlock(); 5910 } 5911 5912 void mem_cgroup_sk_free(struct sock *sk) 5913 { 5914 if (sk->sk_memcg) 5915 css_put(&sk->sk_memcg->css); 5916 } 5917 5918 /** 5919 * mem_cgroup_charge_skmem - charge socket memory 5920 * @memcg: memcg to charge 5921 * @nr_pages: number of pages to charge 5922 * 5923 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 5924 * @memcg's configured limit, %false if the charge had to be forced. 5925 */ 5926 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5927 { 5928 gfp_t gfp_mask = GFP_KERNEL; 5929 5930 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5931 struct page_counter *fail; 5932 5933 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 5934 memcg->tcpmem_pressure = 0; 5935 return true; 5936 } 5937 page_counter_charge(&memcg->tcpmem, nr_pages); 5938 memcg->tcpmem_pressure = 1; 5939 return false; 5940 } 5941 5942 /* Don't block in the packet receive path */ 5943 if (in_softirq()) 5944 gfp_mask = GFP_NOWAIT; 5945 5946 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 5947 5948 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 5949 return true; 5950 5951 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 5952 return false; 5953 } 5954 5955 /** 5956 * mem_cgroup_uncharge_skmem - uncharge socket memory 5957 * @memcg: memcg to uncharge 5958 * @nr_pages: number of pages to uncharge 5959 */ 5960 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5961 { 5962 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5963 page_counter_uncharge(&memcg->tcpmem, nr_pages); 5964 return; 5965 } 5966 5967 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 5968 5969 refill_stock(memcg, nr_pages); 5970 } 5971 5972 static int __init cgroup_memory(char *s) 5973 { 5974 char *token; 5975 5976 while ((token = strsep(&s, ",")) != NULL) { 5977 if (!*token) 5978 continue; 5979 if (!strcmp(token, "nosocket")) 5980 cgroup_memory_nosocket = true; 5981 if (!strcmp(token, "nokmem")) 5982 cgroup_memory_nokmem = true; 5983 } 5984 return 0; 5985 } 5986 __setup("cgroup.memory=", cgroup_memory); 5987 5988 /* 5989 * subsys_initcall() for memory controller. 5990 * 5991 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 5992 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 5993 * basically everything that doesn't depend on a specific mem_cgroup structure 5994 * should be initialized from here. 5995 */ 5996 static int __init mem_cgroup_init(void) 5997 { 5998 int cpu, node; 5999 6000 #ifndef CONFIG_SLOB 6001 /* 6002 * Kmem cache creation is mostly done with the slab_mutex held, 6003 * so use a workqueue with limited concurrency to avoid stalling 6004 * all worker threads in case lots of cgroups are created and 6005 * destroyed simultaneously. 
static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

#ifndef CONFIG_SLOB
	/*
	 * Kmem cache creation is mostly done with the slab_mutex held,
	 * so use a workqueue with limited concurrency to avoid stalling
	 * all worker threads in case lots of cgroups are created and
	 * destroyed simultaneously.
	 */
	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
	BUG_ON(!memcg_kmem_cache_wq);
#endif

	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		rtpn->rb_rightmost = NULL;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!atomic_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}
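/*
 * Reference pairing sketch (illustrative only): each reference taken
 * here pins the memcg ID for as long as a swap entry points at it, and
 * must be dropped once the entries are released:
 *
 *	memcg = mem_cgroup_id_get_online(memcg);  (takes one reference)
 *	if (nr_pages > 1)
 *		mem_cgroup_id_get_many(memcg, nr_pages - 1);
 *	swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
 *	...
 *	mem_cgroup_id_put_many(memcg, nr_pages);  (when the entries die)
 *
 * mem_cgroup_swapout() and mem_cgroup_try_charge_swap() below take the
 * references; mem_cgroup_uncharge_swap() drops them.
 */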
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = hpage_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
				     -nr_entries);
	memcg_check_events(memcg, page);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_entries);
}

/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = hpage_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		return 0;
	}

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}

/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.max) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
			return true;

	return false;
}
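/*
 * Worked example (hypothetical numbers): consider a hierarchy
 * root -> A -> B where A has swap.max = 100 pages with 40 pages
 * charged, and B is unlimited. mem_cgroup_get_nr_swap_pages(B) takes
 * the minimum over B and its ancestors up to (but excluding) the root,
 * so it returns min(free system swap, 100 - 40), i.e. at most 60
 * pages. Likewise mem_cgroup_swap_full() reports true as soon as any
 * such memcg has consumed half of its swap.max, mirroring the global
 * vm_swap_full() heuristic.
 */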
/* For remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.max);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}

static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */
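/*
 * Interface sketch (illustrative, userspace side): on a cgroup2 mount
 * the swap_files defined above appear in each non-root cgroup
 * directory, and swap_max_write() parses limits through
 * page_counter_memparse():
 *
 *	# echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	# cat /sys/fs/cgroup/<group>/memory.swap.current
 *	# cat /sys/fs/cgroup/<group>/memory.swap.events
 *
 * Writing "max" removes the limit again. Swap accounting as a whole
 * can be toggled at boot with swapaccount=0 or swapaccount=1, handled
 * by enable_swap_account() above.
 */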