/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char *const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few are kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
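
/*
 * Note on the ordering established above: nodes are keyed by
 * usage_in_excess and ties go to the right, so the rightmost node is
 * always the memcg that exceeds its soft limit by the largest amount.
 * __mem_cgroup_largest_soft_limit_node() relies on this when it picks
 * the next soft limit reclaim target with rb_last().
 */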

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return page count for single (non recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement a periodic synchronization of the counter in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because they account memory. Even if we provided a quick-and-fuzzy
 * read, we would always have to visit all online cpus and make the sum.
 * So, for now, unnecessary synchronization is not implemented (it is only
 * implemented for cpu hotplug).
 *
 * If there are kernel internal actions which can make use of a not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */

static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
				      enum memcg_event_item event)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[event], cpu);
	return val;
}
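
/*
 * Per-cpu bookkeeping for a (un)charge: nr_pages is positive for a charge
 * and negative for an uncharge.  Anonymous pages are accounted as RSS,
 * everything else as CACHE (plus NR_SHMEM for shmem/tmpfs), and a single
 * PGPGIN/PGPGOUT event is counted per call regardless of page size.
 */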
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEMCG_RSS], nr_pages);
	else {
		__this_cpu_add(memcg->stat->count[MEMCG_CACHE], nr_pages);
		if (PageSwapBacked(page))
			__this_cpu_add(memcg->stat->count[NR_SHMEM], nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEMCG_RSS_HUGE], nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
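		 *
		 * The lookup can also race with the memcg being
		 * offlined; css_tryget_online() below fails in that
		 * case and the loop simply retries until a live css
		 * reference is obtained.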
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
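		 *
		 * A failed cmpxchg() here just means another reclaimer
		 * advanced the shared iterator first; the walk then
		 * continues from whatever position won the race.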
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
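 *
 * Under legacy memory+swap accounting (see do_memsw_account()), the
 * margin of the memsw counter is considered as well and the smaller of
 * the two margins is returned.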
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * the moving cgroups. This is used for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

unsigned int memcg1_stats[] = {
	MEMCG_CACHE,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", memcg1_stat_names[i],
				K(memcg_page_state(iter, memcg1_stats[i])));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;
		unsigned long swap_limit;

		memsw_limit = memcg->memsw.limit;
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
	}
	return limit;
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	mutex_lock(&oom_lock);
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is O.K. Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit their limits, they will see contention on a node. But freeing from a
 * remote node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fallback to the current node in that case for simplicity.
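	 *
	 * next_node_in() wraps around the nodemask and only returns
	 * MAX_NUMNODES when scan_nodes is empty, which is how that
	 * situation is detected here.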
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive so as to
				 * reclaim too much, nor too little that we
				 * keep coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check if the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
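			 * The iteration stops at the first already-locked
			 * memcg; any oom_lock bits set so far are rolled
			 * back below before returning failure.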
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges. Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup and stabilizes their page->mem_cgroup binding.
 */
void lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
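	 *
	 * Note that the RCU read lock stays held until the matching
	 * unlock_page_memcg() call, so callers must not sleep in
	 * between.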
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return;

	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(unlock_page_memcg);

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Return stocks cached in percpu and reset cached information.
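 *
 * The pre-charge being returned was taken from the page counters by
 * try_charge() in CHARGE_BATCH-sized steps; draining gives the unused
 * remainder back and drops the corresponding css references.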
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache charges (nr_pages) in the local per-cpu area.
 * This will be consumed by the consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	local_irq_restore(flags);
}

/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return 0;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_event(memcg, MEMCG_HIGH);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
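 *
 * try_charge() records the excess in current->memcg_nr_pages_over_high
 * and calls set_notify_resume(), which gets this function invoked
 * before the task returns to userspace.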
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_event(mem_over_limit, MEMCG_MAX);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
1949 */ 1950 if (mem_cgroup_wait_acct_move(mem_over_limit)) 1951 goto retry; 1952 1953 if (nr_retries--) 1954 goto retry; 1955 1956 if (gfp_mask & __GFP_NOFAIL) 1957 goto force; 1958 1959 if (fatal_signal_pending(current)) 1960 goto force; 1961 1962 mem_cgroup_event(mem_over_limit, MEMCG_OOM); 1963 1964 mem_cgroup_oom(mem_over_limit, gfp_mask, 1965 get_order(nr_pages * PAGE_SIZE)); 1966 nomem: 1967 if (!(gfp_mask & __GFP_NOFAIL)) 1968 return -ENOMEM; 1969 force: 1970 /* 1971 * The allocation either can't fail or will lead to more memory 1972 * being freed very soon. Allow memory usage go over the limit 1973 * temporarily by force charging it. 1974 */ 1975 page_counter_charge(&memcg->memory, nr_pages); 1976 if (do_memsw_account()) 1977 page_counter_charge(&memcg->memsw, nr_pages); 1978 css_get_many(&memcg->css, nr_pages); 1979 1980 return 0; 1981 1982 done_restock: 1983 css_get_many(&memcg->css, batch); 1984 if (batch > nr_pages) 1985 refill_stock(memcg, batch - nr_pages); 1986 1987 /* 1988 * If the hierarchy is above the normal consumption range, schedule 1989 * reclaim on returning to userland. We can perform reclaim here 1990 * if __GFP_RECLAIM but let's always punt for simplicity and so that 1991 * GFP_KERNEL can consistently be used during reclaim. @memcg is 1992 * not recorded as it most likely matches current's and won't 1993 * change in the meantime. As high limit is checked again before 1994 * reclaim, the cost of mismatch is negligible. 1995 */ 1996 do { 1997 if (page_counter_read(&memcg->memory) > memcg->high) { 1998 /* Don't bother a random interrupted task */ 1999 if (in_interrupt()) { 2000 schedule_work(&memcg->high_work); 2001 break; 2002 } 2003 current->memcg_nr_pages_over_high += batch; 2004 set_notify_resume(current); 2005 break; 2006 } 2007 } while ((memcg = parent_mem_cgroup(memcg))); 2008 2009 return 0; 2010 } 2011 2012 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2013 { 2014 if (mem_cgroup_is_root(memcg)) 2015 return; 2016 2017 page_counter_uncharge(&memcg->memory, nr_pages); 2018 if (do_memsw_account()) 2019 page_counter_uncharge(&memcg->memsw, nr_pages); 2020 2021 css_put_many(&memcg->css, nr_pages); 2022 } 2023 2024 static void lock_page_lru(struct page *page, int *isolated) 2025 { 2026 struct zone *zone = page_zone(page); 2027 2028 spin_lock_irq(zone_lru_lock(zone)); 2029 if (PageLRU(page)) { 2030 struct lruvec *lruvec; 2031 2032 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2033 ClearPageLRU(page); 2034 del_page_from_lru_list(page, lruvec, page_lru(page)); 2035 *isolated = 1; 2036 } else 2037 *isolated = 0; 2038 } 2039 2040 static void unlock_page_lru(struct page *page, int isolated) 2041 { 2042 struct zone *zone = page_zone(page); 2043 2044 if (isolated) { 2045 struct lruvec *lruvec; 2046 2047 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 2048 VM_BUG_ON_PAGE(PageLRU(page), page); 2049 SetPageLRU(page); 2050 add_page_to_lru_list(page, lruvec, page_lru(page)); 2051 } 2052 spin_unlock_irq(zone_lru_lock(zone)); 2053 } 2054 2055 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2056 bool lrucare) 2057 { 2058 int isolated; 2059 2060 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2061 2062 /* 2063 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page 2064 * may already be on some other mem_cgroup's LRU. Take care of it. 
2065 */ 2066 if (lrucare) 2067 lock_page_lru(page, &isolated); 2068 2069 /* 2070 * Nobody should be changing or seriously looking at 2071 * page->mem_cgroup at this point: 2072 * 2073 * - the page is uncharged 2074 * 2075 * - the page is off-LRU 2076 * 2077 * - an anonymous fault has exclusive page access, except for 2078 * a locked page table 2079 * 2080 * - a page cache insertion, a swapin fault, or a migration 2081 * have the page locked 2082 */ 2083 page->mem_cgroup = memcg; 2084 2085 if (lrucare) 2086 unlock_page_lru(page, isolated); 2087 } 2088 2089 #ifndef CONFIG_SLOB 2090 static int memcg_alloc_cache_id(void) 2091 { 2092 int id, size; 2093 int err; 2094 2095 id = ida_simple_get(&memcg_cache_ida, 2096 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2097 if (id < 0) 2098 return id; 2099 2100 if (id < memcg_nr_cache_ids) 2101 return id; 2102 2103 /* 2104 * There's no space for the new id in memcg_caches arrays, 2105 * so we have to grow them. 2106 */ 2107 down_write(&memcg_cache_ids_sem); 2108 2109 size = 2 * (id + 1); 2110 if (size < MEMCG_CACHES_MIN_SIZE) 2111 size = MEMCG_CACHES_MIN_SIZE; 2112 else if (size > MEMCG_CACHES_MAX_SIZE) 2113 size = MEMCG_CACHES_MAX_SIZE; 2114 2115 err = memcg_update_all_caches(size); 2116 if (!err) 2117 err = memcg_update_all_list_lrus(size); 2118 if (!err) 2119 memcg_nr_cache_ids = size; 2120 2121 up_write(&memcg_cache_ids_sem); 2122 2123 if (err) { 2124 ida_simple_remove(&memcg_cache_ida, id); 2125 return err; 2126 } 2127 return id; 2128 } 2129 2130 static void memcg_free_cache_id(int id) 2131 { 2132 ida_simple_remove(&memcg_cache_ida, id); 2133 } 2134 2135 struct memcg_kmem_cache_create_work { 2136 struct mem_cgroup *memcg; 2137 struct kmem_cache *cachep; 2138 struct work_struct work; 2139 }; 2140 2141 static void memcg_kmem_cache_create_func(struct work_struct *w) 2142 { 2143 struct memcg_kmem_cache_create_work *cw = 2144 container_of(w, struct memcg_kmem_cache_create_work, work); 2145 struct mem_cgroup *memcg = cw->memcg; 2146 struct kmem_cache *cachep = cw->cachep; 2147 2148 memcg_create_kmem_cache(memcg, cachep); 2149 2150 css_put(&memcg->css); 2151 kfree(cw); 2152 } 2153 2154 /* 2155 * Enqueue the creation of a per-memcg kmem_cache. 2156 */ 2157 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2158 struct kmem_cache *cachep) 2159 { 2160 struct memcg_kmem_cache_create_work *cw; 2161 2162 cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2163 if (!cw) 2164 return; 2165 2166 css_get(&memcg->css); 2167 2168 cw->memcg = memcg; 2169 cw->cachep = cachep; 2170 INIT_WORK(&cw->work, memcg_kmem_cache_create_func); 2171 2172 queue_work(memcg_kmem_cache_wq, &cw->work); 2173 } 2174 2175 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, 2176 struct kmem_cache *cachep) 2177 { 2178 /* 2179 * We need to stop accounting when we kmalloc, because if the 2180 * corresponding kmalloc cache is not yet created, the first allocation 2181 * in __memcg_schedule_kmem_cache_create will recurse. 2182 * 2183 * However, it is better to enclose the whole function. Depending on 2184 * the debugging options enabled, INIT_WORK(), for instance, can 2185 * trigger an allocation. This too, will make us recurse. Because at 2186 * this point we can't allow ourselves back into memcg_kmem_get_cache, 2187 * the safest choice is to do it like this, wrapping the whole function. 
2188 */ 2189 current->memcg_kmem_skip_account = 1; 2190 __memcg_schedule_kmem_cache_create(memcg, cachep); 2191 current->memcg_kmem_skip_account = 0; 2192 } 2193 2194 static inline bool memcg_kmem_bypass(void) 2195 { 2196 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 2197 return true; 2198 return false; 2199 } 2200 2201 /** 2202 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation 2203 * @cachep: the original global kmem cache 2204 * 2205 * Return the kmem_cache we're supposed to use for a slab allocation. 2206 * We try to use the current memcg's version of the cache. 2207 * 2208 * If the cache does not exist yet, if we are the first user of it, we 2209 * create it asynchronously in a workqueue and let the current allocation 2210 * go through with the original cache. 2211 * 2212 * This function takes a reference to the cache it returns to assure it 2213 * won't get destroyed while we are working with it. Once the caller is 2214 * done with it, memcg_kmem_put_cache() must be called to release the 2215 * reference. 2216 */ 2217 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) 2218 { 2219 struct mem_cgroup *memcg; 2220 struct kmem_cache *memcg_cachep; 2221 int kmemcg_id; 2222 2223 VM_BUG_ON(!is_root_cache(cachep)); 2224 2225 if (memcg_kmem_bypass()) 2226 return cachep; 2227 2228 if (current->memcg_kmem_skip_account) 2229 return cachep; 2230 2231 memcg = get_mem_cgroup_from_mm(current->mm); 2232 kmemcg_id = READ_ONCE(memcg->kmemcg_id); 2233 if (kmemcg_id < 0) 2234 goto out; 2235 2236 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id); 2237 if (likely(memcg_cachep)) 2238 return memcg_cachep; 2239 2240 /* 2241 * If we are in a safe context (can wait, and not in interrupt 2242 * context), we could be be predictable and return right away. 2243 * This would guarantee that the allocation being performed 2244 * already belongs in the new cache. 2245 * 2246 * However, there are some clashes that can arrive from locking. 2247 * For instance, because we acquire the slab_mutex while doing 2248 * memcg_create_kmem_cache, this means no further allocation 2249 * could happen with the slab_mutex held. So it's better to 2250 * defer everything. 2251 */ 2252 memcg_schedule_kmem_cache_create(memcg, cachep); 2253 out: 2254 css_put(&memcg->css); 2255 return cachep; 2256 } 2257 2258 /** 2259 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache 2260 * @cachep: the cache returned by memcg_kmem_get_cache 2261 */ 2262 void memcg_kmem_put_cache(struct kmem_cache *cachep) 2263 { 2264 if (!is_root_cache(cachep)) 2265 css_put(&cachep->memcg_params.memcg->css); 2266 } 2267 2268 /** 2269 * memcg_kmem_charge: charge a kmem page 2270 * @page: page to charge 2271 * @gfp: reclaim mode 2272 * @order: allocation order 2273 * @memcg: memory cgroup to charge 2274 * 2275 * Returns 0 on success, an error code on failure. 
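 *
 * Clarifying note (editorial, not in the original source): the charge is
 * always made against the memcg's main "memory" counter via try_charge();
 * only on the legacy (cgroup v1) hierarchy is it additionally tracked in
 * the separate "kmem" counter, which is what the
 * !cgroup_subsys_on_dfl(memory_cgrp_subsys) branch below implements.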
2276 */ 2277 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 2278 struct mem_cgroup *memcg) 2279 { 2280 unsigned int nr_pages = 1 << order; 2281 struct page_counter *counter; 2282 int ret; 2283 2284 ret = try_charge(memcg, gfp, nr_pages); 2285 if (ret) 2286 return ret; 2287 2288 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 2289 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 2290 cancel_charge(memcg, nr_pages); 2291 return -ENOMEM; 2292 } 2293 2294 page->mem_cgroup = memcg; 2295 2296 return 0; 2297 } 2298 2299 /** 2300 * memcg_kmem_charge: charge a kmem page to the current memory cgroup 2301 * @page: page to charge 2302 * @gfp: reclaim mode 2303 * @order: allocation order 2304 * 2305 * Returns 0 on success, an error code on failure. 2306 */ 2307 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) 2308 { 2309 struct mem_cgroup *memcg; 2310 int ret = 0; 2311 2312 if (memcg_kmem_bypass()) 2313 return 0; 2314 2315 memcg = get_mem_cgroup_from_mm(current->mm); 2316 if (!mem_cgroup_is_root(memcg)) { 2317 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2318 if (!ret) 2319 __SetPageKmemcg(page); 2320 } 2321 css_put(&memcg->css); 2322 return ret; 2323 } 2324 /** 2325 * memcg_kmem_uncharge: uncharge a kmem page 2326 * @page: page to uncharge 2327 * @order: allocation order 2328 */ 2329 void memcg_kmem_uncharge(struct page *page, int order) 2330 { 2331 struct mem_cgroup *memcg = page->mem_cgroup; 2332 unsigned int nr_pages = 1 << order; 2333 2334 if (!memcg) 2335 return; 2336 2337 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 2338 2339 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2340 page_counter_uncharge(&memcg->kmem, nr_pages); 2341 2342 page_counter_uncharge(&memcg->memory, nr_pages); 2343 if (do_memsw_account()) 2344 page_counter_uncharge(&memcg->memsw, nr_pages); 2345 2346 page->mem_cgroup = NULL; 2347 2348 /* slab pages do not have PageKmemcg flag set */ 2349 if (PageKmemcg(page)) 2350 __ClearPageKmemcg(page); 2351 2352 css_put_many(&memcg->css, nr_pages); 2353 } 2354 #endif /* !CONFIG_SLOB */ 2355 2356 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2357 2358 /* 2359 * Because tail pages are not marked as "used", set it. We're under 2360 * zone_lru_lock and migration entries setup in all page mappings. 2361 */ 2362 void mem_cgroup_split_huge_fixup(struct page *head) 2363 { 2364 int i; 2365 2366 if (mem_cgroup_disabled()) 2367 return; 2368 2369 for (i = 1; i < HPAGE_PMD_NR; i++) 2370 head[i].mem_cgroup = head->mem_cgroup; 2371 2372 __this_cpu_sub(head->mem_cgroup->stat->count[MEMCG_RSS_HUGE], 2373 HPAGE_PMD_NR); 2374 } 2375 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2376 2377 #ifdef CONFIG_MEMCG_SWAP 2378 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2379 int nr_entries) 2380 { 2381 this_cpu_add(memcg->stat->count[MEMCG_SWAP], nr_entries); 2382 } 2383 2384 /** 2385 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 2386 * @entry: swap entry to be moved 2387 * @from: mem_cgroup which the entry is moved from 2388 * @to: mem_cgroup which the entry is moved to 2389 * 2390 * It succeeds only when the swap_cgroup's record for this entry is the same 2391 * as the mem_cgroup's id of @from. 2392 * 2393 * Returns 0 on success, -EINVAL on failure. 2394 * 2395 * The caller must have charged to @to, IOW, called page_counter_charge() about 2396 * both res and memsw, and called css_get(). 
2397 */ 2398 static int mem_cgroup_move_swap_account(swp_entry_t entry, 2399 struct mem_cgroup *from, struct mem_cgroup *to) 2400 { 2401 unsigned short old_id, new_id; 2402 2403 old_id = mem_cgroup_id(from); 2404 new_id = mem_cgroup_id(to); 2405 2406 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 2407 mem_cgroup_swap_statistics(from, -1); 2408 mem_cgroup_swap_statistics(to, 1); 2409 return 0; 2410 } 2411 return -EINVAL; 2412 } 2413 #else 2414 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 2415 struct mem_cgroup *from, struct mem_cgroup *to) 2416 { 2417 return -EINVAL; 2418 } 2419 #endif 2420 2421 static DEFINE_MUTEX(memcg_limit_mutex); 2422 2423 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 2424 unsigned long limit) 2425 { 2426 unsigned long curusage; 2427 unsigned long oldusage; 2428 bool enlarge = false; 2429 int retry_count; 2430 int ret; 2431 2432 /* 2433 * For keeping hierarchical_reclaim simple, how long we should retry 2434 * is depends on callers. We set our retry-count to be function 2435 * of # of children which we should visit in this loop. 2436 */ 2437 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2438 mem_cgroup_count_children(memcg); 2439 2440 oldusage = page_counter_read(&memcg->memory); 2441 2442 do { 2443 if (signal_pending(current)) { 2444 ret = -EINTR; 2445 break; 2446 } 2447 2448 mutex_lock(&memcg_limit_mutex); 2449 if (limit > memcg->memsw.limit) { 2450 mutex_unlock(&memcg_limit_mutex); 2451 ret = -EINVAL; 2452 break; 2453 } 2454 if (limit > memcg->memory.limit) 2455 enlarge = true; 2456 ret = page_counter_limit(&memcg->memory, limit); 2457 mutex_unlock(&memcg_limit_mutex); 2458 2459 if (!ret) 2460 break; 2461 2462 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); 2463 2464 curusage = page_counter_read(&memcg->memory); 2465 /* Usage is reduced ? */ 2466 if (curusage >= oldusage) 2467 retry_count--; 2468 else 2469 oldusage = curusage; 2470 } while (retry_count); 2471 2472 if (!ret && enlarge) 2473 memcg_oom_recover(memcg); 2474 2475 return ret; 2476 } 2477 2478 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, 2479 unsigned long limit) 2480 { 2481 unsigned long curusage; 2482 unsigned long oldusage; 2483 bool enlarge = false; 2484 int retry_count; 2485 int ret; 2486 2487 /* see mem_cgroup_resize_res_limit */ 2488 retry_count = MEM_CGROUP_RECLAIM_RETRIES * 2489 mem_cgroup_count_children(memcg); 2490 2491 oldusage = page_counter_read(&memcg->memsw); 2492 2493 do { 2494 if (signal_pending(current)) { 2495 ret = -EINTR; 2496 break; 2497 } 2498 2499 mutex_lock(&memcg_limit_mutex); 2500 if (limit < memcg->memory.limit) { 2501 mutex_unlock(&memcg_limit_mutex); 2502 ret = -EINVAL; 2503 break; 2504 } 2505 if (limit > memcg->memsw.limit) 2506 enlarge = true; 2507 ret = page_counter_limit(&memcg->memsw, limit); 2508 mutex_unlock(&memcg_limit_mutex); 2509 2510 if (!ret) 2511 break; 2512 2513 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); 2514 2515 curusage = page_counter_read(&memcg->memsw); 2516 /* Usage is reduced ? 
*/ 2517 if (curusage >= oldusage) 2518 retry_count--; 2519 else 2520 oldusage = curusage; 2521 } while (retry_count); 2522 2523 if (!ret && enlarge) 2524 memcg_oom_recover(memcg); 2525 2526 return ret; 2527 } 2528 2529 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 2530 gfp_t gfp_mask, 2531 unsigned long *total_scanned) 2532 { 2533 unsigned long nr_reclaimed = 0; 2534 struct mem_cgroup_per_node *mz, *next_mz = NULL; 2535 unsigned long reclaimed; 2536 int loop = 0; 2537 struct mem_cgroup_tree_per_node *mctz; 2538 unsigned long excess; 2539 unsigned long nr_scanned; 2540 2541 if (order > 0) 2542 return 0; 2543 2544 mctz = soft_limit_tree_node(pgdat->node_id); 2545 2546 /* 2547 * Do not even bother to check the largest node if the root 2548 * is empty. Do it lockless to prevent lock bouncing. Races 2549 * are acceptable as soft limit is best effort anyway. 2550 */ 2551 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 2552 return 0; 2553 2554 /* 2555 * This loop can run a while, specially if mem_cgroup's continuously 2556 * keep exceeding their soft limit and putting the system under 2557 * pressure 2558 */ 2559 do { 2560 if (next_mz) 2561 mz = next_mz; 2562 else 2563 mz = mem_cgroup_largest_soft_limit_node(mctz); 2564 if (!mz) 2565 break; 2566 2567 nr_scanned = 0; 2568 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 2569 gfp_mask, &nr_scanned); 2570 nr_reclaimed += reclaimed; 2571 *total_scanned += nr_scanned; 2572 spin_lock_irq(&mctz->lock); 2573 __mem_cgroup_remove_exceeded(mz, mctz); 2574 2575 /* 2576 * If we failed to reclaim anything from this memory cgroup 2577 * it is time to move on to the next cgroup 2578 */ 2579 next_mz = NULL; 2580 if (!reclaimed) 2581 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 2582 2583 excess = soft_limit_excess(mz->memcg); 2584 /* 2585 * One school of thought says that we should not add 2586 * back the node to the tree if reclaim returns 0. 2587 * But our reclaim could return 0, simply because due 2588 * to priority we are exposing a smaller subset of 2589 * memory to reclaim from. Consider this as a longer 2590 * term TODO. 2591 */ 2592 /* If excess == 0, no tree ops */ 2593 __mem_cgroup_insert_exceeded(mz, mctz, excess); 2594 spin_unlock_irq(&mctz->lock); 2595 css_put(&mz->memcg->css); 2596 loop++; 2597 /* 2598 * Could not reclaim anything and there are no more 2599 * mem cgroups to try or we seem to be looping without 2600 * reclaiming anything. 2601 */ 2602 if (!nr_reclaimed && 2603 (next_mz == NULL || 2604 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 2605 break; 2606 } while (!nr_reclaimed); 2607 if (next_mz) 2608 css_put(&next_mz->memcg->css); 2609 return nr_reclaimed; 2610 } 2611 2612 /* 2613 * Test whether @memcg has children, dead or alive. Note that this 2614 * function doesn't care whether @memcg has use_hierarchy enabled and 2615 * returns %true if there are child csses according to the cgroup 2616 * hierarchy. Testing use_hierarchy is the caller's responsiblity. 2617 */ 2618 static inline bool memcg_has_children(struct mem_cgroup *memcg) 2619 { 2620 bool ret; 2621 2622 rcu_read_lock(); 2623 ret = css_next_child(NULL, &memcg->css); 2624 rcu_read_unlock(); 2625 return ret; 2626 } 2627 2628 /* 2629 * Reclaims as many pages from the given memcg as possible. 2630 * 2631 * Caller is responsible for holding css reference for memcg. 
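 *
 * Usage note (editorial, not in the original source): this is the backend
 * of the legacy "memory.force_empty" control file handled just below; from
 * userspace it is typically triggered with something like
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * where the written value itself is ignored.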
2632 */ 2633 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 2634 { 2635 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 2636 2637 /* we call try-to-free pages to make this cgroup empty */ 2638 lru_add_drain_all(); 2639 /* try to free all pages in this cgroup */ 2640 while (nr_retries && page_counter_read(&memcg->memory)) { 2641 int progress; 2642 2643 if (signal_pending(current)) 2644 return -EINTR; 2645 2646 progress = try_to_free_mem_cgroup_pages(memcg, 1, 2647 GFP_KERNEL, true); 2648 if (!progress) { 2649 nr_retries--; 2650 /* maybe some writeback is necessary */ 2651 congestion_wait(BLK_RW_ASYNC, HZ/10); 2652 } 2653 2654 } 2655 2656 return 0; 2657 } 2658 2659 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 2660 char *buf, size_t nbytes, 2661 loff_t off) 2662 { 2663 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2664 2665 if (mem_cgroup_is_root(memcg)) 2666 return -EINVAL; 2667 return mem_cgroup_force_empty(memcg) ?: nbytes; 2668 } 2669 2670 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 2671 struct cftype *cft) 2672 { 2673 return mem_cgroup_from_css(css)->use_hierarchy; 2674 } 2675 2676 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 2677 struct cftype *cft, u64 val) 2678 { 2679 int retval = 0; 2680 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2681 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 2682 2683 if (memcg->use_hierarchy == val) 2684 return 0; 2685 2686 /* 2687 * If parent's use_hierarchy is set, we can't make any modifications 2688 * in the child subtrees. If it is unset, then the change can 2689 * occur, provided the current cgroup has no children. 2690 * 2691 * For the root cgroup, parent_memcg is NULL, so we allow the value to be 2692 * set if there are no children.
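 *
 * Concretely (editorial note, not in the original source): flipping
 * use_hierarchy is rejected with -EBUSY once this cgroup already has
 * children, and with -EINVAL if the parent has use_hierarchy set or if
 * the written value is anything other than 0 or 1.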
2693 */ 2694 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 2695 (val == 1 || val == 0)) { 2696 if (!memcg_has_children(memcg)) 2697 memcg->use_hierarchy = val; 2698 else 2699 retval = -EBUSY; 2700 } else 2701 retval = -EINVAL; 2702 2703 return retval; 2704 } 2705 2706 static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat) 2707 { 2708 struct mem_cgroup *iter; 2709 int i; 2710 2711 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT); 2712 2713 for_each_mem_cgroup_tree(iter, memcg) { 2714 for (i = 0; i < MEMCG_NR_STAT; i++) 2715 stat[i] += memcg_page_state(iter, i); 2716 } 2717 } 2718 2719 static void tree_events(struct mem_cgroup *memcg, unsigned long *events) 2720 { 2721 struct mem_cgroup *iter; 2722 int i; 2723 2724 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS); 2725 2726 for_each_mem_cgroup_tree(iter, memcg) { 2727 for (i = 0; i < MEMCG_NR_EVENTS; i++) 2728 events[i] += memcg_sum_events(iter, i); 2729 } 2730 } 2731 2732 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 2733 { 2734 unsigned long val = 0; 2735 2736 if (mem_cgroup_is_root(memcg)) { 2737 struct mem_cgroup *iter; 2738 2739 for_each_mem_cgroup_tree(iter, memcg) { 2740 val += memcg_page_state(iter, MEMCG_CACHE); 2741 val += memcg_page_state(iter, MEMCG_RSS); 2742 if (swap) 2743 val += memcg_page_state(iter, MEMCG_SWAP); 2744 } 2745 } else { 2746 if (!swap) 2747 val = page_counter_read(&memcg->memory); 2748 else 2749 val = page_counter_read(&memcg->memsw); 2750 } 2751 return val; 2752 } 2753 2754 enum { 2755 RES_USAGE, 2756 RES_LIMIT, 2757 RES_MAX_USAGE, 2758 RES_FAILCNT, 2759 RES_SOFT_LIMIT, 2760 }; 2761 2762 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 2763 struct cftype *cft) 2764 { 2765 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 2766 struct page_counter *counter; 2767 2768 switch (MEMFILE_TYPE(cft->private)) { 2769 case _MEM: 2770 counter = &memcg->memory; 2771 break; 2772 case _MEMSWAP: 2773 counter = &memcg->memsw; 2774 break; 2775 case _KMEM: 2776 counter = &memcg->kmem; 2777 break; 2778 case _TCP: 2779 counter = &memcg->tcpmem; 2780 break; 2781 default: 2782 BUG(); 2783 } 2784 2785 switch (MEMFILE_ATTR(cft->private)) { 2786 case RES_USAGE: 2787 if (counter == &memcg->memory) 2788 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 2789 if (counter == &memcg->memsw) 2790 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 2791 return (u64)page_counter_read(counter) * PAGE_SIZE; 2792 case RES_LIMIT: 2793 return (u64)counter->limit * PAGE_SIZE; 2794 case RES_MAX_USAGE: 2795 return (u64)counter->watermark * PAGE_SIZE; 2796 case RES_FAILCNT: 2797 return counter->failcnt; 2798 case RES_SOFT_LIMIT: 2799 return (u64)memcg->soft_limit * PAGE_SIZE; 2800 default: 2801 BUG(); 2802 } 2803 } 2804 2805 #ifndef CONFIG_SLOB 2806 static int memcg_online_kmem(struct mem_cgroup *memcg) 2807 { 2808 int memcg_id; 2809 2810 if (cgroup_memory_nokmem) 2811 return 0; 2812 2813 BUG_ON(memcg->kmemcg_id >= 0); 2814 BUG_ON(memcg->kmem_state); 2815 2816 memcg_id = memcg_alloc_cache_id(); 2817 if (memcg_id < 0) 2818 return memcg_id; 2819 2820 static_branch_inc(&memcg_kmem_enabled_key); 2821 /* 2822 * A memory cgroup is considered kmem-online as soon as it gets 2823 * kmemcg_id. Setting the id after enabling static branching will 2824 * guarantee no one starts accounting before all call sites are 2825 * patched. 
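 *
 * In other words (editorial note, not in the original source): because the
 * static branch is flipped before kmemcg_id is published, any allocation
 * path that observes a valid kmemcg_id is guaranteed to run with the kmem
 * accounting call sites already patched in.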
2826 */ 2827 memcg->kmemcg_id = memcg_id; 2828 memcg->kmem_state = KMEM_ONLINE; 2829 INIT_LIST_HEAD(&memcg->kmem_caches); 2830 2831 return 0; 2832 } 2833 2834 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2835 { 2836 struct cgroup_subsys_state *css; 2837 struct mem_cgroup *parent, *child; 2838 int kmemcg_id; 2839 2840 if (memcg->kmem_state != KMEM_ONLINE) 2841 return; 2842 /* 2843 * Clear the online state before clearing memcg_caches array 2844 * entries. The slab_mutex in memcg_deactivate_kmem_caches() 2845 * guarantees that no cache will be created for this cgroup 2846 * after we are done (see memcg_create_kmem_cache()). 2847 */ 2848 memcg->kmem_state = KMEM_ALLOCATED; 2849 2850 memcg_deactivate_kmem_caches(memcg); 2851 2852 kmemcg_id = memcg->kmemcg_id; 2853 BUG_ON(kmemcg_id < 0); 2854 2855 parent = parent_mem_cgroup(memcg); 2856 if (!parent) 2857 parent = root_mem_cgroup; 2858 2859 /* 2860 * Change kmemcg_id of this cgroup and all its descendants to the 2861 * parent's id, and then move all entries from this cgroup's list_lrus 2862 * to ones of the parent. After we have finished, all list_lrus 2863 * corresponding to this cgroup are guaranteed to remain empty. The 2864 * ordering is imposed by list_lru_node->lock taken by 2865 * memcg_drain_all_list_lrus(). 2866 */ 2867 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 2868 css_for_each_descendant_pre(css, &memcg->css) { 2869 child = mem_cgroup_from_css(css); 2870 BUG_ON(child->kmemcg_id != kmemcg_id); 2871 child->kmemcg_id = parent->kmemcg_id; 2872 if (!memcg->use_hierarchy) 2873 break; 2874 } 2875 rcu_read_unlock(); 2876 2877 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); 2878 2879 memcg_free_cache_id(kmemcg_id); 2880 } 2881 2882 static void memcg_free_kmem(struct mem_cgroup *memcg) 2883 { 2884 /* css_alloc() failed, offlining didn't happen */ 2885 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 2886 memcg_offline_kmem(memcg); 2887 2888 if (memcg->kmem_state == KMEM_ALLOCATED) { 2889 memcg_destroy_kmem_caches(memcg); 2890 static_branch_dec(&memcg_kmem_enabled_key); 2891 WARN_ON(page_counter_read(&memcg->kmem)); 2892 } 2893 } 2894 #else 2895 static int memcg_online_kmem(struct mem_cgroup *memcg) 2896 { 2897 return 0; 2898 } 2899 static void memcg_offline_kmem(struct mem_cgroup *memcg) 2900 { 2901 } 2902 static void memcg_free_kmem(struct mem_cgroup *memcg) 2903 { 2904 } 2905 #endif /* !CONFIG_SLOB */ 2906 2907 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, 2908 unsigned long limit) 2909 { 2910 int ret; 2911 2912 mutex_lock(&memcg_limit_mutex); 2913 ret = page_counter_limit(&memcg->kmem, limit); 2914 mutex_unlock(&memcg_limit_mutex); 2915 return ret; 2916 } 2917 2918 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit) 2919 { 2920 int ret; 2921 2922 mutex_lock(&memcg_limit_mutex); 2923 2924 ret = page_counter_limit(&memcg->tcpmem, limit); 2925 if (ret) 2926 goto out; 2927 2928 if (!memcg->tcpmem_active) { 2929 /* 2930 * The active flag needs to be written after the static_key 2931 * update. This is what guarantees that the socket activation 2932 * function is the last one to run. See mem_cgroup_sk_alloc() 2933 * for details, and note that we don't mark any socket as 2934 * belonging to this memcg until that flag is up. 2935 * 2936 * We need to do this, because static_keys will span multiple 2937 * sites, but we can't control their order. 
If we mark a socket 2938 * as accounted, but the accounting functions are not patched in 2939 * yet, we'll lose accounting. 2940 * 2941 * We never race with the readers in mem_cgroup_sk_alloc(), 2942 * because when this value change, the code to process it is not 2943 * patched in yet. 2944 */ 2945 static_branch_inc(&memcg_sockets_enabled_key); 2946 memcg->tcpmem_active = true; 2947 } 2948 out: 2949 mutex_unlock(&memcg_limit_mutex); 2950 return ret; 2951 } 2952 2953 /* 2954 * The user of this function is... 2955 * RES_LIMIT. 2956 */ 2957 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 2958 char *buf, size_t nbytes, loff_t off) 2959 { 2960 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 2961 unsigned long nr_pages; 2962 int ret; 2963 2964 buf = strstrip(buf); 2965 ret = page_counter_memparse(buf, "-1", &nr_pages); 2966 if (ret) 2967 return ret; 2968 2969 switch (MEMFILE_ATTR(of_cft(of)->private)) { 2970 case RES_LIMIT: 2971 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 2972 ret = -EINVAL; 2973 break; 2974 } 2975 switch (MEMFILE_TYPE(of_cft(of)->private)) { 2976 case _MEM: 2977 ret = mem_cgroup_resize_limit(memcg, nr_pages); 2978 break; 2979 case _MEMSWAP: 2980 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); 2981 break; 2982 case _KMEM: 2983 ret = memcg_update_kmem_limit(memcg, nr_pages); 2984 break; 2985 case _TCP: 2986 ret = memcg_update_tcp_limit(memcg, nr_pages); 2987 break; 2988 } 2989 break; 2990 case RES_SOFT_LIMIT: 2991 memcg->soft_limit = nr_pages; 2992 ret = 0; 2993 break; 2994 } 2995 return ret ?: nbytes; 2996 } 2997 2998 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 2999 size_t nbytes, loff_t off) 3000 { 3001 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3002 struct page_counter *counter; 3003 3004 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3005 case _MEM: 3006 counter = &memcg->memory; 3007 break; 3008 case _MEMSWAP: 3009 counter = &memcg->memsw; 3010 break; 3011 case _KMEM: 3012 counter = &memcg->kmem; 3013 break; 3014 case _TCP: 3015 counter = &memcg->tcpmem; 3016 break; 3017 default: 3018 BUG(); 3019 } 3020 3021 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3022 case RES_MAX_USAGE: 3023 page_counter_reset_watermark(counter); 3024 break; 3025 case RES_FAILCNT: 3026 counter->failcnt = 0; 3027 break; 3028 default: 3029 BUG(); 3030 } 3031 3032 return nbytes; 3033 } 3034 3035 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3036 struct cftype *cft) 3037 { 3038 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3039 } 3040 3041 #ifdef CONFIG_MMU 3042 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3043 struct cftype *cft, u64 val) 3044 { 3045 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3046 3047 if (val & ~MOVE_MASK) 3048 return -EINVAL; 3049 3050 /* 3051 * No kind of locking is needed in here, because ->can_attach() will 3052 * check this value once in the beginning of the process, and then carry 3053 * on with stale data. This means that changes to this value will only 3054 * affect task migrations starting after the change. 
3055 */ 3056 memcg->move_charge_at_immigrate = val; 3057 return 0; 3058 } 3059 #else 3060 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3061 struct cftype *cft, u64 val) 3062 { 3063 return -ENOSYS; 3064 } 3065 #endif 3066 3067 #ifdef CONFIG_NUMA 3068 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3069 { 3070 struct numa_stat { 3071 const char *name; 3072 unsigned int lru_mask; 3073 }; 3074 3075 static const struct numa_stat stats[] = { 3076 { "total", LRU_ALL }, 3077 { "file", LRU_ALL_FILE }, 3078 { "anon", LRU_ALL_ANON }, 3079 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3080 }; 3081 const struct numa_stat *stat; 3082 int nid; 3083 unsigned long nr; 3084 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3085 3086 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3087 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); 3088 seq_printf(m, "%s=%lu", stat->name, nr); 3089 for_each_node_state(nid, N_MEMORY) { 3090 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, 3091 stat->lru_mask); 3092 seq_printf(m, " N%d=%lu", nid, nr); 3093 } 3094 seq_putc(m, '\n'); 3095 } 3096 3097 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3098 struct mem_cgroup *iter; 3099 3100 nr = 0; 3101 for_each_mem_cgroup_tree(iter, memcg) 3102 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3103 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr); 3104 for_each_node_state(nid, N_MEMORY) { 3105 nr = 0; 3106 for_each_mem_cgroup_tree(iter, memcg) 3107 nr += mem_cgroup_node_nr_lru_pages( 3108 iter, nid, stat->lru_mask); 3109 seq_printf(m, " N%d=%lu", nid, nr); 3110 } 3111 seq_putc(m, '\n'); 3112 } 3113 3114 return 0; 3115 } 3116 #endif /* CONFIG_NUMA */ 3117 3118 /* Universal VM events cgroup1 shows, original sort order */ 3119 unsigned int memcg1_events[] = { 3120 PGPGIN, 3121 PGPGOUT, 3122 PGFAULT, 3123 PGMAJFAULT, 3124 }; 3125 3126 static const char *const memcg1_event_names[] = { 3127 "pgpgin", 3128 "pgpgout", 3129 "pgfault", 3130 "pgmajfault", 3131 }; 3132 3133 static int memcg_stat_show(struct seq_file *m, void *v) 3134 { 3135 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 3136 unsigned long memory, memsw; 3137 struct mem_cgroup *mi; 3138 unsigned int i; 3139 3140 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3141 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); 3142 3143 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3144 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3145 continue; 3146 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 3147 memcg_page_state(memcg, memcg1_stats[i]) * 3148 PAGE_SIZE); 3149 } 3150 3151 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3152 seq_printf(m, "%s %lu\n", memcg1_event_names[i], 3153 memcg_sum_events(memcg, memcg1_events[i])); 3154 3155 for (i = 0; i < NR_LRU_LISTS; i++) 3156 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i], 3157 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); 3158 3159 /* Hierarchical information */ 3160 memory = memsw = PAGE_COUNTER_MAX; 3161 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3162 memory = min(memory, mi->memory.limit); 3163 memsw = min(memsw, mi->memsw.limit); 3164 } 3165 seq_printf(m, "hierarchical_memory_limit %llu\n", 3166 (u64)memory * PAGE_SIZE); 3167 if (do_memsw_account()) 3168 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3169 (u64)memsw * PAGE_SIZE); 3170 3171 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3172 unsigned long long val = 0; 3173 3174 if (memcg1_stats[i] == MEMCG_SWAP && 
!do_memsw_account()) 3175 continue; 3176 for_each_mem_cgroup_tree(mi, memcg) 3177 val += memcg_page_state(mi, memcg1_stats[i]) * 3178 PAGE_SIZE; 3179 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val); 3180 } 3181 3182 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) { 3183 unsigned long long val = 0; 3184 3185 for_each_mem_cgroup_tree(mi, memcg) 3186 val += memcg_sum_events(mi, memcg1_events[i]); 3187 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val); 3188 } 3189 3190 for (i = 0; i < NR_LRU_LISTS; i++) { 3191 unsigned long long val = 0; 3192 3193 for_each_mem_cgroup_tree(mi, memcg) 3194 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE; 3195 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val); 3196 } 3197 3198 #ifdef CONFIG_DEBUG_VM 3199 { 3200 pg_data_t *pgdat; 3201 struct mem_cgroup_per_node *mz; 3202 struct zone_reclaim_stat *rstat; 3203 unsigned long recent_rotated[2] = {0, 0}; 3204 unsigned long recent_scanned[2] = {0, 0}; 3205 3206 for_each_online_pgdat(pgdat) { 3207 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 3208 rstat = &mz->lruvec.reclaim_stat; 3209 3210 recent_rotated[0] += rstat->recent_rotated[0]; 3211 recent_rotated[1] += rstat->recent_rotated[1]; 3212 recent_scanned[0] += rstat->recent_scanned[0]; 3213 recent_scanned[1] += rstat->recent_scanned[1]; 3214 } 3215 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]); 3216 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]); 3217 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]); 3218 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]); 3219 } 3220 #endif 3221 3222 return 0; 3223 } 3224 3225 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 3226 struct cftype *cft) 3227 { 3228 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3229 3230 return mem_cgroup_swappiness(memcg); 3231 } 3232 3233 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 3234 struct cftype *cft, u64 val) 3235 { 3236 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3237 3238 if (val > 100) 3239 return -EINVAL; 3240 3241 if (css->parent) 3242 memcg->swappiness = val; 3243 else 3244 vm_swappiness = val; 3245 3246 return 0; 3247 } 3248 3249 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 3250 { 3251 struct mem_cgroup_threshold_ary *t; 3252 unsigned long usage; 3253 int i; 3254 3255 rcu_read_lock(); 3256 if (!swap) 3257 t = rcu_dereference(memcg->thresholds.primary); 3258 else 3259 t = rcu_dereference(memcg->memsw_thresholds.primary); 3260 3261 if (!t) 3262 goto unlock; 3263 3264 usage = mem_cgroup_usage(memcg, swap); 3265 3266 /* 3267 * current_threshold points to threshold just below or equal to usage. 3268 * If it's not true, a threshold was crossed after last 3269 * call of __mem_cgroup_threshold(). 3270 */ 3271 i = t->current_threshold; 3272 3273 /* 3274 * Iterate backward over array of thresholds starting from 3275 * current_threshold and check if a threshold is crossed. 3276 * If none of thresholds below usage is crossed, we read 3277 * only one element of the array here. 3278 */ 3279 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 3280 eventfd_signal(t->entries[i].eventfd, 1); 3281 3282 /* i = current_threshold + 1 */ 3283 i++; 3284 3285 /* 3286 * Iterate forward over array of thresholds starting from 3287 * current_threshold+1 and check if a threshold is crossed. 3288 * If none of thresholds above usage is crossed, we read 3289 * only one element of the array here. 
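 *
 * Worked example (editorial, not in the original source): with thresholds
 * registered at 4M, 8M and 16M, current_threshold pointing at 8M and usage
 * now at 20M, the backward loop above signals nothing, the loop below
 * signals the 16M eventfd, and current_threshold ends up at 16M. Had usage
 * instead dropped to 2M, the backward loop would have signalled 8M and 4M
 * and the loop below would terminate immediately.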
3290 */ 3291 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 3292 eventfd_signal(t->entries[i].eventfd, 1); 3293 3294 /* Update current_threshold */ 3295 t->current_threshold = i - 1; 3296 unlock: 3297 rcu_read_unlock(); 3298 } 3299 3300 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 3301 { 3302 while (memcg) { 3303 __mem_cgroup_threshold(memcg, false); 3304 if (do_memsw_account()) 3305 __mem_cgroup_threshold(memcg, true); 3306 3307 memcg = parent_mem_cgroup(memcg); 3308 } 3309 } 3310 3311 static int compare_thresholds(const void *a, const void *b) 3312 { 3313 const struct mem_cgroup_threshold *_a = a; 3314 const struct mem_cgroup_threshold *_b = b; 3315 3316 if (_a->threshold > _b->threshold) 3317 return 1; 3318 3319 if (_a->threshold < _b->threshold) 3320 return -1; 3321 3322 return 0; 3323 } 3324 3325 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 3326 { 3327 struct mem_cgroup_eventfd_list *ev; 3328 3329 spin_lock(&memcg_oom_lock); 3330 3331 list_for_each_entry(ev, &memcg->oom_notify, list) 3332 eventfd_signal(ev->eventfd, 1); 3333 3334 spin_unlock(&memcg_oom_lock); 3335 return 0; 3336 } 3337 3338 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 3339 { 3340 struct mem_cgroup *iter; 3341 3342 for_each_mem_cgroup_tree(iter, memcg) 3343 mem_cgroup_oom_notify_cb(iter); 3344 } 3345 3346 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3347 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 3348 { 3349 struct mem_cgroup_thresholds *thresholds; 3350 struct mem_cgroup_threshold_ary *new; 3351 unsigned long threshold; 3352 unsigned long usage; 3353 int i, size, ret; 3354 3355 ret = page_counter_memparse(args, "-1", &threshold); 3356 if (ret) 3357 return ret; 3358 3359 mutex_lock(&memcg->thresholds_lock); 3360 3361 if (type == _MEM) { 3362 thresholds = &memcg->thresholds; 3363 usage = mem_cgroup_usage(memcg, false); 3364 } else if (type == _MEMSWAP) { 3365 thresholds = &memcg->memsw_thresholds; 3366 usage = mem_cgroup_usage(memcg, true); 3367 } else 3368 BUG(); 3369 3370 /* Check if a threshold crossed before adding a new one */ 3371 if (thresholds->primary) 3372 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3373 3374 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 3375 3376 /* Allocate memory for new array of thresholds */ 3377 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), 3378 GFP_KERNEL); 3379 if (!new) { 3380 ret = -ENOMEM; 3381 goto unlock; 3382 } 3383 new->size = size; 3384 3385 /* Copy thresholds (if any) to new array */ 3386 if (thresholds->primary) { 3387 memcpy(new->entries, thresholds->primary->entries, (size - 1) * 3388 sizeof(struct mem_cgroup_threshold)); 3389 } 3390 3391 /* Add new threshold */ 3392 new->entries[size - 1].eventfd = eventfd; 3393 new->entries[size - 1].threshold = threshold; 3394 3395 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3396 sort(new->entries, size, sizeof(struct mem_cgroup_threshold), 3397 compare_thresholds, NULL); 3398 3399 /* Find current threshold */ 3400 new->current_threshold = -1; 3401 for (i = 0; i < size; i++) { 3402 if (new->entries[i].threshold <= usage) { 3403 /* 3404 * new->current_threshold will not be used until 3405 * rcu_assign_pointer(), so it's safe to increment 3406 * it here. 
3407 */ 3408 ++new->current_threshold; 3409 } else 3410 break; 3411 } 3412 3413 /* Free old spare buffer and save old primary buffer as spare */ 3414 kfree(thresholds->spare); 3415 thresholds->spare = thresholds->primary; 3416 3417 rcu_assign_pointer(thresholds->primary, new); 3418 3419 /* To be sure that nobody uses thresholds */ 3420 synchronize_rcu(); 3421 3422 unlock: 3423 mutex_unlock(&memcg->thresholds_lock); 3424 3425 return ret; 3426 } 3427 3428 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 3429 struct eventfd_ctx *eventfd, const char *args) 3430 { 3431 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 3432 } 3433 3434 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 3435 struct eventfd_ctx *eventfd, const char *args) 3436 { 3437 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 3438 } 3439 3440 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3441 struct eventfd_ctx *eventfd, enum res_type type) 3442 { 3443 struct mem_cgroup_thresholds *thresholds; 3444 struct mem_cgroup_threshold_ary *new; 3445 unsigned long usage; 3446 int i, j, size; 3447 3448 mutex_lock(&memcg->thresholds_lock); 3449 3450 if (type == _MEM) { 3451 thresholds = &memcg->thresholds; 3452 usage = mem_cgroup_usage(memcg, false); 3453 } else if (type == _MEMSWAP) { 3454 thresholds = &memcg->memsw_thresholds; 3455 usage = mem_cgroup_usage(memcg, true); 3456 } else 3457 BUG(); 3458 3459 if (!thresholds->primary) 3460 goto unlock; 3461 3462 /* Check if a threshold crossed before removing */ 3463 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3464 3465 /* Calculate new number of threshold */ 3466 size = 0; 3467 for (i = 0; i < thresholds->primary->size; i++) { 3468 if (thresholds->primary->entries[i].eventfd != eventfd) 3469 size++; 3470 } 3471 3472 new = thresholds->spare; 3473 3474 /* Set thresholds array to NULL if we don't have thresholds */ 3475 if (!size) { 3476 kfree(new); 3477 new = NULL; 3478 goto swap_buffers; 3479 } 3480 3481 new->size = size; 3482 3483 /* Copy thresholds and find current threshold */ 3484 new->current_threshold = -1; 3485 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 3486 if (thresholds->primary->entries[i].eventfd == eventfd) 3487 continue; 3488 3489 new->entries[j] = thresholds->primary->entries[i]; 3490 if (new->entries[j].threshold <= usage) { 3491 /* 3492 * new->current_threshold will not be used 3493 * until rcu_assign_pointer(), so it's safe to increment 3494 * it here. 
3495 */ 3496 ++new->current_threshold; 3497 } 3498 j++; 3499 } 3500 3501 swap_buffers: 3502 /* Swap primary and spare array */ 3503 thresholds->spare = thresholds->primary; 3504 3505 rcu_assign_pointer(thresholds->primary, new); 3506 3507 /* To be sure that nobody uses thresholds */ 3508 synchronize_rcu(); 3509 3510 /* If all events are unregistered, free the spare array */ 3511 if (!new) { 3512 kfree(thresholds->spare); 3513 thresholds->spare = NULL; 3514 } 3515 unlock: 3516 mutex_unlock(&memcg->thresholds_lock); 3517 } 3518 3519 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3520 struct eventfd_ctx *eventfd) 3521 { 3522 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 3523 } 3524 3525 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 3526 struct eventfd_ctx *eventfd) 3527 { 3528 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 3529 } 3530 3531 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 3532 struct eventfd_ctx *eventfd, const char *args) 3533 { 3534 struct mem_cgroup_eventfd_list *event; 3535 3536 event = kmalloc(sizeof(*event), GFP_KERNEL); 3537 if (!event) 3538 return -ENOMEM; 3539 3540 spin_lock(&memcg_oom_lock); 3541 3542 event->eventfd = eventfd; 3543 list_add(&event->list, &memcg->oom_notify); 3544 3545 /* already in OOM ? */ 3546 if (memcg->under_oom) 3547 eventfd_signal(eventfd, 1); 3548 spin_unlock(&memcg_oom_lock); 3549 3550 return 0; 3551 } 3552 3553 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 3554 struct eventfd_ctx *eventfd) 3555 { 3556 struct mem_cgroup_eventfd_list *ev, *tmp; 3557 3558 spin_lock(&memcg_oom_lock); 3559 3560 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 3561 if (ev->eventfd == eventfd) { 3562 list_del(&ev->list); 3563 kfree(ev); 3564 } 3565 } 3566 3567 spin_unlock(&memcg_oom_lock); 3568 } 3569 3570 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 3571 { 3572 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); 3573 3574 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3575 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3576 seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL)); 3577 return 0; 3578 } 3579 3580 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 3581 struct cftype *cft, u64 val) 3582 { 3583 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3584 3585 /* cannot set to root cgroup and only 0 and 1 are allowed */ 3586 if (!css->parent || !((val == 0) || (val == 1))) 3587 return -EINVAL; 3588 3589 memcg->oom_kill_disable = val; 3590 if (!val) 3591 memcg_oom_recover(memcg); 3592 3593 return 0; 3594 } 3595 3596 #ifdef CONFIG_CGROUP_WRITEBACK 3597 3598 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) 3599 { 3600 return &memcg->cgwb_list; 3601 } 3602 3603 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3604 { 3605 return wb_domain_init(&memcg->cgwb_domain, gfp); 3606 } 3607 3608 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3609 { 3610 wb_domain_exit(&memcg->cgwb_domain); 3611 } 3612 3613 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3614 { 3615 wb_domain_size_changed(&memcg->cgwb_domain); 3616 } 3617 3618 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 3619 { 3620 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3621 3622 if (!memcg->css.parent) 3623 return NULL; 3624 3625 return &memcg->cgwb_domain; 3626 } 
3627 3628 /** 3629 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3630 * @wb: bdi_writeback in question 3631 * @pfilepages: out parameter for number of file pages 3632 * @pheadroom: out parameter for number of allocatable pages according to memcg 3633 * @pdirty: out parameter for number of dirty pages 3634 * @pwriteback: out parameter for number of pages under writeback 3635 * 3636 * Determine the numbers of file, headroom, dirty, and writeback pages in 3637 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3638 * is a bit more involved. 3639 * 3640 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3641 * headroom is calculated as the lowest headroom of itself and the 3642 * ancestors. Note that this doesn't consider the actual amount of 3643 * available memory in the system. The caller should further cap 3644 * *@pheadroom accordingly. 3645 */ 3646 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3647 unsigned long *pheadroom, unsigned long *pdirty, 3648 unsigned long *pwriteback) 3649 { 3650 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3651 struct mem_cgroup *parent; 3652 3653 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 3654 3655 /* this should eventually include NR_UNSTABLE_NFS */ 3656 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 3657 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3658 (1 << LRU_ACTIVE_FILE)); 3659 *pheadroom = PAGE_COUNTER_MAX; 3660 3661 while ((parent = parent_mem_cgroup(memcg))) { 3662 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3663 unsigned long used = page_counter_read(&memcg->memory); 3664 3665 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3666 memcg = parent; 3667 } 3668 } 3669 3670 #else /* CONFIG_CGROUP_WRITEBACK */ 3671 3672 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 3673 { 3674 return 0; 3675 } 3676 3677 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 3678 { 3679 } 3680 3681 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 3682 { 3683 } 3684 3685 #endif /* CONFIG_CGROUP_WRITEBACK */ 3686 3687 /* 3688 * DO NOT USE IN NEW FILES. 3689 * 3690 * "cgroup.event_control" implementation. 3691 * 3692 * This is way over-engineered. It tries to support fully configurable 3693 * events for each user. Such level of flexibility is completely 3694 * unnecessary especially in the light of the planned unified hierarchy. 3695 * 3696 * Please deprecate this and replace with something simpler if at all 3697 * possible. 3698 */ 3699 3700 /* 3701 * Unregister event and free resources. 3702 * 3703 * Gets called from workqueue. 3704 */ 3705 static void memcg_event_remove(struct work_struct *work) 3706 { 3707 struct mem_cgroup_event *event = 3708 container_of(work, struct mem_cgroup_event, remove); 3709 struct mem_cgroup *memcg = event->memcg; 3710 3711 remove_wait_queue(event->wqh, &event->wait); 3712 3713 event->unregister_event(memcg, event->eventfd); 3714 3715 /* Notify userspace the event is going away. */ 3716 eventfd_signal(event->eventfd, 1); 3717 3718 eventfd_ctx_put(event->eventfd); 3719 kfree(event); 3720 css_put(&memcg->css); 3721 } 3722 3723 /* 3724 * Gets called on POLLHUP on eventfd when user closes it. 3725 * 3726 * Called with wqh->lock held and interrupts disabled. 
3727 */ 3728 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 3729 int sync, void *key) 3730 { 3731 struct mem_cgroup_event *event = 3732 container_of(wait, struct mem_cgroup_event, wait); 3733 struct mem_cgroup *memcg = event->memcg; 3734 unsigned long flags = (unsigned long)key; 3735 3736 if (flags & POLLHUP) { 3737 /* 3738 * If the event has been detached at cgroup removal, we 3739 * can simply return knowing the other side will cleanup 3740 * for us. 3741 * 3742 * We can't race against event freeing since the other 3743 * side will require wqh->lock via remove_wait_queue(), 3744 * which we hold. 3745 */ 3746 spin_lock(&memcg->event_list_lock); 3747 if (!list_empty(&event->list)) { 3748 list_del_init(&event->list); 3749 /* 3750 * We are in atomic context, but cgroup_event_remove() 3751 * may sleep, so we have to call it in workqueue. 3752 */ 3753 schedule_work(&event->remove); 3754 } 3755 spin_unlock(&memcg->event_list_lock); 3756 } 3757 3758 return 0; 3759 } 3760 3761 static void memcg_event_ptable_queue_proc(struct file *file, 3762 wait_queue_head_t *wqh, poll_table *pt) 3763 { 3764 struct mem_cgroup_event *event = 3765 container_of(pt, struct mem_cgroup_event, pt); 3766 3767 event->wqh = wqh; 3768 add_wait_queue(wqh, &event->wait); 3769 } 3770 3771 /* 3772 * DO NOT USE IN NEW FILES. 3773 * 3774 * Parse input and register new cgroup event handler. 3775 * 3776 * Input must be in format '<event_fd> <control_fd> <args>'. 3777 * Interpretation of args is defined by control file implementation. 3778 */ 3779 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3780 char *buf, size_t nbytes, loff_t off) 3781 { 3782 struct cgroup_subsys_state *css = of_css(of); 3783 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3784 struct mem_cgroup_event *event; 3785 struct cgroup_subsys_state *cfile_css; 3786 unsigned int efd, cfd; 3787 struct fd efile; 3788 struct fd cfile; 3789 const char *name; 3790 char *endp; 3791 int ret; 3792 3793 buf = strstrip(buf); 3794 3795 efd = simple_strtoul(buf, &endp, 10); 3796 if (*endp != ' ') 3797 return -EINVAL; 3798 buf = endp + 1; 3799 3800 cfd = simple_strtoul(buf, &endp, 10); 3801 if ((*endp != ' ') && (*endp != '\0')) 3802 return -EINVAL; 3803 buf = endp + 1; 3804 3805 event = kzalloc(sizeof(*event), GFP_KERNEL); 3806 if (!event) 3807 return -ENOMEM; 3808 3809 event->memcg = memcg; 3810 INIT_LIST_HEAD(&event->list); 3811 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3812 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3813 INIT_WORK(&event->remove, memcg_event_remove); 3814 3815 efile = fdget(efd); 3816 if (!efile.file) { 3817 ret = -EBADF; 3818 goto out_kfree; 3819 } 3820 3821 event->eventfd = eventfd_ctx_fileget(efile.file); 3822 if (IS_ERR(event->eventfd)) { 3823 ret = PTR_ERR(event->eventfd); 3824 goto out_put_efile; 3825 } 3826 3827 cfile = fdget(cfd); 3828 if (!cfile.file) { 3829 ret = -EBADF; 3830 goto out_put_eventfd; 3831 } 3832 3833 /* the process need read permission on control file */ 3834 /* AV: shouldn't we check that it's been opened for read instead? */ 3835 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3836 if (ret < 0) 3837 goto out_put_cfile; 3838 3839 /* 3840 * Determine the event callbacks and set them in @event. This used 3841 * to be done via struct cftype but cgroup core no longer knows 3842 * about these events. The following is crude but the whole thing 3843 * is for compatibility anyway. 3844 * 3845 * DO NOT ADD NEW FILES. 
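 *
 * For reference (editorial sketch, not in the original source), the
 * userspace side of this legacy interface looks roughly like the
 * following, with all files opened relative to the memcg's directory:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int ctl = open("cgroup.event_control", O_WRONLY);
 *	uint64_t cnt;
 *
 *	dprintf(ctl, "%d %d %llu", efd, cfd, 8ULL << 20);
 *	read(efd, &cnt, sizeof(cnt));	read() blocks until the 8M usage
 *					threshold is crossed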
3846 */ 3847 name = cfile.file->f_path.dentry->d_name.name; 3848 3849 if (!strcmp(name, "memory.usage_in_bytes")) { 3850 event->register_event = mem_cgroup_usage_register_event; 3851 event->unregister_event = mem_cgroup_usage_unregister_event; 3852 } else if (!strcmp(name, "memory.oom_control")) { 3853 event->register_event = mem_cgroup_oom_register_event; 3854 event->unregister_event = mem_cgroup_oom_unregister_event; 3855 } else if (!strcmp(name, "memory.pressure_level")) { 3856 event->register_event = vmpressure_register_event; 3857 event->unregister_event = vmpressure_unregister_event; 3858 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3859 event->register_event = memsw_cgroup_usage_register_event; 3860 event->unregister_event = memsw_cgroup_usage_unregister_event; 3861 } else { 3862 ret = -EINVAL; 3863 goto out_put_cfile; 3864 } 3865 3866 /* 3867 * Verify @cfile should belong to @css. Also, remaining events are 3868 * automatically removed on cgroup destruction but the removal is 3869 * asynchronous, so take an extra ref on @css. 3870 */ 3871 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3872 &memory_cgrp_subsys); 3873 ret = -EINVAL; 3874 if (IS_ERR(cfile_css)) 3875 goto out_put_cfile; 3876 if (cfile_css != css) { 3877 css_put(cfile_css); 3878 goto out_put_cfile; 3879 } 3880 3881 ret = event->register_event(memcg, event->eventfd, buf); 3882 if (ret) 3883 goto out_put_css; 3884 3885 efile.file->f_op->poll(efile.file, &event->pt); 3886 3887 spin_lock(&memcg->event_list_lock); 3888 list_add(&event->list, &memcg->event_list); 3889 spin_unlock(&memcg->event_list_lock); 3890 3891 fdput(cfile); 3892 fdput(efile); 3893 3894 return nbytes; 3895 3896 out_put_css: 3897 css_put(css); 3898 out_put_cfile: 3899 fdput(cfile); 3900 out_put_eventfd: 3901 eventfd_ctx_put(event->eventfd); 3902 out_put_efile: 3903 fdput(efile); 3904 out_kfree: 3905 kfree(event); 3906 3907 return ret; 3908 } 3909 3910 static struct cftype mem_cgroup_legacy_files[] = { 3911 { 3912 .name = "usage_in_bytes", 3913 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 3914 .read_u64 = mem_cgroup_read_u64, 3915 }, 3916 { 3917 .name = "max_usage_in_bytes", 3918 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 3919 .write = mem_cgroup_reset, 3920 .read_u64 = mem_cgroup_read_u64, 3921 }, 3922 { 3923 .name = "limit_in_bytes", 3924 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 3925 .write = mem_cgroup_write, 3926 .read_u64 = mem_cgroup_read_u64, 3927 }, 3928 { 3929 .name = "soft_limit_in_bytes", 3930 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 3931 .write = mem_cgroup_write, 3932 .read_u64 = mem_cgroup_read_u64, 3933 }, 3934 { 3935 .name = "failcnt", 3936 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 3937 .write = mem_cgroup_reset, 3938 .read_u64 = mem_cgroup_read_u64, 3939 }, 3940 { 3941 .name = "stat", 3942 .seq_show = memcg_stat_show, 3943 }, 3944 { 3945 .name = "force_empty", 3946 .write = mem_cgroup_force_empty_write, 3947 }, 3948 { 3949 .name = "use_hierarchy", 3950 .write_u64 = mem_cgroup_hierarchy_write, 3951 .read_u64 = mem_cgroup_hierarchy_read, 3952 }, 3953 { 3954 .name = "cgroup.event_control", /* XXX: for compat */ 3955 .write = memcg_write_event_control, 3956 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 3957 }, 3958 { 3959 .name = "swappiness", 3960 .read_u64 = mem_cgroup_swappiness_read, 3961 .write_u64 = mem_cgroup_swappiness_write, 3962 }, 3963 { 3964 .name = "move_charge_at_immigrate", 3965 .read_u64 = mem_cgroup_move_charge_read, 3966 .write_u64 = 
mem_cgroup_move_charge_write, 3967 }, 3968 { 3969 .name = "oom_control", 3970 .seq_show = mem_cgroup_oom_control_read, 3971 .write_u64 = mem_cgroup_oom_control_write, 3972 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 3973 }, 3974 { 3975 .name = "pressure_level", 3976 }, 3977 #ifdef CONFIG_NUMA 3978 { 3979 .name = "numa_stat", 3980 .seq_show = memcg_numa_stat_show, 3981 }, 3982 #endif 3983 { 3984 .name = "kmem.limit_in_bytes", 3985 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 3986 .write = mem_cgroup_write, 3987 .read_u64 = mem_cgroup_read_u64, 3988 }, 3989 { 3990 .name = "kmem.usage_in_bytes", 3991 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 3992 .read_u64 = mem_cgroup_read_u64, 3993 }, 3994 { 3995 .name = "kmem.failcnt", 3996 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 3997 .write = mem_cgroup_reset, 3998 .read_u64 = mem_cgroup_read_u64, 3999 }, 4000 { 4001 .name = "kmem.max_usage_in_bytes", 4002 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4003 .write = mem_cgroup_reset, 4004 .read_u64 = mem_cgroup_read_u64, 4005 }, 4006 #ifdef CONFIG_SLABINFO 4007 { 4008 .name = "kmem.slabinfo", 4009 .seq_start = memcg_slab_start, 4010 .seq_next = memcg_slab_next, 4011 .seq_stop = memcg_slab_stop, 4012 .seq_show = memcg_slab_show, 4013 }, 4014 #endif 4015 { 4016 .name = "kmem.tcp.limit_in_bytes", 4017 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4018 .write = mem_cgroup_write, 4019 .read_u64 = mem_cgroup_read_u64, 4020 }, 4021 { 4022 .name = "kmem.tcp.usage_in_bytes", 4023 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4024 .read_u64 = mem_cgroup_read_u64, 4025 }, 4026 { 4027 .name = "kmem.tcp.failcnt", 4028 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4029 .write = mem_cgroup_reset, 4030 .read_u64 = mem_cgroup_read_u64, 4031 }, 4032 { 4033 .name = "kmem.tcp.max_usage_in_bytes", 4034 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4035 .write = mem_cgroup_reset, 4036 .read_u64 = mem_cgroup_read_u64, 4037 }, 4038 { }, /* terminate */ 4039 }; 4040 4041 /* 4042 * Private memory cgroup IDR 4043 * 4044 * Swap-out records and page cache shadow entries need to store memcg 4045 * references in constrained space, so we maintain an ID space that is 4046 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4047 * memory-controlled cgroups to 64k. 4048 * 4049 * However, there usually are many references to the oflline CSS after 4050 * the cgroup has been destroyed, such as page cache or reclaimable 4051 * slab objects, that don't need to hang on to the ID. We want to keep 4052 * those dead CSS from occupying IDs, or we might quickly exhaust the 4053 * relatively small ID space and prevent the creation of new cgroups 4054 * even when there are much fewer than 64k cgroups - possibly none. 4055 * 4056 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4057 * be freed and recycled when it's no longer needed, which is usually 4058 * when the CSS is offlined. 4059 * 4060 * The only exception to that are records of swapped out tmpfs/shmem 4061 * pages that need to be attributed to live ancestors on swapin. But 4062 * those references are manageable from userspace. 
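 *
 * As an orientation aid (this merely restates what the code below
 * does): mem_cgroup_alloc() reserves an ID with idr_alloc(),
 * mem_cgroup_css_online() sets the initial reference, swap-out records
 * take extra references via mem_cgroup_id_get_many(), and
 * mem_cgroup_css_offline() drops the online reference; the final
 * mem_cgroup_id_put_many() removes the ID from the IDR and drops the
 * CSS reference that the ID was pinning.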
4063 */ 4064 4065 static DEFINE_IDR(mem_cgroup_idr); 4066 4067 static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4068 { 4069 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); 4070 atomic_add(n, &memcg->id.ref); 4071 } 4072 4073 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4074 { 4075 VM_BUG_ON(atomic_read(&memcg->id.ref) < n); 4076 if (atomic_sub_and_test(n, &memcg->id.ref)) { 4077 idr_remove(&mem_cgroup_idr, memcg->id.id); 4078 memcg->id.id = 0; 4079 4080 /* Memcg ID pins CSS */ 4081 css_put(&memcg->css); 4082 } 4083 } 4084 4085 static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4086 { 4087 mem_cgroup_id_get_many(memcg, 1); 4088 } 4089 4090 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4091 { 4092 mem_cgroup_id_put_many(memcg, 1); 4093 } 4094 4095 /** 4096 * mem_cgroup_from_id - look up a memcg from a memcg id 4097 * @id: the memcg id to look up 4098 * 4099 * Caller must hold rcu_read_lock(). 4100 */ 4101 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 4102 { 4103 WARN_ON_ONCE(!rcu_read_lock_held()); 4104 return idr_find(&mem_cgroup_idr, id); 4105 } 4106 4107 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4108 { 4109 struct mem_cgroup_per_node *pn; 4110 int tmp = node; 4111 /* 4112 * This routine is called against possible nodes. 4113 * But it's BUG to call kmalloc() against offline node. 4114 * 4115 * TODO: this routine can waste much memory for nodes which will 4116 * never be onlined. It's better to use memory hotplug callback 4117 * function. 4118 */ 4119 if (!node_state(node, N_NORMAL_MEMORY)) 4120 tmp = -1; 4121 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4122 if (!pn) 4123 return 1; 4124 4125 pn->lruvec_stat = alloc_percpu(struct lruvec_stat); 4126 if (!pn->lruvec_stat) { 4127 kfree(pn); 4128 return 1; 4129 } 4130 4131 lruvec_init(&pn->lruvec); 4132 pn->usage_in_excess = 0; 4133 pn->on_tree = false; 4134 pn->memcg = memcg; 4135 4136 memcg->nodeinfo[node] = pn; 4137 return 0; 4138 } 4139 4140 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 4141 { 4142 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 4143 4144 free_percpu(pn->lruvec_stat); 4145 kfree(pn); 4146 } 4147 4148 static void __mem_cgroup_free(struct mem_cgroup *memcg) 4149 { 4150 int node; 4151 4152 for_each_node(node) 4153 free_mem_cgroup_per_node_info(memcg, node); 4154 free_percpu(memcg->stat); 4155 kfree(memcg); 4156 } 4157 4158 static void mem_cgroup_free(struct mem_cgroup *memcg) 4159 { 4160 memcg_wb_domain_exit(memcg); 4161 __mem_cgroup_free(memcg); 4162 } 4163 4164 static struct mem_cgroup *mem_cgroup_alloc(void) 4165 { 4166 struct mem_cgroup *memcg; 4167 size_t size; 4168 int node; 4169 4170 size = sizeof(struct mem_cgroup); 4171 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4172 4173 memcg = kzalloc(size, GFP_KERNEL); 4174 if (!memcg) 4175 return NULL; 4176 4177 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 4178 1, MEM_CGROUP_ID_MAX, 4179 GFP_KERNEL); 4180 if (memcg->id.id < 0) 4181 goto fail; 4182 4183 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4184 if (!memcg->stat) 4185 goto fail; 4186 4187 for_each_node(node) 4188 if (alloc_mem_cgroup_per_node_info(memcg, node)) 4189 goto fail; 4190 4191 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4192 goto fail; 4193 4194 INIT_WORK(&memcg->high_work, high_work_func); 4195 memcg->last_scanned_node = MAX_NUMNODES; 4196 INIT_LIST_HEAD(&memcg->oom_notify); 4197 
mutex_init(&memcg->thresholds_lock); 4198 spin_lock_init(&memcg->move_lock); 4199 vmpressure_init(&memcg->vmpressure); 4200 INIT_LIST_HEAD(&memcg->event_list); 4201 spin_lock_init(&memcg->event_list_lock); 4202 memcg->socket_pressure = jiffies; 4203 #ifndef CONFIG_SLOB 4204 memcg->kmemcg_id = -1; 4205 #endif 4206 #ifdef CONFIG_CGROUP_WRITEBACK 4207 INIT_LIST_HEAD(&memcg->cgwb_list); 4208 #endif 4209 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4210 return memcg; 4211 fail: 4212 if (memcg->id.id > 0) 4213 idr_remove(&mem_cgroup_idr, memcg->id.id); 4214 __mem_cgroup_free(memcg); 4215 return NULL; 4216 } 4217 4218 static struct cgroup_subsys_state * __ref 4219 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4220 { 4221 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 4222 struct mem_cgroup *memcg; 4223 long error = -ENOMEM; 4224 4225 memcg = mem_cgroup_alloc(); 4226 if (!memcg) 4227 return ERR_PTR(error); 4228 4229 memcg->high = PAGE_COUNTER_MAX; 4230 memcg->soft_limit = PAGE_COUNTER_MAX; 4231 if (parent) { 4232 memcg->swappiness = mem_cgroup_swappiness(parent); 4233 memcg->oom_kill_disable = parent->oom_kill_disable; 4234 } 4235 if (parent && parent->use_hierarchy) { 4236 memcg->use_hierarchy = true; 4237 page_counter_init(&memcg->memory, &parent->memory); 4238 page_counter_init(&memcg->swap, &parent->swap); 4239 page_counter_init(&memcg->memsw, &parent->memsw); 4240 page_counter_init(&memcg->kmem, &parent->kmem); 4241 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 4242 } else { 4243 page_counter_init(&memcg->memory, NULL); 4244 page_counter_init(&memcg->swap, NULL); 4245 page_counter_init(&memcg->memsw, NULL); 4246 page_counter_init(&memcg->kmem, NULL); 4247 page_counter_init(&memcg->tcpmem, NULL); 4248 /* 4249 * Deeper hierachy with use_hierarchy == false doesn't make 4250 * much sense so let cgroup subsystem know about this 4251 * unfortunate state in our controller. 4252 */ 4253 if (parent != root_mem_cgroup) 4254 memory_cgrp_subsys.broken_hierarchy = true; 4255 } 4256 4257 /* The following stuff does not apply to the root */ 4258 if (!parent) { 4259 root_mem_cgroup = memcg; 4260 return &memcg->css; 4261 } 4262 4263 error = memcg_online_kmem(memcg); 4264 if (error) 4265 goto fail; 4266 4267 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4268 static_branch_inc(&memcg_sockets_enabled_key); 4269 4270 return &memcg->css; 4271 fail: 4272 mem_cgroup_free(memcg); 4273 return ERR_PTR(-ENOMEM); 4274 } 4275 4276 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 4277 { 4278 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4279 4280 /* Online state pins memcg ID, memcg ID pins CSS */ 4281 atomic_set(&memcg->id.ref, 1); 4282 css_get(css); 4283 return 0; 4284 } 4285 4286 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4287 { 4288 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4289 struct mem_cgroup_event *event, *tmp; 4290 4291 /* 4292 * Unregister events and notify userspace. 4293 * Notify userspace about cgroup removing only after rmdir of cgroup 4294 * directory to avoid race between userspace and kernelspace. 
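 *
 * (The list_del_init() below is done under event_list_lock, and the
 * rest of the teardown is deferred to the remove work, since the
 * unregister callbacks may sleep and must not run under the spinlock.)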
4295 */ 4296 spin_lock(&memcg->event_list_lock); 4297 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4298 list_del_init(&event->list); 4299 schedule_work(&event->remove); 4300 } 4301 spin_unlock(&memcg->event_list_lock); 4302 4303 memcg_offline_kmem(memcg); 4304 wb_memcg_offline(memcg); 4305 4306 mem_cgroup_id_put(memcg); 4307 } 4308 4309 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4310 { 4311 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4312 4313 invalidate_reclaim_iterators(memcg); 4314 } 4315 4316 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4317 { 4318 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4319 4320 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 4321 static_branch_dec(&memcg_sockets_enabled_key); 4322 4323 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 4324 static_branch_dec(&memcg_sockets_enabled_key); 4325 4326 vmpressure_cleanup(&memcg->vmpressure); 4327 cancel_work_sync(&memcg->high_work); 4328 mem_cgroup_remove_from_trees(memcg); 4329 memcg_free_kmem(memcg); 4330 mem_cgroup_free(memcg); 4331 } 4332 4333 /** 4334 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4335 * @css: the target css 4336 * 4337 * Reset the states of the mem_cgroup associated with @css. This is 4338 * invoked when the userland requests disabling on the default hierarchy 4339 * but the memcg is pinned through dependency. The memcg should stop 4340 * applying policies and should revert to the vanilla state as it may be 4341 * made visible again. 4342 * 4343 * The current implementation only resets the essential configurations. 4344 * This needs to be expanded to cover all the visible parts. 4345 */ 4346 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4347 { 4348 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4349 4350 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX); 4351 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX); 4352 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX); 4353 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX); 4354 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX); 4355 memcg->low = 0; 4356 memcg->high = PAGE_COUNTER_MAX; 4357 memcg->soft_limit = PAGE_COUNTER_MAX; 4358 memcg_wb_domain_size_changed(memcg); 4359 } 4360 4361 #ifdef CONFIG_MMU 4362 /* Handlers for move charge at task migration. 
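 *
 * A rough map of the flow implemented below (for orientation only):
 *
 *	mem_cgroup_can_attach()    - pick the mm and precharge its whole
 *				     address space to mc.to
 *	mem_cgroup_move_task()     - (post_attach) walk the page tables
 *				     and move charges from mc.from to mc.to
 *	mem_cgroup_clear_mc()      - return unused precharges and wake up
 *				     anyone waiting on mc.waitq
 *	mem_cgroup_cancel_attach() - undo the setup if the attach fails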
*/ 4363 static int mem_cgroup_do_precharge(unsigned long count) 4364 { 4365 int ret; 4366 4367 /* Try a single bulk charge without reclaim first, kswapd may wake */ 4368 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 4369 if (!ret) { 4370 mc.precharge += count; 4371 return ret; 4372 } 4373 4374 /* Try charges one by one with reclaim, but do not retry */ 4375 while (count--) { 4376 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 4377 if (ret) 4378 return ret; 4379 mc.precharge++; 4380 cond_resched(); 4381 } 4382 return 0; 4383 } 4384 4385 union mc_target { 4386 struct page *page; 4387 swp_entry_t ent; 4388 }; 4389 4390 enum mc_target_type { 4391 MC_TARGET_NONE = 0, 4392 MC_TARGET_PAGE, 4393 MC_TARGET_SWAP, 4394 }; 4395 4396 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 4397 unsigned long addr, pte_t ptent) 4398 { 4399 struct page *page = vm_normal_page(vma, addr, ptent); 4400 4401 if (!page || !page_mapped(page)) 4402 return NULL; 4403 if (PageAnon(page)) { 4404 if (!(mc.flags & MOVE_ANON)) 4405 return NULL; 4406 } else { 4407 if (!(mc.flags & MOVE_FILE)) 4408 return NULL; 4409 } 4410 if (!get_page_unless_zero(page)) 4411 return NULL; 4412 4413 return page; 4414 } 4415 4416 #ifdef CONFIG_SWAP 4417 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4418 pte_t ptent, swp_entry_t *entry) 4419 { 4420 struct page *page = NULL; 4421 swp_entry_t ent = pte_to_swp_entry(ptent); 4422 4423 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) 4424 return NULL; 4425 /* 4426 * Because lookup_swap_cache() updates some statistics counter, 4427 * we call find_get_page() with swapper_space directly. 4428 */ 4429 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 4430 if (do_memsw_account()) 4431 entry->val = ent.val; 4432 4433 return page; 4434 } 4435 #else 4436 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 4437 pte_t ptent, swp_entry_t *entry) 4438 { 4439 return NULL; 4440 } 4441 #endif 4442 4443 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 4444 unsigned long addr, pte_t ptent, swp_entry_t *entry) 4445 { 4446 struct page *page = NULL; 4447 struct address_space *mapping; 4448 pgoff_t pgoff; 4449 4450 if (!vma->vm_file) /* anonymous vma */ 4451 return NULL; 4452 if (!(mc.flags & MOVE_FILE)) 4453 return NULL; 4454 4455 mapping = vma->vm_file->f_mapping; 4456 pgoff = linear_page_index(vma, addr); 4457 4458 /* page is moved even if it's not RSS of this task(page-faulted). */ 4459 #ifdef CONFIG_SWAP 4460 /* shmem/tmpfs may report page out on swap: account for that too. */ 4461 if (shmem_mapping(mapping)) { 4462 page = find_get_entry(mapping, pgoff); 4463 if (radix_tree_exceptional_entry(page)) { 4464 swp_entry_t swp = radix_to_swp_entry(page); 4465 if (do_memsw_account()) 4466 *entry = swp; 4467 page = find_get_page(swap_address_space(swp), 4468 swp_offset(swp)); 4469 } 4470 } else 4471 page = find_get_page(mapping, pgoff); 4472 #else 4473 page = find_get_page(mapping, pgoff); 4474 #endif 4475 return page; 4476 } 4477 4478 /** 4479 * mem_cgroup_move_account - move account of the page 4480 * @page: the page 4481 * @compound: charge the page as compound or small page 4482 * @from: mem_cgroup which the page is moved from. 4483 * @to: mem_cgroup which the page is moved to. @from != @to. 4484 * 4485 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 4486 * 4487 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4488 * from old cgroup. 
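 *
 * Illustrative caller sequence (this is what the move-charge page walk
 * below does for each base page it decides to move):
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false, mc.from, mc.to))
 *			mc.moved_charge++;
 *		putback_lru_page(page);
 *	}
 *	put_page(page);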
4489 */ 4490 static int mem_cgroup_move_account(struct page *page, 4491 bool compound, 4492 struct mem_cgroup *from, 4493 struct mem_cgroup *to) 4494 { 4495 unsigned long flags; 4496 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 4497 int ret; 4498 bool anon; 4499 4500 VM_BUG_ON(from == to); 4501 VM_BUG_ON_PAGE(PageLRU(page), page); 4502 VM_BUG_ON(compound && !PageTransHuge(page)); 4503 4504 /* 4505 * Prevent mem_cgroup_migrate() from looking at 4506 * page->mem_cgroup of its source page while we change it. 4507 */ 4508 ret = -EBUSY; 4509 if (!trylock_page(page)) 4510 goto out; 4511 4512 ret = -EINVAL; 4513 if (page->mem_cgroup != from) 4514 goto out_unlock; 4515 4516 anon = PageAnon(page); 4517 4518 spin_lock_irqsave(&from->move_lock, flags); 4519 4520 if (!anon && page_mapped(page)) { 4521 __this_cpu_sub(from->stat->count[NR_FILE_MAPPED], nr_pages); 4522 __this_cpu_add(to->stat->count[NR_FILE_MAPPED], nr_pages); 4523 } 4524 4525 /* 4526 * move_lock grabbed above and caller set from->moving_account, so 4527 * mod_memcg_page_state will serialize updates to PageDirty. 4528 * So mapping should be stable for dirty pages. 4529 */ 4530 if (!anon && PageDirty(page)) { 4531 struct address_space *mapping = page_mapping(page); 4532 4533 if (mapping_cap_account_dirty(mapping)) { 4534 __this_cpu_sub(from->stat->count[NR_FILE_DIRTY], 4535 nr_pages); 4536 __this_cpu_add(to->stat->count[NR_FILE_DIRTY], 4537 nr_pages); 4538 } 4539 } 4540 4541 if (PageWriteback(page)) { 4542 __this_cpu_sub(from->stat->count[NR_WRITEBACK], nr_pages); 4543 __this_cpu_add(to->stat->count[NR_WRITEBACK], nr_pages); 4544 } 4545 4546 /* 4547 * It is safe to change page->mem_cgroup here because the page 4548 * is referenced, charged, and isolated - we can't race with 4549 * uncharging, charging, migration, or LRU putback. 4550 */ 4551 4552 /* caller should have done css_get */ 4553 page->mem_cgroup = to; 4554 spin_unlock_irqrestore(&from->move_lock, flags); 4555 4556 ret = 0; 4557 4558 local_irq_disable(); 4559 mem_cgroup_charge_statistics(to, page, compound, nr_pages); 4560 memcg_check_events(to, page); 4561 mem_cgroup_charge_statistics(from, page, compound, -nr_pages); 4562 memcg_check_events(from, page); 4563 local_irq_enable(); 4564 out_unlock: 4565 unlock_page(page); 4566 out: 4567 return ret; 4568 } 4569 4570 /** 4571 * get_mctgt_type - get target type of moving charge 4572 * @vma: the vma the pte to be checked belongs 4573 * @addr: the address corresponding to the pte to be checked 4574 * @ptent: the pte to be checked 4575 * @target: the pointer the target page or swap ent will be stored(can be NULL) 4576 * 4577 * Returns 4578 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 4579 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 4580 * move charge. if @target is not NULL, the page is stored in target->page 4581 * with extra refcnt got(Callers should handle it). 4582 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 4583 * target for charge migration. if @target is not NULL, the entry is stored 4584 * in target->ent. 4585 * 4586 * Called with pte lock held. 
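 *
 * (Illustrative: the pte-range walkers below hold the pte lock from
 * pte_offset_map_lock(), call this once per pte, and do the put_page()
 * on a returned MC_TARGET_PAGE themselves once they are done with it.)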
4587 */ 4588 4589 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4590 unsigned long addr, pte_t ptent, union mc_target *target) 4591 { 4592 struct page *page = NULL; 4593 enum mc_target_type ret = MC_TARGET_NONE; 4594 swp_entry_t ent = { .val = 0 }; 4595 4596 if (pte_present(ptent)) 4597 page = mc_handle_present_pte(vma, addr, ptent); 4598 else if (is_swap_pte(ptent)) 4599 page = mc_handle_swap_pte(vma, ptent, &ent); 4600 else if (pte_none(ptent)) 4601 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4602 4603 if (!page && !ent.val) 4604 return ret; 4605 if (page) { 4606 /* 4607 * Do only loose check w/o serialization. 4608 * mem_cgroup_move_account() checks the page is valid or 4609 * not under LRU exclusion. 4610 */ 4611 if (page->mem_cgroup == mc.from) { 4612 ret = MC_TARGET_PAGE; 4613 if (target) 4614 target->page = page; 4615 } 4616 if (!ret || !target) 4617 put_page(page); 4618 } 4619 /* There is a swap entry and a page doesn't exist or isn't charged */ 4620 if (ent.val && !ret && 4621 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4622 ret = MC_TARGET_SWAP; 4623 if (target) 4624 target->ent = ent; 4625 } 4626 return ret; 4627 } 4628 4629 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4630 /* 4631 * We don't consider swapping or file mapped pages because THP does not 4632 * support them for now. 4633 * Caller should make sure that pmd_trans_huge(pmd) is true. 4634 */ 4635 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4636 unsigned long addr, pmd_t pmd, union mc_target *target) 4637 { 4638 struct page *page = NULL; 4639 enum mc_target_type ret = MC_TARGET_NONE; 4640 4641 page = pmd_page(pmd); 4642 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4643 if (!(mc.flags & MOVE_ANON)) 4644 return ret; 4645 if (page->mem_cgroup == mc.from) { 4646 ret = MC_TARGET_PAGE; 4647 if (target) { 4648 get_page(page); 4649 target->page = page; 4650 } 4651 } 4652 return ret; 4653 } 4654 #else 4655 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4656 unsigned long addr, pmd_t pmd, union mc_target *target) 4657 { 4658 return MC_TARGET_NONE; 4659 } 4660 #endif 4661 4662 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4663 unsigned long addr, unsigned long end, 4664 struct mm_walk *walk) 4665 { 4666 struct vm_area_struct *vma = walk->vma; 4667 pte_t *pte; 4668 spinlock_t *ptl; 4669 4670 ptl = pmd_trans_huge_lock(pmd, vma); 4671 if (ptl) { 4672 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4673 mc.precharge += HPAGE_PMD_NR; 4674 spin_unlock(ptl); 4675 return 0; 4676 } 4677 4678 if (pmd_trans_unstable(pmd)) 4679 return 0; 4680 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4681 for (; addr != end; pte++, addr += PAGE_SIZE) 4682 if (get_mctgt_type(vma, addr, *pte, NULL)) 4683 mc.precharge++; /* increment precharge temporarily */ 4684 pte_unmap_unlock(pte - 1, ptl); 4685 cond_resched(); 4686 4687 return 0; 4688 } 4689 4690 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4691 { 4692 unsigned long precharge; 4693 4694 struct mm_walk mem_cgroup_count_precharge_walk = { 4695 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4696 .mm = mm, 4697 }; 4698 down_read(&mm->mmap_sem); 4699 walk_page_range(0, mm->highest_vm_end, 4700 &mem_cgroup_count_precharge_walk); 4701 up_read(&mm->mmap_sem); 4702 4703 precharge = mc.precharge; 4704 mc.precharge = 0; 4705 4706 return precharge; 4707 } 4708 4709 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4710 { 4711 unsigned long 
precharge = mem_cgroup_count_precharge(mm); 4712 4713 VM_BUG_ON(mc.moving_task); 4714 mc.moving_task = current; 4715 return mem_cgroup_do_precharge(precharge); 4716 } 4717 4718 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4719 static void __mem_cgroup_clear_mc(void) 4720 { 4721 struct mem_cgroup *from = mc.from; 4722 struct mem_cgroup *to = mc.to; 4723 4724 /* we must uncharge all the leftover precharges from mc.to */ 4725 if (mc.precharge) { 4726 cancel_charge(mc.to, mc.precharge); 4727 mc.precharge = 0; 4728 } 4729 /* 4730 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4731 * we must uncharge here. 4732 */ 4733 if (mc.moved_charge) { 4734 cancel_charge(mc.from, mc.moved_charge); 4735 mc.moved_charge = 0; 4736 } 4737 /* we must fixup refcnts and charges */ 4738 if (mc.moved_swap) { 4739 /* uncharge swap account from the old cgroup */ 4740 if (!mem_cgroup_is_root(mc.from)) 4741 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4742 4743 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 4744 4745 /* 4746 * we charged both to->memory and to->memsw, so we 4747 * should uncharge to->memory. 4748 */ 4749 if (!mem_cgroup_is_root(mc.to)) 4750 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4751 4752 mem_cgroup_id_get_many(mc.to, mc.moved_swap); 4753 css_put_many(&mc.to->css, mc.moved_swap); 4754 4755 mc.moved_swap = 0; 4756 } 4757 memcg_oom_recover(from); 4758 memcg_oom_recover(to); 4759 wake_up_all(&mc.waitq); 4760 } 4761 4762 static void mem_cgroup_clear_mc(void) 4763 { 4764 struct mm_struct *mm = mc.mm; 4765 4766 /* 4767 * we must clear moving_task before waking up waiters at the end of 4768 * task migration. 4769 */ 4770 mc.moving_task = NULL; 4771 __mem_cgroup_clear_mc(); 4772 spin_lock(&mc.lock); 4773 mc.from = NULL; 4774 mc.to = NULL; 4775 mc.mm = NULL; 4776 spin_unlock(&mc.lock); 4777 4778 mmput(mm); 4779 } 4780 4781 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4782 { 4783 struct cgroup_subsys_state *css; 4784 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 4785 struct mem_cgroup *from; 4786 struct task_struct *leader, *p; 4787 struct mm_struct *mm; 4788 unsigned long move_flags; 4789 int ret = 0; 4790 4791 /* charge immigration isn't supported on the default hierarchy */ 4792 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 4793 return 0; 4794 4795 /* 4796 * Multi-process migrations only happen on the default hierarchy 4797 * where charge immigration is not used. Perform charge 4798 * immigration if @tset contains a leader and whine if there are 4799 * multiple. 4800 */ 4801 p = NULL; 4802 cgroup_taskset_for_each_leader(leader, css, tset) { 4803 WARN_ON_ONCE(p); 4804 p = leader; 4805 memcg = mem_cgroup_from_css(css); 4806 } 4807 if (!p) 4808 return 0; 4809 4810 /* 4811 * We are now commited to this value whatever it is. Changes in this 4812 * tunable will only affect upcoming migrations, not the current one. 4813 * So we need to save it, and keep it going. 
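 *
 * (For illustration: on the legacy hierarchy this is configured with
 * something like "echo 3 > memory.move_charge_at_immigrate", where bit
 * 0 selects anonymous pages and bit 1 file pages.)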
4814 */ 4815 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 4816 if (!move_flags) 4817 return 0; 4818 4819 from = mem_cgroup_from_task(p); 4820 4821 VM_BUG_ON(from == memcg); 4822 4823 mm = get_task_mm(p); 4824 if (!mm) 4825 return 0; 4826 /* We move charges only when we move a owner of the mm */ 4827 if (mm->owner == p) { 4828 VM_BUG_ON(mc.from); 4829 VM_BUG_ON(mc.to); 4830 VM_BUG_ON(mc.precharge); 4831 VM_BUG_ON(mc.moved_charge); 4832 VM_BUG_ON(mc.moved_swap); 4833 4834 spin_lock(&mc.lock); 4835 mc.mm = mm; 4836 mc.from = from; 4837 mc.to = memcg; 4838 mc.flags = move_flags; 4839 spin_unlock(&mc.lock); 4840 /* We set mc.moving_task later */ 4841 4842 ret = mem_cgroup_precharge_mc(mm); 4843 if (ret) 4844 mem_cgroup_clear_mc(); 4845 } else { 4846 mmput(mm); 4847 } 4848 return ret; 4849 } 4850 4851 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4852 { 4853 if (mc.to) 4854 mem_cgroup_clear_mc(); 4855 } 4856 4857 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 4858 unsigned long addr, unsigned long end, 4859 struct mm_walk *walk) 4860 { 4861 int ret = 0; 4862 struct vm_area_struct *vma = walk->vma; 4863 pte_t *pte; 4864 spinlock_t *ptl; 4865 enum mc_target_type target_type; 4866 union mc_target target; 4867 struct page *page; 4868 4869 ptl = pmd_trans_huge_lock(pmd, vma); 4870 if (ptl) { 4871 if (mc.precharge < HPAGE_PMD_NR) { 4872 spin_unlock(ptl); 4873 return 0; 4874 } 4875 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 4876 if (target_type == MC_TARGET_PAGE) { 4877 page = target.page; 4878 if (!isolate_lru_page(page)) { 4879 if (!mem_cgroup_move_account(page, true, 4880 mc.from, mc.to)) { 4881 mc.precharge -= HPAGE_PMD_NR; 4882 mc.moved_charge += HPAGE_PMD_NR; 4883 } 4884 putback_lru_page(page); 4885 } 4886 put_page(page); 4887 } 4888 spin_unlock(ptl); 4889 return 0; 4890 } 4891 4892 if (pmd_trans_unstable(pmd)) 4893 return 0; 4894 retry: 4895 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4896 for (; addr != end; addr += PAGE_SIZE) { 4897 pte_t ptent = *(pte++); 4898 swp_entry_t ent; 4899 4900 if (!mc.precharge) 4901 break; 4902 4903 switch (get_mctgt_type(vma, addr, ptent, &target)) { 4904 case MC_TARGET_PAGE: 4905 page = target.page; 4906 /* 4907 * We can have a part of the split pmd here. Moving it 4908 * can be done but it would be too convoluted so simply 4909 * ignore such a partial THP and keep it in original 4910 * memcg. There should be somebody mapping the head. 4911 */ 4912 if (PageTransCompound(page)) 4913 goto put; 4914 if (isolate_lru_page(page)) 4915 goto put; 4916 if (!mem_cgroup_move_account(page, false, 4917 mc.from, mc.to)) { 4918 mc.precharge--; 4919 /* we uncharge from mc.from later. */ 4920 mc.moved_charge++; 4921 } 4922 putback_lru_page(page); 4923 put: /* get_mctgt_type() gets the page */ 4924 put_page(page); 4925 break; 4926 case MC_TARGET_SWAP: 4927 ent = target.ent; 4928 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 4929 mc.precharge--; 4930 /* we fixup refcnts and charges later. */ 4931 mc.moved_swap++; 4932 } 4933 break; 4934 default: 4935 break; 4936 } 4937 } 4938 pte_unmap_unlock(pte - 1, ptl); 4939 cond_resched(); 4940 4941 if (addr != end) { 4942 /* 4943 * We have consumed all precharges we got in can_attach(). 4944 * We try charge one by one, but don't do any additional 4945 * charges to mc.to if we have failed in charge once in attach() 4946 * phase. 
4947 */ 4948 ret = mem_cgroup_do_precharge(1); 4949 if (!ret) 4950 goto retry; 4951 } 4952 4953 return ret; 4954 } 4955 4956 static void mem_cgroup_move_charge(void) 4957 { 4958 struct mm_walk mem_cgroup_move_charge_walk = { 4959 .pmd_entry = mem_cgroup_move_charge_pte_range, 4960 .mm = mc.mm, 4961 }; 4962 4963 lru_add_drain_all(); 4964 /* 4965 * Signal lock_page_memcg() to take the memcg's move_lock 4966 * while we're moving its pages to another memcg. Then wait 4967 * for already started RCU-only updates to finish. 4968 */ 4969 atomic_inc(&mc.from->moving_account); 4970 synchronize_rcu(); 4971 retry: 4972 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { 4973 /* 4974 * Someone who are holding the mmap_sem might be waiting in 4975 * waitq. So we cancel all extra charges, wake up all waiters, 4976 * and retry. Because we cancel precharges, we might not be able 4977 * to move enough charges, but moving charge is a best-effort 4978 * feature anyway, so it wouldn't be a big problem. 4979 */ 4980 __mem_cgroup_clear_mc(); 4981 cond_resched(); 4982 goto retry; 4983 } 4984 /* 4985 * When we have consumed all precharges and failed in doing 4986 * additional charge, the page walk just aborts. 4987 */ 4988 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); 4989 4990 up_read(&mc.mm->mmap_sem); 4991 atomic_dec(&mc.from->moving_account); 4992 } 4993 4994 static void mem_cgroup_move_task(void) 4995 { 4996 if (mc.to) { 4997 mem_cgroup_move_charge(); 4998 mem_cgroup_clear_mc(); 4999 } 5000 } 5001 #else /* !CONFIG_MMU */ 5002 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5003 { 5004 return 0; 5005 } 5006 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5007 { 5008 } 5009 static void mem_cgroup_move_task(void) 5010 { 5011 } 5012 #endif 5013 5014 /* 5015 * Cgroup retains root cgroups across [un]mount cycles making it necessary 5016 * to verify whether we're attached to the default hierarchy on each mount 5017 * attempt. 5018 */ 5019 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 5020 { 5021 /* 5022 * use_hierarchy is forced on the default hierarchy. cgroup core 5023 * guarantees that @root doesn't have any children, so turning it 5024 * on for the root memcg is enough. 
5025 */ 5026 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5027 root_mem_cgroup->use_hierarchy = true; 5028 else 5029 root_mem_cgroup->use_hierarchy = false; 5030 } 5031 5032 static u64 memory_current_read(struct cgroup_subsys_state *css, 5033 struct cftype *cft) 5034 { 5035 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5036 5037 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5038 } 5039 5040 static int memory_low_show(struct seq_file *m, void *v) 5041 { 5042 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5043 unsigned long low = READ_ONCE(memcg->low); 5044 5045 if (low == PAGE_COUNTER_MAX) 5046 seq_puts(m, "max\n"); 5047 else 5048 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5049 5050 return 0; 5051 } 5052 5053 static ssize_t memory_low_write(struct kernfs_open_file *of, 5054 char *buf, size_t nbytes, loff_t off) 5055 { 5056 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5057 unsigned long low; 5058 int err; 5059 5060 buf = strstrip(buf); 5061 err = page_counter_memparse(buf, "max", &low); 5062 if (err) 5063 return err; 5064 5065 memcg->low = low; 5066 5067 return nbytes; 5068 } 5069 5070 static int memory_high_show(struct seq_file *m, void *v) 5071 { 5072 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5073 unsigned long high = READ_ONCE(memcg->high); 5074 5075 if (high == PAGE_COUNTER_MAX) 5076 seq_puts(m, "max\n"); 5077 else 5078 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5079 5080 return 0; 5081 } 5082 5083 static ssize_t memory_high_write(struct kernfs_open_file *of, 5084 char *buf, size_t nbytes, loff_t off) 5085 { 5086 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5087 unsigned long nr_pages; 5088 unsigned long high; 5089 int err; 5090 5091 buf = strstrip(buf); 5092 err = page_counter_memparse(buf, "max", &high); 5093 if (err) 5094 return err; 5095 5096 memcg->high = high; 5097 5098 nr_pages = page_counter_read(&memcg->memory); 5099 if (nr_pages > high) 5100 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5101 GFP_KERNEL, true); 5102 5103 memcg_wb_domain_size_changed(memcg); 5104 return nbytes; 5105 } 5106 5107 static int memory_max_show(struct seq_file *m, void *v) 5108 { 5109 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5110 unsigned long max = READ_ONCE(memcg->memory.limit); 5111 5112 if (max == PAGE_COUNTER_MAX) 5113 seq_puts(m, "max\n"); 5114 else 5115 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5116 5117 return 0; 5118 } 5119 5120 static ssize_t memory_max_write(struct kernfs_open_file *of, 5121 char *buf, size_t nbytes, loff_t off) 5122 { 5123 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5124 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5125 bool drained = false; 5126 unsigned long max; 5127 int err; 5128 5129 buf = strstrip(buf); 5130 err = page_counter_memparse(buf, "max", &max); 5131 if (err) 5132 return err; 5133 5134 xchg(&memcg->memory.limit, max); 5135 5136 for (;;) { 5137 unsigned long nr_pages = page_counter_read(&memcg->memory); 5138 5139 if (nr_pages <= max) 5140 break; 5141 5142 if (signal_pending(current)) { 5143 err = -EINTR; 5144 break; 5145 } 5146 5147 if (!drained) { 5148 drain_all_stock(memcg); 5149 drained = true; 5150 continue; 5151 } 5152 5153 if (nr_reclaims) { 5154 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5155 GFP_KERNEL, true)) 5156 nr_reclaims--; 5157 continue; 5158 } 5159 5160 mem_cgroup_event(memcg, MEMCG_OOM); 5161 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5162 break; 5163 } 5164 5165 
memcg_wb_domain_size_changed(memcg); 5166 return nbytes; 5167 } 5168 5169 static int memory_events_show(struct seq_file *m, void *v) 5170 { 5171 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5172 5173 seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW)); 5174 seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH)); 5175 seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX)); 5176 seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM)); 5177 seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL)); 5178 5179 return 0; 5180 } 5181 5182 static int memory_stat_show(struct seq_file *m, void *v) 5183 { 5184 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5185 unsigned long stat[MEMCG_NR_STAT]; 5186 unsigned long events[MEMCG_NR_EVENTS]; 5187 int i; 5188 5189 /* 5190 * Provide statistics on the state of the memory subsystem as 5191 * well as cumulative event counters that show past behavior. 5192 * 5193 * This list is ordered following a combination of these gradients: 5194 * 1) generic big picture -> specifics and details 5195 * 2) reflecting userspace activity -> reflecting kernel heuristics 5196 * 5197 * Current memory state: 5198 */ 5199 5200 tree_stat(memcg, stat); 5201 tree_events(memcg, events); 5202 5203 seq_printf(m, "anon %llu\n", 5204 (u64)stat[MEMCG_RSS] * PAGE_SIZE); 5205 seq_printf(m, "file %llu\n", 5206 (u64)stat[MEMCG_CACHE] * PAGE_SIZE); 5207 seq_printf(m, "kernel_stack %llu\n", 5208 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024); 5209 seq_printf(m, "slab %llu\n", 5210 (u64)(stat[NR_SLAB_RECLAIMABLE] + 5211 stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5212 seq_printf(m, "sock %llu\n", 5213 (u64)stat[MEMCG_SOCK] * PAGE_SIZE); 5214 5215 seq_printf(m, "shmem %llu\n", 5216 (u64)stat[NR_SHMEM] * PAGE_SIZE); 5217 seq_printf(m, "file_mapped %llu\n", 5218 (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE); 5219 seq_printf(m, "file_dirty %llu\n", 5220 (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE); 5221 seq_printf(m, "file_writeback %llu\n", 5222 (u64)stat[NR_WRITEBACK] * PAGE_SIZE); 5223 5224 for (i = 0; i < NR_LRU_LISTS; i++) { 5225 struct mem_cgroup *mi; 5226 unsigned long val = 0; 5227 5228 for_each_mem_cgroup_tree(mi, memcg) 5229 val += mem_cgroup_nr_lru_pages(mi, BIT(i)); 5230 seq_printf(m, "%s %llu\n", 5231 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE); 5232 } 5233 5234 seq_printf(m, "slab_reclaimable %llu\n", 5235 (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE); 5236 seq_printf(m, "slab_unreclaimable %llu\n", 5237 (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5238 5239 /* Accumulated memory events */ 5240 5241 seq_printf(m, "pgfault %lu\n", events[PGFAULT]); 5242 seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]); 5243 5244 seq_printf(m, "pgrefill %lu\n", events[PGREFILL]); 5245 seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] + 5246 events[PGSCAN_DIRECT]); 5247 seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] + 5248 events[PGSTEAL_DIRECT]); 5249 seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]); 5250 seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]); 5251 seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]); 5252 seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]); 5253 5254 seq_printf(m, "workingset_refault %lu\n", 5255 stat[WORKINGSET_REFAULT]); 5256 seq_printf(m, "workingset_activate %lu\n", 5257 stat[WORKINGSET_ACTIVATE]); 5258 seq_printf(m, "workingset_nodereclaim %lu\n", 5259 stat[WORKINGSET_NODERECLAIM]); 5260 5261 return 0; 5262 } 5263 5264 static struct cftype memory_files[] = { 5265 { 5266 .name = 
"current", 5267 .flags = CFTYPE_NOT_ON_ROOT, 5268 .read_u64 = memory_current_read, 5269 }, 5270 { 5271 .name = "low", 5272 .flags = CFTYPE_NOT_ON_ROOT, 5273 .seq_show = memory_low_show, 5274 .write = memory_low_write, 5275 }, 5276 { 5277 .name = "high", 5278 .flags = CFTYPE_NOT_ON_ROOT, 5279 .seq_show = memory_high_show, 5280 .write = memory_high_write, 5281 }, 5282 { 5283 .name = "max", 5284 .flags = CFTYPE_NOT_ON_ROOT, 5285 .seq_show = memory_max_show, 5286 .write = memory_max_write, 5287 }, 5288 { 5289 .name = "events", 5290 .flags = CFTYPE_NOT_ON_ROOT, 5291 .file_offset = offsetof(struct mem_cgroup, events_file), 5292 .seq_show = memory_events_show, 5293 }, 5294 { 5295 .name = "stat", 5296 .flags = CFTYPE_NOT_ON_ROOT, 5297 .seq_show = memory_stat_show, 5298 }, 5299 { } /* terminate */ 5300 }; 5301 5302 struct cgroup_subsys memory_cgrp_subsys = { 5303 .css_alloc = mem_cgroup_css_alloc, 5304 .css_online = mem_cgroup_css_online, 5305 .css_offline = mem_cgroup_css_offline, 5306 .css_released = mem_cgroup_css_released, 5307 .css_free = mem_cgroup_css_free, 5308 .css_reset = mem_cgroup_css_reset, 5309 .can_attach = mem_cgroup_can_attach, 5310 .cancel_attach = mem_cgroup_cancel_attach, 5311 .post_attach = mem_cgroup_move_task, 5312 .bind = mem_cgroup_bind, 5313 .dfl_cftypes = memory_files, 5314 .legacy_cftypes = mem_cgroup_legacy_files, 5315 .early_init = 0, 5316 }; 5317 5318 /** 5319 * mem_cgroup_low - check if memory consumption is below the normal range 5320 * @root: the top ancestor of the sub-tree being checked 5321 * @memcg: the memory cgroup to check 5322 * 5323 * Returns %true if memory consumption of @memcg, and that of all 5324 * ancestors up to (but not including) @root, is below the normal range. 5325 * 5326 * @root is exclusive; it is never low when looked at directly and isn't 5327 * checked when traversing the hierarchy. 5328 * 5329 * Excluding @root enables using memory.low to prioritize memory usage 5330 * between cgroups within a subtree of the hierarchy that is limited by 5331 * memory.high or memory.max. 5332 * 5333 * For example, given cgroup A with children B and C: 5334 * 5335 * A 5336 * / \ 5337 * B C 5338 * 5339 * and 5340 * 5341 * 1. A/memory.current > A/memory.high 5342 * 2. A/B/memory.current < A/B/memory.low 5343 * 3. A/C/memory.current >= A/C/memory.low 5344 * 5345 * As 'A' is high, i.e. triggers reclaim from 'A', and 'B' is low, we 5346 * should reclaim from 'C' until 'A' is no longer high or until we can 5347 * no longer reclaim from 'C'. If 'A', i.e. @root, isn't excluded by 5348 * mem_cgroup_low when reclaming from 'A', then 'B' won't be considered 5349 * low and we will reclaim indiscriminately from both 'B' and 'C'. 5350 */ 5351 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5352 { 5353 if (mem_cgroup_disabled()) 5354 return false; 5355 5356 if (!root) 5357 root = root_mem_cgroup; 5358 if (memcg == root) 5359 return false; 5360 5361 for (; memcg != root; memcg = parent_mem_cgroup(memcg)) { 5362 if (page_counter_read(&memcg->memory) >= memcg->low) 5363 return false; 5364 } 5365 5366 return true; 5367 } 5368 5369 /** 5370 * mem_cgroup_try_charge - try charging a page 5371 * @page: page to charge 5372 * @mm: mm context of the victim 5373 * @gfp_mask: reclaim mode 5374 * @memcgp: charged memcg return 5375 * @compound: charge the page as compound or small page 5376 * 5377 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5378 * pages according to @gfp_mask if necessary. 
5379 * 5380 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5381 * Otherwise, an error code is returned. 5382 * 5383 * After page->mapping has been set up, the caller must finalize the 5384 * charge with mem_cgroup_commit_charge(). Or abort the transaction 5385 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5386 */ 5387 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5388 gfp_t gfp_mask, struct mem_cgroup **memcgp, 5389 bool compound) 5390 { 5391 struct mem_cgroup *memcg = NULL; 5392 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5393 int ret = 0; 5394 5395 if (mem_cgroup_disabled()) 5396 goto out; 5397 5398 if (PageSwapCache(page)) { 5399 /* 5400 * Every swap fault against a single page tries to charge the 5401 * page, bail as early as possible. shmem_unuse() encounters 5402 * already charged pages, too. The USED bit is protected by 5403 * the page lock, which serializes swap cache removal, which 5404 * in turn serializes uncharging. 5405 */ 5406 VM_BUG_ON_PAGE(!PageLocked(page), page); 5407 if (page->mem_cgroup) 5408 goto out; 5409 5410 if (do_swap_account) { 5411 swp_entry_t ent = { .val = page_private(page), }; 5412 unsigned short id = lookup_swap_cgroup_id(ent); 5413 5414 rcu_read_lock(); 5415 memcg = mem_cgroup_from_id(id); 5416 if (memcg && !css_tryget_online(&memcg->css)) 5417 memcg = NULL; 5418 rcu_read_unlock(); 5419 } 5420 } 5421 5422 if (!memcg) 5423 memcg = get_mem_cgroup_from_mm(mm); 5424 5425 ret = try_charge(memcg, gfp_mask, nr_pages); 5426 5427 css_put(&memcg->css); 5428 out: 5429 *memcgp = memcg; 5430 return ret; 5431 } 5432 5433 /** 5434 * mem_cgroup_commit_charge - commit a page charge 5435 * @page: page to charge 5436 * @memcg: memcg to charge the page to 5437 * @lrucare: page might be on LRU already 5438 * @compound: charge the page as compound or small page 5439 * 5440 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5441 * after page->mapping has been set up. This must happen atomically 5442 * as part of the page instantiation, i.e. under the page table lock 5443 * for anonymous pages, under the page lock for page and swap cache. 5444 * 5445 * In addition, the page must not be on the LRU during the commit, to 5446 * prevent racing with task migration. If it might be, use @lrucare. 5447 * 5448 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5449 */ 5450 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5451 bool lrucare, bool compound) 5452 { 5453 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5454 5455 VM_BUG_ON_PAGE(!page->mapping, page); 5456 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5457 5458 if (mem_cgroup_disabled()) 5459 return; 5460 /* 5461 * Swap faults will attempt to charge the same page multiple 5462 * times. But reuse_swap_page() might have removed the page 5463 * from swapcache already, so we can't check PageSwapCache(). 5464 */ 5465 if (!memcg) 5466 return; 5467 5468 commit_charge(page, memcg, lrucare); 5469 5470 local_irq_disable(); 5471 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); 5472 memcg_check_events(memcg, page); 5473 local_irq_enable(); 5474 5475 if (do_memsw_account() && PageSwapCache(page)) { 5476 swp_entry_t entry = { .val = page_private(page) }; 5477 /* 5478 * The swap entry might not get freed for a long time, 5479 * let's not wait for it. The page already received a 5480 * memory+swap charge, drop the swap entry duplicate. 
5481 */ 5482 mem_cgroup_uncharge_swap(entry, nr_pages); 5483 } 5484 } 5485 5486 /** 5487 * mem_cgroup_cancel_charge - cancel a page charge 5488 * @page: page to charge 5489 * @memcg: memcg to charge the page to 5490 * @compound: charge the page as compound or small page 5491 * 5492 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5493 */ 5494 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, 5495 bool compound) 5496 { 5497 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; 5498 5499 if (mem_cgroup_disabled()) 5500 return; 5501 /* 5502 * Swap faults will attempt to charge the same page multiple 5503 * times. But reuse_swap_page() might have removed the page 5504 * from swapcache already, so we can't check PageSwapCache(). 5505 */ 5506 if (!memcg) 5507 return; 5508 5509 cancel_charge(memcg, nr_pages); 5510 } 5511 5512 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5513 unsigned long nr_anon, unsigned long nr_file, 5514 unsigned long nr_kmem, unsigned long nr_huge, 5515 unsigned long nr_shmem, struct page *dummy_page) 5516 { 5517 unsigned long nr_pages = nr_anon + nr_file + nr_kmem; 5518 unsigned long flags; 5519 5520 if (!mem_cgroup_is_root(memcg)) { 5521 page_counter_uncharge(&memcg->memory, nr_pages); 5522 if (do_memsw_account()) 5523 page_counter_uncharge(&memcg->memsw, nr_pages); 5524 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem) 5525 page_counter_uncharge(&memcg->kmem, nr_kmem); 5526 memcg_oom_recover(memcg); 5527 } 5528 5529 local_irq_save(flags); 5530 __this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon); 5531 __this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file); 5532 __this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge); 5533 __this_cpu_sub(memcg->stat->count[NR_SHMEM], nr_shmem); 5534 __this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout); 5535 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5536 memcg_check_events(memcg, dummy_page); 5537 local_irq_restore(flags); 5538 5539 if (!mem_cgroup_is_root(memcg)) 5540 css_put_many(&memcg->css, nr_pages); 5541 } 5542 5543 static void uncharge_list(struct list_head *page_list) 5544 { 5545 struct mem_cgroup *memcg = NULL; 5546 unsigned long nr_shmem = 0; 5547 unsigned long nr_anon = 0; 5548 unsigned long nr_file = 0; 5549 unsigned long nr_huge = 0; 5550 unsigned long nr_kmem = 0; 5551 unsigned long pgpgout = 0; 5552 struct list_head *next; 5553 struct page *page; 5554 5555 /* 5556 * Note that the list can be a single page->lru; hence the 5557 * do-while loop instead of a simple list_for_each_entry(). 5558 */ 5559 next = page_list->next; 5560 do { 5561 page = list_entry(next, struct page, lru); 5562 next = page->lru.next; 5563 5564 VM_BUG_ON_PAGE(PageLRU(page), page); 5565 VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page); 5566 5567 if (!page->mem_cgroup) 5568 continue; 5569 5570 /* 5571 * Nobody should be changing or seriously looking at 5572 * page->mem_cgroup at this point, we have fully 5573 * exclusive access to the page. 
5574 */ 5575 5576 if (memcg != page->mem_cgroup) { 5577 if (memcg) { 5578 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5579 nr_kmem, nr_huge, nr_shmem, page); 5580 pgpgout = nr_anon = nr_file = nr_kmem = 0; 5581 nr_huge = nr_shmem = 0; 5582 } 5583 memcg = page->mem_cgroup; 5584 } 5585 5586 if (!PageKmemcg(page)) { 5587 unsigned int nr_pages = 1; 5588 5589 if (PageTransHuge(page)) { 5590 nr_pages <<= compound_order(page); 5591 nr_huge += nr_pages; 5592 } 5593 if (PageAnon(page)) 5594 nr_anon += nr_pages; 5595 else { 5596 nr_file += nr_pages; 5597 if (PageSwapBacked(page)) 5598 nr_shmem += nr_pages; 5599 } 5600 pgpgout++; 5601 } else { 5602 nr_kmem += 1 << compound_order(page); 5603 __ClearPageKmemcg(page); 5604 } 5605 5606 page->mem_cgroup = NULL; 5607 } while (next != page_list); 5608 5609 if (memcg) 5610 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5611 nr_kmem, nr_huge, nr_shmem, page); 5612 } 5613 5614 /** 5615 * mem_cgroup_uncharge - uncharge a page 5616 * @page: page to uncharge 5617 * 5618 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5619 * mem_cgroup_commit_charge(). 5620 */ 5621 void mem_cgroup_uncharge(struct page *page) 5622 { 5623 if (mem_cgroup_disabled()) 5624 return; 5625 5626 /* Don't touch page->lru of any random page, pre-check: */ 5627 if (!page->mem_cgroup) 5628 return; 5629 5630 INIT_LIST_HEAD(&page->lru); 5631 uncharge_list(&page->lru); 5632 } 5633 5634 /** 5635 * mem_cgroup_uncharge_list - uncharge a list of page 5636 * @page_list: list of pages to uncharge 5637 * 5638 * Uncharge a list of pages previously charged with 5639 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5640 */ 5641 void mem_cgroup_uncharge_list(struct list_head *page_list) 5642 { 5643 if (mem_cgroup_disabled()) 5644 return; 5645 5646 if (!list_empty(page_list)) 5647 uncharge_list(page_list); 5648 } 5649 5650 /** 5651 * mem_cgroup_migrate - charge a page's replacement 5652 * @oldpage: currently circulating page 5653 * @newpage: replacement page 5654 * 5655 * Charge @newpage as a replacement page for @oldpage. @oldpage will 5656 * be uncharged upon free. 5657 * 5658 * Both pages must be locked, @newpage->mapping must be set up. 5659 */ 5660 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 5661 { 5662 struct mem_cgroup *memcg; 5663 unsigned int nr_pages; 5664 bool compound; 5665 unsigned long flags; 5666 5667 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5668 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5669 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5670 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5671 newpage); 5672 5673 if (mem_cgroup_disabled()) 5674 return; 5675 5676 /* Page cache replacement: new page already charged? */ 5677 if (newpage->mem_cgroup) 5678 return; 5679 5680 /* Swapcache readahead pages can get replaced before being charged */ 5681 memcg = oldpage->mem_cgroup; 5682 if (!memcg) 5683 return; 5684 5685 /* Force-charge the new page. The old one will be freed soon */ 5686 compound = PageTransHuge(newpage); 5687 nr_pages = compound ? 
hpage_nr_pages(newpage) : 1; 5688 5689 page_counter_charge(&memcg->memory, nr_pages); 5690 if (do_memsw_account()) 5691 page_counter_charge(&memcg->memsw, nr_pages); 5692 css_get_many(&memcg->css, nr_pages); 5693 5694 commit_charge(newpage, memcg, false); 5695 5696 local_irq_save(flags); 5697 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 5698 memcg_check_events(memcg, newpage); 5699 local_irq_restore(flags); 5700 } 5701 5702 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5703 EXPORT_SYMBOL(memcg_sockets_enabled_key); 5704 5705 void mem_cgroup_sk_alloc(struct sock *sk) 5706 { 5707 struct mem_cgroup *memcg; 5708 5709 if (!mem_cgroup_sockets_enabled) 5710 return; 5711 5712 /* 5713 * Socket cloning can throw us here with sk_memcg already 5714 * filled. It won't however, necessarily happen from 5715 * process context. So the test for root memcg given 5716 * the current task's memcg won't help us in this case. 5717 * 5718 * Respecting the original socket's memcg is a better 5719 * decision in this case. 5720 */ 5721 if (sk->sk_memcg) { 5722 BUG_ON(mem_cgroup_is_root(sk->sk_memcg)); 5723 css_get(&sk->sk_memcg->css); 5724 return; 5725 } 5726 5727 rcu_read_lock(); 5728 memcg = mem_cgroup_from_task(current); 5729 if (memcg == root_mem_cgroup) 5730 goto out; 5731 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 5732 goto out; 5733 if (css_tryget_online(&memcg->css)) 5734 sk->sk_memcg = memcg; 5735 out: 5736 rcu_read_unlock(); 5737 } 5738 5739 void mem_cgroup_sk_free(struct sock *sk) 5740 { 5741 if (sk->sk_memcg) 5742 css_put(&sk->sk_memcg->css); 5743 } 5744 5745 /** 5746 * mem_cgroup_charge_skmem - charge socket memory 5747 * @memcg: memcg to charge 5748 * @nr_pages: number of pages to charge 5749 * 5750 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 5751 * @memcg's configured limit, %false if the charge had to be forced. 
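 *
 * (Note that a %false return does not mean the memory is unaccounted:
 * the charge is applied anyway, forcibly, and the return value is a
 * signal for the caller to back off under pressure.)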
5752 */ 5753 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5754 { 5755 gfp_t gfp_mask = GFP_KERNEL; 5756 5757 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5758 struct page_counter *fail; 5759 5760 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 5761 memcg->tcpmem_pressure = 0; 5762 return true; 5763 } 5764 page_counter_charge(&memcg->tcpmem, nr_pages); 5765 memcg->tcpmem_pressure = 1; 5766 return false; 5767 } 5768 5769 /* Don't block in the packet receive path */ 5770 if (in_softirq()) 5771 gfp_mask = GFP_NOWAIT; 5772 5773 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages); 5774 5775 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 5776 return true; 5777 5778 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 5779 return false; 5780 } 5781 5782 /** 5783 * mem_cgroup_uncharge_skmem - uncharge socket memory 5784 * @memcg - memcg to uncharge 5785 * @nr_pages - number of pages to uncharge 5786 */ 5787 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 5788 { 5789 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 5790 page_counter_uncharge(&memcg->tcpmem, nr_pages); 5791 return; 5792 } 5793 5794 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages); 5795 5796 page_counter_uncharge(&memcg->memory, nr_pages); 5797 css_put_many(&memcg->css, nr_pages); 5798 } 5799 5800 static int __init cgroup_memory(char *s) 5801 { 5802 char *token; 5803 5804 while ((token = strsep(&s, ",")) != NULL) { 5805 if (!*token) 5806 continue; 5807 if (!strcmp(token, "nosocket")) 5808 cgroup_memory_nosocket = true; 5809 if (!strcmp(token, "nokmem")) 5810 cgroup_memory_nokmem = true; 5811 } 5812 return 0; 5813 } 5814 __setup("cgroup.memory=", cgroup_memory); 5815 5816 /* 5817 * subsys_initcall() for memory controller. 5818 * 5819 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 5820 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 5821 * basically everything that doesn't depend on a specific mem_cgroup structure 5822 * should be initialized from here. 5823 */ 5824 static int __init mem_cgroup_init(void) 5825 { 5826 int cpu, node; 5827 5828 #ifndef CONFIG_SLOB 5829 /* 5830 * Kmem cache creation is mostly done with the slab_mutex held, 5831 * so use a workqueue with limited concurrency to avoid stalling 5832 * all worker threads in case lots of cgroups are created and 5833 * destroyed simultaneously. 5834 */ 5835 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1); 5836 BUG_ON(!memcg_kmem_cache_wq); 5837 #endif 5838 5839 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 5840 memcg_hotplug_cpu_dead); 5841 5842 for_each_possible_cpu(cpu) 5843 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5844 drain_local_stock); 5845 5846 for_each_node(node) { 5847 struct mem_cgroup_tree_per_node *rtpn; 5848 5849 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5850 node_online(node) ? node : NUMA_NO_NODE); 5851 5852 rtpn->rb_root = RB_ROOT; 5853 spin_lock_init(&rtpn->lock); 5854 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5855 } 5856 5857 return 0; 5858 } 5859 subsys_initcall(mem_cgroup_init); 5860 5861 #ifdef CONFIG_MEMCG_SWAP 5862 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 5863 { 5864 while (!atomic_inc_not_zero(&memcg->id.ref)) { 5865 /* 5866 * The root cgroup cannot be destroyed, so it's refcount must 5867 * always be >= 1. 
5868 */ 5869 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 5870 VM_BUG_ON(1); 5871 break; 5872 } 5873 memcg = parent_mem_cgroup(memcg); 5874 if (!memcg) 5875 memcg = root_mem_cgroup; 5876 } 5877 return memcg; 5878 } 5879 5880 /** 5881 * mem_cgroup_swapout - transfer a memsw charge to swap 5882 * @page: page whose memsw charge to transfer 5883 * @entry: swap entry to move the charge to 5884 * 5885 * Transfer the memsw charge of @page to @entry. 5886 */ 5887 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5888 { 5889 struct mem_cgroup *memcg, *swap_memcg; 5890 unsigned short oldid; 5891 5892 VM_BUG_ON_PAGE(PageLRU(page), page); 5893 VM_BUG_ON_PAGE(page_count(page), page); 5894 5895 if (!do_memsw_account()) 5896 return; 5897 5898 memcg = page->mem_cgroup; 5899 5900 /* Readahead page, never charged */ 5901 if (!memcg) 5902 return; 5903 5904 /* 5905 * In case the memcg owning these pages has been offlined and doesn't 5906 * have an ID allocated to it anymore, charge the closest online 5907 * ancestor for the swap instead and transfer the memory+swap charge. 5908 */ 5909 swap_memcg = mem_cgroup_id_get_online(memcg); 5910 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 1); 5911 VM_BUG_ON_PAGE(oldid, page); 5912 mem_cgroup_swap_statistics(swap_memcg, 1); 5913 5914 page->mem_cgroup = NULL; 5915 5916 if (!mem_cgroup_is_root(memcg)) 5917 page_counter_uncharge(&memcg->memory, 1); 5918 5919 if (memcg != swap_memcg) { 5920 if (!mem_cgroup_is_root(swap_memcg)) 5921 page_counter_charge(&swap_memcg->memsw, 1); 5922 page_counter_uncharge(&memcg->memsw, 1); 5923 } 5924 5925 /* 5926 * Interrupts should be disabled here because the caller holds the 5927 * mapping->tree_lock lock which is taken with interrupts-off. It is 5928 * important here to have the interrupts disabled because it is the 5929 * only synchronisation we have for udpating the per-CPU variables. 5930 */ 5931 VM_BUG_ON(!irqs_disabled()); 5932 mem_cgroup_charge_statistics(memcg, page, false, -1); 5933 memcg_check_events(memcg, page); 5934 5935 if (!mem_cgroup_is_root(memcg)) 5936 css_put(&memcg->css); 5937 } 5938 5939 /** 5940 * mem_cgroup_try_charge_swap - try charging swap space for a page 5941 * @page: page being added to swap 5942 * @entry: swap entry to charge 5943 * 5944 * Try to charge @page's memcg for the swap space at @entry. 5945 * 5946 * Returns 0 on success, -ENOMEM on failure. 
/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = hpage_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, nr_pages);

	return 0;
}

/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mem_cgroup_swap_statistics(memcg, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.limit) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
			return true;

	return false;
}

/* Remember the swapaccount= boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}
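
/*
 * Illustrative userspace sketch only (kept out of the build with #if 0):
 * reading the "memory.swap.current" interface file that swap_current_read()
 * above implements. The value is exported in bytes (page_counter pages
 * multiplied by PAGE_SIZE). The cgroup path used here is an assumption for
 * the example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long bytes;
	FILE *f = fopen("/sys/fs/cgroup/mygroup/memory.swap.current", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &bytes) == 1)
		printf("swap usage: %llu bytes\n", bytes);
	fclose(f);
	return 0;
}
#endif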
static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	mutex_lock(&memcg_limit_mutex);
	err = page_counter_limit(&memcg->swap, max);
	mutex_unlock(&memcg_limit_mutex);
	if (err)
		return err;

	return nbytes;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{ }	/* terminate */
};

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */
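
/*
 * Illustrative userspace sketch only (kept out of the build with #if 0):
 * installing a swap limit through the "memory.swap.max" file handled by
 * swap_max_write() above. page_counter_memparse() accepts either "max"
 * (no limit) or a byte count, and memparse() understands K/M/G suffixes.
 * The helper name set_swap_max() and the cgroup path are assumptions for
 * the example.
 */
#if 0
#include <stdio.h>

static int set_swap_max(const char *cgroup_path, const char *value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/memory.swap.max", cgroup_path);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* e.g. value = "512M", "536870912", or "max" for no limit */
	fprintf(f, "%s\n", value);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	return set_swap_max("/sys/fs/cgroup/mygroup", "512M") ? 1 : 0;
}
#endif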