// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap		1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
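
/*
 * For example, the cgroup1 OOM control file can set its cftype private
 * value to MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL); the read/write
 * handlers then recover the two halves with MEMFILE_TYPE() and
 * MEMFILE_ATTR().
 */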

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&css_set_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&css_set_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&css_set_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&css_set_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using the cgroup id for this:
 * this works better in sparse environments, where we have a lot of memcgs,
 * but only a few are kmem-limited. Also, if we have, for instance, 200
 * memcgs and only the 200th is kmem-limited, we would need a 200-entry
 * array just for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE	4
#define MEMCG_CACHES_MAX_SIZE	MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc() and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = page_memcg_check(page);

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}
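
/*
 * Note on the ordering above: nodes are keyed by usage_in_excess and ties
 * go to the right, so mctz->rb_rightmost always points at the memcg with
 * the largest excess.  That is the node
 * __mem_cgroup_largest_soft_limit_node() hands to soft limit reclaim first.
 */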

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
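
/*
 * For example, a memcg with 600 pages charged and a soft limit of 512
 * pages has an excess of 88 pages; a memcg at or below its soft limit
 * reports an excess of 0 and is never inserted into the soft limit tree.
 */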

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = memcg->nodeinfo[nid];
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return parent->nodeinfo[nid];
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	long x, threshold = MEMCG_CHARGE_BATCH;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/* Update memcg */
	__mod_memcg_state(memcg, idx, val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

	if (vmstat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > threshold)) {
		pg_data_t *pgdat = lruvec_pgdat(lruvec);
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
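
/*
 * The lruvec deltas above accumulate in a per-cpu counter and are only
 * propagated up the mem_cgroup_per_node hierarchy once their absolute
 * value exceeds MEMCG_CHARGE_BATCH (shifted to bytes for the byte-based
 * vmstat items), keeping atomics off the hot path in the common case.
 */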

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
			     int val)
{
	struct page *head = compound_head(page); /* rmap on tail pages */
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = page_memcg(head);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mod_lruvec_page_state);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/*
 * mod_objcg_mlstate() may be called with irq enabled, so
 * mod_memcg_lruvec_state() should be used.
 */
static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
				     struct pglist_data *pgdat,
				     enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return READ_ONCE(memcg->vmstats.events[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
	return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}
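
/*
 * With the targets defined above, mem_cgroup_threshold() is evaluated at
 * most once per THRESHOLDS_EVENTS_TARGET (128) page events per CPU, and
 * the soft limit tree is updated at most once per SOFTLIMIT_EVENTS_TARGET
 * (1024) page events per CPU.
 */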

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (in_interrupt())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}
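
/*
 * The active memcg is normally installed around an allocation with
 * set_active_memcg(), e.g. (illustrative only):
 *
 *	old = set_active_memcg(memcg);
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	set_active_memcg(old);
 *
 * so that the charge lands in that memcg instead of current's own memcg.
 */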

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = root->nodeinfo[reclaim->pgdat->node_id];
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}
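
/*
 * A partial walk must hand the last returned memcg back to
 * mem_cgroup_iter_break() so its css reference is dropped.  Illustrative
 * pattern (stop_here() is a placeholder for any early-exit condition):
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	while (memcg) {
 *		if (stop_here(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 */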

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from the cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = page_memcg(page);

	if (!memcg)
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
	else
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
}
#endif

/**
 * lock_page_lruvec - lock and return lruvec for a given page.
 * @page: the page
 *
 * These functions are safe to use under any of the following conditions:
 * - page locked
 * - PageLRU cleared
 * - lock_page_memcg()
 * - page->_refcount is zero
 */
struct lruvec *lock_page_lruvec(struct page *page)
{
	struct lruvec *lruvec;

	lruvec = mem_cgroup_page_lruvec(page);
	spin_lock(&lruvec->lru_lock);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct lruvec *lruvec;

	lruvec = mem_cgroup_page_lruvec(page);
	spin_lock_irq(&lruvec->lru_lock);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
{
	struct lruvec *lruvec;

	lruvec = mem_cgroup_page_lruvec(page);
	spin_lock_irqsave(&lruvec->lru_lock, *flags);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
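
/*
 * For example, with memory.max at 1000 pages and 900 pages charged the
 * margin is 100 pages; if memsw is accounted with a limit of 950 pages
 * and 920 pages charged, the smaller memsw margin of 30 pages wins.
 */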

/*
 * A routine for checking whether "memcg" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * a moving cgroup. This is used for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon", NR_ANON_MAPPED },
	{ "file", NR_FILE_PAGES },
	{ "kernel_stack", NR_KERNEL_STACK_KB },
	{ "pagetables", NR_PAGETABLE },
	{ "percpu", MEMCG_PERCPU_B },
	{ "sock", MEMCG_SOCK },
	{ "shmem", NR_SHMEM },
	{ "file_mapped", NR_FILE_MAPPED },
	{ "file_dirty", NR_FILE_DIRTY },
	{ "file_writeback", NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached", NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp", NR_ANON_THPS },
	{ "file_thp", NR_FILE_THPS },
	{ "shmem_thp", NR_SHMEM_THPS },
#endif
	{ "inactive_anon", NR_INACTIVE_ANON },
	{ "active_anon", NR_ACTIVE_ANON },
	{ "inactive_file", NR_INACTIVE_FILE },
	{ "active_file", NR_ACTIVE_FILE },
	{ "unevictable", NR_UNEVICTABLE },
	{ "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
};

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
						    int item)
{
	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}
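
/*
 * For example, NR_SLAB_RECLAIMABLE_B and MEMCG_PERCPU_B are already kept
 * in bytes (unit 1), the workingset items are plain event counts (unit 1),
 * NR_KERNEL_STACK_KB is kept in kilobytes (unit SZ_1K), and everything
 * else is kept in pages (unit PAGE_SIZE).
 */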

static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	cgroup_rstat_flush(memcg->css.cgroup);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(&s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	} else { /* v1 */
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so that
				 * we do not reclaim too much, nor so little
				 * that we keep coming back to reclaim from
				 * this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
						pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * This subtree of our hierarchy is already locked,
			 * so we cannot grab the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree, so we have
		 * to clean up what we already set up, down to the
		 * failing subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows, because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom().
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
				   unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make a forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
	if (mem_cgroup_out_of_memory(memcg, mask, order))
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);

	return ret;
}
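
/*
 * The OOM_ASYNC case above only records the OOM state in the task;
 * mem_cgroup_oom_synchronize() below picks that state up at the end of
 * the page fault, once all locks are dropped, and either waits for
 * userspace or invokes the OOM killer.
 */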

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1930 */ 1931 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1932 struct mem_cgroup *oom_domain) 1933 { 1934 struct mem_cgroup *oom_group = NULL; 1935 struct mem_cgroup *memcg; 1936 1937 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1938 return NULL; 1939 1940 if (!oom_domain) 1941 oom_domain = root_mem_cgroup; 1942 1943 rcu_read_lock(); 1944 1945 memcg = mem_cgroup_from_task(victim); 1946 if (memcg == root_mem_cgroup) 1947 goto out; 1948 1949 /* 1950 * If the victim task has been asynchronously moved to a different 1951 * memory cgroup, we might end up killing tasks outside oom_domain. 1952 * In this case it's better to ignore memory.group.oom. 1953 */ 1954 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1955 goto out; 1956 1957 /* 1958 * Traverse the memory cgroup hierarchy from the victim task's 1959 * cgroup up to the OOMing cgroup (or root) to find the 1960 * highest-level memory cgroup with oom.group set. 1961 */ 1962 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1963 if (memcg->oom_group) 1964 oom_group = memcg; 1965 1966 if (memcg == oom_domain) 1967 break; 1968 } 1969 1970 if (oom_group) 1971 css_get(&oom_group->css); 1972 out: 1973 rcu_read_unlock(); 1974 1975 return oom_group; 1976 } 1977 1978 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1979 { 1980 pr_info("Tasks in "); 1981 pr_cont_cgroup_path(memcg->css.cgroup); 1982 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1983 } 1984 1985 /** 1986 * lock_page_memcg - lock a page and memcg binding 1987 * @page: the page 1988 * 1989 * This function protects unlocked LRU pages from being moved to 1990 * another cgroup. 1991 * 1992 * It ensures lifetime of the locked memcg. Caller is responsible 1993 * for the lifetime of the page. 1994 */ 1995 void lock_page_memcg(struct page *page) 1996 { 1997 struct page *head = compound_head(page); /* rmap on tail pages */ 1998 struct mem_cgroup *memcg; 1999 unsigned long flags; 2000 2001 /* 2002 * The RCU lock is held throughout the transaction. The fast 2003 * path can get away without acquiring the memcg->move_lock 2004 * because page moving starts with an RCU grace period. 2005 */ 2006 rcu_read_lock(); 2007 2008 if (mem_cgroup_disabled()) 2009 return; 2010 again: 2011 memcg = page_memcg(head); 2012 if (unlikely(!memcg)) 2013 return; 2014 2015 #ifdef CONFIG_PROVE_LOCKING 2016 local_irq_save(flags); 2017 might_lock(&memcg->move_lock); 2018 local_irq_restore(flags); 2019 #endif 2020 2021 if (atomic_read(&memcg->moving_account) <= 0) 2022 return; 2023 2024 spin_lock_irqsave(&memcg->move_lock, flags); 2025 if (memcg != page_memcg(head)) { 2026 spin_unlock_irqrestore(&memcg->move_lock, flags); 2027 goto again; 2028 } 2029 2030 /* 2031 * When charge migration first begins, we can have multiple 2032 * critical sections holding the fast-path RCU lock and one 2033 * holding the slowpath move_lock. Track the task who has the 2034 * move_lock for unlock_page_memcg(). 
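 *
 * Note that __unlock_page_memcg() only drops move_lock when it finds
 * move_lock_task == current; fast-path holders that never took the
 * spinlock just release the RCU read lock on unlock.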
2035 */ 2036 memcg->move_lock_task = current; 2037 memcg->move_lock_flags = flags; 2038 } 2039 EXPORT_SYMBOL(lock_page_memcg); 2040 2041 static void __unlock_page_memcg(struct mem_cgroup *memcg) 2042 { 2043 if (memcg && memcg->move_lock_task == current) { 2044 unsigned long flags = memcg->move_lock_flags; 2045 2046 memcg->move_lock_task = NULL; 2047 memcg->move_lock_flags = 0; 2048 2049 spin_unlock_irqrestore(&memcg->move_lock, flags); 2050 } 2051 2052 rcu_read_unlock(); 2053 } 2054 2055 /** 2056 * unlock_page_memcg - unlock a page and memcg binding 2057 * @page: the page 2058 */ 2059 void unlock_page_memcg(struct page *page) 2060 { 2061 struct page *head = compound_head(page); 2062 2063 __unlock_page_memcg(page_memcg(head)); 2064 } 2065 EXPORT_SYMBOL(unlock_page_memcg); 2066 2067 struct obj_stock { 2068 #ifdef CONFIG_MEMCG_KMEM 2069 struct obj_cgroup *cached_objcg; 2070 struct pglist_data *cached_pgdat; 2071 unsigned int nr_bytes; 2072 int nr_slab_reclaimable_b; 2073 int nr_slab_unreclaimable_b; 2074 #else 2075 int dummy[0]; 2076 #endif 2077 }; 2078 2079 struct memcg_stock_pcp { 2080 struct mem_cgroup *cached; /* this never be root cgroup */ 2081 unsigned int nr_pages; 2082 struct obj_stock task_obj; 2083 struct obj_stock irq_obj; 2084 2085 struct work_struct work; 2086 unsigned long flags; 2087 #define FLUSHING_CACHED_CHARGE 0 2088 }; 2089 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2090 static DEFINE_MUTEX(percpu_charge_mutex); 2091 2092 #ifdef CONFIG_MEMCG_KMEM 2093 static void drain_obj_stock(struct obj_stock *stock); 2094 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2095 struct mem_cgroup *root_memcg); 2096 2097 #else 2098 static inline void drain_obj_stock(struct obj_stock *stock) 2099 { 2100 } 2101 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2102 struct mem_cgroup *root_memcg) 2103 { 2104 return false; 2105 } 2106 #endif 2107 2108 /* 2109 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable 2110 * sequence used in this case to access content from object stock is slow. 2111 * To optimize for user context access, there are now two object stocks for 2112 * task context and interrupt context access respectively. 2113 * 2114 * The task context object stock can be accessed by disabling preemption only 2115 * which is cheap in non-preempt kernel. The interrupt context object stock 2116 * can only be accessed after disabling interrupt. User context code can 2117 * access interrupt object stock, but not vice versa. 2118 */ 2119 static inline struct obj_stock *get_obj_stock(unsigned long *pflags) 2120 { 2121 struct memcg_stock_pcp *stock; 2122 2123 if (likely(in_task())) { 2124 *pflags = 0UL; 2125 preempt_disable(); 2126 stock = this_cpu_ptr(&memcg_stock); 2127 return &stock->task_obj; 2128 } 2129 2130 local_irq_save(*pflags); 2131 stock = this_cpu_ptr(&memcg_stock); 2132 return &stock->irq_obj; 2133 } 2134 2135 static inline void put_obj_stock(unsigned long flags) 2136 { 2137 if (likely(in_task())) 2138 preempt_enable(); 2139 else 2140 local_irq_restore(flags); 2141 } 2142 2143 /** 2144 * consume_stock: Try to consume stocked charge on this cpu. 2145 * @memcg: memcg to consume from. 2146 * @nr_pages: how many pages to charge. 2147 * 2148 * The charges will only happen if @memcg matches the current cpu's memcg 2149 * stock, and at least @nr_pages are available in that stock. Failure to 2150 * service an allocation will refill the stock. 2151 * 2152 * returns true if successful, false otherwise. 
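 *
 * For example, assuming a MEMCG_CHARGE_BATCH of 32 pages, a 64-page
 * request always falls through to the page counters, while a 4-page
 * request is served locally whenever the cached memcg matches and at
 * least 4 pages are stocked.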
2153 */ 2154 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2155 { 2156 struct memcg_stock_pcp *stock; 2157 unsigned long flags; 2158 bool ret = false; 2159 2160 if (nr_pages > MEMCG_CHARGE_BATCH) 2161 return ret; 2162 2163 local_irq_save(flags); 2164 2165 stock = this_cpu_ptr(&memcg_stock); 2166 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2167 stock->nr_pages -= nr_pages; 2168 ret = true; 2169 } 2170 2171 local_irq_restore(flags); 2172 2173 return ret; 2174 } 2175 2176 /* 2177 * Returns stocks cached in percpu and reset cached information. 2178 */ 2179 static void drain_stock(struct memcg_stock_pcp *stock) 2180 { 2181 struct mem_cgroup *old = stock->cached; 2182 2183 if (!old) 2184 return; 2185 2186 if (stock->nr_pages) { 2187 page_counter_uncharge(&old->memory, stock->nr_pages); 2188 if (do_memsw_account()) 2189 page_counter_uncharge(&old->memsw, stock->nr_pages); 2190 stock->nr_pages = 0; 2191 } 2192 2193 css_put(&old->css); 2194 stock->cached = NULL; 2195 } 2196 2197 static void drain_local_stock(struct work_struct *dummy) 2198 { 2199 struct memcg_stock_pcp *stock; 2200 unsigned long flags; 2201 2202 /* 2203 * The only protection from memory hotplug vs. drain_stock races is 2204 * that we always operate on local CPU stock here with IRQ disabled 2205 */ 2206 local_irq_save(flags); 2207 2208 stock = this_cpu_ptr(&memcg_stock); 2209 drain_obj_stock(&stock->irq_obj); 2210 if (in_task()) 2211 drain_obj_stock(&stock->task_obj); 2212 drain_stock(stock); 2213 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2214 2215 local_irq_restore(flags); 2216 } 2217 2218 /* 2219 * Cache charges(val) to local per_cpu area. 2220 * This will be consumed by consume_stock() function, later. 2221 */ 2222 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2223 { 2224 struct memcg_stock_pcp *stock; 2225 unsigned long flags; 2226 2227 local_irq_save(flags); 2228 2229 stock = this_cpu_ptr(&memcg_stock); 2230 if (stock->cached != memcg) { /* reset if necessary */ 2231 drain_stock(stock); 2232 css_get(&memcg->css); 2233 stock->cached = memcg; 2234 } 2235 stock->nr_pages += nr_pages; 2236 2237 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2238 drain_stock(stock); 2239 2240 local_irq_restore(flags); 2241 } 2242 2243 /* 2244 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2245 * of the hierarchy under it. 2246 */ 2247 static void drain_all_stock(struct mem_cgroup *root_memcg) 2248 { 2249 int cpu, curcpu; 2250 2251 /* If someone's already draining, avoid adding running more workers. */ 2252 if (!mutex_trylock(&percpu_charge_mutex)) 2253 return; 2254 /* 2255 * Notify other cpus that system-wide "drain" is running 2256 * We do not care about races with the cpu hotplug because cpu down 2257 * as well as workers from this path always operate on the local 2258 * per-cpu data. CPU up doesn't touch memcg_stock at all. 
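 *
 * The loop below drains the current CPU synchronously via
 * drain_local_stock() and defers every other CPU to its own work item;
 * the FLUSHING_CACHED_CHARGE bit ensures at most one drain is queued
 * per CPU at a time.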
2259 */ 2260 curcpu = get_cpu(); 2261 for_each_online_cpu(cpu) { 2262 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2263 struct mem_cgroup *memcg; 2264 bool flush = false; 2265 2266 rcu_read_lock(); 2267 memcg = stock->cached; 2268 if (memcg && stock->nr_pages && 2269 mem_cgroup_is_descendant(memcg, root_memcg)) 2270 flush = true; 2271 if (obj_stock_flush_required(stock, root_memcg)) 2272 flush = true; 2273 rcu_read_unlock(); 2274 2275 if (flush && 2276 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2277 if (cpu == curcpu) 2278 drain_local_stock(&stock->work); 2279 else 2280 schedule_work_on(cpu, &stock->work); 2281 } 2282 } 2283 put_cpu(); 2284 mutex_unlock(&percpu_charge_mutex); 2285 } 2286 2287 static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu) 2288 { 2289 int nid; 2290 2291 for_each_node(nid) { 2292 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 2293 unsigned long stat[NR_VM_NODE_STAT_ITEMS]; 2294 struct batched_lruvec_stat *lstatc; 2295 int i; 2296 2297 lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu); 2298 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 2299 stat[i] = lstatc->count[i]; 2300 lstatc->count[i] = 0; 2301 } 2302 2303 do { 2304 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 2305 atomic_long_add(stat[i], &pn->lruvec_stat[i]); 2306 } while ((pn = parent_nodeinfo(pn, nid))); 2307 } 2308 } 2309 2310 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2311 { 2312 struct memcg_stock_pcp *stock; 2313 struct mem_cgroup *memcg; 2314 2315 stock = &per_cpu(memcg_stock, cpu); 2316 drain_stock(stock); 2317 2318 for_each_mem_cgroup(memcg) 2319 memcg_flush_lruvec_page_state(memcg, cpu); 2320 2321 return 0; 2322 } 2323 2324 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2325 unsigned int nr_pages, 2326 gfp_t gfp_mask) 2327 { 2328 unsigned long nr_reclaimed = 0; 2329 2330 do { 2331 unsigned long pflags; 2332 2333 if (page_counter_read(&memcg->memory) <= 2334 READ_ONCE(memcg->memory.high)) 2335 continue; 2336 2337 memcg_memory_event(memcg, MEMCG_HIGH); 2338 2339 psi_memstall_enter(&pflags); 2340 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2341 gfp_mask, true); 2342 psi_memstall_leave(&pflags); 2343 } while ((memcg = parent_mem_cgroup(memcg)) && 2344 !mem_cgroup_is_root(memcg)); 2345 2346 return nr_reclaimed; 2347 } 2348 2349 static void high_work_func(struct work_struct *work) 2350 { 2351 struct mem_cgroup *memcg; 2352 2353 memcg = container_of(work, struct mem_cgroup, high_work); 2354 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2355 } 2356 2357 /* 2358 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2359 * enough to still cause a significant slowdown in most cases, while still 2360 * allowing diagnostics and tracing to proceed without becoming stuck. 2361 */ 2362 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2363 2364 /* 2365 * When calculating the delay, we use these either side of the exponentiation to 2366 * maintain precision and scale to a reasonable number of jiffies (see the table 2367 * below. 2368 * 2369 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2370 * overage ratio to a delay. 2371 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2372 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2373 * to produce a reasonable delay curve. 
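 *
 * Putting the two shifts together, calculate_overage() and
 * calculate_high_delay() below effectively compute (before the
 * per-task nr_pages scaling):
 *
 *	overage = ((usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high
 *	penalty_jiffies = (overage * overage * HZ)
 *		>> MEMCG_DELAY_PRECISION_SHIFT
 *		>> MEMCG_DELAY_SCALING_SHIFT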
2374 * 2375 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2376 * reasonable delay curve compared to precision-adjusted overage, not 2377 * penalising heavily at first, but still making sure that growth beyond the 2378 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2379 * example, with a high of 100 megabytes: 2380 * 2381 * +-------+------------------------+ 2382 * | usage | time to allocate in ms | 2383 * +-------+------------------------+ 2384 * | 100M | 0 | 2385 * | 101M | 6 | 2386 * | 102M | 25 | 2387 * | 103M | 57 | 2388 * | 104M | 102 | 2389 * | 105M | 159 | 2390 * | 106M | 230 | 2391 * | 107M | 313 | 2392 * | 108M | 409 | 2393 * | 109M | 518 | 2394 * | 110M | 639 | 2395 * | 111M | 774 | 2396 * | 112M | 921 | 2397 * | 113M | 1081 | 2398 * | 114M | 1254 | 2399 * | 115M | 1439 | 2400 * | 116M | 1638 | 2401 * | 117M | 1849 | 2402 * | 118M | 2000 | 2403 * | 119M | 2000 | 2404 * | 120M | 2000 | 2405 * +-------+------------------------+ 2406 */ 2407 #define MEMCG_DELAY_PRECISION_SHIFT 20 2408 #define MEMCG_DELAY_SCALING_SHIFT 14 2409 2410 static u64 calculate_overage(unsigned long usage, unsigned long high) 2411 { 2412 u64 overage; 2413 2414 if (usage <= high) 2415 return 0; 2416 2417 /* 2418 * Prevent division by 0 in overage calculation by acting as if 2419 * it was a threshold of 1 page 2420 */ 2421 high = max(high, 1UL); 2422 2423 overage = usage - high; 2424 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2425 return div64_u64(overage, high); 2426 } 2427 2428 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2429 { 2430 u64 overage, max_overage = 0; 2431 2432 do { 2433 overage = calculate_overage(page_counter_read(&memcg->memory), 2434 READ_ONCE(memcg->memory.high)); 2435 max_overage = max(overage, max_overage); 2436 } while ((memcg = parent_mem_cgroup(memcg)) && 2437 !mem_cgroup_is_root(memcg)); 2438 2439 return max_overage; 2440 } 2441 2442 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2443 { 2444 u64 overage, max_overage = 0; 2445 2446 do { 2447 overage = calculate_overage(page_counter_read(&memcg->swap), 2448 READ_ONCE(memcg->swap.high)); 2449 if (overage) 2450 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2451 max_overage = max(overage, max_overage); 2452 } while ((memcg = parent_mem_cgroup(memcg)) && 2453 !mem_cgroup_is_root(memcg)); 2454 2455 return max_overage; 2456 } 2457 2458 /* 2459 * Get the number of jiffies that we should penalise a mischievous cgroup which 2460 * is exceeding its memory.high by checking both it and its ancestors. 2461 */ 2462 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2463 unsigned int nr_pages, 2464 u64 max_overage) 2465 { 2466 unsigned long penalty_jiffies; 2467 2468 if (!max_overage) 2469 return 0; 2470 2471 /* 2472 * We use overage compared to memory.high to calculate the number of 2473 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2474 * fairly lenient on small overages, and increasingly harsh when the 2475 * memcg in question makes it clear that it has no intention of stopping 2476 * its crazy behaviour, so we exponentially increase the delay based on 2477 * overage amount. 2478 */ 2479 penalty_jiffies = max_overage * max_overage * HZ; 2480 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2481 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2482 2483 /* 2484 * Factor in the task's own contribution to the overage, such that four 2485 * N-sized allocations are throttled approximately the same as one 2486 * 4N-sized allocation. 
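 *
 * As a rough worked example, assuming HZ=1000 and a MEMCG_CHARGE_BATCH
 * of 32 pages: at 104M of usage against a 100M high, the 4% overage
 * maps to ~102 jiffies of penalty per batch (matching the table
 * above); a task that only charged 8 pages in this batch would then
 * sleep for about a quarter of that, ~25 jiffies.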
2487 * 2488 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2489 * larger the current charge patch is than that. 2490 */ 2491 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2492 } 2493 2494 /* 2495 * Scheduled by try_charge() to be executed from the userland return path 2496 * and reclaims memory over the high limit. 2497 */ 2498 void mem_cgroup_handle_over_high(void) 2499 { 2500 unsigned long penalty_jiffies; 2501 unsigned long pflags; 2502 unsigned long nr_reclaimed; 2503 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2504 int nr_retries = MAX_RECLAIM_RETRIES; 2505 struct mem_cgroup *memcg; 2506 bool in_retry = false; 2507 2508 if (likely(!nr_pages)) 2509 return; 2510 2511 memcg = get_mem_cgroup_from_mm(current->mm); 2512 current->memcg_nr_pages_over_high = 0; 2513 2514 retry_reclaim: 2515 /* 2516 * The allocating task should reclaim at least the batch size, but for 2517 * subsequent retries we only want to do what's necessary to prevent oom 2518 * or breaching resource isolation. 2519 * 2520 * This is distinct from memory.max or page allocator behaviour because 2521 * memory.high is currently batched, whereas memory.max and the page 2522 * allocator run every time an allocation is made. 2523 */ 2524 nr_reclaimed = reclaim_high(memcg, 2525 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2526 GFP_KERNEL); 2527 2528 /* 2529 * memory.high is breached and reclaim is unable to keep up. Throttle 2530 * allocators proactively to slow down excessive growth. 2531 */ 2532 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2533 mem_find_max_overage(memcg)); 2534 2535 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2536 swap_find_max_overage(memcg)); 2537 2538 /* 2539 * Clamp the max delay per usermode return so as to still keep the 2540 * application moving forwards and also permit diagnostics, albeit 2541 * extremely slowly. 2542 */ 2543 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2544 2545 /* 2546 * Don't sleep if the amount of jiffies this memcg owes us is so low 2547 * that it's not even worth doing, in an attempt to be nice to those who 2548 * go only a small amount over their memory.high value and maybe haven't 2549 * been aggressively reclaimed enough yet. 2550 */ 2551 if (penalty_jiffies <= HZ / 100) 2552 goto out; 2553 2554 /* 2555 * If reclaim is making forward progress but we're still over 2556 * memory.high, we want to encourage that rather than doing allocator 2557 * throttling. 2558 */ 2559 if (nr_reclaimed || nr_retries--) { 2560 in_retry = true; 2561 goto retry_reclaim; 2562 } 2563 2564 /* 2565 * If we exit early, we're guaranteed to die (since 2566 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2567 * need to account for any ill-begotten jiffies to pay them off later. 
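 *
 * Put differently: schedule_timeout_killable() below only returns
 * early when a fatal signal is pending, and in that case the task is
 * about to exit anyway, so there is nothing left worth throttling.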
2568 */ 2569 psi_memstall_enter(&pflags); 2570 schedule_timeout_killable(penalty_jiffies); 2571 psi_memstall_leave(&pflags); 2572 2573 out: 2574 css_put(&memcg->css); 2575 } 2576 2577 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2578 unsigned int nr_pages) 2579 { 2580 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2581 int nr_retries = MAX_RECLAIM_RETRIES; 2582 struct mem_cgroup *mem_over_limit; 2583 struct page_counter *counter; 2584 enum oom_status oom_status; 2585 unsigned long nr_reclaimed; 2586 bool may_swap = true; 2587 bool drained = false; 2588 unsigned long pflags; 2589 2590 retry: 2591 if (consume_stock(memcg, nr_pages)) 2592 return 0; 2593 2594 if (!do_memsw_account() || 2595 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2596 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2597 goto done_restock; 2598 if (do_memsw_account()) 2599 page_counter_uncharge(&memcg->memsw, batch); 2600 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2601 } else { 2602 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2603 may_swap = false; 2604 } 2605 2606 if (batch > nr_pages) { 2607 batch = nr_pages; 2608 goto retry; 2609 } 2610 2611 /* 2612 * Memcg doesn't have a dedicated reserve for atomic 2613 * allocations. But like the global atomic pool, we need to 2614 * put the burden of reclaim on regular allocation requests 2615 * and let these go through as privileged allocations. 2616 */ 2617 if (gfp_mask & __GFP_ATOMIC) 2618 goto force; 2619 2620 /* 2621 * Unlike in global OOM situations, memcg is not in a physical 2622 * memory shortage. Allow dying and OOM-killed tasks to 2623 * bypass the last charges so that they can exit quickly and 2624 * free their memory. 2625 */ 2626 if (unlikely(should_force_charge())) 2627 goto force; 2628 2629 /* 2630 * Prevent unbounded recursion when reclaim operations need to 2631 * allocate memory. This might exceed the limits temporarily, 2632 * but we prefer facilitating memory reclaim and getting back 2633 * under the limit over triggering OOM kills in these cases. 2634 */ 2635 if (unlikely(current->flags & PF_MEMALLOC)) 2636 goto force; 2637 2638 if (unlikely(task_in_memcg_oom(current))) 2639 goto nomem; 2640 2641 if (!gfpflags_allow_blocking(gfp_mask)) 2642 goto nomem; 2643 2644 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2645 2646 psi_memstall_enter(&pflags); 2647 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2648 gfp_mask, may_swap); 2649 psi_memstall_leave(&pflags); 2650 2651 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2652 goto retry; 2653 2654 if (!drained) { 2655 drain_all_stock(mem_over_limit); 2656 drained = true; 2657 goto retry; 2658 } 2659 2660 if (gfp_mask & __GFP_NORETRY) 2661 goto nomem; 2662 /* 2663 * Even though the limit is exceeded at this point, reclaim 2664 * may have been able to free some pages. Retry the charge 2665 * before killing the task. 2666 * 2667 * Only for regular pages, though: huge pages are rather 2668 * unlikely to succeed so close to the limit, and we fall back 2669 * to regular pages anyway in case of failure. 2670 */ 2671 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2672 goto retry; 2673 /* 2674 * At task move, charge accounts can be doubly counted. So, it's 2675 * better to wait until the end of task_move if something is going on. 
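 *
 * If such a move is in flight, mem_cgroup_wait_acct_move() below
 * sleeps on mc.waitq until the move completes and then lets us retry
 * the charge.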
2676 */ 2677 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2678 goto retry; 2679 2680 if (nr_retries--) 2681 goto retry; 2682 2683 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2684 goto nomem; 2685 2686 if (fatal_signal_pending(current)) 2687 goto force; 2688 2689 /* 2690 * keep retrying as long as the memcg oom killer is able to make 2691 * a forward progress or bypass the charge if the oom killer 2692 * couldn't make any progress. 2693 */ 2694 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2695 get_order(nr_pages * PAGE_SIZE)); 2696 switch (oom_status) { 2697 case OOM_SUCCESS: 2698 nr_retries = MAX_RECLAIM_RETRIES; 2699 goto retry; 2700 case OOM_FAILED: 2701 goto force; 2702 default: 2703 goto nomem; 2704 } 2705 nomem: 2706 if (!(gfp_mask & __GFP_NOFAIL)) 2707 return -ENOMEM; 2708 force: 2709 /* 2710 * The allocation either can't fail or will lead to more memory 2711 * being freed very soon. Allow memory usage go over the limit 2712 * temporarily by force charging it. 2713 */ 2714 page_counter_charge(&memcg->memory, nr_pages); 2715 if (do_memsw_account()) 2716 page_counter_charge(&memcg->memsw, nr_pages); 2717 2718 return 0; 2719 2720 done_restock: 2721 if (batch > nr_pages) 2722 refill_stock(memcg, batch - nr_pages); 2723 2724 /* 2725 * If the hierarchy is above the normal consumption range, schedule 2726 * reclaim on returning to userland. We can perform reclaim here 2727 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2728 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2729 * not recorded as it most likely matches current's and won't 2730 * change in the meantime. As high limit is checked again before 2731 * reclaim, the cost of mismatch is negligible. 2732 */ 2733 do { 2734 bool mem_high, swap_high; 2735 2736 mem_high = page_counter_read(&memcg->memory) > 2737 READ_ONCE(memcg->memory.high); 2738 swap_high = page_counter_read(&memcg->swap) > 2739 READ_ONCE(memcg->swap.high); 2740 2741 /* Don't bother a random interrupted task */ 2742 if (in_interrupt()) { 2743 if (mem_high) { 2744 schedule_work(&memcg->high_work); 2745 break; 2746 } 2747 continue; 2748 } 2749 2750 if (mem_high || swap_high) { 2751 /* 2752 * The allocating tasks in this cgroup will need to do 2753 * reclaim or be throttled to prevent further growth 2754 * of the memory or swap footprints. 2755 * 2756 * Target some best-effort fairness between the tasks, 2757 * and distribute reclaim work and delay penalties 2758 * based on how much each task is actually allocating. 
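 *
 * The pages accumulated in memcg_nr_pages_over_high are consumed by
 * mem_cgroup_handle_over_high() on the way back to userland (hence
 * set_notify_resume() below), which does the reclaim and applies the
 * delay penalty described earlier.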
2759 */ 2760 current->memcg_nr_pages_over_high += batch; 2761 set_notify_resume(current); 2762 break; 2763 } 2764 } while ((memcg = parent_mem_cgroup(memcg))); 2765 2766 return 0; 2767 } 2768 2769 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2770 unsigned int nr_pages) 2771 { 2772 if (mem_cgroup_is_root(memcg)) 2773 return 0; 2774 2775 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2776 } 2777 2778 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2779 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2780 { 2781 if (mem_cgroup_is_root(memcg)) 2782 return; 2783 2784 page_counter_uncharge(&memcg->memory, nr_pages); 2785 if (do_memsw_account()) 2786 page_counter_uncharge(&memcg->memsw, nr_pages); 2787 } 2788 #endif 2789 2790 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2791 { 2792 VM_BUG_ON_PAGE(page_memcg(page), page); 2793 /* 2794 * Any of the following ensures page's memcg stability: 2795 * 2796 * - the page lock 2797 * - LRU isolation 2798 * - lock_page_memcg() 2799 * - exclusive reference 2800 */ 2801 page->memcg_data = (unsigned long)memcg; 2802 } 2803 2804 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) 2805 { 2806 struct mem_cgroup *memcg; 2807 2808 rcu_read_lock(); 2809 retry: 2810 memcg = obj_cgroup_memcg(objcg); 2811 if (unlikely(!css_tryget(&memcg->css))) 2812 goto retry; 2813 rcu_read_unlock(); 2814 2815 return memcg; 2816 } 2817 2818 #ifdef CONFIG_MEMCG_KMEM 2819 /* 2820 * The allocated objcg pointers array is not accounted directly. 2821 * Moreover, it should not come from DMA buffer and is not readily 2822 * reclaimable. So those GFP bits should be masked off. 2823 */ 2824 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) 2825 2826 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2827 gfp_t gfp, bool new_page) 2828 { 2829 unsigned int objects = objs_per_slab_page(s, page); 2830 unsigned long memcg_data; 2831 void *vec; 2832 2833 gfp &= ~OBJCGS_CLEAR_MASK; 2834 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2835 page_to_nid(page)); 2836 if (!vec) 2837 return -ENOMEM; 2838 2839 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; 2840 if (new_page) { 2841 /* 2842 * If the slab page is brand new and nobody can yet access 2843 * it's memcg_data, no synchronization is required and 2844 * memcg_data can be simply assigned. 2845 */ 2846 page->memcg_data = memcg_data; 2847 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) { 2848 /* 2849 * If the slab page is already in use, somebody can allocate 2850 * and assign obj_cgroups in parallel. In this case the existing 2851 * objcg vector should be reused. 2852 */ 2853 kfree(vec); 2854 return 0; 2855 } 2856 2857 kmemleak_not_leak(vec); 2858 return 0; 2859 } 2860 2861 /* 2862 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2863 * 2864 * A passed kernel object can be a slab object or a generic kernel page, so 2865 * different mechanisms for getting the memory cgroup pointer should be used. 2866 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 2867 * can not know for sure how the kernel object is implemented. 2868 * mem_cgroup_from_obj() can be safely used in such cases. 2869 * 2870 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2871 * cgroup_mutex, etc. 
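 *
 * A minimal, illustrative usage sketch (the pr_debug() consumer is
 * made up for the example):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(ptr);
 *	if (memcg)
 *		pr_debug("charged to %s\n",
 *			 cgroup_name(memcg->css.cgroup));
 *	rcu_read_unlock();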
2872 */ 2873 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2874 { 2875 struct page *page; 2876 2877 if (mem_cgroup_disabled()) 2878 return NULL; 2879 2880 page = virt_to_head_page(p); 2881 2882 /* 2883 * Slab objects are accounted individually, not per-page. 2884 * Memcg membership data for each individual object is saved in 2885 * the page->obj_cgroups. 2886 */ 2887 if (page_objcgs_check(page)) { 2888 struct obj_cgroup *objcg; 2889 unsigned int off; 2890 2891 off = obj_to_index(page->slab_cache, page, p); 2892 objcg = page_objcgs(page)[off]; 2893 if (objcg) 2894 return obj_cgroup_memcg(objcg); 2895 2896 return NULL; 2897 } 2898 2899 /* 2900 * page_memcg_check() is used here, because page_has_obj_cgroups() 2901 * check above could fail because the object cgroups vector wasn't set 2902 * at that moment, but it can be set concurrently. 2903 * page_memcg_check(page) will guarantee that a proper memory 2904 * cgroup pointer or NULL will be returned. 2905 */ 2906 return page_memcg_check(page); 2907 } 2908 2909 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2910 { 2911 struct obj_cgroup *objcg = NULL; 2912 struct mem_cgroup *memcg; 2913 2914 if (memcg_kmem_bypass()) 2915 return NULL; 2916 2917 rcu_read_lock(); 2918 if (unlikely(active_memcg())) 2919 memcg = active_memcg(); 2920 else 2921 memcg = mem_cgroup_from_task(current); 2922 2923 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2924 objcg = rcu_dereference(memcg->objcg); 2925 if (objcg && obj_cgroup_tryget(objcg)) 2926 break; 2927 objcg = NULL; 2928 } 2929 rcu_read_unlock(); 2930 2931 return objcg; 2932 } 2933 2934 static int memcg_alloc_cache_id(void) 2935 { 2936 int id, size; 2937 int err; 2938 2939 id = ida_simple_get(&memcg_cache_ida, 2940 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2941 if (id < 0) 2942 return id; 2943 2944 if (id < memcg_nr_cache_ids) 2945 return id; 2946 2947 /* 2948 * There's no space for the new id in memcg_caches arrays, 2949 * so we have to grow them. 2950 */ 2951 down_write(&memcg_cache_ids_sem); 2952 2953 size = 2 * (id + 1); 2954 if (size < MEMCG_CACHES_MIN_SIZE) 2955 size = MEMCG_CACHES_MIN_SIZE; 2956 else if (size > MEMCG_CACHES_MAX_SIZE) 2957 size = MEMCG_CACHES_MAX_SIZE; 2958 2959 err = memcg_update_all_list_lrus(size); 2960 if (!err) 2961 memcg_nr_cache_ids = size; 2962 2963 up_write(&memcg_cache_ids_sem); 2964 2965 if (err) { 2966 ida_simple_remove(&memcg_cache_ida, id); 2967 return err; 2968 } 2969 return id; 2970 } 2971 2972 static void memcg_free_cache_id(int id) 2973 { 2974 ida_simple_remove(&memcg_cache_ida, id); 2975 } 2976 2977 /* 2978 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 2979 * @objcg: object cgroup to uncharge 2980 * @nr_pages: number of pages to uncharge 2981 */ 2982 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 2983 unsigned int nr_pages) 2984 { 2985 struct mem_cgroup *memcg; 2986 2987 memcg = get_mem_cgroup_from_objcg(objcg); 2988 2989 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2990 page_counter_uncharge(&memcg->kmem, nr_pages); 2991 refill_stock(memcg, nr_pages); 2992 2993 css_put(&memcg->css); 2994 } 2995 2996 /* 2997 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 2998 * @objcg: object cgroup to charge 2999 * @gfp: reclaim mode 3000 * @nr_pages: number of pages to charge 3001 * 3002 * Returns 0 on success, an error code on failure. 
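 *
 * On cgroup1 the pages are additionally tracked in the dedicated kmem
 * counter below; __GFP_NOFAIL charges are allowed to overrun that
 * counter instead of failing.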
3003 */ 3004 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 3005 unsigned int nr_pages) 3006 { 3007 struct page_counter *counter; 3008 struct mem_cgroup *memcg; 3009 int ret; 3010 3011 memcg = get_mem_cgroup_from_objcg(objcg); 3012 3013 ret = try_charge_memcg(memcg, gfp, nr_pages); 3014 if (ret) 3015 goto out; 3016 3017 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3018 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3019 3020 /* 3021 * Enforce __GFP_NOFAIL allocation because callers are not 3022 * prepared to see failures and likely do not have any failure 3023 * handling code. 3024 */ 3025 if (gfp & __GFP_NOFAIL) { 3026 page_counter_charge(&memcg->kmem, nr_pages); 3027 goto out; 3028 } 3029 cancel_charge(memcg, nr_pages); 3030 ret = -ENOMEM; 3031 } 3032 out: 3033 css_put(&memcg->css); 3034 3035 return ret; 3036 } 3037 3038 /** 3039 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3040 * @page: page to charge 3041 * @gfp: reclaim mode 3042 * @order: allocation order 3043 * 3044 * Returns 0 on success, an error code on failure. 3045 */ 3046 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3047 { 3048 struct obj_cgroup *objcg; 3049 int ret = 0; 3050 3051 objcg = get_obj_cgroup_from_current(); 3052 if (objcg) { 3053 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 3054 if (!ret) { 3055 page->memcg_data = (unsigned long)objcg | 3056 MEMCG_DATA_KMEM; 3057 return 0; 3058 } 3059 obj_cgroup_put(objcg); 3060 } 3061 return ret; 3062 } 3063 3064 /** 3065 * __memcg_kmem_uncharge_page: uncharge a kmem page 3066 * @page: page to uncharge 3067 * @order: allocation order 3068 */ 3069 void __memcg_kmem_uncharge_page(struct page *page, int order) 3070 { 3071 struct obj_cgroup *objcg; 3072 unsigned int nr_pages = 1 << order; 3073 3074 if (!PageMemcgKmem(page)) 3075 return; 3076 3077 objcg = __page_objcg(page); 3078 obj_cgroup_uncharge_pages(objcg, nr_pages); 3079 page->memcg_data = 0; 3080 obj_cgroup_put(objcg); 3081 } 3082 3083 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 3084 enum node_stat_item idx, int nr) 3085 { 3086 unsigned long flags; 3087 struct obj_stock *stock = get_obj_stock(&flags); 3088 int *bytes; 3089 3090 /* 3091 * Save vmstat data in stock and skip vmstat array update unless 3092 * accumulating over a page of vmstat data or when pgdat or idx 3093 * changes. 3094 */ 3095 if (stock->cached_objcg != objcg) { 3096 drain_obj_stock(stock); 3097 obj_cgroup_get(objcg); 3098 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3099 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3100 stock->cached_objcg = objcg; 3101 stock->cached_pgdat = pgdat; 3102 } else if (stock->cached_pgdat != pgdat) { 3103 /* Flush the existing cached vmstat data */ 3104 if (stock->nr_slab_reclaimable_b) { 3105 mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B, 3106 stock->nr_slab_reclaimable_b); 3107 stock->nr_slab_reclaimable_b = 0; 3108 } 3109 if (stock->nr_slab_unreclaimable_b) { 3110 mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B, 3111 stock->nr_slab_unreclaimable_b); 3112 stock->nr_slab_unreclaimable_b = 0; 3113 } 3114 stock->cached_pgdat = pgdat; 3115 } 3116 3117 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 3118 : &stock->nr_slab_unreclaimable_b; 3119 /* 3120 * Even for large object >= PAGE_SIZE, the vmstat data will still be 3121 * cached locally at least once before pushing it out. 
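 *
 * For example, with 4K pages, three consecutive +1500 byte updates to
 * the same pgdat/idx: the first two stay in the stock (1500, then 3000
 * bytes), the third brings the total to 4500 > PAGE_SIZE and flushes
 * all 4500 bytes at once.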
3122 */ 3123 if (!*bytes) { 3124 *bytes = nr; 3125 nr = 0; 3126 } else { 3127 *bytes += nr; 3128 if (abs(*bytes) > PAGE_SIZE) { 3129 nr = *bytes; 3130 *bytes = 0; 3131 } else { 3132 nr = 0; 3133 } 3134 } 3135 if (nr) 3136 mod_objcg_mlstate(objcg, pgdat, idx, nr); 3137 3138 put_obj_stock(flags); 3139 } 3140 3141 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3142 { 3143 unsigned long flags; 3144 struct obj_stock *stock = get_obj_stock(&flags); 3145 bool ret = false; 3146 3147 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3148 stock->nr_bytes -= nr_bytes; 3149 ret = true; 3150 } 3151 3152 put_obj_stock(flags); 3153 3154 return ret; 3155 } 3156 3157 static void drain_obj_stock(struct obj_stock *stock) 3158 { 3159 struct obj_cgroup *old = stock->cached_objcg; 3160 3161 if (!old) 3162 return; 3163 3164 if (stock->nr_bytes) { 3165 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3166 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3167 3168 if (nr_pages) 3169 obj_cgroup_uncharge_pages(old, nr_pages); 3170 3171 /* 3172 * The leftover is flushed to the centralized per-memcg value. 3173 * On the next attempt to refill obj stock it will be moved 3174 * to a per-cpu stock (probably, on an other CPU), see 3175 * refill_obj_stock(). 3176 * 3177 * How often it's flushed is a trade-off between the memory 3178 * limit enforcement accuracy and potential CPU contention, 3179 * so it might be changed in the future. 3180 */ 3181 atomic_add(nr_bytes, &old->nr_charged_bytes); 3182 stock->nr_bytes = 0; 3183 } 3184 3185 /* 3186 * Flush the vmstat data in current stock 3187 */ 3188 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3189 if (stock->nr_slab_reclaimable_b) { 3190 mod_objcg_mlstate(old, stock->cached_pgdat, 3191 NR_SLAB_RECLAIMABLE_B, 3192 stock->nr_slab_reclaimable_b); 3193 stock->nr_slab_reclaimable_b = 0; 3194 } 3195 if (stock->nr_slab_unreclaimable_b) { 3196 mod_objcg_mlstate(old, stock->cached_pgdat, 3197 NR_SLAB_UNRECLAIMABLE_B, 3198 stock->nr_slab_unreclaimable_b); 3199 stock->nr_slab_unreclaimable_b = 0; 3200 } 3201 stock->cached_pgdat = NULL; 3202 } 3203 3204 obj_cgroup_put(old); 3205 stock->cached_objcg = NULL; 3206 } 3207 3208 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3209 struct mem_cgroup *root_memcg) 3210 { 3211 struct mem_cgroup *memcg; 3212 3213 if (in_task() && stock->task_obj.cached_objcg) { 3214 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg); 3215 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3216 return true; 3217 } 3218 if (stock->irq_obj.cached_objcg) { 3219 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg); 3220 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3221 return true; 3222 } 3223 3224 return false; 3225 } 3226 3227 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 3228 bool allow_uncharge) 3229 { 3230 unsigned long flags; 3231 struct obj_stock *stock = get_obj_stock(&flags); 3232 unsigned int nr_pages = 0; 3233 3234 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3235 drain_obj_stock(stock); 3236 obj_cgroup_get(objcg); 3237 stock->cached_objcg = objcg; 3238 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3239 ? 
atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3240 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3241 } 3242 stock->nr_bytes += nr_bytes; 3243 3244 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3245 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3246 stock->nr_bytes &= (PAGE_SIZE - 1); 3247 } 3248 3249 put_obj_stock(flags); 3250 3251 if (nr_pages) 3252 obj_cgroup_uncharge_pages(objcg, nr_pages); 3253 } 3254 3255 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3256 { 3257 unsigned int nr_pages, nr_bytes; 3258 int ret; 3259 3260 if (consume_obj_stock(objcg, size)) 3261 return 0; 3262 3263 /* 3264 * In theory, objcg->nr_charged_bytes can have enough 3265 * pre-charged bytes to satisfy the allocation. However, 3266 * flushing objcg->nr_charged_bytes requires two atomic 3267 * operations, and objcg->nr_charged_bytes can't be big. 3268 * The shared objcg->nr_charged_bytes can also become a 3269 * performance bottleneck if all tasks of the same memcg are 3270 * trying to update it. So it's better to ignore it and try 3271 * grab some new pages. The stock's nr_bytes will be flushed to 3272 * objcg->nr_charged_bytes later on when objcg changes. 3273 * 3274 * The stock's nr_bytes may contain enough pre-charged bytes 3275 * to allow one less page from being charged, but we can't rely 3276 * on the pre-charged bytes not being changed outside of 3277 * consume_obj_stock() or refill_obj_stock(). So ignore those 3278 * pre-charged bytes as well when charging pages. To avoid a 3279 * page uncharge right after a page charge, we set the 3280 * allow_uncharge flag to false when calling refill_obj_stock() 3281 * to temporarily allow the pre-charged bytes to exceed the page 3282 * size limit. The maximum reachable value of the pre-charged 3283 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3284 * race. 3285 */ 3286 nr_pages = size >> PAGE_SHIFT; 3287 nr_bytes = size & (PAGE_SIZE - 1); 3288 3289 if (nr_bytes) 3290 nr_pages += 1; 3291 3292 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3293 if (!ret && nr_bytes) 3294 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 3295 3296 return ret; 3297 } 3298 3299 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3300 { 3301 refill_obj_stock(objcg, size, true); 3302 } 3303 3304 #endif /* CONFIG_MEMCG_KMEM */ 3305 3306 /* 3307 * Because page_memcg(head) is not set on tails, set it now. 3308 */ 3309 void split_page_memcg(struct page *head, unsigned int nr) 3310 { 3311 struct mem_cgroup *memcg = page_memcg(head); 3312 int i; 3313 3314 if (mem_cgroup_disabled() || !memcg) 3315 return; 3316 3317 for (i = 1; i < nr; i++) 3318 head[i].memcg_data = head->memcg_data; 3319 3320 if (PageMemcgKmem(head)) 3321 obj_cgroup_get_many(__page_objcg(head), nr - 1); 3322 else 3323 css_get_many(&memcg->css, nr - 1); 3324 } 3325 3326 #ifdef CONFIG_MEMCG_SWAP 3327 /** 3328 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3329 * @entry: swap entry to be moved 3330 * @from: mem_cgroup which the entry is moved from 3331 * @to: mem_cgroup which the entry is moved to 3332 * 3333 * It succeeds only when the swap_cgroup's record for this entry is the same 3334 * as the mem_cgroup's id of @from. 3335 * 3336 * Returns 0 on success, -EINVAL on failure. 3337 * 3338 * The caller must have charged to @to, IOW, called page_counter_charge() about 3339 * both res and memsw, and called css_get(). 
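 *
 * Internally this is a single swap_cgroup_cmpxchg() of the entry's
 * record from @from's id to @to's id; on success the MEMCG_SWAP state
 * of both cgroups is adjusted by one entry.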
3340 */ 3341 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3342 struct mem_cgroup *from, struct mem_cgroup *to) 3343 { 3344 unsigned short old_id, new_id; 3345 3346 old_id = mem_cgroup_id(from); 3347 new_id = mem_cgroup_id(to); 3348 3349 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3350 mod_memcg_state(from, MEMCG_SWAP, -1); 3351 mod_memcg_state(to, MEMCG_SWAP, 1); 3352 return 0; 3353 } 3354 return -EINVAL; 3355 } 3356 #else 3357 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3358 struct mem_cgroup *from, struct mem_cgroup *to) 3359 { 3360 return -EINVAL; 3361 } 3362 #endif 3363 3364 static DEFINE_MUTEX(memcg_max_mutex); 3365 3366 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3367 unsigned long max, bool memsw) 3368 { 3369 bool enlarge = false; 3370 bool drained = false; 3371 int ret; 3372 bool limits_invariant; 3373 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3374 3375 do { 3376 if (signal_pending(current)) { 3377 ret = -EINTR; 3378 break; 3379 } 3380 3381 mutex_lock(&memcg_max_mutex); 3382 /* 3383 * Make sure that the new limit (memsw or memory limit) doesn't 3384 * break our basic invariant rule memory.max <= memsw.max. 3385 */ 3386 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3387 max <= memcg->memsw.max; 3388 if (!limits_invariant) { 3389 mutex_unlock(&memcg_max_mutex); 3390 ret = -EINVAL; 3391 break; 3392 } 3393 if (max > counter->max) 3394 enlarge = true; 3395 ret = page_counter_set_max(counter, max); 3396 mutex_unlock(&memcg_max_mutex); 3397 3398 if (!ret) 3399 break; 3400 3401 if (!drained) { 3402 drain_all_stock(memcg); 3403 drained = true; 3404 continue; 3405 } 3406 3407 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3408 GFP_KERNEL, !memsw)) { 3409 ret = -EBUSY; 3410 break; 3411 } 3412 } while (true); 3413 3414 if (!ret && enlarge) 3415 memcg_oom_recover(memcg); 3416 3417 return ret; 3418 } 3419 3420 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3421 gfp_t gfp_mask, 3422 unsigned long *total_scanned) 3423 { 3424 unsigned long nr_reclaimed = 0; 3425 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3426 unsigned long reclaimed; 3427 int loop = 0; 3428 struct mem_cgroup_tree_per_node *mctz; 3429 unsigned long excess; 3430 unsigned long nr_scanned; 3431 3432 if (order > 0) 3433 return 0; 3434 3435 mctz = soft_limit_tree_node(pgdat->node_id); 3436 3437 /* 3438 * Do not even bother to check the largest node if the root 3439 * is empty. Do it lockless to prevent lock bouncing. Races 3440 * are acceptable as soft limit is best effort anyway. 
3441 */ 3442 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3443 return 0; 3444 3445 /* 3446 * This loop can run a while, specially if mem_cgroup's continuously 3447 * keep exceeding their soft limit and putting the system under 3448 * pressure 3449 */ 3450 do { 3451 if (next_mz) 3452 mz = next_mz; 3453 else 3454 mz = mem_cgroup_largest_soft_limit_node(mctz); 3455 if (!mz) 3456 break; 3457 3458 nr_scanned = 0; 3459 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3460 gfp_mask, &nr_scanned); 3461 nr_reclaimed += reclaimed; 3462 *total_scanned += nr_scanned; 3463 spin_lock_irq(&mctz->lock); 3464 __mem_cgroup_remove_exceeded(mz, mctz); 3465 3466 /* 3467 * If we failed to reclaim anything from this memory cgroup 3468 * it is time to move on to the next cgroup 3469 */ 3470 next_mz = NULL; 3471 if (!reclaimed) 3472 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3473 3474 excess = soft_limit_excess(mz->memcg); 3475 /* 3476 * One school of thought says that we should not add 3477 * back the node to the tree if reclaim returns 0. 3478 * But our reclaim could return 0, simply because due 3479 * to priority we are exposing a smaller subset of 3480 * memory to reclaim from. Consider this as a longer 3481 * term TODO. 3482 */ 3483 /* If excess == 0, no tree ops */ 3484 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3485 spin_unlock_irq(&mctz->lock); 3486 css_put(&mz->memcg->css); 3487 loop++; 3488 /* 3489 * Could not reclaim anything and there are no more 3490 * mem cgroups to try or we seem to be looping without 3491 * reclaiming anything. 3492 */ 3493 if (!nr_reclaimed && 3494 (next_mz == NULL || 3495 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3496 break; 3497 } while (!nr_reclaimed); 3498 if (next_mz) 3499 css_put(&next_mz->memcg->css); 3500 return nr_reclaimed; 3501 } 3502 3503 /* 3504 * Reclaims as many pages from the given memcg as possible. 3505 * 3506 * Caller is responsible for holding css reference for memcg. 3507 */ 3508 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3509 { 3510 int nr_retries = MAX_RECLAIM_RETRIES; 3511 3512 /* we call try-to-free pages for make this cgroup empty */ 3513 lru_add_drain_all(); 3514 3515 drain_all_stock(memcg); 3516 3517 /* try to free all pages in this cgroup */ 3518 while (nr_retries && page_counter_read(&memcg->memory)) { 3519 int progress; 3520 3521 if (signal_pending(current)) 3522 return -EINTR; 3523 3524 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3525 GFP_KERNEL, true); 3526 if (!progress) { 3527 nr_retries--; 3528 /* maybe some writeback is necessary */ 3529 congestion_wait(BLK_RW_ASYNC, HZ/10); 3530 } 3531 3532 } 3533 3534 return 0; 3535 } 3536 3537 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3538 char *buf, size_t nbytes, 3539 loff_t off) 3540 { 3541 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3542 3543 if (mem_cgroup_is_root(memcg)) 3544 return -EINVAL; 3545 return mem_cgroup_force_empty(memcg) ?: nbytes; 3546 } 3547 3548 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3549 struct cftype *cft) 3550 { 3551 return 1; 3552 } 3553 3554 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3555 struct cftype *cft, u64 val) 3556 { 3557 if (val == 1) 3558 return 0; 3559 3560 pr_warn_once("Non-hierarchical mode is deprecated. 
" 3561 "Please report your usecase to linux-mm@kvack.org if you " 3562 "depend on this functionality.\n"); 3563 3564 return -EINVAL; 3565 } 3566 3567 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3568 { 3569 unsigned long val; 3570 3571 if (mem_cgroup_is_root(memcg)) { 3572 cgroup_rstat_flush(memcg->css.cgroup); 3573 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3574 memcg_page_state(memcg, NR_ANON_MAPPED); 3575 if (swap) 3576 val += memcg_page_state(memcg, MEMCG_SWAP); 3577 } else { 3578 if (!swap) 3579 val = page_counter_read(&memcg->memory); 3580 else 3581 val = page_counter_read(&memcg->memsw); 3582 } 3583 return val; 3584 } 3585 3586 enum { 3587 RES_USAGE, 3588 RES_LIMIT, 3589 RES_MAX_USAGE, 3590 RES_FAILCNT, 3591 RES_SOFT_LIMIT, 3592 }; 3593 3594 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3595 struct cftype *cft) 3596 { 3597 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3598 struct page_counter *counter; 3599 3600 switch (MEMFILE_TYPE(cft->private)) { 3601 case _MEM: 3602 counter = &memcg->memory; 3603 break; 3604 case _MEMSWAP: 3605 counter = &memcg->memsw; 3606 break; 3607 case _KMEM: 3608 counter = &memcg->kmem; 3609 break; 3610 case _TCP: 3611 counter = &memcg->tcpmem; 3612 break; 3613 default: 3614 BUG(); 3615 } 3616 3617 switch (MEMFILE_ATTR(cft->private)) { 3618 case RES_USAGE: 3619 if (counter == &memcg->memory) 3620 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3621 if (counter == &memcg->memsw) 3622 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3623 return (u64)page_counter_read(counter) * PAGE_SIZE; 3624 case RES_LIMIT: 3625 return (u64)counter->max * PAGE_SIZE; 3626 case RES_MAX_USAGE: 3627 return (u64)counter->watermark * PAGE_SIZE; 3628 case RES_FAILCNT: 3629 return counter->failcnt; 3630 case RES_SOFT_LIMIT: 3631 return (u64)memcg->soft_limit * PAGE_SIZE; 3632 default: 3633 BUG(); 3634 } 3635 } 3636 3637 #ifdef CONFIG_MEMCG_KMEM 3638 static int memcg_online_kmem(struct mem_cgroup *memcg) 3639 { 3640 struct obj_cgroup *objcg; 3641 int memcg_id; 3642 3643 if (cgroup_memory_nokmem) 3644 return 0; 3645 3646 BUG_ON(memcg->kmemcg_id >= 0); 3647 BUG_ON(memcg->kmem_state); 3648 3649 memcg_id = memcg_alloc_cache_id(); 3650 if (memcg_id < 0) 3651 return memcg_id; 3652 3653 objcg = obj_cgroup_alloc(); 3654 if (!objcg) { 3655 memcg_free_cache_id(memcg_id); 3656 return -ENOMEM; 3657 } 3658 objcg->memcg = memcg; 3659 rcu_assign_pointer(memcg->objcg, objcg); 3660 3661 static_branch_enable(&memcg_kmem_enabled_key); 3662 3663 memcg->kmemcg_id = memcg_id; 3664 memcg->kmem_state = KMEM_ONLINE; 3665 3666 return 0; 3667 } 3668 3669 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3670 { 3671 struct cgroup_subsys_state *css; 3672 struct mem_cgroup *parent, *child; 3673 int kmemcg_id; 3674 3675 if (memcg->kmem_state != KMEM_ONLINE) 3676 return; 3677 3678 memcg->kmem_state = KMEM_ALLOCATED; 3679 3680 parent = parent_mem_cgroup(memcg); 3681 if (!parent) 3682 parent = root_mem_cgroup; 3683 3684 memcg_reparent_objcgs(memcg, parent); 3685 3686 kmemcg_id = memcg->kmemcg_id; 3687 BUG_ON(kmemcg_id < 0); 3688 3689 /* 3690 * Change kmemcg_id of this cgroup and all its descendants to the 3691 * parent's id, and then move all entries from this cgroup's list_lrus 3692 * to ones of the parent. After we have finished, all list_lrus 3693 * corresponding to this cgroup are guaranteed to remain empty. The 3694 * ordering is imposed by list_lru_node->lock taken by 3695 * memcg_drain_all_list_lrus(). 
3696 */ 3697 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3698 css_for_each_descendant_pre(css, &memcg->css) { 3699 child = mem_cgroup_from_css(css); 3700 BUG_ON(child->kmemcg_id != kmemcg_id); 3701 child->kmemcg_id = parent->kmemcg_id; 3702 } 3703 rcu_read_unlock(); 3704 3705 memcg_drain_all_list_lrus(kmemcg_id, parent); 3706 3707 memcg_free_cache_id(kmemcg_id); 3708 } 3709 3710 static void memcg_free_kmem(struct mem_cgroup *memcg) 3711 { 3712 /* css_alloc() failed, offlining didn't happen */ 3713 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3714 memcg_offline_kmem(memcg); 3715 } 3716 #else 3717 static int memcg_online_kmem(struct mem_cgroup *memcg) 3718 { 3719 return 0; 3720 } 3721 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3722 { 3723 } 3724 static void memcg_free_kmem(struct mem_cgroup *memcg) 3725 { 3726 } 3727 #endif /* CONFIG_MEMCG_KMEM */ 3728 3729 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3730 unsigned long max) 3731 { 3732 int ret; 3733 3734 mutex_lock(&memcg_max_mutex); 3735 ret = page_counter_set_max(&memcg->kmem, max); 3736 mutex_unlock(&memcg_max_mutex); 3737 return ret; 3738 } 3739 3740 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3741 { 3742 int ret; 3743 3744 mutex_lock(&memcg_max_mutex); 3745 3746 ret = page_counter_set_max(&memcg->tcpmem, max); 3747 if (ret) 3748 goto out; 3749 3750 if (!memcg->tcpmem_active) { 3751 /* 3752 * The active flag needs to be written after the static_key 3753 * update. This is what guarantees that the socket activation 3754 * function is the last one to run. See mem_cgroup_sk_alloc() 3755 * for details, and note that we don't mark any socket as 3756 * belonging to this memcg until that flag is up. 3757 * 3758 * We need to do this, because static_keys will span multiple 3759 * sites, but we can't control their order. If we mark a socket 3760 * as accounted, but the accounting functions are not patched in 3761 * yet, we'll lose accounting. 3762 * 3763 * We never race with the readers in mem_cgroup_sk_alloc(), 3764 * because when this value change, the code to process it is not 3765 * patched in yet. 3766 */ 3767 static_branch_inc(&memcg_sockets_enabled_key); 3768 memcg->tcpmem_active = true; 3769 } 3770 out: 3771 mutex_unlock(&memcg_max_mutex); 3772 return ret; 3773 } 3774 3775 /* 3776 * The user of this function is... 3777 * RES_LIMIT. 3778 */ 3779 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3780 char *buf, size_t nbytes, loff_t off) 3781 { 3782 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3783 unsigned long nr_pages; 3784 int ret; 3785 3786 buf = strstrip(buf); 3787 ret = page_counter_memparse(buf, "-1", &nr_pages); 3788 if (ret) 3789 return ret; 3790 3791 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3792 case RES_LIMIT: 3793 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3794 ret = -EINVAL; 3795 break; 3796 } 3797 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3798 case _MEM: 3799 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3800 break; 3801 case _MEMSWAP: 3802 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3803 break; 3804 case _KMEM: 3805 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. 
" 3806 "Please report your usecase to linux-mm@kvack.org if you " 3807 "depend on this functionality.\n"); 3808 ret = memcg_update_kmem_max(memcg, nr_pages); 3809 break; 3810 case _TCP: 3811 ret = memcg_update_tcp_max(memcg, nr_pages); 3812 break; 3813 } 3814 break; 3815 case RES_SOFT_LIMIT: 3816 memcg->soft_limit = nr_pages; 3817 ret = 0; 3818 break; 3819 } 3820 return ret ?: nbytes; 3821 } 3822 3823 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3824 size_t nbytes, loff_t off) 3825 { 3826 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3827 struct page_counter *counter; 3828 3829 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3830 case _MEM: 3831 counter = &memcg->memory; 3832 break; 3833 case _MEMSWAP: 3834 counter = &memcg->memsw; 3835 break; 3836 case _KMEM: 3837 counter = &memcg->kmem; 3838 break; 3839 case _TCP: 3840 counter = &memcg->tcpmem; 3841 break; 3842 default: 3843 BUG(); 3844 } 3845 3846 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3847 case RES_MAX_USAGE: 3848 page_counter_reset_watermark(counter); 3849 break; 3850 case RES_FAILCNT: 3851 counter->failcnt = 0; 3852 break; 3853 default: 3854 BUG(); 3855 } 3856 3857 return nbytes; 3858 } 3859 3860 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3861 struct cftype *cft) 3862 { 3863 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3864 } 3865 3866 #ifdef CONFIG_MMU 3867 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3868 struct cftype *cft, u64 val) 3869 { 3870 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3871 3872 if (val & ~MOVE_MASK) 3873 return -EINVAL; 3874 3875 /* 3876 * No kind of locking is needed in here, because ->can_attach() will 3877 * check this value once in the beginning of the process, and then carry 3878 * on with stale data. This means that changes to this value will only 3879 * affect task migrations starting after the change. 
3880 */ 3881 memcg->move_charge_at_immigrate = val; 3882 return 0; 3883 } 3884 #else 3885 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3886 struct cftype *cft, u64 val) 3887 { 3888 return -ENOSYS; 3889 } 3890 #endif 3891 3892 #ifdef CONFIG_NUMA 3893 3894 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3895 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3896 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3897 3898 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3899 int nid, unsigned int lru_mask, bool tree) 3900 { 3901 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3902 unsigned long nr = 0; 3903 enum lru_list lru; 3904 3905 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3906 3907 for_each_lru(lru) { 3908 if (!(BIT(lru) & lru_mask)) 3909 continue; 3910 if (tree) 3911 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3912 else 3913 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3914 } 3915 return nr; 3916 } 3917 3918 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3919 unsigned int lru_mask, 3920 bool tree) 3921 { 3922 unsigned long nr = 0; 3923 enum lru_list lru; 3924 3925 for_each_lru(lru) { 3926 if (!(BIT(lru) & lru_mask)) 3927 continue; 3928 if (tree) 3929 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3930 else 3931 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3932 } 3933 return nr; 3934 } 3935 3936 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3937 { 3938 struct numa_stat { 3939 const char *name; 3940 unsigned int lru_mask; 3941 }; 3942 3943 static const struct numa_stat stats[] = { 3944 { "total", LRU_ALL }, 3945 { "file", LRU_ALL_FILE }, 3946 { "anon", LRU_ALL_ANON }, 3947 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3948 }; 3949 const struct numa_stat *stat; 3950 int nid; 3951 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3952 3953 cgroup_rstat_flush(memcg->css.cgroup); 3954 3955 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3956 seq_printf(m, "%s=%lu", stat->name, 3957 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3958 false)); 3959 for_each_node_state(nid, N_MEMORY) 3960 seq_printf(m, " N%d=%lu", nid, 3961 mem_cgroup_node_nr_lru_pages(memcg, nid, 3962 stat->lru_mask, false)); 3963 seq_putc(m, '\n'); 3964 } 3965 3966 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3967 3968 seq_printf(m, "hierarchical_%s=%lu", stat->name, 3969 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3970 true)); 3971 for_each_node_state(nid, N_MEMORY) 3972 seq_printf(m, " N%d=%lu", nid, 3973 mem_cgroup_node_nr_lru_pages(memcg, nid, 3974 stat->lru_mask, true)); 3975 seq_putc(m, '\n'); 3976 } 3977 3978 return 0; 3979 } 3980 #endif /* CONFIG_NUMA */ 3981 3982 static const unsigned int memcg1_stats[] = { 3983 NR_FILE_PAGES, 3984 NR_ANON_MAPPED, 3985 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3986 NR_ANON_THPS, 3987 #endif 3988 NR_SHMEM, 3989 NR_FILE_MAPPED, 3990 NR_FILE_DIRTY, 3991 NR_WRITEBACK, 3992 MEMCG_SWAP, 3993 }; 3994 3995 static const char *const memcg1_stat_names[] = { 3996 "cache", 3997 "rss", 3998 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3999 "rss_huge", 4000 #endif 4001 "shmem", 4002 "mapped_file", 4003 "dirty", 4004 "writeback", 4005 "swap", 4006 }; 4007 4008 /* Universal VM events cgroup1 shows, original sort order */ 4009 static const unsigned int memcg1_events[] = { 4010 PGPGIN, 4011 PGPGOUT, 4012 PGFAULT, 4013 PGMAJFAULT, 4014 }; 4015 4016 static int memcg_stat_show(struct seq_file *m, void *v) 4017 { 4018 
struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4019 unsigned long memory, memsw; 4020 struct mem_cgroup *mi; 4021 unsigned int i; 4022 4023 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4024 4025 cgroup_rstat_flush(memcg->css.cgroup); 4026 4027 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4028 unsigned long nr; 4029 4030 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4031 continue; 4032 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4033 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 4034 } 4035 4036 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4037 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4038 memcg_events_local(memcg, memcg1_events[i])); 4039 4040 for (i = 0; i < NR_LRU_LISTS; i++) 4041 seq_printf(m, "%s %lu\n", lru_list_name(i), 4042 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4043 PAGE_SIZE); 4044 4045 /* Hierarchical information */ 4046 memory = memsw = PAGE_COUNTER_MAX; 4047 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4048 memory = min(memory, READ_ONCE(mi->memory.max)); 4049 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4050 } 4051 seq_printf(m, "hierarchical_memory_limit %llu\n", 4052 (u64)memory * PAGE_SIZE); 4053 if (do_memsw_account()) 4054 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4055 (u64)memsw * PAGE_SIZE); 4056 4057 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4058 unsigned long nr; 4059 4060 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4061 continue; 4062 nr = memcg_page_state(memcg, memcg1_stats[i]); 4063 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4064 (u64)nr * PAGE_SIZE); 4065 } 4066 4067 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4068 seq_printf(m, "total_%s %llu\n", 4069 vm_event_name(memcg1_events[i]), 4070 (u64)memcg_events(memcg, memcg1_events[i])); 4071 4072 for (i = 0; i < NR_LRU_LISTS; i++) 4073 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4074 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4075 PAGE_SIZE); 4076 4077 #ifdef CONFIG_DEBUG_VM 4078 { 4079 pg_data_t *pgdat; 4080 struct mem_cgroup_per_node *mz; 4081 unsigned long anon_cost = 0; 4082 unsigned long file_cost = 0; 4083 4084 for_each_online_pgdat(pgdat) { 4085 mz = memcg->nodeinfo[pgdat->node_id]; 4086 4087 anon_cost += mz->lruvec.anon_cost; 4088 file_cost += mz->lruvec.file_cost; 4089 } 4090 seq_printf(m, "anon_cost %lu\n", anon_cost); 4091 seq_printf(m, "file_cost %lu\n", file_cost); 4092 } 4093 #endif 4094 4095 return 0; 4096 } 4097 4098 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4099 struct cftype *cft) 4100 { 4101 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4102 4103 return mem_cgroup_swappiness(memcg); 4104 } 4105 4106 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4107 struct cftype *cft, u64 val) 4108 { 4109 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4110 4111 if (val > 100) 4112 return -EINVAL; 4113 4114 if (!mem_cgroup_is_root(memcg)) 4115 memcg->swappiness = val; 4116 else 4117 vm_swappiness = val; 4118 4119 return 0; 4120 } 4121 4122 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4123 { 4124 struct mem_cgroup_threshold_ary *t; 4125 unsigned long usage; 4126 int i; 4127 4128 rcu_read_lock(); 4129 if (!swap) 4130 t = rcu_dereference(memcg->thresholds.primary); 4131 else 4132 t = rcu_dereference(memcg->memsw_thresholds.primary); 4133 4134 if (!t) 4135 goto unlock; 4136 4137 usage = mem_cgroup_usage(memcg, swap); 4138 4139 /* 4140 * current_threshold 
points to threshold just below or equal to usage. 4141 * If it's not true, a threshold was crossed after last 4142 * call of __mem_cgroup_threshold(). 4143 */ 4144 i = t->current_threshold; 4145 4146 /* 4147 * Iterate backward over array of thresholds starting from 4148 * current_threshold and check if a threshold is crossed. 4149 * If none of thresholds below usage is crossed, we read 4150 * only one element of the array here. 4151 */ 4152 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4153 eventfd_signal(t->entries[i].eventfd, 1); 4154 4155 /* i = current_threshold + 1 */ 4156 i++; 4157 4158 /* 4159 * Iterate forward over array of thresholds starting from 4160 * current_threshold+1 and check if a threshold is crossed. 4161 * If none of thresholds above usage is crossed, we read 4162 * only one element of the array here. 4163 */ 4164 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4165 eventfd_signal(t->entries[i].eventfd, 1); 4166 4167 /* Update current_threshold */ 4168 t->current_threshold = i - 1; 4169 unlock: 4170 rcu_read_unlock(); 4171 } 4172 4173 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4174 { 4175 while (memcg) { 4176 __mem_cgroup_threshold(memcg, false); 4177 if (do_memsw_account()) 4178 __mem_cgroup_threshold(memcg, true); 4179 4180 memcg = parent_mem_cgroup(memcg); 4181 } 4182 } 4183 4184 static int compare_thresholds(const void *a, const void *b) 4185 { 4186 const struct mem_cgroup_threshold *_a = a; 4187 const struct mem_cgroup_threshold *_b = b; 4188 4189 if (_a->threshold > _b->threshold) 4190 return 1; 4191 4192 if (_a->threshold < _b->threshold) 4193 return -1; 4194 4195 return 0; 4196 } 4197 4198 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4199 { 4200 struct mem_cgroup_eventfd_list *ev; 4201 4202 spin_lock(&memcg_oom_lock); 4203 4204 list_for_each_entry(ev, &memcg->oom_notify, list) 4205 eventfd_signal(ev->eventfd, 1); 4206 4207 spin_unlock(&memcg_oom_lock); 4208 return 0; 4209 } 4210 4211 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4212 { 4213 struct mem_cgroup *iter; 4214 4215 for_each_mem_cgroup_tree(iter, memcg) 4216 mem_cgroup_oom_notify_cb(iter); 4217 } 4218 4219 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4220 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4221 { 4222 struct mem_cgroup_thresholds *thresholds; 4223 struct mem_cgroup_threshold_ary *new; 4224 unsigned long threshold; 4225 unsigned long usage; 4226 int i, size, ret; 4227 4228 ret = page_counter_memparse(args, "-1", &threshold); 4229 if (ret) 4230 return ret; 4231 4232 mutex_lock(&memcg->thresholds_lock); 4233 4234 if (type == _MEM) { 4235 thresholds = &memcg->thresholds; 4236 usage = mem_cgroup_usage(memcg, false); 4237 } else if (type == _MEMSWAP) { 4238 thresholds = &memcg->memsw_thresholds; 4239 usage = mem_cgroup_usage(memcg, true); 4240 } else 4241 BUG(); 4242 4243 /* Check if a threshold crossed before adding a new one */ 4244 if (thresholds->primary) 4245 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4246 4247 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4248 4249 /* Allocate memory for new array of thresholds */ 4250 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4251 if (!new) { 4252 ret = -ENOMEM; 4253 goto unlock; 4254 } 4255 new->size = size; 4256 4257 /* Copy thresholds (if any) to new array */ 4258 if (thresholds->primary) 4259 memcpy(new->entries, thresholds->primary->entries, 4260 flex_array_size(new, entries, size - 1)); 4261 4262 /* Add new threshold */ 4263 new->entries[size - 1].eventfd = eventfd; 4264 new->entries[size - 1].threshold = threshold; 4265 4266 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4267 sort(new->entries, size, sizeof(*new->entries), 4268 compare_thresholds, NULL); 4269 4270 /* Find current threshold */ 4271 new->current_threshold = -1; 4272 for (i = 0; i < size; i++) { 4273 if (new->entries[i].threshold <= usage) { 4274 /* 4275 * new->current_threshold will not be used until 4276 * rcu_assign_pointer(), so it's safe to increment 4277 * it here. 4278 */ 4279 ++new->current_threshold; 4280 } else 4281 break; 4282 } 4283 4284 /* Free old spare buffer and save old primary buffer as spare */ 4285 kfree(thresholds->spare); 4286 thresholds->spare = thresholds->primary; 4287 4288 rcu_assign_pointer(thresholds->primary, new); 4289 4290 /* To be sure that nobody uses thresholds */ 4291 synchronize_rcu(); 4292 4293 unlock: 4294 mutex_unlock(&memcg->thresholds_lock); 4295 4296 return ret; 4297 } 4298 4299 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4300 struct eventfd_ctx *eventfd, const char *args) 4301 { 4302 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4303 } 4304 4305 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4306 struct eventfd_ctx *eventfd, const char *args) 4307 { 4308 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4309 } 4310 4311 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4312 struct eventfd_ctx *eventfd, enum res_type type) 4313 { 4314 struct mem_cgroup_thresholds *thresholds; 4315 struct mem_cgroup_threshold_ary *new; 4316 unsigned long usage; 4317 int i, j, size, entries; 4318 4319 mutex_lock(&memcg->thresholds_lock); 4320 4321 if (type == _MEM) { 4322 thresholds = &memcg->thresholds; 4323 usage = mem_cgroup_usage(memcg, false); 4324 } else if (type == _MEMSWAP) { 4325 thresholds = &memcg->memsw_thresholds; 4326 usage = mem_cgroup_usage(memcg, true); 4327 } else 4328 BUG(); 4329 4330 if (!thresholds->primary) 4331 goto unlock; 4332 4333 /* Check if a threshold crossed before removing */ 4334 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4335 4336 /* Calculate new number of threshold */ 4337 size = entries = 0; 4338 for (i = 0; i < thresholds->primary->size; i++) { 4339 if (thresholds->primary->entries[i].eventfd != eventfd) 4340 size++; 4341 else 4342 entries++; 4343 } 4344 4345 new = thresholds->spare; 4346 4347 /* If no items related to eventfd have been cleared, nothing to do */ 4348 if (!entries) 4349 goto unlock; 4350 4351 /* Set thresholds array to NULL if we don't have thresholds */ 4352 if (!size) { 4353 kfree(new); 4354 new = NULL; 4355 goto swap_buffers; 4356 } 4357 4358 new->size = size; 4359 4360 /* Copy thresholds and find current threshold */ 4361 new->current_threshold = -1; 4362 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4363 if (thresholds->primary->entries[i].eventfd == eventfd) 4364 continue; 4365 4366 new->entries[j] = thresholds->primary->entries[i]; 4367 if 
(new->entries[j].threshold <= usage) { 4368 /* 4369 * new->current_threshold will not be used 4370 * until rcu_assign_pointer(), so it's safe to increment 4371 * it here. 4372 */ 4373 ++new->current_threshold; 4374 } 4375 j++; 4376 } 4377 4378 swap_buffers: 4379 /* Swap primary and spare array */ 4380 thresholds->spare = thresholds->primary; 4381 4382 rcu_assign_pointer(thresholds->primary, new); 4383 4384 /* To be sure that nobody uses thresholds */ 4385 synchronize_rcu(); 4386 4387 /* If all events are unregistered, free the spare array */ 4388 if (!new) { 4389 kfree(thresholds->spare); 4390 thresholds->spare = NULL; 4391 } 4392 unlock: 4393 mutex_unlock(&memcg->thresholds_lock); 4394 } 4395 4396 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4397 struct eventfd_ctx *eventfd) 4398 { 4399 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4400 } 4401 4402 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4403 struct eventfd_ctx *eventfd) 4404 { 4405 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4406 } 4407 4408 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4409 struct eventfd_ctx *eventfd, const char *args) 4410 { 4411 struct mem_cgroup_eventfd_list *event; 4412 4413 event = kmalloc(sizeof(*event), GFP_KERNEL); 4414 if (!event) 4415 return -ENOMEM; 4416 4417 spin_lock(&memcg_oom_lock); 4418 4419 event->eventfd = eventfd; 4420 list_add(&event->list, &memcg->oom_notify); 4421 4422 /* already in OOM ? */ 4423 if (memcg->under_oom) 4424 eventfd_signal(eventfd, 1); 4425 spin_unlock(&memcg_oom_lock); 4426 4427 return 0; 4428 } 4429 4430 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4431 struct eventfd_ctx *eventfd) 4432 { 4433 struct mem_cgroup_eventfd_list *ev, *tmp; 4434 4435 spin_lock(&memcg_oom_lock); 4436 4437 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4438 if (ev->eventfd == eventfd) { 4439 list_del(&ev->list); 4440 kfree(ev); 4441 } 4442 } 4443 4444 spin_unlock(&memcg_oom_lock); 4445 } 4446 4447 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4448 { 4449 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4450 4451 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4452 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4453 seq_printf(sf, "oom_kill %lu\n", 4454 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4455 return 0; 4456 } 4457 4458 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4459 struct cftype *cft, u64 val) 4460 { 4461 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4462 4463 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4464 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) 4465 return -EINVAL; 4466 4467 memcg->oom_kill_disable = val; 4468 if (!val) 4469 memcg_oom_recover(memcg); 4470 4471 return 0; 4472 } 4473 4474 #ifdef CONFIG_CGROUP_WRITEBACK 4475 4476 #include <trace/events/writeback.h> 4477 4478 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4479 { 4480 return wb_domain_init(&memcg->cgwb_domain, gfp); 4481 } 4482 4483 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4484 { 4485 wb_domain_exit(&memcg->cgwb_domain); 4486 } 4487 4488 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4489 { 4490 wb_domain_size_changed(&memcg->cgwb_domain); 4491 } 4492 4493 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4494 { 4495 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css); 4496 4497 if (!memcg->css.parent) 4498 return NULL; 4499 4500 return &memcg->cgwb_domain; 4501 } 4502 4503 /** 4504 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4505 * @wb: bdi_writeback in question 4506 * @pfilepages: out parameter for number of file pages 4507 * @pheadroom: out parameter for number of allocatable pages according to memcg 4508 * @pdirty: out parameter for number of dirty pages 4509 * @pwriteback: out parameter for number of pages under writeback 4510 * 4511 * Determine the numbers of file, headroom, dirty, and writeback pages in 4512 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4513 * is a bit more involved. 4514 * 4515 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4516 * headroom is calculated as the lowest headroom of itself and the 4517 * ancestors. Note that this doesn't consider the actual amount of 4518 * available memory in the system. The caller should further cap 4519 * *@pheadroom accordingly. 4520 */ 4521 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4522 unsigned long *pheadroom, unsigned long *pdirty, 4523 unsigned long *pwriteback) 4524 { 4525 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4526 struct mem_cgroup *parent; 4527 4528 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); 4529 4530 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 4531 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 4532 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 4533 memcg_page_state(memcg, NR_ACTIVE_FILE); 4534 4535 *pheadroom = PAGE_COUNTER_MAX; 4536 while ((parent = parent_mem_cgroup(memcg))) { 4537 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4538 READ_ONCE(memcg->memory.high)); 4539 unsigned long used = page_counter_read(&memcg->memory); 4540 4541 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4542 memcg = parent; 4543 } 4544 } 4545 4546 /* 4547 * Foreign dirty flushing 4548 * 4549 * There's an inherent mismatch between memcg and writeback. The former 4550 * tracks ownership per-page while the latter per-inode. This was a 4551 * deliberate design decision because honoring per-page ownership in the 4552 * writeback path is complicated, may lead to higher CPU and IO overheads 4553 * and deemed unnecessary given that write-sharing an inode across 4554 * different cgroups isn't a common use-case. 4555 * 4556 * Combined with inode majority-writer ownership switching, this works well 4557 * enough in most cases but there are some pathological cases. For 4558 * example, let's say there are two cgroups A and B which keep writing to 4559 * different but confined parts of the same inode. B owns the inode and 4560 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4561 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4562 * triggering background writeback. A will be slowed down without a way to 4563 * make writeback of the dirty pages happen. 4564 * 4565 * Conditions like the above can lead to a cgroup getting repeatedly and 4566 * severely throttled after making some progress after each 4567 * dirty_expire_interval while the underlying IO device is almost 4568 * completely idle. 4569 * 4570 * Solving this problem completely requires matching the ownership tracking 4571 * granularities between memcg and writeback in either direction. 
However,
4572 * the more egregious behaviors can be avoided by simply remembering the
4573 * most recent foreign dirtying events and initiating remote flushes on
4574 * them when local writeback isn't enough to keep the memory clean enough.
4575 *
4576 * The following two functions implement such a mechanism. When a foreign
4577 * page - a page whose memcg and writeback ownerships don't match - is
4578 * dirtied, mem_cgroup_track_foreign_dirty() records the inode-owning
4579 * bdi_writeback on the page-owning memcg. When balance_dirty_pages()
4580 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4581 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4582 * foreign bdi_writebacks which haven't expired. Both the numbers of
4583 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4584 * limited to MEMCG_CGWB_FRN_CNT.
4585 *
4586 * The mechanism only remembers IDs and doesn't hold any object references.
4587 * As being wrong occasionally doesn't matter, updates and accesses to the
4588 * records are lockless and racy.
4589 */
4590 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4591 struct bdi_writeback *wb)
4592 {
4593 struct mem_cgroup *memcg = page_memcg(page);
4594 struct memcg_cgwb_frn *frn;
4595 u64 now = get_jiffies_64();
4596 u64 oldest_at = now;
4597 int oldest = -1;
4598 int i;
4599
4600 trace_track_foreign_dirty(page, wb);
4601
4602 /*
4603 * Pick the slot to use. If there is already a slot for @wb, keep
4604 * using it. If not, replace the oldest one which isn't being
4605 * written out.
4606 */
4607 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4608 frn = &memcg->cgwb_frn[i];
4609 if (frn->bdi_id == wb->bdi->id &&
4610 frn->memcg_id == wb->memcg_css->id)
4611 break;
4612 if (time_before64(frn->at, oldest_at) &&
4613 atomic_read(&frn->done.cnt) == 1) {
4614 oldest = i;
4615 oldest_at = frn->at;
4616 }
4617 }
4618
4619 if (i < MEMCG_CGWB_FRN_CNT) {
4620 /*
4621 * Re-using an existing one. Update timestamp lazily to
4622 * avoid making the cacheline hot. We want them to be
4623 * reasonably up-to-date and significantly shorter than
4624 * dirty_expire_interval as that's what expires the record.
4625 * Use the shorter of 1s and dirty_expire_interval / 8.
4626 */
4627 unsigned long update_intv =
4628 min_t(unsigned long, HZ,
4629 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4630
4631 if (time_before64(frn->at, now - update_intv))
4632 frn->at = now;
4633 } else if (oldest >= 0) {
4634 /* replace the oldest free one */
4635 frn = &memcg->cgwb_frn[oldest];
4636 frn->bdi_id = wb->bdi->id;
4637 frn->memcg_id = wb->memcg_css->id;
4638 frn->at = now;
4639 }
4640 }
4641
4642 /* issue foreign writeback flushes for recorded foreign dirtying events */
4643 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4644 {
4645 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4646 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4647 u64 now = jiffies_64;
4648 int i;
4649
4650 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4651 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4652
4653 /*
4654 * If the record is older than dirty_expire_interval,
4655 * writeback on it has already started. No need to kick it
4656 * off again. Also, don't start a new one if there's
4657 * already one in flight.
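*
* Clearing frn->at below prevents the record from being flushed
* again and, once the writeback completes, leaves the slot as the
* oldest candidate for reuse by mem_cgroup_track_foreign_dirty_slowpath().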
4658 */ 4659 if (time_after64(frn->at, now - intv) && 4660 atomic_read(&frn->done.cnt) == 1) { 4661 frn->at = 0; 4662 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4663 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4664 WB_REASON_FOREIGN_FLUSH, 4665 &frn->done); 4666 } 4667 } 4668 } 4669 4670 #else /* CONFIG_CGROUP_WRITEBACK */ 4671 4672 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4673 { 4674 return 0; 4675 } 4676 4677 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4678 { 4679 } 4680 4681 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4682 { 4683 } 4684 4685 #endif /* CONFIG_CGROUP_WRITEBACK */ 4686 4687 /* 4688 * DO NOT USE IN NEW FILES. 4689 * 4690 * "cgroup.event_control" implementation. 4691 * 4692 * This is way over-engineered. It tries to support fully configurable 4693 * events for each user. Such level of flexibility is completely 4694 * unnecessary especially in the light of the planned unified hierarchy. 4695 * 4696 * Please deprecate this and replace with something simpler if at all 4697 * possible. 4698 */ 4699 4700 /* 4701 * Unregister event and free resources. 4702 * 4703 * Gets called from workqueue. 4704 */ 4705 static void memcg_event_remove(struct work_struct *work) 4706 { 4707 struct mem_cgroup_event *event = 4708 container_of(work, struct mem_cgroup_event, remove); 4709 struct mem_cgroup *memcg = event->memcg; 4710 4711 remove_wait_queue(event->wqh, &event->wait); 4712 4713 event->unregister_event(memcg, event->eventfd); 4714 4715 /* Notify userspace the event is going away. */ 4716 eventfd_signal(event->eventfd, 1); 4717 4718 eventfd_ctx_put(event->eventfd); 4719 kfree(event); 4720 css_put(&memcg->css); 4721 } 4722 4723 /* 4724 * Gets called on EPOLLHUP on eventfd when user closes it. 4725 * 4726 * Called with wqh->lock held and interrupts disabled. 4727 */ 4728 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4729 int sync, void *key) 4730 { 4731 struct mem_cgroup_event *event = 4732 container_of(wait, struct mem_cgroup_event, wait); 4733 struct mem_cgroup *memcg = event->memcg; 4734 __poll_t flags = key_to_poll(key); 4735 4736 if (flags & EPOLLHUP) { 4737 /* 4738 * If the event has been detached at cgroup removal, we 4739 * can simply return knowing the other side will cleanup 4740 * for us. 4741 * 4742 * We can't race against event freeing since the other 4743 * side will require wqh->lock via remove_wait_queue(), 4744 * which we hold. 4745 */ 4746 spin_lock(&memcg->event_list_lock); 4747 if (!list_empty(&event->list)) { 4748 list_del_init(&event->list); 4749 /* 4750 * We are in atomic context, but cgroup_event_remove() 4751 * may sleep, so we have to call it in workqueue. 4752 */ 4753 schedule_work(&event->remove); 4754 } 4755 spin_unlock(&memcg->event_list_lock); 4756 } 4757 4758 return 0; 4759 } 4760 4761 static void memcg_event_ptable_queue_proc(struct file *file, 4762 wait_queue_head_t *wqh, poll_table *pt) 4763 { 4764 struct mem_cgroup_event *event = 4765 container_of(pt, struct mem_cgroup_event, pt); 4766 4767 event->wqh = wqh; 4768 add_wait_queue(wqh, &event->wait); 4769 } 4770 4771 /* 4772 * DO NOT USE IN NEW FILES. 4773 * 4774 * Parse input and register new cgroup event handler. 4775 * 4776 * Input must be in format '<event_fd> <control_fd> <args>'. 4777 * Interpretation of args is defined by control file implementation. 
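*
* For example (fd numbers purely illustrative), writing "3 4 1048576"
* while fd 4 refers to an open memory.usage_in_bytes file arms
* eventfd 3 to fire when usage crosses the 1M threshold.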
4778 */ 4779 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4780 char *buf, size_t nbytes, loff_t off) 4781 { 4782 struct cgroup_subsys_state *css = of_css(of); 4783 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4784 struct mem_cgroup_event *event; 4785 struct cgroup_subsys_state *cfile_css; 4786 unsigned int efd, cfd; 4787 struct fd efile; 4788 struct fd cfile; 4789 const char *name; 4790 char *endp; 4791 int ret; 4792 4793 buf = strstrip(buf); 4794 4795 efd = simple_strtoul(buf, &endp, 10); 4796 if (*endp != ' ') 4797 return -EINVAL; 4798 buf = endp + 1; 4799 4800 cfd = simple_strtoul(buf, &endp, 10); 4801 if ((*endp != ' ') && (*endp != '\0')) 4802 return -EINVAL; 4803 buf = endp + 1; 4804 4805 event = kzalloc(sizeof(*event), GFP_KERNEL); 4806 if (!event) 4807 return -ENOMEM; 4808 4809 event->memcg = memcg; 4810 INIT_LIST_HEAD(&event->list); 4811 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4812 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4813 INIT_WORK(&event->remove, memcg_event_remove); 4814 4815 efile = fdget(efd); 4816 if (!efile.file) { 4817 ret = -EBADF; 4818 goto out_kfree; 4819 } 4820 4821 event->eventfd = eventfd_ctx_fileget(efile.file); 4822 if (IS_ERR(event->eventfd)) { 4823 ret = PTR_ERR(event->eventfd); 4824 goto out_put_efile; 4825 } 4826 4827 cfile = fdget(cfd); 4828 if (!cfile.file) { 4829 ret = -EBADF; 4830 goto out_put_eventfd; 4831 } 4832 4833 /* the process need read permission on control file */ 4834 /* AV: shouldn't we check that it's been opened for read instead? */ 4835 ret = file_permission(cfile.file, MAY_READ); 4836 if (ret < 0) 4837 goto out_put_cfile; 4838 4839 /* 4840 * Determine the event callbacks and set them in @event. This used 4841 * to be done via struct cftype but cgroup core no longer knows 4842 * about these events. The following is crude but the whole thing 4843 * is for compatibility anyway. 4844 * 4845 * DO NOT ADD NEW FILES. 4846 */ 4847 name = cfile.file->f_path.dentry->d_name.name; 4848 4849 if (!strcmp(name, "memory.usage_in_bytes")) { 4850 event->register_event = mem_cgroup_usage_register_event; 4851 event->unregister_event = mem_cgroup_usage_unregister_event; 4852 } else if (!strcmp(name, "memory.oom_control")) { 4853 event->register_event = mem_cgroup_oom_register_event; 4854 event->unregister_event = mem_cgroup_oom_unregister_event; 4855 } else if (!strcmp(name, "memory.pressure_level")) { 4856 event->register_event = vmpressure_register_event; 4857 event->unregister_event = vmpressure_unregister_event; 4858 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4859 event->register_event = memsw_cgroup_usage_register_event; 4860 event->unregister_event = memsw_cgroup_usage_unregister_event; 4861 } else { 4862 ret = -EINVAL; 4863 goto out_put_cfile; 4864 } 4865 4866 /* 4867 * Verify @cfile should belong to @css. Also, remaining events are 4868 * automatically removed on cgroup destruction but the removal is 4869 * asynchronous, so take an extra ref on @css. 
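* The reference obtained from css_tryget_online_from_dir() below
* doubles as that extra ref; it is dropped by memcg_event_remove()
* when the event goes away, or in the error paths here.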
4870 */ 4871 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4872 &memory_cgrp_subsys); 4873 ret = -EINVAL; 4874 if (IS_ERR(cfile_css)) 4875 goto out_put_cfile; 4876 if (cfile_css != css) { 4877 css_put(cfile_css); 4878 goto out_put_cfile; 4879 } 4880 4881 ret = event->register_event(memcg, event->eventfd, buf); 4882 if (ret) 4883 goto out_put_css; 4884 4885 vfs_poll(efile.file, &event->pt); 4886 4887 spin_lock(&memcg->event_list_lock); 4888 list_add(&event->list, &memcg->event_list); 4889 spin_unlock(&memcg->event_list_lock); 4890 4891 fdput(cfile); 4892 fdput(efile); 4893 4894 return nbytes; 4895 4896 out_put_css: 4897 css_put(css); 4898 out_put_cfile: 4899 fdput(cfile); 4900 out_put_eventfd: 4901 eventfd_ctx_put(event->eventfd); 4902 out_put_efile: 4903 fdput(efile); 4904 out_kfree: 4905 kfree(event); 4906 4907 return ret; 4908 } 4909 4910 static struct cftype mem_cgroup_legacy_files[] = { 4911 { 4912 .name = "usage_in_bytes", 4913 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4914 .read_u64 = mem_cgroup_read_u64, 4915 }, 4916 { 4917 .name = "max_usage_in_bytes", 4918 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4919 .write = mem_cgroup_reset, 4920 .read_u64 = mem_cgroup_read_u64, 4921 }, 4922 { 4923 .name = "limit_in_bytes", 4924 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4925 .write = mem_cgroup_write, 4926 .read_u64 = mem_cgroup_read_u64, 4927 }, 4928 { 4929 .name = "soft_limit_in_bytes", 4930 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4931 .write = mem_cgroup_write, 4932 .read_u64 = mem_cgroup_read_u64, 4933 }, 4934 { 4935 .name = "failcnt", 4936 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4937 .write = mem_cgroup_reset, 4938 .read_u64 = mem_cgroup_read_u64, 4939 }, 4940 { 4941 .name = "stat", 4942 .seq_show = memcg_stat_show, 4943 }, 4944 { 4945 .name = "force_empty", 4946 .write = mem_cgroup_force_empty_write, 4947 }, 4948 { 4949 .name = "use_hierarchy", 4950 .write_u64 = mem_cgroup_hierarchy_write, 4951 .read_u64 = mem_cgroup_hierarchy_read, 4952 }, 4953 { 4954 .name = "cgroup.event_control", /* XXX: for compat */ 4955 .write = memcg_write_event_control, 4956 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4957 }, 4958 { 4959 .name = "swappiness", 4960 .read_u64 = mem_cgroup_swappiness_read, 4961 .write_u64 = mem_cgroup_swappiness_write, 4962 }, 4963 { 4964 .name = "move_charge_at_immigrate", 4965 .read_u64 = mem_cgroup_move_charge_read, 4966 .write_u64 = mem_cgroup_move_charge_write, 4967 }, 4968 { 4969 .name = "oom_control", 4970 .seq_show = mem_cgroup_oom_control_read, 4971 .write_u64 = mem_cgroup_oom_control_write, 4972 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4973 }, 4974 { 4975 .name = "pressure_level", 4976 }, 4977 #ifdef CONFIG_NUMA 4978 { 4979 .name = "numa_stat", 4980 .seq_show = memcg_numa_stat_show, 4981 }, 4982 #endif 4983 { 4984 .name = "kmem.limit_in_bytes", 4985 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4986 .write = mem_cgroup_write, 4987 .read_u64 = mem_cgroup_read_u64, 4988 }, 4989 { 4990 .name = "kmem.usage_in_bytes", 4991 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4992 .read_u64 = mem_cgroup_read_u64, 4993 }, 4994 { 4995 .name = "kmem.failcnt", 4996 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4997 .write = mem_cgroup_reset, 4998 .read_u64 = mem_cgroup_read_u64, 4999 }, 5000 { 5001 .name = "kmem.max_usage_in_bytes", 5002 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5003 .write = mem_cgroup_reset, 5004 .read_u64 = mem_cgroup_read_u64, 5005 }, 5006 #if defined(CONFIG_MEMCG_KMEM) && \ 5007 
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5008 { 5009 .name = "kmem.slabinfo", 5010 .seq_show = memcg_slab_show, 5011 }, 5012 #endif 5013 { 5014 .name = "kmem.tcp.limit_in_bytes", 5015 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5016 .write = mem_cgroup_write, 5017 .read_u64 = mem_cgroup_read_u64, 5018 }, 5019 { 5020 .name = "kmem.tcp.usage_in_bytes", 5021 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5022 .read_u64 = mem_cgroup_read_u64, 5023 }, 5024 { 5025 .name = "kmem.tcp.failcnt", 5026 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5027 .write = mem_cgroup_reset, 5028 .read_u64 = mem_cgroup_read_u64, 5029 }, 5030 { 5031 .name = "kmem.tcp.max_usage_in_bytes", 5032 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5033 .write = mem_cgroup_reset, 5034 .read_u64 = mem_cgroup_read_u64, 5035 }, 5036 { }, /* terminate */ 5037 }; 5038 5039 /* 5040 * Private memory cgroup IDR 5041 * 5042 * Swap-out records and page cache shadow entries need to store memcg 5043 * references in constrained space, so we maintain an ID space that is 5044 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5045 * memory-controlled cgroups to 64k. 5046 * 5047 * However, there usually are many references to the offline CSS after 5048 * the cgroup has been destroyed, such as page cache or reclaimable 5049 * slab objects, that don't need to hang on to the ID. We want to keep 5050 * those dead CSS from occupying IDs, or we might quickly exhaust the 5051 * relatively small ID space and prevent the creation of new cgroups 5052 * even when there are much fewer than 64k cgroups - possibly none. 5053 * 5054 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5055 * be freed and recycled when it's no longer needed, which is usually 5056 * when the CSS is offlined. 5057 * 5058 * The only exception to that are records of swapped out tmpfs/shmem 5059 * pages that need to be attributed to live ancestors on swapin. But 5060 * those references are manageable from userspace. 5061 */ 5062 5063 static DEFINE_IDR(mem_cgroup_idr); 5064 5065 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5066 { 5067 if (memcg->id.id > 0) { 5068 idr_remove(&mem_cgroup_idr, memcg->id.id); 5069 memcg->id.id = 0; 5070 } 5071 } 5072 5073 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5074 unsigned int n) 5075 { 5076 refcount_add(n, &memcg->id.ref); 5077 } 5078 5079 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5080 { 5081 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5082 mem_cgroup_id_remove(memcg); 5083 5084 /* Memcg ID pins CSS */ 5085 css_put(&memcg->css); 5086 } 5087 } 5088 5089 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5090 { 5091 mem_cgroup_id_put_many(memcg, 1); 5092 } 5093 5094 /** 5095 * mem_cgroup_from_id - look up a memcg from a memcg id 5096 * @id: the memcg id to look up 5097 * 5098 * Caller must hold rcu_read_lock(). 5099 */ 5100 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5101 { 5102 WARN_ON_ONCE(!rcu_read_lock_held()); 5103 return idr_find(&mem_cgroup_idr, id); 5104 } 5105 5106 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5107 { 5108 struct mem_cgroup_per_node *pn; 5109 int tmp = node; 5110 /* 5111 * This routine is called against possible nodes. 5112 * But it's BUG to call kmalloc() against offline node. 5113 * 5114 * TODO: this routine can waste much memory for nodes which will 5115 * never be onlined. 
It's better to use memory hotplug callback 5116 * function. 5117 */ 5118 if (!node_state(node, N_NORMAL_MEMORY)) 5119 tmp = -1; 5120 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5121 if (!pn) 5122 return 1; 5123 5124 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5125 GFP_KERNEL_ACCOUNT); 5126 if (!pn->lruvec_stat_local) { 5127 kfree(pn); 5128 return 1; 5129 } 5130 5131 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat, 5132 GFP_KERNEL_ACCOUNT); 5133 if (!pn->lruvec_stat_cpu) { 5134 free_percpu(pn->lruvec_stat_local); 5135 kfree(pn); 5136 return 1; 5137 } 5138 5139 lruvec_init(&pn->lruvec); 5140 pn->usage_in_excess = 0; 5141 pn->on_tree = false; 5142 pn->memcg = memcg; 5143 5144 memcg->nodeinfo[node] = pn; 5145 return 0; 5146 } 5147 5148 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5149 { 5150 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5151 5152 if (!pn) 5153 return; 5154 5155 free_percpu(pn->lruvec_stat_cpu); 5156 free_percpu(pn->lruvec_stat_local); 5157 kfree(pn); 5158 } 5159 5160 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5161 { 5162 int node; 5163 5164 for_each_node(node) 5165 free_mem_cgroup_per_node_info(memcg, node); 5166 free_percpu(memcg->vmstats_percpu); 5167 kfree(memcg); 5168 } 5169 5170 static void mem_cgroup_free(struct mem_cgroup *memcg) 5171 { 5172 int cpu; 5173 5174 memcg_wb_domain_exit(memcg); 5175 /* 5176 * Flush percpu lruvec stats to guarantee the value 5177 * correctness on parent's and all ancestor levels. 5178 */ 5179 for_each_online_cpu(cpu) 5180 memcg_flush_lruvec_page_state(memcg, cpu); 5181 __mem_cgroup_free(memcg); 5182 } 5183 5184 static struct mem_cgroup *mem_cgroup_alloc(void) 5185 { 5186 struct mem_cgroup *memcg; 5187 unsigned int size; 5188 int node; 5189 int __maybe_unused i; 5190 long error = -ENOMEM; 5191 5192 size = sizeof(struct mem_cgroup); 5193 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5194 5195 memcg = kzalloc(size, GFP_KERNEL); 5196 if (!memcg) 5197 return ERR_PTR(error); 5198 5199 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5200 1, MEM_CGROUP_ID_MAX, 5201 GFP_KERNEL); 5202 if (memcg->id.id < 0) { 5203 error = memcg->id.id; 5204 goto fail; 5205 } 5206 5207 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5208 GFP_KERNEL_ACCOUNT); 5209 if (!memcg->vmstats_percpu) 5210 goto fail; 5211 5212 for_each_node(node) 5213 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5214 goto fail; 5215 5216 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5217 goto fail; 5218 5219 INIT_WORK(&memcg->high_work, high_work_func); 5220 INIT_LIST_HEAD(&memcg->oom_notify); 5221 mutex_init(&memcg->thresholds_lock); 5222 spin_lock_init(&memcg->move_lock); 5223 vmpressure_init(&memcg->vmpressure); 5224 INIT_LIST_HEAD(&memcg->event_list); 5225 spin_lock_init(&memcg->event_list_lock); 5226 memcg->socket_pressure = jiffies; 5227 #ifdef CONFIG_MEMCG_KMEM 5228 memcg->kmemcg_id = -1; 5229 INIT_LIST_HEAD(&memcg->objcg_list); 5230 #endif 5231 #ifdef CONFIG_CGROUP_WRITEBACK 5232 INIT_LIST_HEAD(&memcg->cgwb_list); 5233 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5234 memcg->cgwb_frn[i].done = 5235 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5236 #endif 5237 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5238 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5239 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5240 memcg->deferred_split_queue.split_queue_len = 0; 5241 #endif 5242 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5243 return memcg; 
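/*
 * Something above failed: release the reserved ID (if any) and free
 * the partially initialized per-node and per-cpu state.
 */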
5244 fail: 5245 mem_cgroup_id_remove(memcg); 5246 __mem_cgroup_free(memcg); 5247 return ERR_PTR(error); 5248 } 5249 5250 static struct cgroup_subsys_state * __ref 5251 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5252 { 5253 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5254 struct mem_cgroup *memcg, *old_memcg; 5255 long error = -ENOMEM; 5256 5257 old_memcg = set_active_memcg(parent); 5258 memcg = mem_cgroup_alloc(); 5259 set_active_memcg(old_memcg); 5260 if (IS_ERR(memcg)) 5261 return ERR_CAST(memcg); 5262 5263 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5264 memcg->soft_limit = PAGE_COUNTER_MAX; 5265 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5266 if (parent) { 5267 memcg->swappiness = mem_cgroup_swappiness(parent); 5268 memcg->oom_kill_disable = parent->oom_kill_disable; 5269 5270 page_counter_init(&memcg->memory, &parent->memory); 5271 page_counter_init(&memcg->swap, &parent->swap); 5272 page_counter_init(&memcg->kmem, &parent->kmem); 5273 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5274 } else { 5275 page_counter_init(&memcg->memory, NULL); 5276 page_counter_init(&memcg->swap, NULL); 5277 page_counter_init(&memcg->kmem, NULL); 5278 page_counter_init(&memcg->tcpmem, NULL); 5279 5280 root_mem_cgroup = memcg; 5281 return &memcg->css; 5282 } 5283 5284 /* The following stuff does not apply to the root */ 5285 error = memcg_online_kmem(memcg); 5286 if (error) 5287 goto fail; 5288 5289 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5290 static_branch_inc(&memcg_sockets_enabled_key); 5291 5292 return &memcg->css; 5293 fail: 5294 mem_cgroup_id_remove(memcg); 5295 mem_cgroup_free(memcg); 5296 return ERR_PTR(error); 5297 } 5298 5299 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5300 { 5301 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5302 5303 /* 5304 * A memcg must be visible for expand_shrinker_info() 5305 * by the time the maps are allocated. So, we allocate maps 5306 * here, when for_each_mem_cgroup() can't skip it. 5307 */ 5308 if (alloc_shrinker_info(memcg)) { 5309 mem_cgroup_id_remove(memcg); 5310 return -ENOMEM; 5311 } 5312 5313 /* Online state pins memcg ID, memcg ID pins CSS */ 5314 refcount_set(&memcg->id.ref, 1); 5315 css_get(css); 5316 return 0; 5317 } 5318 5319 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5320 { 5321 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5322 struct mem_cgroup_event *event, *tmp; 5323 5324 /* 5325 * Unregister events and notify userspace. 5326 * Notify userspace about cgroup removing only after rmdir of cgroup 5327 * directory to avoid race between userspace and kernelspace. 
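*
* Each event also holds a css reference taken at registration time,
* so the memcg cannot be freed before the deferred removal work has
* run.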
5328 */ 5329 spin_lock(&memcg->event_list_lock); 5330 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5331 list_del_init(&event->list); 5332 schedule_work(&event->remove); 5333 } 5334 spin_unlock(&memcg->event_list_lock); 5335 5336 page_counter_set_min(&memcg->memory, 0); 5337 page_counter_set_low(&memcg->memory, 0); 5338 5339 memcg_offline_kmem(memcg); 5340 reparent_shrinker_deferred(memcg); 5341 wb_memcg_offline(memcg); 5342 5343 drain_all_stock(memcg); 5344 5345 mem_cgroup_id_put(memcg); 5346 } 5347 5348 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5349 { 5350 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5351 5352 invalidate_reclaim_iterators(memcg); 5353 } 5354 5355 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5356 { 5357 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5358 int __maybe_unused i; 5359 5360 #ifdef CONFIG_CGROUP_WRITEBACK 5361 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5362 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5363 #endif 5364 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5365 static_branch_dec(&memcg_sockets_enabled_key); 5366 5367 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5368 static_branch_dec(&memcg_sockets_enabled_key); 5369 5370 vmpressure_cleanup(&memcg->vmpressure); 5371 cancel_work_sync(&memcg->high_work); 5372 mem_cgroup_remove_from_trees(memcg); 5373 free_shrinker_info(memcg); 5374 memcg_free_kmem(memcg); 5375 mem_cgroup_free(memcg); 5376 } 5377 5378 /** 5379 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5380 * @css: the target css 5381 * 5382 * Reset the states of the mem_cgroup associated with @css. This is 5383 * invoked when the userland requests disabling on the default hierarchy 5384 * but the memcg is pinned through dependency. The memcg should stop 5385 * applying policies and should revert to the vanilla state as it may be 5386 * made visible again. 5387 * 5388 * The current implementation only resets the essential configurations. 5389 * This needs to be expanded to cover all the visible parts. 5390 */ 5391 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5392 { 5393 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5394 5395 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5396 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5397 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5398 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5399 page_counter_set_min(&memcg->memory, 0); 5400 page_counter_set_low(&memcg->memory, 0); 5401 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5402 memcg->soft_limit = PAGE_COUNTER_MAX; 5403 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5404 memcg_wb_domain_size_changed(memcg); 5405 } 5406 5407 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 5408 { 5409 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5410 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5411 struct memcg_vmstats_percpu *statc; 5412 long delta, v; 5413 int i; 5414 5415 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5416 5417 for (i = 0; i < MEMCG_NR_STAT; i++) { 5418 /* 5419 * Collect the aggregated propagation counts of groups 5420 * below us. We're in a per-cpu loop here and this is 5421 * a global counter, so the first cycle will get them. 
5422 */
5423 delta = memcg->vmstats.state_pending[i];
5424 if (delta)
5425 memcg->vmstats.state_pending[i] = 0;
5426
5427 /* Add CPU changes on this level since the last flush */
5428 v = READ_ONCE(statc->state[i]);
5429 if (v != statc->state_prev[i]) {
5430 delta += v - statc->state_prev[i];
5431 statc->state_prev[i] = v;
5432 }
5433
5434 if (!delta)
5435 continue;
5436
5437 /* Aggregate counts on this level and propagate upwards */
5438 memcg->vmstats.state[i] += delta;
5439 if (parent)
5440 parent->vmstats.state_pending[i] += delta;
5441 }
5442
5443 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5444 delta = memcg->vmstats.events_pending[i];
5445 if (delta)
5446 memcg->vmstats.events_pending[i] = 0;
5447
5448 v = READ_ONCE(statc->events[i]);
5449 if (v != statc->events_prev[i]) {
5450 delta += v - statc->events_prev[i];
5451 statc->events_prev[i] = v;
5452 }
5453
5454 if (!delta)
5455 continue;
5456
5457 memcg->vmstats.events[i] += delta;
5458 if (parent)
5459 parent->vmstats.events_pending[i] += delta;
5460 }
5461 }
5462
5463 #ifdef CONFIG_MMU
5464 /* Handlers for move charge at task migration. */
5465 static int mem_cgroup_do_precharge(unsigned long count)
5466 {
5467 int ret;
5468
5469 /* Try a single bulk charge without reclaim first, kswapd may wake */
5470 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5471 if (!ret) {
5472 mc.precharge += count;
5473 return ret;
5474 }
5475
5476 /* Try charges one by one with reclaim, but do not retry */
5477 while (count--) {
5478 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5479 if (ret)
5480 return ret;
5481 mc.precharge++;
5482 cond_resched();
5483 }
5484 return 0;
5485 }
5486
5487 union mc_target {
5488 struct page *page;
5489 swp_entry_t ent;
5490 };
5491
5492 enum mc_target_type {
5493 MC_TARGET_NONE = 0,
5494 MC_TARGET_PAGE,
5495 MC_TARGET_SWAP,
5496 MC_TARGET_DEVICE,
5497 };
5498
5499 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5500 unsigned long addr, pte_t ptent)
5501 {
5502 struct page *page = vm_normal_page(vma, addr, ptent);
5503
5504 if (!page || !page_mapped(page))
5505 return NULL;
5506 if (PageAnon(page)) {
5507 if (!(mc.flags & MOVE_ANON))
5508 return NULL;
5509 } else {
5510 if (!(mc.flags & MOVE_FILE))
5511 return NULL;
5512 }
5513 if (!get_page_unless_zero(page))
5514 return NULL;
5515
5516 return page;
5517 }
5518
5519 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5520 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5521 pte_t ptent, swp_entry_t *entry)
5522 {
5523 struct page *page = NULL;
5524 swp_entry_t ent = pte_to_swp_entry(ptent);
5525
5526 if (!(mc.flags & MOVE_ANON))
5527 return NULL;
5528
5529 /*
5530 * Handle MEMORY_DEVICE_PRIVATE entries: these are ZONE_DEVICE pages
5531 * belonging to a device. Because they are not accessible by the CPU,
5532 * they are stored as special swap entries in the CPU page table.
5533 */
5534 if (is_device_private_entry(ent)) {
5535 page = device_private_entry_to_page(ent);
5536 /*
5537 * MEMORY_DEVICE_PRIVATE means this is a ZONE_DEVICE page, which
5538 * has a refcount of 1 when free (unlike a normal page).
5539 */
5540 if (!page_ref_add_unless(page, 1, 1))
5541 return NULL;
5542 return page;
5543 }
5544
5545 if (non_swap_entry(ent))
5546 return NULL;
5547
5548 /*
5549 * Because lookup_swap_cache() updates some statistics counters,
5550 * we call find_get_page() with swapper_space directly.
5551 */ 5552 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5553 entry->val = ent.val; 5554 5555 return page; 5556 } 5557 #else 5558 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5559 pte_t ptent, swp_entry_t *entry) 5560 { 5561 return NULL; 5562 } 5563 #endif 5564 5565 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5566 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5567 { 5568 if (!vma->vm_file) /* anonymous vma */ 5569 return NULL; 5570 if (!(mc.flags & MOVE_FILE)) 5571 return NULL; 5572 5573 /* page is moved even if it's not RSS of this task(page-faulted). */ 5574 /* shmem/tmpfs may report page out on swap: account for that too. */ 5575 return find_get_incore_page(vma->vm_file->f_mapping, 5576 linear_page_index(vma, addr)); 5577 } 5578 5579 /** 5580 * mem_cgroup_move_account - move account of the page 5581 * @page: the page 5582 * @compound: charge the page as compound or small page 5583 * @from: mem_cgroup which the page is moved from. 5584 * @to: mem_cgroup which the page is moved to. @from != @to. 5585 * 5586 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5587 * 5588 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5589 * from old cgroup. 5590 */ 5591 static int mem_cgroup_move_account(struct page *page, 5592 bool compound, 5593 struct mem_cgroup *from, 5594 struct mem_cgroup *to) 5595 { 5596 struct lruvec *from_vec, *to_vec; 5597 struct pglist_data *pgdat; 5598 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5599 int ret; 5600 5601 VM_BUG_ON(from == to); 5602 VM_BUG_ON_PAGE(PageLRU(page), page); 5603 VM_BUG_ON(compound && !PageTransHuge(page)); 5604 5605 /* 5606 * Prevent mem_cgroup_migrate() from looking at 5607 * page's memory cgroup of its source page while we change it. 5608 */ 5609 ret = -EBUSY; 5610 if (!trylock_page(page)) 5611 goto out; 5612 5613 ret = -EINVAL; 5614 if (page_memcg(page) != from) 5615 goto out_unlock; 5616 5617 pgdat = page_pgdat(page); 5618 from_vec = mem_cgroup_lruvec(from, pgdat); 5619 to_vec = mem_cgroup_lruvec(to, pgdat); 5620 5621 lock_page_memcg(page); 5622 5623 if (PageAnon(page)) { 5624 if (page_mapped(page)) { 5625 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5626 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5627 if (PageTransHuge(page)) { 5628 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5629 -nr_pages); 5630 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5631 nr_pages); 5632 } 5633 } 5634 } else { 5635 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5636 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5637 5638 if (PageSwapBacked(page)) { 5639 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5640 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5641 } 5642 5643 if (page_mapped(page)) { 5644 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5645 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5646 } 5647 5648 if (PageDirty(page)) { 5649 struct address_space *mapping = page_mapping(page); 5650 5651 if (mapping_can_writeback(mapping)) { 5652 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5653 -nr_pages); 5654 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5655 nr_pages); 5656 } 5657 } 5658 } 5659 5660 if (PageWriteback(page)) { 5661 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5662 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5663 } 5664 5665 /* 5666 * All state has been migrated, let's switch to the new memcg. 
5667 * 5668 * It is safe to change page's memcg here because the page 5669 * is referenced, charged, isolated, and locked: we can't race 5670 * with (un)charging, migration, LRU putback, or anything else 5671 * that would rely on a stable page's memory cgroup. 5672 * 5673 * Note that lock_page_memcg is a memcg lock, not a page lock, 5674 * to save space. As soon as we switch page's memory cgroup to a 5675 * new memcg that isn't locked, the above state can change 5676 * concurrently again. Make sure we're truly done with it. 5677 */ 5678 smp_mb(); 5679 5680 css_get(&to->css); 5681 css_put(&from->css); 5682 5683 page->memcg_data = (unsigned long)to; 5684 5685 __unlock_page_memcg(from); 5686 5687 ret = 0; 5688 5689 local_irq_disable(); 5690 mem_cgroup_charge_statistics(to, page, nr_pages); 5691 memcg_check_events(to, page); 5692 mem_cgroup_charge_statistics(from, page, -nr_pages); 5693 memcg_check_events(from, page); 5694 local_irq_enable(); 5695 out_unlock: 5696 unlock_page(page); 5697 out: 5698 return ret; 5699 } 5700 5701 /** 5702 * get_mctgt_type - get target type of moving charge 5703 * @vma: the vma the pte to be checked belongs 5704 * @addr: the address corresponding to the pte to be checked 5705 * @ptent: the pte to be checked 5706 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5707 * 5708 * Returns 5709 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5710 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5711 * move charge. if @target is not NULL, the page is stored in target->page 5712 * with extra refcnt got(Callers should handle it). 5713 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5714 * target for charge migration. if @target is not NULL, the entry is stored 5715 * in target->ent. 5716 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE 5717 * (so ZONE_DEVICE page and thus not on the lru). 5718 * For now we such page is charge like a regular page would be as for all 5719 * intent and purposes it is just special memory taking the place of a 5720 * regular page. 5721 * 5722 * See Documentations/vm/hmm.txt and include/linux/hmm.h 5723 * 5724 * Called with pte lock held. 5725 */ 5726 5727 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5728 unsigned long addr, pte_t ptent, union mc_target *target) 5729 { 5730 struct page *page = NULL; 5731 enum mc_target_type ret = MC_TARGET_NONE; 5732 swp_entry_t ent = { .val = 0 }; 5733 5734 if (pte_present(ptent)) 5735 page = mc_handle_present_pte(vma, addr, ptent); 5736 else if (is_swap_pte(ptent)) 5737 page = mc_handle_swap_pte(vma, ptent, &ent); 5738 else if (pte_none(ptent)) 5739 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5740 5741 if (!page && !ent.val) 5742 return ret; 5743 if (page) { 5744 /* 5745 * Do only loose check w/o serialization. 5746 * mem_cgroup_move_account() checks the page is valid or 5747 * not under LRU exclusion. 5748 */ 5749 if (page_memcg(page) == mc.from) { 5750 ret = MC_TARGET_PAGE; 5751 if (is_device_private_page(page)) 5752 ret = MC_TARGET_DEVICE; 5753 if (target) 5754 target->page = page; 5755 } 5756 if (!ret || !target) 5757 put_page(page); 5758 } 5759 /* 5760 * There is a swap entry and a page doesn't exist or isn't charged. 5761 * But we cannot move a tail-page in a THP. 
5762 */ 5763 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5764 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5765 ret = MC_TARGET_SWAP; 5766 if (target) 5767 target->ent = ent; 5768 } 5769 return ret; 5770 } 5771 5772 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5773 /* 5774 * We don't consider PMD mapped swapping or file mapped pages because THP does 5775 * not support them for now. 5776 * Caller should make sure that pmd_trans_huge(pmd) is true. 5777 */ 5778 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5779 unsigned long addr, pmd_t pmd, union mc_target *target) 5780 { 5781 struct page *page = NULL; 5782 enum mc_target_type ret = MC_TARGET_NONE; 5783 5784 if (unlikely(is_swap_pmd(pmd))) { 5785 VM_BUG_ON(thp_migration_supported() && 5786 !is_pmd_migration_entry(pmd)); 5787 return ret; 5788 } 5789 page = pmd_page(pmd); 5790 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5791 if (!(mc.flags & MOVE_ANON)) 5792 return ret; 5793 if (page_memcg(page) == mc.from) { 5794 ret = MC_TARGET_PAGE; 5795 if (target) { 5796 get_page(page); 5797 target->page = page; 5798 } 5799 } 5800 return ret; 5801 } 5802 #else 5803 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5804 unsigned long addr, pmd_t pmd, union mc_target *target) 5805 { 5806 return MC_TARGET_NONE; 5807 } 5808 #endif 5809 5810 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5811 unsigned long addr, unsigned long end, 5812 struct mm_walk *walk) 5813 { 5814 struct vm_area_struct *vma = walk->vma; 5815 pte_t *pte; 5816 spinlock_t *ptl; 5817 5818 ptl = pmd_trans_huge_lock(pmd, vma); 5819 if (ptl) { 5820 /* 5821 * Note their can not be MC_TARGET_DEVICE for now as we do not 5822 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5823 * this might change. 5824 */ 5825 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5826 mc.precharge += HPAGE_PMD_NR; 5827 spin_unlock(ptl); 5828 return 0; 5829 } 5830 5831 if (pmd_trans_unstable(pmd)) 5832 return 0; 5833 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5834 for (; addr != end; pte++, addr += PAGE_SIZE) 5835 if (get_mctgt_type(vma, addr, *pte, NULL)) 5836 mc.precharge++; /* increment precharge temporarily */ 5837 pte_unmap_unlock(pte - 1, ptl); 5838 cond_resched(); 5839 5840 return 0; 5841 } 5842 5843 static const struct mm_walk_ops precharge_walk_ops = { 5844 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5845 }; 5846 5847 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5848 { 5849 unsigned long precharge; 5850 5851 mmap_read_lock(mm); 5852 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5853 mmap_read_unlock(mm); 5854 5855 precharge = mc.precharge; 5856 mc.precharge = 0; 5857 5858 return precharge; 5859 } 5860 5861 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5862 { 5863 unsigned long precharge = mem_cgroup_count_precharge(mm); 5864 5865 VM_BUG_ON(mc.moving_task); 5866 mc.moving_task = current; 5867 return mem_cgroup_do_precharge(precharge); 5868 } 5869 5870 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. 
*/ 5871 static void __mem_cgroup_clear_mc(void) 5872 { 5873 struct mem_cgroup *from = mc.from; 5874 struct mem_cgroup *to = mc.to; 5875 5876 /* we must uncharge all the leftover precharges from mc.to */ 5877 if (mc.precharge) { 5878 cancel_charge(mc.to, mc.precharge); 5879 mc.precharge = 0; 5880 } 5881 /* 5882 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5883 * we must uncharge here. 5884 */ 5885 if (mc.moved_charge) { 5886 cancel_charge(mc.from, mc.moved_charge); 5887 mc.moved_charge = 0; 5888 } 5889 /* we must fixup refcnts and charges */ 5890 if (mc.moved_swap) { 5891 /* uncharge swap account from the old cgroup */ 5892 if (!mem_cgroup_is_root(mc.from)) 5893 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5894 5895 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5896 5897 /* 5898 * we charged both to->memory and to->memsw, so we 5899 * should uncharge to->memory. 5900 */ 5901 if (!mem_cgroup_is_root(mc.to)) 5902 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5903 5904 mc.moved_swap = 0; 5905 } 5906 memcg_oom_recover(from); 5907 memcg_oom_recover(to); 5908 wake_up_all(&mc.waitq); 5909 } 5910 5911 static void mem_cgroup_clear_mc(void) 5912 { 5913 struct mm_struct *mm = mc.mm; 5914 5915 /* 5916 * we must clear moving_task before waking up waiters at the end of 5917 * task migration. 5918 */ 5919 mc.moving_task = NULL; 5920 __mem_cgroup_clear_mc(); 5921 spin_lock(&mc.lock); 5922 mc.from = NULL; 5923 mc.to = NULL; 5924 mc.mm = NULL; 5925 spin_unlock(&mc.lock); 5926 5927 mmput(mm); 5928 } 5929 5930 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5931 { 5932 struct cgroup_subsys_state *css; 5933 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5934 struct mem_cgroup *from; 5935 struct task_struct *leader, *p; 5936 struct mm_struct *mm; 5937 unsigned long move_flags; 5938 int ret = 0; 5939 5940 /* charge immigration isn't supported on the default hierarchy */ 5941 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5942 return 0; 5943 5944 /* 5945 * Multi-process migrations only happen on the default hierarchy 5946 * where charge immigration is not used. Perform charge 5947 * immigration if @tset contains a leader and whine if there are 5948 * multiple. 5949 */ 5950 p = NULL; 5951 cgroup_taskset_for_each_leader(leader, css, tset) { 5952 WARN_ON_ONCE(p); 5953 p = leader; 5954 memcg = mem_cgroup_from_css(css); 5955 } 5956 if (!p) 5957 return 0; 5958 5959 /* 5960 * We are now committed to this value whatever it is. Changes in this 5961 * tunable will only affect upcoming migrations, not the current one. 5962 * So we need to save it, and keep it going. 
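 *
 * As an illustration (cgroup v1 only; "3" is MOVE_ANON | MOVE_FILE, and the
 * mount point below is an assumption, not taken from this file):
 *
 *	echo 3 > /sys/fs/cgroup/memory/<group>/memory.move_charge_at_immigrate
 *
 * affects migrations that start after the write, not one that is already
 * in flight.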
5963 */ 5964 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5965 if (!move_flags) 5966 return 0; 5967 5968 from = mem_cgroup_from_task(p); 5969 5970 VM_BUG_ON(from == memcg); 5971 5972 mm = get_task_mm(p); 5973 if (!mm) 5974 return 0; 5975 /* We move charges only when we move a owner of the mm */ 5976 if (mm->owner == p) { 5977 VM_BUG_ON(mc.from); 5978 VM_BUG_ON(mc.to); 5979 VM_BUG_ON(mc.precharge); 5980 VM_BUG_ON(mc.moved_charge); 5981 VM_BUG_ON(mc.moved_swap); 5982 5983 spin_lock(&mc.lock); 5984 mc.mm = mm; 5985 mc.from = from; 5986 mc.to = memcg; 5987 mc.flags = move_flags; 5988 spin_unlock(&mc.lock); 5989 /* We set mc.moving_task later */ 5990 5991 ret = mem_cgroup_precharge_mc(mm); 5992 if (ret) 5993 mem_cgroup_clear_mc(); 5994 } else { 5995 mmput(mm); 5996 } 5997 return ret; 5998 } 5999 6000 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6001 { 6002 if (mc.to) 6003 mem_cgroup_clear_mc(); 6004 } 6005 6006 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6007 unsigned long addr, unsigned long end, 6008 struct mm_walk *walk) 6009 { 6010 int ret = 0; 6011 struct vm_area_struct *vma = walk->vma; 6012 pte_t *pte; 6013 spinlock_t *ptl; 6014 enum mc_target_type target_type; 6015 union mc_target target; 6016 struct page *page; 6017 6018 ptl = pmd_trans_huge_lock(pmd, vma); 6019 if (ptl) { 6020 if (mc.precharge < HPAGE_PMD_NR) { 6021 spin_unlock(ptl); 6022 return 0; 6023 } 6024 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6025 if (target_type == MC_TARGET_PAGE) { 6026 page = target.page; 6027 if (!isolate_lru_page(page)) { 6028 if (!mem_cgroup_move_account(page, true, 6029 mc.from, mc.to)) { 6030 mc.precharge -= HPAGE_PMD_NR; 6031 mc.moved_charge += HPAGE_PMD_NR; 6032 } 6033 putback_lru_page(page); 6034 } 6035 put_page(page); 6036 } else if (target_type == MC_TARGET_DEVICE) { 6037 page = target.page; 6038 if (!mem_cgroup_move_account(page, true, 6039 mc.from, mc.to)) { 6040 mc.precharge -= HPAGE_PMD_NR; 6041 mc.moved_charge += HPAGE_PMD_NR; 6042 } 6043 put_page(page); 6044 } 6045 spin_unlock(ptl); 6046 return 0; 6047 } 6048 6049 if (pmd_trans_unstable(pmd)) 6050 return 0; 6051 retry: 6052 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6053 for (; addr != end; addr += PAGE_SIZE) { 6054 pte_t ptent = *(pte++); 6055 bool device = false; 6056 swp_entry_t ent; 6057 6058 if (!mc.precharge) 6059 break; 6060 6061 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6062 case MC_TARGET_DEVICE: 6063 device = true; 6064 fallthrough; 6065 case MC_TARGET_PAGE: 6066 page = target.page; 6067 /* 6068 * We can have a part of the split pmd here. Moving it 6069 * can be done but it would be too convoluted so simply 6070 * ignore such a partial THP and keep it in original 6071 * memcg. There should be somebody mapping the head. 6072 */ 6073 if (PageTransCompound(page)) 6074 goto put; 6075 if (!device && isolate_lru_page(page)) 6076 goto put; 6077 if (!mem_cgroup_move_account(page, false, 6078 mc.from, mc.to)) { 6079 mc.precharge--; 6080 /* we uncharge from mc.from later. */ 6081 mc.moved_charge++; 6082 } 6083 if (!device) 6084 putback_lru_page(page); 6085 put: /* get_mctgt_type() gets the page */ 6086 put_page(page); 6087 break; 6088 case MC_TARGET_SWAP: 6089 ent = target.ent; 6090 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6091 mc.precharge--; 6092 mem_cgroup_id_get_many(mc.to, 1); 6093 /* we fixup other refcnts and charges later. 
*/ 6094 mc.moved_swap++; 6095 } 6096 break; 6097 default: 6098 break; 6099 } 6100 } 6101 pte_unmap_unlock(pte - 1, ptl); 6102 cond_resched(); 6103 6104 if (addr != end) { 6105 /* 6106 * We have consumed all precharges we got in can_attach(). 6107 * We try charge one by one, but don't do any additional 6108 * charges to mc.to if we have failed in charge once in attach() 6109 * phase. 6110 */ 6111 ret = mem_cgroup_do_precharge(1); 6112 if (!ret) 6113 goto retry; 6114 } 6115 6116 return ret; 6117 } 6118 6119 static const struct mm_walk_ops charge_walk_ops = { 6120 .pmd_entry = mem_cgroup_move_charge_pte_range, 6121 }; 6122 6123 static void mem_cgroup_move_charge(void) 6124 { 6125 lru_add_drain_all(); 6126 /* 6127 * Signal lock_page_memcg() to take the memcg's move_lock 6128 * while we're moving its pages to another memcg. Then wait 6129 * for already started RCU-only updates to finish. 6130 */ 6131 atomic_inc(&mc.from->moving_account); 6132 synchronize_rcu(); 6133 retry: 6134 if (unlikely(!mmap_read_trylock(mc.mm))) { 6135 /* 6136 * Someone who are holding the mmap_lock might be waiting in 6137 * waitq. So we cancel all extra charges, wake up all waiters, 6138 * and retry. Because we cancel precharges, we might not be able 6139 * to move enough charges, but moving charge is a best-effort 6140 * feature anyway, so it wouldn't be a big problem. 6141 */ 6142 __mem_cgroup_clear_mc(); 6143 cond_resched(); 6144 goto retry; 6145 } 6146 /* 6147 * When we have consumed all precharges and failed in doing 6148 * additional charge, the page walk just aborts. 6149 */ 6150 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6151 NULL); 6152 6153 mmap_read_unlock(mc.mm); 6154 atomic_dec(&mc.from->moving_account); 6155 } 6156 6157 static void mem_cgroup_move_task(void) 6158 { 6159 if (mc.to) { 6160 mem_cgroup_move_charge(); 6161 mem_cgroup_clear_mc(); 6162 } 6163 } 6164 #else /* !CONFIG_MMU */ 6165 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6166 { 6167 return 0; 6168 } 6169 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6170 { 6171 } 6172 static void mem_cgroup_move_task(void) 6173 { 6174 } 6175 #endif 6176 6177 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6178 { 6179 if (value == PAGE_COUNTER_MAX) 6180 seq_puts(m, "max\n"); 6181 else 6182 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6183 6184 return 0; 6185 } 6186 6187 static u64 memory_current_read(struct cgroup_subsys_state *css, 6188 struct cftype *cft) 6189 { 6190 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6191 6192 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6193 } 6194 6195 static int memory_min_show(struct seq_file *m, void *v) 6196 { 6197 return seq_puts_memcg_tunable(m, 6198 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6199 } 6200 6201 static ssize_t memory_min_write(struct kernfs_open_file *of, 6202 char *buf, size_t nbytes, loff_t off) 6203 { 6204 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6205 unsigned long min; 6206 int err; 6207 6208 buf = strstrip(buf); 6209 err = page_counter_memparse(buf, "max", &min); 6210 if (err) 6211 return err; 6212 6213 page_counter_set_min(&memcg->memory, min); 6214 6215 return nbytes; 6216 } 6217 6218 static int memory_low_show(struct seq_file *m, void *v) 6219 { 6220 return seq_puts_memcg_tunable(m, 6221 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6222 } 6223 6224 static ssize_t memory_low_write(struct kernfs_open_file *of, 6225 char *buf, size_t nbytes, loff_t off) 6226 { 6227 
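/*
 * Example: writing "512M" here protects 131072 pages with a 4K page size;
 * the literal "max" is parsed as PAGE_COUNTER_MAX by
 * page_counter_memparse() below.
 */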
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6228 unsigned long low; 6229 int err; 6230 6231 buf = strstrip(buf); 6232 err = page_counter_memparse(buf, "max", &low); 6233 if (err) 6234 return err; 6235 6236 page_counter_set_low(&memcg->memory, low); 6237 6238 return nbytes; 6239 } 6240 6241 static int memory_high_show(struct seq_file *m, void *v) 6242 { 6243 return seq_puts_memcg_tunable(m, 6244 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6245 } 6246 6247 static ssize_t memory_high_write(struct kernfs_open_file *of, 6248 char *buf, size_t nbytes, loff_t off) 6249 { 6250 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6251 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6252 bool drained = false; 6253 unsigned long high; 6254 int err; 6255 6256 buf = strstrip(buf); 6257 err = page_counter_memparse(buf, "max", &high); 6258 if (err) 6259 return err; 6260 6261 page_counter_set_high(&memcg->memory, high); 6262 6263 for (;;) { 6264 unsigned long nr_pages = page_counter_read(&memcg->memory); 6265 unsigned long reclaimed; 6266 6267 if (nr_pages <= high) 6268 break; 6269 6270 if (signal_pending(current)) 6271 break; 6272 6273 if (!drained) { 6274 drain_all_stock(memcg); 6275 drained = true; 6276 continue; 6277 } 6278 6279 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6280 GFP_KERNEL, true); 6281 6282 if (!reclaimed && !nr_retries--) 6283 break; 6284 } 6285 6286 memcg_wb_domain_size_changed(memcg); 6287 return nbytes; 6288 } 6289 6290 static int memory_max_show(struct seq_file *m, void *v) 6291 { 6292 return seq_puts_memcg_tunable(m, 6293 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6294 } 6295 6296 static ssize_t memory_max_write(struct kernfs_open_file *of, 6297 char *buf, size_t nbytes, loff_t off) 6298 { 6299 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6300 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6301 bool drained = false; 6302 unsigned long max; 6303 int err; 6304 6305 buf = strstrip(buf); 6306 err = page_counter_memparse(buf, "max", &max); 6307 if (err) 6308 return err; 6309 6310 xchg(&memcg->memory.max, max); 6311 6312 for (;;) { 6313 unsigned long nr_pages = page_counter_read(&memcg->memory); 6314 6315 if (nr_pages <= max) 6316 break; 6317 6318 if (signal_pending(current)) 6319 break; 6320 6321 if (!drained) { 6322 drain_all_stock(memcg); 6323 drained = true; 6324 continue; 6325 } 6326 6327 if (nr_reclaims) { 6328 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6329 GFP_KERNEL, true)) 6330 nr_reclaims--; 6331 continue; 6332 } 6333 6334 memcg_memory_event(memcg, MEMCG_OOM); 6335 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6336 break; 6337 } 6338 6339 memcg_wb_domain_size_changed(memcg); 6340 return nbytes; 6341 } 6342 6343 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6344 { 6345 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6346 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6347 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6348 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6349 seq_printf(m, "oom_kill %lu\n", 6350 atomic_long_read(&events[MEMCG_OOM_KILL])); 6351 } 6352 6353 static int memory_events_show(struct seq_file *m, void *v) 6354 { 6355 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6356 6357 __memory_events_show(m, memcg->memory_events); 6358 return 0; 6359 } 6360 6361 static int memory_events_local_show(struct seq_file *m, void *v) 6362 { 6363 struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); 6364 6365 __memory_events_show(m, memcg->memory_events_local); 6366 return 0; 6367 } 6368 6369 static int memory_stat_show(struct seq_file *m, void *v) 6370 { 6371 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6372 char *buf; 6373 6374 buf = memory_stat_format(memcg); 6375 if (!buf) 6376 return -ENOMEM; 6377 seq_puts(m, buf); 6378 kfree(buf); 6379 return 0; 6380 } 6381 6382 #ifdef CONFIG_NUMA 6383 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 6384 int item) 6385 { 6386 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item); 6387 } 6388 6389 static int memory_numa_stat_show(struct seq_file *m, void *v) 6390 { 6391 int i; 6392 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6393 6394 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6395 int nid; 6396 6397 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6398 continue; 6399 6400 seq_printf(m, "%s", memory_stats[i].name); 6401 for_each_node_state(nid, N_MEMORY) { 6402 u64 size; 6403 struct lruvec *lruvec; 6404 6405 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6406 size = lruvec_page_state_output(lruvec, 6407 memory_stats[i].idx); 6408 seq_printf(m, " N%d=%llu", nid, size); 6409 } 6410 seq_putc(m, '\n'); 6411 } 6412 6413 return 0; 6414 } 6415 #endif 6416 6417 static int memory_oom_group_show(struct seq_file *m, void *v) 6418 { 6419 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6420 6421 seq_printf(m, "%d\n", memcg->oom_group); 6422 6423 return 0; 6424 } 6425 6426 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6427 char *buf, size_t nbytes, loff_t off) 6428 { 6429 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6430 int ret, oom_group; 6431 6432 buf = strstrip(buf); 6433 if (!buf) 6434 return -EINVAL; 6435 6436 ret = kstrtoint(buf, 0, &oom_group); 6437 if (ret) 6438 return ret; 6439 6440 if (oom_group != 0 && oom_group != 1) 6441 return -EINVAL; 6442 6443 memcg->oom_group = oom_group; 6444 6445 return nbytes; 6446 } 6447 6448 static struct cftype memory_files[] = { 6449 { 6450 .name = "current", 6451 .flags = CFTYPE_NOT_ON_ROOT, 6452 .read_u64 = memory_current_read, 6453 }, 6454 { 6455 .name = "min", 6456 .flags = CFTYPE_NOT_ON_ROOT, 6457 .seq_show = memory_min_show, 6458 .write = memory_min_write, 6459 }, 6460 { 6461 .name = "low", 6462 .flags = CFTYPE_NOT_ON_ROOT, 6463 .seq_show = memory_low_show, 6464 .write = memory_low_write, 6465 }, 6466 { 6467 .name = "high", 6468 .flags = CFTYPE_NOT_ON_ROOT, 6469 .seq_show = memory_high_show, 6470 .write = memory_high_write, 6471 }, 6472 { 6473 .name = "max", 6474 .flags = CFTYPE_NOT_ON_ROOT, 6475 .seq_show = memory_max_show, 6476 .write = memory_max_write, 6477 }, 6478 { 6479 .name = "events", 6480 .flags = CFTYPE_NOT_ON_ROOT, 6481 .file_offset = offsetof(struct mem_cgroup, events_file), 6482 .seq_show = memory_events_show, 6483 }, 6484 { 6485 .name = "events.local", 6486 .flags = CFTYPE_NOT_ON_ROOT, 6487 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6488 .seq_show = memory_events_local_show, 6489 }, 6490 { 6491 .name = "stat", 6492 .seq_show = memory_stat_show, 6493 }, 6494 #ifdef CONFIG_NUMA 6495 { 6496 .name = "numa_stat", 6497 .seq_show = memory_numa_stat_show, 6498 }, 6499 #endif 6500 { 6501 .name = "oom.group", 6502 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6503 .seq_show = memory_oom_group_show, 6504 .write = memory_oom_group_write, 6505 }, 6506 { } /* terminate */ 6507 }; 6508 6509 struct cgroup_subsys memory_cgrp_subsys = { 6510 .css_alloc = 
mem_cgroup_css_alloc, 6511 .css_online = mem_cgroup_css_online, 6512 .css_offline = mem_cgroup_css_offline, 6513 .css_released = mem_cgroup_css_released, 6514 .css_free = mem_cgroup_css_free, 6515 .css_reset = mem_cgroup_css_reset, 6516 .css_rstat_flush = mem_cgroup_css_rstat_flush, 6517 .can_attach = mem_cgroup_can_attach, 6518 .cancel_attach = mem_cgroup_cancel_attach, 6519 .post_attach = mem_cgroup_move_task, 6520 .dfl_cftypes = memory_files, 6521 .legacy_cftypes = mem_cgroup_legacy_files, 6522 .early_init = 0, 6523 }; 6524 6525 /* 6526 * This function calculates an individual cgroup's effective 6527 * protection which is derived from its own memory.min/low, its 6528 * parent's and siblings' settings, as well as the actual memory 6529 * distribution in the tree. 6530 * 6531 * The following rules apply to the effective protection values: 6532 * 6533 * 1. At the first level of reclaim, effective protection is equal to 6534 * the declared protection in memory.min and memory.low. 6535 * 6536 * 2. To enable safe delegation of the protection configuration, at 6537 * subsequent levels the effective protection is capped to the 6538 * parent's effective protection. 6539 * 6540 * 3. To make complex and dynamic subtrees easier to configure, the 6541 * user is allowed to overcommit the declared protection at a given 6542 * level. If that is the case, the parent's effective protection is 6543 * distributed to the children in proportion to how much protection 6544 * they have declared and how much of it they are utilizing. 6545 * 6546 * This makes distribution proportional, but also work-conserving: 6547 * if one cgroup claims much more protection than it uses memory, 6548 * the unused remainder is available to its siblings. 6549 * 6550 * 4. Conversely, when the declared protection is undercommitted at a 6551 * given level, the distribution of the larger parental protection 6552 * budget is NOT proportional. A cgroup's protection from a sibling 6553 * is capped to its own memory.min/low setting. 6554 * 6555 * 5. However, to allow protecting recursive subtrees from each other 6556 * without having to declare each individual cgroup's fixed share 6557 * of the ancestor's claim to protection, any unutilized - 6558 * "floating" - protection from up the tree is distributed in 6559 * proportion to each cgroup's *usage*. This makes the protection 6560 * neutral wrt sibling cgroups and lets them compete freely over 6561 * the shared parental protection budget, but it protects the 6562 * subtree as a whole from neighboring subtrees. 6563 * 6564 * Note that 4. and 5. are not in conflict: 4. is about protecting 6565 * against immediate siblings whereas 5. is about protecting against 6566 * neighboring subtrees. 6567 */ 6568 static unsigned long effective_protection(unsigned long usage, 6569 unsigned long parent_usage, 6570 unsigned long setting, 6571 unsigned long parent_effective, 6572 unsigned long siblings_protected) 6573 { 6574 unsigned long protected; 6575 unsigned long ep; 6576 6577 protected = min(usage, setting); 6578 /* 6579 * If all cgroups at this level combined claim and use more 6580 * protection then what the parent affords them, distribute 6581 * shares in proportion to utilization. 6582 * 6583 * We are using actual utilization rather than the statically 6584 * claimed protection in order to be work-conserving: claimed 6585 * but unused protection is available to siblings that would 6586 * otherwise get a smaller chunk than what they claimed. 
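 *
 * A worked example (numbers are illustrative): if the parent's effective
 * protection is 1G and two children each claim and use 1G, then
 * siblings_protected is 2G, and each child's effective protection
 * becomes 1G * 1G / 2G = 512M.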
6587 */ 6588 if (siblings_protected > parent_effective) 6589 return protected * parent_effective / siblings_protected; 6590 6591 /* 6592 * Ok, utilized protection of all children is within what the 6593 * parent affords them, so we know whatever this child claims 6594 * and utilizes is effectively protected. 6595 * 6596 * If there is unprotected usage beyond this value, reclaim 6597 * will apply pressure in proportion to that amount. 6598 * 6599 * If there is unutilized protection, the cgroup will be fully 6600 * shielded from reclaim, but we do return a smaller value for 6601 * protection than what the group could enjoy in theory. This 6602 * is okay. With the overcommit distribution above, effective 6603 * protection is always dependent on how memory is actually 6604 * consumed among the siblings anyway. 6605 */ 6606 ep = protected; 6607 6608 /* 6609 * If the children aren't claiming (all of) the protection 6610 * afforded to them by the parent, distribute the remainder in 6611 * proportion to the (unprotected) memory of each cgroup. That 6612 * way, cgroups that aren't explicitly prioritized wrt each 6613 * other compete freely over the allowance, but they are 6614 * collectively protected from neighboring trees. 6615 * 6616 * We're using unprotected memory for the weight so that if 6617 * some cgroups DO claim explicit protection, we don't protect 6618 * the same bytes twice. 6619 * 6620 * Check both usage and parent_usage against the respective 6621 * protected values. One should imply the other, but they 6622 * aren't read atomically - make sure the division is sane. 6623 */ 6624 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6625 return ep; 6626 if (parent_effective > siblings_protected && 6627 parent_usage > siblings_protected && 6628 usage > protected) { 6629 unsigned long unclaimed; 6630 6631 unclaimed = parent_effective - siblings_protected; 6632 unclaimed *= usage - protected; 6633 unclaimed /= parent_usage - siblings_protected; 6634 6635 ep += unclaimed; 6636 } 6637 6638 return ep; 6639 } 6640 6641 /** 6642 * mem_cgroup_protected - check if memory consumption is in the normal range 6643 * @root: the top ancestor of the sub-tree being checked 6644 * @memcg: the memory cgroup to check 6645 * 6646 * WARNING: This function is not stateless! It can only be used as part 6647 * of a top-down tree iteration, not for isolated queries. 6648 */ 6649 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6650 struct mem_cgroup *memcg) 6651 { 6652 unsigned long usage, parent_usage; 6653 struct mem_cgroup *parent; 6654 6655 if (mem_cgroup_disabled()) 6656 return; 6657 6658 if (!root) 6659 root = root_mem_cgroup; 6660 6661 /* 6662 * Effective values of the reclaim targets are ignored so they 6663 * can be stale. Have a look at mem_cgroup_protection for more 6664 * details. 6665 * TODO: calculation should be more robust so that we do not need 6666 * that special casing. 
6667 */ 6668 if (memcg == root) 6669 return; 6670 6671 usage = page_counter_read(&memcg->memory); 6672 if (!usage) 6673 return; 6674 6675 parent = parent_mem_cgroup(memcg); 6676 /* No parent means a non-hierarchical mode on v1 memcg */ 6677 if (!parent) 6678 return; 6679 6680 if (parent == root) { 6681 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6682 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6683 return; 6684 } 6685 6686 parent_usage = page_counter_read(&parent->memory); 6687 6688 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6689 READ_ONCE(memcg->memory.min), 6690 READ_ONCE(parent->memory.emin), 6691 atomic_long_read(&parent->memory.children_min_usage))); 6692 6693 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6694 READ_ONCE(memcg->memory.low), 6695 READ_ONCE(parent->memory.elow), 6696 atomic_long_read(&parent->memory.children_low_usage))); 6697 } 6698 6699 static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg, 6700 gfp_t gfp) 6701 { 6702 unsigned int nr_pages = thp_nr_pages(page); 6703 int ret; 6704 6705 ret = try_charge(memcg, gfp, nr_pages); 6706 if (ret) 6707 goto out; 6708 6709 css_get(&memcg->css); 6710 commit_charge(page, memcg); 6711 6712 local_irq_disable(); 6713 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6714 memcg_check_events(memcg, page); 6715 local_irq_enable(); 6716 out: 6717 return ret; 6718 } 6719 6720 /** 6721 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6722 * @page: page to charge 6723 * @mm: mm context of the victim 6724 * @gfp_mask: reclaim mode 6725 * 6726 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6727 * pages according to @gfp_mask if necessary. if @mm is NULL, try to 6728 * charge to the active memcg. 6729 * 6730 * Do not use this for pages allocated for swapin. 6731 * 6732 * Returns 0 on success. Otherwise, an error code is returned. 6733 */ 6734 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6735 { 6736 struct mem_cgroup *memcg; 6737 int ret; 6738 6739 if (mem_cgroup_disabled()) 6740 return 0; 6741 6742 memcg = get_mem_cgroup_from_mm(mm); 6743 ret = __mem_cgroup_charge(page, memcg, gfp_mask); 6744 css_put(&memcg->css); 6745 6746 return ret; 6747 } 6748 6749 /** 6750 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin 6751 * @page: page to charge 6752 * @mm: mm context of the victim 6753 * @gfp: reclaim mode 6754 * @entry: swap entry for which the page is allocated 6755 * 6756 * This function charges a page allocated for swapin. Please call this before 6757 * adding the page to the swapcache. 6758 * 6759 * Returns 0 on success. Otherwise, an error code is returned. 6760 */ 6761 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, 6762 gfp_t gfp, swp_entry_t entry) 6763 { 6764 struct mem_cgroup *memcg; 6765 unsigned short id; 6766 int ret; 6767 6768 if (mem_cgroup_disabled()) 6769 return 0; 6770 6771 id = lookup_swap_cgroup_id(entry); 6772 rcu_read_lock(); 6773 memcg = mem_cgroup_from_id(id); 6774 if (!memcg || !css_tryget_online(&memcg->css)) 6775 memcg = get_mem_cgroup_from_mm(mm); 6776 rcu_read_unlock(); 6777 6778 ret = __mem_cgroup_charge(page, memcg, gfp); 6779 6780 css_put(&memcg->css); 6781 return ret; 6782 } 6783 6784 /* 6785 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 6786 * @entry: swap entry for which the page is charged 6787 * 6788 * Call this function after successfully adding the charged page to swapcache. 
6789 * 6790 * Note: This function assumes the page for which swap slot is being uncharged 6791 * is order 0 page. 6792 */ 6793 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) 6794 { 6795 /* 6796 * Cgroup1's unified memory+swap counter has been charged with the 6797 * new swapcache page, finish the transfer by uncharging the swap 6798 * slot. The swap slot would also get uncharged when it dies, but 6799 * it can stick around indefinitely and we'd count the page twice 6800 * the entire time. 6801 * 6802 * Cgroup2 has separate resource counters for memory and swap, 6803 * so this is a non-issue here. Memory and swap charge lifetimes 6804 * correspond 1:1 to page and swap slot lifetimes: we charge the 6805 * page to memory here, and uncharge swap when the slot is freed. 6806 */ 6807 if (!mem_cgroup_disabled() && do_memsw_account()) { 6808 /* 6809 * The swap entry might not get freed for a long time, 6810 * let's not wait for it. The page already received a 6811 * memory+swap charge, drop the swap entry duplicate. 6812 */ 6813 mem_cgroup_uncharge_swap(entry, 1); 6814 } 6815 } 6816 6817 struct uncharge_gather { 6818 struct mem_cgroup *memcg; 6819 unsigned long nr_memory; 6820 unsigned long pgpgout; 6821 unsigned long nr_kmem; 6822 struct page *dummy_page; 6823 }; 6824 6825 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6826 { 6827 memset(ug, 0, sizeof(*ug)); 6828 } 6829 6830 static void uncharge_batch(const struct uncharge_gather *ug) 6831 { 6832 unsigned long flags; 6833 6834 if (ug->nr_memory) { 6835 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 6836 if (do_memsw_account()) 6837 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 6838 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6839 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6840 memcg_oom_recover(ug->memcg); 6841 } 6842 6843 local_irq_save(flags); 6844 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6845 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); 6846 memcg_check_events(ug->memcg, ug->dummy_page); 6847 local_irq_restore(flags); 6848 6849 /* drop reference from uncharge_page */ 6850 css_put(&ug->memcg->css); 6851 } 6852 6853 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6854 { 6855 unsigned long nr_pages; 6856 struct mem_cgroup *memcg; 6857 struct obj_cgroup *objcg; 6858 bool use_objcg = PageMemcgKmem(page); 6859 6860 VM_BUG_ON_PAGE(PageLRU(page), page); 6861 6862 /* 6863 * Nobody should be changing or seriously looking at 6864 * page memcg or objcg at this point, we have fully 6865 * exclusive access to the page. 6866 */ 6867 if (use_objcg) { 6868 objcg = __page_objcg(page); 6869 /* 6870 * This get matches the put at the end of the function and 6871 * kmem pages do not hold memcg references anymore. 
6872 */ 6873 memcg = get_mem_cgroup_from_objcg(objcg); 6874 } else { 6875 memcg = __page_memcg(page); 6876 } 6877 6878 if (!memcg) 6879 return; 6880 6881 if (ug->memcg != memcg) { 6882 if (ug->memcg) { 6883 uncharge_batch(ug); 6884 uncharge_gather_clear(ug); 6885 } 6886 ug->memcg = memcg; 6887 ug->dummy_page = page; 6888 6889 /* pairs with css_put in uncharge_batch */ 6890 css_get(&memcg->css); 6891 } 6892 6893 nr_pages = compound_nr(page); 6894 6895 if (use_objcg) { 6896 ug->nr_memory += nr_pages; 6897 ug->nr_kmem += nr_pages; 6898 6899 page->memcg_data = 0; 6900 obj_cgroup_put(objcg); 6901 } else { 6902 /* LRU pages aren't accounted at the root level */ 6903 if (!mem_cgroup_is_root(memcg)) 6904 ug->nr_memory += nr_pages; 6905 ug->pgpgout++; 6906 6907 page->memcg_data = 0; 6908 } 6909 6910 css_put(&memcg->css); 6911 } 6912 6913 /** 6914 * mem_cgroup_uncharge - uncharge a page 6915 * @page: page to uncharge 6916 * 6917 * Uncharge a page previously charged with mem_cgroup_charge(). 6918 */ 6919 void mem_cgroup_uncharge(struct page *page) 6920 { 6921 struct uncharge_gather ug; 6922 6923 if (mem_cgroup_disabled()) 6924 return; 6925 6926 /* Don't touch page->lru of any random page, pre-check: */ 6927 if (!page_memcg(page)) 6928 return; 6929 6930 uncharge_gather_clear(&ug); 6931 uncharge_page(page, &ug); 6932 uncharge_batch(&ug); 6933 } 6934 6935 /** 6936 * mem_cgroup_uncharge_list - uncharge a list of page 6937 * @page_list: list of pages to uncharge 6938 * 6939 * Uncharge a list of pages previously charged with 6940 * mem_cgroup_charge(). 6941 */ 6942 void mem_cgroup_uncharge_list(struct list_head *page_list) 6943 { 6944 struct uncharge_gather ug; 6945 struct page *page; 6946 6947 if (mem_cgroup_disabled()) 6948 return; 6949 6950 uncharge_gather_clear(&ug); 6951 list_for_each_entry(page, page_list, lru) 6952 uncharge_page(page, &ug); 6953 if (ug.memcg) 6954 uncharge_batch(&ug); 6955 } 6956 6957 /** 6958 * mem_cgroup_migrate - charge a page's replacement 6959 * @oldpage: currently circulating page 6960 * @newpage: replacement page 6961 * 6962 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6963 * be uncharged upon free. 6964 * 6965 * Both pages must be locked, @newpage->mapping must be set up. 6966 */ 6967 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6968 { 6969 struct mem_cgroup *memcg; 6970 unsigned int nr_pages; 6971 unsigned long flags; 6972 6973 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6974 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6975 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6976 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6977 newpage); 6978 6979 if (mem_cgroup_disabled()) 6980 return; 6981 6982 /* Page cache replacement: new page already charged? */ 6983 if (page_memcg(newpage)) 6984 return; 6985 6986 memcg = page_memcg(oldpage); 6987 VM_WARN_ON_ONCE_PAGE(!memcg, oldpage); 6988 if (!memcg) 6989 return; 6990 6991 /* Force-charge the new page. 
The old one will be freed soon */ 6992 nr_pages = thp_nr_pages(newpage); 6993 6994 if (!mem_cgroup_is_root(memcg)) { 6995 page_counter_charge(&memcg->memory, nr_pages); 6996 if (do_memsw_account()) 6997 page_counter_charge(&memcg->memsw, nr_pages); 6998 } 6999 7000 css_get(&memcg->css); 7001 commit_charge(newpage, memcg); 7002 7003 local_irq_save(flags); 7004 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 7005 memcg_check_events(memcg, newpage); 7006 local_irq_restore(flags); 7007 } 7008 7009 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7010 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7011 7012 void mem_cgroup_sk_alloc(struct sock *sk) 7013 { 7014 struct mem_cgroup *memcg; 7015 7016 if (!mem_cgroup_sockets_enabled) 7017 return; 7018 7019 /* Do not associate the sock with unrelated interrupted task's memcg. */ 7020 if (in_interrupt()) 7021 return; 7022 7023 rcu_read_lock(); 7024 memcg = mem_cgroup_from_task(current); 7025 if (memcg == root_mem_cgroup) 7026 goto out; 7027 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7028 goto out; 7029 if (css_tryget(&memcg->css)) 7030 sk->sk_memcg = memcg; 7031 out: 7032 rcu_read_unlock(); 7033 } 7034 7035 void mem_cgroup_sk_free(struct sock *sk) 7036 { 7037 if (sk->sk_memcg) 7038 css_put(&sk->sk_memcg->css); 7039 } 7040 7041 /** 7042 * mem_cgroup_charge_skmem - charge socket memory 7043 * @memcg: memcg to charge 7044 * @nr_pages: number of pages to charge 7045 * 7046 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7047 * @memcg's configured limit, %false if the charge had to be forced. 7048 */ 7049 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7050 { 7051 gfp_t gfp_mask = GFP_KERNEL; 7052 7053 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7054 struct page_counter *fail; 7055 7056 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7057 memcg->tcpmem_pressure = 0; 7058 return true; 7059 } 7060 page_counter_charge(&memcg->tcpmem, nr_pages); 7061 memcg->tcpmem_pressure = 1; 7062 return false; 7063 } 7064 7065 /* Don't block in the packet receive path */ 7066 if (in_softirq()) 7067 gfp_mask = GFP_NOWAIT; 7068 7069 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7070 7071 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 7072 return true; 7073 7074 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 7075 return false; 7076 } 7077 7078 /** 7079 * mem_cgroup_uncharge_skmem - uncharge socket memory 7080 * @memcg: memcg to uncharge 7081 * @nr_pages: number of pages to uncharge 7082 */ 7083 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7084 { 7085 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7086 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7087 return; 7088 } 7089 7090 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7091 7092 refill_stock(memcg, nr_pages); 7093 } 7094 7095 static int __init cgroup_memory(char *s) 7096 { 7097 char *token; 7098 7099 while ((token = strsep(&s, ",")) != NULL) { 7100 if (!*token) 7101 continue; 7102 if (!strcmp(token, "nosocket")) 7103 cgroup_memory_nosocket = true; 7104 if (!strcmp(token, "nokmem")) 7105 cgroup_memory_nokmem = true; 7106 } 7107 return 0; 7108 } 7109 __setup("cgroup.memory=", cgroup_memory); 7110 7111 /* 7112 * subsys_initcall() for memory controller. 
7113 * 7114 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7115 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7116 * basically everything that doesn't depend on a specific mem_cgroup structure 7117 * should be initialized from here. 7118 */ 7119 static int __init mem_cgroup_init(void) 7120 { 7121 int cpu, node; 7122 7123 /* 7124 * Currently s32 type (can refer to struct batched_lruvec_stat) is 7125 * used for per-memcg-per-cpu caching of per-node statistics. In order 7126 * to work fine, we should make sure that the overfill threshold can't 7127 * exceed S32_MAX / PAGE_SIZE. 7128 */ 7129 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 7130 7131 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7132 memcg_hotplug_cpu_dead); 7133 7134 for_each_possible_cpu(cpu) 7135 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7136 drain_local_stock); 7137 7138 for_each_node(node) { 7139 struct mem_cgroup_tree_per_node *rtpn; 7140 7141 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7142 node_online(node) ? node : NUMA_NO_NODE); 7143 7144 rtpn->rb_root = RB_ROOT; 7145 rtpn->rb_rightmost = NULL; 7146 spin_lock_init(&rtpn->lock); 7147 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7148 } 7149 7150 return 0; 7151 } 7152 subsys_initcall(mem_cgroup_init); 7153 7154 #ifdef CONFIG_MEMCG_SWAP 7155 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7156 { 7157 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7158 /* 7159 * The root cgroup cannot be destroyed, so it's refcount must 7160 * always be >= 1. 7161 */ 7162 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7163 VM_BUG_ON(1); 7164 break; 7165 } 7166 memcg = parent_mem_cgroup(memcg); 7167 if (!memcg) 7168 memcg = root_mem_cgroup; 7169 } 7170 return memcg; 7171 } 7172 7173 /** 7174 * mem_cgroup_swapout - transfer a memsw charge to swap 7175 * @page: page whose memsw charge to transfer 7176 * @entry: swap entry to move the charge to 7177 * 7178 * Transfer the memsw charge of @page to @entry. 7179 */ 7180 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7181 { 7182 struct mem_cgroup *memcg, *swap_memcg; 7183 unsigned int nr_entries; 7184 unsigned short oldid; 7185 7186 VM_BUG_ON_PAGE(PageLRU(page), page); 7187 VM_BUG_ON_PAGE(page_count(page), page); 7188 7189 if (mem_cgroup_disabled()) 7190 return; 7191 7192 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7193 return; 7194 7195 memcg = page_memcg(page); 7196 7197 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7198 if (!memcg) 7199 return; 7200 7201 /* 7202 * In case the memcg owning these pages has been offlined and doesn't 7203 * have an ID allocated to it anymore, charge the closest online 7204 * ancestor for the swap instead and transfer the memory+swap charge. 
7205 */ 7206 swap_memcg = mem_cgroup_id_get_online(memcg); 7207 nr_entries = thp_nr_pages(page); 7208 /* Get references for the tail pages, too */ 7209 if (nr_entries > 1) 7210 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7211 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7212 nr_entries); 7213 VM_BUG_ON_PAGE(oldid, page); 7214 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7215 7216 page->memcg_data = 0; 7217 7218 if (!mem_cgroup_is_root(memcg)) 7219 page_counter_uncharge(&memcg->memory, nr_entries); 7220 7221 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7222 if (!mem_cgroup_is_root(swap_memcg)) 7223 page_counter_charge(&swap_memcg->memsw, nr_entries); 7224 page_counter_uncharge(&memcg->memsw, nr_entries); 7225 } 7226 7227 /* 7228 * Interrupts should be disabled here because the caller holds the 7229 * i_pages lock which is taken with interrupts-off. It is 7230 * important here to have the interrupts disabled because it is the 7231 * only synchronisation we have for updating the per-CPU variables. 7232 */ 7233 VM_BUG_ON(!irqs_disabled()); 7234 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7235 memcg_check_events(memcg, page); 7236 7237 css_put(&memcg->css); 7238 } 7239 7240 /** 7241 * mem_cgroup_try_charge_swap - try charging swap space for a page 7242 * @page: page being added to swap 7243 * @entry: swap entry to charge 7244 * 7245 * Try to charge @page's memcg for the swap space at @entry. 7246 * 7247 * Returns 0 on success, -ENOMEM on failure. 7248 */ 7249 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7250 { 7251 unsigned int nr_pages = thp_nr_pages(page); 7252 struct page_counter *counter; 7253 struct mem_cgroup *memcg; 7254 unsigned short oldid; 7255 7256 if (mem_cgroup_disabled()) 7257 return 0; 7258 7259 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7260 return 0; 7261 7262 memcg = page_memcg(page); 7263 7264 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7265 if (!memcg) 7266 return 0; 7267 7268 if (!entry.val) { 7269 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7270 return 0; 7271 } 7272 7273 memcg = mem_cgroup_id_get_online(memcg); 7274 7275 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7276 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7277 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7278 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7279 mem_cgroup_id_put(memcg); 7280 return -ENOMEM; 7281 } 7282 7283 /* Get references for the tail pages, too */ 7284 if (nr_pages > 1) 7285 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7286 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7287 VM_BUG_ON_PAGE(oldid, page); 7288 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7289 7290 return 0; 7291 } 7292 7293 /** 7294 * mem_cgroup_uncharge_swap - uncharge swap space 7295 * @entry: swap entry to uncharge 7296 * @nr_pages: the amount of swap space to uncharge 7297 */ 7298 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7299 { 7300 struct mem_cgroup *memcg; 7301 unsigned short id; 7302 7303 id = swap_cgroup_record(entry, 0, nr_pages); 7304 rcu_read_lock(); 7305 memcg = mem_cgroup_from_id(id); 7306 if (memcg) { 7307 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7308 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7309 page_counter_uncharge(&memcg->swap, nr_pages); 7310 else 7311 page_counter_uncharge(&memcg->memsw, nr_pages); 7312 } 7313 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7314 mem_cgroup_id_put_many(memcg, nr_pages); 7315 } 7316 rcu_read_unlock(); 7317 } 
7318 7319 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7320 { 7321 long nr_swap_pages = get_nr_swap_pages(); 7322 7323 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7324 return nr_swap_pages; 7325 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7326 nr_swap_pages = min_t(long, nr_swap_pages, 7327 READ_ONCE(memcg->swap.max) - 7328 page_counter_read(&memcg->swap)); 7329 return nr_swap_pages; 7330 } 7331 7332 bool mem_cgroup_swap_full(struct page *page) 7333 { 7334 struct mem_cgroup *memcg; 7335 7336 VM_BUG_ON_PAGE(!PageLocked(page), page); 7337 7338 if (vm_swap_full()) 7339 return true; 7340 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7341 return false; 7342 7343 memcg = page_memcg(page); 7344 if (!memcg) 7345 return false; 7346 7347 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7348 unsigned long usage = page_counter_read(&memcg->swap); 7349 7350 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7351 usage * 2 >= READ_ONCE(memcg->swap.max)) 7352 return true; 7353 } 7354 7355 return false; 7356 } 7357 7358 static int __init setup_swap_account(char *s) 7359 { 7360 if (!strcmp(s, "1")) 7361 cgroup_memory_noswap = false; 7362 else if (!strcmp(s, "0")) 7363 cgroup_memory_noswap = true; 7364 return 1; 7365 } 7366 __setup("swapaccount=", setup_swap_account); 7367 7368 static u64 swap_current_read(struct cgroup_subsys_state *css, 7369 struct cftype *cft) 7370 { 7371 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7372 7373 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7374 } 7375 7376 static int swap_high_show(struct seq_file *m, void *v) 7377 { 7378 return seq_puts_memcg_tunable(m, 7379 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7380 } 7381 7382 static ssize_t swap_high_write(struct kernfs_open_file *of, 7383 char *buf, size_t nbytes, loff_t off) 7384 { 7385 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7386 unsigned long high; 7387 int err; 7388 7389 buf = strstrip(buf); 7390 err = page_counter_memparse(buf, "max", &high); 7391 if (err) 7392 return err; 7393 7394 page_counter_set_high(&memcg->swap, high); 7395 7396 return nbytes; 7397 } 7398 7399 static int swap_max_show(struct seq_file *m, void *v) 7400 { 7401 return seq_puts_memcg_tunable(m, 7402 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7403 } 7404 7405 static ssize_t swap_max_write(struct kernfs_open_file *of, 7406 char *buf, size_t nbytes, loff_t off) 7407 { 7408 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7409 unsigned long max; 7410 int err; 7411 7412 buf = strstrip(buf); 7413 err = page_counter_memparse(buf, "max", &max); 7414 if (err) 7415 return err; 7416 7417 xchg(&memcg->swap.max, max); 7418 7419 return nbytes; 7420 } 7421 7422 static int swap_events_show(struct seq_file *m, void *v) 7423 { 7424 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7425 7426 seq_printf(m, "high %lu\n", 7427 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7428 seq_printf(m, "max %lu\n", 7429 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7430 seq_printf(m, "fail %lu\n", 7431 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7432 7433 return 0; 7434 } 7435 7436 static struct cftype swap_files[] = { 7437 { 7438 .name = "swap.current", 7439 .flags = CFTYPE_NOT_ON_ROOT, 7440 .read_u64 = swap_current_read, 7441 }, 7442 { 7443 .name = "swap.high", 7444 .flags = CFTYPE_NOT_ON_ROOT, 7445 .seq_show = swap_high_show, 7446 .write = swap_high_write, 7447 }, 7448 { 7449 .name = 
"swap.max", 7450 .flags = CFTYPE_NOT_ON_ROOT, 7451 .seq_show = swap_max_show, 7452 .write = swap_max_write, 7453 }, 7454 { 7455 .name = "swap.events", 7456 .flags = CFTYPE_NOT_ON_ROOT, 7457 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7458 .seq_show = swap_events_show, 7459 }, 7460 { } /* terminate */ 7461 }; 7462 7463 static struct cftype memsw_files[] = { 7464 { 7465 .name = "memsw.usage_in_bytes", 7466 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7467 .read_u64 = mem_cgroup_read_u64, 7468 }, 7469 { 7470 .name = "memsw.max_usage_in_bytes", 7471 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7472 .write = mem_cgroup_reset, 7473 .read_u64 = mem_cgroup_read_u64, 7474 }, 7475 { 7476 .name = "memsw.limit_in_bytes", 7477 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7478 .write = mem_cgroup_write, 7479 .read_u64 = mem_cgroup_read_u64, 7480 }, 7481 { 7482 .name = "memsw.failcnt", 7483 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7484 .write = mem_cgroup_reset, 7485 .read_u64 = mem_cgroup_read_u64, 7486 }, 7487 { }, /* terminate */ 7488 }; 7489 7490 /* 7491 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7492 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7493 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7494 * boot parameter. This may result in premature OOPS inside 7495 * mem_cgroup_get_nr_swap_pages() function in corner cases. 7496 */ 7497 static int __init mem_cgroup_swap_init(void) 7498 { 7499 /* No memory control -> no swap control */ 7500 if (mem_cgroup_disabled()) 7501 cgroup_memory_noswap = true; 7502 7503 if (cgroup_memory_noswap) 7504 return 0; 7505 7506 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7507 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7508 7509 return 0; 7510 } 7511 core_initcall(mem_cgroup_swap_init); 7512 7513 #endif /* CONFIG_MEMCG_SWAP */ 7514