1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 * 24 * Per memcg lru locking 25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi 26 */ 27 28 #include <linux/page_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/pagewalk.h> 32 #include <linux/sched/mm.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/hugetlb.h> 35 #include <linux/pagemap.h> 36 #include <linux/vm_event_item.h> 37 #include <linux/smp.h> 38 #include <linux/page-flags.h> 39 #include <linux/backing-dev.h> 40 #include <linux/bit_spinlock.h> 41 #include <linux/rcupdate.h> 42 #include <linux/limits.h> 43 #include <linux/export.h> 44 #include <linux/mutex.h> 45 #include <linux/rbtree.h> 46 #include <linux/slab.h> 47 #include <linux/swap.h> 48 #include <linux/swapops.h> 49 #include <linux/spinlock.h> 50 #include <linux/eventfd.h> 51 #include <linux/poll.h> 52 #include <linux/sort.h> 53 #include <linux/fs.h> 54 #include <linux/seq_file.h> 55 #include <linux/vmpressure.h> 56 #include <linux/mm_inline.h> 57 #include <linux/swap_cgroup.h> 58 #include <linux/cpu.h> 59 #include <linux/oom.h> 60 #include <linux/lockdep.h> 61 #include <linux/file.h> 62 #include <linux/tracehook.h> 63 #include <linux/psi.h> 64 #include <linux/seq_buf.h> 65 #include "internal.h" 66 #include <net/sock.h> 67 #include <net/ip.h> 68 #include "slab.h" 69 70 #include <linux/uaccess.h> 71 72 #include <trace/events/vmscan.h> 73 74 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 75 EXPORT_SYMBOL(memory_cgrp_subsys); 76 77 struct mem_cgroup *root_mem_cgroup __read_mostly; 78 79 /* Active memory cgroup to use from an interrupt context */ 80 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); 81 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); 82 83 /* Socket memory accounting disabled? */ 84 static bool cgroup_memory_nosocket __ro_after_init; 85 86 /* Kernel memory accounting disabled? */ 87 bool cgroup_memory_nokmem __ro_after_init; 88 89 /* Whether the swap controller is active */ 90 #ifdef CONFIG_MEMCG_SWAP 91 bool cgroup_memory_noswap __ro_after_init; 92 #else 93 #define cgroup_memory_noswap 1 94 #endif 95 96 #ifdef CONFIG_CGROUP_WRITEBACK 97 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); 98 #endif 99 100 /* Whether legacy memory+swap accounting is active */ 101 static bool do_memsw_account(void) 102 { 103 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap; 104 } 105 106 #define THRESHOLDS_EVENTS_TARGET 128 107 #define SOFTLIMIT_EVENTS_TARGET 1024 108 109 /* 110 * Cgroups above their limits are maintained in a RB-Tree, independent of 111 * their hierarchy representation 112 */ 113 114 struct mem_cgroup_tree_per_node { 115 struct rb_root rb_root; 116 struct rb_node *rb_rightmost; 117 spinlock_t lock; 118 }; 119 120 struct mem_cgroup_tree { 121 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 122 }; 123 124 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 125 126 /* for OOM */ 127 struct mem_cgroup_eventfd_list { 128 struct list_head list; 129 struct eventfd_ctx *eventfd; 130 }; 131 132 /* 133 * cgroup_event represents events which userspace want to receive. 134 */ 135 struct mem_cgroup_event { 136 /* 137 * memcg which the event belongs to. 138 */ 139 struct mem_cgroup *memcg; 140 /* 141 * eventfd to signal userspace about the event. 142 */ 143 struct eventfd_ctx *eventfd; 144 /* 145 * Each of these stored in a list by the cgroup. 146 */ 147 struct list_head list; 148 /* 149 * register_event() callback will be used to add new userspace 150 * waiter for changes related to this event. Use eventfd_signal() 151 * on eventfd to send notification to userspace. 152 */ 153 int (*register_event)(struct mem_cgroup *memcg, 154 struct eventfd_ctx *eventfd, const char *args); 155 /* 156 * unregister_event() callback will be called when userspace closes 157 * the eventfd or on cgroup removing. This callback must be set, 158 * if you want provide notification functionality. 159 */ 160 void (*unregister_event)(struct mem_cgroup *memcg, 161 struct eventfd_ctx *eventfd); 162 /* 163 * All fields below needed to unregister event when 164 * userspace closes eventfd. 165 */ 166 poll_table pt; 167 wait_queue_head_t *wqh; 168 wait_queue_entry_t wait; 169 struct work_struct remove; 170 }; 171 172 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 173 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 174 175 /* Stuffs for move charges at task migration. */ 176 /* 177 * Types of charges to be moved. 178 */ 179 #define MOVE_ANON 0x1U 180 #define MOVE_FILE 0x2U 181 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 182 183 /* "mc" and its members are protected by cgroup_mutex */ 184 static struct move_charge_struct { 185 spinlock_t lock; /* for from, to */ 186 struct mm_struct *mm; 187 struct mem_cgroup *from; 188 struct mem_cgroup *to; 189 unsigned long flags; 190 unsigned long precharge; 191 unsigned long moved_charge; 192 unsigned long moved_swap; 193 struct task_struct *moving_task; /* a task moving charges */ 194 wait_queue_head_t waitq; /* a waitq for other context */ 195 } mc = { 196 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 197 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 198 }; 199 200 /* 201 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 202 * limit reclaim to prevent infinite loops, if they ever occur. 203 */ 204 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 205 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 206 207 /* for encoding cft->private value on file */ 208 enum res_type { 209 _MEM, 210 _MEMSWAP, 211 _OOM_TYPE, 212 _KMEM, 213 _TCP, 214 }; 215 216 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 217 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 218 #define MEMFILE_ATTR(val) ((val) & 0xffff) 219 /* Used for OOM notifier */ 220 #define OOM_CONTROL (0) 221 222 /* 223 * Iteration constructs for visiting all cgroups (under a tree). If 224 * loops are exited prematurely (break), mem_cgroup_iter_break() must 225 * be used for reference counting. 226 */ 227 #define for_each_mem_cgroup_tree(iter, root) \ 228 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 229 iter != NULL; \ 230 iter = mem_cgroup_iter(root, iter, NULL)) 231 232 #define for_each_mem_cgroup(iter) \ 233 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 234 iter != NULL; \ 235 iter = mem_cgroup_iter(NULL, iter, NULL)) 236 237 static inline bool should_force_charge(void) 238 { 239 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || 240 (current->flags & PF_EXITING); 241 } 242 243 /* Some nice accessors for the vmpressure. */ 244 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 245 { 246 if (!memcg) 247 memcg = root_mem_cgroup; 248 return &memcg->vmpressure; 249 } 250 251 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 252 { 253 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 254 } 255 256 #ifdef CONFIG_MEMCG_KMEM 257 extern spinlock_t css_set_lock; 258 259 bool mem_cgroup_kmem_disabled(void) 260 { 261 return cgroup_memory_nokmem; 262 } 263 264 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 265 unsigned int nr_pages); 266 267 static void obj_cgroup_release(struct percpu_ref *ref) 268 { 269 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt); 270 unsigned int nr_bytes; 271 unsigned int nr_pages; 272 unsigned long flags; 273 274 /* 275 * At this point all allocated objects are freed, and 276 * objcg->nr_charged_bytes can't have an arbitrary byte value. 277 * However, it can be PAGE_SIZE or (x * PAGE_SIZE). 278 * 279 * The following sequence can lead to it: 280 * 1) CPU0: objcg == stock->cached_objcg 281 * 2) CPU1: we do a small allocation (e.g. 92 bytes), 282 * PAGE_SIZE bytes are charged 283 * 3) CPU1: a process from another memcg is allocating something, 284 * the stock if flushed, 285 * objcg->nr_charged_bytes = PAGE_SIZE - 92 286 * 5) CPU0: we do release this object, 287 * 92 bytes are added to stock->nr_bytes 288 * 6) CPU0: stock is flushed, 289 * 92 bytes are added to objcg->nr_charged_bytes 290 * 291 * In the result, nr_charged_bytes == PAGE_SIZE. 292 * This page will be uncharged in obj_cgroup_release(). 293 */ 294 nr_bytes = atomic_read(&objcg->nr_charged_bytes); 295 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); 296 nr_pages = nr_bytes >> PAGE_SHIFT; 297 298 if (nr_pages) 299 obj_cgroup_uncharge_pages(objcg, nr_pages); 300 301 spin_lock_irqsave(&css_set_lock, flags); 302 list_del(&objcg->list); 303 spin_unlock_irqrestore(&css_set_lock, flags); 304 305 percpu_ref_exit(ref); 306 kfree_rcu(objcg, rcu); 307 } 308 309 static struct obj_cgroup *obj_cgroup_alloc(void) 310 { 311 struct obj_cgroup *objcg; 312 int ret; 313 314 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); 315 if (!objcg) 316 return NULL; 317 318 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, 319 GFP_KERNEL); 320 if (ret) { 321 kfree(objcg); 322 return NULL; 323 } 324 INIT_LIST_HEAD(&objcg->list); 325 return objcg; 326 } 327 328 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, 329 struct mem_cgroup *parent) 330 { 331 struct obj_cgroup *objcg, *iter; 332 333 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); 334 335 spin_lock_irq(&css_set_lock); 336 337 /* 1) Ready to reparent active objcg. */ 338 list_add(&objcg->list, &memcg->objcg_list); 339 /* 2) Reparent active objcg and already reparented objcgs to parent. */ 340 list_for_each_entry(iter, &memcg->objcg_list, list) 341 WRITE_ONCE(iter->memcg, parent); 342 /* 3) Move already reparented objcgs to the parent's list */ 343 list_splice(&memcg->objcg_list, &parent->objcg_list); 344 345 spin_unlock_irq(&css_set_lock); 346 347 percpu_ref_kill(&objcg->refcnt); 348 } 349 350 /* 351 * This will be used as a shrinker list's index. 352 * The main reason for not using cgroup id for this: 353 * this works better in sparse environments, where we have a lot of memcgs, 354 * but only a few kmem-limited. Or also, if we have, for instance, 200 355 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 356 * 200 entry array for that. 357 * 358 * The current size of the caches array is stored in memcg_nr_cache_ids. It 359 * will double each time we have to increase it. 360 */ 361 static DEFINE_IDA(memcg_cache_ida); 362 int memcg_nr_cache_ids; 363 364 /* Protects memcg_nr_cache_ids */ 365 static DECLARE_RWSEM(memcg_cache_ids_sem); 366 367 void memcg_get_cache_ids(void) 368 { 369 down_read(&memcg_cache_ids_sem); 370 } 371 372 void memcg_put_cache_ids(void) 373 { 374 up_read(&memcg_cache_ids_sem); 375 } 376 377 /* 378 * MIN_SIZE is different than 1, because we would like to avoid going through 379 * the alloc/free process all the time. In a small machine, 4 kmem-limited 380 * cgroups is a reasonable guess. In the future, it could be a parameter or 381 * tunable, but that is strictly not necessary. 382 * 383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 384 * this constant directly from cgroup, but it is understandable that this is 385 * better kept as an internal representation in cgroup.c. In any case, the 386 * cgrp_id space is not getting any smaller, and we don't have to necessarily 387 * increase ours as well if it increases. 388 */ 389 #define MEMCG_CACHES_MIN_SIZE 4 390 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 391 392 /* 393 * A lot of the calls to the cache allocation functions are expected to be 394 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are 395 * conditional to this static branch, we'll have to allow modules that does 396 * kmem_cache_alloc and the such to see this symbol as well 397 */ 398 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); 399 EXPORT_SYMBOL(memcg_kmem_enabled_key); 400 #endif 401 402 /** 403 * mem_cgroup_css_from_page - css of the memcg associated with a page 404 * @page: page of interest 405 * 406 * If memcg is bound to the default hierarchy, css of the memcg associated 407 * with @page is returned. The returned css remains associated with @page 408 * until it is released. 409 * 410 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 411 * is returned. 412 */ 413 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 414 { 415 struct mem_cgroup *memcg; 416 417 memcg = page_memcg(page); 418 419 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 420 memcg = root_mem_cgroup; 421 422 return &memcg->css; 423 } 424 425 /** 426 * page_cgroup_ino - return inode number of the memcg a page is charged to 427 * @page: the page 428 * 429 * Look up the closest online ancestor of the memory cgroup @page is charged to 430 * and return its inode number or 0 if @page is not charged to any cgroup. It 431 * is safe to call this function without holding a reference to @page. 432 * 433 * Note, this function is inherently racy, because there is nothing to prevent 434 * the cgroup inode from getting torn down and potentially reallocated a moment 435 * after page_cgroup_ino() returns, so it only should be used by callers that 436 * do not care (such as procfs interfaces). 437 */ 438 ino_t page_cgroup_ino(struct page *page) 439 { 440 struct mem_cgroup *memcg; 441 unsigned long ino = 0; 442 443 rcu_read_lock(); 444 memcg = page_memcg_check(page); 445 446 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 447 memcg = parent_mem_cgroup(memcg); 448 if (memcg) 449 ino = cgroup_ino(memcg->css.cgroup); 450 rcu_read_unlock(); 451 return ino; 452 } 453 454 static struct mem_cgroup_per_node * 455 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) 456 { 457 int nid = page_to_nid(page); 458 459 return memcg->nodeinfo[nid]; 460 } 461 462 static struct mem_cgroup_tree_per_node * 463 soft_limit_tree_node(int nid) 464 { 465 return soft_limit_tree.rb_tree_per_node[nid]; 466 } 467 468 static struct mem_cgroup_tree_per_node * 469 soft_limit_tree_from_page(struct page *page) 470 { 471 int nid = page_to_nid(page); 472 473 return soft_limit_tree.rb_tree_per_node[nid]; 474 } 475 476 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 477 struct mem_cgroup_tree_per_node *mctz, 478 unsigned long new_usage_in_excess) 479 { 480 struct rb_node **p = &mctz->rb_root.rb_node; 481 struct rb_node *parent = NULL; 482 struct mem_cgroup_per_node *mz_node; 483 bool rightmost = true; 484 485 if (mz->on_tree) 486 return; 487 488 mz->usage_in_excess = new_usage_in_excess; 489 if (!mz->usage_in_excess) 490 return; 491 while (*p) { 492 parent = *p; 493 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 494 tree_node); 495 if (mz->usage_in_excess < mz_node->usage_in_excess) { 496 p = &(*p)->rb_left; 497 rightmost = false; 498 } else { 499 p = &(*p)->rb_right; 500 } 501 } 502 503 if (rightmost) 504 mctz->rb_rightmost = &mz->tree_node; 505 506 rb_link_node(&mz->tree_node, parent, p); 507 rb_insert_color(&mz->tree_node, &mctz->rb_root); 508 mz->on_tree = true; 509 } 510 511 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 512 struct mem_cgroup_tree_per_node *mctz) 513 { 514 if (!mz->on_tree) 515 return; 516 517 if (&mz->tree_node == mctz->rb_rightmost) 518 mctz->rb_rightmost = rb_prev(&mz->tree_node); 519 520 rb_erase(&mz->tree_node, &mctz->rb_root); 521 mz->on_tree = false; 522 } 523 524 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 525 struct mem_cgroup_tree_per_node *mctz) 526 { 527 unsigned long flags; 528 529 spin_lock_irqsave(&mctz->lock, flags); 530 __mem_cgroup_remove_exceeded(mz, mctz); 531 spin_unlock_irqrestore(&mctz->lock, flags); 532 } 533 534 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 535 { 536 unsigned long nr_pages = page_counter_read(&memcg->memory); 537 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 538 unsigned long excess = 0; 539 540 if (nr_pages > soft_limit) 541 excess = nr_pages - soft_limit; 542 543 return excess; 544 } 545 546 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 547 { 548 unsigned long excess; 549 struct mem_cgroup_per_node *mz; 550 struct mem_cgroup_tree_per_node *mctz; 551 552 mctz = soft_limit_tree_from_page(page); 553 if (!mctz) 554 return; 555 /* 556 * Necessary to update all ancestors when hierarchy is used. 557 * because their event counter is not touched. 558 */ 559 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 560 mz = mem_cgroup_page_nodeinfo(memcg, page); 561 excess = soft_limit_excess(memcg); 562 /* 563 * We have to update the tree if mz is on RB-tree or 564 * mem is over its softlimit. 565 */ 566 if (excess || mz->on_tree) { 567 unsigned long flags; 568 569 spin_lock_irqsave(&mctz->lock, flags); 570 /* if on-tree, remove it */ 571 if (mz->on_tree) 572 __mem_cgroup_remove_exceeded(mz, mctz); 573 /* 574 * Insert again. mz->usage_in_excess will be updated. 575 * If excess is 0, no tree ops. 576 */ 577 __mem_cgroup_insert_exceeded(mz, mctz, excess); 578 spin_unlock_irqrestore(&mctz->lock, flags); 579 } 580 } 581 } 582 583 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 584 { 585 struct mem_cgroup_tree_per_node *mctz; 586 struct mem_cgroup_per_node *mz; 587 int nid; 588 589 for_each_node(nid) { 590 mz = memcg->nodeinfo[nid]; 591 mctz = soft_limit_tree_node(nid); 592 if (mctz) 593 mem_cgroup_remove_exceeded(mz, mctz); 594 } 595 } 596 597 static struct mem_cgroup_per_node * 598 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 599 { 600 struct mem_cgroup_per_node *mz; 601 602 retry: 603 mz = NULL; 604 if (!mctz->rb_rightmost) 605 goto done; /* Nothing to reclaim from */ 606 607 mz = rb_entry(mctz->rb_rightmost, 608 struct mem_cgroup_per_node, tree_node); 609 /* 610 * Remove the node now but someone else can add it back, 611 * we will to add it back at the end of reclaim to its correct 612 * position in the tree. 613 */ 614 __mem_cgroup_remove_exceeded(mz, mctz); 615 if (!soft_limit_excess(mz->memcg) || 616 !css_tryget(&mz->memcg->css)) 617 goto retry; 618 done: 619 return mz; 620 } 621 622 static struct mem_cgroup_per_node * 623 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 624 { 625 struct mem_cgroup_per_node *mz; 626 627 spin_lock_irq(&mctz->lock); 628 mz = __mem_cgroup_largest_soft_limit_node(mctz); 629 spin_unlock_irq(&mctz->lock); 630 return mz; 631 } 632 633 /** 634 * __mod_memcg_state - update cgroup memory statistics 635 * @memcg: the memory cgroup 636 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 637 * @val: delta to add to the counter, can be negative 638 */ 639 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 640 { 641 if (mem_cgroup_disabled()) 642 return; 643 644 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); 645 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); 646 } 647 648 /* idx can be of type enum memcg_stat_item or node_stat_item. */ 649 static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) 650 { 651 long x = READ_ONCE(memcg->vmstats.state[idx]); 652 #ifdef CONFIG_SMP 653 if (x < 0) 654 x = 0; 655 #endif 656 return x; 657 } 658 659 /* idx can be of type enum memcg_stat_item or node_stat_item. */ 660 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) 661 { 662 long x = 0; 663 int cpu; 664 665 for_each_possible_cpu(cpu) 666 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu); 667 #ifdef CONFIG_SMP 668 if (x < 0) 669 x = 0; 670 #endif 671 return x; 672 } 673 674 static struct mem_cgroup_per_node * 675 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) 676 { 677 struct mem_cgroup *parent; 678 679 parent = parent_mem_cgroup(pn->memcg); 680 if (!parent) 681 return NULL; 682 return parent->nodeinfo[nid]; 683 } 684 685 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 686 int val) 687 { 688 struct mem_cgroup_per_node *pn; 689 struct mem_cgroup *memcg; 690 long x, threshold = MEMCG_CHARGE_BATCH; 691 692 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 693 memcg = pn->memcg; 694 695 /* Update memcg */ 696 __mod_memcg_state(memcg, idx, val); 697 698 /* Update lruvec */ 699 __this_cpu_add(pn->lruvec_stat_local->count[idx], val); 700 701 if (vmstat_item_in_bytes(idx)) 702 threshold <<= PAGE_SHIFT; 703 704 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 705 if (unlikely(abs(x) > threshold)) { 706 pg_data_t *pgdat = lruvec_pgdat(lruvec); 707 struct mem_cgroup_per_node *pi; 708 709 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 710 atomic_long_add(x, &pi->lruvec_stat[idx]); 711 x = 0; 712 } 713 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 714 } 715 716 /** 717 * __mod_lruvec_state - update lruvec memory statistics 718 * @lruvec: the lruvec 719 * @idx: the stat item 720 * @val: delta to add to the counter, can be negative 721 * 722 * The lruvec is the intersection of the NUMA node and a cgroup. This 723 * function updates the all three counters that are affected by a 724 * change of state at this level: per-node, per-cgroup, per-lruvec. 725 */ 726 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 727 int val) 728 { 729 /* Update node */ 730 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 731 732 /* Update memcg and lruvec */ 733 if (!mem_cgroup_disabled()) 734 __mod_memcg_lruvec_state(lruvec, idx, val); 735 } 736 737 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, 738 int val) 739 { 740 struct page *head = compound_head(page); /* rmap on tail pages */ 741 struct mem_cgroup *memcg; 742 pg_data_t *pgdat = page_pgdat(page); 743 struct lruvec *lruvec; 744 745 rcu_read_lock(); 746 memcg = page_memcg(head); 747 /* Untracked pages have no memcg, no lruvec. Update only the node */ 748 if (!memcg) { 749 rcu_read_unlock(); 750 __mod_node_page_state(pgdat, idx, val); 751 return; 752 } 753 754 lruvec = mem_cgroup_lruvec(memcg, pgdat); 755 __mod_lruvec_state(lruvec, idx, val); 756 rcu_read_unlock(); 757 } 758 EXPORT_SYMBOL(__mod_lruvec_page_state); 759 760 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) 761 { 762 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 763 struct mem_cgroup *memcg; 764 struct lruvec *lruvec; 765 766 rcu_read_lock(); 767 memcg = mem_cgroup_from_obj(p); 768 769 /* 770 * Untracked pages have no memcg, no lruvec. Update only the 771 * node. If we reparent the slab objects to the root memcg, 772 * when we free the slab object, we need to update the per-memcg 773 * vmstats to keep it correct for the root memcg. 774 */ 775 if (!memcg) { 776 __mod_node_page_state(pgdat, idx, val); 777 } else { 778 lruvec = mem_cgroup_lruvec(memcg, pgdat); 779 __mod_lruvec_state(lruvec, idx, val); 780 } 781 rcu_read_unlock(); 782 } 783 784 /* 785 * mod_objcg_mlstate() may be called with irq enabled, so 786 * mod_memcg_lruvec_state() should be used. 787 */ 788 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 789 struct pglist_data *pgdat, 790 enum node_stat_item idx, int nr) 791 { 792 struct mem_cgroup *memcg; 793 struct lruvec *lruvec; 794 795 rcu_read_lock(); 796 memcg = obj_cgroup_memcg(objcg); 797 lruvec = mem_cgroup_lruvec(memcg, pgdat); 798 mod_memcg_lruvec_state(lruvec, idx, nr); 799 rcu_read_unlock(); 800 } 801 802 /** 803 * __count_memcg_events - account VM events in a cgroup 804 * @memcg: the memory cgroup 805 * @idx: the event item 806 * @count: the number of events that occurred 807 */ 808 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 809 unsigned long count) 810 { 811 if (mem_cgroup_disabled()) 812 return; 813 814 __this_cpu_add(memcg->vmstats_percpu->events[idx], count); 815 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); 816 } 817 818 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 819 { 820 return READ_ONCE(memcg->vmstats.events[event]); 821 } 822 823 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 824 { 825 long x = 0; 826 int cpu; 827 828 for_each_possible_cpu(cpu) 829 x += per_cpu(memcg->vmstats_percpu->events[event], cpu); 830 return x; 831 } 832 833 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 834 struct page *page, 835 int nr_pages) 836 { 837 /* pagein of a big page is an event. So, ignore page size */ 838 if (nr_pages > 0) 839 __count_memcg_events(memcg, PGPGIN, 1); 840 else { 841 __count_memcg_events(memcg, PGPGOUT, 1); 842 nr_pages = -nr_pages; /* for event */ 843 } 844 845 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 846 } 847 848 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 849 enum mem_cgroup_events_target target) 850 { 851 unsigned long val, next; 852 853 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 854 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 855 /* from time_after() in jiffies.h */ 856 if ((long)(next - val) < 0) { 857 switch (target) { 858 case MEM_CGROUP_TARGET_THRESH: 859 next = val + THRESHOLDS_EVENTS_TARGET; 860 break; 861 case MEM_CGROUP_TARGET_SOFTLIMIT: 862 next = val + SOFTLIMIT_EVENTS_TARGET; 863 break; 864 default: 865 break; 866 } 867 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 868 return true; 869 } 870 return false; 871 } 872 873 /* 874 * Check events in order. 875 * 876 */ 877 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 878 { 879 /* threshold event is triggered in finer grain than soft limit */ 880 if (unlikely(mem_cgroup_event_ratelimit(memcg, 881 MEM_CGROUP_TARGET_THRESH))) { 882 bool do_softlimit; 883 884 do_softlimit = mem_cgroup_event_ratelimit(memcg, 885 MEM_CGROUP_TARGET_SOFTLIMIT); 886 mem_cgroup_threshold(memcg); 887 if (unlikely(do_softlimit)) 888 mem_cgroup_update_tree(memcg, page); 889 } 890 } 891 892 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 893 { 894 /* 895 * mm_update_next_owner() may clear mm->owner to NULL 896 * if it races with swapoff, page migration, etc. 897 * So this can be called with p == NULL. 898 */ 899 if (unlikely(!p)) 900 return NULL; 901 902 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 903 } 904 EXPORT_SYMBOL(mem_cgroup_from_task); 905 906 static __always_inline struct mem_cgroup *active_memcg(void) 907 { 908 if (in_interrupt()) 909 return this_cpu_read(int_active_memcg); 910 else 911 return current->active_memcg; 912 } 913 914 /** 915 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 916 * @mm: mm from which memcg should be extracted. It can be NULL. 917 * 918 * Obtain a reference on mm->memcg and returns it if successful. If mm 919 * is NULL, then the memcg is chosen as follows: 920 * 1) The active memcg, if set. 921 * 2) current->mm->memcg, if available 922 * 3) root memcg 923 * If mem_cgroup is disabled, NULL is returned. 924 */ 925 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 926 { 927 struct mem_cgroup *memcg; 928 929 if (mem_cgroup_disabled()) 930 return NULL; 931 932 /* 933 * Page cache insertions can happen without an 934 * actual mm context, e.g. during disk probing 935 * on boot, loopback IO, acct() writes etc. 936 * 937 * No need to css_get on root memcg as the reference 938 * counting is disabled on the root level in the 939 * cgroup core. See CSS_NO_REF. 940 */ 941 if (unlikely(!mm)) { 942 memcg = active_memcg(); 943 if (unlikely(memcg)) { 944 /* remote memcg must hold a ref */ 945 css_get(&memcg->css); 946 return memcg; 947 } 948 mm = current->mm; 949 if (unlikely(!mm)) 950 return root_mem_cgroup; 951 } 952 953 rcu_read_lock(); 954 do { 955 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 956 if (unlikely(!memcg)) 957 memcg = root_mem_cgroup; 958 } while (!css_tryget(&memcg->css)); 959 rcu_read_unlock(); 960 return memcg; 961 } 962 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 963 964 static __always_inline bool memcg_kmem_bypass(void) 965 { 966 /* Allow remote memcg charging from any context. */ 967 if (unlikely(active_memcg())) 968 return false; 969 970 /* Memcg to charge can't be determined. */ 971 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD)) 972 return true; 973 974 return false; 975 } 976 977 /** 978 * mem_cgroup_iter - iterate over memory cgroup hierarchy 979 * @root: hierarchy root 980 * @prev: previously returned memcg, NULL on first invocation 981 * @reclaim: cookie for shared reclaim walks, NULL for full walks 982 * 983 * Returns references to children of the hierarchy below @root, or 984 * @root itself, or %NULL after a full round-trip. 985 * 986 * Caller must pass the return value in @prev on subsequent 987 * invocations for reference counting, or use mem_cgroup_iter_break() 988 * to cancel a hierarchy walk before the round-trip is complete. 989 * 990 * Reclaimers can specify a node in @reclaim to divide up the memcgs 991 * in the hierarchy among all concurrent reclaimers operating on the 992 * same node. 993 */ 994 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 995 struct mem_cgroup *prev, 996 struct mem_cgroup_reclaim_cookie *reclaim) 997 { 998 struct mem_cgroup_reclaim_iter *iter; 999 struct cgroup_subsys_state *css = NULL; 1000 struct mem_cgroup *memcg = NULL; 1001 struct mem_cgroup *pos = NULL; 1002 1003 if (mem_cgroup_disabled()) 1004 return NULL; 1005 1006 if (!root) 1007 root = root_mem_cgroup; 1008 1009 if (prev && !reclaim) 1010 pos = prev; 1011 1012 rcu_read_lock(); 1013 1014 if (reclaim) { 1015 struct mem_cgroup_per_node *mz; 1016 1017 mz = root->nodeinfo[reclaim->pgdat->node_id]; 1018 iter = &mz->iter; 1019 1020 if (prev && reclaim->generation != iter->generation) 1021 goto out_unlock; 1022 1023 while (1) { 1024 pos = READ_ONCE(iter->position); 1025 if (!pos || css_tryget(&pos->css)) 1026 break; 1027 /* 1028 * css reference reached zero, so iter->position will 1029 * be cleared by ->css_released. However, we should not 1030 * rely on this happening soon, because ->css_released 1031 * is called from a work queue, and by busy-waiting we 1032 * might block it. So we clear iter->position right 1033 * away. 1034 */ 1035 (void)cmpxchg(&iter->position, pos, NULL); 1036 } 1037 } 1038 1039 if (pos) 1040 css = &pos->css; 1041 1042 for (;;) { 1043 css = css_next_descendant_pre(css, &root->css); 1044 if (!css) { 1045 /* 1046 * Reclaimers share the hierarchy walk, and a 1047 * new one might jump in right at the end of 1048 * the hierarchy - make sure they see at least 1049 * one group and restart from the beginning. 1050 */ 1051 if (!prev) 1052 continue; 1053 break; 1054 } 1055 1056 /* 1057 * Verify the css and acquire a reference. The root 1058 * is provided by the caller, so we know it's alive 1059 * and kicking, and don't take an extra reference. 1060 */ 1061 memcg = mem_cgroup_from_css(css); 1062 1063 if (css == &root->css) 1064 break; 1065 1066 if (css_tryget(css)) 1067 break; 1068 1069 memcg = NULL; 1070 } 1071 1072 if (reclaim) { 1073 /* 1074 * The position could have already been updated by a competing 1075 * thread, so check that the value hasn't changed since we read 1076 * it to avoid reclaiming from the same cgroup twice. 1077 */ 1078 (void)cmpxchg(&iter->position, pos, memcg); 1079 1080 if (pos) 1081 css_put(&pos->css); 1082 1083 if (!memcg) 1084 iter->generation++; 1085 else if (!prev) 1086 reclaim->generation = iter->generation; 1087 } 1088 1089 out_unlock: 1090 rcu_read_unlock(); 1091 if (prev && prev != root) 1092 css_put(&prev->css); 1093 1094 return memcg; 1095 } 1096 1097 /** 1098 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1099 * @root: hierarchy root 1100 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1101 */ 1102 void mem_cgroup_iter_break(struct mem_cgroup *root, 1103 struct mem_cgroup *prev) 1104 { 1105 if (!root) 1106 root = root_mem_cgroup; 1107 if (prev && prev != root) 1108 css_put(&prev->css); 1109 } 1110 1111 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1112 struct mem_cgroup *dead_memcg) 1113 { 1114 struct mem_cgroup_reclaim_iter *iter; 1115 struct mem_cgroup_per_node *mz; 1116 int nid; 1117 1118 for_each_node(nid) { 1119 mz = from->nodeinfo[nid]; 1120 iter = &mz->iter; 1121 cmpxchg(&iter->position, dead_memcg, NULL); 1122 } 1123 } 1124 1125 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1126 { 1127 struct mem_cgroup *memcg = dead_memcg; 1128 struct mem_cgroup *last; 1129 1130 do { 1131 __invalidate_reclaim_iterators(memcg, dead_memcg); 1132 last = memcg; 1133 } while ((memcg = parent_mem_cgroup(memcg))); 1134 1135 /* 1136 * When cgruop1 non-hierarchy mode is used, 1137 * parent_mem_cgroup() does not walk all the way up to the 1138 * cgroup root (root_mem_cgroup). So we have to handle 1139 * dead_memcg from cgroup root separately. 1140 */ 1141 if (last != root_mem_cgroup) 1142 __invalidate_reclaim_iterators(root_mem_cgroup, 1143 dead_memcg); 1144 } 1145 1146 /** 1147 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1148 * @memcg: hierarchy root 1149 * @fn: function to call for each task 1150 * @arg: argument passed to @fn 1151 * 1152 * This function iterates over tasks attached to @memcg or to any of its 1153 * descendants and calls @fn for each task. If @fn returns a non-zero 1154 * value, the function breaks the iteration loop and returns the value. 1155 * Otherwise, it will iterate over all tasks and return 0. 1156 * 1157 * This function must not be called for the root memory cgroup. 1158 */ 1159 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1160 int (*fn)(struct task_struct *, void *), void *arg) 1161 { 1162 struct mem_cgroup *iter; 1163 int ret = 0; 1164 1165 BUG_ON(memcg == root_mem_cgroup); 1166 1167 for_each_mem_cgroup_tree(iter, memcg) { 1168 struct css_task_iter it; 1169 struct task_struct *task; 1170 1171 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1172 while (!ret && (task = css_task_iter_next(&it))) 1173 ret = fn(task, arg); 1174 css_task_iter_end(&it); 1175 if (ret) { 1176 mem_cgroup_iter_break(memcg, iter); 1177 break; 1178 } 1179 } 1180 return ret; 1181 } 1182 1183 #ifdef CONFIG_DEBUG_VM 1184 void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) 1185 { 1186 struct mem_cgroup *memcg; 1187 1188 if (mem_cgroup_disabled()) 1189 return; 1190 1191 memcg = page_memcg(page); 1192 1193 if (!memcg) 1194 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page); 1195 else 1196 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page); 1197 } 1198 #endif 1199 1200 /** 1201 * lock_page_lruvec - lock and return lruvec for a given page. 1202 * @page: the page 1203 * 1204 * These functions are safe to use under any of the following conditions: 1205 * - page locked 1206 * - PageLRU cleared 1207 * - lock_page_memcg() 1208 * - page->_refcount is zero 1209 */ 1210 struct lruvec *lock_page_lruvec(struct page *page) 1211 { 1212 struct lruvec *lruvec; 1213 1214 lruvec = mem_cgroup_page_lruvec(page); 1215 spin_lock(&lruvec->lru_lock); 1216 1217 lruvec_memcg_debug(lruvec, page); 1218 1219 return lruvec; 1220 } 1221 1222 struct lruvec *lock_page_lruvec_irq(struct page *page) 1223 { 1224 struct lruvec *lruvec; 1225 1226 lruvec = mem_cgroup_page_lruvec(page); 1227 spin_lock_irq(&lruvec->lru_lock); 1228 1229 lruvec_memcg_debug(lruvec, page); 1230 1231 return lruvec; 1232 } 1233 1234 struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags) 1235 { 1236 struct lruvec *lruvec; 1237 1238 lruvec = mem_cgroup_page_lruvec(page); 1239 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1240 1241 lruvec_memcg_debug(lruvec, page); 1242 1243 return lruvec; 1244 } 1245 1246 /** 1247 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1248 * @lruvec: mem_cgroup per zone lru vector 1249 * @lru: index of lru list the page is sitting on 1250 * @zid: zone id of the accounted pages 1251 * @nr_pages: positive when adding or negative when removing 1252 * 1253 * This function must be called under lru_lock, just before a page is added 1254 * to or just after a page is removed from an lru list (that ordering being 1255 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1256 */ 1257 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1258 int zid, int nr_pages) 1259 { 1260 struct mem_cgroup_per_node *mz; 1261 unsigned long *lru_size; 1262 long size; 1263 1264 if (mem_cgroup_disabled()) 1265 return; 1266 1267 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1268 lru_size = &mz->lru_zone_size[zid][lru]; 1269 1270 if (nr_pages < 0) 1271 *lru_size += nr_pages; 1272 1273 size = *lru_size; 1274 if (WARN_ONCE(size < 0, 1275 "%s(%p, %d, %d): lru_size %ld\n", 1276 __func__, lruvec, lru, nr_pages, size)) { 1277 VM_BUG_ON(1); 1278 *lru_size = 0; 1279 } 1280 1281 if (nr_pages > 0) 1282 *lru_size += nr_pages; 1283 } 1284 1285 /** 1286 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1287 * @memcg: the memory cgroup 1288 * 1289 * Returns the maximum amount of memory @mem can be charged with, in 1290 * pages. 1291 */ 1292 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1293 { 1294 unsigned long margin = 0; 1295 unsigned long count; 1296 unsigned long limit; 1297 1298 count = page_counter_read(&memcg->memory); 1299 limit = READ_ONCE(memcg->memory.max); 1300 if (count < limit) 1301 margin = limit - count; 1302 1303 if (do_memsw_account()) { 1304 count = page_counter_read(&memcg->memsw); 1305 limit = READ_ONCE(memcg->memsw.max); 1306 if (count < limit) 1307 margin = min(margin, limit - count); 1308 else 1309 margin = 0; 1310 } 1311 1312 return margin; 1313 } 1314 1315 /* 1316 * A routine for checking "mem" is under move_account() or not. 1317 * 1318 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1319 * moving cgroups. This is for waiting at high-memory pressure 1320 * caused by "move". 1321 */ 1322 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1323 { 1324 struct mem_cgroup *from; 1325 struct mem_cgroup *to; 1326 bool ret = false; 1327 /* 1328 * Unlike task_move routines, we access mc.to, mc.from not under 1329 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1330 */ 1331 spin_lock(&mc.lock); 1332 from = mc.from; 1333 to = mc.to; 1334 if (!from) 1335 goto unlock; 1336 1337 ret = mem_cgroup_is_descendant(from, memcg) || 1338 mem_cgroup_is_descendant(to, memcg); 1339 unlock: 1340 spin_unlock(&mc.lock); 1341 return ret; 1342 } 1343 1344 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1345 { 1346 if (mc.moving_task && current != mc.moving_task) { 1347 if (mem_cgroup_under_move(memcg)) { 1348 DEFINE_WAIT(wait); 1349 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1350 /* moving charge context might have finished. */ 1351 if (mc.moving_task) 1352 schedule(); 1353 finish_wait(&mc.waitq, &wait); 1354 return true; 1355 } 1356 } 1357 return false; 1358 } 1359 1360 struct memory_stat { 1361 const char *name; 1362 unsigned int idx; 1363 }; 1364 1365 static const struct memory_stat memory_stats[] = { 1366 { "anon", NR_ANON_MAPPED }, 1367 { "file", NR_FILE_PAGES }, 1368 { "kernel_stack", NR_KERNEL_STACK_KB }, 1369 { "pagetables", NR_PAGETABLE }, 1370 { "percpu", MEMCG_PERCPU_B }, 1371 { "sock", MEMCG_SOCK }, 1372 { "shmem", NR_SHMEM }, 1373 { "file_mapped", NR_FILE_MAPPED }, 1374 { "file_dirty", NR_FILE_DIRTY }, 1375 { "file_writeback", NR_WRITEBACK }, 1376 #ifdef CONFIG_SWAP 1377 { "swapcached", NR_SWAPCACHE }, 1378 #endif 1379 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1380 { "anon_thp", NR_ANON_THPS }, 1381 { "file_thp", NR_FILE_THPS }, 1382 { "shmem_thp", NR_SHMEM_THPS }, 1383 #endif 1384 { "inactive_anon", NR_INACTIVE_ANON }, 1385 { "active_anon", NR_ACTIVE_ANON }, 1386 { "inactive_file", NR_INACTIVE_FILE }, 1387 { "active_file", NR_ACTIVE_FILE }, 1388 { "unevictable", NR_UNEVICTABLE }, 1389 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, 1390 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, 1391 1392 /* The memory events */ 1393 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON }, 1394 { "workingset_refault_file", WORKINGSET_REFAULT_FILE }, 1395 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON }, 1396 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE }, 1397 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON }, 1398 { "workingset_restore_file", WORKINGSET_RESTORE_FILE }, 1399 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM }, 1400 }; 1401 1402 /* Translate stat items to the correct unit for memory.stat output */ 1403 static int memcg_page_state_unit(int item) 1404 { 1405 switch (item) { 1406 case MEMCG_PERCPU_B: 1407 case NR_SLAB_RECLAIMABLE_B: 1408 case NR_SLAB_UNRECLAIMABLE_B: 1409 case WORKINGSET_REFAULT_ANON: 1410 case WORKINGSET_REFAULT_FILE: 1411 case WORKINGSET_ACTIVATE_ANON: 1412 case WORKINGSET_ACTIVATE_FILE: 1413 case WORKINGSET_RESTORE_ANON: 1414 case WORKINGSET_RESTORE_FILE: 1415 case WORKINGSET_NODERECLAIM: 1416 return 1; 1417 case NR_KERNEL_STACK_KB: 1418 return SZ_1K; 1419 default: 1420 return PAGE_SIZE; 1421 } 1422 } 1423 1424 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, 1425 int item) 1426 { 1427 return memcg_page_state(memcg, item) * memcg_page_state_unit(item); 1428 } 1429 1430 static char *memory_stat_format(struct mem_cgroup *memcg) 1431 { 1432 struct seq_buf s; 1433 int i; 1434 1435 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); 1436 if (!s.buffer) 1437 return NULL; 1438 1439 /* 1440 * Provide statistics on the state of the memory subsystem as 1441 * well as cumulative event counters that show past behavior. 1442 * 1443 * This list is ordered following a combination of these gradients: 1444 * 1) generic big picture -> specifics and details 1445 * 2) reflecting userspace activity -> reflecting kernel heuristics 1446 * 1447 * Current memory state: 1448 */ 1449 cgroup_rstat_flush(memcg->css.cgroup); 1450 1451 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1452 u64 size; 1453 1454 size = memcg_page_state_output(memcg, memory_stats[i].idx); 1455 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); 1456 1457 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1458 size += memcg_page_state_output(memcg, 1459 NR_SLAB_RECLAIMABLE_B); 1460 seq_buf_printf(&s, "slab %llu\n", size); 1461 } 1462 } 1463 1464 /* Accumulated memory events */ 1465 1466 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT), 1467 memcg_events(memcg, PGFAULT)); 1468 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), 1469 memcg_events(memcg, PGMAJFAULT)); 1470 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL), 1471 memcg_events(memcg, PGREFILL)); 1472 seq_buf_printf(&s, "pgscan %lu\n", 1473 memcg_events(memcg, PGSCAN_KSWAPD) + 1474 memcg_events(memcg, PGSCAN_DIRECT)); 1475 seq_buf_printf(&s, "pgsteal %lu\n", 1476 memcg_events(memcg, PGSTEAL_KSWAPD) + 1477 memcg_events(memcg, PGSTEAL_DIRECT)); 1478 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE), 1479 memcg_events(memcg, PGACTIVATE)); 1480 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE), 1481 memcg_events(memcg, PGDEACTIVATE)); 1482 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE), 1483 memcg_events(memcg, PGLAZYFREE)); 1484 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED), 1485 memcg_events(memcg, PGLAZYFREED)); 1486 1487 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1488 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC), 1489 memcg_events(memcg, THP_FAULT_ALLOC)); 1490 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC), 1491 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 1492 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1493 1494 /* The above should easily fit into one page */ 1495 WARN_ON_ONCE(seq_buf_has_overflowed(&s)); 1496 1497 return s.buffer; 1498 } 1499 1500 #define K(x) ((x) << (PAGE_SHIFT-10)) 1501 /** 1502 * mem_cgroup_print_oom_context: Print OOM information relevant to 1503 * memory controller. 1504 * @memcg: The memory cgroup that went over limit 1505 * @p: Task that is going to be killed 1506 * 1507 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1508 * enabled 1509 */ 1510 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1511 { 1512 rcu_read_lock(); 1513 1514 if (memcg) { 1515 pr_cont(",oom_memcg="); 1516 pr_cont_cgroup_path(memcg->css.cgroup); 1517 } else 1518 pr_cont(",global_oom"); 1519 if (p) { 1520 pr_cont(",task_memcg="); 1521 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1522 } 1523 rcu_read_unlock(); 1524 } 1525 1526 /** 1527 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1528 * memory controller. 1529 * @memcg: The memory cgroup that went over limit 1530 */ 1531 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1532 { 1533 char *buf; 1534 1535 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1536 K((u64)page_counter_read(&memcg->memory)), 1537 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1538 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1539 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1540 K((u64)page_counter_read(&memcg->swap)), 1541 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1542 else { 1543 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1544 K((u64)page_counter_read(&memcg->memsw)), 1545 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1546 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1547 K((u64)page_counter_read(&memcg->kmem)), 1548 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1549 } 1550 1551 pr_info("Memory cgroup stats for "); 1552 pr_cont_cgroup_path(memcg->css.cgroup); 1553 pr_cont(":"); 1554 buf = memory_stat_format(memcg); 1555 if (!buf) 1556 return; 1557 pr_info("%s", buf); 1558 kfree(buf); 1559 } 1560 1561 /* 1562 * Return the memory (and swap, if configured) limit for a memcg. 1563 */ 1564 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1565 { 1566 unsigned long max = READ_ONCE(memcg->memory.max); 1567 1568 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 1569 if (mem_cgroup_swappiness(memcg)) 1570 max += min(READ_ONCE(memcg->swap.max), 1571 (unsigned long)total_swap_pages); 1572 } else { /* v1 */ 1573 if (mem_cgroup_swappiness(memcg)) { 1574 /* Calculate swap excess capacity from memsw limit */ 1575 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1576 1577 max += min(swap, (unsigned long)total_swap_pages); 1578 } 1579 } 1580 return max; 1581 } 1582 1583 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) 1584 { 1585 return page_counter_read(&memcg->memory); 1586 } 1587 1588 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1589 int order) 1590 { 1591 struct oom_control oc = { 1592 .zonelist = NULL, 1593 .nodemask = NULL, 1594 .memcg = memcg, 1595 .gfp_mask = gfp_mask, 1596 .order = order, 1597 }; 1598 bool ret = true; 1599 1600 if (mutex_lock_killable(&oom_lock)) 1601 return true; 1602 1603 if (mem_cgroup_margin(memcg) >= (1 << order)) 1604 goto unlock; 1605 1606 /* 1607 * A few threads which were not waiting at mutex_lock_killable() can 1608 * fail to bail out. Therefore, check again after holding oom_lock. 1609 */ 1610 ret = should_force_charge() || out_of_memory(&oc); 1611 1612 unlock: 1613 mutex_unlock(&oom_lock); 1614 return ret; 1615 } 1616 1617 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1618 pg_data_t *pgdat, 1619 gfp_t gfp_mask, 1620 unsigned long *total_scanned) 1621 { 1622 struct mem_cgroup *victim = NULL; 1623 int total = 0; 1624 int loop = 0; 1625 unsigned long excess; 1626 unsigned long nr_scanned; 1627 struct mem_cgroup_reclaim_cookie reclaim = { 1628 .pgdat = pgdat, 1629 }; 1630 1631 excess = soft_limit_excess(root_memcg); 1632 1633 while (1) { 1634 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1635 if (!victim) { 1636 loop++; 1637 if (loop >= 2) { 1638 /* 1639 * If we have not been able to reclaim 1640 * anything, it might because there are 1641 * no reclaimable pages under this hierarchy 1642 */ 1643 if (!total) 1644 break; 1645 /* 1646 * We want to do more targeted reclaim. 1647 * excess >> 2 is not to excessive so as to 1648 * reclaim too much, nor too less that we keep 1649 * coming back to reclaim from this cgroup 1650 */ 1651 if (total >= (excess >> 2) || 1652 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1653 break; 1654 } 1655 continue; 1656 } 1657 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1658 pgdat, &nr_scanned); 1659 *total_scanned += nr_scanned; 1660 if (!soft_limit_excess(root_memcg)) 1661 break; 1662 } 1663 mem_cgroup_iter_break(root_memcg, victim); 1664 return total; 1665 } 1666 1667 #ifdef CONFIG_LOCKDEP 1668 static struct lockdep_map memcg_oom_lock_dep_map = { 1669 .name = "memcg_oom_lock", 1670 }; 1671 #endif 1672 1673 static DEFINE_SPINLOCK(memcg_oom_lock); 1674 1675 /* 1676 * Check OOM-Killer is already running under our hierarchy. 1677 * If someone is running, return false. 1678 */ 1679 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1680 { 1681 struct mem_cgroup *iter, *failed = NULL; 1682 1683 spin_lock(&memcg_oom_lock); 1684 1685 for_each_mem_cgroup_tree(iter, memcg) { 1686 if (iter->oom_lock) { 1687 /* 1688 * this subtree of our hierarchy is already locked 1689 * so we cannot give a lock. 1690 */ 1691 failed = iter; 1692 mem_cgroup_iter_break(memcg, iter); 1693 break; 1694 } else 1695 iter->oom_lock = true; 1696 } 1697 1698 if (failed) { 1699 /* 1700 * OK, we failed to lock the whole subtree so we have 1701 * to clean up what we set up to the failing subtree 1702 */ 1703 for_each_mem_cgroup_tree(iter, memcg) { 1704 if (iter == failed) { 1705 mem_cgroup_iter_break(memcg, iter); 1706 break; 1707 } 1708 iter->oom_lock = false; 1709 } 1710 } else 1711 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1712 1713 spin_unlock(&memcg_oom_lock); 1714 1715 return !failed; 1716 } 1717 1718 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1719 { 1720 struct mem_cgroup *iter; 1721 1722 spin_lock(&memcg_oom_lock); 1723 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_); 1724 for_each_mem_cgroup_tree(iter, memcg) 1725 iter->oom_lock = false; 1726 spin_unlock(&memcg_oom_lock); 1727 } 1728 1729 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1730 { 1731 struct mem_cgroup *iter; 1732 1733 spin_lock(&memcg_oom_lock); 1734 for_each_mem_cgroup_tree(iter, memcg) 1735 iter->under_oom++; 1736 spin_unlock(&memcg_oom_lock); 1737 } 1738 1739 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1740 { 1741 struct mem_cgroup *iter; 1742 1743 /* 1744 * Be careful about under_oom underflows because a child memcg 1745 * could have been added after mem_cgroup_mark_under_oom. 1746 */ 1747 spin_lock(&memcg_oom_lock); 1748 for_each_mem_cgroup_tree(iter, memcg) 1749 if (iter->under_oom > 0) 1750 iter->under_oom--; 1751 spin_unlock(&memcg_oom_lock); 1752 } 1753 1754 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1755 1756 struct oom_wait_info { 1757 struct mem_cgroup *memcg; 1758 wait_queue_entry_t wait; 1759 }; 1760 1761 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1762 unsigned mode, int sync, void *arg) 1763 { 1764 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1765 struct mem_cgroup *oom_wait_memcg; 1766 struct oom_wait_info *oom_wait_info; 1767 1768 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1769 oom_wait_memcg = oom_wait_info->memcg; 1770 1771 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1772 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1773 return 0; 1774 return autoremove_wake_function(wait, mode, sync, arg); 1775 } 1776 1777 static void memcg_oom_recover(struct mem_cgroup *memcg) 1778 { 1779 /* 1780 * For the following lockless ->under_oom test, the only required 1781 * guarantee is that it must see the state asserted by an OOM when 1782 * this function is called as a result of userland actions 1783 * triggered by the notification of the OOM. This is trivially 1784 * achieved by invoking mem_cgroup_mark_under_oom() before 1785 * triggering notification. 1786 */ 1787 if (memcg && memcg->under_oom) 1788 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1789 } 1790 1791 enum oom_status { 1792 OOM_SUCCESS, 1793 OOM_FAILED, 1794 OOM_ASYNC, 1795 OOM_SKIPPED 1796 }; 1797 1798 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1799 { 1800 enum oom_status ret; 1801 bool locked; 1802 1803 if (order > PAGE_ALLOC_COSTLY_ORDER) 1804 return OOM_SKIPPED; 1805 1806 memcg_memory_event(memcg, MEMCG_OOM); 1807 1808 /* 1809 * We are in the middle of the charge context here, so we 1810 * don't want to block when potentially sitting on a callstack 1811 * that holds all kinds of filesystem and mm locks. 1812 * 1813 * cgroup1 allows disabling the OOM killer and waiting for outside 1814 * handling until the charge can succeed; remember the context and put 1815 * the task to sleep at the end of the page fault when all locks are 1816 * released. 1817 * 1818 * On the other hand, in-kernel OOM killer allows for an async victim 1819 * memory reclaim (oom_reaper) and that means that we are not solely 1820 * relying on the oom victim to make a forward progress and we can 1821 * invoke the oom killer here. 1822 * 1823 * Please note that mem_cgroup_out_of_memory might fail to find a 1824 * victim and then we have to bail out from the charge path. 1825 */ 1826 if (memcg->oom_kill_disable) { 1827 if (!current->in_user_fault) 1828 return OOM_SKIPPED; 1829 css_get(&memcg->css); 1830 current->memcg_in_oom = memcg; 1831 current->memcg_oom_gfp_mask = mask; 1832 current->memcg_oom_order = order; 1833 1834 return OOM_ASYNC; 1835 } 1836 1837 mem_cgroup_mark_under_oom(memcg); 1838 1839 locked = mem_cgroup_oom_trylock(memcg); 1840 1841 if (locked) 1842 mem_cgroup_oom_notify(memcg); 1843 1844 mem_cgroup_unmark_under_oom(memcg); 1845 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1846 ret = OOM_SUCCESS; 1847 else 1848 ret = OOM_FAILED; 1849 1850 if (locked) 1851 mem_cgroup_oom_unlock(memcg); 1852 1853 return ret; 1854 } 1855 1856 /** 1857 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1858 * @handle: actually kill/wait or just clean up the OOM state 1859 * 1860 * This has to be called at the end of a page fault if the memcg OOM 1861 * handler was enabled. 1862 * 1863 * Memcg supports userspace OOM handling where failed allocations must 1864 * sleep on a waitqueue until the userspace task resolves the 1865 * situation. Sleeping directly in the charge context with all kinds 1866 * of locks held is not a good idea, instead we remember an OOM state 1867 * in the task and mem_cgroup_oom_synchronize() has to be called at 1868 * the end of the page fault to complete the OOM handling. 1869 * 1870 * Returns %true if an ongoing memcg OOM situation was detected and 1871 * completed, %false otherwise. 1872 */ 1873 bool mem_cgroup_oom_synchronize(bool handle) 1874 { 1875 struct mem_cgroup *memcg = current->memcg_in_oom; 1876 struct oom_wait_info owait; 1877 bool locked; 1878 1879 /* OOM is global, do not handle */ 1880 if (!memcg) 1881 return false; 1882 1883 if (!handle) 1884 goto cleanup; 1885 1886 owait.memcg = memcg; 1887 owait.wait.flags = 0; 1888 owait.wait.func = memcg_oom_wake_function; 1889 owait.wait.private = current; 1890 INIT_LIST_HEAD(&owait.wait.entry); 1891 1892 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1893 mem_cgroup_mark_under_oom(memcg); 1894 1895 locked = mem_cgroup_oom_trylock(memcg); 1896 1897 if (locked) 1898 mem_cgroup_oom_notify(memcg); 1899 1900 if (locked && !memcg->oom_kill_disable) { 1901 mem_cgroup_unmark_under_oom(memcg); 1902 finish_wait(&memcg_oom_waitq, &owait.wait); 1903 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1904 current->memcg_oom_order); 1905 } else { 1906 schedule(); 1907 mem_cgroup_unmark_under_oom(memcg); 1908 finish_wait(&memcg_oom_waitq, &owait.wait); 1909 } 1910 1911 if (locked) { 1912 mem_cgroup_oom_unlock(memcg); 1913 /* 1914 * There is no guarantee that an OOM-lock contender 1915 * sees the wakeups triggered by the OOM kill 1916 * uncharges. Wake any sleepers explicitly. 1917 */ 1918 memcg_oom_recover(memcg); 1919 } 1920 cleanup: 1921 current->memcg_in_oom = NULL; 1922 css_put(&memcg->css); 1923 return true; 1924 } 1925 1926 /** 1927 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1928 * @victim: task to be killed by the OOM killer 1929 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1930 * 1931 * Returns a pointer to a memory cgroup, which has to be cleaned up 1932 * by killing all belonging OOM-killable tasks. 1933 * 1934 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 1935 */ 1936 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1937 struct mem_cgroup *oom_domain) 1938 { 1939 struct mem_cgroup *oom_group = NULL; 1940 struct mem_cgroup *memcg; 1941 1942 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1943 return NULL; 1944 1945 if (!oom_domain) 1946 oom_domain = root_mem_cgroup; 1947 1948 rcu_read_lock(); 1949 1950 memcg = mem_cgroup_from_task(victim); 1951 if (memcg == root_mem_cgroup) 1952 goto out; 1953 1954 /* 1955 * If the victim task has been asynchronously moved to a different 1956 * memory cgroup, we might end up killing tasks outside oom_domain. 1957 * In this case it's better to ignore memory.group.oom. 1958 */ 1959 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1960 goto out; 1961 1962 /* 1963 * Traverse the memory cgroup hierarchy from the victim task's 1964 * cgroup up to the OOMing cgroup (or root) to find the 1965 * highest-level memory cgroup with oom.group set. 1966 */ 1967 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1968 if (memcg->oom_group) 1969 oom_group = memcg; 1970 1971 if (memcg == oom_domain) 1972 break; 1973 } 1974 1975 if (oom_group) 1976 css_get(&oom_group->css); 1977 out: 1978 rcu_read_unlock(); 1979 1980 return oom_group; 1981 } 1982 1983 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1984 { 1985 pr_info("Tasks in "); 1986 pr_cont_cgroup_path(memcg->css.cgroup); 1987 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1988 } 1989 1990 /** 1991 * lock_page_memcg - lock a page and memcg binding 1992 * @page: the page 1993 * 1994 * This function protects unlocked LRU pages from being moved to 1995 * another cgroup. 1996 * 1997 * It ensures lifetime of the locked memcg. Caller is responsible 1998 * for the lifetime of the page. 1999 */ 2000 void lock_page_memcg(struct page *page) 2001 { 2002 struct page *head = compound_head(page); /* rmap on tail pages */ 2003 struct mem_cgroup *memcg; 2004 unsigned long flags; 2005 2006 /* 2007 * The RCU lock is held throughout the transaction. The fast 2008 * path can get away without acquiring the memcg->move_lock 2009 * because page moving starts with an RCU grace period. 2010 */ 2011 rcu_read_lock(); 2012 2013 if (mem_cgroup_disabled()) 2014 return; 2015 again: 2016 memcg = page_memcg(head); 2017 if (unlikely(!memcg)) 2018 return; 2019 2020 #ifdef CONFIG_PROVE_LOCKING 2021 local_irq_save(flags); 2022 might_lock(&memcg->move_lock); 2023 local_irq_restore(flags); 2024 #endif 2025 2026 if (atomic_read(&memcg->moving_account) <= 0) 2027 return; 2028 2029 spin_lock_irqsave(&memcg->move_lock, flags); 2030 if (memcg != page_memcg(head)) { 2031 spin_unlock_irqrestore(&memcg->move_lock, flags); 2032 goto again; 2033 } 2034 2035 /* 2036 * When charge migration first begins, we can have multiple 2037 * critical sections holding the fast-path RCU lock and one 2038 * holding the slowpath move_lock. Track the task who has the 2039 * move_lock for unlock_page_memcg(). 2040 */ 2041 memcg->move_lock_task = current; 2042 memcg->move_lock_flags = flags; 2043 } 2044 EXPORT_SYMBOL(lock_page_memcg); 2045 2046 static void __unlock_page_memcg(struct mem_cgroup *memcg) 2047 { 2048 if (memcg && memcg->move_lock_task == current) { 2049 unsigned long flags = memcg->move_lock_flags; 2050 2051 memcg->move_lock_task = NULL; 2052 memcg->move_lock_flags = 0; 2053 2054 spin_unlock_irqrestore(&memcg->move_lock, flags); 2055 } 2056 2057 rcu_read_unlock(); 2058 } 2059 2060 /** 2061 * unlock_page_memcg - unlock a page and memcg binding 2062 * @page: the page 2063 */ 2064 void unlock_page_memcg(struct page *page) 2065 { 2066 struct page *head = compound_head(page); 2067 2068 __unlock_page_memcg(page_memcg(head)); 2069 } 2070 EXPORT_SYMBOL(unlock_page_memcg); 2071 2072 struct obj_stock { 2073 #ifdef CONFIG_MEMCG_KMEM 2074 struct obj_cgroup *cached_objcg; 2075 struct pglist_data *cached_pgdat; 2076 unsigned int nr_bytes; 2077 int nr_slab_reclaimable_b; 2078 int nr_slab_unreclaimable_b; 2079 #else 2080 int dummy[0]; 2081 #endif 2082 }; 2083 2084 struct memcg_stock_pcp { 2085 struct mem_cgroup *cached; /* this never be root cgroup */ 2086 unsigned int nr_pages; 2087 struct obj_stock task_obj; 2088 struct obj_stock irq_obj; 2089 2090 struct work_struct work; 2091 unsigned long flags; 2092 #define FLUSHING_CACHED_CHARGE 0 2093 }; 2094 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2095 static DEFINE_MUTEX(percpu_charge_mutex); 2096 2097 #ifdef CONFIG_MEMCG_KMEM 2098 static void drain_obj_stock(struct obj_stock *stock); 2099 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2100 struct mem_cgroup *root_memcg); 2101 2102 #else 2103 static inline void drain_obj_stock(struct obj_stock *stock) 2104 { 2105 } 2106 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2107 struct mem_cgroup *root_memcg) 2108 { 2109 return false; 2110 } 2111 #endif 2112 2113 /* 2114 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable 2115 * sequence used in this case to access content from object stock is slow. 2116 * To optimize for user context access, there are now two object stocks for 2117 * task context and interrupt context access respectively. 2118 * 2119 * The task context object stock can be accessed by disabling preemption only 2120 * which is cheap in non-preempt kernel. The interrupt context object stock 2121 * can only be accessed after disabling interrupt. User context code can 2122 * access interrupt object stock, but not vice versa. 2123 */ 2124 static inline struct obj_stock *get_obj_stock(unsigned long *pflags) 2125 { 2126 struct memcg_stock_pcp *stock; 2127 2128 if (likely(in_task())) { 2129 *pflags = 0UL; 2130 preempt_disable(); 2131 stock = this_cpu_ptr(&memcg_stock); 2132 return &stock->task_obj; 2133 } 2134 2135 local_irq_save(*pflags); 2136 stock = this_cpu_ptr(&memcg_stock); 2137 return &stock->irq_obj; 2138 } 2139 2140 static inline void put_obj_stock(unsigned long flags) 2141 { 2142 if (likely(in_task())) 2143 preempt_enable(); 2144 else 2145 local_irq_restore(flags); 2146 } 2147 2148 /** 2149 * consume_stock: Try to consume stocked charge on this cpu. 2150 * @memcg: memcg to consume from. 2151 * @nr_pages: how many pages to charge. 2152 * 2153 * The charges will only happen if @memcg matches the current cpu's memcg 2154 * stock, and at least @nr_pages are available in that stock. Failure to 2155 * service an allocation will refill the stock. 2156 * 2157 * returns true if successful, false otherwise. 2158 */ 2159 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2160 { 2161 struct memcg_stock_pcp *stock; 2162 unsigned long flags; 2163 bool ret = false; 2164 2165 if (nr_pages > MEMCG_CHARGE_BATCH) 2166 return ret; 2167 2168 local_irq_save(flags); 2169 2170 stock = this_cpu_ptr(&memcg_stock); 2171 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2172 stock->nr_pages -= nr_pages; 2173 ret = true; 2174 } 2175 2176 local_irq_restore(flags); 2177 2178 return ret; 2179 } 2180 2181 /* 2182 * Returns stocks cached in percpu and reset cached information. 2183 */ 2184 static void drain_stock(struct memcg_stock_pcp *stock) 2185 { 2186 struct mem_cgroup *old = stock->cached; 2187 2188 if (!old) 2189 return; 2190 2191 if (stock->nr_pages) { 2192 page_counter_uncharge(&old->memory, stock->nr_pages); 2193 if (do_memsw_account()) 2194 page_counter_uncharge(&old->memsw, stock->nr_pages); 2195 stock->nr_pages = 0; 2196 } 2197 2198 css_put(&old->css); 2199 stock->cached = NULL; 2200 } 2201 2202 static void drain_local_stock(struct work_struct *dummy) 2203 { 2204 struct memcg_stock_pcp *stock; 2205 unsigned long flags; 2206 2207 /* 2208 * The only protection from memory hotplug vs. drain_stock races is 2209 * that we always operate on local CPU stock here with IRQ disabled 2210 */ 2211 local_irq_save(flags); 2212 2213 stock = this_cpu_ptr(&memcg_stock); 2214 drain_obj_stock(&stock->irq_obj); 2215 if (in_task()) 2216 drain_obj_stock(&stock->task_obj); 2217 drain_stock(stock); 2218 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2219 2220 local_irq_restore(flags); 2221 } 2222 2223 /* 2224 * Cache charges(val) to local per_cpu area. 2225 * This will be consumed by consume_stock() function, later. 2226 */ 2227 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2228 { 2229 struct memcg_stock_pcp *stock; 2230 unsigned long flags; 2231 2232 local_irq_save(flags); 2233 2234 stock = this_cpu_ptr(&memcg_stock); 2235 if (stock->cached != memcg) { /* reset if necessary */ 2236 drain_stock(stock); 2237 css_get(&memcg->css); 2238 stock->cached = memcg; 2239 } 2240 stock->nr_pages += nr_pages; 2241 2242 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2243 drain_stock(stock); 2244 2245 local_irq_restore(flags); 2246 } 2247 2248 /* 2249 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2250 * of the hierarchy under it. 2251 */ 2252 static void drain_all_stock(struct mem_cgroup *root_memcg) 2253 { 2254 int cpu, curcpu; 2255 2256 /* If someone's already draining, avoid adding running more workers. */ 2257 if (!mutex_trylock(&percpu_charge_mutex)) 2258 return; 2259 /* 2260 * Notify other cpus that system-wide "drain" is running 2261 * We do not care about races with the cpu hotplug because cpu down 2262 * as well as workers from this path always operate on the local 2263 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2264 */ 2265 curcpu = get_cpu(); 2266 for_each_online_cpu(cpu) { 2267 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2268 struct mem_cgroup *memcg; 2269 bool flush = false; 2270 2271 rcu_read_lock(); 2272 memcg = stock->cached; 2273 if (memcg && stock->nr_pages && 2274 mem_cgroup_is_descendant(memcg, root_memcg)) 2275 flush = true; 2276 if (obj_stock_flush_required(stock, root_memcg)) 2277 flush = true; 2278 rcu_read_unlock(); 2279 2280 if (flush && 2281 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2282 if (cpu == curcpu) 2283 drain_local_stock(&stock->work); 2284 else 2285 schedule_work_on(cpu, &stock->work); 2286 } 2287 } 2288 put_cpu(); 2289 mutex_unlock(&percpu_charge_mutex); 2290 } 2291 2292 static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu) 2293 { 2294 int nid; 2295 2296 for_each_node(nid) { 2297 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 2298 unsigned long stat[NR_VM_NODE_STAT_ITEMS]; 2299 struct batched_lruvec_stat *lstatc; 2300 int i; 2301 2302 lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu); 2303 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 2304 stat[i] = lstatc->count[i]; 2305 lstatc->count[i] = 0; 2306 } 2307 2308 do { 2309 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 2310 atomic_long_add(stat[i], &pn->lruvec_stat[i]); 2311 } while ((pn = parent_nodeinfo(pn, nid))); 2312 } 2313 } 2314 2315 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2316 { 2317 struct memcg_stock_pcp *stock; 2318 struct mem_cgroup *memcg; 2319 2320 stock = &per_cpu(memcg_stock, cpu); 2321 drain_stock(stock); 2322 2323 for_each_mem_cgroup(memcg) 2324 memcg_flush_lruvec_page_state(memcg, cpu); 2325 2326 return 0; 2327 } 2328 2329 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2330 unsigned int nr_pages, 2331 gfp_t gfp_mask) 2332 { 2333 unsigned long nr_reclaimed = 0; 2334 2335 do { 2336 unsigned long pflags; 2337 2338 if (page_counter_read(&memcg->memory) <= 2339 READ_ONCE(memcg->memory.high)) 2340 continue; 2341 2342 memcg_memory_event(memcg, MEMCG_HIGH); 2343 2344 psi_memstall_enter(&pflags); 2345 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2346 gfp_mask, true); 2347 psi_memstall_leave(&pflags); 2348 } while ((memcg = parent_mem_cgroup(memcg)) && 2349 !mem_cgroup_is_root(memcg)); 2350 2351 return nr_reclaimed; 2352 } 2353 2354 static void high_work_func(struct work_struct *work) 2355 { 2356 struct mem_cgroup *memcg; 2357 2358 memcg = container_of(work, struct mem_cgroup, high_work); 2359 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2360 } 2361 2362 /* 2363 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2364 * enough to still cause a significant slowdown in most cases, while still 2365 * allowing diagnostics and tracing to proceed without becoming stuck. 2366 */ 2367 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2368 2369 /* 2370 * When calculating the delay, we use these either side of the exponentiation to 2371 * maintain precision and scale to a reasonable number of jiffies (see the table 2372 * below. 2373 * 2374 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2375 * overage ratio to a delay. 2376 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2377 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2378 * to produce a reasonable delay curve. 2379 * 2380 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2381 * reasonable delay curve compared to precision-adjusted overage, not 2382 * penalising heavily at first, but still making sure that growth beyond the 2383 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2384 * example, with a high of 100 megabytes: 2385 * 2386 * +-------+------------------------+ 2387 * | usage | time to allocate in ms | 2388 * +-------+------------------------+ 2389 * | 100M | 0 | 2390 * | 101M | 6 | 2391 * | 102M | 25 | 2392 * | 103M | 57 | 2393 * | 104M | 102 | 2394 * | 105M | 159 | 2395 * | 106M | 230 | 2396 * | 107M | 313 | 2397 * | 108M | 409 | 2398 * | 109M | 518 | 2399 * | 110M | 639 | 2400 * | 111M | 774 | 2401 * | 112M | 921 | 2402 * | 113M | 1081 | 2403 * | 114M | 1254 | 2404 * | 115M | 1439 | 2405 * | 116M | 1638 | 2406 * | 117M | 1849 | 2407 * | 118M | 2000 | 2408 * | 119M | 2000 | 2409 * | 120M | 2000 | 2410 * +-------+------------------------+ 2411 */ 2412 #define MEMCG_DELAY_PRECISION_SHIFT 20 2413 #define MEMCG_DELAY_SCALING_SHIFT 14 2414 2415 static u64 calculate_overage(unsigned long usage, unsigned long high) 2416 { 2417 u64 overage; 2418 2419 if (usage <= high) 2420 return 0; 2421 2422 /* 2423 * Prevent division by 0 in overage calculation by acting as if 2424 * it was a threshold of 1 page 2425 */ 2426 high = max(high, 1UL); 2427 2428 overage = usage - high; 2429 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2430 return div64_u64(overage, high); 2431 } 2432 2433 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2434 { 2435 u64 overage, max_overage = 0; 2436 2437 do { 2438 overage = calculate_overage(page_counter_read(&memcg->memory), 2439 READ_ONCE(memcg->memory.high)); 2440 max_overage = max(overage, max_overage); 2441 } while ((memcg = parent_mem_cgroup(memcg)) && 2442 !mem_cgroup_is_root(memcg)); 2443 2444 return max_overage; 2445 } 2446 2447 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2448 { 2449 u64 overage, max_overage = 0; 2450 2451 do { 2452 overage = calculate_overage(page_counter_read(&memcg->swap), 2453 READ_ONCE(memcg->swap.high)); 2454 if (overage) 2455 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2456 max_overage = max(overage, max_overage); 2457 } while ((memcg = parent_mem_cgroup(memcg)) && 2458 !mem_cgroup_is_root(memcg)); 2459 2460 return max_overage; 2461 } 2462 2463 /* 2464 * Get the number of jiffies that we should penalise a mischievous cgroup which 2465 * is exceeding its memory.high by checking both it and its ancestors. 2466 */ 2467 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2468 unsigned int nr_pages, 2469 u64 max_overage) 2470 { 2471 unsigned long penalty_jiffies; 2472 2473 if (!max_overage) 2474 return 0; 2475 2476 /* 2477 * We use overage compared to memory.high to calculate the number of 2478 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2479 * fairly lenient on small overages, and increasingly harsh when the 2480 * memcg in question makes it clear that it has no intention of stopping 2481 * its crazy behaviour, so we exponentially increase the delay based on 2482 * overage amount. 2483 */ 2484 penalty_jiffies = max_overage * max_overage * HZ; 2485 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2486 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2487 2488 /* 2489 * Factor in the task's own contribution to the overage, such that four 2490 * N-sized allocations are throttled approximately the same as one 2491 * 4N-sized allocation. 2492 * 2493 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2494 * larger the current charge patch is than that. 2495 */ 2496 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2497 } 2498 2499 /* 2500 * Scheduled by try_charge() to be executed from the userland return path 2501 * and reclaims memory over the high limit. 2502 */ 2503 void mem_cgroup_handle_over_high(void) 2504 { 2505 unsigned long penalty_jiffies; 2506 unsigned long pflags; 2507 unsigned long nr_reclaimed; 2508 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2509 int nr_retries = MAX_RECLAIM_RETRIES; 2510 struct mem_cgroup *memcg; 2511 bool in_retry = false; 2512 2513 if (likely(!nr_pages)) 2514 return; 2515 2516 memcg = get_mem_cgroup_from_mm(current->mm); 2517 current->memcg_nr_pages_over_high = 0; 2518 2519 retry_reclaim: 2520 /* 2521 * The allocating task should reclaim at least the batch size, but for 2522 * subsequent retries we only want to do what's necessary to prevent oom 2523 * or breaching resource isolation. 2524 * 2525 * This is distinct from memory.max or page allocator behaviour because 2526 * memory.high is currently batched, whereas memory.max and the page 2527 * allocator run every time an allocation is made. 2528 */ 2529 nr_reclaimed = reclaim_high(memcg, 2530 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2531 GFP_KERNEL); 2532 2533 /* 2534 * memory.high is breached and reclaim is unable to keep up. Throttle 2535 * allocators proactively to slow down excessive growth. 2536 */ 2537 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2538 mem_find_max_overage(memcg)); 2539 2540 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2541 swap_find_max_overage(memcg)); 2542 2543 /* 2544 * Clamp the max delay per usermode return so as to still keep the 2545 * application moving forwards and also permit diagnostics, albeit 2546 * extremely slowly. 2547 */ 2548 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2549 2550 /* 2551 * Don't sleep if the amount of jiffies this memcg owes us is so low 2552 * that it's not even worth doing, in an attempt to be nice to those who 2553 * go only a small amount over their memory.high value and maybe haven't 2554 * been aggressively reclaimed enough yet. 2555 */ 2556 if (penalty_jiffies <= HZ / 100) 2557 goto out; 2558 2559 /* 2560 * If reclaim is making forward progress but we're still over 2561 * memory.high, we want to encourage that rather than doing allocator 2562 * throttling. 2563 */ 2564 if (nr_reclaimed || nr_retries--) { 2565 in_retry = true; 2566 goto retry_reclaim; 2567 } 2568 2569 /* 2570 * If we exit early, we're guaranteed to die (since 2571 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2572 * need to account for any ill-begotten jiffies to pay them off later. 2573 */ 2574 psi_memstall_enter(&pflags); 2575 schedule_timeout_killable(penalty_jiffies); 2576 psi_memstall_leave(&pflags); 2577 2578 out: 2579 css_put(&memcg->css); 2580 } 2581 2582 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2583 unsigned int nr_pages) 2584 { 2585 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2586 int nr_retries = MAX_RECLAIM_RETRIES; 2587 struct mem_cgroup *mem_over_limit; 2588 struct page_counter *counter; 2589 enum oom_status oom_status; 2590 unsigned long nr_reclaimed; 2591 bool may_swap = true; 2592 bool drained = false; 2593 unsigned long pflags; 2594 2595 retry: 2596 if (consume_stock(memcg, nr_pages)) 2597 return 0; 2598 2599 if (!do_memsw_account() || 2600 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2601 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2602 goto done_restock; 2603 if (do_memsw_account()) 2604 page_counter_uncharge(&memcg->memsw, batch); 2605 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2606 } else { 2607 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2608 may_swap = false; 2609 } 2610 2611 if (batch > nr_pages) { 2612 batch = nr_pages; 2613 goto retry; 2614 } 2615 2616 /* 2617 * Memcg doesn't have a dedicated reserve for atomic 2618 * allocations. But like the global atomic pool, we need to 2619 * put the burden of reclaim on regular allocation requests 2620 * and let these go through as privileged allocations. 2621 */ 2622 if (gfp_mask & __GFP_ATOMIC) 2623 goto force; 2624 2625 /* 2626 * Unlike in global OOM situations, memcg is not in a physical 2627 * memory shortage. Allow dying and OOM-killed tasks to 2628 * bypass the last charges so that they can exit quickly and 2629 * free their memory. 2630 */ 2631 if (unlikely(should_force_charge())) 2632 goto force; 2633 2634 /* 2635 * Prevent unbounded recursion when reclaim operations need to 2636 * allocate memory. This might exceed the limits temporarily, 2637 * but we prefer facilitating memory reclaim and getting back 2638 * under the limit over triggering OOM kills in these cases. 2639 */ 2640 if (unlikely(current->flags & PF_MEMALLOC)) 2641 goto force; 2642 2643 if (unlikely(task_in_memcg_oom(current))) 2644 goto nomem; 2645 2646 if (!gfpflags_allow_blocking(gfp_mask)) 2647 goto nomem; 2648 2649 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2650 2651 psi_memstall_enter(&pflags); 2652 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2653 gfp_mask, may_swap); 2654 psi_memstall_leave(&pflags); 2655 2656 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2657 goto retry; 2658 2659 if (!drained) { 2660 drain_all_stock(mem_over_limit); 2661 drained = true; 2662 goto retry; 2663 } 2664 2665 if (gfp_mask & __GFP_NORETRY) 2666 goto nomem; 2667 /* 2668 * Even though the limit is exceeded at this point, reclaim 2669 * may have been able to free some pages. Retry the charge 2670 * before killing the task. 2671 * 2672 * Only for regular pages, though: huge pages are rather 2673 * unlikely to succeed so close to the limit, and we fall back 2674 * to regular pages anyway in case of failure. 2675 */ 2676 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2677 goto retry; 2678 /* 2679 * At task move, charge accounts can be doubly counted. So, it's 2680 * better to wait until the end of task_move if something is going on. 2681 */ 2682 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2683 goto retry; 2684 2685 if (nr_retries--) 2686 goto retry; 2687 2688 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2689 goto nomem; 2690 2691 if (fatal_signal_pending(current)) 2692 goto force; 2693 2694 /* 2695 * keep retrying as long as the memcg oom killer is able to make 2696 * a forward progress or bypass the charge if the oom killer 2697 * couldn't make any progress. 2698 */ 2699 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2700 get_order(nr_pages * PAGE_SIZE)); 2701 switch (oom_status) { 2702 case OOM_SUCCESS: 2703 nr_retries = MAX_RECLAIM_RETRIES; 2704 goto retry; 2705 case OOM_FAILED: 2706 goto force; 2707 default: 2708 goto nomem; 2709 } 2710 nomem: 2711 if (!(gfp_mask & __GFP_NOFAIL)) 2712 return -ENOMEM; 2713 force: 2714 /* 2715 * The allocation either can't fail or will lead to more memory 2716 * being freed very soon. Allow memory usage go over the limit 2717 * temporarily by force charging it. 2718 */ 2719 page_counter_charge(&memcg->memory, nr_pages); 2720 if (do_memsw_account()) 2721 page_counter_charge(&memcg->memsw, nr_pages); 2722 2723 return 0; 2724 2725 done_restock: 2726 if (batch > nr_pages) 2727 refill_stock(memcg, batch - nr_pages); 2728 2729 /* 2730 * If the hierarchy is above the normal consumption range, schedule 2731 * reclaim on returning to userland. We can perform reclaim here 2732 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2733 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2734 * not recorded as it most likely matches current's and won't 2735 * change in the meantime. As high limit is checked again before 2736 * reclaim, the cost of mismatch is negligible. 2737 */ 2738 do { 2739 bool mem_high, swap_high; 2740 2741 mem_high = page_counter_read(&memcg->memory) > 2742 READ_ONCE(memcg->memory.high); 2743 swap_high = page_counter_read(&memcg->swap) > 2744 READ_ONCE(memcg->swap.high); 2745 2746 /* Don't bother a random interrupted task */ 2747 if (in_interrupt()) { 2748 if (mem_high) { 2749 schedule_work(&memcg->high_work); 2750 break; 2751 } 2752 continue; 2753 } 2754 2755 if (mem_high || swap_high) { 2756 /* 2757 * The allocating tasks in this cgroup will need to do 2758 * reclaim or be throttled to prevent further growth 2759 * of the memory or swap footprints. 2760 * 2761 * Target some best-effort fairness between the tasks, 2762 * and distribute reclaim work and delay penalties 2763 * based on how much each task is actually allocating. 2764 */ 2765 current->memcg_nr_pages_over_high += batch; 2766 set_notify_resume(current); 2767 break; 2768 } 2769 } while ((memcg = parent_mem_cgroup(memcg))); 2770 2771 return 0; 2772 } 2773 2774 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2775 unsigned int nr_pages) 2776 { 2777 if (mem_cgroup_is_root(memcg)) 2778 return 0; 2779 2780 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2781 } 2782 2783 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2784 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2785 { 2786 if (mem_cgroup_is_root(memcg)) 2787 return; 2788 2789 page_counter_uncharge(&memcg->memory, nr_pages); 2790 if (do_memsw_account()) 2791 page_counter_uncharge(&memcg->memsw, nr_pages); 2792 } 2793 #endif 2794 2795 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2796 { 2797 VM_BUG_ON_PAGE(page_memcg(page), page); 2798 /* 2799 * Any of the following ensures page's memcg stability: 2800 * 2801 * - the page lock 2802 * - LRU isolation 2803 * - lock_page_memcg() 2804 * - exclusive reference 2805 */ 2806 page->memcg_data = (unsigned long)memcg; 2807 } 2808 2809 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) 2810 { 2811 struct mem_cgroup *memcg; 2812 2813 rcu_read_lock(); 2814 retry: 2815 memcg = obj_cgroup_memcg(objcg); 2816 if (unlikely(!css_tryget(&memcg->css))) 2817 goto retry; 2818 rcu_read_unlock(); 2819 2820 return memcg; 2821 } 2822 2823 #ifdef CONFIG_MEMCG_KMEM 2824 /* 2825 * The allocated objcg pointers array is not accounted directly. 2826 * Moreover, it should not come from DMA buffer and is not readily 2827 * reclaimable. So those GFP bits should be masked off. 2828 */ 2829 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) 2830 2831 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2832 gfp_t gfp, bool new_page) 2833 { 2834 unsigned int objects = objs_per_slab_page(s, page); 2835 unsigned long memcg_data; 2836 void *vec; 2837 2838 gfp &= ~OBJCGS_CLEAR_MASK; 2839 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2840 page_to_nid(page)); 2841 if (!vec) 2842 return -ENOMEM; 2843 2844 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; 2845 if (new_page) { 2846 /* 2847 * If the slab page is brand new and nobody can yet access 2848 * it's memcg_data, no synchronization is required and 2849 * memcg_data can be simply assigned. 2850 */ 2851 page->memcg_data = memcg_data; 2852 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) { 2853 /* 2854 * If the slab page is already in use, somebody can allocate 2855 * and assign obj_cgroups in parallel. In this case the existing 2856 * objcg vector should be reused. 2857 */ 2858 kfree(vec); 2859 return 0; 2860 } 2861 2862 kmemleak_not_leak(vec); 2863 return 0; 2864 } 2865 2866 /* 2867 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2868 * 2869 * A passed kernel object can be a slab object or a generic kernel page, so 2870 * different mechanisms for getting the memory cgroup pointer should be used. 2871 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 2872 * can not know for sure how the kernel object is implemented. 2873 * mem_cgroup_from_obj() can be safely used in such cases. 2874 * 2875 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2876 * cgroup_mutex, etc. 2877 */ 2878 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2879 { 2880 struct page *page; 2881 2882 if (mem_cgroup_disabled()) 2883 return NULL; 2884 2885 page = virt_to_head_page(p); 2886 2887 /* 2888 * Slab objects are accounted individually, not per-page. 2889 * Memcg membership data for each individual object is saved in 2890 * the page->obj_cgroups. 2891 */ 2892 if (page_objcgs_check(page)) { 2893 struct obj_cgroup *objcg; 2894 unsigned int off; 2895 2896 off = obj_to_index(page->slab_cache, page, p); 2897 objcg = page_objcgs(page)[off]; 2898 if (objcg) 2899 return obj_cgroup_memcg(objcg); 2900 2901 return NULL; 2902 } 2903 2904 /* 2905 * page_memcg_check() is used here, because page_has_obj_cgroups() 2906 * check above could fail because the object cgroups vector wasn't set 2907 * at that moment, but it can be set concurrently. 2908 * page_memcg_check(page) will guarantee that a proper memory 2909 * cgroup pointer or NULL will be returned. 2910 */ 2911 return page_memcg_check(page); 2912 } 2913 2914 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2915 { 2916 struct obj_cgroup *objcg = NULL; 2917 struct mem_cgroup *memcg; 2918 2919 if (memcg_kmem_bypass()) 2920 return NULL; 2921 2922 rcu_read_lock(); 2923 if (unlikely(active_memcg())) 2924 memcg = active_memcg(); 2925 else 2926 memcg = mem_cgroup_from_task(current); 2927 2928 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2929 objcg = rcu_dereference(memcg->objcg); 2930 if (objcg && obj_cgroup_tryget(objcg)) 2931 break; 2932 objcg = NULL; 2933 } 2934 rcu_read_unlock(); 2935 2936 return objcg; 2937 } 2938 2939 static int memcg_alloc_cache_id(void) 2940 { 2941 int id, size; 2942 int err; 2943 2944 id = ida_simple_get(&memcg_cache_ida, 2945 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2946 if (id < 0) 2947 return id; 2948 2949 if (id < memcg_nr_cache_ids) 2950 return id; 2951 2952 /* 2953 * There's no space for the new id in memcg_caches arrays, 2954 * so we have to grow them. 2955 */ 2956 down_write(&memcg_cache_ids_sem); 2957 2958 size = 2 * (id + 1); 2959 if (size < MEMCG_CACHES_MIN_SIZE) 2960 size = MEMCG_CACHES_MIN_SIZE; 2961 else if (size > MEMCG_CACHES_MAX_SIZE) 2962 size = MEMCG_CACHES_MAX_SIZE; 2963 2964 err = memcg_update_all_list_lrus(size); 2965 if (!err) 2966 memcg_nr_cache_ids = size; 2967 2968 up_write(&memcg_cache_ids_sem); 2969 2970 if (err) { 2971 ida_simple_remove(&memcg_cache_ida, id); 2972 return err; 2973 } 2974 return id; 2975 } 2976 2977 static void memcg_free_cache_id(int id) 2978 { 2979 ida_simple_remove(&memcg_cache_ida, id); 2980 } 2981 2982 /* 2983 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 2984 * @objcg: object cgroup to uncharge 2985 * @nr_pages: number of pages to uncharge 2986 */ 2987 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 2988 unsigned int nr_pages) 2989 { 2990 struct mem_cgroup *memcg; 2991 2992 memcg = get_mem_cgroup_from_objcg(objcg); 2993 2994 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2995 page_counter_uncharge(&memcg->kmem, nr_pages); 2996 refill_stock(memcg, nr_pages); 2997 2998 css_put(&memcg->css); 2999 } 3000 3001 /* 3002 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 3003 * @objcg: object cgroup to charge 3004 * @gfp: reclaim mode 3005 * @nr_pages: number of pages to charge 3006 * 3007 * Returns 0 on success, an error code on failure. 3008 */ 3009 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 3010 unsigned int nr_pages) 3011 { 3012 struct page_counter *counter; 3013 struct mem_cgroup *memcg; 3014 int ret; 3015 3016 memcg = get_mem_cgroup_from_objcg(objcg); 3017 3018 ret = try_charge_memcg(memcg, gfp, nr_pages); 3019 if (ret) 3020 goto out; 3021 3022 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3023 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3024 3025 /* 3026 * Enforce __GFP_NOFAIL allocation because callers are not 3027 * prepared to see failures and likely do not have any failure 3028 * handling code. 3029 */ 3030 if (gfp & __GFP_NOFAIL) { 3031 page_counter_charge(&memcg->kmem, nr_pages); 3032 goto out; 3033 } 3034 cancel_charge(memcg, nr_pages); 3035 ret = -ENOMEM; 3036 } 3037 out: 3038 css_put(&memcg->css); 3039 3040 return ret; 3041 } 3042 3043 /** 3044 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3045 * @page: page to charge 3046 * @gfp: reclaim mode 3047 * @order: allocation order 3048 * 3049 * Returns 0 on success, an error code on failure. 3050 */ 3051 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3052 { 3053 struct obj_cgroup *objcg; 3054 int ret = 0; 3055 3056 objcg = get_obj_cgroup_from_current(); 3057 if (objcg) { 3058 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 3059 if (!ret) { 3060 page->memcg_data = (unsigned long)objcg | 3061 MEMCG_DATA_KMEM; 3062 return 0; 3063 } 3064 obj_cgroup_put(objcg); 3065 } 3066 return ret; 3067 } 3068 3069 /** 3070 * __memcg_kmem_uncharge_page: uncharge a kmem page 3071 * @page: page to uncharge 3072 * @order: allocation order 3073 */ 3074 void __memcg_kmem_uncharge_page(struct page *page, int order) 3075 { 3076 struct obj_cgroup *objcg; 3077 unsigned int nr_pages = 1 << order; 3078 3079 if (!PageMemcgKmem(page)) 3080 return; 3081 3082 objcg = __page_objcg(page); 3083 obj_cgroup_uncharge_pages(objcg, nr_pages); 3084 page->memcg_data = 0; 3085 obj_cgroup_put(objcg); 3086 } 3087 3088 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 3089 enum node_stat_item idx, int nr) 3090 { 3091 unsigned long flags; 3092 struct obj_stock *stock = get_obj_stock(&flags); 3093 int *bytes; 3094 3095 /* 3096 * Save vmstat data in stock and skip vmstat array update unless 3097 * accumulating over a page of vmstat data or when pgdat or idx 3098 * changes. 3099 */ 3100 if (stock->cached_objcg != objcg) { 3101 drain_obj_stock(stock); 3102 obj_cgroup_get(objcg); 3103 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3104 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3105 stock->cached_objcg = objcg; 3106 stock->cached_pgdat = pgdat; 3107 } else if (stock->cached_pgdat != pgdat) { 3108 /* Flush the existing cached vmstat data */ 3109 struct pglist_data *oldpg = stock->cached_pgdat; 3110 3111 if (stock->nr_slab_reclaimable_b) { 3112 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 3113 stock->nr_slab_reclaimable_b); 3114 stock->nr_slab_reclaimable_b = 0; 3115 } 3116 if (stock->nr_slab_unreclaimable_b) { 3117 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 3118 stock->nr_slab_unreclaimable_b); 3119 stock->nr_slab_unreclaimable_b = 0; 3120 } 3121 stock->cached_pgdat = pgdat; 3122 } 3123 3124 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 3125 : &stock->nr_slab_unreclaimable_b; 3126 /* 3127 * Even for large object >= PAGE_SIZE, the vmstat data will still be 3128 * cached locally at least once before pushing it out. 3129 */ 3130 if (!*bytes) { 3131 *bytes = nr; 3132 nr = 0; 3133 } else { 3134 *bytes += nr; 3135 if (abs(*bytes) > PAGE_SIZE) { 3136 nr = *bytes; 3137 *bytes = 0; 3138 } else { 3139 nr = 0; 3140 } 3141 } 3142 if (nr) 3143 mod_objcg_mlstate(objcg, pgdat, idx, nr); 3144 3145 put_obj_stock(flags); 3146 } 3147 3148 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3149 { 3150 unsigned long flags; 3151 struct obj_stock *stock = get_obj_stock(&flags); 3152 bool ret = false; 3153 3154 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3155 stock->nr_bytes -= nr_bytes; 3156 ret = true; 3157 } 3158 3159 put_obj_stock(flags); 3160 3161 return ret; 3162 } 3163 3164 static void drain_obj_stock(struct obj_stock *stock) 3165 { 3166 struct obj_cgroup *old = stock->cached_objcg; 3167 3168 if (!old) 3169 return; 3170 3171 if (stock->nr_bytes) { 3172 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3173 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3174 3175 if (nr_pages) 3176 obj_cgroup_uncharge_pages(old, nr_pages); 3177 3178 /* 3179 * The leftover is flushed to the centralized per-memcg value. 3180 * On the next attempt to refill obj stock it will be moved 3181 * to a per-cpu stock (probably, on an other CPU), see 3182 * refill_obj_stock(). 3183 * 3184 * How often it's flushed is a trade-off between the memory 3185 * limit enforcement accuracy and potential CPU contention, 3186 * so it might be changed in the future. 3187 */ 3188 atomic_add(nr_bytes, &old->nr_charged_bytes); 3189 stock->nr_bytes = 0; 3190 } 3191 3192 /* 3193 * Flush the vmstat data in current stock 3194 */ 3195 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3196 if (stock->nr_slab_reclaimable_b) { 3197 mod_objcg_mlstate(old, stock->cached_pgdat, 3198 NR_SLAB_RECLAIMABLE_B, 3199 stock->nr_slab_reclaimable_b); 3200 stock->nr_slab_reclaimable_b = 0; 3201 } 3202 if (stock->nr_slab_unreclaimable_b) { 3203 mod_objcg_mlstate(old, stock->cached_pgdat, 3204 NR_SLAB_UNRECLAIMABLE_B, 3205 stock->nr_slab_unreclaimable_b); 3206 stock->nr_slab_unreclaimable_b = 0; 3207 } 3208 stock->cached_pgdat = NULL; 3209 } 3210 3211 obj_cgroup_put(old); 3212 stock->cached_objcg = NULL; 3213 } 3214 3215 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3216 struct mem_cgroup *root_memcg) 3217 { 3218 struct mem_cgroup *memcg; 3219 3220 if (in_task() && stock->task_obj.cached_objcg) { 3221 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg); 3222 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3223 return true; 3224 } 3225 if (stock->irq_obj.cached_objcg) { 3226 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg); 3227 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3228 return true; 3229 } 3230 3231 return false; 3232 } 3233 3234 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 3235 bool allow_uncharge) 3236 { 3237 unsigned long flags; 3238 struct obj_stock *stock = get_obj_stock(&flags); 3239 unsigned int nr_pages = 0; 3240 3241 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3242 drain_obj_stock(stock); 3243 obj_cgroup_get(objcg); 3244 stock->cached_objcg = objcg; 3245 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3246 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3247 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3248 } 3249 stock->nr_bytes += nr_bytes; 3250 3251 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3252 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3253 stock->nr_bytes &= (PAGE_SIZE - 1); 3254 } 3255 3256 put_obj_stock(flags); 3257 3258 if (nr_pages) 3259 obj_cgroup_uncharge_pages(objcg, nr_pages); 3260 } 3261 3262 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3263 { 3264 unsigned int nr_pages, nr_bytes; 3265 int ret; 3266 3267 if (consume_obj_stock(objcg, size)) 3268 return 0; 3269 3270 /* 3271 * In theory, objcg->nr_charged_bytes can have enough 3272 * pre-charged bytes to satisfy the allocation. However, 3273 * flushing objcg->nr_charged_bytes requires two atomic 3274 * operations, and objcg->nr_charged_bytes can't be big. 3275 * The shared objcg->nr_charged_bytes can also become a 3276 * performance bottleneck if all tasks of the same memcg are 3277 * trying to update it. So it's better to ignore it and try 3278 * grab some new pages. The stock's nr_bytes will be flushed to 3279 * objcg->nr_charged_bytes later on when objcg changes. 3280 * 3281 * The stock's nr_bytes may contain enough pre-charged bytes 3282 * to allow one less page from being charged, but we can't rely 3283 * on the pre-charged bytes not being changed outside of 3284 * consume_obj_stock() or refill_obj_stock(). So ignore those 3285 * pre-charged bytes as well when charging pages. To avoid a 3286 * page uncharge right after a page charge, we set the 3287 * allow_uncharge flag to false when calling refill_obj_stock() 3288 * to temporarily allow the pre-charged bytes to exceed the page 3289 * size limit. The maximum reachable value of the pre-charged 3290 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3291 * race. 3292 */ 3293 nr_pages = size >> PAGE_SHIFT; 3294 nr_bytes = size & (PAGE_SIZE - 1); 3295 3296 if (nr_bytes) 3297 nr_pages += 1; 3298 3299 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3300 if (!ret && nr_bytes) 3301 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 3302 3303 return ret; 3304 } 3305 3306 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3307 { 3308 refill_obj_stock(objcg, size, true); 3309 } 3310 3311 #endif /* CONFIG_MEMCG_KMEM */ 3312 3313 /* 3314 * Because page_memcg(head) is not set on tails, set it now. 3315 */ 3316 void split_page_memcg(struct page *head, unsigned int nr) 3317 { 3318 struct mem_cgroup *memcg = page_memcg(head); 3319 int i; 3320 3321 if (mem_cgroup_disabled() || !memcg) 3322 return; 3323 3324 for (i = 1; i < nr; i++) 3325 head[i].memcg_data = head->memcg_data; 3326 3327 if (PageMemcgKmem(head)) 3328 obj_cgroup_get_many(__page_objcg(head), nr - 1); 3329 else 3330 css_get_many(&memcg->css, nr - 1); 3331 } 3332 3333 #ifdef CONFIG_MEMCG_SWAP 3334 /** 3335 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3336 * @entry: swap entry to be moved 3337 * @from: mem_cgroup which the entry is moved from 3338 * @to: mem_cgroup which the entry is moved to 3339 * 3340 * It succeeds only when the swap_cgroup's record for this entry is the same 3341 * as the mem_cgroup's id of @from. 3342 * 3343 * Returns 0 on success, -EINVAL on failure. 3344 * 3345 * The caller must have charged to @to, IOW, called page_counter_charge() about 3346 * both res and memsw, and called css_get(). 3347 */ 3348 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3349 struct mem_cgroup *from, struct mem_cgroup *to) 3350 { 3351 unsigned short old_id, new_id; 3352 3353 old_id = mem_cgroup_id(from); 3354 new_id = mem_cgroup_id(to); 3355 3356 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3357 mod_memcg_state(from, MEMCG_SWAP, -1); 3358 mod_memcg_state(to, MEMCG_SWAP, 1); 3359 return 0; 3360 } 3361 return -EINVAL; 3362 } 3363 #else 3364 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3365 struct mem_cgroup *from, struct mem_cgroup *to) 3366 { 3367 return -EINVAL; 3368 } 3369 #endif 3370 3371 static DEFINE_MUTEX(memcg_max_mutex); 3372 3373 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3374 unsigned long max, bool memsw) 3375 { 3376 bool enlarge = false; 3377 bool drained = false; 3378 int ret; 3379 bool limits_invariant; 3380 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3381 3382 do { 3383 if (signal_pending(current)) { 3384 ret = -EINTR; 3385 break; 3386 } 3387 3388 mutex_lock(&memcg_max_mutex); 3389 /* 3390 * Make sure that the new limit (memsw or memory limit) doesn't 3391 * break our basic invariant rule memory.max <= memsw.max. 3392 */ 3393 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3394 max <= memcg->memsw.max; 3395 if (!limits_invariant) { 3396 mutex_unlock(&memcg_max_mutex); 3397 ret = -EINVAL; 3398 break; 3399 } 3400 if (max > counter->max) 3401 enlarge = true; 3402 ret = page_counter_set_max(counter, max); 3403 mutex_unlock(&memcg_max_mutex); 3404 3405 if (!ret) 3406 break; 3407 3408 if (!drained) { 3409 drain_all_stock(memcg); 3410 drained = true; 3411 continue; 3412 } 3413 3414 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3415 GFP_KERNEL, !memsw)) { 3416 ret = -EBUSY; 3417 break; 3418 } 3419 } while (true); 3420 3421 if (!ret && enlarge) 3422 memcg_oom_recover(memcg); 3423 3424 return ret; 3425 } 3426 3427 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3428 gfp_t gfp_mask, 3429 unsigned long *total_scanned) 3430 { 3431 unsigned long nr_reclaimed = 0; 3432 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3433 unsigned long reclaimed; 3434 int loop = 0; 3435 struct mem_cgroup_tree_per_node *mctz; 3436 unsigned long excess; 3437 unsigned long nr_scanned; 3438 3439 if (order > 0) 3440 return 0; 3441 3442 mctz = soft_limit_tree_node(pgdat->node_id); 3443 3444 /* 3445 * Do not even bother to check the largest node if the root 3446 * is empty. Do it lockless to prevent lock bouncing. Races 3447 * are acceptable as soft limit is best effort anyway. 3448 */ 3449 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3450 return 0; 3451 3452 /* 3453 * This loop can run a while, specially if mem_cgroup's continuously 3454 * keep exceeding their soft limit and putting the system under 3455 * pressure 3456 */ 3457 do { 3458 if (next_mz) 3459 mz = next_mz; 3460 else 3461 mz = mem_cgroup_largest_soft_limit_node(mctz); 3462 if (!mz) 3463 break; 3464 3465 nr_scanned = 0; 3466 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3467 gfp_mask, &nr_scanned); 3468 nr_reclaimed += reclaimed; 3469 *total_scanned += nr_scanned; 3470 spin_lock_irq(&mctz->lock); 3471 __mem_cgroup_remove_exceeded(mz, mctz); 3472 3473 /* 3474 * If we failed to reclaim anything from this memory cgroup 3475 * it is time to move on to the next cgroup 3476 */ 3477 next_mz = NULL; 3478 if (!reclaimed) 3479 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3480 3481 excess = soft_limit_excess(mz->memcg); 3482 /* 3483 * One school of thought says that we should not add 3484 * back the node to the tree if reclaim returns 0. 3485 * But our reclaim could return 0, simply because due 3486 * to priority we are exposing a smaller subset of 3487 * memory to reclaim from. Consider this as a longer 3488 * term TODO. 3489 */ 3490 /* If excess == 0, no tree ops */ 3491 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3492 spin_unlock_irq(&mctz->lock); 3493 css_put(&mz->memcg->css); 3494 loop++; 3495 /* 3496 * Could not reclaim anything and there are no more 3497 * mem cgroups to try or we seem to be looping without 3498 * reclaiming anything. 3499 */ 3500 if (!nr_reclaimed && 3501 (next_mz == NULL || 3502 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3503 break; 3504 } while (!nr_reclaimed); 3505 if (next_mz) 3506 css_put(&next_mz->memcg->css); 3507 return nr_reclaimed; 3508 } 3509 3510 /* 3511 * Reclaims as many pages from the given memcg as possible. 3512 * 3513 * Caller is responsible for holding css reference for memcg. 3514 */ 3515 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3516 { 3517 int nr_retries = MAX_RECLAIM_RETRIES; 3518 3519 /* we call try-to-free pages for make this cgroup empty */ 3520 lru_add_drain_all(); 3521 3522 drain_all_stock(memcg); 3523 3524 /* try to free all pages in this cgroup */ 3525 while (nr_retries && page_counter_read(&memcg->memory)) { 3526 int progress; 3527 3528 if (signal_pending(current)) 3529 return -EINTR; 3530 3531 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3532 GFP_KERNEL, true); 3533 if (!progress) { 3534 nr_retries--; 3535 /* maybe some writeback is necessary */ 3536 congestion_wait(BLK_RW_ASYNC, HZ/10); 3537 } 3538 3539 } 3540 3541 return 0; 3542 } 3543 3544 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3545 char *buf, size_t nbytes, 3546 loff_t off) 3547 { 3548 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3549 3550 if (mem_cgroup_is_root(memcg)) 3551 return -EINVAL; 3552 return mem_cgroup_force_empty(memcg) ?: nbytes; 3553 } 3554 3555 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3556 struct cftype *cft) 3557 { 3558 return 1; 3559 } 3560 3561 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3562 struct cftype *cft, u64 val) 3563 { 3564 if (val == 1) 3565 return 0; 3566 3567 pr_warn_once("Non-hierarchical mode is deprecated. " 3568 "Please report your usecase to linux-mm@kvack.org if you " 3569 "depend on this functionality.\n"); 3570 3571 return -EINVAL; 3572 } 3573 3574 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3575 { 3576 unsigned long val; 3577 3578 if (mem_cgroup_is_root(memcg)) { 3579 /* mem_cgroup_threshold() calls here from irqsafe context */ 3580 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); 3581 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3582 memcg_page_state(memcg, NR_ANON_MAPPED); 3583 if (swap) 3584 val += memcg_page_state(memcg, MEMCG_SWAP); 3585 } else { 3586 if (!swap) 3587 val = page_counter_read(&memcg->memory); 3588 else 3589 val = page_counter_read(&memcg->memsw); 3590 } 3591 return val; 3592 } 3593 3594 enum { 3595 RES_USAGE, 3596 RES_LIMIT, 3597 RES_MAX_USAGE, 3598 RES_FAILCNT, 3599 RES_SOFT_LIMIT, 3600 }; 3601 3602 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3603 struct cftype *cft) 3604 { 3605 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3606 struct page_counter *counter; 3607 3608 switch (MEMFILE_TYPE(cft->private)) { 3609 case _MEM: 3610 counter = &memcg->memory; 3611 break; 3612 case _MEMSWAP: 3613 counter = &memcg->memsw; 3614 break; 3615 case _KMEM: 3616 counter = &memcg->kmem; 3617 break; 3618 case _TCP: 3619 counter = &memcg->tcpmem; 3620 break; 3621 default: 3622 BUG(); 3623 } 3624 3625 switch (MEMFILE_ATTR(cft->private)) { 3626 case RES_USAGE: 3627 if (counter == &memcg->memory) 3628 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3629 if (counter == &memcg->memsw) 3630 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3631 return (u64)page_counter_read(counter) * PAGE_SIZE; 3632 case RES_LIMIT: 3633 return (u64)counter->max * PAGE_SIZE; 3634 case RES_MAX_USAGE: 3635 return (u64)counter->watermark * PAGE_SIZE; 3636 case RES_FAILCNT: 3637 return counter->failcnt; 3638 case RES_SOFT_LIMIT: 3639 return (u64)memcg->soft_limit * PAGE_SIZE; 3640 default: 3641 BUG(); 3642 } 3643 } 3644 3645 #ifdef CONFIG_MEMCG_KMEM 3646 static int memcg_online_kmem(struct mem_cgroup *memcg) 3647 { 3648 struct obj_cgroup *objcg; 3649 int memcg_id; 3650 3651 if (cgroup_memory_nokmem) 3652 return 0; 3653 3654 BUG_ON(memcg->kmemcg_id >= 0); 3655 BUG_ON(memcg->kmem_state); 3656 3657 memcg_id = memcg_alloc_cache_id(); 3658 if (memcg_id < 0) 3659 return memcg_id; 3660 3661 objcg = obj_cgroup_alloc(); 3662 if (!objcg) { 3663 memcg_free_cache_id(memcg_id); 3664 return -ENOMEM; 3665 } 3666 objcg->memcg = memcg; 3667 rcu_assign_pointer(memcg->objcg, objcg); 3668 3669 static_branch_enable(&memcg_kmem_enabled_key); 3670 3671 memcg->kmemcg_id = memcg_id; 3672 memcg->kmem_state = KMEM_ONLINE; 3673 3674 return 0; 3675 } 3676 3677 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3678 { 3679 struct cgroup_subsys_state *css; 3680 struct mem_cgroup *parent, *child; 3681 int kmemcg_id; 3682 3683 if (memcg->kmem_state != KMEM_ONLINE) 3684 return; 3685 3686 memcg->kmem_state = KMEM_ALLOCATED; 3687 3688 parent = parent_mem_cgroup(memcg); 3689 if (!parent) 3690 parent = root_mem_cgroup; 3691 3692 memcg_reparent_objcgs(memcg, parent); 3693 3694 kmemcg_id = memcg->kmemcg_id; 3695 BUG_ON(kmemcg_id < 0); 3696 3697 /* 3698 * Change kmemcg_id of this cgroup and all its descendants to the 3699 * parent's id, and then move all entries from this cgroup's list_lrus 3700 * to ones of the parent. After we have finished, all list_lrus 3701 * corresponding to this cgroup are guaranteed to remain empty. The 3702 * ordering is imposed by list_lru_node->lock taken by 3703 * memcg_drain_all_list_lrus(). 3704 */ 3705 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3706 css_for_each_descendant_pre(css, &memcg->css) { 3707 child = mem_cgroup_from_css(css); 3708 BUG_ON(child->kmemcg_id != kmemcg_id); 3709 child->kmemcg_id = parent->kmemcg_id; 3710 } 3711 rcu_read_unlock(); 3712 3713 memcg_drain_all_list_lrus(kmemcg_id, parent); 3714 3715 memcg_free_cache_id(kmemcg_id); 3716 } 3717 3718 static void memcg_free_kmem(struct mem_cgroup *memcg) 3719 { 3720 /* css_alloc() failed, offlining didn't happen */ 3721 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3722 memcg_offline_kmem(memcg); 3723 } 3724 #else 3725 static int memcg_online_kmem(struct mem_cgroup *memcg) 3726 { 3727 return 0; 3728 } 3729 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3730 { 3731 } 3732 static void memcg_free_kmem(struct mem_cgroup *memcg) 3733 { 3734 } 3735 #endif /* CONFIG_MEMCG_KMEM */ 3736 3737 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3738 unsigned long max) 3739 { 3740 int ret; 3741 3742 mutex_lock(&memcg_max_mutex); 3743 ret = page_counter_set_max(&memcg->kmem, max); 3744 mutex_unlock(&memcg_max_mutex); 3745 return ret; 3746 } 3747 3748 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3749 { 3750 int ret; 3751 3752 mutex_lock(&memcg_max_mutex); 3753 3754 ret = page_counter_set_max(&memcg->tcpmem, max); 3755 if (ret) 3756 goto out; 3757 3758 if (!memcg->tcpmem_active) { 3759 /* 3760 * The active flag needs to be written after the static_key 3761 * update. This is what guarantees that the socket activation 3762 * function is the last one to run. See mem_cgroup_sk_alloc() 3763 * for details, and note that we don't mark any socket as 3764 * belonging to this memcg until that flag is up. 3765 * 3766 * We need to do this, because static_keys will span multiple 3767 * sites, but we can't control their order. If we mark a socket 3768 * as accounted, but the accounting functions are not patched in 3769 * yet, we'll lose accounting. 3770 * 3771 * We never race with the readers in mem_cgroup_sk_alloc(), 3772 * because when this value change, the code to process it is not 3773 * patched in yet. 3774 */ 3775 static_branch_inc(&memcg_sockets_enabled_key); 3776 memcg->tcpmem_active = true; 3777 } 3778 out: 3779 mutex_unlock(&memcg_max_mutex); 3780 return ret; 3781 } 3782 3783 /* 3784 * The user of this function is... 3785 * RES_LIMIT. 3786 */ 3787 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3788 char *buf, size_t nbytes, loff_t off) 3789 { 3790 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3791 unsigned long nr_pages; 3792 int ret; 3793 3794 buf = strstrip(buf); 3795 ret = page_counter_memparse(buf, "-1", &nr_pages); 3796 if (ret) 3797 return ret; 3798 3799 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3800 case RES_LIMIT: 3801 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3802 ret = -EINVAL; 3803 break; 3804 } 3805 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3806 case _MEM: 3807 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3808 break; 3809 case _MEMSWAP: 3810 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3811 break; 3812 case _KMEM: 3813 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. " 3814 "Please report your usecase to linux-mm@kvack.org if you " 3815 "depend on this functionality.\n"); 3816 ret = memcg_update_kmem_max(memcg, nr_pages); 3817 break; 3818 case _TCP: 3819 ret = memcg_update_tcp_max(memcg, nr_pages); 3820 break; 3821 } 3822 break; 3823 case RES_SOFT_LIMIT: 3824 memcg->soft_limit = nr_pages; 3825 ret = 0; 3826 break; 3827 } 3828 return ret ?: nbytes; 3829 } 3830 3831 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3832 size_t nbytes, loff_t off) 3833 { 3834 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3835 struct page_counter *counter; 3836 3837 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3838 case _MEM: 3839 counter = &memcg->memory; 3840 break; 3841 case _MEMSWAP: 3842 counter = &memcg->memsw; 3843 break; 3844 case _KMEM: 3845 counter = &memcg->kmem; 3846 break; 3847 case _TCP: 3848 counter = &memcg->tcpmem; 3849 break; 3850 default: 3851 BUG(); 3852 } 3853 3854 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3855 case RES_MAX_USAGE: 3856 page_counter_reset_watermark(counter); 3857 break; 3858 case RES_FAILCNT: 3859 counter->failcnt = 0; 3860 break; 3861 default: 3862 BUG(); 3863 } 3864 3865 return nbytes; 3866 } 3867 3868 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3869 struct cftype *cft) 3870 { 3871 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3872 } 3873 3874 #ifdef CONFIG_MMU 3875 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3876 struct cftype *cft, u64 val) 3877 { 3878 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3879 3880 if (val & ~MOVE_MASK) 3881 return -EINVAL; 3882 3883 /* 3884 * No kind of locking is needed in here, because ->can_attach() will 3885 * check this value once in the beginning of the process, and then carry 3886 * on with stale data. This means that changes to this value will only 3887 * affect task migrations starting after the change. 3888 */ 3889 memcg->move_charge_at_immigrate = val; 3890 return 0; 3891 } 3892 #else 3893 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3894 struct cftype *cft, u64 val) 3895 { 3896 return -ENOSYS; 3897 } 3898 #endif 3899 3900 #ifdef CONFIG_NUMA 3901 3902 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3903 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3904 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3905 3906 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3907 int nid, unsigned int lru_mask, bool tree) 3908 { 3909 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3910 unsigned long nr = 0; 3911 enum lru_list lru; 3912 3913 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3914 3915 for_each_lru(lru) { 3916 if (!(BIT(lru) & lru_mask)) 3917 continue; 3918 if (tree) 3919 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3920 else 3921 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3922 } 3923 return nr; 3924 } 3925 3926 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3927 unsigned int lru_mask, 3928 bool tree) 3929 { 3930 unsigned long nr = 0; 3931 enum lru_list lru; 3932 3933 for_each_lru(lru) { 3934 if (!(BIT(lru) & lru_mask)) 3935 continue; 3936 if (tree) 3937 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3938 else 3939 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3940 } 3941 return nr; 3942 } 3943 3944 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3945 { 3946 struct numa_stat { 3947 const char *name; 3948 unsigned int lru_mask; 3949 }; 3950 3951 static const struct numa_stat stats[] = { 3952 { "total", LRU_ALL }, 3953 { "file", LRU_ALL_FILE }, 3954 { "anon", LRU_ALL_ANON }, 3955 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3956 }; 3957 const struct numa_stat *stat; 3958 int nid; 3959 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3960 3961 cgroup_rstat_flush(memcg->css.cgroup); 3962 3963 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3964 seq_printf(m, "%s=%lu", stat->name, 3965 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3966 false)); 3967 for_each_node_state(nid, N_MEMORY) 3968 seq_printf(m, " N%d=%lu", nid, 3969 mem_cgroup_node_nr_lru_pages(memcg, nid, 3970 stat->lru_mask, false)); 3971 seq_putc(m, '\n'); 3972 } 3973 3974 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3975 3976 seq_printf(m, "hierarchical_%s=%lu", stat->name, 3977 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3978 true)); 3979 for_each_node_state(nid, N_MEMORY) 3980 seq_printf(m, " N%d=%lu", nid, 3981 mem_cgroup_node_nr_lru_pages(memcg, nid, 3982 stat->lru_mask, true)); 3983 seq_putc(m, '\n'); 3984 } 3985 3986 return 0; 3987 } 3988 #endif /* CONFIG_NUMA */ 3989 3990 static const unsigned int memcg1_stats[] = { 3991 NR_FILE_PAGES, 3992 NR_ANON_MAPPED, 3993 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3994 NR_ANON_THPS, 3995 #endif 3996 NR_SHMEM, 3997 NR_FILE_MAPPED, 3998 NR_FILE_DIRTY, 3999 NR_WRITEBACK, 4000 MEMCG_SWAP, 4001 }; 4002 4003 static const char *const memcg1_stat_names[] = { 4004 "cache", 4005 "rss", 4006 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4007 "rss_huge", 4008 #endif 4009 "shmem", 4010 "mapped_file", 4011 "dirty", 4012 "writeback", 4013 "swap", 4014 }; 4015 4016 /* Universal VM events cgroup1 shows, original sort order */ 4017 static const unsigned int memcg1_events[] = { 4018 PGPGIN, 4019 PGPGOUT, 4020 PGFAULT, 4021 PGMAJFAULT, 4022 }; 4023 4024 static int memcg_stat_show(struct seq_file *m, void *v) 4025 { 4026 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4027 unsigned long memory, memsw; 4028 struct mem_cgroup *mi; 4029 unsigned int i; 4030 4031 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4032 4033 cgroup_rstat_flush(memcg->css.cgroup); 4034 4035 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4036 unsigned long nr; 4037 4038 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4039 continue; 4040 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4041 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 4042 } 4043 4044 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4045 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4046 memcg_events_local(memcg, memcg1_events[i])); 4047 4048 for (i = 0; i < NR_LRU_LISTS; i++) 4049 seq_printf(m, "%s %lu\n", lru_list_name(i), 4050 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4051 PAGE_SIZE); 4052 4053 /* Hierarchical information */ 4054 memory = memsw = PAGE_COUNTER_MAX; 4055 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4056 memory = min(memory, READ_ONCE(mi->memory.max)); 4057 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4058 } 4059 seq_printf(m, "hierarchical_memory_limit %llu\n", 4060 (u64)memory * PAGE_SIZE); 4061 if (do_memsw_account()) 4062 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4063 (u64)memsw * PAGE_SIZE); 4064 4065 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4066 unsigned long nr; 4067 4068 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4069 continue; 4070 nr = memcg_page_state(memcg, memcg1_stats[i]); 4071 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4072 (u64)nr * PAGE_SIZE); 4073 } 4074 4075 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4076 seq_printf(m, "total_%s %llu\n", 4077 vm_event_name(memcg1_events[i]), 4078 (u64)memcg_events(memcg, memcg1_events[i])); 4079 4080 for (i = 0; i < NR_LRU_LISTS; i++) 4081 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4082 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4083 PAGE_SIZE); 4084 4085 #ifdef CONFIG_DEBUG_VM 4086 { 4087 pg_data_t *pgdat; 4088 struct mem_cgroup_per_node *mz; 4089 unsigned long anon_cost = 0; 4090 unsigned long file_cost = 0; 4091 4092 for_each_online_pgdat(pgdat) { 4093 mz = memcg->nodeinfo[pgdat->node_id]; 4094 4095 anon_cost += mz->lruvec.anon_cost; 4096 file_cost += mz->lruvec.file_cost; 4097 } 4098 seq_printf(m, "anon_cost %lu\n", anon_cost); 4099 seq_printf(m, "file_cost %lu\n", file_cost); 4100 } 4101 #endif 4102 4103 return 0; 4104 } 4105 4106 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4107 struct cftype *cft) 4108 { 4109 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4110 4111 return mem_cgroup_swappiness(memcg); 4112 } 4113 4114 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4115 struct cftype *cft, u64 val) 4116 { 4117 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4118 4119 if (val > 100) 4120 return -EINVAL; 4121 4122 if (!mem_cgroup_is_root(memcg)) 4123 memcg->swappiness = val; 4124 else 4125 vm_swappiness = val; 4126 4127 return 0; 4128 } 4129 4130 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4131 { 4132 struct mem_cgroup_threshold_ary *t; 4133 unsigned long usage; 4134 int i; 4135 4136 rcu_read_lock(); 4137 if (!swap) 4138 t = rcu_dereference(memcg->thresholds.primary); 4139 else 4140 t = rcu_dereference(memcg->memsw_thresholds.primary); 4141 4142 if (!t) 4143 goto unlock; 4144 4145 usage = mem_cgroup_usage(memcg, swap); 4146 4147 /* 4148 * current_threshold points to threshold just below or equal to usage. 4149 * If it's not true, a threshold was crossed after last 4150 * call of __mem_cgroup_threshold(). 4151 */ 4152 i = t->current_threshold; 4153 4154 /* 4155 * Iterate backward over array of thresholds starting from 4156 * current_threshold and check if a threshold is crossed. 4157 * If none of thresholds below usage is crossed, we read 4158 * only one element of the array here. 4159 */ 4160 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4161 eventfd_signal(t->entries[i].eventfd, 1); 4162 4163 /* i = current_threshold + 1 */ 4164 i++; 4165 4166 /* 4167 * Iterate forward over array of thresholds starting from 4168 * current_threshold+1 and check if a threshold is crossed. 4169 * If none of thresholds above usage is crossed, we read 4170 * only one element of the array here. 4171 */ 4172 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4173 eventfd_signal(t->entries[i].eventfd, 1); 4174 4175 /* Update current_threshold */ 4176 t->current_threshold = i - 1; 4177 unlock: 4178 rcu_read_unlock(); 4179 } 4180 4181 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4182 { 4183 while (memcg) { 4184 __mem_cgroup_threshold(memcg, false); 4185 if (do_memsw_account()) 4186 __mem_cgroup_threshold(memcg, true); 4187 4188 memcg = parent_mem_cgroup(memcg); 4189 } 4190 } 4191 4192 static int compare_thresholds(const void *a, const void *b) 4193 { 4194 const struct mem_cgroup_threshold *_a = a; 4195 const struct mem_cgroup_threshold *_b = b; 4196 4197 if (_a->threshold > _b->threshold) 4198 return 1; 4199 4200 if (_a->threshold < _b->threshold) 4201 return -1; 4202 4203 return 0; 4204 } 4205 4206 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4207 { 4208 struct mem_cgroup_eventfd_list *ev; 4209 4210 spin_lock(&memcg_oom_lock); 4211 4212 list_for_each_entry(ev, &memcg->oom_notify, list) 4213 eventfd_signal(ev->eventfd, 1); 4214 4215 spin_unlock(&memcg_oom_lock); 4216 return 0; 4217 } 4218 4219 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4220 { 4221 struct mem_cgroup *iter; 4222 4223 for_each_mem_cgroup_tree(iter, memcg) 4224 mem_cgroup_oom_notify_cb(iter); 4225 } 4226 4227 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4228 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4229 { 4230 struct mem_cgroup_thresholds *thresholds; 4231 struct mem_cgroup_threshold_ary *new; 4232 unsigned long threshold; 4233 unsigned long usage; 4234 int i, size, ret; 4235 4236 ret = page_counter_memparse(args, "-1", &threshold); 4237 if (ret) 4238 return ret; 4239 4240 mutex_lock(&memcg->thresholds_lock); 4241 4242 if (type == _MEM) { 4243 thresholds = &memcg->thresholds; 4244 usage = mem_cgroup_usage(memcg, false); 4245 } else if (type == _MEMSWAP) { 4246 thresholds = &memcg->memsw_thresholds; 4247 usage = mem_cgroup_usage(memcg, true); 4248 } else 4249 BUG(); 4250 4251 /* Check if a threshold crossed before adding a new one */ 4252 if (thresholds->primary) 4253 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4254 4255 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 4256 4257 /* Allocate memory for new array of thresholds */ 4258 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4259 if (!new) { 4260 ret = -ENOMEM; 4261 goto unlock; 4262 } 4263 new->size = size; 4264 4265 /* Copy thresholds (if any) to new array */ 4266 if (thresholds->primary) 4267 memcpy(new->entries, thresholds->primary->entries, 4268 flex_array_size(new, entries, size - 1)); 4269 4270 /* Add new threshold */ 4271 new->entries[size - 1].eventfd = eventfd; 4272 new->entries[size - 1].threshold = threshold; 4273 4274 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4275 sort(new->entries, size, sizeof(*new->entries), 4276 compare_thresholds, NULL); 4277 4278 /* Find current threshold */ 4279 new->current_threshold = -1; 4280 for (i = 0; i < size; i++) { 4281 if (new->entries[i].threshold <= usage) { 4282 /* 4283 * new->current_threshold will not be used until 4284 * rcu_assign_pointer(), so it's safe to increment 4285 * it here. 4286 */ 4287 ++new->current_threshold; 4288 } else 4289 break; 4290 } 4291 4292 /* Free old spare buffer and save old primary buffer as spare */ 4293 kfree(thresholds->spare); 4294 thresholds->spare = thresholds->primary; 4295 4296 rcu_assign_pointer(thresholds->primary, new); 4297 4298 /* To be sure that nobody uses thresholds */ 4299 synchronize_rcu(); 4300 4301 unlock: 4302 mutex_unlock(&memcg->thresholds_lock); 4303 4304 return ret; 4305 } 4306 4307 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4308 struct eventfd_ctx *eventfd, const char *args) 4309 { 4310 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4311 } 4312 4313 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4314 struct eventfd_ctx *eventfd, const char *args) 4315 { 4316 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4317 } 4318 4319 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4320 struct eventfd_ctx *eventfd, enum res_type type) 4321 { 4322 struct mem_cgroup_thresholds *thresholds; 4323 struct mem_cgroup_threshold_ary *new; 4324 unsigned long usage; 4325 int i, j, size, entries; 4326 4327 mutex_lock(&memcg->thresholds_lock); 4328 4329 if (type == _MEM) { 4330 thresholds = &memcg->thresholds; 4331 usage = mem_cgroup_usage(memcg, false); 4332 } else if (type == _MEMSWAP) { 4333 thresholds = &memcg->memsw_thresholds; 4334 usage = mem_cgroup_usage(memcg, true); 4335 } else 4336 BUG(); 4337 4338 if (!thresholds->primary) 4339 goto unlock; 4340 4341 /* Check if a threshold crossed before removing */ 4342 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4343 4344 /* Calculate new number of threshold */ 4345 size = entries = 0; 4346 for (i = 0; i < thresholds->primary->size; i++) { 4347 if (thresholds->primary->entries[i].eventfd != eventfd) 4348 size++; 4349 else 4350 entries++; 4351 } 4352 4353 new = thresholds->spare; 4354 4355 /* If no items related to eventfd have been cleared, nothing to do */ 4356 if (!entries) 4357 goto unlock; 4358 4359 /* Set thresholds array to NULL if we don't have thresholds */ 4360 if (!size) { 4361 kfree(new); 4362 new = NULL; 4363 goto swap_buffers; 4364 } 4365 4366 new->size = size; 4367 4368 /* Copy thresholds and find current threshold */ 4369 new->current_threshold = -1; 4370 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4371 if (thresholds->primary->entries[i].eventfd == eventfd) 4372 continue; 4373 4374 new->entries[j] = thresholds->primary->entries[i]; 4375 if (new->entries[j].threshold <= usage) { 4376 /* 4377 * new->current_threshold will not be used 4378 * until rcu_assign_pointer(), so it's safe to increment 4379 * it here. 4380 */ 4381 ++new->current_threshold; 4382 } 4383 j++; 4384 } 4385 4386 swap_buffers: 4387 /* Swap primary and spare array */ 4388 thresholds->spare = thresholds->primary; 4389 4390 rcu_assign_pointer(thresholds->primary, new); 4391 4392 /* To be sure that nobody uses thresholds */ 4393 synchronize_rcu(); 4394 4395 /* If all events are unregistered, free the spare array */ 4396 if (!new) { 4397 kfree(thresholds->spare); 4398 thresholds->spare = NULL; 4399 } 4400 unlock: 4401 mutex_unlock(&memcg->thresholds_lock); 4402 } 4403 4404 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4405 struct eventfd_ctx *eventfd) 4406 { 4407 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4408 } 4409 4410 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4411 struct eventfd_ctx *eventfd) 4412 { 4413 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4414 } 4415 4416 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4417 struct eventfd_ctx *eventfd, const char *args) 4418 { 4419 struct mem_cgroup_eventfd_list *event; 4420 4421 event = kmalloc(sizeof(*event), GFP_KERNEL); 4422 if (!event) 4423 return -ENOMEM; 4424 4425 spin_lock(&memcg_oom_lock); 4426 4427 event->eventfd = eventfd; 4428 list_add(&event->list, &memcg->oom_notify); 4429 4430 /* already in OOM ? */ 4431 if (memcg->under_oom) 4432 eventfd_signal(eventfd, 1); 4433 spin_unlock(&memcg_oom_lock); 4434 4435 return 0; 4436 } 4437 4438 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4439 struct eventfd_ctx *eventfd) 4440 { 4441 struct mem_cgroup_eventfd_list *ev, *tmp; 4442 4443 spin_lock(&memcg_oom_lock); 4444 4445 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4446 if (ev->eventfd == eventfd) { 4447 list_del(&ev->list); 4448 kfree(ev); 4449 } 4450 } 4451 4452 spin_unlock(&memcg_oom_lock); 4453 } 4454 4455 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4456 { 4457 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4458 4459 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4460 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4461 seq_printf(sf, "oom_kill %lu\n", 4462 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4463 return 0; 4464 } 4465 4466 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4467 struct cftype *cft, u64 val) 4468 { 4469 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4470 4471 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4472 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) 4473 return -EINVAL; 4474 4475 memcg->oom_kill_disable = val; 4476 if (!val) 4477 memcg_oom_recover(memcg); 4478 4479 return 0; 4480 } 4481 4482 #ifdef CONFIG_CGROUP_WRITEBACK 4483 4484 #include <trace/events/writeback.h> 4485 4486 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4487 { 4488 return wb_domain_init(&memcg->cgwb_domain, gfp); 4489 } 4490 4491 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4492 { 4493 wb_domain_exit(&memcg->cgwb_domain); 4494 } 4495 4496 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4497 { 4498 wb_domain_size_changed(&memcg->cgwb_domain); 4499 } 4500 4501 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4502 { 4503 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4504 4505 if (!memcg->css.parent) 4506 return NULL; 4507 4508 return &memcg->cgwb_domain; 4509 } 4510 4511 /** 4512 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4513 * @wb: bdi_writeback in question 4514 * @pfilepages: out parameter for number of file pages 4515 * @pheadroom: out parameter for number of allocatable pages according to memcg 4516 * @pdirty: out parameter for number of dirty pages 4517 * @pwriteback: out parameter for number of pages under writeback 4518 * 4519 * Determine the numbers of file, headroom, dirty, and writeback pages in 4520 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4521 * is a bit more involved. 4522 * 4523 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4524 * headroom is calculated as the lowest headroom of itself and the 4525 * ancestors. Note that this doesn't consider the actual amount of 4526 * available memory in the system. The caller should further cap 4527 * *@pheadroom accordingly. 4528 */ 4529 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4530 unsigned long *pheadroom, unsigned long *pdirty, 4531 unsigned long *pwriteback) 4532 { 4533 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4534 struct mem_cgroup *parent; 4535 4536 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); 4537 4538 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 4539 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 4540 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 4541 memcg_page_state(memcg, NR_ACTIVE_FILE); 4542 4543 *pheadroom = PAGE_COUNTER_MAX; 4544 while ((parent = parent_mem_cgroup(memcg))) { 4545 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4546 READ_ONCE(memcg->memory.high)); 4547 unsigned long used = page_counter_read(&memcg->memory); 4548 4549 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4550 memcg = parent; 4551 } 4552 } 4553 4554 /* 4555 * Foreign dirty flushing 4556 * 4557 * There's an inherent mismatch between memcg and writeback. The former 4558 * tracks ownership per-page while the latter per-inode. This was a 4559 * deliberate design decision because honoring per-page ownership in the 4560 * writeback path is complicated, may lead to higher CPU and IO overheads 4561 * and deemed unnecessary given that write-sharing an inode across 4562 * different cgroups isn't a common use-case. 4563 * 4564 * Combined with inode majority-writer ownership switching, this works well 4565 * enough in most cases but there are some pathological cases. For 4566 * example, let's say there are two cgroups A and B which keep writing to 4567 * different but confined parts of the same inode. B owns the inode and 4568 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4569 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4570 * triggering background writeback. A will be slowed down without a way to 4571 * make writeback of the dirty pages happen. 4572 * 4573 * Conditions like the above can lead to a cgroup getting repeatedly and 4574 * severely throttled after making some progress after each 4575 * dirty_expire_interval while the underlying IO device is almost 4576 * completely idle. 4577 * 4578 * Solving this problem completely requires matching the ownership tracking 4579 * granularities between memcg and writeback in either direction. However, 4580 * the more egregious behaviors can be avoided by simply remembering the 4581 * most recent foreign dirtying events and initiating remote flushes on 4582 * them when local writeback isn't enough to keep the memory clean enough. 4583 * 4584 * The following two functions implement such mechanism. When a foreign 4585 * page - a page whose memcg and writeback ownerships don't match - is 4586 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4587 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4588 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4589 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4590 * foreign bdi_writebacks which haven't expired. Both the numbers of 4591 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4592 * limited to MEMCG_CGWB_FRN_CNT. 4593 * 4594 * The mechanism only remembers IDs and doesn't hold any object references. 4595 * As being wrong occasionally doesn't matter, updates and accesses to the 4596 * records are lockless and racy. 4597 */ 4598 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, 4599 struct bdi_writeback *wb) 4600 { 4601 struct mem_cgroup *memcg = page_memcg(page); 4602 struct memcg_cgwb_frn *frn; 4603 u64 now = get_jiffies_64(); 4604 u64 oldest_at = now; 4605 int oldest = -1; 4606 int i; 4607 4608 trace_track_foreign_dirty(page, wb); 4609 4610 /* 4611 * Pick the slot to use. If there is already a slot for @wb, keep 4612 * using it. If not replace the oldest one which isn't being 4613 * written out. 4614 */ 4615 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4616 frn = &memcg->cgwb_frn[i]; 4617 if (frn->bdi_id == wb->bdi->id && 4618 frn->memcg_id == wb->memcg_css->id) 4619 break; 4620 if (time_before64(frn->at, oldest_at) && 4621 atomic_read(&frn->done.cnt) == 1) { 4622 oldest = i; 4623 oldest_at = frn->at; 4624 } 4625 } 4626 4627 if (i < MEMCG_CGWB_FRN_CNT) { 4628 /* 4629 * Re-using an existing one. Update timestamp lazily to 4630 * avoid making the cacheline hot. We want them to be 4631 * reasonably up-to-date and significantly shorter than 4632 * dirty_expire_interval as that's what expires the record. 4633 * Use the shorter of 1s and dirty_expire_interval / 8. 4634 */ 4635 unsigned long update_intv = 4636 min_t(unsigned long, HZ, 4637 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4638 4639 if (time_before64(frn->at, now - update_intv)) 4640 frn->at = now; 4641 } else if (oldest >= 0) { 4642 /* replace the oldest free one */ 4643 frn = &memcg->cgwb_frn[oldest]; 4644 frn->bdi_id = wb->bdi->id; 4645 frn->memcg_id = wb->memcg_css->id; 4646 frn->at = now; 4647 } 4648 } 4649 4650 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4651 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4652 { 4653 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4654 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4655 u64 now = jiffies_64; 4656 int i; 4657 4658 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4659 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4660 4661 /* 4662 * If the record is older than dirty_expire_interval, 4663 * writeback on it has already started. No need to kick it 4664 * off again. Also, don't start a new one if there's 4665 * already one in flight. 4666 */ 4667 if (time_after64(frn->at, now - intv) && 4668 atomic_read(&frn->done.cnt) == 1) { 4669 frn->at = 0; 4670 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4671 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4672 WB_REASON_FOREIGN_FLUSH, 4673 &frn->done); 4674 } 4675 } 4676 } 4677 4678 #else /* CONFIG_CGROUP_WRITEBACK */ 4679 4680 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4681 { 4682 return 0; 4683 } 4684 4685 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4686 { 4687 } 4688 4689 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4690 { 4691 } 4692 4693 #endif /* CONFIG_CGROUP_WRITEBACK */ 4694 4695 /* 4696 * DO NOT USE IN NEW FILES. 4697 * 4698 * "cgroup.event_control" implementation. 4699 * 4700 * This is way over-engineered. It tries to support fully configurable 4701 * events for each user. Such level of flexibility is completely 4702 * unnecessary especially in the light of the planned unified hierarchy. 4703 * 4704 * Please deprecate this and replace with something simpler if at all 4705 * possible. 4706 */ 4707 4708 /* 4709 * Unregister event and free resources. 4710 * 4711 * Gets called from workqueue. 4712 */ 4713 static void memcg_event_remove(struct work_struct *work) 4714 { 4715 struct mem_cgroup_event *event = 4716 container_of(work, struct mem_cgroup_event, remove); 4717 struct mem_cgroup *memcg = event->memcg; 4718 4719 remove_wait_queue(event->wqh, &event->wait); 4720 4721 event->unregister_event(memcg, event->eventfd); 4722 4723 /* Notify userspace the event is going away. */ 4724 eventfd_signal(event->eventfd, 1); 4725 4726 eventfd_ctx_put(event->eventfd); 4727 kfree(event); 4728 css_put(&memcg->css); 4729 } 4730 4731 /* 4732 * Gets called on EPOLLHUP on eventfd when user closes it. 4733 * 4734 * Called with wqh->lock held and interrupts disabled. 4735 */ 4736 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4737 int sync, void *key) 4738 { 4739 struct mem_cgroup_event *event = 4740 container_of(wait, struct mem_cgroup_event, wait); 4741 struct mem_cgroup *memcg = event->memcg; 4742 __poll_t flags = key_to_poll(key); 4743 4744 if (flags & EPOLLHUP) { 4745 /* 4746 * If the event has been detached at cgroup removal, we 4747 * can simply return knowing the other side will cleanup 4748 * for us. 4749 * 4750 * We can't race against event freeing since the other 4751 * side will require wqh->lock via remove_wait_queue(), 4752 * which we hold. 4753 */ 4754 spin_lock(&memcg->event_list_lock); 4755 if (!list_empty(&event->list)) { 4756 list_del_init(&event->list); 4757 /* 4758 * We are in atomic context, but cgroup_event_remove() 4759 * may sleep, so we have to call it in workqueue. 4760 */ 4761 schedule_work(&event->remove); 4762 } 4763 spin_unlock(&memcg->event_list_lock); 4764 } 4765 4766 return 0; 4767 } 4768 4769 static void memcg_event_ptable_queue_proc(struct file *file, 4770 wait_queue_head_t *wqh, poll_table *pt) 4771 { 4772 struct mem_cgroup_event *event = 4773 container_of(pt, struct mem_cgroup_event, pt); 4774 4775 event->wqh = wqh; 4776 add_wait_queue(wqh, &event->wait); 4777 } 4778 4779 /* 4780 * DO NOT USE IN NEW FILES. 4781 * 4782 * Parse input and register new cgroup event handler. 4783 * 4784 * Input must be in format '<event_fd> <control_fd> <args>'. 4785 * Interpretation of args is defined by control file implementation. 4786 */ 4787 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4788 char *buf, size_t nbytes, loff_t off) 4789 { 4790 struct cgroup_subsys_state *css = of_css(of); 4791 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4792 struct mem_cgroup_event *event; 4793 struct cgroup_subsys_state *cfile_css; 4794 unsigned int efd, cfd; 4795 struct fd efile; 4796 struct fd cfile; 4797 const char *name; 4798 char *endp; 4799 int ret; 4800 4801 buf = strstrip(buf); 4802 4803 efd = simple_strtoul(buf, &endp, 10); 4804 if (*endp != ' ') 4805 return -EINVAL; 4806 buf = endp + 1; 4807 4808 cfd = simple_strtoul(buf, &endp, 10); 4809 if ((*endp != ' ') && (*endp != '\0')) 4810 return -EINVAL; 4811 buf = endp + 1; 4812 4813 event = kzalloc(sizeof(*event), GFP_KERNEL); 4814 if (!event) 4815 return -ENOMEM; 4816 4817 event->memcg = memcg; 4818 INIT_LIST_HEAD(&event->list); 4819 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4820 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4821 INIT_WORK(&event->remove, memcg_event_remove); 4822 4823 efile = fdget(efd); 4824 if (!efile.file) { 4825 ret = -EBADF; 4826 goto out_kfree; 4827 } 4828 4829 event->eventfd = eventfd_ctx_fileget(efile.file); 4830 if (IS_ERR(event->eventfd)) { 4831 ret = PTR_ERR(event->eventfd); 4832 goto out_put_efile; 4833 } 4834 4835 cfile = fdget(cfd); 4836 if (!cfile.file) { 4837 ret = -EBADF; 4838 goto out_put_eventfd; 4839 } 4840 4841 /* the process need read permission on control file */ 4842 /* AV: shouldn't we check that it's been opened for read instead? */ 4843 ret = file_permission(cfile.file, MAY_READ); 4844 if (ret < 0) 4845 goto out_put_cfile; 4846 4847 /* 4848 * Determine the event callbacks and set them in @event. This used 4849 * to be done via struct cftype but cgroup core no longer knows 4850 * about these events. The following is crude but the whole thing 4851 * is for compatibility anyway. 4852 * 4853 * DO NOT ADD NEW FILES. 4854 */ 4855 name = cfile.file->f_path.dentry->d_name.name; 4856 4857 if (!strcmp(name, "memory.usage_in_bytes")) { 4858 event->register_event = mem_cgroup_usage_register_event; 4859 event->unregister_event = mem_cgroup_usage_unregister_event; 4860 } else if (!strcmp(name, "memory.oom_control")) { 4861 event->register_event = mem_cgroup_oom_register_event; 4862 event->unregister_event = mem_cgroup_oom_unregister_event; 4863 } else if (!strcmp(name, "memory.pressure_level")) { 4864 event->register_event = vmpressure_register_event; 4865 event->unregister_event = vmpressure_unregister_event; 4866 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4867 event->register_event = memsw_cgroup_usage_register_event; 4868 event->unregister_event = memsw_cgroup_usage_unregister_event; 4869 } else { 4870 ret = -EINVAL; 4871 goto out_put_cfile; 4872 } 4873 4874 /* 4875 * Verify @cfile should belong to @css. Also, remaining events are 4876 * automatically removed on cgroup destruction but the removal is 4877 * asynchronous, so take an extra ref on @css. 4878 */ 4879 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4880 &memory_cgrp_subsys); 4881 ret = -EINVAL; 4882 if (IS_ERR(cfile_css)) 4883 goto out_put_cfile; 4884 if (cfile_css != css) { 4885 css_put(cfile_css); 4886 goto out_put_cfile; 4887 } 4888 4889 ret = event->register_event(memcg, event->eventfd, buf); 4890 if (ret) 4891 goto out_put_css; 4892 4893 vfs_poll(efile.file, &event->pt); 4894 4895 spin_lock(&memcg->event_list_lock); 4896 list_add(&event->list, &memcg->event_list); 4897 spin_unlock(&memcg->event_list_lock); 4898 4899 fdput(cfile); 4900 fdput(efile); 4901 4902 return nbytes; 4903 4904 out_put_css: 4905 css_put(css); 4906 out_put_cfile: 4907 fdput(cfile); 4908 out_put_eventfd: 4909 eventfd_ctx_put(event->eventfd); 4910 out_put_efile: 4911 fdput(efile); 4912 out_kfree: 4913 kfree(event); 4914 4915 return ret; 4916 } 4917 4918 static struct cftype mem_cgroup_legacy_files[] = { 4919 { 4920 .name = "usage_in_bytes", 4921 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4922 .read_u64 = mem_cgroup_read_u64, 4923 }, 4924 { 4925 .name = "max_usage_in_bytes", 4926 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4927 .write = mem_cgroup_reset, 4928 .read_u64 = mem_cgroup_read_u64, 4929 }, 4930 { 4931 .name = "limit_in_bytes", 4932 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4933 .write = mem_cgroup_write, 4934 .read_u64 = mem_cgroup_read_u64, 4935 }, 4936 { 4937 .name = "soft_limit_in_bytes", 4938 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4939 .write = mem_cgroup_write, 4940 .read_u64 = mem_cgroup_read_u64, 4941 }, 4942 { 4943 .name = "failcnt", 4944 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4945 .write = mem_cgroup_reset, 4946 .read_u64 = mem_cgroup_read_u64, 4947 }, 4948 { 4949 .name = "stat", 4950 .seq_show = memcg_stat_show, 4951 }, 4952 { 4953 .name = "force_empty", 4954 .write = mem_cgroup_force_empty_write, 4955 }, 4956 { 4957 .name = "use_hierarchy", 4958 .write_u64 = mem_cgroup_hierarchy_write, 4959 .read_u64 = mem_cgroup_hierarchy_read, 4960 }, 4961 { 4962 .name = "cgroup.event_control", /* XXX: for compat */ 4963 .write = memcg_write_event_control, 4964 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4965 }, 4966 { 4967 .name = "swappiness", 4968 .read_u64 = mem_cgroup_swappiness_read, 4969 .write_u64 = mem_cgroup_swappiness_write, 4970 }, 4971 { 4972 .name = "move_charge_at_immigrate", 4973 .read_u64 = mem_cgroup_move_charge_read, 4974 .write_u64 = mem_cgroup_move_charge_write, 4975 }, 4976 { 4977 .name = "oom_control", 4978 .seq_show = mem_cgroup_oom_control_read, 4979 .write_u64 = mem_cgroup_oom_control_write, 4980 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4981 }, 4982 { 4983 .name = "pressure_level", 4984 }, 4985 #ifdef CONFIG_NUMA 4986 { 4987 .name = "numa_stat", 4988 .seq_show = memcg_numa_stat_show, 4989 }, 4990 #endif 4991 { 4992 .name = "kmem.limit_in_bytes", 4993 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4994 .write = mem_cgroup_write, 4995 .read_u64 = mem_cgroup_read_u64, 4996 }, 4997 { 4998 .name = "kmem.usage_in_bytes", 4999 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5000 .read_u64 = mem_cgroup_read_u64, 5001 }, 5002 { 5003 .name = "kmem.failcnt", 5004 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5005 .write = mem_cgroup_reset, 5006 .read_u64 = mem_cgroup_read_u64, 5007 }, 5008 { 5009 .name = "kmem.max_usage_in_bytes", 5010 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5011 .write = mem_cgroup_reset, 5012 .read_u64 = mem_cgroup_read_u64, 5013 }, 5014 #if defined(CONFIG_MEMCG_KMEM) && \ 5015 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5016 { 5017 .name = "kmem.slabinfo", 5018 .seq_show = memcg_slab_show, 5019 }, 5020 #endif 5021 { 5022 .name = "kmem.tcp.limit_in_bytes", 5023 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5024 .write = mem_cgroup_write, 5025 .read_u64 = mem_cgroup_read_u64, 5026 }, 5027 { 5028 .name = "kmem.tcp.usage_in_bytes", 5029 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5030 .read_u64 = mem_cgroup_read_u64, 5031 }, 5032 { 5033 .name = "kmem.tcp.failcnt", 5034 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5035 .write = mem_cgroup_reset, 5036 .read_u64 = mem_cgroup_read_u64, 5037 }, 5038 { 5039 .name = "kmem.tcp.max_usage_in_bytes", 5040 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5041 .write = mem_cgroup_reset, 5042 .read_u64 = mem_cgroup_read_u64, 5043 }, 5044 { }, /* terminate */ 5045 }; 5046 5047 /* 5048 * Private memory cgroup IDR 5049 * 5050 * Swap-out records and page cache shadow entries need to store memcg 5051 * references in constrained space, so we maintain an ID space that is 5052 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5053 * memory-controlled cgroups to 64k. 5054 * 5055 * However, there usually are many references to the offline CSS after 5056 * the cgroup has been destroyed, such as page cache or reclaimable 5057 * slab objects, that don't need to hang on to the ID. We want to keep 5058 * those dead CSS from occupying IDs, or we might quickly exhaust the 5059 * relatively small ID space and prevent the creation of new cgroups 5060 * even when there are much fewer than 64k cgroups - possibly none. 5061 * 5062 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5063 * be freed and recycled when it's no longer needed, which is usually 5064 * when the CSS is offlined. 5065 * 5066 * The only exception to that are records of swapped out tmpfs/shmem 5067 * pages that need to be attributed to live ancestors on swapin. But 5068 * those references are manageable from userspace. 5069 */ 5070 5071 static DEFINE_IDR(mem_cgroup_idr); 5072 5073 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5074 { 5075 if (memcg->id.id > 0) { 5076 idr_remove(&mem_cgroup_idr, memcg->id.id); 5077 memcg->id.id = 0; 5078 } 5079 } 5080 5081 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5082 unsigned int n) 5083 { 5084 refcount_add(n, &memcg->id.ref); 5085 } 5086 5087 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5088 { 5089 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5090 mem_cgroup_id_remove(memcg); 5091 5092 /* Memcg ID pins CSS */ 5093 css_put(&memcg->css); 5094 } 5095 } 5096 5097 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5098 { 5099 mem_cgroup_id_put_many(memcg, 1); 5100 } 5101 5102 /** 5103 * mem_cgroup_from_id - look up a memcg from a memcg id 5104 * @id: the memcg id to look up 5105 * 5106 * Caller must hold rcu_read_lock(). 5107 */ 5108 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5109 { 5110 WARN_ON_ONCE(!rcu_read_lock_held()); 5111 return idr_find(&mem_cgroup_idr, id); 5112 } 5113 5114 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5115 { 5116 struct mem_cgroup_per_node *pn; 5117 int tmp = node; 5118 /* 5119 * This routine is called against possible nodes. 5120 * But it's BUG to call kmalloc() against offline node. 5121 * 5122 * TODO: this routine can waste much memory for nodes which will 5123 * never be onlined. It's better to use memory hotplug callback 5124 * function. 5125 */ 5126 if (!node_state(node, N_NORMAL_MEMORY)) 5127 tmp = -1; 5128 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5129 if (!pn) 5130 return 1; 5131 5132 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5133 GFP_KERNEL_ACCOUNT); 5134 if (!pn->lruvec_stat_local) { 5135 kfree(pn); 5136 return 1; 5137 } 5138 5139 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat, 5140 GFP_KERNEL_ACCOUNT); 5141 if (!pn->lruvec_stat_cpu) { 5142 free_percpu(pn->lruvec_stat_local); 5143 kfree(pn); 5144 return 1; 5145 } 5146 5147 lruvec_init(&pn->lruvec); 5148 pn->usage_in_excess = 0; 5149 pn->on_tree = false; 5150 pn->memcg = memcg; 5151 5152 memcg->nodeinfo[node] = pn; 5153 return 0; 5154 } 5155 5156 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5157 { 5158 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5159 5160 if (!pn) 5161 return; 5162 5163 free_percpu(pn->lruvec_stat_cpu); 5164 free_percpu(pn->lruvec_stat_local); 5165 kfree(pn); 5166 } 5167 5168 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5169 { 5170 int node; 5171 5172 for_each_node(node) 5173 free_mem_cgroup_per_node_info(memcg, node); 5174 free_percpu(memcg->vmstats_percpu); 5175 kfree(memcg); 5176 } 5177 5178 static void mem_cgroup_free(struct mem_cgroup *memcg) 5179 { 5180 int cpu; 5181 5182 memcg_wb_domain_exit(memcg); 5183 /* 5184 * Flush percpu lruvec stats to guarantee the value 5185 * correctness on parent's and all ancestor levels. 5186 */ 5187 for_each_online_cpu(cpu) 5188 memcg_flush_lruvec_page_state(memcg, cpu); 5189 __mem_cgroup_free(memcg); 5190 } 5191 5192 static struct mem_cgroup *mem_cgroup_alloc(void) 5193 { 5194 struct mem_cgroup *memcg; 5195 unsigned int size; 5196 int node; 5197 int __maybe_unused i; 5198 long error = -ENOMEM; 5199 5200 size = sizeof(struct mem_cgroup); 5201 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5202 5203 memcg = kzalloc(size, GFP_KERNEL); 5204 if (!memcg) 5205 return ERR_PTR(error); 5206 5207 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5208 1, MEM_CGROUP_ID_MAX, 5209 GFP_KERNEL); 5210 if (memcg->id.id < 0) { 5211 error = memcg->id.id; 5212 goto fail; 5213 } 5214 5215 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5216 GFP_KERNEL_ACCOUNT); 5217 if (!memcg->vmstats_percpu) 5218 goto fail; 5219 5220 for_each_node(node) 5221 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5222 goto fail; 5223 5224 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5225 goto fail; 5226 5227 INIT_WORK(&memcg->high_work, high_work_func); 5228 INIT_LIST_HEAD(&memcg->oom_notify); 5229 mutex_init(&memcg->thresholds_lock); 5230 spin_lock_init(&memcg->move_lock); 5231 vmpressure_init(&memcg->vmpressure); 5232 INIT_LIST_HEAD(&memcg->event_list); 5233 spin_lock_init(&memcg->event_list_lock); 5234 memcg->socket_pressure = jiffies; 5235 #ifdef CONFIG_MEMCG_KMEM 5236 memcg->kmemcg_id = -1; 5237 INIT_LIST_HEAD(&memcg->objcg_list); 5238 #endif 5239 #ifdef CONFIG_CGROUP_WRITEBACK 5240 INIT_LIST_HEAD(&memcg->cgwb_list); 5241 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5242 memcg->cgwb_frn[i].done = 5243 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5244 #endif 5245 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5246 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5247 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5248 memcg->deferred_split_queue.split_queue_len = 0; 5249 #endif 5250 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5251 return memcg; 5252 fail: 5253 mem_cgroup_id_remove(memcg); 5254 __mem_cgroup_free(memcg); 5255 return ERR_PTR(error); 5256 } 5257 5258 static struct cgroup_subsys_state * __ref 5259 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5260 { 5261 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5262 struct mem_cgroup *memcg, *old_memcg; 5263 long error = -ENOMEM; 5264 5265 old_memcg = set_active_memcg(parent); 5266 memcg = mem_cgroup_alloc(); 5267 set_active_memcg(old_memcg); 5268 if (IS_ERR(memcg)) 5269 return ERR_CAST(memcg); 5270 5271 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5272 memcg->soft_limit = PAGE_COUNTER_MAX; 5273 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5274 if (parent) { 5275 memcg->swappiness = mem_cgroup_swappiness(parent); 5276 memcg->oom_kill_disable = parent->oom_kill_disable; 5277 5278 page_counter_init(&memcg->memory, &parent->memory); 5279 page_counter_init(&memcg->swap, &parent->swap); 5280 page_counter_init(&memcg->kmem, &parent->kmem); 5281 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5282 } else { 5283 page_counter_init(&memcg->memory, NULL); 5284 page_counter_init(&memcg->swap, NULL); 5285 page_counter_init(&memcg->kmem, NULL); 5286 page_counter_init(&memcg->tcpmem, NULL); 5287 5288 root_mem_cgroup = memcg; 5289 return &memcg->css; 5290 } 5291 5292 /* The following stuff does not apply to the root */ 5293 error = memcg_online_kmem(memcg); 5294 if (error) 5295 goto fail; 5296 5297 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5298 static_branch_inc(&memcg_sockets_enabled_key); 5299 5300 return &memcg->css; 5301 fail: 5302 mem_cgroup_id_remove(memcg); 5303 mem_cgroup_free(memcg); 5304 return ERR_PTR(error); 5305 } 5306 5307 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5308 { 5309 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5310 5311 /* 5312 * A memcg must be visible for expand_shrinker_info() 5313 * by the time the maps are allocated. So, we allocate maps 5314 * here, when for_each_mem_cgroup() can't skip it. 5315 */ 5316 if (alloc_shrinker_info(memcg)) { 5317 mem_cgroup_id_remove(memcg); 5318 return -ENOMEM; 5319 } 5320 5321 /* Online state pins memcg ID, memcg ID pins CSS */ 5322 refcount_set(&memcg->id.ref, 1); 5323 css_get(css); 5324 return 0; 5325 } 5326 5327 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5328 { 5329 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5330 struct mem_cgroup_event *event, *tmp; 5331 5332 /* 5333 * Unregister events and notify userspace. 5334 * Notify userspace about cgroup removing only after rmdir of cgroup 5335 * directory to avoid race between userspace and kernelspace. 5336 */ 5337 spin_lock(&memcg->event_list_lock); 5338 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5339 list_del_init(&event->list); 5340 schedule_work(&event->remove); 5341 } 5342 spin_unlock(&memcg->event_list_lock); 5343 5344 page_counter_set_min(&memcg->memory, 0); 5345 page_counter_set_low(&memcg->memory, 0); 5346 5347 memcg_offline_kmem(memcg); 5348 reparent_shrinker_deferred(memcg); 5349 wb_memcg_offline(memcg); 5350 5351 drain_all_stock(memcg); 5352 5353 mem_cgroup_id_put(memcg); 5354 } 5355 5356 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5357 { 5358 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5359 5360 invalidate_reclaim_iterators(memcg); 5361 } 5362 5363 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5364 { 5365 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5366 int __maybe_unused i; 5367 5368 #ifdef CONFIG_CGROUP_WRITEBACK 5369 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5370 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5371 #endif 5372 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5373 static_branch_dec(&memcg_sockets_enabled_key); 5374 5375 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5376 static_branch_dec(&memcg_sockets_enabled_key); 5377 5378 vmpressure_cleanup(&memcg->vmpressure); 5379 cancel_work_sync(&memcg->high_work); 5380 mem_cgroup_remove_from_trees(memcg); 5381 free_shrinker_info(memcg); 5382 memcg_free_kmem(memcg); 5383 mem_cgroup_free(memcg); 5384 } 5385 5386 /** 5387 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5388 * @css: the target css 5389 * 5390 * Reset the states of the mem_cgroup associated with @css. This is 5391 * invoked when the userland requests disabling on the default hierarchy 5392 * but the memcg is pinned through dependency. The memcg should stop 5393 * applying policies and should revert to the vanilla state as it may be 5394 * made visible again. 5395 * 5396 * The current implementation only resets the essential configurations. 5397 * This needs to be expanded to cover all the visible parts. 5398 */ 5399 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5400 { 5401 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5402 5403 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5404 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5405 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5406 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5407 page_counter_set_min(&memcg->memory, 0); 5408 page_counter_set_low(&memcg->memory, 0); 5409 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5410 memcg->soft_limit = PAGE_COUNTER_MAX; 5411 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5412 memcg_wb_domain_size_changed(memcg); 5413 } 5414 5415 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 5416 { 5417 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5418 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5419 struct memcg_vmstats_percpu *statc; 5420 long delta, v; 5421 int i; 5422 5423 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5424 5425 for (i = 0; i < MEMCG_NR_STAT; i++) { 5426 /* 5427 * Collect the aggregated propagation counts of groups 5428 * below us. We're in a per-cpu loop here and this is 5429 * a global counter, so the first cycle will get them. 5430 */ 5431 delta = memcg->vmstats.state_pending[i]; 5432 if (delta) 5433 memcg->vmstats.state_pending[i] = 0; 5434 5435 /* Add CPU changes on this level since the last flush */ 5436 v = READ_ONCE(statc->state[i]); 5437 if (v != statc->state_prev[i]) { 5438 delta += v - statc->state_prev[i]; 5439 statc->state_prev[i] = v; 5440 } 5441 5442 if (!delta) 5443 continue; 5444 5445 /* Aggregate counts on this level and propagate upwards */ 5446 memcg->vmstats.state[i] += delta; 5447 if (parent) 5448 parent->vmstats.state_pending[i] += delta; 5449 } 5450 5451 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 5452 delta = memcg->vmstats.events_pending[i]; 5453 if (delta) 5454 memcg->vmstats.events_pending[i] = 0; 5455 5456 v = READ_ONCE(statc->events[i]); 5457 if (v != statc->events_prev[i]) { 5458 delta += v - statc->events_prev[i]; 5459 statc->events_prev[i] = v; 5460 } 5461 5462 if (!delta) 5463 continue; 5464 5465 memcg->vmstats.events[i] += delta; 5466 if (parent) 5467 parent->vmstats.events_pending[i] += delta; 5468 } 5469 } 5470 5471 #ifdef CONFIG_MMU 5472 /* Handlers for move charge at task migration. */ 5473 static int mem_cgroup_do_precharge(unsigned long count) 5474 { 5475 int ret; 5476 5477 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5478 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5479 if (!ret) { 5480 mc.precharge += count; 5481 return ret; 5482 } 5483 5484 /* Try charges one by one with reclaim, but do not retry */ 5485 while (count--) { 5486 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5487 if (ret) 5488 return ret; 5489 mc.precharge++; 5490 cond_resched(); 5491 } 5492 return 0; 5493 } 5494 5495 union mc_target { 5496 struct page *page; 5497 swp_entry_t ent; 5498 }; 5499 5500 enum mc_target_type { 5501 MC_TARGET_NONE = 0, 5502 MC_TARGET_PAGE, 5503 MC_TARGET_SWAP, 5504 MC_TARGET_DEVICE, 5505 }; 5506 5507 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5508 unsigned long addr, pte_t ptent) 5509 { 5510 struct page *page = vm_normal_page(vma, addr, ptent); 5511 5512 if (!page || !page_mapped(page)) 5513 return NULL; 5514 if (PageAnon(page)) { 5515 if (!(mc.flags & MOVE_ANON)) 5516 return NULL; 5517 } else { 5518 if (!(mc.flags & MOVE_FILE)) 5519 return NULL; 5520 } 5521 if (!get_page_unless_zero(page)) 5522 return NULL; 5523 5524 return page; 5525 } 5526 5527 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5528 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5529 pte_t ptent, swp_entry_t *entry) 5530 { 5531 struct page *page = NULL; 5532 swp_entry_t ent = pte_to_swp_entry(ptent); 5533 5534 if (!(mc.flags & MOVE_ANON)) 5535 return NULL; 5536 5537 /* 5538 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to 5539 * a device and because they are not accessible by CPU they are store 5540 * as special swap entry in the CPU page table. 5541 */ 5542 if (is_device_private_entry(ent)) { 5543 page = pfn_swap_entry_to_page(ent); 5544 /* 5545 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have 5546 * a refcount of 1 when free (unlike normal page) 5547 */ 5548 if (!page_ref_add_unless(page, 1, 1)) 5549 return NULL; 5550 return page; 5551 } 5552 5553 if (non_swap_entry(ent)) 5554 return NULL; 5555 5556 /* 5557 * Because lookup_swap_cache() updates some statistics counter, 5558 * we call find_get_page() with swapper_space directly. 5559 */ 5560 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5561 entry->val = ent.val; 5562 5563 return page; 5564 } 5565 #else 5566 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5567 pte_t ptent, swp_entry_t *entry) 5568 { 5569 return NULL; 5570 } 5571 #endif 5572 5573 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5574 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5575 { 5576 if (!vma->vm_file) /* anonymous vma */ 5577 return NULL; 5578 if (!(mc.flags & MOVE_FILE)) 5579 return NULL; 5580 5581 /* page is moved even if it's not RSS of this task(page-faulted). */ 5582 /* shmem/tmpfs may report page out on swap: account for that too. */ 5583 return find_get_incore_page(vma->vm_file->f_mapping, 5584 linear_page_index(vma, addr)); 5585 } 5586 5587 /** 5588 * mem_cgroup_move_account - move account of the page 5589 * @page: the page 5590 * @compound: charge the page as compound or small page 5591 * @from: mem_cgroup which the page is moved from. 5592 * @to: mem_cgroup which the page is moved to. @from != @to. 5593 * 5594 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5595 * 5596 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5597 * from old cgroup. 5598 */ 5599 static int mem_cgroup_move_account(struct page *page, 5600 bool compound, 5601 struct mem_cgroup *from, 5602 struct mem_cgroup *to) 5603 { 5604 struct lruvec *from_vec, *to_vec; 5605 struct pglist_data *pgdat; 5606 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5607 int ret; 5608 5609 VM_BUG_ON(from == to); 5610 VM_BUG_ON_PAGE(PageLRU(page), page); 5611 VM_BUG_ON(compound && !PageTransHuge(page)); 5612 5613 /* 5614 * Prevent mem_cgroup_migrate() from looking at 5615 * page's memory cgroup of its source page while we change it. 5616 */ 5617 ret = -EBUSY; 5618 if (!trylock_page(page)) 5619 goto out; 5620 5621 ret = -EINVAL; 5622 if (page_memcg(page) != from) 5623 goto out_unlock; 5624 5625 pgdat = page_pgdat(page); 5626 from_vec = mem_cgroup_lruvec(from, pgdat); 5627 to_vec = mem_cgroup_lruvec(to, pgdat); 5628 5629 lock_page_memcg(page); 5630 5631 if (PageAnon(page)) { 5632 if (page_mapped(page)) { 5633 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5634 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5635 if (PageTransHuge(page)) { 5636 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5637 -nr_pages); 5638 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5639 nr_pages); 5640 } 5641 } 5642 } else { 5643 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5644 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5645 5646 if (PageSwapBacked(page)) { 5647 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5648 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5649 } 5650 5651 if (page_mapped(page)) { 5652 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5653 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5654 } 5655 5656 if (PageDirty(page)) { 5657 struct address_space *mapping = page_mapping(page); 5658 5659 if (mapping_can_writeback(mapping)) { 5660 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5661 -nr_pages); 5662 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5663 nr_pages); 5664 } 5665 } 5666 } 5667 5668 if (PageWriteback(page)) { 5669 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5670 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5671 } 5672 5673 /* 5674 * All state has been migrated, let's switch to the new memcg. 5675 * 5676 * It is safe to change page's memcg here because the page 5677 * is referenced, charged, isolated, and locked: we can't race 5678 * with (un)charging, migration, LRU putback, or anything else 5679 * that would rely on a stable page's memory cgroup. 5680 * 5681 * Note that lock_page_memcg is a memcg lock, not a page lock, 5682 * to save space. As soon as we switch page's memory cgroup to a 5683 * new memcg that isn't locked, the above state can change 5684 * concurrently again. Make sure we're truly done with it. 5685 */ 5686 smp_mb(); 5687 5688 css_get(&to->css); 5689 css_put(&from->css); 5690 5691 page->memcg_data = (unsigned long)to; 5692 5693 __unlock_page_memcg(from); 5694 5695 ret = 0; 5696 5697 local_irq_disable(); 5698 mem_cgroup_charge_statistics(to, page, nr_pages); 5699 memcg_check_events(to, page); 5700 mem_cgroup_charge_statistics(from, page, -nr_pages); 5701 memcg_check_events(from, page); 5702 local_irq_enable(); 5703 out_unlock: 5704 unlock_page(page); 5705 out: 5706 return ret; 5707 } 5708 5709 /** 5710 * get_mctgt_type - get target type of moving charge 5711 * @vma: the vma the pte to be checked belongs 5712 * @addr: the address corresponding to the pte to be checked 5713 * @ptent: the pte to be checked 5714 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5715 * 5716 * Returns 5717 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5718 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5719 * move charge. if @target is not NULL, the page is stored in target->page 5720 * with extra refcnt got(Callers should handle it). 5721 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5722 * target for charge migration. if @target is not NULL, the entry is stored 5723 * in target->ent. 5724 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE 5725 * (so ZONE_DEVICE page and thus not on the lru). 5726 * For now we such page is charge like a regular page would be as for all 5727 * intent and purposes it is just special memory taking the place of a 5728 * regular page. 5729 * 5730 * See Documentations/vm/hmm.txt and include/linux/hmm.h 5731 * 5732 * Called with pte lock held. 5733 */ 5734 5735 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5736 unsigned long addr, pte_t ptent, union mc_target *target) 5737 { 5738 struct page *page = NULL; 5739 enum mc_target_type ret = MC_TARGET_NONE; 5740 swp_entry_t ent = { .val = 0 }; 5741 5742 if (pte_present(ptent)) 5743 page = mc_handle_present_pte(vma, addr, ptent); 5744 else if (is_swap_pte(ptent)) 5745 page = mc_handle_swap_pte(vma, ptent, &ent); 5746 else if (pte_none(ptent)) 5747 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5748 5749 if (!page && !ent.val) 5750 return ret; 5751 if (page) { 5752 /* 5753 * Do only loose check w/o serialization. 5754 * mem_cgroup_move_account() checks the page is valid or 5755 * not under LRU exclusion. 5756 */ 5757 if (page_memcg(page) == mc.from) { 5758 ret = MC_TARGET_PAGE; 5759 if (is_device_private_page(page)) 5760 ret = MC_TARGET_DEVICE; 5761 if (target) 5762 target->page = page; 5763 } 5764 if (!ret || !target) 5765 put_page(page); 5766 } 5767 /* 5768 * There is a swap entry and a page doesn't exist or isn't charged. 5769 * But we cannot move a tail-page in a THP. 5770 */ 5771 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5772 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5773 ret = MC_TARGET_SWAP; 5774 if (target) 5775 target->ent = ent; 5776 } 5777 return ret; 5778 } 5779 5780 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5781 /* 5782 * We don't consider PMD mapped swapping or file mapped pages because THP does 5783 * not support them for now. 5784 * Caller should make sure that pmd_trans_huge(pmd) is true. 5785 */ 5786 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5787 unsigned long addr, pmd_t pmd, union mc_target *target) 5788 { 5789 struct page *page = NULL; 5790 enum mc_target_type ret = MC_TARGET_NONE; 5791 5792 if (unlikely(is_swap_pmd(pmd))) { 5793 VM_BUG_ON(thp_migration_supported() && 5794 !is_pmd_migration_entry(pmd)); 5795 return ret; 5796 } 5797 page = pmd_page(pmd); 5798 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5799 if (!(mc.flags & MOVE_ANON)) 5800 return ret; 5801 if (page_memcg(page) == mc.from) { 5802 ret = MC_TARGET_PAGE; 5803 if (target) { 5804 get_page(page); 5805 target->page = page; 5806 } 5807 } 5808 return ret; 5809 } 5810 #else 5811 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5812 unsigned long addr, pmd_t pmd, union mc_target *target) 5813 { 5814 return MC_TARGET_NONE; 5815 } 5816 #endif 5817 5818 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5819 unsigned long addr, unsigned long end, 5820 struct mm_walk *walk) 5821 { 5822 struct vm_area_struct *vma = walk->vma; 5823 pte_t *pte; 5824 spinlock_t *ptl; 5825 5826 ptl = pmd_trans_huge_lock(pmd, vma); 5827 if (ptl) { 5828 /* 5829 * Note their can not be MC_TARGET_DEVICE for now as we do not 5830 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5831 * this might change. 5832 */ 5833 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5834 mc.precharge += HPAGE_PMD_NR; 5835 spin_unlock(ptl); 5836 return 0; 5837 } 5838 5839 if (pmd_trans_unstable(pmd)) 5840 return 0; 5841 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5842 for (; addr != end; pte++, addr += PAGE_SIZE) 5843 if (get_mctgt_type(vma, addr, *pte, NULL)) 5844 mc.precharge++; /* increment precharge temporarily */ 5845 pte_unmap_unlock(pte - 1, ptl); 5846 cond_resched(); 5847 5848 return 0; 5849 } 5850 5851 static const struct mm_walk_ops precharge_walk_ops = { 5852 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5853 }; 5854 5855 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5856 { 5857 unsigned long precharge; 5858 5859 mmap_read_lock(mm); 5860 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5861 mmap_read_unlock(mm); 5862 5863 precharge = mc.precharge; 5864 mc.precharge = 0; 5865 5866 return precharge; 5867 } 5868 5869 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5870 { 5871 unsigned long precharge = mem_cgroup_count_precharge(mm); 5872 5873 VM_BUG_ON(mc.moving_task); 5874 mc.moving_task = current; 5875 return mem_cgroup_do_precharge(precharge); 5876 } 5877 5878 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5879 static void __mem_cgroup_clear_mc(void) 5880 { 5881 struct mem_cgroup *from = mc.from; 5882 struct mem_cgroup *to = mc.to; 5883 5884 /* we must uncharge all the leftover precharges from mc.to */ 5885 if (mc.precharge) { 5886 cancel_charge(mc.to, mc.precharge); 5887 mc.precharge = 0; 5888 } 5889 /* 5890 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5891 * we must uncharge here. 5892 */ 5893 if (mc.moved_charge) { 5894 cancel_charge(mc.from, mc.moved_charge); 5895 mc.moved_charge = 0; 5896 } 5897 /* we must fixup refcnts and charges */ 5898 if (mc.moved_swap) { 5899 /* uncharge swap account from the old cgroup */ 5900 if (!mem_cgroup_is_root(mc.from)) 5901 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5902 5903 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5904 5905 /* 5906 * we charged both to->memory and to->memsw, so we 5907 * should uncharge to->memory. 5908 */ 5909 if (!mem_cgroup_is_root(mc.to)) 5910 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5911 5912 mc.moved_swap = 0; 5913 } 5914 memcg_oom_recover(from); 5915 memcg_oom_recover(to); 5916 wake_up_all(&mc.waitq); 5917 } 5918 5919 static void mem_cgroup_clear_mc(void) 5920 { 5921 struct mm_struct *mm = mc.mm; 5922 5923 /* 5924 * we must clear moving_task before waking up waiters at the end of 5925 * task migration. 5926 */ 5927 mc.moving_task = NULL; 5928 __mem_cgroup_clear_mc(); 5929 spin_lock(&mc.lock); 5930 mc.from = NULL; 5931 mc.to = NULL; 5932 mc.mm = NULL; 5933 spin_unlock(&mc.lock); 5934 5935 mmput(mm); 5936 } 5937 5938 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5939 { 5940 struct cgroup_subsys_state *css; 5941 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5942 struct mem_cgroup *from; 5943 struct task_struct *leader, *p; 5944 struct mm_struct *mm; 5945 unsigned long move_flags; 5946 int ret = 0; 5947 5948 /* charge immigration isn't supported on the default hierarchy */ 5949 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5950 return 0; 5951 5952 /* 5953 * Multi-process migrations only happen on the default hierarchy 5954 * where charge immigration is not used. Perform charge 5955 * immigration if @tset contains a leader and whine if there are 5956 * multiple. 5957 */ 5958 p = NULL; 5959 cgroup_taskset_for_each_leader(leader, css, tset) { 5960 WARN_ON_ONCE(p); 5961 p = leader; 5962 memcg = mem_cgroup_from_css(css); 5963 } 5964 if (!p) 5965 return 0; 5966 5967 /* 5968 * We are now committed to this value whatever it is. Changes in this 5969 * tunable will only affect upcoming migrations, not the current one. 5970 * So we need to save it, and keep it going. 5971 */ 5972 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5973 if (!move_flags) 5974 return 0; 5975 5976 from = mem_cgroup_from_task(p); 5977 5978 VM_BUG_ON(from == memcg); 5979 5980 mm = get_task_mm(p); 5981 if (!mm) 5982 return 0; 5983 /* We move charges only when we move a owner of the mm */ 5984 if (mm->owner == p) { 5985 VM_BUG_ON(mc.from); 5986 VM_BUG_ON(mc.to); 5987 VM_BUG_ON(mc.precharge); 5988 VM_BUG_ON(mc.moved_charge); 5989 VM_BUG_ON(mc.moved_swap); 5990 5991 spin_lock(&mc.lock); 5992 mc.mm = mm; 5993 mc.from = from; 5994 mc.to = memcg; 5995 mc.flags = move_flags; 5996 spin_unlock(&mc.lock); 5997 /* We set mc.moving_task later */ 5998 5999 ret = mem_cgroup_precharge_mc(mm); 6000 if (ret) 6001 mem_cgroup_clear_mc(); 6002 } else { 6003 mmput(mm); 6004 } 6005 return ret; 6006 } 6007 6008 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6009 { 6010 if (mc.to) 6011 mem_cgroup_clear_mc(); 6012 } 6013 6014 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6015 unsigned long addr, unsigned long end, 6016 struct mm_walk *walk) 6017 { 6018 int ret = 0; 6019 struct vm_area_struct *vma = walk->vma; 6020 pte_t *pte; 6021 spinlock_t *ptl; 6022 enum mc_target_type target_type; 6023 union mc_target target; 6024 struct page *page; 6025 6026 ptl = pmd_trans_huge_lock(pmd, vma); 6027 if (ptl) { 6028 if (mc.precharge < HPAGE_PMD_NR) { 6029 spin_unlock(ptl); 6030 return 0; 6031 } 6032 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6033 if (target_type == MC_TARGET_PAGE) { 6034 page = target.page; 6035 if (!isolate_lru_page(page)) { 6036 if (!mem_cgroup_move_account(page, true, 6037 mc.from, mc.to)) { 6038 mc.precharge -= HPAGE_PMD_NR; 6039 mc.moved_charge += HPAGE_PMD_NR; 6040 } 6041 putback_lru_page(page); 6042 } 6043 put_page(page); 6044 } else if (target_type == MC_TARGET_DEVICE) { 6045 page = target.page; 6046 if (!mem_cgroup_move_account(page, true, 6047 mc.from, mc.to)) { 6048 mc.precharge -= HPAGE_PMD_NR; 6049 mc.moved_charge += HPAGE_PMD_NR; 6050 } 6051 put_page(page); 6052 } 6053 spin_unlock(ptl); 6054 return 0; 6055 } 6056 6057 if (pmd_trans_unstable(pmd)) 6058 return 0; 6059 retry: 6060 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6061 for (; addr != end; addr += PAGE_SIZE) { 6062 pte_t ptent = *(pte++); 6063 bool device = false; 6064 swp_entry_t ent; 6065 6066 if (!mc.precharge) 6067 break; 6068 6069 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6070 case MC_TARGET_DEVICE: 6071 device = true; 6072 fallthrough; 6073 case MC_TARGET_PAGE: 6074 page = target.page; 6075 /* 6076 * We can have a part of the split pmd here. Moving it 6077 * can be done but it would be too convoluted so simply 6078 * ignore such a partial THP and keep it in original 6079 * memcg. There should be somebody mapping the head. 6080 */ 6081 if (PageTransCompound(page)) 6082 goto put; 6083 if (!device && isolate_lru_page(page)) 6084 goto put; 6085 if (!mem_cgroup_move_account(page, false, 6086 mc.from, mc.to)) { 6087 mc.precharge--; 6088 /* we uncharge from mc.from later. */ 6089 mc.moved_charge++; 6090 } 6091 if (!device) 6092 putback_lru_page(page); 6093 put: /* get_mctgt_type() gets the page */ 6094 put_page(page); 6095 break; 6096 case MC_TARGET_SWAP: 6097 ent = target.ent; 6098 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6099 mc.precharge--; 6100 mem_cgroup_id_get_many(mc.to, 1); 6101 /* we fixup other refcnts and charges later. */ 6102 mc.moved_swap++; 6103 } 6104 break; 6105 default: 6106 break; 6107 } 6108 } 6109 pte_unmap_unlock(pte - 1, ptl); 6110 cond_resched(); 6111 6112 if (addr != end) { 6113 /* 6114 * We have consumed all precharges we got in can_attach(). 6115 * We try charge one by one, but don't do any additional 6116 * charges to mc.to if we have failed in charge once in attach() 6117 * phase. 6118 */ 6119 ret = mem_cgroup_do_precharge(1); 6120 if (!ret) 6121 goto retry; 6122 } 6123 6124 return ret; 6125 } 6126 6127 static const struct mm_walk_ops charge_walk_ops = { 6128 .pmd_entry = mem_cgroup_move_charge_pte_range, 6129 }; 6130 6131 static void mem_cgroup_move_charge(void) 6132 { 6133 lru_add_drain_all(); 6134 /* 6135 * Signal lock_page_memcg() to take the memcg's move_lock 6136 * while we're moving its pages to another memcg. Then wait 6137 * for already started RCU-only updates to finish. 6138 */ 6139 atomic_inc(&mc.from->moving_account); 6140 synchronize_rcu(); 6141 retry: 6142 if (unlikely(!mmap_read_trylock(mc.mm))) { 6143 /* 6144 * Someone who are holding the mmap_lock might be waiting in 6145 * waitq. So we cancel all extra charges, wake up all waiters, 6146 * and retry. Because we cancel precharges, we might not be able 6147 * to move enough charges, but moving charge is a best-effort 6148 * feature anyway, so it wouldn't be a big problem. 6149 */ 6150 __mem_cgroup_clear_mc(); 6151 cond_resched(); 6152 goto retry; 6153 } 6154 /* 6155 * When we have consumed all precharges and failed in doing 6156 * additional charge, the page walk just aborts. 6157 */ 6158 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6159 NULL); 6160 6161 mmap_read_unlock(mc.mm); 6162 atomic_dec(&mc.from->moving_account); 6163 } 6164 6165 static void mem_cgroup_move_task(void) 6166 { 6167 if (mc.to) { 6168 mem_cgroup_move_charge(); 6169 mem_cgroup_clear_mc(); 6170 } 6171 } 6172 #else /* !CONFIG_MMU */ 6173 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6174 { 6175 return 0; 6176 } 6177 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6178 { 6179 } 6180 static void mem_cgroup_move_task(void) 6181 { 6182 } 6183 #endif 6184 6185 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6186 { 6187 if (value == PAGE_COUNTER_MAX) 6188 seq_puts(m, "max\n"); 6189 else 6190 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6191 6192 return 0; 6193 } 6194 6195 static u64 memory_current_read(struct cgroup_subsys_state *css, 6196 struct cftype *cft) 6197 { 6198 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6199 6200 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6201 } 6202 6203 static int memory_min_show(struct seq_file *m, void *v) 6204 { 6205 return seq_puts_memcg_tunable(m, 6206 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6207 } 6208 6209 static ssize_t memory_min_write(struct kernfs_open_file *of, 6210 char *buf, size_t nbytes, loff_t off) 6211 { 6212 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6213 unsigned long min; 6214 int err; 6215 6216 buf = strstrip(buf); 6217 err = page_counter_memparse(buf, "max", &min); 6218 if (err) 6219 return err; 6220 6221 page_counter_set_min(&memcg->memory, min); 6222 6223 return nbytes; 6224 } 6225 6226 static int memory_low_show(struct seq_file *m, void *v) 6227 { 6228 return seq_puts_memcg_tunable(m, 6229 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6230 } 6231 6232 static ssize_t memory_low_write(struct kernfs_open_file *of, 6233 char *buf, size_t nbytes, loff_t off) 6234 { 6235 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6236 unsigned long low; 6237 int err; 6238 6239 buf = strstrip(buf); 6240 err = page_counter_memparse(buf, "max", &low); 6241 if (err) 6242 return err; 6243 6244 page_counter_set_low(&memcg->memory, low); 6245 6246 return nbytes; 6247 } 6248 6249 static int memory_high_show(struct seq_file *m, void *v) 6250 { 6251 return seq_puts_memcg_tunable(m, 6252 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6253 } 6254 6255 static ssize_t memory_high_write(struct kernfs_open_file *of, 6256 char *buf, size_t nbytes, loff_t off) 6257 { 6258 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6259 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6260 bool drained = false; 6261 unsigned long high; 6262 int err; 6263 6264 buf = strstrip(buf); 6265 err = page_counter_memparse(buf, "max", &high); 6266 if (err) 6267 return err; 6268 6269 page_counter_set_high(&memcg->memory, high); 6270 6271 for (;;) { 6272 unsigned long nr_pages = page_counter_read(&memcg->memory); 6273 unsigned long reclaimed; 6274 6275 if (nr_pages <= high) 6276 break; 6277 6278 if (signal_pending(current)) 6279 break; 6280 6281 if (!drained) { 6282 drain_all_stock(memcg); 6283 drained = true; 6284 continue; 6285 } 6286 6287 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6288 GFP_KERNEL, true); 6289 6290 if (!reclaimed && !nr_retries--) 6291 break; 6292 } 6293 6294 memcg_wb_domain_size_changed(memcg); 6295 return nbytes; 6296 } 6297 6298 static int memory_max_show(struct seq_file *m, void *v) 6299 { 6300 return seq_puts_memcg_tunable(m, 6301 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6302 } 6303 6304 static ssize_t memory_max_write(struct kernfs_open_file *of, 6305 char *buf, size_t nbytes, loff_t off) 6306 { 6307 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6308 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6309 bool drained = false; 6310 unsigned long max; 6311 int err; 6312 6313 buf = strstrip(buf); 6314 err = page_counter_memparse(buf, "max", &max); 6315 if (err) 6316 return err; 6317 6318 xchg(&memcg->memory.max, max); 6319 6320 for (;;) { 6321 unsigned long nr_pages = page_counter_read(&memcg->memory); 6322 6323 if (nr_pages <= max) 6324 break; 6325 6326 if (signal_pending(current)) 6327 break; 6328 6329 if (!drained) { 6330 drain_all_stock(memcg); 6331 drained = true; 6332 continue; 6333 } 6334 6335 if (nr_reclaims) { 6336 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6337 GFP_KERNEL, true)) 6338 nr_reclaims--; 6339 continue; 6340 } 6341 6342 memcg_memory_event(memcg, MEMCG_OOM); 6343 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6344 break; 6345 } 6346 6347 memcg_wb_domain_size_changed(memcg); 6348 return nbytes; 6349 } 6350 6351 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6352 { 6353 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6354 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6355 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6356 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6357 seq_printf(m, "oom_kill %lu\n", 6358 atomic_long_read(&events[MEMCG_OOM_KILL])); 6359 } 6360 6361 static int memory_events_show(struct seq_file *m, void *v) 6362 { 6363 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6364 6365 __memory_events_show(m, memcg->memory_events); 6366 return 0; 6367 } 6368 6369 static int memory_events_local_show(struct seq_file *m, void *v) 6370 { 6371 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6372 6373 __memory_events_show(m, memcg->memory_events_local); 6374 return 0; 6375 } 6376 6377 static int memory_stat_show(struct seq_file *m, void *v) 6378 { 6379 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6380 char *buf; 6381 6382 buf = memory_stat_format(memcg); 6383 if (!buf) 6384 return -ENOMEM; 6385 seq_puts(m, buf); 6386 kfree(buf); 6387 return 0; 6388 } 6389 6390 #ifdef CONFIG_NUMA 6391 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 6392 int item) 6393 { 6394 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item); 6395 } 6396 6397 static int memory_numa_stat_show(struct seq_file *m, void *v) 6398 { 6399 int i; 6400 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6401 6402 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6403 int nid; 6404 6405 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6406 continue; 6407 6408 seq_printf(m, "%s", memory_stats[i].name); 6409 for_each_node_state(nid, N_MEMORY) { 6410 u64 size; 6411 struct lruvec *lruvec; 6412 6413 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6414 size = lruvec_page_state_output(lruvec, 6415 memory_stats[i].idx); 6416 seq_printf(m, " N%d=%llu", nid, size); 6417 } 6418 seq_putc(m, '\n'); 6419 } 6420 6421 return 0; 6422 } 6423 #endif 6424 6425 static int memory_oom_group_show(struct seq_file *m, void *v) 6426 { 6427 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6428 6429 seq_printf(m, "%d\n", memcg->oom_group); 6430 6431 return 0; 6432 } 6433 6434 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6435 char *buf, size_t nbytes, loff_t off) 6436 { 6437 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6438 int ret, oom_group; 6439 6440 buf = strstrip(buf); 6441 if (!buf) 6442 return -EINVAL; 6443 6444 ret = kstrtoint(buf, 0, &oom_group); 6445 if (ret) 6446 return ret; 6447 6448 if (oom_group != 0 && oom_group != 1) 6449 return -EINVAL; 6450 6451 memcg->oom_group = oom_group; 6452 6453 return nbytes; 6454 } 6455 6456 static struct cftype memory_files[] = { 6457 { 6458 .name = "current", 6459 .flags = CFTYPE_NOT_ON_ROOT, 6460 .read_u64 = memory_current_read, 6461 }, 6462 { 6463 .name = "min", 6464 .flags = CFTYPE_NOT_ON_ROOT, 6465 .seq_show = memory_min_show, 6466 .write = memory_min_write, 6467 }, 6468 { 6469 .name = "low", 6470 .flags = CFTYPE_NOT_ON_ROOT, 6471 .seq_show = memory_low_show, 6472 .write = memory_low_write, 6473 }, 6474 { 6475 .name = "high", 6476 .flags = CFTYPE_NOT_ON_ROOT, 6477 .seq_show = memory_high_show, 6478 .write = memory_high_write, 6479 }, 6480 { 6481 .name = "max", 6482 .flags = CFTYPE_NOT_ON_ROOT, 6483 .seq_show = memory_max_show, 6484 .write = memory_max_write, 6485 }, 6486 { 6487 .name = "events", 6488 .flags = CFTYPE_NOT_ON_ROOT, 6489 .file_offset = offsetof(struct mem_cgroup, events_file), 6490 .seq_show = memory_events_show, 6491 }, 6492 { 6493 .name = "events.local", 6494 .flags = CFTYPE_NOT_ON_ROOT, 6495 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6496 .seq_show = memory_events_local_show, 6497 }, 6498 { 6499 .name = "stat", 6500 .seq_show = memory_stat_show, 6501 }, 6502 #ifdef CONFIG_NUMA 6503 { 6504 .name = "numa_stat", 6505 .seq_show = memory_numa_stat_show, 6506 }, 6507 #endif 6508 { 6509 .name = "oom.group", 6510 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6511 .seq_show = memory_oom_group_show, 6512 .write = memory_oom_group_write, 6513 }, 6514 { } /* terminate */ 6515 }; 6516 6517 struct cgroup_subsys memory_cgrp_subsys = { 6518 .css_alloc = mem_cgroup_css_alloc, 6519 .css_online = mem_cgroup_css_online, 6520 .css_offline = mem_cgroup_css_offline, 6521 .css_released = mem_cgroup_css_released, 6522 .css_free = mem_cgroup_css_free, 6523 .css_reset = mem_cgroup_css_reset, 6524 .css_rstat_flush = mem_cgroup_css_rstat_flush, 6525 .can_attach = mem_cgroup_can_attach, 6526 .cancel_attach = mem_cgroup_cancel_attach, 6527 .post_attach = mem_cgroup_move_task, 6528 .dfl_cftypes = memory_files, 6529 .legacy_cftypes = mem_cgroup_legacy_files, 6530 .early_init = 0, 6531 }; 6532 6533 /* 6534 * This function calculates an individual cgroup's effective 6535 * protection which is derived from its own memory.min/low, its 6536 * parent's and siblings' settings, as well as the actual memory 6537 * distribution in the tree. 6538 * 6539 * The following rules apply to the effective protection values: 6540 * 6541 * 1. At the first level of reclaim, effective protection is equal to 6542 * the declared protection in memory.min and memory.low. 6543 * 6544 * 2. To enable safe delegation of the protection configuration, at 6545 * subsequent levels the effective protection is capped to the 6546 * parent's effective protection. 6547 * 6548 * 3. To make complex and dynamic subtrees easier to configure, the 6549 * user is allowed to overcommit the declared protection at a given 6550 * level. If that is the case, the parent's effective protection is 6551 * distributed to the children in proportion to how much protection 6552 * they have declared and how much of it they are utilizing. 6553 * 6554 * This makes distribution proportional, but also work-conserving: 6555 * if one cgroup claims much more protection than it uses memory, 6556 * the unused remainder is available to its siblings. 6557 * 6558 * 4. Conversely, when the declared protection is undercommitted at a 6559 * given level, the distribution of the larger parental protection 6560 * budget is NOT proportional. A cgroup's protection from a sibling 6561 * is capped to its own memory.min/low setting. 6562 * 6563 * 5. However, to allow protecting recursive subtrees from each other 6564 * without having to declare each individual cgroup's fixed share 6565 * of the ancestor's claim to protection, any unutilized - 6566 * "floating" - protection from up the tree is distributed in 6567 * proportion to each cgroup's *usage*. This makes the protection 6568 * neutral wrt sibling cgroups and lets them compete freely over 6569 * the shared parental protection budget, but it protects the 6570 * subtree as a whole from neighboring subtrees. 6571 * 6572 * Note that 4. and 5. are not in conflict: 4. is about protecting 6573 * against immediate siblings whereas 5. is about protecting against 6574 * neighboring subtrees. 6575 */ 6576 static unsigned long effective_protection(unsigned long usage, 6577 unsigned long parent_usage, 6578 unsigned long setting, 6579 unsigned long parent_effective, 6580 unsigned long siblings_protected) 6581 { 6582 unsigned long protected; 6583 unsigned long ep; 6584 6585 protected = min(usage, setting); 6586 /* 6587 * If all cgroups at this level combined claim and use more 6588 * protection then what the parent affords them, distribute 6589 * shares in proportion to utilization. 6590 * 6591 * We are using actual utilization rather than the statically 6592 * claimed protection in order to be work-conserving: claimed 6593 * but unused protection is available to siblings that would 6594 * otherwise get a smaller chunk than what they claimed. 6595 */ 6596 if (siblings_protected > parent_effective) 6597 return protected * parent_effective / siblings_protected; 6598 6599 /* 6600 * Ok, utilized protection of all children is within what the 6601 * parent affords them, so we know whatever this child claims 6602 * and utilizes is effectively protected. 6603 * 6604 * If there is unprotected usage beyond this value, reclaim 6605 * will apply pressure in proportion to that amount. 6606 * 6607 * If there is unutilized protection, the cgroup will be fully 6608 * shielded from reclaim, but we do return a smaller value for 6609 * protection than what the group could enjoy in theory. This 6610 * is okay. With the overcommit distribution above, effective 6611 * protection is always dependent on how memory is actually 6612 * consumed among the siblings anyway. 6613 */ 6614 ep = protected; 6615 6616 /* 6617 * If the children aren't claiming (all of) the protection 6618 * afforded to them by the parent, distribute the remainder in 6619 * proportion to the (unprotected) memory of each cgroup. That 6620 * way, cgroups that aren't explicitly prioritized wrt each 6621 * other compete freely over the allowance, but they are 6622 * collectively protected from neighboring trees. 6623 * 6624 * We're using unprotected memory for the weight so that if 6625 * some cgroups DO claim explicit protection, we don't protect 6626 * the same bytes twice. 6627 * 6628 * Check both usage and parent_usage against the respective 6629 * protected values. One should imply the other, but they 6630 * aren't read atomically - make sure the division is sane. 6631 */ 6632 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6633 return ep; 6634 if (parent_effective > siblings_protected && 6635 parent_usage > siblings_protected && 6636 usage > protected) { 6637 unsigned long unclaimed; 6638 6639 unclaimed = parent_effective - siblings_protected; 6640 unclaimed *= usage - protected; 6641 unclaimed /= parent_usage - siblings_protected; 6642 6643 ep += unclaimed; 6644 } 6645 6646 return ep; 6647 } 6648 6649 /** 6650 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 6651 * @root: the top ancestor of the sub-tree being checked 6652 * @memcg: the memory cgroup to check 6653 * 6654 * WARNING: This function is not stateless! It can only be used as part 6655 * of a top-down tree iteration, not for isolated queries. 6656 */ 6657 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6658 struct mem_cgroup *memcg) 6659 { 6660 unsigned long usage, parent_usage; 6661 struct mem_cgroup *parent; 6662 6663 if (mem_cgroup_disabled()) 6664 return; 6665 6666 if (!root) 6667 root = root_mem_cgroup; 6668 6669 /* 6670 * Effective values of the reclaim targets are ignored so they 6671 * can be stale. Have a look at mem_cgroup_protection for more 6672 * details. 6673 * TODO: calculation should be more robust so that we do not need 6674 * that special casing. 6675 */ 6676 if (memcg == root) 6677 return; 6678 6679 usage = page_counter_read(&memcg->memory); 6680 if (!usage) 6681 return; 6682 6683 parent = parent_mem_cgroup(memcg); 6684 /* No parent means a non-hierarchical mode on v1 memcg */ 6685 if (!parent) 6686 return; 6687 6688 if (parent == root) { 6689 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6690 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6691 return; 6692 } 6693 6694 parent_usage = page_counter_read(&parent->memory); 6695 6696 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6697 READ_ONCE(memcg->memory.min), 6698 READ_ONCE(parent->memory.emin), 6699 atomic_long_read(&parent->memory.children_min_usage))); 6700 6701 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6702 READ_ONCE(memcg->memory.low), 6703 READ_ONCE(parent->memory.elow), 6704 atomic_long_read(&parent->memory.children_low_usage))); 6705 } 6706 6707 static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg, 6708 gfp_t gfp) 6709 { 6710 unsigned int nr_pages = thp_nr_pages(page); 6711 int ret; 6712 6713 ret = try_charge(memcg, gfp, nr_pages); 6714 if (ret) 6715 goto out; 6716 6717 css_get(&memcg->css); 6718 commit_charge(page, memcg); 6719 6720 local_irq_disable(); 6721 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6722 memcg_check_events(memcg, page); 6723 local_irq_enable(); 6724 out: 6725 return ret; 6726 } 6727 6728 /** 6729 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6730 * @page: page to charge 6731 * @mm: mm context of the victim 6732 * @gfp_mask: reclaim mode 6733 * 6734 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6735 * pages according to @gfp_mask if necessary. if @mm is NULL, try to 6736 * charge to the active memcg. 6737 * 6738 * Do not use this for pages allocated for swapin. 6739 * 6740 * Returns 0 on success. Otherwise, an error code is returned. 6741 */ 6742 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6743 { 6744 struct mem_cgroup *memcg; 6745 int ret; 6746 6747 if (mem_cgroup_disabled()) 6748 return 0; 6749 6750 memcg = get_mem_cgroup_from_mm(mm); 6751 ret = __mem_cgroup_charge(page, memcg, gfp_mask); 6752 css_put(&memcg->css); 6753 6754 return ret; 6755 } 6756 6757 /** 6758 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin 6759 * @page: page to charge 6760 * @mm: mm context of the victim 6761 * @gfp: reclaim mode 6762 * @entry: swap entry for which the page is allocated 6763 * 6764 * This function charges a page allocated for swapin. Please call this before 6765 * adding the page to the swapcache. 6766 * 6767 * Returns 0 on success. Otherwise, an error code is returned. 6768 */ 6769 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, 6770 gfp_t gfp, swp_entry_t entry) 6771 { 6772 struct mem_cgroup *memcg; 6773 unsigned short id; 6774 int ret; 6775 6776 if (mem_cgroup_disabled()) 6777 return 0; 6778 6779 id = lookup_swap_cgroup_id(entry); 6780 rcu_read_lock(); 6781 memcg = mem_cgroup_from_id(id); 6782 if (!memcg || !css_tryget_online(&memcg->css)) 6783 memcg = get_mem_cgroup_from_mm(mm); 6784 rcu_read_unlock(); 6785 6786 ret = __mem_cgroup_charge(page, memcg, gfp); 6787 6788 css_put(&memcg->css); 6789 return ret; 6790 } 6791 6792 /* 6793 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 6794 * @entry: swap entry for which the page is charged 6795 * 6796 * Call this function after successfully adding the charged page to swapcache. 6797 * 6798 * Note: This function assumes the page for which swap slot is being uncharged 6799 * is order 0 page. 6800 */ 6801 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) 6802 { 6803 /* 6804 * Cgroup1's unified memory+swap counter has been charged with the 6805 * new swapcache page, finish the transfer by uncharging the swap 6806 * slot. The swap slot would also get uncharged when it dies, but 6807 * it can stick around indefinitely and we'd count the page twice 6808 * the entire time. 6809 * 6810 * Cgroup2 has separate resource counters for memory and swap, 6811 * so this is a non-issue here. Memory and swap charge lifetimes 6812 * correspond 1:1 to page and swap slot lifetimes: we charge the 6813 * page to memory here, and uncharge swap when the slot is freed. 6814 */ 6815 if (!mem_cgroup_disabled() && do_memsw_account()) { 6816 /* 6817 * The swap entry might not get freed for a long time, 6818 * let's not wait for it. The page already received a 6819 * memory+swap charge, drop the swap entry duplicate. 6820 */ 6821 mem_cgroup_uncharge_swap(entry, 1); 6822 } 6823 } 6824 6825 struct uncharge_gather { 6826 struct mem_cgroup *memcg; 6827 unsigned long nr_memory; 6828 unsigned long pgpgout; 6829 unsigned long nr_kmem; 6830 struct page *dummy_page; 6831 }; 6832 6833 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6834 { 6835 memset(ug, 0, sizeof(*ug)); 6836 } 6837 6838 static void uncharge_batch(const struct uncharge_gather *ug) 6839 { 6840 unsigned long flags; 6841 6842 if (ug->nr_memory) { 6843 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 6844 if (do_memsw_account()) 6845 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 6846 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6847 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6848 memcg_oom_recover(ug->memcg); 6849 } 6850 6851 local_irq_save(flags); 6852 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6853 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); 6854 memcg_check_events(ug->memcg, ug->dummy_page); 6855 local_irq_restore(flags); 6856 6857 /* drop reference from uncharge_page */ 6858 css_put(&ug->memcg->css); 6859 } 6860 6861 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6862 { 6863 unsigned long nr_pages; 6864 struct mem_cgroup *memcg; 6865 struct obj_cgroup *objcg; 6866 bool use_objcg = PageMemcgKmem(page); 6867 6868 VM_BUG_ON_PAGE(PageLRU(page), page); 6869 6870 /* 6871 * Nobody should be changing or seriously looking at 6872 * page memcg or objcg at this point, we have fully 6873 * exclusive access to the page. 6874 */ 6875 if (use_objcg) { 6876 objcg = __page_objcg(page); 6877 /* 6878 * This get matches the put at the end of the function and 6879 * kmem pages do not hold memcg references anymore. 6880 */ 6881 memcg = get_mem_cgroup_from_objcg(objcg); 6882 } else { 6883 memcg = __page_memcg(page); 6884 } 6885 6886 if (!memcg) 6887 return; 6888 6889 if (ug->memcg != memcg) { 6890 if (ug->memcg) { 6891 uncharge_batch(ug); 6892 uncharge_gather_clear(ug); 6893 } 6894 ug->memcg = memcg; 6895 ug->dummy_page = page; 6896 6897 /* pairs with css_put in uncharge_batch */ 6898 css_get(&memcg->css); 6899 } 6900 6901 nr_pages = compound_nr(page); 6902 6903 if (use_objcg) { 6904 ug->nr_memory += nr_pages; 6905 ug->nr_kmem += nr_pages; 6906 6907 page->memcg_data = 0; 6908 obj_cgroup_put(objcg); 6909 } else { 6910 /* LRU pages aren't accounted at the root level */ 6911 if (!mem_cgroup_is_root(memcg)) 6912 ug->nr_memory += nr_pages; 6913 ug->pgpgout++; 6914 6915 page->memcg_data = 0; 6916 } 6917 6918 css_put(&memcg->css); 6919 } 6920 6921 /** 6922 * mem_cgroup_uncharge - uncharge a page 6923 * @page: page to uncharge 6924 * 6925 * Uncharge a page previously charged with mem_cgroup_charge(). 6926 */ 6927 void mem_cgroup_uncharge(struct page *page) 6928 { 6929 struct uncharge_gather ug; 6930 6931 if (mem_cgroup_disabled()) 6932 return; 6933 6934 /* Don't touch page->lru of any random page, pre-check: */ 6935 if (!page_memcg(page)) 6936 return; 6937 6938 uncharge_gather_clear(&ug); 6939 uncharge_page(page, &ug); 6940 uncharge_batch(&ug); 6941 } 6942 6943 /** 6944 * mem_cgroup_uncharge_list - uncharge a list of page 6945 * @page_list: list of pages to uncharge 6946 * 6947 * Uncharge a list of pages previously charged with 6948 * mem_cgroup_charge(). 6949 */ 6950 void mem_cgroup_uncharge_list(struct list_head *page_list) 6951 { 6952 struct uncharge_gather ug; 6953 struct page *page; 6954 6955 if (mem_cgroup_disabled()) 6956 return; 6957 6958 uncharge_gather_clear(&ug); 6959 list_for_each_entry(page, page_list, lru) 6960 uncharge_page(page, &ug); 6961 if (ug.memcg) 6962 uncharge_batch(&ug); 6963 } 6964 6965 /** 6966 * mem_cgroup_migrate - charge a page's replacement 6967 * @oldpage: currently circulating page 6968 * @newpage: replacement page 6969 * 6970 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6971 * be uncharged upon free. 6972 * 6973 * Both pages must be locked, @newpage->mapping must be set up. 6974 */ 6975 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6976 { 6977 struct mem_cgroup *memcg; 6978 unsigned int nr_pages; 6979 unsigned long flags; 6980 6981 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6982 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6983 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6984 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6985 newpage); 6986 6987 if (mem_cgroup_disabled()) 6988 return; 6989 6990 /* Page cache replacement: new page already charged? */ 6991 if (page_memcg(newpage)) 6992 return; 6993 6994 memcg = page_memcg(oldpage); 6995 VM_WARN_ON_ONCE_PAGE(!memcg, oldpage); 6996 if (!memcg) 6997 return; 6998 6999 /* Force-charge the new page. The old one will be freed soon */ 7000 nr_pages = thp_nr_pages(newpage); 7001 7002 if (!mem_cgroup_is_root(memcg)) { 7003 page_counter_charge(&memcg->memory, nr_pages); 7004 if (do_memsw_account()) 7005 page_counter_charge(&memcg->memsw, nr_pages); 7006 } 7007 7008 css_get(&memcg->css); 7009 commit_charge(newpage, memcg); 7010 7011 local_irq_save(flags); 7012 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 7013 memcg_check_events(memcg, newpage); 7014 local_irq_restore(flags); 7015 } 7016 7017 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7018 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7019 7020 void mem_cgroup_sk_alloc(struct sock *sk) 7021 { 7022 struct mem_cgroup *memcg; 7023 7024 if (!mem_cgroup_sockets_enabled) 7025 return; 7026 7027 /* Do not associate the sock with unrelated interrupted task's memcg. */ 7028 if (in_interrupt()) 7029 return; 7030 7031 rcu_read_lock(); 7032 memcg = mem_cgroup_from_task(current); 7033 if (memcg == root_mem_cgroup) 7034 goto out; 7035 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7036 goto out; 7037 if (css_tryget(&memcg->css)) 7038 sk->sk_memcg = memcg; 7039 out: 7040 rcu_read_unlock(); 7041 } 7042 7043 void mem_cgroup_sk_free(struct sock *sk) 7044 { 7045 if (sk->sk_memcg) 7046 css_put(&sk->sk_memcg->css); 7047 } 7048 7049 /** 7050 * mem_cgroup_charge_skmem - charge socket memory 7051 * @memcg: memcg to charge 7052 * @nr_pages: number of pages to charge 7053 * @gfp_mask: reclaim mode 7054 * 7055 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7056 * @memcg's configured limit, %false if it doesn't. 7057 */ 7058 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, 7059 gfp_t gfp_mask) 7060 { 7061 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7062 struct page_counter *fail; 7063 7064 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7065 memcg->tcpmem_pressure = 0; 7066 return true; 7067 } 7068 memcg->tcpmem_pressure = 1; 7069 if (gfp_mask & __GFP_NOFAIL) { 7070 page_counter_charge(&memcg->tcpmem, nr_pages); 7071 return true; 7072 } 7073 return false; 7074 } 7075 7076 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { 7077 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7078 return true; 7079 } 7080 7081 return false; 7082 } 7083 7084 /** 7085 * mem_cgroup_uncharge_skmem - uncharge socket memory 7086 * @memcg: memcg to uncharge 7087 * @nr_pages: number of pages to uncharge 7088 */ 7089 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7090 { 7091 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7092 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7093 return; 7094 } 7095 7096 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7097 7098 refill_stock(memcg, nr_pages); 7099 } 7100 7101 static int __init cgroup_memory(char *s) 7102 { 7103 char *token; 7104 7105 while ((token = strsep(&s, ",")) != NULL) { 7106 if (!*token) 7107 continue; 7108 if (!strcmp(token, "nosocket")) 7109 cgroup_memory_nosocket = true; 7110 if (!strcmp(token, "nokmem")) 7111 cgroup_memory_nokmem = true; 7112 } 7113 return 0; 7114 } 7115 __setup("cgroup.memory=", cgroup_memory); 7116 7117 /* 7118 * subsys_initcall() for memory controller. 7119 * 7120 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7121 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7122 * basically everything that doesn't depend on a specific mem_cgroup structure 7123 * should be initialized from here. 7124 */ 7125 static int __init mem_cgroup_init(void) 7126 { 7127 int cpu, node; 7128 7129 /* 7130 * Currently s32 type (can refer to struct batched_lruvec_stat) is 7131 * used for per-memcg-per-cpu caching of per-node statistics. In order 7132 * to work fine, we should make sure that the overfill threshold can't 7133 * exceed S32_MAX / PAGE_SIZE. 7134 */ 7135 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 7136 7137 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7138 memcg_hotplug_cpu_dead); 7139 7140 for_each_possible_cpu(cpu) 7141 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7142 drain_local_stock); 7143 7144 for_each_node(node) { 7145 struct mem_cgroup_tree_per_node *rtpn; 7146 7147 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7148 node_online(node) ? node : NUMA_NO_NODE); 7149 7150 rtpn->rb_root = RB_ROOT; 7151 rtpn->rb_rightmost = NULL; 7152 spin_lock_init(&rtpn->lock); 7153 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7154 } 7155 7156 return 0; 7157 } 7158 subsys_initcall(mem_cgroup_init); 7159 7160 #ifdef CONFIG_MEMCG_SWAP 7161 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7162 { 7163 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7164 /* 7165 * The root cgroup cannot be destroyed, so it's refcount must 7166 * always be >= 1. 7167 */ 7168 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7169 VM_BUG_ON(1); 7170 break; 7171 } 7172 memcg = parent_mem_cgroup(memcg); 7173 if (!memcg) 7174 memcg = root_mem_cgroup; 7175 } 7176 return memcg; 7177 } 7178 7179 /** 7180 * mem_cgroup_swapout - transfer a memsw charge to swap 7181 * @page: page whose memsw charge to transfer 7182 * @entry: swap entry to move the charge to 7183 * 7184 * Transfer the memsw charge of @page to @entry. 7185 */ 7186 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7187 { 7188 struct mem_cgroup *memcg, *swap_memcg; 7189 unsigned int nr_entries; 7190 unsigned short oldid; 7191 7192 VM_BUG_ON_PAGE(PageLRU(page), page); 7193 VM_BUG_ON_PAGE(page_count(page), page); 7194 7195 if (mem_cgroup_disabled()) 7196 return; 7197 7198 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7199 return; 7200 7201 memcg = page_memcg(page); 7202 7203 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7204 if (!memcg) 7205 return; 7206 7207 /* 7208 * In case the memcg owning these pages has been offlined and doesn't 7209 * have an ID allocated to it anymore, charge the closest online 7210 * ancestor for the swap instead and transfer the memory+swap charge. 7211 */ 7212 swap_memcg = mem_cgroup_id_get_online(memcg); 7213 nr_entries = thp_nr_pages(page); 7214 /* Get references for the tail pages, too */ 7215 if (nr_entries > 1) 7216 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7217 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7218 nr_entries); 7219 VM_BUG_ON_PAGE(oldid, page); 7220 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7221 7222 page->memcg_data = 0; 7223 7224 if (!mem_cgroup_is_root(memcg)) 7225 page_counter_uncharge(&memcg->memory, nr_entries); 7226 7227 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7228 if (!mem_cgroup_is_root(swap_memcg)) 7229 page_counter_charge(&swap_memcg->memsw, nr_entries); 7230 page_counter_uncharge(&memcg->memsw, nr_entries); 7231 } 7232 7233 /* 7234 * Interrupts should be disabled here because the caller holds the 7235 * i_pages lock which is taken with interrupts-off. It is 7236 * important here to have the interrupts disabled because it is the 7237 * only synchronisation we have for updating the per-CPU variables. 7238 */ 7239 VM_BUG_ON(!irqs_disabled()); 7240 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7241 memcg_check_events(memcg, page); 7242 7243 css_put(&memcg->css); 7244 } 7245 7246 /** 7247 * mem_cgroup_try_charge_swap - try charging swap space for a page 7248 * @page: page being added to swap 7249 * @entry: swap entry to charge 7250 * 7251 * Try to charge @page's memcg for the swap space at @entry. 7252 * 7253 * Returns 0 on success, -ENOMEM on failure. 7254 */ 7255 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7256 { 7257 unsigned int nr_pages = thp_nr_pages(page); 7258 struct page_counter *counter; 7259 struct mem_cgroup *memcg; 7260 unsigned short oldid; 7261 7262 if (mem_cgroup_disabled()) 7263 return 0; 7264 7265 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7266 return 0; 7267 7268 memcg = page_memcg(page); 7269 7270 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7271 if (!memcg) 7272 return 0; 7273 7274 if (!entry.val) { 7275 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7276 return 0; 7277 } 7278 7279 memcg = mem_cgroup_id_get_online(memcg); 7280 7281 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7282 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7283 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7284 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7285 mem_cgroup_id_put(memcg); 7286 return -ENOMEM; 7287 } 7288 7289 /* Get references for the tail pages, too */ 7290 if (nr_pages > 1) 7291 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7292 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7293 VM_BUG_ON_PAGE(oldid, page); 7294 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7295 7296 return 0; 7297 } 7298 7299 /** 7300 * mem_cgroup_uncharge_swap - uncharge swap space 7301 * @entry: swap entry to uncharge 7302 * @nr_pages: the amount of swap space to uncharge 7303 */ 7304 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7305 { 7306 struct mem_cgroup *memcg; 7307 unsigned short id; 7308 7309 id = swap_cgroup_record(entry, 0, nr_pages); 7310 rcu_read_lock(); 7311 memcg = mem_cgroup_from_id(id); 7312 if (memcg) { 7313 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7314 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7315 page_counter_uncharge(&memcg->swap, nr_pages); 7316 else 7317 page_counter_uncharge(&memcg->memsw, nr_pages); 7318 } 7319 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7320 mem_cgroup_id_put_many(memcg, nr_pages); 7321 } 7322 rcu_read_unlock(); 7323 } 7324 7325 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7326 { 7327 long nr_swap_pages = get_nr_swap_pages(); 7328 7329 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7330 return nr_swap_pages; 7331 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7332 nr_swap_pages = min_t(long, nr_swap_pages, 7333 READ_ONCE(memcg->swap.max) - 7334 page_counter_read(&memcg->swap)); 7335 return nr_swap_pages; 7336 } 7337 7338 bool mem_cgroup_swap_full(struct page *page) 7339 { 7340 struct mem_cgroup *memcg; 7341 7342 VM_BUG_ON_PAGE(!PageLocked(page), page); 7343 7344 if (vm_swap_full()) 7345 return true; 7346 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7347 return false; 7348 7349 memcg = page_memcg(page); 7350 if (!memcg) 7351 return false; 7352 7353 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7354 unsigned long usage = page_counter_read(&memcg->swap); 7355 7356 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7357 usage * 2 >= READ_ONCE(memcg->swap.max)) 7358 return true; 7359 } 7360 7361 return false; 7362 } 7363 7364 static int __init setup_swap_account(char *s) 7365 { 7366 if (!strcmp(s, "1")) 7367 cgroup_memory_noswap = false; 7368 else if (!strcmp(s, "0")) 7369 cgroup_memory_noswap = true; 7370 return 1; 7371 } 7372 __setup("swapaccount=", setup_swap_account); 7373 7374 static u64 swap_current_read(struct cgroup_subsys_state *css, 7375 struct cftype *cft) 7376 { 7377 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7378 7379 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7380 } 7381 7382 static int swap_high_show(struct seq_file *m, void *v) 7383 { 7384 return seq_puts_memcg_tunable(m, 7385 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7386 } 7387 7388 static ssize_t swap_high_write(struct kernfs_open_file *of, 7389 char *buf, size_t nbytes, loff_t off) 7390 { 7391 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7392 unsigned long high; 7393 int err; 7394 7395 buf = strstrip(buf); 7396 err = page_counter_memparse(buf, "max", &high); 7397 if (err) 7398 return err; 7399 7400 page_counter_set_high(&memcg->swap, high); 7401 7402 return nbytes; 7403 } 7404 7405 static int swap_max_show(struct seq_file *m, void *v) 7406 { 7407 return seq_puts_memcg_tunable(m, 7408 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7409 } 7410 7411 static ssize_t swap_max_write(struct kernfs_open_file *of, 7412 char *buf, size_t nbytes, loff_t off) 7413 { 7414 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7415 unsigned long max; 7416 int err; 7417 7418 buf = strstrip(buf); 7419 err = page_counter_memparse(buf, "max", &max); 7420 if (err) 7421 return err; 7422 7423 xchg(&memcg->swap.max, max); 7424 7425 return nbytes; 7426 } 7427 7428 static int swap_events_show(struct seq_file *m, void *v) 7429 { 7430 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7431 7432 seq_printf(m, "high %lu\n", 7433 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7434 seq_printf(m, "max %lu\n", 7435 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7436 seq_printf(m, "fail %lu\n", 7437 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7438 7439 return 0; 7440 } 7441 7442 static struct cftype swap_files[] = { 7443 { 7444 .name = "swap.current", 7445 .flags = CFTYPE_NOT_ON_ROOT, 7446 .read_u64 = swap_current_read, 7447 }, 7448 { 7449 .name = "swap.high", 7450 .flags = CFTYPE_NOT_ON_ROOT, 7451 .seq_show = swap_high_show, 7452 .write = swap_high_write, 7453 }, 7454 { 7455 .name = "swap.max", 7456 .flags = CFTYPE_NOT_ON_ROOT, 7457 .seq_show = swap_max_show, 7458 .write = swap_max_write, 7459 }, 7460 { 7461 .name = "swap.events", 7462 .flags = CFTYPE_NOT_ON_ROOT, 7463 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7464 .seq_show = swap_events_show, 7465 }, 7466 { } /* terminate */ 7467 }; 7468 7469 static struct cftype memsw_files[] = { 7470 { 7471 .name = "memsw.usage_in_bytes", 7472 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7473 .read_u64 = mem_cgroup_read_u64, 7474 }, 7475 { 7476 .name = "memsw.max_usage_in_bytes", 7477 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7478 .write = mem_cgroup_reset, 7479 .read_u64 = mem_cgroup_read_u64, 7480 }, 7481 { 7482 .name = "memsw.limit_in_bytes", 7483 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7484 .write = mem_cgroup_write, 7485 .read_u64 = mem_cgroup_read_u64, 7486 }, 7487 { 7488 .name = "memsw.failcnt", 7489 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7490 .write = mem_cgroup_reset, 7491 .read_u64 = mem_cgroup_read_u64, 7492 }, 7493 { }, /* terminate */ 7494 }; 7495 7496 /* 7497 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7498 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7499 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7500 * boot parameter. This may result in premature OOPS inside 7501 * mem_cgroup_get_nr_swap_pages() function in corner cases. 7502 */ 7503 static int __init mem_cgroup_swap_init(void) 7504 { 7505 /* No memory control -> no swap control */ 7506 if (mem_cgroup_disabled()) 7507 cgroup_memory_noswap = true; 7508 7509 if (cgroup_memory_noswap) 7510 return 0; 7511 7512 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7513 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7514 7515 return 0; 7516 } 7517 core_initcall(mem_cgroup_swap_init); 7518 7519 #endif /* CONFIG_MEMCG_SWAP */ 7520