1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 * 24 * Per memcg lru locking 25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi 26 */ 27 28 #include <linux/page_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/pagewalk.h> 32 #include <linux/sched/mm.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/hugetlb.h> 35 #include <linux/pagemap.h> 36 #include <linux/vm_event_item.h> 37 #include <linux/smp.h> 38 #include <linux/page-flags.h> 39 #include <linux/backing-dev.h> 40 #include <linux/bit_spinlock.h> 41 #include <linux/rcupdate.h> 42 #include <linux/limits.h> 43 #include <linux/export.h> 44 #include <linux/mutex.h> 45 #include <linux/rbtree.h> 46 #include <linux/slab.h> 47 #include <linux/swap.h> 48 #include <linux/swapops.h> 49 #include <linux/spinlock.h> 50 #include <linux/eventfd.h> 51 #include <linux/poll.h> 52 #include <linux/sort.h> 53 #include <linux/fs.h> 54 #include <linux/seq_file.h> 55 #include <linux/vmpressure.h> 56 #include <linux/mm_inline.h> 57 #include <linux/swap_cgroup.h> 58 #include <linux/cpu.h> 59 #include <linux/oom.h> 60 #include <linux/lockdep.h> 61 #include <linux/file.h> 62 #include <linux/tracehook.h> 63 #include <linux/psi.h> 64 #include <linux/seq_buf.h> 65 #include "internal.h" 66 #include <net/sock.h> 67 #include <net/ip.h> 68 #include "slab.h" 69 70 #include <linux/uaccess.h> 71 72 #include <trace/events/vmscan.h> 73 74 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 75 EXPORT_SYMBOL(memory_cgrp_subsys); 76 77 struct mem_cgroup *root_mem_cgroup __read_mostly; 78 79 /* Active memory cgroup to use from an interrupt context */ 80 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); 81 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); 82 83 /* Socket memory accounting disabled? */ 84 static bool cgroup_memory_nosocket __ro_after_init; 85 86 /* Kernel memory accounting disabled? 
*/ 87 bool cgroup_memory_nokmem __ro_after_init; 88 89 /* Whether the swap controller is active */ 90 #ifdef CONFIG_MEMCG_SWAP 91 bool cgroup_memory_noswap __ro_after_init; 92 #else 93 #define cgroup_memory_noswap 1 94 #endif 95 96 #ifdef CONFIG_CGROUP_WRITEBACK 97 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); 98 #endif 99 100 /* Whether legacy memory+swap accounting is active */ 101 static bool do_memsw_account(void) 102 { 103 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap; 104 } 105 106 #define THRESHOLDS_EVENTS_TARGET 128 107 #define SOFTLIMIT_EVENTS_TARGET 1024 108 109 /* 110 * Cgroups above their limits are maintained in a RB-Tree, independent of 111 * their hierarchy representation 112 */ 113 114 struct mem_cgroup_tree_per_node { 115 struct rb_root rb_root; 116 struct rb_node *rb_rightmost; 117 spinlock_t lock; 118 }; 119 120 struct mem_cgroup_tree { 121 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 122 }; 123 124 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 125 126 /* for OOM */ 127 struct mem_cgroup_eventfd_list { 128 struct list_head list; 129 struct eventfd_ctx *eventfd; 130 }; 131 132 /* 133 * cgroup_event represents events which userspace want to receive. 134 */ 135 struct mem_cgroup_event { 136 /* 137 * memcg which the event belongs to. 138 */ 139 struct mem_cgroup *memcg; 140 /* 141 * eventfd to signal userspace about the event. 142 */ 143 struct eventfd_ctx *eventfd; 144 /* 145 * Each of these stored in a list by the cgroup. 146 */ 147 struct list_head list; 148 /* 149 * register_event() callback will be used to add new userspace 150 * waiter for changes related to this event. Use eventfd_signal() 151 * on eventfd to send notification to userspace. 152 */ 153 int (*register_event)(struct mem_cgroup *memcg, 154 struct eventfd_ctx *eventfd, const char *args); 155 /* 156 * unregister_event() callback will be called when userspace closes 157 * the eventfd or on cgroup removing. This callback must be set, 158 * if you want provide notification functionality. 159 */ 160 void (*unregister_event)(struct mem_cgroup *memcg, 161 struct eventfd_ctx *eventfd); 162 /* 163 * All fields below needed to unregister event when 164 * userspace closes eventfd. 165 */ 166 poll_table pt; 167 wait_queue_head_t *wqh; 168 wait_queue_entry_t wait; 169 struct work_struct remove; 170 }; 171 172 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 173 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 174 175 /* Stuffs for move charges at task migration. */ 176 /* 177 * Types of charges to be moved. 178 */ 179 #define MOVE_ANON 0x1U 180 #define MOVE_FILE 0x2U 181 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 182 183 /* "mc" and its members are protected by cgroup_mutex */ 184 static struct move_charge_struct { 185 spinlock_t lock; /* for from, to */ 186 struct mm_struct *mm; 187 struct mem_cgroup *from; 188 struct mem_cgroup *to; 189 unsigned long flags; 190 unsigned long precharge; 191 unsigned long moved_charge; 192 unsigned long moved_swap; 193 struct task_struct *moving_task; /* a task moving charges */ 194 wait_queue_head_t waitq; /* a waitq for other context */ 195 } mc = { 196 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 197 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 198 }; 199 200 /* 201 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 202 * limit reclaim to prevent infinite loops, if they ever occur. 
203 */ 204 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 205 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 206 207 /* for encoding cft->private value on file */ 208 enum res_type { 209 _MEM, 210 _MEMSWAP, 211 _OOM_TYPE, 212 _KMEM, 213 _TCP, 214 }; 215 216 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 217 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 218 #define MEMFILE_ATTR(val) ((val) & 0xffff) 219 /* Used for OOM notifier */ 220 #define OOM_CONTROL (0) 221 222 /* 223 * Iteration constructs for visiting all cgroups (under a tree). If 224 * loops are exited prematurely (break), mem_cgroup_iter_break() must 225 * be used for reference counting. 226 */ 227 #define for_each_mem_cgroup_tree(iter, root) \ 228 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 229 iter != NULL; \ 230 iter = mem_cgroup_iter(root, iter, NULL)) 231 232 #define for_each_mem_cgroup(iter) \ 233 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 234 iter != NULL; \ 235 iter = mem_cgroup_iter(NULL, iter, NULL)) 236 237 static inline bool should_force_charge(void) 238 { 239 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || 240 (current->flags & PF_EXITING); 241 } 242 243 /* Some nice accessors for the vmpressure. */ 244 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 245 { 246 if (!memcg) 247 memcg = root_mem_cgroup; 248 return &memcg->vmpressure; 249 } 250 251 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) 252 { 253 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css; 254 } 255 256 #ifdef CONFIG_MEMCG_KMEM 257 extern spinlock_t css_set_lock; 258 259 bool mem_cgroup_kmem_disabled(void) 260 { 261 return cgroup_memory_nokmem; 262 } 263 264 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 265 unsigned int nr_pages); 266 267 static void obj_cgroup_release(struct percpu_ref *ref) 268 { 269 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt); 270 unsigned int nr_bytes; 271 unsigned int nr_pages; 272 unsigned long flags; 273 274 /* 275 * At this point all allocated objects are freed, and 276 * objcg->nr_charged_bytes can't have an arbitrary byte value. 277 * However, it can be PAGE_SIZE or (x * PAGE_SIZE). 278 * 279 * The following sequence can lead to it: 280 * 1) CPU0: objcg == stock->cached_objcg 281 * 2) CPU1: we do a small allocation (e.g. 92 bytes), 282 * PAGE_SIZE bytes are charged 283 * 3) CPU1: a process from another memcg is allocating something, 284 * the stock if flushed, 285 * objcg->nr_charged_bytes = PAGE_SIZE - 92 286 * 5) CPU0: we do release this object, 287 * 92 bytes are added to stock->nr_bytes 288 * 6) CPU0: stock is flushed, 289 * 92 bytes are added to objcg->nr_charged_bytes 290 * 291 * In the result, nr_charged_bytes == PAGE_SIZE. 292 * This page will be uncharged in obj_cgroup_release(). 
293 */ 294 nr_bytes = atomic_read(&objcg->nr_charged_bytes); 295 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); 296 nr_pages = nr_bytes >> PAGE_SHIFT; 297 298 if (nr_pages) 299 obj_cgroup_uncharge_pages(objcg, nr_pages); 300 301 spin_lock_irqsave(&css_set_lock, flags); 302 list_del(&objcg->list); 303 spin_unlock_irqrestore(&css_set_lock, flags); 304 305 percpu_ref_exit(ref); 306 kfree_rcu(objcg, rcu); 307 } 308 309 static struct obj_cgroup *obj_cgroup_alloc(void) 310 { 311 struct obj_cgroup *objcg; 312 int ret; 313 314 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); 315 if (!objcg) 316 return NULL; 317 318 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, 319 GFP_KERNEL); 320 if (ret) { 321 kfree(objcg); 322 return NULL; 323 } 324 INIT_LIST_HEAD(&objcg->list); 325 return objcg; 326 } 327 328 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, 329 struct mem_cgroup *parent) 330 { 331 struct obj_cgroup *objcg, *iter; 332 333 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); 334 335 spin_lock_irq(&css_set_lock); 336 337 /* 1) Ready to reparent active objcg. */ 338 list_add(&objcg->list, &memcg->objcg_list); 339 /* 2) Reparent active objcg and already reparented objcgs to parent. */ 340 list_for_each_entry(iter, &memcg->objcg_list, list) 341 WRITE_ONCE(iter->memcg, parent); 342 /* 3) Move already reparented objcgs to the parent's list */ 343 list_splice(&memcg->objcg_list, &parent->objcg_list); 344 345 spin_unlock_irq(&css_set_lock); 346 347 percpu_ref_kill(&objcg->refcnt); 348 } 349 350 /* 351 * This will be used as a shrinker list's index. 352 * The main reason for not using cgroup id for this: 353 * this works better in sparse environments, where we have a lot of memcgs, 354 * but only a few kmem-limited. Or also, if we have, for instance, 200 355 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 356 * 200 entry array for that. 357 * 358 * The current size of the caches array is stored in memcg_nr_cache_ids. It 359 * will double each time we have to increase it. 360 */ 361 static DEFINE_IDA(memcg_cache_ida); 362 int memcg_nr_cache_ids; 363 364 /* Protects memcg_nr_cache_ids */ 365 static DECLARE_RWSEM(memcg_cache_ids_sem); 366 367 void memcg_get_cache_ids(void) 368 { 369 down_read(&memcg_cache_ids_sem); 370 } 371 372 void memcg_put_cache_ids(void) 373 { 374 up_read(&memcg_cache_ids_sem); 375 } 376 377 /* 378 * MIN_SIZE is different than 1, because we would like to avoid going through 379 * the alloc/free process all the time. In a small machine, 4 kmem-limited 380 * cgroups is a reasonable guess. In the future, it could be a parameter or 381 * tunable, but that is strictly not necessary. 382 * 383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 384 * this constant directly from cgroup, but it is understandable that this is 385 * better kept as an internal representation in cgroup.c. In any case, the 386 * cgrp_id space is not getting any smaller, and we don't have to necessarily 387 * increase ours as well if it increases. 388 */ 389 #define MEMCG_CACHES_MIN_SIZE 4 390 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 391 392 /* 393 * A lot of the calls to the cache allocation functions are expected to be 394 * inlined by the compiler. 
Since the calls to memcg_slab_pre_alloc_hook() are 395 * conditional to this static branch, we'll have to allow modules that does 396 * kmem_cache_alloc and the such to see this symbol as well 397 */ 398 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); 399 EXPORT_SYMBOL(memcg_kmem_enabled_key); 400 #endif 401 402 /** 403 * mem_cgroup_css_from_page - css of the memcg associated with a page 404 * @page: page of interest 405 * 406 * If memcg is bound to the default hierarchy, css of the memcg associated 407 * with @page is returned. The returned css remains associated with @page 408 * until it is released. 409 * 410 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 411 * is returned. 412 */ 413 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 414 { 415 struct mem_cgroup *memcg; 416 417 memcg = page_memcg(page); 418 419 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 420 memcg = root_mem_cgroup; 421 422 return &memcg->css; 423 } 424 425 /** 426 * page_cgroup_ino - return inode number of the memcg a page is charged to 427 * @page: the page 428 * 429 * Look up the closest online ancestor of the memory cgroup @page is charged to 430 * and return its inode number or 0 if @page is not charged to any cgroup. It 431 * is safe to call this function without holding a reference to @page. 432 * 433 * Note, this function is inherently racy, because there is nothing to prevent 434 * the cgroup inode from getting torn down and potentially reallocated a moment 435 * after page_cgroup_ino() returns, so it only should be used by callers that 436 * do not care (such as procfs interfaces). 437 */ 438 ino_t page_cgroup_ino(struct page *page) 439 { 440 struct mem_cgroup *memcg; 441 unsigned long ino = 0; 442 443 rcu_read_lock(); 444 memcg = page_memcg_check(page); 445 446 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 447 memcg = parent_mem_cgroup(memcg); 448 if (memcg) 449 ino = cgroup_ino(memcg->css.cgroup); 450 rcu_read_unlock(); 451 return ino; 452 } 453 454 static struct mem_cgroup_per_node * 455 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) 456 { 457 int nid = page_to_nid(page); 458 459 return memcg->nodeinfo[nid]; 460 } 461 462 static struct mem_cgroup_tree_per_node * 463 soft_limit_tree_node(int nid) 464 { 465 return soft_limit_tree.rb_tree_per_node[nid]; 466 } 467 468 static struct mem_cgroup_tree_per_node * 469 soft_limit_tree_from_page(struct page *page) 470 { 471 int nid = page_to_nid(page); 472 473 return soft_limit_tree.rb_tree_per_node[nid]; 474 } 475 476 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 477 struct mem_cgroup_tree_per_node *mctz, 478 unsigned long new_usage_in_excess) 479 { 480 struct rb_node **p = &mctz->rb_root.rb_node; 481 struct rb_node *parent = NULL; 482 struct mem_cgroup_per_node *mz_node; 483 bool rightmost = true; 484 485 if (mz->on_tree) 486 return; 487 488 mz->usage_in_excess = new_usage_in_excess; 489 if (!mz->usage_in_excess) 490 return; 491 while (*p) { 492 parent = *p; 493 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 494 tree_node); 495 if (mz->usage_in_excess < mz_node->usage_in_excess) { 496 p = &(*p)->rb_left; 497 rightmost = false; 498 } else { 499 p = &(*p)->rb_right; 500 } 501 } 502 503 if (rightmost) 504 mctz->rb_rightmost = &mz->tree_node; 505 506 rb_link_node(&mz->tree_node, parent, p); 507 rb_insert_color(&mz->tree_node, &mctz->rb_root); 508 mz->on_tree = true; 509 } 510 511 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node 
*mz, 512 struct mem_cgroup_tree_per_node *mctz) 513 { 514 if (!mz->on_tree) 515 return; 516 517 if (&mz->tree_node == mctz->rb_rightmost) 518 mctz->rb_rightmost = rb_prev(&mz->tree_node); 519 520 rb_erase(&mz->tree_node, &mctz->rb_root); 521 mz->on_tree = false; 522 } 523 524 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 525 struct mem_cgroup_tree_per_node *mctz) 526 { 527 unsigned long flags; 528 529 spin_lock_irqsave(&mctz->lock, flags); 530 __mem_cgroup_remove_exceeded(mz, mctz); 531 spin_unlock_irqrestore(&mctz->lock, flags); 532 } 533 534 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 535 { 536 unsigned long nr_pages = page_counter_read(&memcg->memory); 537 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 538 unsigned long excess = 0; 539 540 if (nr_pages > soft_limit) 541 excess = nr_pages - soft_limit; 542 543 return excess; 544 } 545 546 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 547 { 548 unsigned long excess; 549 struct mem_cgroup_per_node *mz; 550 struct mem_cgroup_tree_per_node *mctz; 551 552 mctz = soft_limit_tree_from_page(page); 553 if (!mctz) 554 return; 555 /* 556 * Necessary to update all ancestors when hierarchy is used. 557 * because their event counter is not touched. 558 */ 559 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 560 mz = mem_cgroup_page_nodeinfo(memcg, page); 561 excess = soft_limit_excess(memcg); 562 /* 563 * We have to update the tree if mz is on RB-tree or 564 * mem is over its softlimit. 565 */ 566 if (excess || mz->on_tree) { 567 unsigned long flags; 568 569 spin_lock_irqsave(&mctz->lock, flags); 570 /* if on-tree, remove it */ 571 if (mz->on_tree) 572 __mem_cgroup_remove_exceeded(mz, mctz); 573 /* 574 * Insert again. mz->usage_in_excess will be updated. 575 * If excess is 0, no tree ops. 576 */ 577 __mem_cgroup_insert_exceeded(mz, mctz, excess); 578 spin_unlock_irqrestore(&mctz->lock, flags); 579 } 580 } 581 } 582 583 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 584 { 585 struct mem_cgroup_tree_per_node *mctz; 586 struct mem_cgroup_per_node *mz; 587 int nid; 588 589 for_each_node(nid) { 590 mz = memcg->nodeinfo[nid]; 591 mctz = soft_limit_tree_node(nid); 592 if (mctz) 593 mem_cgroup_remove_exceeded(mz, mctz); 594 } 595 } 596 597 static struct mem_cgroup_per_node * 598 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 599 { 600 struct mem_cgroup_per_node *mz; 601 602 retry: 603 mz = NULL; 604 if (!mctz->rb_rightmost) 605 goto done; /* Nothing to reclaim from */ 606 607 mz = rb_entry(mctz->rb_rightmost, 608 struct mem_cgroup_per_node, tree_node); 609 /* 610 * Remove the node now but someone else can add it back, 611 * we will to add it back at the end of reclaim to its correct 612 * position in the tree. 
613 */ 614 __mem_cgroup_remove_exceeded(mz, mctz); 615 if (!soft_limit_excess(mz->memcg) || 616 !css_tryget(&mz->memcg->css)) 617 goto retry; 618 done: 619 return mz; 620 } 621 622 static struct mem_cgroup_per_node * 623 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 624 { 625 struct mem_cgroup_per_node *mz; 626 627 spin_lock_irq(&mctz->lock); 628 mz = __mem_cgroup_largest_soft_limit_node(mctz); 629 spin_unlock_irq(&mctz->lock); 630 return mz; 631 } 632 633 /** 634 * __mod_memcg_state - update cgroup memory statistics 635 * @memcg: the memory cgroup 636 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 637 * @val: delta to add to the counter, can be negative 638 */ 639 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 640 { 641 if (mem_cgroup_disabled()) 642 return; 643 644 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); 645 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); 646 } 647 648 /* idx can be of type enum memcg_stat_item or node_stat_item. */ 649 static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) 650 { 651 long x = READ_ONCE(memcg->vmstats.state[idx]); 652 #ifdef CONFIG_SMP 653 if (x < 0) 654 x = 0; 655 #endif 656 return x; 657 } 658 659 /* idx can be of type enum memcg_stat_item or node_stat_item. */ 660 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) 661 { 662 long x = 0; 663 int cpu; 664 665 for_each_possible_cpu(cpu) 666 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu); 667 #ifdef CONFIG_SMP 668 if (x < 0) 669 x = 0; 670 #endif 671 return x; 672 } 673 674 static struct mem_cgroup_per_node * 675 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) 676 { 677 struct mem_cgroup *parent; 678 679 parent = parent_mem_cgroup(pn->memcg); 680 if (!parent) 681 return NULL; 682 return parent->nodeinfo[nid]; 683 } 684 685 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 686 int val) 687 { 688 struct mem_cgroup_per_node *pn; 689 struct mem_cgroup *memcg; 690 long x, threshold = MEMCG_CHARGE_BATCH; 691 692 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 693 memcg = pn->memcg; 694 695 /* Update memcg */ 696 __mod_memcg_state(memcg, idx, val); 697 698 /* Update lruvec */ 699 __this_cpu_add(pn->lruvec_stat_local->count[idx], val); 700 701 if (vmstat_item_in_bytes(idx)) 702 threshold <<= PAGE_SHIFT; 703 704 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 705 if (unlikely(abs(x) > threshold)) { 706 pg_data_t *pgdat = lruvec_pgdat(lruvec); 707 struct mem_cgroup_per_node *pi; 708 709 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 710 atomic_long_add(x, &pi->lruvec_stat[idx]); 711 x = 0; 712 } 713 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 714 } 715 716 /** 717 * __mod_lruvec_state - update lruvec memory statistics 718 * @lruvec: the lruvec 719 * @idx: the stat item 720 * @val: delta to add to the counter, can be negative 721 * 722 * The lruvec is the intersection of the NUMA node and a cgroup. This 723 * function updates the all three counters that are affected by a 724 * change of state at this level: per-node, per-cgroup, per-lruvec. 
725 */ 726 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 727 int val) 728 { 729 /* Update node */ 730 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 731 732 /* Update memcg and lruvec */ 733 if (!mem_cgroup_disabled()) 734 __mod_memcg_lruvec_state(lruvec, idx, val); 735 } 736 737 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, 738 int val) 739 { 740 struct page *head = compound_head(page); /* rmap on tail pages */ 741 struct mem_cgroup *memcg; 742 pg_data_t *pgdat = page_pgdat(page); 743 struct lruvec *lruvec; 744 745 rcu_read_lock(); 746 memcg = page_memcg(head); 747 /* Untracked pages have no memcg, no lruvec. Update only the node */ 748 if (!memcg) { 749 rcu_read_unlock(); 750 __mod_node_page_state(pgdat, idx, val); 751 return; 752 } 753 754 lruvec = mem_cgroup_lruvec(memcg, pgdat); 755 __mod_lruvec_state(lruvec, idx, val); 756 rcu_read_unlock(); 757 } 758 EXPORT_SYMBOL(__mod_lruvec_page_state); 759 760 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) 761 { 762 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 763 struct mem_cgroup *memcg; 764 struct lruvec *lruvec; 765 766 rcu_read_lock(); 767 memcg = mem_cgroup_from_obj(p); 768 769 /* 770 * Untracked pages have no memcg, no lruvec. Update only the 771 * node. If we reparent the slab objects to the root memcg, 772 * when we free the slab object, we need to update the per-memcg 773 * vmstats to keep it correct for the root memcg. 774 */ 775 if (!memcg) { 776 __mod_node_page_state(pgdat, idx, val); 777 } else { 778 lruvec = mem_cgroup_lruvec(memcg, pgdat); 779 __mod_lruvec_state(lruvec, idx, val); 780 } 781 rcu_read_unlock(); 782 } 783 784 /* 785 * mod_objcg_mlstate() may be called with irq enabled, so 786 * mod_memcg_lruvec_state() should be used. 787 */ 788 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 789 struct pglist_data *pgdat, 790 enum node_stat_item idx, int nr) 791 { 792 struct mem_cgroup *memcg; 793 struct lruvec *lruvec; 794 795 rcu_read_lock(); 796 memcg = obj_cgroup_memcg(objcg); 797 lruvec = mem_cgroup_lruvec(memcg, pgdat); 798 mod_memcg_lruvec_state(lruvec, idx, nr); 799 rcu_read_unlock(); 800 } 801 802 /** 803 * __count_memcg_events - account VM events in a cgroup 804 * @memcg: the memory cgroup 805 * @idx: the event item 806 * @count: the number of events that occurred 807 */ 808 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 809 unsigned long count) 810 { 811 if (mem_cgroup_disabled()) 812 return; 813 814 __this_cpu_add(memcg->vmstats_percpu->events[idx], count); 815 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); 816 } 817 818 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 819 { 820 return READ_ONCE(memcg->vmstats.events[event]); 821 } 822 823 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 824 { 825 long x = 0; 826 int cpu; 827 828 for_each_possible_cpu(cpu) 829 x += per_cpu(memcg->vmstats_percpu->events[event], cpu); 830 return x; 831 } 832 833 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 834 struct page *page, 835 int nr_pages) 836 { 837 /* pagein of a big page is an event. 
So, ignore page size */ 838 if (nr_pages > 0) 839 __count_memcg_events(memcg, PGPGIN, 1); 840 else { 841 __count_memcg_events(memcg, PGPGOUT, 1); 842 nr_pages = -nr_pages; /* for event */ 843 } 844 845 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 846 } 847 848 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 849 enum mem_cgroup_events_target target) 850 { 851 unsigned long val, next; 852 853 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 854 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 855 /* from time_after() in jiffies.h */ 856 if ((long)(next - val) < 0) { 857 switch (target) { 858 case MEM_CGROUP_TARGET_THRESH: 859 next = val + THRESHOLDS_EVENTS_TARGET; 860 break; 861 case MEM_CGROUP_TARGET_SOFTLIMIT: 862 next = val + SOFTLIMIT_EVENTS_TARGET; 863 break; 864 default: 865 break; 866 } 867 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 868 return true; 869 } 870 return false; 871 } 872 873 /* 874 * Check events in order. 875 * 876 */ 877 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 878 { 879 /* threshold event is triggered in finer grain than soft limit */ 880 if (unlikely(mem_cgroup_event_ratelimit(memcg, 881 MEM_CGROUP_TARGET_THRESH))) { 882 bool do_softlimit; 883 884 do_softlimit = mem_cgroup_event_ratelimit(memcg, 885 MEM_CGROUP_TARGET_SOFTLIMIT); 886 mem_cgroup_threshold(memcg); 887 if (unlikely(do_softlimit)) 888 mem_cgroup_update_tree(memcg, page); 889 } 890 } 891 892 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 893 { 894 /* 895 * mm_update_next_owner() may clear mm->owner to NULL 896 * if it races with swapoff, page migration, etc. 897 * So this can be called with p == NULL. 898 */ 899 if (unlikely(!p)) 900 return NULL; 901 902 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 903 } 904 EXPORT_SYMBOL(mem_cgroup_from_task); 905 906 static __always_inline struct mem_cgroup *active_memcg(void) 907 { 908 if (in_interrupt()) 909 return this_cpu_read(int_active_memcg); 910 else 911 return current->active_memcg; 912 } 913 914 /** 915 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 916 * @mm: mm from which memcg should be extracted. It can be NULL. 917 * 918 * Obtain a reference on mm->memcg and returns it if successful. If mm 919 * is NULL, then the memcg is chosen as follows: 920 * 1) The active memcg, if set. 921 * 2) current->mm->memcg, if available 922 * 3) root memcg 923 * If mem_cgroup is disabled, NULL is returned. 924 */ 925 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 926 { 927 struct mem_cgroup *memcg; 928 929 if (mem_cgroup_disabled()) 930 return NULL; 931 932 /* 933 * Page cache insertions can happen without an 934 * actual mm context, e.g. during disk probing 935 * on boot, loopback IO, acct() writes etc. 936 * 937 * No need to css_get on root memcg as the reference 938 * counting is disabled on the root level in the 939 * cgroup core. See CSS_NO_REF. 
940 */ 941 if (unlikely(!mm)) { 942 memcg = active_memcg(); 943 if (unlikely(memcg)) { 944 /* remote memcg must hold a ref */ 945 css_get(&memcg->css); 946 return memcg; 947 } 948 mm = current->mm; 949 if (unlikely(!mm)) 950 return root_mem_cgroup; 951 } 952 953 rcu_read_lock(); 954 do { 955 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 956 if (unlikely(!memcg)) 957 memcg = root_mem_cgroup; 958 } while (!css_tryget(&memcg->css)); 959 rcu_read_unlock(); 960 return memcg; 961 } 962 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 963 964 static __always_inline bool memcg_kmem_bypass(void) 965 { 966 /* Allow remote memcg charging from any context. */ 967 if (unlikely(active_memcg())) 968 return false; 969 970 /* Memcg to charge can't be determined. */ 971 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 972 return true; 973 974 return false; 975 } 976 977 /** 978 * mem_cgroup_iter - iterate over memory cgroup hierarchy 979 * @root: hierarchy root 980 * @prev: previously returned memcg, NULL on first invocation 981 * @reclaim: cookie for shared reclaim walks, NULL for full walks 982 * 983 * Returns references to children of the hierarchy below @root, or 984 * @root itself, or %NULL after a full round-trip. 985 * 986 * Caller must pass the return value in @prev on subsequent 987 * invocations for reference counting, or use mem_cgroup_iter_break() 988 * to cancel a hierarchy walk before the round-trip is complete. 989 * 990 * Reclaimers can specify a node in @reclaim to divide up the memcgs 991 * in the hierarchy among all concurrent reclaimers operating on the 992 * same node. 993 */ 994 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 995 struct mem_cgroup *prev, 996 struct mem_cgroup_reclaim_cookie *reclaim) 997 { 998 struct mem_cgroup_reclaim_iter *iter; 999 struct cgroup_subsys_state *css = NULL; 1000 struct mem_cgroup *memcg = NULL; 1001 struct mem_cgroup *pos = NULL; 1002 1003 if (mem_cgroup_disabled()) 1004 return NULL; 1005 1006 if (!root) 1007 root = root_mem_cgroup; 1008 1009 if (prev && !reclaim) 1010 pos = prev; 1011 1012 rcu_read_lock(); 1013 1014 if (reclaim) { 1015 struct mem_cgroup_per_node *mz; 1016 1017 mz = root->nodeinfo[reclaim->pgdat->node_id]; 1018 iter = &mz->iter; 1019 1020 if (prev && reclaim->generation != iter->generation) 1021 goto out_unlock; 1022 1023 while (1) { 1024 pos = READ_ONCE(iter->position); 1025 if (!pos || css_tryget(&pos->css)) 1026 break; 1027 /* 1028 * css reference reached zero, so iter->position will 1029 * be cleared by ->css_released. However, we should not 1030 * rely on this happening soon, because ->css_released 1031 * is called from a work queue, and by busy-waiting we 1032 * might block it. So we clear iter->position right 1033 * away. 1034 */ 1035 (void)cmpxchg(&iter->position, pos, NULL); 1036 } 1037 } 1038 1039 if (pos) 1040 css = &pos->css; 1041 1042 for (;;) { 1043 css = css_next_descendant_pre(css, &root->css); 1044 if (!css) { 1045 /* 1046 * Reclaimers share the hierarchy walk, and a 1047 * new one might jump in right at the end of 1048 * the hierarchy - make sure they see at least 1049 * one group and restart from the beginning. 1050 */ 1051 if (!prev) 1052 continue; 1053 break; 1054 } 1055 1056 /* 1057 * Verify the css and acquire a reference. The root 1058 * is provided by the caller, so we know it's alive 1059 * and kicking, and don't take an extra reference. 
1060 */ 1061 memcg = mem_cgroup_from_css(css); 1062 1063 if (css == &root->css) 1064 break; 1065 1066 if (css_tryget(css)) 1067 break; 1068 1069 memcg = NULL; 1070 } 1071 1072 if (reclaim) { 1073 /* 1074 * The position could have already been updated by a competing 1075 * thread, so check that the value hasn't changed since we read 1076 * it to avoid reclaiming from the same cgroup twice. 1077 */ 1078 (void)cmpxchg(&iter->position, pos, memcg); 1079 1080 if (pos) 1081 css_put(&pos->css); 1082 1083 if (!memcg) 1084 iter->generation++; 1085 else if (!prev) 1086 reclaim->generation = iter->generation; 1087 } 1088 1089 out_unlock: 1090 rcu_read_unlock(); 1091 if (prev && prev != root) 1092 css_put(&prev->css); 1093 1094 return memcg; 1095 } 1096 1097 /** 1098 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1099 * @root: hierarchy root 1100 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1101 */ 1102 void mem_cgroup_iter_break(struct mem_cgroup *root, 1103 struct mem_cgroup *prev) 1104 { 1105 if (!root) 1106 root = root_mem_cgroup; 1107 if (prev && prev != root) 1108 css_put(&prev->css); 1109 } 1110 1111 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1112 struct mem_cgroup *dead_memcg) 1113 { 1114 struct mem_cgroup_reclaim_iter *iter; 1115 struct mem_cgroup_per_node *mz; 1116 int nid; 1117 1118 for_each_node(nid) { 1119 mz = from->nodeinfo[nid]; 1120 iter = &mz->iter; 1121 cmpxchg(&iter->position, dead_memcg, NULL); 1122 } 1123 } 1124 1125 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1126 { 1127 struct mem_cgroup *memcg = dead_memcg; 1128 struct mem_cgroup *last; 1129 1130 do { 1131 __invalidate_reclaim_iterators(memcg, dead_memcg); 1132 last = memcg; 1133 } while ((memcg = parent_mem_cgroup(memcg))); 1134 1135 /* 1136 * When cgruop1 non-hierarchy mode is used, 1137 * parent_mem_cgroup() does not walk all the way up to the 1138 * cgroup root (root_mem_cgroup). So we have to handle 1139 * dead_memcg from cgroup root separately. 1140 */ 1141 if (last != root_mem_cgroup) 1142 __invalidate_reclaim_iterators(root_mem_cgroup, 1143 dead_memcg); 1144 } 1145 1146 /** 1147 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1148 * @memcg: hierarchy root 1149 * @fn: function to call for each task 1150 * @arg: argument passed to @fn 1151 * 1152 * This function iterates over tasks attached to @memcg or to any of its 1153 * descendants and calls @fn for each task. If @fn returns a non-zero 1154 * value, the function breaks the iteration loop and returns the value. 1155 * Otherwise, it will iterate over all tasks and return 0. 1156 * 1157 * This function must not be called for the root memory cgroup. 
1158 */ 1159 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1160 int (*fn)(struct task_struct *, void *), void *arg) 1161 { 1162 struct mem_cgroup *iter; 1163 int ret = 0; 1164 1165 BUG_ON(memcg == root_mem_cgroup); 1166 1167 for_each_mem_cgroup_tree(iter, memcg) { 1168 struct css_task_iter it; 1169 struct task_struct *task; 1170 1171 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1172 while (!ret && (task = css_task_iter_next(&it))) 1173 ret = fn(task, arg); 1174 css_task_iter_end(&it); 1175 if (ret) { 1176 mem_cgroup_iter_break(memcg, iter); 1177 break; 1178 } 1179 } 1180 return ret; 1181 } 1182 1183 #ifdef CONFIG_DEBUG_VM 1184 void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) 1185 { 1186 struct mem_cgroup *memcg; 1187 1188 if (mem_cgroup_disabled()) 1189 return; 1190 1191 memcg = page_memcg(page); 1192 1193 if (!memcg) 1194 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page); 1195 else 1196 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page); 1197 } 1198 #endif 1199 1200 /** 1201 * lock_page_lruvec - lock and return lruvec for a given page. 1202 * @page: the page 1203 * 1204 * These functions are safe to use under any of the following conditions: 1205 * - page locked 1206 * - PageLRU cleared 1207 * - lock_page_memcg() 1208 * - page->_refcount is zero 1209 */ 1210 struct lruvec *lock_page_lruvec(struct page *page) 1211 { 1212 struct lruvec *lruvec; 1213 1214 lruvec = mem_cgroup_page_lruvec(page); 1215 spin_lock(&lruvec->lru_lock); 1216 1217 lruvec_memcg_debug(lruvec, page); 1218 1219 return lruvec; 1220 } 1221 1222 struct lruvec *lock_page_lruvec_irq(struct page *page) 1223 { 1224 struct lruvec *lruvec; 1225 1226 lruvec = mem_cgroup_page_lruvec(page); 1227 spin_lock_irq(&lruvec->lru_lock); 1228 1229 lruvec_memcg_debug(lruvec, page); 1230 1231 return lruvec; 1232 } 1233 1234 struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags) 1235 { 1236 struct lruvec *lruvec; 1237 1238 lruvec = mem_cgroup_page_lruvec(page); 1239 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1240 1241 lruvec_memcg_debug(lruvec, page); 1242 1243 return lruvec; 1244 } 1245 1246 /** 1247 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1248 * @lruvec: mem_cgroup per zone lru vector 1249 * @lru: index of lru list the page is sitting on 1250 * @zid: zone id of the accounted pages 1251 * @nr_pages: positive when adding or negative when removing 1252 * 1253 * This function must be called under lru_lock, just before a page is added 1254 * to or just after a page is removed from an lru list (that ordering being 1255 * so as to allow it to check that lru_size 0 is consistent with list_empty). 
1256 */ 1257 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1258 int zid, int nr_pages) 1259 { 1260 struct mem_cgroup_per_node *mz; 1261 unsigned long *lru_size; 1262 long size; 1263 1264 if (mem_cgroup_disabled()) 1265 return; 1266 1267 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1268 lru_size = &mz->lru_zone_size[zid][lru]; 1269 1270 if (nr_pages < 0) 1271 *lru_size += nr_pages; 1272 1273 size = *lru_size; 1274 if (WARN_ONCE(size < 0, 1275 "%s(%p, %d, %d): lru_size %ld\n", 1276 __func__, lruvec, lru, nr_pages, size)) { 1277 VM_BUG_ON(1); 1278 *lru_size = 0; 1279 } 1280 1281 if (nr_pages > 0) 1282 *lru_size += nr_pages; 1283 } 1284 1285 /** 1286 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1287 * @memcg: the memory cgroup 1288 * 1289 * Returns the maximum amount of memory @mem can be charged with, in 1290 * pages. 1291 */ 1292 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1293 { 1294 unsigned long margin = 0; 1295 unsigned long count; 1296 unsigned long limit; 1297 1298 count = page_counter_read(&memcg->memory); 1299 limit = READ_ONCE(memcg->memory.max); 1300 if (count < limit) 1301 margin = limit - count; 1302 1303 if (do_memsw_account()) { 1304 count = page_counter_read(&memcg->memsw); 1305 limit = READ_ONCE(memcg->memsw.max); 1306 if (count < limit) 1307 margin = min(margin, limit - count); 1308 else 1309 margin = 0; 1310 } 1311 1312 return margin; 1313 } 1314 1315 /* 1316 * A routine for checking "mem" is under move_account() or not. 1317 * 1318 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1319 * moving cgroups. This is for waiting at high-memory pressure 1320 * caused by "move". 1321 */ 1322 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1323 { 1324 struct mem_cgroup *from; 1325 struct mem_cgroup *to; 1326 bool ret = false; 1327 /* 1328 * Unlike task_move routines, we access mc.to, mc.from not under 1329 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1330 */ 1331 spin_lock(&mc.lock); 1332 from = mc.from; 1333 to = mc.to; 1334 if (!from) 1335 goto unlock; 1336 1337 ret = mem_cgroup_is_descendant(from, memcg) || 1338 mem_cgroup_is_descendant(to, memcg); 1339 unlock: 1340 spin_unlock(&mc.lock); 1341 return ret; 1342 } 1343 1344 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1345 { 1346 if (mc.moving_task && current != mc.moving_task) { 1347 if (mem_cgroup_under_move(memcg)) { 1348 DEFINE_WAIT(wait); 1349 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1350 /* moving charge context might have finished. 
*/ 1351 if (mc.moving_task) 1352 schedule(); 1353 finish_wait(&mc.waitq, &wait); 1354 return true; 1355 } 1356 } 1357 return false; 1358 } 1359 1360 struct memory_stat { 1361 const char *name; 1362 unsigned int idx; 1363 }; 1364 1365 static const struct memory_stat memory_stats[] = { 1366 { "anon", NR_ANON_MAPPED }, 1367 { "file", NR_FILE_PAGES }, 1368 { "kernel_stack", NR_KERNEL_STACK_KB }, 1369 { "pagetables", NR_PAGETABLE }, 1370 { "percpu", MEMCG_PERCPU_B }, 1371 { "sock", MEMCG_SOCK }, 1372 { "shmem", NR_SHMEM }, 1373 { "file_mapped", NR_FILE_MAPPED }, 1374 { "file_dirty", NR_FILE_DIRTY }, 1375 { "file_writeback", NR_WRITEBACK }, 1376 #ifdef CONFIG_SWAP 1377 { "swapcached", NR_SWAPCACHE }, 1378 #endif 1379 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1380 { "anon_thp", NR_ANON_THPS }, 1381 { "file_thp", NR_FILE_THPS }, 1382 { "shmem_thp", NR_SHMEM_THPS }, 1383 #endif 1384 { "inactive_anon", NR_INACTIVE_ANON }, 1385 { "active_anon", NR_ACTIVE_ANON }, 1386 { "inactive_file", NR_INACTIVE_FILE }, 1387 { "active_file", NR_ACTIVE_FILE }, 1388 { "unevictable", NR_UNEVICTABLE }, 1389 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, 1390 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, 1391 1392 /* The memory events */ 1393 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON }, 1394 { "workingset_refault_file", WORKINGSET_REFAULT_FILE }, 1395 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON }, 1396 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE }, 1397 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON }, 1398 { "workingset_restore_file", WORKINGSET_RESTORE_FILE }, 1399 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM }, 1400 }; 1401 1402 /* Translate stat items to the correct unit for memory.stat output */ 1403 static int memcg_page_state_unit(int item) 1404 { 1405 switch (item) { 1406 case MEMCG_PERCPU_B: 1407 case NR_SLAB_RECLAIMABLE_B: 1408 case NR_SLAB_UNRECLAIMABLE_B: 1409 case WORKINGSET_REFAULT_ANON: 1410 case WORKINGSET_REFAULT_FILE: 1411 case WORKINGSET_ACTIVATE_ANON: 1412 case WORKINGSET_ACTIVATE_FILE: 1413 case WORKINGSET_RESTORE_ANON: 1414 case WORKINGSET_RESTORE_FILE: 1415 case WORKINGSET_NODERECLAIM: 1416 return 1; 1417 case NR_KERNEL_STACK_KB: 1418 return SZ_1K; 1419 default: 1420 return PAGE_SIZE; 1421 } 1422 } 1423 1424 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, 1425 int item) 1426 { 1427 return memcg_page_state(memcg, item) * memcg_page_state_unit(item); 1428 } 1429 1430 static char *memory_stat_format(struct mem_cgroup *memcg) 1431 { 1432 struct seq_buf s; 1433 int i; 1434 1435 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); 1436 if (!s.buffer) 1437 return NULL; 1438 1439 /* 1440 * Provide statistics on the state of the memory subsystem as 1441 * well as cumulative event counters that show past behavior. 
1442 * 1443 * This list is ordered following a combination of these gradients: 1444 * 1) generic big picture -> specifics and details 1445 * 2) reflecting userspace activity -> reflecting kernel heuristics 1446 * 1447 * Current memory state: 1448 */ 1449 cgroup_rstat_flush(memcg->css.cgroup); 1450 1451 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1452 u64 size; 1453 1454 size = memcg_page_state_output(memcg, memory_stats[i].idx); 1455 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); 1456 1457 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1458 size += memcg_page_state_output(memcg, 1459 NR_SLAB_RECLAIMABLE_B); 1460 seq_buf_printf(&s, "slab %llu\n", size); 1461 } 1462 } 1463 1464 /* Accumulated memory events */ 1465 1466 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT), 1467 memcg_events(memcg, PGFAULT)); 1468 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), 1469 memcg_events(memcg, PGMAJFAULT)); 1470 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL), 1471 memcg_events(memcg, PGREFILL)); 1472 seq_buf_printf(&s, "pgscan %lu\n", 1473 memcg_events(memcg, PGSCAN_KSWAPD) + 1474 memcg_events(memcg, PGSCAN_DIRECT)); 1475 seq_buf_printf(&s, "pgsteal %lu\n", 1476 memcg_events(memcg, PGSTEAL_KSWAPD) + 1477 memcg_events(memcg, PGSTEAL_DIRECT)); 1478 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE), 1479 memcg_events(memcg, PGACTIVATE)); 1480 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE), 1481 memcg_events(memcg, PGDEACTIVATE)); 1482 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE), 1483 memcg_events(memcg, PGLAZYFREE)); 1484 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED), 1485 memcg_events(memcg, PGLAZYFREED)); 1486 1487 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1488 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC), 1489 memcg_events(memcg, THP_FAULT_ALLOC)); 1490 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC), 1491 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 1492 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1493 1494 /* The above should easily fit into one page */ 1495 WARN_ON_ONCE(seq_buf_has_overflowed(&s)); 1496 1497 return s.buffer; 1498 } 1499 1500 #define K(x) ((x) << (PAGE_SHIFT-10)) 1501 /** 1502 * mem_cgroup_print_oom_context: Print OOM information relevant to 1503 * memory controller. 1504 * @memcg: The memory cgroup that went over limit 1505 * @p: Task that is going to be killed 1506 * 1507 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1508 * enabled 1509 */ 1510 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1511 { 1512 rcu_read_lock(); 1513 1514 if (memcg) { 1515 pr_cont(",oom_memcg="); 1516 pr_cont_cgroup_path(memcg->css.cgroup); 1517 } else 1518 pr_cont(",global_oom"); 1519 if (p) { 1520 pr_cont(",task_memcg="); 1521 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1522 } 1523 rcu_read_unlock(); 1524 } 1525 1526 /** 1527 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1528 * memory controller. 
1529 * @memcg: The memory cgroup that went over limit 1530 */ 1531 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1532 { 1533 char *buf; 1534 1535 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1536 K((u64)page_counter_read(&memcg->memory)), 1537 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1538 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1539 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1540 K((u64)page_counter_read(&memcg->swap)), 1541 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1542 else { 1543 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1544 K((u64)page_counter_read(&memcg->memsw)), 1545 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1546 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1547 K((u64)page_counter_read(&memcg->kmem)), 1548 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1549 } 1550 1551 pr_info("Memory cgroup stats for "); 1552 pr_cont_cgroup_path(memcg->css.cgroup); 1553 pr_cont(":"); 1554 buf = memory_stat_format(memcg); 1555 if (!buf) 1556 return; 1557 pr_info("%s", buf); 1558 kfree(buf); 1559 } 1560 1561 /* 1562 * Return the memory (and swap, if configured) limit for a memcg. 1563 */ 1564 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1565 { 1566 unsigned long max = READ_ONCE(memcg->memory.max); 1567 1568 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 1569 if (mem_cgroup_swappiness(memcg)) 1570 max += min(READ_ONCE(memcg->swap.max), 1571 (unsigned long)total_swap_pages); 1572 } else { /* v1 */ 1573 if (mem_cgroup_swappiness(memcg)) { 1574 /* Calculate swap excess capacity from memsw limit */ 1575 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1576 1577 max += min(swap, (unsigned long)total_swap_pages); 1578 } 1579 } 1580 return max; 1581 } 1582 1583 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) 1584 { 1585 return page_counter_read(&memcg->memory); 1586 } 1587 1588 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1589 int order) 1590 { 1591 struct oom_control oc = { 1592 .zonelist = NULL, 1593 .nodemask = NULL, 1594 .memcg = memcg, 1595 .gfp_mask = gfp_mask, 1596 .order = order, 1597 }; 1598 bool ret = true; 1599 1600 if (mutex_lock_killable(&oom_lock)) 1601 return true; 1602 1603 if (mem_cgroup_margin(memcg) >= (1 << order)) 1604 goto unlock; 1605 1606 /* 1607 * A few threads which were not waiting at mutex_lock_killable() can 1608 * fail to bail out. Therefore, check again after holding oom_lock. 1609 */ 1610 ret = should_force_charge() || out_of_memory(&oc); 1611 1612 unlock: 1613 mutex_unlock(&oom_lock); 1614 return ret; 1615 } 1616 1617 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1618 pg_data_t *pgdat, 1619 gfp_t gfp_mask, 1620 unsigned long *total_scanned) 1621 { 1622 struct mem_cgroup *victim = NULL; 1623 int total = 0; 1624 int loop = 0; 1625 unsigned long excess; 1626 unsigned long nr_scanned; 1627 struct mem_cgroup_reclaim_cookie reclaim = { 1628 .pgdat = pgdat, 1629 }; 1630 1631 excess = soft_limit_excess(root_memcg); 1632 1633 while (1) { 1634 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1635 if (!victim) { 1636 loop++; 1637 if (loop >= 2) { 1638 /* 1639 * If we have not been able to reclaim 1640 * anything, it might because there are 1641 * no reclaimable pages under this hierarchy 1642 */ 1643 if (!total) 1644 break; 1645 /* 1646 * We want to do more targeted reclaim. 
1647 * excess >> 2 is not to excessive so as to 1648 * reclaim too much, nor too less that we keep 1649 * coming back to reclaim from this cgroup 1650 */ 1651 if (total >= (excess >> 2) || 1652 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1653 break; 1654 } 1655 continue; 1656 } 1657 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1658 pgdat, &nr_scanned); 1659 *total_scanned += nr_scanned; 1660 if (!soft_limit_excess(root_memcg)) 1661 break; 1662 } 1663 mem_cgroup_iter_break(root_memcg, victim); 1664 return total; 1665 } 1666 1667 #ifdef CONFIG_LOCKDEP 1668 static struct lockdep_map memcg_oom_lock_dep_map = { 1669 .name = "memcg_oom_lock", 1670 }; 1671 #endif 1672 1673 static DEFINE_SPINLOCK(memcg_oom_lock); 1674 1675 /* 1676 * Check OOM-Killer is already running under our hierarchy. 1677 * If someone is running, return false. 1678 */ 1679 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1680 { 1681 struct mem_cgroup *iter, *failed = NULL; 1682 1683 spin_lock(&memcg_oom_lock); 1684 1685 for_each_mem_cgroup_tree(iter, memcg) { 1686 if (iter->oom_lock) { 1687 /* 1688 * this subtree of our hierarchy is already locked 1689 * so we cannot give a lock. 1690 */ 1691 failed = iter; 1692 mem_cgroup_iter_break(memcg, iter); 1693 break; 1694 } else 1695 iter->oom_lock = true; 1696 } 1697 1698 if (failed) { 1699 /* 1700 * OK, we failed to lock the whole subtree so we have 1701 * to clean up what we set up to the failing subtree 1702 */ 1703 for_each_mem_cgroup_tree(iter, memcg) { 1704 if (iter == failed) { 1705 mem_cgroup_iter_break(memcg, iter); 1706 break; 1707 } 1708 iter->oom_lock = false; 1709 } 1710 } else 1711 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1712 1713 spin_unlock(&memcg_oom_lock); 1714 1715 return !failed; 1716 } 1717 1718 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1719 { 1720 struct mem_cgroup *iter; 1721 1722 spin_lock(&memcg_oom_lock); 1723 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_); 1724 for_each_mem_cgroup_tree(iter, memcg) 1725 iter->oom_lock = false; 1726 spin_unlock(&memcg_oom_lock); 1727 } 1728 1729 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1730 { 1731 struct mem_cgroup *iter; 1732 1733 spin_lock(&memcg_oom_lock); 1734 for_each_mem_cgroup_tree(iter, memcg) 1735 iter->under_oom++; 1736 spin_unlock(&memcg_oom_lock); 1737 } 1738 1739 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1740 { 1741 struct mem_cgroup *iter; 1742 1743 /* 1744 * Be careful about under_oom underflows because a child memcg 1745 * could have been added after mem_cgroup_mark_under_oom. 
1746 */ 1747 spin_lock(&memcg_oom_lock); 1748 for_each_mem_cgroup_tree(iter, memcg) 1749 if (iter->under_oom > 0) 1750 iter->under_oom--; 1751 spin_unlock(&memcg_oom_lock); 1752 } 1753 1754 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1755 1756 struct oom_wait_info { 1757 struct mem_cgroup *memcg; 1758 wait_queue_entry_t wait; 1759 }; 1760 1761 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1762 unsigned mode, int sync, void *arg) 1763 { 1764 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1765 struct mem_cgroup *oom_wait_memcg; 1766 struct oom_wait_info *oom_wait_info; 1767 1768 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1769 oom_wait_memcg = oom_wait_info->memcg; 1770 1771 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1772 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1773 return 0; 1774 return autoremove_wake_function(wait, mode, sync, arg); 1775 } 1776 1777 static void memcg_oom_recover(struct mem_cgroup *memcg) 1778 { 1779 /* 1780 * For the following lockless ->under_oom test, the only required 1781 * guarantee is that it must see the state asserted by an OOM when 1782 * this function is called as a result of userland actions 1783 * triggered by the notification of the OOM. This is trivially 1784 * achieved by invoking mem_cgroup_mark_under_oom() before 1785 * triggering notification. 1786 */ 1787 if (memcg && memcg->under_oom) 1788 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1789 } 1790 1791 enum oom_status { 1792 OOM_SUCCESS, 1793 OOM_FAILED, 1794 OOM_ASYNC, 1795 OOM_SKIPPED 1796 }; 1797 1798 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1799 { 1800 enum oom_status ret; 1801 bool locked; 1802 1803 if (order > PAGE_ALLOC_COSTLY_ORDER) 1804 return OOM_SKIPPED; 1805 1806 memcg_memory_event(memcg, MEMCG_OOM); 1807 1808 /* 1809 * We are in the middle of the charge context here, so we 1810 * don't want to block when potentially sitting on a callstack 1811 * that holds all kinds of filesystem and mm locks. 1812 * 1813 * cgroup1 allows disabling the OOM killer and waiting for outside 1814 * handling until the charge can succeed; remember the context and put 1815 * the task to sleep at the end of the page fault when all locks are 1816 * released. 1817 * 1818 * On the other hand, in-kernel OOM killer allows for an async victim 1819 * memory reclaim (oom_reaper) and that means that we are not solely 1820 * relying on the oom victim to make a forward progress and we can 1821 * invoke the oom killer here. 1822 * 1823 * Please note that mem_cgroup_out_of_memory might fail to find a 1824 * victim and then we have to bail out from the charge path. 
1825 */ 1826 if (memcg->oom_kill_disable) { 1827 if (!current->in_user_fault) 1828 return OOM_SKIPPED; 1829 css_get(&memcg->css); 1830 current->memcg_in_oom = memcg; 1831 current->memcg_oom_gfp_mask = mask; 1832 current->memcg_oom_order = order; 1833 1834 return OOM_ASYNC; 1835 } 1836 1837 mem_cgroup_mark_under_oom(memcg); 1838 1839 locked = mem_cgroup_oom_trylock(memcg); 1840 1841 if (locked) 1842 mem_cgroup_oom_notify(memcg); 1843 1844 mem_cgroup_unmark_under_oom(memcg); 1845 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1846 ret = OOM_SUCCESS; 1847 else 1848 ret = OOM_FAILED; 1849 1850 if (locked) 1851 mem_cgroup_oom_unlock(memcg); 1852 1853 return ret; 1854 } 1855 1856 /** 1857 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1858 * @handle: actually kill/wait or just clean up the OOM state 1859 * 1860 * This has to be called at the end of a page fault if the memcg OOM 1861 * handler was enabled. 1862 * 1863 * Memcg supports userspace OOM handling where failed allocations must 1864 * sleep on a waitqueue until the userspace task resolves the 1865 * situation. Sleeping directly in the charge context with all kinds 1866 * of locks held is not a good idea, instead we remember an OOM state 1867 * in the task and mem_cgroup_oom_synchronize() has to be called at 1868 * the end of the page fault to complete the OOM handling. 1869 * 1870 * Returns %true if an ongoing memcg OOM situation was detected and 1871 * completed, %false otherwise. 1872 */ 1873 bool mem_cgroup_oom_synchronize(bool handle) 1874 { 1875 struct mem_cgroup *memcg = current->memcg_in_oom; 1876 struct oom_wait_info owait; 1877 bool locked; 1878 1879 /* OOM is global, do not handle */ 1880 if (!memcg) 1881 return false; 1882 1883 if (!handle) 1884 goto cleanup; 1885 1886 owait.memcg = memcg; 1887 owait.wait.flags = 0; 1888 owait.wait.func = memcg_oom_wake_function; 1889 owait.wait.private = current; 1890 INIT_LIST_HEAD(&owait.wait.entry); 1891 1892 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1893 mem_cgroup_mark_under_oom(memcg); 1894 1895 locked = mem_cgroup_oom_trylock(memcg); 1896 1897 if (locked) 1898 mem_cgroup_oom_notify(memcg); 1899 1900 if (locked && !memcg->oom_kill_disable) { 1901 mem_cgroup_unmark_under_oom(memcg); 1902 finish_wait(&memcg_oom_waitq, &owait.wait); 1903 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1904 current->memcg_oom_order); 1905 } else { 1906 schedule(); 1907 mem_cgroup_unmark_under_oom(memcg); 1908 finish_wait(&memcg_oom_waitq, &owait.wait); 1909 } 1910 1911 if (locked) { 1912 mem_cgroup_oom_unlock(memcg); 1913 /* 1914 * There is no guarantee that an OOM-lock contender 1915 * sees the wakeups triggered by the OOM kill 1916 * uncharges. Wake any sleepers explicitly. 1917 */ 1918 memcg_oom_recover(memcg); 1919 } 1920 cleanup: 1921 current->memcg_in_oom = NULL; 1922 css_put(&memcg->css); 1923 return true; 1924 } 1925 1926 /** 1927 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1928 * @victim: task to be killed by the OOM killer 1929 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1930 * 1931 * Returns a pointer to a memory cgroup, which has to be cleaned up 1932 * by killing all belonging OOM-killable tasks. 1933 * 1934 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 
1935 */ 1936 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1937 struct mem_cgroup *oom_domain) 1938 { 1939 struct mem_cgroup *oom_group = NULL; 1940 struct mem_cgroup *memcg; 1941 1942 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1943 return NULL; 1944 1945 if (!oom_domain) 1946 oom_domain = root_mem_cgroup; 1947 1948 rcu_read_lock(); 1949 1950 memcg = mem_cgroup_from_task(victim); 1951 if (memcg == root_mem_cgroup) 1952 goto out; 1953 1954 /* 1955 * If the victim task has been asynchronously moved to a different 1956 * memory cgroup, we might end up killing tasks outside oom_domain. 1957 * In this case it's better to ignore memory.group.oom. 1958 */ 1959 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1960 goto out; 1961 1962 /* 1963 * Traverse the memory cgroup hierarchy from the victim task's 1964 * cgroup up to the OOMing cgroup (or root) to find the 1965 * highest-level memory cgroup with oom.group set. 1966 */ 1967 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1968 if (memcg->oom_group) 1969 oom_group = memcg; 1970 1971 if (memcg == oom_domain) 1972 break; 1973 } 1974 1975 if (oom_group) 1976 css_get(&oom_group->css); 1977 out: 1978 rcu_read_unlock(); 1979 1980 return oom_group; 1981 } 1982 1983 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 1984 { 1985 pr_info("Tasks in "); 1986 pr_cont_cgroup_path(memcg->css.cgroup); 1987 pr_cont(" are going to be killed due to memory.oom.group set\n"); 1988 } 1989 1990 /** 1991 * lock_page_memcg - lock a page and memcg binding 1992 * @page: the page 1993 * 1994 * This function protects unlocked LRU pages from being moved to 1995 * another cgroup. 1996 * 1997 * It ensures lifetime of the locked memcg. Caller is responsible 1998 * for the lifetime of the page. 1999 */ 2000 void lock_page_memcg(struct page *page) 2001 { 2002 struct page *head = compound_head(page); /* rmap on tail pages */ 2003 struct mem_cgroup *memcg; 2004 unsigned long flags; 2005 2006 /* 2007 * The RCU lock is held throughout the transaction. The fast 2008 * path can get away without acquiring the memcg->move_lock 2009 * because page moving starts with an RCU grace period. 2010 */ 2011 rcu_read_lock(); 2012 2013 if (mem_cgroup_disabled()) 2014 return; 2015 again: 2016 memcg = page_memcg(head); 2017 if (unlikely(!memcg)) 2018 return; 2019 2020 #ifdef CONFIG_PROVE_LOCKING 2021 local_irq_save(flags); 2022 might_lock(&memcg->move_lock); 2023 local_irq_restore(flags); 2024 #endif 2025 2026 if (atomic_read(&memcg->moving_account) <= 0) 2027 return; 2028 2029 spin_lock_irqsave(&memcg->move_lock, flags); 2030 if (memcg != page_memcg(head)) { 2031 spin_unlock_irqrestore(&memcg->move_lock, flags); 2032 goto again; 2033 } 2034 2035 /* 2036 * When charge migration first begins, we can have multiple 2037 * critical sections holding the fast-path RCU lock and one 2038 * holding the slowpath move_lock. Track the task who has the 2039 * move_lock for unlock_page_memcg(). 
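 *
 * (unlock_page_memcg() only drops move_lock when it finds current
 * recorded here; tasks that stayed on the RCU-only fast path simply
 * drop the RCU lock on unlock.)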
2040 */ 2041 memcg->move_lock_task = current; 2042 memcg->move_lock_flags = flags; 2043 } 2044 EXPORT_SYMBOL(lock_page_memcg); 2045 2046 static void __unlock_page_memcg(struct mem_cgroup *memcg) 2047 { 2048 if (memcg && memcg->move_lock_task == current) { 2049 unsigned long flags = memcg->move_lock_flags; 2050 2051 memcg->move_lock_task = NULL; 2052 memcg->move_lock_flags = 0; 2053 2054 spin_unlock_irqrestore(&memcg->move_lock, flags); 2055 } 2056 2057 rcu_read_unlock(); 2058 } 2059 2060 /** 2061 * unlock_page_memcg - unlock a page and memcg binding 2062 * @page: the page 2063 */ 2064 void unlock_page_memcg(struct page *page) 2065 { 2066 struct page *head = compound_head(page); 2067 2068 __unlock_page_memcg(page_memcg(head)); 2069 } 2070 EXPORT_SYMBOL(unlock_page_memcg); 2071 2072 struct obj_stock { 2073 #ifdef CONFIG_MEMCG_KMEM 2074 struct obj_cgroup *cached_objcg; 2075 struct pglist_data *cached_pgdat; 2076 unsigned int nr_bytes; 2077 int nr_slab_reclaimable_b; 2078 int nr_slab_unreclaimable_b; 2079 #else 2080 int dummy[0]; 2081 #endif 2082 }; 2083 2084 struct memcg_stock_pcp { 2085 struct mem_cgroup *cached; /* this never be root cgroup */ 2086 unsigned int nr_pages; 2087 struct obj_stock task_obj; 2088 struct obj_stock irq_obj; 2089 2090 struct work_struct work; 2091 unsigned long flags; 2092 #define FLUSHING_CACHED_CHARGE 0 2093 }; 2094 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2095 static DEFINE_MUTEX(percpu_charge_mutex); 2096 2097 #ifdef CONFIG_MEMCG_KMEM 2098 static void drain_obj_stock(struct obj_stock *stock); 2099 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2100 struct mem_cgroup *root_memcg); 2101 2102 #else 2103 static inline void drain_obj_stock(struct obj_stock *stock) 2104 { 2105 } 2106 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2107 struct mem_cgroup *root_memcg) 2108 { 2109 return false; 2110 } 2111 #endif 2112 2113 /* 2114 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable 2115 * sequence used in this case to access content from object stock is slow. 2116 * To optimize for user context access, there are now two object stocks for 2117 * task context and interrupt context access respectively. 2118 * 2119 * The task context object stock can be accessed by disabling preemption only 2120 * which is cheap in non-preempt kernel. The interrupt context object stock 2121 * can only be accessed after disabling interrupt. User context code can 2122 * access interrupt object stock, but not vice versa. 2123 */ 2124 static inline struct obj_stock *get_obj_stock(unsigned long *pflags) 2125 { 2126 struct memcg_stock_pcp *stock; 2127 2128 if (likely(in_task())) { 2129 *pflags = 0UL; 2130 preempt_disable(); 2131 stock = this_cpu_ptr(&memcg_stock); 2132 return &stock->task_obj; 2133 } 2134 2135 local_irq_save(*pflags); 2136 stock = this_cpu_ptr(&memcg_stock); 2137 return &stock->irq_obj; 2138 } 2139 2140 static inline void put_obj_stock(unsigned long flags) 2141 { 2142 if (likely(in_task())) 2143 preempt_enable(); 2144 else 2145 local_irq_restore(flags); 2146 } 2147 2148 /** 2149 * consume_stock: Try to consume stocked charge on this cpu. 2150 * @memcg: memcg to consume from. 2151 * @nr_pages: how many pages to charge. 2152 * 2153 * The charges will only happen if @memcg matches the current cpu's memcg 2154 * stock, and at least @nr_pages are available in that stock. Failure to 2155 * service an allocation will refill the stock. 2156 * 2157 * returns true if successful, false otherwise. 
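 *
 * For example, a single-page fault makes try_charge() charge a full
 * MEMCG_CHARGE_BATCH against the page counters and park the surplus here
 * via refill_stock(); subsequent single-page charges from the same memcg
 * on this CPU are then served from the stock without touching the
 * counters at all.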
2158 */ 2159 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2160 { 2161 struct memcg_stock_pcp *stock; 2162 unsigned long flags; 2163 bool ret = false; 2164 2165 if (nr_pages > MEMCG_CHARGE_BATCH) 2166 return ret; 2167 2168 local_irq_save(flags); 2169 2170 stock = this_cpu_ptr(&memcg_stock); 2171 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2172 stock->nr_pages -= nr_pages; 2173 ret = true; 2174 } 2175 2176 local_irq_restore(flags); 2177 2178 return ret; 2179 } 2180 2181 /* 2182 * Drain the charges cached in the percpu stock back to the page counters and reset the cached information. 2183 */ 2184 static void drain_stock(struct memcg_stock_pcp *stock) 2185 { 2186 struct mem_cgroup *old = stock->cached; 2187 2188 if (!old) 2189 return; 2190 2191 if (stock->nr_pages) { 2192 page_counter_uncharge(&old->memory, stock->nr_pages); 2193 if (do_memsw_account()) 2194 page_counter_uncharge(&old->memsw, stock->nr_pages); 2195 stock->nr_pages = 0; 2196 } 2197 2198 css_put(&old->css); 2199 stock->cached = NULL; 2200 } 2201 2202 static void drain_local_stock(struct work_struct *dummy) 2203 { 2204 struct memcg_stock_pcp *stock; 2205 unsigned long flags; 2206 2207 /* 2208 * The only protection from memory hotplug vs. drain_stock races is 2209 * that we always operate on the local CPU stock here with IRQs disabled. 2210 */ 2211 local_irq_save(flags); 2212 2213 stock = this_cpu_ptr(&memcg_stock); 2214 drain_obj_stock(&stock->irq_obj); 2215 if (in_task()) 2216 drain_obj_stock(&stock->task_obj); 2217 drain_stock(stock); 2218 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2219 2220 local_irq_restore(flags); 2221 } 2222 2223 /* 2224 * Cache @nr_pages worth of charges in the local per-cpu area, 2225 * to be consumed by consume_stock() later. 2226 */ 2227 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2228 { 2229 struct memcg_stock_pcp *stock; 2230 unsigned long flags; 2231 2232 local_irq_save(flags); 2233 2234 stock = this_cpu_ptr(&memcg_stock); 2235 if (stock->cached != memcg) { /* reset if necessary */ 2236 drain_stock(stock); 2237 css_get(&memcg->css); 2238 stock->cached = memcg; 2239 } 2240 stock->nr_pages += nr_pages; 2241 2242 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2243 drain_stock(stock); 2244 2245 local_irq_restore(flags); 2246 } 2247 2248 /* 2249 * Drain all per-CPU charge caches for the given root_memcg and the 2250 * whole subtree of the hierarchy under it. 2251 */ 2252 static void drain_all_stock(struct mem_cgroup *root_memcg) 2253 { 2254 int cpu, curcpu; 2255 2256 /* If someone's already draining, avoid running more workers. */ 2257 if (!mutex_trylock(&percpu_charge_mutex)) 2258 return; 2259 /* 2260 * Notify other cpus that a system-wide "drain" is running. 2261 * We do not care about races with the cpu hotplug because cpu down 2262 * as well as workers from this path always operate on the local 2263 * per-cpu data. CPU up doesn't touch memcg_stock at all.
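 *
 * Note that get_cpu() below is only needed to pin the CPU so that the
 * local stock can be drained synchronously via drain_local_stock()
 * instead of being bounced through the workqueue like the remote ones.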
2264 */ 2265 curcpu = get_cpu(); 2266 for_each_online_cpu(cpu) { 2267 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2268 struct mem_cgroup *memcg; 2269 bool flush = false; 2270 2271 rcu_read_lock(); 2272 memcg = stock->cached; 2273 if (memcg && stock->nr_pages && 2274 mem_cgroup_is_descendant(memcg, root_memcg)) 2275 flush = true; 2276 if (obj_stock_flush_required(stock, root_memcg)) 2277 flush = true; 2278 rcu_read_unlock(); 2279 2280 if (flush && 2281 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2282 if (cpu == curcpu) 2283 drain_local_stock(&stock->work); 2284 else 2285 schedule_work_on(cpu, &stock->work); 2286 } 2287 } 2288 put_cpu(); 2289 mutex_unlock(&percpu_charge_mutex); 2290 } 2291 2292 static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu) 2293 { 2294 int nid; 2295 2296 for_each_node(nid) { 2297 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 2298 unsigned long stat[NR_VM_NODE_STAT_ITEMS]; 2299 struct batched_lruvec_stat *lstatc; 2300 int i; 2301 2302 lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu); 2303 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 2304 stat[i] = lstatc->count[i]; 2305 lstatc->count[i] = 0; 2306 } 2307 2308 do { 2309 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 2310 atomic_long_add(stat[i], &pn->lruvec_stat[i]); 2311 } while ((pn = parent_nodeinfo(pn, nid))); 2312 } 2313 } 2314 2315 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2316 { 2317 struct memcg_stock_pcp *stock; 2318 struct mem_cgroup *memcg; 2319 2320 stock = &per_cpu(memcg_stock, cpu); 2321 drain_stock(stock); 2322 2323 for_each_mem_cgroup(memcg) 2324 memcg_flush_lruvec_page_state(memcg, cpu); 2325 2326 return 0; 2327 } 2328 2329 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2330 unsigned int nr_pages, 2331 gfp_t gfp_mask) 2332 { 2333 unsigned long nr_reclaimed = 0; 2334 2335 do { 2336 unsigned long pflags; 2337 2338 if (page_counter_read(&memcg->memory) <= 2339 READ_ONCE(memcg->memory.high)) 2340 continue; 2341 2342 memcg_memory_event(memcg, MEMCG_HIGH); 2343 2344 psi_memstall_enter(&pflags); 2345 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2346 gfp_mask, true); 2347 psi_memstall_leave(&pflags); 2348 } while ((memcg = parent_mem_cgroup(memcg)) && 2349 !mem_cgroup_is_root(memcg)); 2350 2351 return nr_reclaimed; 2352 } 2353 2354 static void high_work_func(struct work_struct *work) 2355 { 2356 struct mem_cgroup *memcg; 2357 2358 memcg = container_of(work, struct mem_cgroup, high_work); 2359 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2360 } 2361 2362 /* 2363 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2364 * enough to still cause a significant slowdown in most cases, while still 2365 * allowing diagnostics and tracing to proceed without becoming stuck. 2366 */ 2367 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2368 2369 /* 2370 * When calculating the delay, we use these either side of the exponentiation to 2371 * maintain precision and scale to a reasonable number of jiffies (see the table 2372 * below. 2373 * 2374 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2375 * overage ratio to a delay. 2376 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2377 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2378 * to produce a reasonable delay curve. 
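 *
 * Putting the two shifts together, the delay computed in
 * calculate_high_delay() is roughly
 *
 *   penalty_jiffies ~= ((usage - high) / high)^2 *
 *                      2^(MEMCG_DELAY_PRECISION_SHIFT - MEMCG_DELAY_SCALING_SHIFT) * HZ
 *
 * e.g. running 10% over the high limit costs about 0.1^2 * 2^6 = 0.64s
 * per allocation batch, which matches the ~639ms entry for 110M in the
 * table below.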
2379 * 2380 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2381 * reasonable delay curve compared to precision-adjusted overage, not 2382 * penalising heavily at first, but still making sure that growth beyond the 2383 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2384 * example, with a high of 100 megabytes: 2385 * 2386 * +-------+------------------------+ 2387 * | usage | time to allocate in ms | 2388 * +-------+------------------------+ 2389 * | 100M | 0 | 2390 * | 101M | 6 | 2391 * | 102M | 25 | 2392 * | 103M | 57 | 2393 * | 104M | 102 | 2394 * | 105M | 159 | 2395 * | 106M | 230 | 2396 * | 107M | 313 | 2397 * | 108M | 409 | 2398 * | 109M | 518 | 2399 * | 110M | 639 | 2400 * | 111M | 774 | 2401 * | 112M | 921 | 2402 * | 113M | 1081 | 2403 * | 114M | 1254 | 2404 * | 115M | 1439 | 2405 * | 116M | 1638 | 2406 * | 117M | 1849 | 2407 * | 118M | 2000 | 2408 * | 119M | 2000 | 2409 * | 120M | 2000 | 2410 * +-------+------------------------+ 2411 */ 2412 #define MEMCG_DELAY_PRECISION_SHIFT 20 2413 #define MEMCG_DELAY_SCALING_SHIFT 14 2414 2415 static u64 calculate_overage(unsigned long usage, unsigned long high) 2416 { 2417 u64 overage; 2418 2419 if (usage <= high) 2420 return 0; 2421 2422 /* 2423 * Prevent division by 0 in overage calculation by acting as if 2424 * it was a threshold of 1 page 2425 */ 2426 high = max(high, 1UL); 2427 2428 overage = usage - high; 2429 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2430 return div64_u64(overage, high); 2431 } 2432 2433 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2434 { 2435 u64 overage, max_overage = 0; 2436 2437 do { 2438 overage = calculate_overage(page_counter_read(&memcg->memory), 2439 READ_ONCE(memcg->memory.high)); 2440 max_overage = max(overage, max_overage); 2441 } while ((memcg = parent_mem_cgroup(memcg)) && 2442 !mem_cgroup_is_root(memcg)); 2443 2444 return max_overage; 2445 } 2446 2447 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2448 { 2449 u64 overage, max_overage = 0; 2450 2451 do { 2452 overage = calculate_overage(page_counter_read(&memcg->swap), 2453 READ_ONCE(memcg->swap.high)); 2454 if (overage) 2455 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2456 max_overage = max(overage, max_overage); 2457 } while ((memcg = parent_mem_cgroup(memcg)) && 2458 !mem_cgroup_is_root(memcg)); 2459 2460 return max_overage; 2461 } 2462 2463 /* 2464 * Get the number of jiffies that we should penalise a mischievous cgroup which 2465 * is exceeding its memory.high by checking both it and its ancestors. 2466 */ 2467 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2468 unsigned int nr_pages, 2469 u64 max_overage) 2470 { 2471 unsigned long penalty_jiffies; 2472 2473 if (!max_overage) 2474 return 0; 2475 2476 /* 2477 * We use overage compared to memory.high to calculate the number of 2478 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2479 * fairly lenient on small overages, and increasingly harsh when the 2480 * memcg in question makes it clear that it has no intention of stopping 2481 * its crazy behaviour, so we exponentially increase the delay based on 2482 * overage amount. 2483 */ 2484 penalty_jiffies = max_overage * max_overage * HZ; 2485 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2486 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2487 2488 /* 2489 * Factor in the task's own contribution to the overage, such that four 2490 * N-sized allocations are throttled approximately the same as one 2491 * 4N-sized allocation. 
2492 * 2493 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2494 * larger the current charge patch is than that. 2495 */ 2496 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2497 } 2498 2499 /* 2500 * Scheduled by try_charge() to be executed from the userland return path 2501 * and reclaims memory over the high limit. 2502 */ 2503 void mem_cgroup_handle_over_high(void) 2504 { 2505 unsigned long penalty_jiffies; 2506 unsigned long pflags; 2507 unsigned long nr_reclaimed; 2508 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2509 int nr_retries = MAX_RECLAIM_RETRIES; 2510 struct mem_cgroup *memcg; 2511 bool in_retry = false; 2512 2513 if (likely(!nr_pages)) 2514 return; 2515 2516 memcg = get_mem_cgroup_from_mm(current->mm); 2517 current->memcg_nr_pages_over_high = 0; 2518 2519 retry_reclaim: 2520 /* 2521 * The allocating task should reclaim at least the batch size, but for 2522 * subsequent retries we only want to do what's necessary to prevent oom 2523 * or breaching resource isolation. 2524 * 2525 * This is distinct from memory.max or page allocator behaviour because 2526 * memory.high is currently batched, whereas memory.max and the page 2527 * allocator run every time an allocation is made. 2528 */ 2529 nr_reclaimed = reclaim_high(memcg, 2530 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2531 GFP_KERNEL); 2532 2533 /* 2534 * memory.high is breached and reclaim is unable to keep up. Throttle 2535 * allocators proactively to slow down excessive growth. 2536 */ 2537 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2538 mem_find_max_overage(memcg)); 2539 2540 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2541 swap_find_max_overage(memcg)); 2542 2543 /* 2544 * Clamp the max delay per usermode return so as to still keep the 2545 * application moving forwards and also permit diagnostics, albeit 2546 * extremely slowly. 2547 */ 2548 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2549 2550 /* 2551 * Don't sleep if the amount of jiffies this memcg owes us is so low 2552 * that it's not even worth doing, in an attempt to be nice to those who 2553 * go only a small amount over their memory.high value and maybe haven't 2554 * been aggressively reclaimed enough yet. 2555 */ 2556 if (penalty_jiffies <= HZ / 100) 2557 goto out; 2558 2559 /* 2560 * If reclaim is making forward progress but we're still over 2561 * memory.high, we want to encourage that rather than doing allocator 2562 * throttling. 2563 */ 2564 if (nr_reclaimed || nr_retries--) { 2565 in_retry = true; 2566 goto retry_reclaim; 2567 } 2568 2569 /* 2570 * If we exit early, we're guaranteed to die (since 2571 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2572 * need to account for any ill-begotten jiffies to pay them off later. 
2573 */ 2574 psi_memstall_enter(&pflags); 2575 schedule_timeout_killable(penalty_jiffies); 2576 psi_memstall_leave(&pflags); 2577 2578 out: 2579 css_put(&memcg->css); 2580 } 2581 2582 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2583 unsigned int nr_pages) 2584 { 2585 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2586 int nr_retries = MAX_RECLAIM_RETRIES; 2587 struct mem_cgroup *mem_over_limit; 2588 struct page_counter *counter; 2589 enum oom_status oom_status; 2590 unsigned long nr_reclaimed; 2591 bool may_swap = true; 2592 bool drained = false; 2593 unsigned long pflags; 2594 2595 retry: 2596 if (consume_stock(memcg, nr_pages)) 2597 return 0; 2598 2599 if (!do_memsw_account() || 2600 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2601 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2602 goto done_restock; 2603 if (do_memsw_account()) 2604 page_counter_uncharge(&memcg->memsw, batch); 2605 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2606 } else { 2607 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2608 may_swap = false; 2609 } 2610 2611 if (batch > nr_pages) { 2612 batch = nr_pages; 2613 goto retry; 2614 } 2615 2616 /* 2617 * Memcg doesn't have a dedicated reserve for atomic 2618 * allocations. But like the global atomic pool, we need to 2619 * put the burden of reclaim on regular allocation requests 2620 * and let these go through as privileged allocations. 2621 */ 2622 if (gfp_mask & __GFP_ATOMIC) 2623 goto force; 2624 2625 /* 2626 * Unlike in global OOM situations, memcg is not in a physical 2627 * memory shortage. Allow dying and OOM-killed tasks to 2628 * bypass the last charges so that they can exit quickly and 2629 * free their memory. 2630 */ 2631 if (unlikely(should_force_charge())) 2632 goto force; 2633 2634 /* 2635 * Prevent unbounded recursion when reclaim operations need to 2636 * allocate memory. This might exceed the limits temporarily, 2637 * but we prefer facilitating memory reclaim and getting back 2638 * under the limit over triggering OOM kills in these cases. 2639 */ 2640 if (unlikely(current->flags & PF_MEMALLOC)) 2641 goto force; 2642 2643 if (unlikely(task_in_memcg_oom(current))) 2644 goto nomem; 2645 2646 if (!gfpflags_allow_blocking(gfp_mask)) 2647 goto nomem; 2648 2649 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2650 2651 psi_memstall_enter(&pflags); 2652 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2653 gfp_mask, may_swap); 2654 psi_memstall_leave(&pflags); 2655 2656 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2657 goto retry; 2658 2659 if (!drained) { 2660 drain_all_stock(mem_over_limit); 2661 drained = true; 2662 goto retry; 2663 } 2664 2665 if (gfp_mask & __GFP_NORETRY) 2666 goto nomem; 2667 /* 2668 * Even though the limit is exceeded at this point, reclaim 2669 * may have been able to free some pages. Retry the charge 2670 * before killing the task. 2671 * 2672 * Only for regular pages, though: huge pages are rather 2673 * unlikely to succeed so close to the limit, and we fall back 2674 * to regular pages anyway in case of failure. 2675 */ 2676 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2677 goto retry; 2678 /* 2679 * At task move, charge accounts can be doubly counted. So, it's 2680 * better to wait until the end of task_move if something is going on. 
2681 */ 2682 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2683 goto retry; 2684 2685 if (nr_retries--) 2686 goto retry; 2687 2688 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2689 goto nomem; 2690 2691 if (fatal_signal_pending(current)) 2692 goto force; 2693 2694 /* 2695 * keep retrying as long as the memcg oom killer is able to make 2696 * a forward progress or bypass the charge if the oom killer 2697 * couldn't make any progress. 2698 */ 2699 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2700 get_order(nr_pages * PAGE_SIZE)); 2701 switch (oom_status) { 2702 case OOM_SUCCESS: 2703 nr_retries = MAX_RECLAIM_RETRIES; 2704 goto retry; 2705 case OOM_FAILED: 2706 goto force; 2707 default: 2708 goto nomem; 2709 } 2710 nomem: 2711 if (!(gfp_mask & __GFP_NOFAIL)) 2712 return -ENOMEM; 2713 force: 2714 /* 2715 * The allocation either can't fail or will lead to more memory 2716 * being freed very soon. Allow memory usage go over the limit 2717 * temporarily by force charging it. 2718 */ 2719 page_counter_charge(&memcg->memory, nr_pages); 2720 if (do_memsw_account()) 2721 page_counter_charge(&memcg->memsw, nr_pages); 2722 2723 return 0; 2724 2725 done_restock: 2726 if (batch > nr_pages) 2727 refill_stock(memcg, batch - nr_pages); 2728 2729 /* 2730 * If the hierarchy is above the normal consumption range, schedule 2731 * reclaim on returning to userland. We can perform reclaim here 2732 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2733 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2734 * not recorded as it most likely matches current's and won't 2735 * change in the meantime. As high limit is checked again before 2736 * reclaim, the cost of mismatch is negligible. 2737 */ 2738 do { 2739 bool mem_high, swap_high; 2740 2741 mem_high = page_counter_read(&memcg->memory) > 2742 READ_ONCE(memcg->memory.high); 2743 swap_high = page_counter_read(&memcg->swap) > 2744 READ_ONCE(memcg->swap.high); 2745 2746 /* Don't bother a random interrupted task */ 2747 if (in_interrupt()) { 2748 if (mem_high) { 2749 schedule_work(&memcg->high_work); 2750 break; 2751 } 2752 continue; 2753 } 2754 2755 if (mem_high || swap_high) { 2756 /* 2757 * The allocating tasks in this cgroup will need to do 2758 * reclaim or be throttled to prevent further growth 2759 * of the memory or swap footprints. 2760 * 2761 * Target some best-effort fairness between the tasks, 2762 * and distribute reclaim work and delay penalties 2763 * based on how much each task is actually allocating. 
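 *
 * The reclaim and throttling themselves happen in
 * mem_cgroup_handle_over_high(), which runs on the return-to-userspace
 * path thanks to the set_notify_resume() call below.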
2764 */ 2765 current->memcg_nr_pages_over_high += batch; 2766 set_notify_resume(current); 2767 break; 2768 } 2769 } while ((memcg = parent_mem_cgroup(memcg))); 2770 2771 return 0; 2772 } 2773 2774 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2775 unsigned int nr_pages) 2776 { 2777 if (mem_cgroup_is_root(memcg)) 2778 return 0; 2779 2780 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2781 } 2782 2783 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2784 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2785 { 2786 if (mem_cgroup_is_root(memcg)) 2787 return; 2788 2789 page_counter_uncharge(&memcg->memory, nr_pages); 2790 if (do_memsw_account()) 2791 page_counter_uncharge(&memcg->memsw, nr_pages); 2792 } 2793 #endif 2794 2795 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2796 { 2797 VM_BUG_ON_PAGE(page_memcg(page), page); 2798 /* 2799 * Any of the following ensures page's memcg stability: 2800 * 2801 * - the page lock 2802 * - LRU isolation 2803 * - lock_page_memcg() 2804 * - exclusive reference 2805 */ 2806 page->memcg_data = (unsigned long)memcg; 2807 } 2808 2809 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) 2810 { 2811 struct mem_cgroup *memcg; 2812 2813 rcu_read_lock(); 2814 retry: 2815 memcg = obj_cgroup_memcg(objcg); 2816 if (unlikely(!css_tryget(&memcg->css))) 2817 goto retry; 2818 rcu_read_unlock(); 2819 2820 return memcg; 2821 } 2822 2823 #ifdef CONFIG_MEMCG_KMEM 2824 /* 2825 * The allocated objcg pointers array is not accounted directly. 2826 * Moreover, it should not come from DMA buffer and is not readily 2827 * reclaimable. So those GFP bits should be masked off. 2828 */ 2829 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) 2830 2831 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2832 gfp_t gfp, bool new_page) 2833 { 2834 unsigned int objects = objs_per_slab_page(s, page); 2835 unsigned long memcg_data; 2836 void *vec; 2837 2838 gfp &= ~OBJCGS_CLEAR_MASK; 2839 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2840 page_to_nid(page)); 2841 if (!vec) 2842 return -ENOMEM; 2843 2844 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; 2845 if (new_page) { 2846 /* 2847 * If the slab page is brand new and nobody can yet access 2848 * it's memcg_data, no synchronization is required and 2849 * memcg_data can be simply assigned. 2850 */ 2851 page->memcg_data = memcg_data; 2852 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) { 2853 /* 2854 * If the slab page is already in use, somebody can allocate 2855 * and assign obj_cgroups in parallel. In this case the existing 2856 * objcg vector should be reused. 2857 */ 2858 kfree(vec); 2859 return 0; 2860 } 2861 2862 kmemleak_not_leak(vec); 2863 return 0; 2864 } 2865 2866 /* 2867 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2868 * 2869 * A passed kernel object can be a slab object or a generic kernel page, so 2870 * different mechanisms for getting the memory cgroup pointer should be used. 2871 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 2872 * can not know for sure how the kernel object is implemented. 2873 * mem_cgroup_from_obj() can be safely used in such cases. 2874 * 2875 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2876 * cgroup_mutex, etc. 
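 *
 * A minimal usage sketch (hypothetical caller, names purely illustrative):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(ptr);
 *	if (memcg)
 *		... inspect memcg while still under RCU ...
 *	rcu_read_unlock();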
2877 */ 2878 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2879 { 2880 struct page *page; 2881 2882 if (mem_cgroup_disabled()) 2883 return NULL; 2884 2885 page = virt_to_head_page(p); 2886 2887 /* 2888 * Slab objects are accounted individually, not per-page. 2889 * Memcg membership data for each individual object is saved in 2890 * the page->obj_cgroups. 2891 */ 2892 if (page_objcgs_check(page)) { 2893 struct obj_cgroup *objcg; 2894 unsigned int off; 2895 2896 off = obj_to_index(page->slab_cache, page, p); 2897 objcg = page_objcgs(page)[off]; 2898 if (objcg) 2899 return obj_cgroup_memcg(objcg); 2900 2901 return NULL; 2902 } 2903 2904 /* 2905 * page_memcg_check() is used here, because page_has_obj_cgroups() 2906 * check above could fail because the object cgroups vector wasn't set 2907 * at that moment, but it can be set concurrently. 2908 * page_memcg_check(page) will guarantee that a proper memory 2909 * cgroup pointer or NULL will be returned. 2910 */ 2911 return page_memcg_check(page); 2912 } 2913 2914 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2915 { 2916 struct obj_cgroup *objcg = NULL; 2917 struct mem_cgroup *memcg; 2918 2919 if (memcg_kmem_bypass()) 2920 return NULL; 2921 2922 rcu_read_lock(); 2923 if (unlikely(active_memcg())) 2924 memcg = active_memcg(); 2925 else 2926 memcg = mem_cgroup_from_task(current); 2927 2928 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2929 objcg = rcu_dereference(memcg->objcg); 2930 if (objcg && obj_cgroup_tryget(objcg)) 2931 break; 2932 objcg = NULL; 2933 } 2934 rcu_read_unlock(); 2935 2936 return objcg; 2937 } 2938 2939 static int memcg_alloc_cache_id(void) 2940 { 2941 int id, size; 2942 int err; 2943 2944 id = ida_simple_get(&memcg_cache_ida, 2945 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2946 if (id < 0) 2947 return id; 2948 2949 if (id < memcg_nr_cache_ids) 2950 return id; 2951 2952 /* 2953 * There's no space for the new id in memcg_caches arrays, 2954 * so we have to grow them. 2955 */ 2956 down_write(&memcg_cache_ids_sem); 2957 2958 size = 2 * (id + 1); 2959 if (size < MEMCG_CACHES_MIN_SIZE) 2960 size = MEMCG_CACHES_MIN_SIZE; 2961 else if (size > MEMCG_CACHES_MAX_SIZE) 2962 size = MEMCG_CACHES_MAX_SIZE; 2963 2964 err = memcg_update_all_list_lrus(size); 2965 if (!err) 2966 memcg_nr_cache_ids = size; 2967 2968 up_write(&memcg_cache_ids_sem); 2969 2970 if (err) { 2971 ida_simple_remove(&memcg_cache_ida, id); 2972 return err; 2973 } 2974 return id; 2975 } 2976 2977 static void memcg_free_cache_id(int id) 2978 { 2979 ida_simple_remove(&memcg_cache_ida, id); 2980 } 2981 2982 /* 2983 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 2984 * @objcg: object cgroup to uncharge 2985 * @nr_pages: number of pages to uncharge 2986 */ 2987 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 2988 unsigned int nr_pages) 2989 { 2990 struct mem_cgroup *memcg; 2991 2992 memcg = get_mem_cgroup_from_objcg(objcg); 2993 2994 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2995 page_counter_uncharge(&memcg->kmem, nr_pages); 2996 refill_stock(memcg, nr_pages); 2997 2998 css_put(&memcg->css); 2999 } 3000 3001 /* 3002 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 3003 * @objcg: object cgroup to charge 3004 * @gfp: reclaim mode 3005 * @nr_pages: number of pages to charge 3006 * 3007 * Returns 0 on success, an error code on failure. 
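 *
 * Note that on cgroup2 (the default hierarchy) the pages are charged to
 * the memcg's memory counter only; the separate kmem counter below is
 * used for cgroup1's kmem limit.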
3008 */ 3009 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 3010 unsigned int nr_pages) 3011 { 3012 struct page_counter *counter; 3013 struct mem_cgroup *memcg; 3014 int ret; 3015 3016 memcg = get_mem_cgroup_from_objcg(objcg); 3017 3018 ret = try_charge_memcg(memcg, gfp, nr_pages); 3019 if (ret) 3020 goto out; 3021 3022 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3023 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3024 3025 /* 3026 * Enforce __GFP_NOFAIL allocation because callers are not 3027 * prepared to see failures and likely do not have any failure 3028 * handling code. 3029 */ 3030 if (gfp & __GFP_NOFAIL) { 3031 page_counter_charge(&memcg->kmem, nr_pages); 3032 goto out; 3033 } 3034 cancel_charge(memcg, nr_pages); 3035 ret = -ENOMEM; 3036 } 3037 out: 3038 css_put(&memcg->css); 3039 3040 return ret; 3041 } 3042 3043 /** 3044 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3045 * @page: page to charge 3046 * @gfp: reclaim mode 3047 * @order: allocation order 3048 * 3049 * Returns 0 on success, an error code on failure. 3050 */ 3051 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3052 { 3053 struct obj_cgroup *objcg; 3054 int ret = 0; 3055 3056 objcg = get_obj_cgroup_from_current(); 3057 if (objcg) { 3058 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 3059 if (!ret) { 3060 page->memcg_data = (unsigned long)objcg | 3061 MEMCG_DATA_KMEM; 3062 return 0; 3063 } 3064 obj_cgroup_put(objcg); 3065 } 3066 return ret; 3067 } 3068 3069 /** 3070 * __memcg_kmem_uncharge_page: uncharge a kmem page 3071 * @page: page to uncharge 3072 * @order: allocation order 3073 */ 3074 void __memcg_kmem_uncharge_page(struct page *page, int order) 3075 { 3076 struct obj_cgroup *objcg; 3077 unsigned int nr_pages = 1 << order; 3078 3079 if (!PageMemcgKmem(page)) 3080 return; 3081 3082 objcg = __page_objcg(page); 3083 obj_cgroup_uncharge_pages(objcg, nr_pages); 3084 page->memcg_data = 0; 3085 obj_cgroup_put(objcg); 3086 } 3087 3088 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 3089 enum node_stat_item idx, int nr) 3090 { 3091 unsigned long flags; 3092 struct obj_stock *stock = get_obj_stock(&flags); 3093 int *bytes; 3094 3095 /* 3096 * Save vmstat data in stock and skip vmstat array update unless 3097 * accumulating over a page of vmstat data or when pgdat or idx 3098 * changes. 3099 */ 3100 if (stock->cached_objcg != objcg) { 3101 drain_obj_stock(stock); 3102 obj_cgroup_get(objcg); 3103 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3104 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3105 stock->cached_objcg = objcg; 3106 stock->cached_pgdat = pgdat; 3107 } else if (stock->cached_pgdat != pgdat) { 3108 /* Flush the existing cached vmstat data */ 3109 if (stock->nr_slab_reclaimable_b) { 3110 mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B, 3111 stock->nr_slab_reclaimable_b); 3112 stock->nr_slab_reclaimable_b = 0; 3113 } 3114 if (stock->nr_slab_unreclaimable_b) { 3115 mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B, 3116 stock->nr_slab_unreclaimable_b); 3117 stock->nr_slab_unreclaimable_b = 0; 3118 } 3119 stock->cached_pgdat = pgdat; 3120 } 3121 3122 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 3123 : &stock->nr_slab_unreclaimable_b; 3124 /* 3125 * Even for large object >= PAGE_SIZE, the vmstat data will still be 3126 * cached locally at least once before pushing it out. 
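 *
 * E.g. a stream of small slab allocations accumulates in *bytes and is
 * only folded into the vmstat counters via mod_objcg_mlstate() once the
 * absolute value exceeds PAGE_SIZE or the stock is drained.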
3127 */ 3128 if (!*bytes) { 3129 *bytes = nr; 3130 nr = 0; 3131 } else { 3132 *bytes += nr; 3133 if (abs(*bytes) > PAGE_SIZE) { 3134 nr = *bytes; 3135 *bytes = 0; 3136 } else { 3137 nr = 0; 3138 } 3139 } 3140 if (nr) 3141 mod_objcg_mlstate(objcg, pgdat, idx, nr); 3142 3143 put_obj_stock(flags); 3144 } 3145 3146 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3147 { 3148 unsigned long flags; 3149 struct obj_stock *stock = get_obj_stock(&flags); 3150 bool ret = false; 3151 3152 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3153 stock->nr_bytes -= nr_bytes; 3154 ret = true; 3155 } 3156 3157 put_obj_stock(flags); 3158 3159 return ret; 3160 } 3161 3162 static void drain_obj_stock(struct obj_stock *stock) 3163 { 3164 struct obj_cgroup *old = stock->cached_objcg; 3165 3166 if (!old) 3167 return; 3168 3169 if (stock->nr_bytes) { 3170 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3171 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3172 3173 if (nr_pages) 3174 obj_cgroup_uncharge_pages(old, nr_pages); 3175 3176 /* 3177 * The leftover is flushed to the centralized per-memcg value. 3178 * On the next attempt to refill obj stock it will be moved 3179 * to a per-cpu stock (probably, on an other CPU), see 3180 * refill_obj_stock(). 3181 * 3182 * How often it's flushed is a trade-off between the memory 3183 * limit enforcement accuracy and potential CPU contention, 3184 * so it might be changed in the future. 3185 */ 3186 atomic_add(nr_bytes, &old->nr_charged_bytes); 3187 stock->nr_bytes = 0; 3188 } 3189 3190 /* 3191 * Flush the vmstat data in current stock 3192 */ 3193 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3194 if (stock->nr_slab_reclaimable_b) { 3195 mod_objcg_mlstate(old, stock->cached_pgdat, 3196 NR_SLAB_RECLAIMABLE_B, 3197 stock->nr_slab_reclaimable_b); 3198 stock->nr_slab_reclaimable_b = 0; 3199 } 3200 if (stock->nr_slab_unreclaimable_b) { 3201 mod_objcg_mlstate(old, stock->cached_pgdat, 3202 NR_SLAB_UNRECLAIMABLE_B, 3203 stock->nr_slab_unreclaimable_b); 3204 stock->nr_slab_unreclaimable_b = 0; 3205 } 3206 stock->cached_pgdat = NULL; 3207 } 3208 3209 obj_cgroup_put(old); 3210 stock->cached_objcg = NULL; 3211 } 3212 3213 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3214 struct mem_cgroup *root_memcg) 3215 { 3216 struct mem_cgroup *memcg; 3217 3218 if (in_task() && stock->task_obj.cached_objcg) { 3219 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg); 3220 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3221 return true; 3222 } 3223 if (stock->irq_obj.cached_objcg) { 3224 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg); 3225 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3226 return true; 3227 } 3228 3229 return false; 3230 } 3231 3232 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 3233 bool allow_uncharge) 3234 { 3235 unsigned long flags; 3236 struct obj_stock *stock = get_obj_stock(&flags); 3237 unsigned int nr_pages = 0; 3238 3239 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3240 drain_obj_stock(stock); 3241 obj_cgroup_get(objcg); 3242 stock->cached_objcg = objcg; 3243 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3244 ? 
atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3245 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3246 } 3247 stock->nr_bytes += nr_bytes; 3248 3249 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3250 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3251 stock->nr_bytes &= (PAGE_SIZE - 1); 3252 } 3253 3254 put_obj_stock(flags); 3255 3256 if (nr_pages) 3257 obj_cgroup_uncharge_pages(objcg, nr_pages); 3258 } 3259 3260 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3261 { 3262 unsigned int nr_pages, nr_bytes; 3263 int ret; 3264 3265 if (consume_obj_stock(objcg, size)) 3266 return 0; 3267 3268 /* 3269 * In theory, objcg->nr_charged_bytes can have enough 3270 * pre-charged bytes to satisfy the allocation. However, 3271 * flushing objcg->nr_charged_bytes requires two atomic 3272 * operations, and objcg->nr_charged_bytes can't be big. 3273 * The shared objcg->nr_charged_bytes can also become a 3274 * performance bottleneck if all tasks of the same memcg are 3275 * trying to update it. So it's better to ignore it and try 3276 * grab some new pages. The stock's nr_bytes will be flushed to 3277 * objcg->nr_charged_bytes later on when objcg changes. 3278 * 3279 * The stock's nr_bytes may contain enough pre-charged bytes 3280 * to allow one less page from being charged, but we can't rely 3281 * on the pre-charged bytes not being changed outside of 3282 * consume_obj_stock() or refill_obj_stock(). So ignore those 3283 * pre-charged bytes as well when charging pages. To avoid a 3284 * page uncharge right after a page charge, we set the 3285 * allow_uncharge flag to false when calling refill_obj_stock() 3286 * to temporarily allow the pre-charged bytes to exceed the page 3287 * size limit. The maximum reachable value of the pre-charged 3288 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3289 * race. 3290 */ 3291 nr_pages = size >> PAGE_SHIFT; 3292 nr_bytes = size & (PAGE_SIZE - 1); 3293 3294 if (nr_bytes) 3295 nr_pages += 1; 3296 3297 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3298 if (!ret && nr_bytes) 3299 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 3300 3301 return ret; 3302 } 3303 3304 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3305 { 3306 refill_obj_stock(objcg, size, true); 3307 } 3308 3309 #endif /* CONFIG_MEMCG_KMEM */ 3310 3311 /* 3312 * Because page_memcg(head) is not set on tails, set it now. 3313 */ 3314 void split_page_memcg(struct page *head, unsigned int nr) 3315 { 3316 struct mem_cgroup *memcg = page_memcg(head); 3317 int i; 3318 3319 if (mem_cgroup_disabled() || !memcg) 3320 return; 3321 3322 for (i = 1; i < nr; i++) 3323 head[i].memcg_data = head->memcg_data; 3324 3325 if (PageMemcgKmem(head)) 3326 obj_cgroup_get_many(__page_objcg(head), nr - 1); 3327 else 3328 css_get_many(&memcg->css, nr - 1); 3329 } 3330 3331 #ifdef CONFIG_MEMCG_SWAP 3332 /** 3333 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3334 * @entry: swap entry to be moved 3335 * @from: mem_cgroup which the entry is moved from 3336 * @to: mem_cgroup which the entry is moved to 3337 * 3338 * It succeeds only when the swap_cgroup's record for this entry is the same 3339 * as the mem_cgroup's id of @from. 3340 * 3341 * Returns 0 on success, -EINVAL on failure. 3342 * 3343 * The caller must have charged to @to, IOW, called page_counter_charge() about 3344 * both res and memsw, and called css_get(). 
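 *
 * (The swap_cgroup record is simply the owning memcg's id stored per
 * swap entry, so the move boils down to the single swap_cgroup_cmpxchg()
 * below.)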
3345 */ 3346 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3347 struct mem_cgroup *from, struct mem_cgroup *to) 3348 { 3349 unsigned short old_id, new_id; 3350 3351 old_id = mem_cgroup_id(from); 3352 new_id = mem_cgroup_id(to); 3353 3354 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3355 mod_memcg_state(from, MEMCG_SWAP, -1); 3356 mod_memcg_state(to, MEMCG_SWAP, 1); 3357 return 0; 3358 } 3359 return -EINVAL; 3360 } 3361 #else 3362 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3363 struct mem_cgroup *from, struct mem_cgroup *to) 3364 { 3365 return -EINVAL; 3366 } 3367 #endif 3368 3369 static DEFINE_MUTEX(memcg_max_mutex); 3370 3371 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3372 unsigned long max, bool memsw) 3373 { 3374 bool enlarge = false; 3375 bool drained = false; 3376 int ret; 3377 bool limits_invariant; 3378 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3379 3380 do { 3381 if (signal_pending(current)) { 3382 ret = -EINTR; 3383 break; 3384 } 3385 3386 mutex_lock(&memcg_max_mutex); 3387 /* 3388 * Make sure that the new limit (memsw or memory limit) doesn't 3389 * break our basic invariant rule memory.max <= memsw.max. 3390 */ 3391 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3392 max <= memcg->memsw.max; 3393 if (!limits_invariant) { 3394 mutex_unlock(&memcg_max_mutex); 3395 ret = -EINVAL; 3396 break; 3397 } 3398 if (max > counter->max) 3399 enlarge = true; 3400 ret = page_counter_set_max(counter, max); 3401 mutex_unlock(&memcg_max_mutex); 3402 3403 if (!ret) 3404 break; 3405 3406 if (!drained) { 3407 drain_all_stock(memcg); 3408 drained = true; 3409 continue; 3410 } 3411 3412 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3413 GFP_KERNEL, !memsw)) { 3414 ret = -EBUSY; 3415 break; 3416 } 3417 } while (true); 3418 3419 if (!ret && enlarge) 3420 memcg_oom_recover(memcg); 3421 3422 return ret; 3423 } 3424 3425 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3426 gfp_t gfp_mask, 3427 unsigned long *total_scanned) 3428 { 3429 unsigned long nr_reclaimed = 0; 3430 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3431 unsigned long reclaimed; 3432 int loop = 0; 3433 struct mem_cgroup_tree_per_node *mctz; 3434 unsigned long excess; 3435 unsigned long nr_scanned; 3436 3437 if (order > 0) 3438 return 0; 3439 3440 mctz = soft_limit_tree_node(pgdat->node_id); 3441 3442 /* 3443 * Do not even bother to check the largest node if the root 3444 * is empty. Do it lockless to prevent lock bouncing. Races 3445 * are acceptable as soft limit is best effort anyway. 
3446 */ 3447 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3448 return 0; 3449 3450 /* 3451 * This loop can run for a while, especially if mem_cgroups continuously 3452 * keep exceeding their soft limit and putting the system under 3453 * pressure. 3454 */ 3455 do { 3456 if (next_mz) 3457 mz = next_mz; 3458 else 3459 mz = mem_cgroup_largest_soft_limit_node(mctz); 3460 if (!mz) 3461 break; 3462 3463 nr_scanned = 0; 3464 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3465 gfp_mask, &nr_scanned); 3466 nr_reclaimed += reclaimed; 3467 *total_scanned += nr_scanned; 3468 spin_lock_irq(&mctz->lock); 3469 __mem_cgroup_remove_exceeded(mz, mctz); 3470 3471 /* 3472 * If we failed to reclaim anything from this memory cgroup, 3473 * it is time to move on to the next cgroup. 3474 */ 3475 next_mz = NULL; 3476 if (!reclaimed) 3477 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3478 3479 excess = soft_limit_excess(mz->memcg); 3480 /* 3481 * One school of thought says that we should not add 3482 * back the node to the tree if reclaim returns 0. 3483 * But our reclaim could return 0 simply because, due 3484 * to priority, we are exposing a smaller subset of 3485 * memory to reclaim from. Consider this as a longer 3486 * term TODO. 3487 */ 3488 /* If excess == 0, no tree ops */ 3489 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3490 spin_unlock_irq(&mctz->lock); 3491 css_put(&mz->memcg->css); 3492 loop++; 3493 /* 3494 * Could not reclaim anything and there are no more 3495 * mem cgroups to try or we seem to be looping without 3496 * reclaiming anything. 3497 */ 3498 if (!nr_reclaimed && 3499 (next_mz == NULL || 3500 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3501 break; 3502 } while (!nr_reclaimed); 3503 if (next_mz) 3504 css_put(&next_mz->memcg->css); 3505 return nr_reclaimed; 3506 } 3507 3508 /* 3509 * Reclaims as many pages from the given memcg as possible. 3510 * 3511 * Caller is responsible for holding a css reference for the memcg. 3512 */ 3513 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3514 { 3515 int nr_retries = MAX_RECLAIM_RETRIES; 3516 3517 /* we call try-to-free pages to make this cgroup empty */ 3518 lru_add_drain_all(); 3519 3520 drain_all_stock(memcg); 3521 3522 /* try to free all pages in this cgroup */ 3523 while (nr_retries && page_counter_read(&memcg->memory)) { 3524 int progress; 3525 3526 if (signal_pending(current)) 3527 return -EINTR; 3528 3529 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3530 GFP_KERNEL, true); 3531 if (!progress) { 3532 nr_retries--; 3533 /* maybe some writeback is necessary */ 3534 congestion_wait(BLK_RW_ASYNC, HZ/10); 3535 } 3536 3537 } 3538 3539 return 0; 3540 } 3541 3542 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3543 char *buf, size_t nbytes, 3544 loff_t off) 3545 { 3546 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3547 3548 if (mem_cgroup_is_root(memcg)) 3549 return -EINVAL; 3550 return mem_cgroup_force_empty(memcg) ?: nbytes; 3551 } 3552 3553 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3554 struct cftype *cft) 3555 { 3556 return 1; 3557 } 3558 3559 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3560 struct cftype *cft, u64 val) 3561 { 3562 if (val == 1) 3563 return 0; 3564 3565 pr_warn_once("Non-hierarchical mode is deprecated. 
" 3566 "Please report your usecase to linux-mm@kvack.org if you " 3567 "depend on this functionality.\n"); 3568 3569 return -EINVAL; 3570 } 3571 3572 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3573 { 3574 unsigned long val; 3575 3576 if (mem_cgroup_is_root(memcg)) { 3577 cgroup_rstat_flush(memcg->css.cgroup); 3578 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3579 memcg_page_state(memcg, NR_ANON_MAPPED); 3580 if (swap) 3581 val += memcg_page_state(memcg, MEMCG_SWAP); 3582 } else { 3583 if (!swap) 3584 val = page_counter_read(&memcg->memory); 3585 else 3586 val = page_counter_read(&memcg->memsw); 3587 } 3588 return val; 3589 } 3590 3591 enum { 3592 RES_USAGE, 3593 RES_LIMIT, 3594 RES_MAX_USAGE, 3595 RES_FAILCNT, 3596 RES_SOFT_LIMIT, 3597 }; 3598 3599 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3600 struct cftype *cft) 3601 { 3602 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3603 struct page_counter *counter; 3604 3605 switch (MEMFILE_TYPE(cft->private)) { 3606 case _MEM: 3607 counter = &memcg->memory; 3608 break; 3609 case _MEMSWAP: 3610 counter = &memcg->memsw; 3611 break; 3612 case _KMEM: 3613 counter = &memcg->kmem; 3614 break; 3615 case _TCP: 3616 counter = &memcg->tcpmem; 3617 break; 3618 default: 3619 BUG(); 3620 } 3621 3622 switch (MEMFILE_ATTR(cft->private)) { 3623 case RES_USAGE: 3624 if (counter == &memcg->memory) 3625 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3626 if (counter == &memcg->memsw) 3627 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3628 return (u64)page_counter_read(counter) * PAGE_SIZE; 3629 case RES_LIMIT: 3630 return (u64)counter->max * PAGE_SIZE; 3631 case RES_MAX_USAGE: 3632 return (u64)counter->watermark * PAGE_SIZE; 3633 case RES_FAILCNT: 3634 return counter->failcnt; 3635 case RES_SOFT_LIMIT: 3636 return (u64)memcg->soft_limit * PAGE_SIZE; 3637 default: 3638 BUG(); 3639 } 3640 } 3641 3642 #ifdef CONFIG_MEMCG_KMEM 3643 static int memcg_online_kmem(struct mem_cgroup *memcg) 3644 { 3645 struct obj_cgroup *objcg; 3646 int memcg_id; 3647 3648 if (cgroup_memory_nokmem) 3649 return 0; 3650 3651 BUG_ON(memcg->kmemcg_id >= 0); 3652 BUG_ON(memcg->kmem_state); 3653 3654 memcg_id = memcg_alloc_cache_id(); 3655 if (memcg_id < 0) 3656 return memcg_id; 3657 3658 objcg = obj_cgroup_alloc(); 3659 if (!objcg) { 3660 memcg_free_cache_id(memcg_id); 3661 return -ENOMEM; 3662 } 3663 objcg->memcg = memcg; 3664 rcu_assign_pointer(memcg->objcg, objcg); 3665 3666 static_branch_enable(&memcg_kmem_enabled_key); 3667 3668 memcg->kmemcg_id = memcg_id; 3669 memcg->kmem_state = KMEM_ONLINE; 3670 3671 return 0; 3672 } 3673 3674 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3675 { 3676 struct cgroup_subsys_state *css; 3677 struct mem_cgroup *parent, *child; 3678 int kmemcg_id; 3679 3680 if (memcg->kmem_state != KMEM_ONLINE) 3681 return; 3682 3683 memcg->kmem_state = KMEM_ALLOCATED; 3684 3685 parent = parent_mem_cgroup(memcg); 3686 if (!parent) 3687 parent = root_mem_cgroup; 3688 3689 memcg_reparent_objcgs(memcg, parent); 3690 3691 kmemcg_id = memcg->kmemcg_id; 3692 BUG_ON(kmemcg_id < 0); 3693 3694 /* 3695 * Change kmemcg_id of this cgroup and all its descendants to the 3696 * parent's id, and then move all entries from this cgroup's list_lrus 3697 * to ones of the parent. After we have finished, all list_lrus 3698 * corresponding to this cgroup are guaranteed to remain empty. The 3699 * ordering is imposed by list_lru_node->lock taken by 3700 * memcg_drain_all_list_lrus(). 
3701 */ 3702 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3703 css_for_each_descendant_pre(css, &memcg->css) { 3704 child = mem_cgroup_from_css(css); 3705 BUG_ON(child->kmemcg_id != kmemcg_id); 3706 child->kmemcg_id = parent->kmemcg_id; 3707 } 3708 rcu_read_unlock(); 3709 3710 memcg_drain_all_list_lrus(kmemcg_id, parent); 3711 3712 memcg_free_cache_id(kmemcg_id); 3713 } 3714 3715 static void memcg_free_kmem(struct mem_cgroup *memcg) 3716 { 3717 /* css_alloc() failed, offlining didn't happen */ 3718 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3719 memcg_offline_kmem(memcg); 3720 } 3721 #else 3722 static int memcg_online_kmem(struct mem_cgroup *memcg) 3723 { 3724 return 0; 3725 } 3726 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3727 { 3728 } 3729 static void memcg_free_kmem(struct mem_cgroup *memcg) 3730 { 3731 } 3732 #endif /* CONFIG_MEMCG_KMEM */ 3733 3734 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3735 unsigned long max) 3736 { 3737 int ret; 3738 3739 mutex_lock(&memcg_max_mutex); 3740 ret = page_counter_set_max(&memcg->kmem, max); 3741 mutex_unlock(&memcg_max_mutex); 3742 return ret; 3743 } 3744 3745 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3746 { 3747 int ret; 3748 3749 mutex_lock(&memcg_max_mutex); 3750 3751 ret = page_counter_set_max(&memcg->tcpmem, max); 3752 if (ret) 3753 goto out; 3754 3755 if (!memcg->tcpmem_active) { 3756 /* 3757 * The active flag needs to be written after the static_key 3758 * update. This is what guarantees that the socket activation 3759 * function is the last one to run. See mem_cgroup_sk_alloc() 3760 * for details, and note that we don't mark any socket as 3761 * belonging to this memcg until that flag is up. 3762 * 3763 * We need to do this, because static_keys will span multiple 3764 * sites, but we can't control their order. If we mark a socket 3765 * as accounted, but the accounting functions are not patched in 3766 * yet, we'll lose accounting. 3767 * 3768 * We never race with the readers in mem_cgroup_sk_alloc(), 3769 * because when this value change, the code to process it is not 3770 * patched in yet. 3771 */ 3772 static_branch_inc(&memcg_sockets_enabled_key); 3773 memcg->tcpmem_active = true; 3774 } 3775 out: 3776 mutex_unlock(&memcg_max_mutex); 3777 return ret; 3778 } 3779 3780 /* 3781 * The user of this function is... 3782 * RES_LIMIT. 3783 */ 3784 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3785 char *buf, size_t nbytes, loff_t off) 3786 { 3787 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3788 unsigned long nr_pages; 3789 int ret; 3790 3791 buf = strstrip(buf); 3792 ret = page_counter_memparse(buf, "-1", &nr_pages); 3793 if (ret) 3794 return ret; 3795 3796 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3797 case RES_LIMIT: 3798 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3799 ret = -EINVAL; 3800 break; 3801 } 3802 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3803 case _MEM: 3804 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3805 break; 3806 case _MEMSWAP: 3807 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3808 break; 3809 case _KMEM: 3810 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. 
" 3811 "Please report your usecase to linux-mm@kvack.org if you " 3812 "depend on this functionality.\n"); 3813 ret = memcg_update_kmem_max(memcg, nr_pages); 3814 break; 3815 case _TCP: 3816 ret = memcg_update_tcp_max(memcg, nr_pages); 3817 break; 3818 } 3819 break; 3820 case RES_SOFT_LIMIT: 3821 memcg->soft_limit = nr_pages; 3822 ret = 0; 3823 break; 3824 } 3825 return ret ?: nbytes; 3826 } 3827 3828 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3829 size_t nbytes, loff_t off) 3830 { 3831 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3832 struct page_counter *counter; 3833 3834 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3835 case _MEM: 3836 counter = &memcg->memory; 3837 break; 3838 case _MEMSWAP: 3839 counter = &memcg->memsw; 3840 break; 3841 case _KMEM: 3842 counter = &memcg->kmem; 3843 break; 3844 case _TCP: 3845 counter = &memcg->tcpmem; 3846 break; 3847 default: 3848 BUG(); 3849 } 3850 3851 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3852 case RES_MAX_USAGE: 3853 page_counter_reset_watermark(counter); 3854 break; 3855 case RES_FAILCNT: 3856 counter->failcnt = 0; 3857 break; 3858 default: 3859 BUG(); 3860 } 3861 3862 return nbytes; 3863 } 3864 3865 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3866 struct cftype *cft) 3867 { 3868 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3869 } 3870 3871 #ifdef CONFIG_MMU 3872 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3873 struct cftype *cft, u64 val) 3874 { 3875 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3876 3877 if (val & ~MOVE_MASK) 3878 return -EINVAL; 3879 3880 /* 3881 * No kind of locking is needed in here, because ->can_attach() will 3882 * check this value once in the beginning of the process, and then carry 3883 * on with stale data. This means that changes to this value will only 3884 * affect task migrations starting after the change. 
3885 */ 3886 memcg->move_charge_at_immigrate = val; 3887 return 0; 3888 } 3889 #else 3890 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3891 struct cftype *cft, u64 val) 3892 { 3893 return -ENOSYS; 3894 } 3895 #endif 3896 3897 #ifdef CONFIG_NUMA 3898 3899 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3900 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3901 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3902 3903 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3904 int nid, unsigned int lru_mask, bool tree) 3905 { 3906 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3907 unsigned long nr = 0; 3908 enum lru_list lru; 3909 3910 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3911 3912 for_each_lru(lru) { 3913 if (!(BIT(lru) & lru_mask)) 3914 continue; 3915 if (tree) 3916 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3917 else 3918 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3919 } 3920 return nr; 3921 } 3922 3923 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3924 unsigned int lru_mask, 3925 bool tree) 3926 { 3927 unsigned long nr = 0; 3928 enum lru_list lru; 3929 3930 for_each_lru(lru) { 3931 if (!(BIT(lru) & lru_mask)) 3932 continue; 3933 if (tree) 3934 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3935 else 3936 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3937 } 3938 return nr; 3939 } 3940 3941 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3942 { 3943 struct numa_stat { 3944 const char *name; 3945 unsigned int lru_mask; 3946 }; 3947 3948 static const struct numa_stat stats[] = { 3949 { "total", LRU_ALL }, 3950 { "file", LRU_ALL_FILE }, 3951 { "anon", LRU_ALL_ANON }, 3952 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3953 }; 3954 const struct numa_stat *stat; 3955 int nid; 3956 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3957 3958 cgroup_rstat_flush(memcg->css.cgroup); 3959 3960 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3961 seq_printf(m, "%s=%lu", stat->name, 3962 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3963 false)); 3964 for_each_node_state(nid, N_MEMORY) 3965 seq_printf(m, " N%d=%lu", nid, 3966 mem_cgroup_node_nr_lru_pages(memcg, nid, 3967 stat->lru_mask, false)); 3968 seq_putc(m, '\n'); 3969 } 3970 3971 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3972 3973 seq_printf(m, "hierarchical_%s=%lu", stat->name, 3974 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3975 true)); 3976 for_each_node_state(nid, N_MEMORY) 3977 seq_printf(m, " N%d=%lu", nid, 3978 mem_cgroup_node_nr_lru_pages(memcg, nid, 3979 stat->lru_mask, true)); 3980 seq_putc(m, '\n'); 3981 } 3982 3983 return 0; 3984 } 3985 #endif /* CONFIG_NUMA */ 3986 3987 static const unsigned int memcg1_stats[] = { 3988 NR_FILE_PAGES, 3989 NR_ANON_MAPPED, 3990 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3991 NR_ANON_THPS, 3992 #endif 3993 NR_SHMEM, 3994 NR_FILE_MAPPED, 3995 NR_FILE_DIRTY, 3996 NR_WRITEBACK, 3997 MEMCG_SWAP, 3998 }; 3999 4000 static const char *const memcg1_stat_names[] = { 4001 "cache", 4002 "rss", 4003 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4004 "rss_huge", 4005 #endif 4006 "shmem", 4007 "mapped_file", 4008 "dirty", 4009 "writeback", 4010 "swap", 4011 }; 4012 4013 /* Universal VM events cgroup1 shows, original sort order */ 4014 static const unsigned int memcg1_events[] = { 4015 PGPGIN, 4016 PGPGOUT, 4017 PGFAULT, 4018 PGMAJFAULT, 4019 }; 4020 4021 static int memcg_stat_show(struct seq_file *m, void *v) 4022 { 4023 
struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4024 unsigned long memory, memsw; 4025 struct mem_cgroup *mi; 4026 unsigned int i; 4027 4028 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4029 4030 cgroup_rstat_flush(memcg->css.cgroup); 4031 4032 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4033 unsigned long nr; 4034 4035 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4036 continue; 4037 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4038 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 4039 } 4040 4041 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4042 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4043 memcg_events_local(memcg, memcg1_events[i])); 4044 4045 for (i = 0; i < NR_LRU_LISTS; i++) 4046 seq_printf(m, "%s %lu\n", lru_list_name(i), 4047 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4048 PAGE_SIZE); 4049 4050 /* Hierarchical information */ 4051 memory = memsw = PAGE_COUNTER_MAX; 4052 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4053 memory = min(memory, READ_ONCE(mi->memory.max)); 4054 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4055 } 4056 seq_printf(m, "hierarchical_memory_limit %llu\n", 4057 (u64)memory * PAGE_SIZE); 4058 if (do_memsw_account()) 4059 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4060 (u64)memsw * PAGE_SIZE); 4061 4062 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4063 unsigned long nr; 4064 4065 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4066 continue; 4067 nr = memcg_page_state(memcg, memcg1_stats[i]); 4068 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4069 (u64)nr * PAGE_SIZE); 4070 } 4071 4072 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4073 seq_printf(m, "total_%s %llu\n", 4074 vm_event_name(memcg1_events[i]), 4075 (u64)memcg_events(memcg, memcg1_events[i])); 4076 4077 for (i = 0; i < NR_LRU_LISTS; i++) 4078 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4079 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4080 PAGE_SIZE); 4081 4082 #ifdef CONFIG_DEBUG_VM 4083 { 4084 pg_data_t *pgdat; 4085 struct mem_cgroup_per_node *mz; 4086 unsigned long anon_cost = 0; 4087 unsigned long file_cost = 0; 4088 4089 for_each_online_pgdat(pgdat) { 4090 mz = memcg->nodeinfo[pgdat->node_id]; 4091 4092 anon_cost += mz->lruvec.anon_cost; 4093 file_cost += mz->lruvec.file_cost; 4094 } 4095 seq_printf(m, "anon_cost %lu\n", anon_cost); 4096 seq_printf(m, "file_cost %lu\n", file_cost); 4097 } 4098 #endif 4099 4100 return 0; 4101 } 4102 4103 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4104 struct cftype *cft) 4105 { 4106 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4107 4108 return mem_cgroup_swappiness(memcg); 4109 } 4110 4111 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4112 struct cftype *cft, u64 val) 4113 { 4114 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4115 4116 if (val > 100) 4117 return -EINVAL; 4118 4119 if (!mem_cgroup_is_root(memcg)) 4120 memcg->swappiness = val; 4121 else 4122 vm_swappiness = val; 4123 4124 return 0; 4125 } 4126 4127 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4128 { 4129 struct mem_cgroup_threshold_ary *t; 4130 unsigned long usage; 4131 int i; 4132 4133 rcu_read_lock(); 4134 if (!swap) 4135 t = rcu_dereference(memcg->thresholds.primary); 4136 else 4137 t = rcu_dereference(memcg->memsw_thresholds.primary); 4138 4139 if (!t) 4140 goto unlock; 4141 4142 usage = mem_cgroup_usage(memcg, swap); 4143 4144 /* 4145 * current_threshold 
points to threshold just below or equal to usage. 4146 * If it's not true, a threshold was crossed after last 4147 * call of __mem_cgroup_threshold(). 4148 */ 4149 i = t->current_threshold; 4150 4151 /* 4152 * Iterate backward over array of thresholds starting from 4153 * current_threshold and check if a threshold is crossed. 4154 * If none of thresholds below usage is crossed, we read 4155 * only one element of the array here. 4156 */ 4157 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4158 eventfd_signal(t->entries[i].eventfd, 1); 4159 4160 /* i = current_threshold + 1 */ 4161 i++; 4162 4163 /* 4164 * Iterate forward over array of thresholds starting from 4165 * current_threshold+1 and check if a threshold is crossed. 4166 * If none of thresholds above usage is crossed, we read 4167 * only one element of the array here. 4168 */ 4169 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4170 eventfd_signal(t->entries[i].eventfd, 1); 4171 4172 /* Update current_threshold */ 4173 t->current_threshold = i - 1; 4174 unlock: 4175 rcu_read_unlock(); 4176 } 4177 4178 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4179 { 4180 while (memcg) { 4181 __mem_cgroup_threshold(memcg, false); 4182 if (do_memsw_account()) 4183 __mem_cgroup_threshold(memcg, true); 4184 4185 memcg = parent_mem_cgroup(memcg); 4186 } 4187 } 4188 4189 static int compare_thresholds(const void *a, const void *b) 4190 { 4191 const struct mem_cgroup_threshold *_a = a; 4192 const struct mem_cgroup_threshold *_b = b; 4193 4194 if (_a->threshold > _b->threshold) 4195 return 1; 4196 4197 if (_a->threshold < _b->threshold) 4198 return -1; 4199 4200 return 0; 4201 } 4202 4203 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4204 { 4205 struct mem_cgroup_eventfd_list *ev; 4206 4207 spin_lock(&memcg_oom_lock); 4208 4209 list_for_each_entry(ev, &memcg->oom_notify, list) 4210 eventfd_signal(ev->eventfd, 1); 4211 4212 spin_unlock(&memcg_oom_lock); 4213 return 0; 4214 } 4215 4216 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4217 { 4218 struct mem_cgroup *iter; 4219 4220 for_each_mem_cgroup_tree(iter, memcg) 4221 mem_cgroup_oom_notify_cb(iter); 4222 } 4223 4224 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4225 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4226 { 4227 struct mem_cgroup_thresholds *thresholds; 4228 struct mem_cgroup_threshold_ary *new; 4229 unsigned long threshold; 4230 unsigned long usage; 4231 int i, size, ret; 4232 4233 ret = page_counter_memparse(args, "-1", &threshold); 4234 if (ret) 4235 return ret; 4236 4237 mutex_lock(&memcg->thresholds_lock); 4238 4239 if (type == _MEM) { 4240 thresholds = &memcg->thresholds; 4241 usage = mem_cgroup_usage(memcg, false); 4242 } else if (type == _MEMSWAP) { 4243 thresholds = &memcg->memsw_thresholds; 4244 usage = mem_cgroup_usage(memcg, true); 4245 } else 4246 BUG(); 4247 4248 /* Check if a threshold crossed before adding a new one */ 4249 if (thresholds->primary) 4250 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4251 4252 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4253 4254 /* Allocate memory for new array of thresholds */ 4255 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4256 if (!new) { 4257 ret = -ENOMEM; 4258 goto unlock; 4259 } 4260 new->size = size; 4261 4262 /* Copy thresholds (if any) to new array */ 4263 if (thresholds->primary) 4264 memcpy(new->entries, thresholds->primary->entries, 4265 flex_array_size(new, entries, size - 1)); 4266 4267 /* Add new threshold */ 4268 new->entries[size - 1].eventfd = eventfd; 4269 new->entries[size - 1].threshold = threshold; 4270 4271 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4272 sort(new->entries, size, sizeof(*new->entries), 4273 compare_thresholds, NULL); 4274 4275 /* Find current threshold */ 4276 new->current_threshold = -1; 4277 for (i = 0; i < size; i++) { 4278 if (new->entries[i].threshold <= usage) { 4279 /* 4280 * new->current_threshold will not be used until 4281 * rcu_assign_pointer(), so it's safe to increment 4282 * it here. 4283 */ 4284 ++new->current_threshold; 4285 } else 4286 break; 4287 } 4288 4289 /* Free old spare buffer and save old primary buffer as spare */ 4290 kfree(thresholds->spare); 4291 thresholds->spare = thresholds->primary; 4292 4293 rcu_assign_pointer(thresholds->primary, new); 4294 4295 /* To be sure that nobody uses thresholds */ 4296 synchronize_rcu(); 4297 4298 unlock: 4299 mutex_unlock(&memcg->thresholds_lock); 4300 4301 return ret; 4302 } 4303 4304 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4305 struct eventfd_ctx *eventfd, const char *args) 4306 { 4307 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4308 } 4309 4310 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4311 struct eventfd_ctx *eventfd, const char *args) 4312 { 4313 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4314 } 4315 4316 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4317 struct eventfd_ctx *eventfd, enum res_type type) 4318 { 4319 struct mem_cgroup_thresholds *thresholds; 4320 struct mem_cgroup_threshold_ary *new; 4321 unsigned long usage; 4322 int i, j, size, entries; 4323 4324 mutex_lock(&memcg->thresholds_lock); 4325 4326 if (type == _MEM) { 4327 thresholds = &memcg->thresholds; 4328 usage = mem_cgroup_usage(memcg, false); 4329 } else if (type == _MEMSWAP) { 4330 thresholds = &memcg->memsw_thresholds; 4331 usage = mem_cgroup_usage(memcg, true); 4332 } else 4333 BUG(); 4334 4335 if (!thresholds->primary) 4336 goto unlock; 4337 4338 /* Check if a threshold crossed before removing */ 4339 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4340 4341 /* Calculate new number of threshold */ 4342 size = entries = 0; 4343 for (i = 0; i < thresholds->primary->size; i++) { 4344 if (thresholds->primary->entries[i].eventfd != eventfd) 4345 size++; 4346 else 4347 entries++; 4348 } 4349 4350 new = thresholds->spare; 4351 4352 /* If no items related to eventfd have been cleared, nothing to do */ 4353 if (!entries) 4354 goto unlock; 4355 4356 /* Set thresholds array to NULL if we don't have thresholds */ 4357 if (!size) { 4358 kfree(new); 4359 new = NULL; 4360 goto swap_buffers; 4361 } 4362 4363 new->size = size; 4364 4365 /* Copy thresholds and find current threshold */ 4366 new->current_threshold = -1; 4367 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4368 if (thresholds->primary->entries[i].eventfd == eventfd) 4369 continue; 4370 4371 new->entries[j] = thresholds->primary->entries[i]; 4372 if 
(new->entries[j].threshold <= usage) { 4373 /* 4374 * new->current_threshold will not be used 4375 * until rcu_assign_pointer(), so it's safe to increment 4376 * it here. 4377 */ 4378 ++new->current_threshold; 4379 } 4380 j++; 4381 } 4382 4383 swap_buffers: 4384 /* Swap primary and spare array */ 4385 thresholds->spare = thresholds->primary; 4386 4387 rcu_assign_pointer(thresholds->primary, new); 4388 4389 /* To be sure that nobody uses thresholds */ 4390 synchronize_rcu(); 4391 4392 /* If all events are unregistered, free the spare array */ 4393 if (!new) { 4394 kfree(thresholds->spare); 4395 thresholds->spare = NULL; 4396 } 4397 unlock: 4398 mutex_unlock(&memcg->thresholds_lock); 4399 } 4400 4401 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4402 struct eventfd_ctx *eventfd) 4403 { 4404 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4405 } 4406 4407 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4408 struct eventfd_ctx *eventfd) 4409 { 4410 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4411 } 4412 4413 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4414 struct eventfd_ctx *eventfd, const char *args) 4415 { 4416 struct mem_cgroup_eventfd_list *event; 4417 4418 event = kmalloc(sizeof(*event), GFP_KERNEL); 4419 if (!event) 4420 return -ENOMEM; 4421 4422 spin_lock(&memcg_oom_lock); 4423 4424 event->eventfd = eventfd; 4425 list_add(&event->list, &memcg->oom_notify); 4426 4427 /* already in OOM ? */ 4428 if (memcg->under_oom) 4429 eventfd_signal(eventfd, 1); 4430 spin_unlock(&memcg_oom_lock); 4431 4432 return 0; 4433 } 4434 4435 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4436 struct eventfd_ctx *eventfd) 4437 { 4438 struct mem_cgroup_eventfd_list *ev, *tmp; 4439 4440 spin_lock(&memcg_oom_lock); 4441 4442 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4443 if (ev->eventfd == eventfd) { 4444 list_del(&ev->list); 4445 kfree(ev); 4446 } 4447 } 4448 4449 spin_unlock(&memcg_oom_lock); 4450 } 4451 4452 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4453 { 4454 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4455 4456 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4457 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4458 seq_printf(sf, "oom_kill %lu\n", 4459 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4460 return 0; 4461 } 4462 4463 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4464 struct cftype *cft, u64 val) 4465 { 4466 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4467 4468 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4469 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) 4470 return -EINVAL; 4471 4472 memcg->oom_kill_disable = val; 4473 if (!val) 4474 memcg_oom_recover(memcg); 4475 4476 return 0; 4477 } 4478 4479 #ifdef CONFIG_CGROUP_WRITEBACK 4480 4481 #include <trace/events/writeback.h> 4482 4483 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4484 { 4485 return wb_domain_init(&memcg->cgwb_domain, gfp); 4486 } 4487 4488 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4489 { 4490 wb_domain_exit(&memcg->cgwb_domain); 4491 } 4492 4493 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4494 { 4495 wb_domain_size_changed(&memcg->cgwb_domain); 4496 } 4497 4498 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4499 { 4500 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css); 4501 4502 if (!memcg->css.parent) 4503 return NULL; 4504 4505 return &memcg->cgwb_domain; 4506 } 4507 4508 /** 4509 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4510 * @wb: bdi_writeback in question 4511 * @pfilepages: out parameter for number of file pages 4512 * @pheadroom: out parameter for number of allocatable pages according to memcg 4513 * @pdirty: out parameter for number of dirty pages 4514 * @pwriteback: out parameter for number of pages under writeback 4515 * 4516 * Determine the numbers of file, headroom, dirty, and writeback pages in 4517 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4518 * is a bit more involved. 4519 * 4520 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4521 * headroom is calculated as the lowest headroom of itself and the 4522 * ancestors. Note that this doesn't consider the actual amount of 4523 * available memory in the system. The caller should further cap 4524 * *@pheadroom accordingly. 4525 */ 4526 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4527 unsigned long *pheadroom, unsigned long *pdirty, 4528 unsigned long *pwriteback) 4529 { 4530 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4531 struct mem_cgroup *parent; 4532 4533 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); 4534 4535 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 4536 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 4537 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 4538 memcg_page_state(memcg, NR_ACTIVE_FILE); 4539 4540 *pheadroom = PAGE_COUNTER_MAX; 4541 while ((parent = parent_mem_cgroup(memcg))) { 4542 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4543 READ_ONCE(memcg->memory.high)); 4544 unsigned long used = page_counter_read(&memcg->memory); 4545 4546 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4547 memcg = parent; 4548 } 4549 } 4550 4551 /* 4552 * Foreign dirty flushing 4553 * 4554 * There's an inherent mismatch between memcg and writeback. The former 4555 * tracks ownership per-page while the latter per-inode. This was a 4556 * deliberate design decision because honoring per-page ownership in the 4557 * writeback path is complicated, may lead to higher CPU and IO overheads 4558 * and deemed unnecessary given that write-sharing an inode across 4559 * different cgroups isn't a common use-case. 4560 * 4561 * Combined with inode majority-writer ownership switching, this works well 4562 * enough in most cases but there are some pathological cases. For 4563 * example, let's say there are two cgroups A and B which keep writing to 4564 * different but confined parts of the same inode. B owns the inode and 4565 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4566 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4567 * triggering background writeback. A will be slowed down without a way to 4568 * make writeback of the dirty pages happen. 4569 * 4570 * Conditions like the above can lead to a cgroup getting repeatedly and 4571 * severely throttled after making some progress after each 4572 * dirty_expire_interval while the underlying IO device is almost 4573 * completely idle. 4574 * 4575 * Solving this problem completely requires matching the ownership tracking 4576 * granularities between memcg and writeback in either direction. 
However, 4577 * the more egregious behaviors can be avoided by simply remembering the 4578 * most recent foreign dirtying events and initiating remote flushes on 4579 * them when local writeback isn't enough to keep the memory clean enough. 4580 * 4581 * The following two functions implement such mechanism. When a foreign 4582 * page - a page whose memcg and writeback ownerships don't match - is 4583 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4584 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4585 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4586 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4587 * foreign bdi_writebacks which haven't expired. Both the numbers of 4588 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4589 * limited to MEMCG_CGWB_FRN_CNT. 4590 * 4591 * The mechanism only remembers IDs and doesn't hold any object references. 4592 * As being wrong occasionally doesn't matter, updates and accesses to the 4593 * records are lockless and racy. 4594 */ 4595 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, 4596 struct bdi_writeback *wb) 4597 { 4598 struct mem_cgroup *memcg = page_memcg(page); 4599 struct memcg_cgwb_frn *frn; 4600 u64 now = get_jiffies_64(); 4601 u64 oldest_at = now; 4602 int oldest = -1; 4603 int i; 4604 4605 trace_track_foreign_dirty(page, wb); 4606 4607 /* 4608 * Pick the slot to use. If there is already a slot for @wb, keep 4609 * using it. If not replace the oldest one which isn't being 4610 * written out. 4611 */ 4612 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4613 frn = &memcg->cgwb_frn[i]; 4614 if (frn->bdi_id == wb->bdi->id && 4615 frn->memcg_id == wb->memcg_css->id) 4616 break; 4617 if (time_before64(frn->at, oldest_at) && 4618 atomic_read(&frn->done.cnt) == 1) { 4619 oldest = i; 4620 oldest_at = frn->at; 4621 } 4622 } 4623 4624 if (i < MEMCG_CGWB_FRN_CNT) { 4625 /* 4626 * Re-using an existing one. Update timestamp lazily to 4627 * avoid making the cacheline hot. We want them to be 4628 * reasonably up-to-date and significantly shorter than 4629 * dirty_expire_interval as that's what expires the record. 4630 * Use the shorter of 1s and dirty_expire_interval / 8. 4631 */ 4632 unsigned long update_intv = 4633 min_t(unsigned long, HZ, 4634 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4635 4636 if (time_before64(frn->at, now - update_intv)) 4637 frn->at = now; 4638 } else if (oldest >= 0) { 4639 /* replace the oldest free one */ 4640 frn = &memcg->cgwb_frn[oldest]; 4641 frn->bdi_id = wb->bdi->id; 4642 frn->memcg_id = wb->memcg_css->id; 4643 frn->at = now; 4644 } 4645 } 4646 4647 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4648 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4649 { 4650 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4651 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4652 u64 now = jiffies_64; 4653 int i; 4654 4655 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4656 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4657 4658 /* 4659 * If the record is older than dirty_expire_interval, 4660 * writeback on it has already started. No need to kick it 4661 * off again. Also, don't start a new one if there's 4662 * already one in flight. 
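		 * As a rough illustration: with the default
		 * dirty_expire_interval (30 seconds worth of centisecs),
		 * intv is ~30 seconds of jiffies, so a record dirtied 10
		 * seconds ago with done.cnt == 1 (nothing in flight) gets
		 * flushed below, while a 45-second-old record is skipped.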
4663 */ 4664 if (time_after64(frn->at, now - intv) && 4665 atomic_read(&frn->done.cnt) == 1) { 4666 frn->at = 0; 4667 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4668 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4669 WB_REASON_FOREIGN_FLUSH, 4670 &frn->done); 4671 } 4672 } 4673 } 4674 4675 #else /* CONFIG_CGROUP_WRITEBACK */ 4676 4677 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4678 { 4679 return 0; 4680 } 4681 4682 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4683 { 4684 } 4685 4686 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4687 { 4688 } 4689 4690 #endif /* CONFIG_CGROUP_WRITEBACK */ 4691 4692 /* 4693 * DO NOT USE IN NEW FILES. 4694 * 4695 * "cgroup.event_control" implementation. 4696 * 4697 * This is way over-engineered. It tries to support fully configurable 4698 * events for each user. Such level of flexibility is completely 4699 * unnecessary especially in the light of the planned unified hierarchy. 4700 * 4701 * Please deprecate this and replace with something simpler if at all 4702 * possible. 4703 */ 4704 4705 /* 4706 * Unregister event and free resources. 4707 * 4708 * Gets called from workqueue. 4709 */ 4710 static void memcg_event_remove(struct work_struct *work) 4711 { 4712 struct mem_cgroup_event *event = 4713 container_of(work, struct mem_cgroup_event, remove); 4714 struct mem_cgroup *memcg = event->memcg; 4715 4716 remove_wait_queue(event->wqh, &event->wait); 4717 4718 event->unregister_event(memcg, event->eventfd); 4719 4720 /* Notify userspace the event is going away. */ 4721 eventfd_signal(event->eventfd, 1); 4722 4723 eventfd_ctx_put(event->eventfd); 4724 kfree(event); 4725 css_put(&memcg->css); 4726 } 4727 4728 /* 4729 * Gets called on EPOLLHUP on eventfd when user closes it. 4730 * 4731 * Called with wqh->lock held and interrupts disabled. 4732 */ 4733 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4734 int sync, void *key) 4735 { 4736 struct mem_cgroup_event *event = 4737 container_of(wait, struct mem_cgroup_event, wait); 4738 struct mem_cgroup *memcg = event->memcg; 4739 __poll_t flags = key_to_poll(key); 4740 4741 if (flags & EPOLLHUP) { 4742 /* 4743 * If the event has been detached at cgroup removal, we 4744 * can simply return knowing the other side will cleanup 4745 * for us. 4746 * 4747 * We can't race against event freeing since the other 4748 * side will require wqh->lock via remove_wait_queue(), 4749 * which we hold. 4750 */ 4751 spin_lock(&memcg->event_list_lock); 4752 if (!list_empty(&event->list)) { 4753 list_del_init(&event->list); 4754 /* 4755 * We are in atomic context, but cgroup_event_remove() 4756 * may sleep, so we have to call it in workqueue. 4757 */ 4758 schedule_work(&event->remove); 4759 } 4760 spin_unlock(&memcg->event_list_lock); 4761 } 4762 4763 return 0; 4764 } 4765 4766 static void memcg_event_ptable_queue_proc(struct file *file, 4767 wait_queue_head_t *wqh, poll_table *pt) 4768 { 4769 struct mem_cgroup_event *event = 4770 container_of(pt, struct mem_cgroup_event, pt); 4771 4772 event->wqh = wqh; 4773 add_wait_queue(wqh, &event->wait); 4774 } 4775 4776 /* 4777 * DO NOT USE IN NEW FILES. 4778 * 4779 * Parse input and register new cgroup event handler. 4780 * 4781 * Input must be in format '<event_fd> <control_fd> <args>'. 4782 * Interpretation of args is defined by control file implementation. 
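 *
 * Purely illustrative example (fd numbers invented): a process holding an
 * eventfd as fd 4 and memory.usage_in_bytes open as fd 5 can write
 * "4 5 50M" to cgroup.event_control; fd 4 is then signalled when usage
 * crosses 50M, and the trailing "50M" is what gets passed as args to the
 * usage threshold register_event handler.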
4783 */ 4784 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4785 char *buf, size_t nbytes, loff_t off) 4786 { 4787 struct cgroup_subsys_state *css = of_css(of); 4788 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4789 struct mem_cgroup_event *event; 4790 struct cgroup_subsys_state *cfile_css; 4791 unsigned int efd, cfd; 4792 struct fd efile; 4793 struct fd cfile; 4794 const char *name; 4795 char *endp; 4796 int ret; 4797 4798 buf = strstrip(buf); 4799 4800 efd = simple_strtoul(buf, &endp, 10); 4801 if (*endp != ' ') 4802 return -EINVAL; 4803 buf = endp + 1; 4804 4805 cfd = simple_strtoul(buf, &endp, 10); 4806 if ((*endp != ' ') && (*endp != '\0')) 4807 return -EINVAL; 4808 buf = endp + 1; 4809 4810 event = kzalloc(sizeof(*event), GFP_KERNEL); 4811 if (!event) 4812 return -ENOMEM; 4813 4814 event->memcg = memcg; 4815 INIT_LIST_HEAD(&event->list); 4816 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4817 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4818 INIT_WORK(&event->remove, memcg_event_remove); 4819 4820 efile = fdget(efd); 4821 if (!efile.file) { 4822 ret = -EBADF; 4823 goto out_kfree; 4824 } 4825 4826 event->eventfd = eventfd_ctx_fileget(efile.file); 4827 if (IS_ERR(event->eventfd)) { 4828 ret = PTR_ERR(event->eventfd); 4829 goto out_put_efile; 4830 } 4831 4832 cfile = fdget(cfd); 4833 if (!cfile.file) { 4834 ret = -EBADF; 4835 goto out_put_eventfd; 4836 } 4837 4838 /* the process need read permission on control file */ 4839 /* AV: shouldn't we check that it's been opened for read instead? */ 4840 ret = file_permission(cfile.file, MAY_READ); 4841 if (ret < 0) 4842 goto out_put_cfile; 4843 4844 /* 4845 * Determine the event callbacks and set them in @event. This used 4846 * to be done via struct cftype but cgroup core no longer knows 4847 * about these events. The following is crude but the whole thing 4848 * is for compatibility anyway. 4849 * 4850 * DO NOT ADD NEW FILES. 4851 */ 4852 name = cfile.file->f_path.dentry->d_name.name; 4853 4854 if (!strcmp(name, "memory.usage_in_bytes")) { 4855 event->register_event = mem_cgroup_usage_register_event; 4856 event->unregister_event = mem_cgroup_usage_unregister_event; 4857 } else if (!strcmp(name, "memory.oom_control")) { 4858 event->register_event = mem_cgroup_oom_register_event; 4859 event->unregister_event = mem_cgroup_oom_unregister_event; 4860 } else if (!strcmp(name, "memory.pressure_level")) { 4861 event->register_event = vmpressure_register_event; 4862 event->unregister_event = vmpressure_unregister_event; 4863 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4864 event->register_event = memsw_cgroup_usage_register_event; 4865 event->unregister_event = memsw_cgroup_usage_unregister_event; 4866 } else { 4867 ret = -EINVAL; 4868 goto out_put_cfile; 4869 } 4870 4871 /* 4872 * Verify @cfile should belong to @css. Also, remaining events are 4873 * automatically removed on cgroup destruction but the removal is 4874 * asynchronous, so take an extra ref on @css. 
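	 * (css_tryget_online_from_dir() below resolves the memcg css owning
	 * the control file's directory; if that css is not the one this
	 * event is being registered on, the request is rejected with
	 * -EINVAL.)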
4875 */ 4876 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4877 &memory_cgrp_subsys); 4878 ret = -EINVAL; 4879 if (IS_ERR(cfile_css)) 4880 goto out_put_cfile; 4881 if (cfile_css != css) { 4882 css_put(cfile_css); 4883 goto out_put_cfile; 4884 } 4885 4886 ret = event->register_event(memcg, event->eventfd, buf); 4887 if (ret) 4888 goto out_put_css; 4889 4890 vfs_poll(efile.file, &event->pt); 4891 4892 spin_lock(&memcg->event_list_lock); 4893 list_add(&event->list, &memcg->event_list); 4894 spin_unlock(&memcg->event_list_lock); 4895 4896 fdput(cfile); 4897 fdput(efile); 4898 4899 return nbytes; 4900 4901 out_put_css: 4902 css_put(css); 4903 out_put_cfile: 4904 fdput(cfile); 4905 out_put_eventfd: 4906 eventfd_ctx_put(event->eventfd); 4907 out_put_efile: 4908 fdput(efile); 4909 out_kfree: 4910 kfree(event); 4911 4912 return ret; 4913 } 4914 4915 static struct cftype mem_cgroup_legacy_files[] = { 4916 { 4917 .name = "usage_in_bytes", 4918 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4919 .read_u64 = mem_cgroup_read_u64, 4920 }, 4921 { 4922 .name = "max_usage_in_bytes", 4923 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4924 .write = mem_cgroup_reset, 4925 .read_u64 = mem_cgroup_read_u64, 4926 }, 4927 { 4928 .name = "limit_in_bytes", 4929 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4930 .write = mem_cgroup_write, 4931 .read_u64 = mem_cgroup_read_u64, 4932 }, 4933 { 4934 .name = "soft_limit_in_bytes", 4935 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4936 .write = mem_cgroup_write, 4937 .read_u64 = mem_cgroup_read_u64, 4938 }, 4939 { 4940 .name = "failcnt", 4941 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4942 .write = mem_cgroup_reset, 4943 .read_u64 = mem_cgroup_read_u64, 4944 }, 4945 { 4946 .name = "stat", 4947 .seq_show = memcg_stat_show, 4948 }, 4949 { 4950 .name = "force_empty", 4951 .write = mem_cgroup_force_empty_write, 4952 }, 4953 { 4954 .name = "use_hierarchy", 4955 .write_u64 = mem_cgroup_hierarchy_write, 4956 .read_u64 = mem_cgroup_hierarchy_read, 4957 }, 4958 { 4959 .name = "cgroup.event_control", /* XXX: for compat */ 4960 .write = memcg_write_event_control, 4961 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4962 }, 4963 { 4964 .name = "swappiness", 4965 .read_u64 = mem_cgroup_swappiness_read, 4966 .write_u64 = mem_cgroup_swappiness_write, 4967 }, 4968 { 4969 .name = "move_charge_at_immigrate", 4970 .read_u64 = mem_cgroup_move_charge_read, 4971 .write_u64 = mem_cgroup_move_charge_write, 4972 }, 4973 { 4974 .name = "oom_control", 4975 .seq_show = mem_cgroup_oom_control_read, 4976 .write_u64 = mem_cgroup_oom_control_write, 4977 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4978 }, 4979 { 4980 .name = "pressure_level", 4981 }, 4982 #ifdef CONFIG_NUMA 4983 { 4984 .name = "numa_stat", 4985 .seq_show = memcg_numa_stat_show, 4986 }, 4987 #endif 4988 { 4989 .name = "kmem.limit_in_bytes", 4990 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4991 .write = mem_cgroup_write, 4992 .read_u64 = mem_cgroup_read_u64, 4993 }, 4994 { 4995 .name = "kmem.usage_in_bytes", 4996 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4997 .read_u64 = mem_cgroup_read_u64, 4998 }, 4999 { 5000 .name = "kmem.failcnt", 5001 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5002 .write = mem_cgroup_reset, 5003 .read_u64 = mem_cgroup_read_u64, 5004 }, 5005 { 5006 .name = "kmem.max_usage_in_bytes", 5007 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5008 .write = mem_cgroup_reset, 5009 .read_u64 = mem_cgroup_read_u64, 5010 }, 5011 #if defined(CONFIG_MEMCG_KMEM) && \ 5012 
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5013 { 5014 .name = "kmem.slabinfo", 5015 .seq_show = memcg_slab_show, 5016 }, 5017 #endif 5018 { 5019 .name = "kmem.tcp.limit_in_bytes", 5020 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5021 .write = mem_cgroup_write, 5022 .read_u64 = mem_cgroup_read_u64, 5023 }, 5024 { 5025 .name = "kmem.tcp.usage_in_bytes", 5026 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5027 .read_u64 = mem_cgroup_read_u64, 5028 }, 5029 { 5030 .name = "kmem.tcp.failcnt", 5031 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5032 .write = mem_cgroup_reset, 5033 .read_u64 = mem_cgroup_read_u64, 5034 }, 5035 { 5036 .name = "kmem.tcp.max_usage_in_bytes", 5037 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5038 .write = mem_cgroup_reset, 5039 .read_u64 = mem_cgroup_read_u64, 5040 }, 5041 { }, /* terminate */ 5042 }; 5043 5044 /* 5045 * Private memory cgroup IDR 5046 * 5047 * Swap-out records and page cache shadow entries need to store memcg 5048 * references in constrained space, so we maintain an ID space that is 5049 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5050 * memory-controlled cgroups to 64k. 5051 * 5052 * However, there usually are many references to the offline CSS after 5053 * the cgroup has been destroyed, such as page cache or reclaimable 5054 * slab objects, that don't need to hang on to the ID. We want to keep 5055 * those dead CSS from occupying IDs, or we might quickly exhaust the 5056 * relatively small ID space and prevent the creation of new cgroups 5057 * even when there are much fewer than 64k cgroups - possibly none. 5058 * 5059 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5060 * be freed and recycled when it's no longer needed, which is usually 5061 * when the CSS is offlined. 5062 * 5063 * The only exception to that are records of swapped out tmpfs/shmem 5064 * pages that need to be attributed to live ancestors on swapin. But 5065 * those references are manageable from userspace. 5066 */ 5067 5068 static DEFINE_IDR(mem_cgroup_idr); 5069 5070 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5071 { 5072 if (memcg->id.id > 0) { 5073 idr_remove(&mem_cgroup_idr, memcg->id.id); 5074 memcg->id.id = 0; 5075 } 5076 } 5077 5078 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5079 unsigned int n) 5080 { 5081 refcount_add(n, &memcg->id.ref); 5082 } 5083 5084 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5085 { 5086 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5087 mem_cgroup_id_remove(memcg); 5088 5089 /* Memcg ID pins CSS */ 5090 css_put(&memcg->css); 5091 } 5092 } 5093 5094 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5095 { 5096 mem_cgroup_id_put_many(memcg, 1); 5097 } 5098 5099 /** 5100 * mem_cgroup_from_id - look up a memcg from a memcg id 5101 * @id: the memcg id to look up 5102 * 5103 * Caller must hold rcu_read_lock(). 5104 */ 5105 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5106 { 5107 WARN_ON_ONCE(!rcu_read_lock_held()); 5108 return idr_find(&mem_cgroup_idr, id); 5109 } 5110 5111 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5112 { 5113 struct mem_cgroup_per_node *pn; 5114 int tmp = node; 5115 /* 5116 * This routine is called against possible nodes. 5117 * But it's BUG to call kmalloc() against offline node. 5118 * 5119 * TODO: this routine can waste much memory for nodes which will 5120 * never be onlined. 
It's better to use memory hotplug callback 5121 * function. 5122 */ 5123 if (!node_state(node, N_NORMAL_MEMORY)) 5124 tmp = -1; 5125 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5126 if (!pn) 5127 return 1; 5128 5129 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5130 GFP_KERNEL_ACCOUNT); 5131 if (!pn->lruvec_stat_local) { 5132 kfree(pn); 5133 return 1; 5134 } 5135 5136 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat, 5137 GFP_KERNEL_ACCOUNT); 5138 if (!pn->lruvec_stat_cpu) { 5139 free_percpu(pn->lruvec_stat_local); 5140 kfree(pn); 5141 return 1; 5142 } 5143 5144 lruvec_init(&pn->lruvec); 5145 pn->usage_in_excess = 0; 5146 pn->on_tree = false; 5147 pn->memcg = memcg; 5148 5149 memcg->nodeinfo[node] = pn; 5150 return 0; 5151 } 5152 5153 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5154 { 5155 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5156 5157 if (!pn) 5158 return; 5159 5160 free_percpu(pn->lruvec_stat_cpu); 5161 free_percpu(pn->lruvec_stat_local); 5162 kfree(pn); 5163 } 5164 5165 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5166 { 5167 int node; 5168 5169 for_each_node(node) 5170 free_mem_cgroup_per_node_info(memcg, node); 5171 free_percpu(memcg->vmstats_percpu); 5172 kfree(memcg); 5173 } 5174 5175 static void mem_cgroup_free(struct mem_cgroup *memcg) 5176 { 5177 int cpu; 5178 5179 memcg_wb_domain_exit(memcg); 5180 /* 5181 * Flush percpu lruvec stats to guarantee the value 5182 * correctness on parent's and all ancestor levels. 5183 */ 5184 for_each_online_cpu(cpu) 5185 memcg_flush_lruvec_page_state(memcg, cpu); 5186 __mem_cgroup_free(memcg); 5187 } 5188 5189 static struct mem_cgroup *mem_cgroup_alloc(void) 5190 { 5191 struct mem_cgroup *memcg; 5192 unsigned int size; 5193 int node; 5194 int __maybe_unused i; 5195 long error = -ENOMEM; 5196 5197 size = sizeof(struct mem_cgroup); 5198 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5199 5200 memcg = kzalloc(size, GFP_KERNEL); 5201 if (!memcg) 5202 return ERR_PTR(error); 5203 5204 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5205 1, MEM_CGROUP_ID_MAX, 5206 GFP_KERNEL); 5207 if (memcg->id.id < 0) { 5208 error = memcg->id.id; 5209 goto fail; 5210 } 5211 5212 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5213 GFP_KERNEL_ACCOUNT); 5214 if (!memcg->vmstats_percpu) 5215 goto fail; 5216 5217 for_each_node(node) 5218 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5219 goto fail; 5220 5221 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5222 goto fail; 5223 5224 INIT_WORK(&memcg->high_work, high_work_func); 5225 INIT_LIST_HEAD(&memcg->oom_notify); 5226 mutex_init(&memcg->thresholds_lock); 5227 spin_lock_init(&memcg->move_lock); 5228 vmpressure_init(&memcg->vmpressure); 5229 INIT_LIST_HEAD(&memcg->event_list); 5230 spin_lock_init(&memcg->event_list_lock); 5231 memcg->socket_pressure = jiffies; 5232 #ifdef CONFIG_MEMCG_KMEM 5233 memcg->kmemcg_id = -1; 5234 INIT_LIST_HEAD(&memcg->objcg_list); 5235 #endif 5236 #ifdef CONFIG_CGROUP_WRITEBACK 5237 INIT_LIST_HEAD(&memcg->cgwb_list); 5238 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5239 memcg->cgwb_frn[i].done = 5240 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5241 #endif 5242 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5243 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5244 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5245 memcg->deferred_split_queue.split_queue_len = 0; 5246 #endif 5247 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5248 return memcg; 
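	/*
	 * Error path: release the reserved ID slot, if any, and free the
	 * partially constructed memcg along with any per-node and per-cpu
	 * state that was already allocated.
	 */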
5249 fail: 5250 mem_cgroup_id_remove(memcg); 5251 __mem_cgroup_free(memcg); 5252 return ERR_PTR(error); 5253 } 5254 5255 static struct cgroup_subsys_state * __ref 5256 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5257 { 5258 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5259 struct mem_cgroup *memcg, *old_memcg; 5260 long error = -ENOMEM; 5261 5262 old_memcg = set_active_memcg(parent); 5263 memcg = mem_cgroup_alloc(); 5264 set_active_memcg(old_memcg); 5265 if (IS_ERR(memcg)) 5266 return ERR_CAST(memcg); 5267 5268 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5269 memcg->soft_limit = PAGE_COUNTER_MAX; 5270 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5271 if (parent) { 5272 memcg->swappiness = mem_cgroup_swappiness(parent); 5273 memcg->oom_kill_disable = parent->oom_kill_disable; 5274 5275 page_counter_init(&memcg->memory, &parent->memory); 5276 page_counter_init(&memcg->swap, &parent->swap); 5277 page_counter_init(&memcg->kmem, &parent->kmem); 5278 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5279 } else { 5280 page_counter_init(&memcg->memory, NULL); 5281 page_counter_init(&memcg->swap, NULL); 5282 page_counter_init(&memcg->kmem, NULL); 5283 page_counter_init(&memcg->tcpmem, NULL); 5284 5285 root_mem_cgroup = memcg; 5286 return &memcg->css; 5287 } 5288 5289 /* The following stuff does not apply to the root */ 5290 error = memcg_online_kmem(memcg); 5291 if (error) 5292 goto fail; 5293 5294 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5295 static_branch_inc(&memcg_sockets_enabled_key); 5296 5297 return &memcg->css; 5298 fail: 5299 mem_cgroup_id_remove(memcg); 5300 mem_cgroup_free(memcg); 5301 return ERR_PTR(error); 5302 } 5303 5304 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5305 { 5306 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5307 5308 /* 5309 * A memcg must be visible for expand_shrinker_info() 5310 * by the time the maps are allocated. So, we allocate maps 5311 * here, when for_each_mem_cgroup() can't skip it. 5312 */ 5313 if (alloc_shrinker_info(memcg)) { 5314 mem_cgroup_id_remove(memcg); 5315 return -ENOMEM; 5316 } 5317 5318 /* Online state pins memcg ID, memcg ID pins CSS */ 5319 refcount_set(&memcg->id.ref, 1); 5320 css_get(css); 5321 return 0; 5322 } 5323 5324 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5325 { 5326 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5327 struct mem_cgroup_event *event, *tmp; 5328 5329 /* 5330 * Unregister events and notify userspace. 5331 * Notify userspace about cgroup removing only after rmdir of cgroup 5332 * directory to avoid race between userspace and kernelspace. 
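	 * Each pending event is only detached from event_list here; the
	 * actual teardown (signalling and releasing the eventfd) is deferred
	 * to a workqueue via memcg_event_remove(), as it may sleep and we
	 * hold event_list_lock.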
5333 */ 5334 spin_lock(&memcg->event_list_lock); 5335 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5336 list_del_init(&event->list); 5337 schedule_work(&event->remove); 5338 } 5339 spin_unlock(&memcg->event_list_lock); 5340 5341 page_counter_set_min(&memcg->memory, 0); 5342 page_counter_set_low(&memcg->memory, 0); 5343 5344 memcg_offline_kmem(memcg); 5345 reparent_shrinker_deferred(memcg); 5346 wb_memcg_offline(memcg); 5347 5348 drain_all_stock(memcg); 5349 5350 mem_cgroup_id_put(memcg); 5351 } 5352 5353 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5354 { 5355 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5356 5357 invalidate_reclaim_iterators(memcg); 5358 } 5359 5360 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5361 { 5362 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5363 int __maybe_unused i; 5364 5365 #ifdef CONFIG_CGROUP_WRITEBACK 5366 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5367 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5368 #endif 5369 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5370 static_branch_dec(&memcg_sockets_enabled_key); 5371 5372 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5373 static_branch_dec(&memcg_sockets_enabled_key); 5374 5375 vmpressure_cleanup(&memcg->vmpressure); 5376 cancel_work_sync(&memcg->high_work); 5377 mem_cgroup_remove_from_trees(memcg); 5378 free_shrinker_info(memcg); 5379 memcg_free_kmem(memcg); 5380 mem_cgroup_free(memcg); 5381 } 5382 5383 /** 5384 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5385 * @css: the target css 5386 * 5387 * Reset the states of the mem_cgroup associated with @css. This is 5388 * invoked when the userland requests disabling on the default hierarchy 5389 * but the memcg is pinned through dependency. The memcg should stop 5390 * applying policies and should revert to the vanilla state as it may be 5391 * made visible again. 5392 * 5393 * The current implementation only resets the essential configurations. 5394 * This needs to be expanded to cover all the visible parts. 5395 */ 5396 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5397 { 5398 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5399 5400 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5401 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5402 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5403 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5404 page_counter_set_min(&memcg->memory, 0); 5405 page_counter_set_low(&memcg->memory, 0); 5406 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5407 memcg->soft_limit = PAGE_COUNTER_MAX; 5408 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5409 memcg_wb_domain_size_changed(memcg); 5410 } 5411 5412 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 5413 { 5414 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5415 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5416 struct memcg_vmstats_percpu *statc; 5417 long delta, v; 5418 int i; 5419 5420 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5421 5422 for (i = 0; i < MEMCG_NR_STAT; i++) { 5423 /* 5424 * Collect the aggregated propagation counts of groups 5425 * below us. We're in a per-cpu loop here and this is 5426 * a global counter, so the first cycle will get them. 
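		 * Illustrative example (numbers invented): if children have
		 * already flushed a total of 40 into our state_pending[i],
		 * and this CPU's counter moved from 10 to 15 since its last
		 * snapshot, delta ends up as 45; it is added to our own
		 * vmstats.state[i] and also queued in the parent's
		 * state_pending[i] for the parent's next flush.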
5427 */ 5428 delta = memcg->vmstats.state_pending[i]; 5429 if (delta) 5430 memcg->vmstats.state_pending[i] = 0; 5431 5432 /* Add CPU changes on this level since the last flush */ 5433 v = READ_ONCE(statc->state[i]); 5434 if (v != statc->state_prev[i]) { 5435 delta += v - statc->state_prev[i]; 5436 statc->state_prev[i] = v; 5437 } 5438 5439 if (!delta) 5440 continue; 5441 5442 /* Aggregate counts on this level and propagate upwards */ 5443 memcg->vmstats.state[i] += delta; 5444 if (parent) 5445 parent->vmstats.state_pending[i] += delta; 5446 } 5447 5448 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 5449 delta = memcg->vmstats.events_pending[i]; 5450 if (delta) 5451 memcg->vmstats.events_pending[i] = 0; 5452 5453 v = READ_ONCE(statc->events[i]); 5454 if (v != statc->events_prev[i]) { 5455 delta += v - statc->events_prev[i]; 5456 statc->events_prev[i] = v; 5457 } 5458 5459 if (!delta) 5460 continue; 5461 5462 memcg->vmstats.events[i] += delta; 5463 if (parent) 5464 parent->vmstats.events_pending[i] += delta; 5465 } 5466 } 5467 5468 #ifdef CONFIG_MMU 5469 /* Handlers for move charge at task migration. */ 5470 static int mem_cgroup_do_precharge(unsigned long count) 5471 { 5472 int ret; 5473 5474 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5475 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5476 if (!ret) { 5477 mc.precharge += count; 5478 return ret; 5479 } 5480 5481 /* Try charges one by one with reclaim, but do not retry */ 5482 while (count--) { 5483 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5484 if (ret) 5485 return ret; 5486 mc.precharge++; 5487 cond_resched(); 5488 } 5489 return 0; 5490 } 5491 5492 union mc_target { 5493 struct page *page; 5494 swp_entry_t ent; 5495 }; 5496 5497 enum mc_target_type { 5498 MC_TARGET_NONE = 0, 5499 MC_TARGET_PAGE, 5500 MC_TARGET_SWAP, 5501 MC_TARGET_DEVICE, 5502 }; 5503 5504 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5505 unsigned long addr, pte_t ptent) 5506 { 5507 struct page *page = vm_normal_page(vma, addr, ptent); 5508 5509 if (!page || !page_mapped(page)) 5510 return NULL; 5511 if (PageAnon(page)) { 5512 if (!(mc.flags & MOVE_ANON)) 5513 return NULL; 5514 } else { 5515 if (!(mc.flags & MOVE_FILE)) 5516 return NULL; 5517 } 5518 if (!get_page_unless_zero(page)) 5519 return NULL; 5520 5521 return page; 5522 } 5523 5524 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5525 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5526 pte_t ptent, swp_entry_t *entry) 5527 { 5528 struct page *page = NULL; 5529 swp_entry_t ent = pte_to_swp_entry(ptent); 5530 5531 if (!(mc.flags & MOVE_ANON)) 5532 return NULL; 5533 5534 /* 5535 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to 5536 * a device and because they are not accessible by CPU they are store 5537 * as special swap entry in the CPU page table. 5538 */ 5539 if (is_device_private_entry(ent)) { 5540 page = pfn_swap_entry_to_page(ent); 5541 /* 5542 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have 5543 * a refcount of 1 when free (unlike normal page) 5544 */ 5545 if (!page_ref_add_unless(page, 1, 1)) 5546 return NULL; 5547 return page; 5548 } 5549 5550 if (non_swap_entry(ent)) 5551 return NULL; 5552 5553 /* 5554 * Because lookup_swap_cache() updates some statistics counter, 5555 * we call find_get_page() with swapper_space directly. 
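	 * The entry is also reported back through *entry, so the caller can
	 * still treat this pte as MC_TARGET_SWAP even when the page is not
	 * in the swap cache.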
5556 */ 5557 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5558 entry->val = ent.val; 5559 5560 return page; 5561 } 5562 #else 5563 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5564 pte_t ptent, swp_entry_t *entry) 5565 { 5566 return NULL; 5567 } 5568 #endif 5569 5570 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5571 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5572 { 5573 if (!vma->vm_file) /* anonymous vma */ 5574 return NULL; 5575 if (!(mc.flags & MOVE_FILE)) 5576 return NULL; 5577 5578 /* page is moved even if it's not RSS of this task(page-faulted). */ 5579 /* shmem/tmpfs may report page out on swap: account for that too. */ 5580 return find_get_incore_page(vma->vm_file->f_mapping, 5581 linear_page_index(vma, addr)); 5582 } 5583 5584 /** 5585 * mem_cgroup_move_account - move account of the page 5586 * @page: the page 5587 * @compound: charge the page as compound or small page 5588 * @from: mem_cgroup which the page is moved from. 5589 * @to: mem_cgroup which the page is moved to. @from != @to. 5590 * 5591 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5592 * 5593 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5594 * from old cgroup. 5595 */ 5596 static int mem_cgroup_move_account(struct page *page, 5597 bool compound, 5598 struct mem_cgroup *from, 5599 struct mem_cgroup *to) 5600 { 5601 struct lruvec *from_vec, *to_vec; 5602 struct pglist_data *pgdat; 5603 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5604 int ret; 5605 5606 VM_BUG_ON(from == to); 5607 VM_BUG_ON_PAGE(PageLRU(page), page); 5608 VM_BUG_ON(compound && !PageTransHuge(page)); 5609 5610 /* 5611 * Prevent mem_cgroup_migrate() from looking at 5612 * page's memory cgroup of its source page while we change it. 5613 */ 5614 ret = -EBUSY; 5615 if (!trylock_page(page)) 5616 goto out; 5617 5618 ret = -EINVAL; 5619 if (page_memcg(page) != from) 5620 goto out_unlock; 5621 5622 pgdat = page_pgdat(page); 5623 from_vec = mem_cgroup_lruvec(from, pgdat); 5624 to_vec = mem_cgroup_lruvec(to, pgdat); 5625 5626 lock_page_memcg(page); 5627 5628 if (PageAnon(page)) { 5629 if (page_mapped(page)) { 5630 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5631 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5632 if (PageTransHuge(page)) { 5633 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5634 -nr_pages); 5635 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5636 nr_pages); 5637 } 5638 } 5639 } else { 5640 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5641 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5642 5643 if (PageSwapBacked(page)) { 5644 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5645 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5646 } 5647 5648 if (page_mapped(page)) { 5649 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5650 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5651 } 5652 5653 if (PageDirty(page)) { 5654 struct address_space *mapping = page_mapping(page); 5655 5656 if (mapping_can_writeback(mapping)) { 5657 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5658 -nr_pages); 5659 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5660 nr_pages); 5661 } 5662 } 5663 } 5664 5665 if (PageWriteback(page)) { 5666 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5667 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5668 } 5669 5670 /* 5671 * All state has been migrated, let's switch to the new memcg. 
5672 * 5673 * It is safe to change page's memcg here because the page 5674 * is referenced, charged, isolated, and locked: we can't race 5675 * with (un)charging, migration, LRU putback, or anything else 5676 * that would rely on a stable page's memory cgroup. 5677 * 5678 * Note that lock_page_memcg is a memcg lock, not a page lock, 5679 * to save space. As soon as we switch page's memory cgroup to a 5680 * new memcg that isn't locked, the above state can change 5681 * concurrently again. Make sure we're truly done with it. 5682 */ 5683 smp_mb(); 5684 5685 css_get(&to->css); 5686 css_put(&from->css); 5687 5688 page->memcg_data = (unsigned long)to; 5689 5690 __unlock_page_memcg(from); 5691 5692 ret = 0; 5693 5694 local_irq_disable(); 5695 mem_cgroup_charge_statistics(to, page, nr_pages); 5696 memcg_check_events(to, page); 5697 mem_cgroup_charge_statistics(from, page, -nr_pages); 5698 memcg_check_events(from, page); 5699 local_irq_enable(); 5700 out_unlock: 5701 unlock_page(page); 5702 out: 5703 return ret; 5704 } 5705 5706 /** 5707 * get_mctgt_type - get target type of moving charge 5708 * @vma: the vma the pte to be checked belongs 5709 * @addr: the address corresponding to the pte to be checked 5710 * @ptent: the pte to be checked 5711 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5712 * 5713 * Returns 5714 * 0(MC_TARGET_NONE): if the pte is not a target for move charge. 5715 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for 5716 * move charge. if @target is not NULL, the page is stored in target->page 5717 * with extra refcnt got(Callers should handle it). 5718 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a 5719 * target for charge migration. if @target is not NULL, the entry is stored 5720 * in target->ent. 5721 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE 5722 * (so ZONE_DEVICE page and thus not on the lru). 5723 * For now we such page is charge like a regular page would be as for all 5724 * intent and purposes it is just special memory taking the place of a 5725 * regular page. 5726 * 5727 * See Documentations/vm/hmm.txt and include/linux/hmm.h 5728 * 5729 * Called with pte lock held. 5730 */ 5731 5732 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5733 unsigned long addr, pte_t ptent, union mc_target *target) 5734 { 5735 struct page *page = NULL; 5736 enum mc_target_type ret = MC_TARGET_NONE; 5737 swp_entry_t ent = { .val = 0 }; 5738 5739 if (pte_present(ptent)) 5740 page = mc_handle_present_pte(vma, addr, ptent); 5741 else if (is_swap_pte(ptent)) 5742 page = mc_handle_swap_pte(vma, ptent, &ent); 5743 else if (pte_none(ptent)) 5744 page = mc_handle_file_pte(vma, addr, ptent, &ent); 5745 5746 if (!page && !ent.val) 5747 return ret; 5748 if (page) { 5749 /* 5750 * Do only loose check w/o serialization. 5751 * mem_cgroup_move_account() checks the page is valid or 5752 * not under LRU exclusion. 5753 */ 5754 if (page_memcg(page) == mc.from) { 5755 ret = MC_TARGET_PAGE; 5756 if (is_device_private_page(page)) 5757 ret = MC_TARGET_DEVICE; 5758 if (target) 5759 target->page = page; 5760 } 5761 if (!ret || !target) 5762 put_page(page); 5763 } 5764 /* 5765 * There is a swap entry and a page doesn't exist or isn't charged. 5766 * But we cannot move a tail-page in a THP. 
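	 * The swap cgroup record for the entry is compared with mc.from's ID
	 * below, so only swap entries actually charged to the source memcg
	 * are reported as MC_TARGET_SWAP.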
5767 */ 5768 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5769 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5770 ret = MC_TARGET_SWAP; 5771 if (target) 5772 target->ent = ent; 5773 } 5774 return ret; 5775 } 5776 5777 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5778 /* 5779 * We don't consider PMD mapped swapping or file mapped pages because THP does 5780 * not support them for now. 5781 * Caller should make sure that pmd_trans_huge(pmd) is true. 5782 */ 5783 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5784 unsigned long addr, pmd_t pmd, union mc_target *target) 5785 { 5786 struct page *page = NULL; 5787 enum mc_target_type ret = MC_TARGET_NONE; 5788 5789 if (unlikely(is_swap_pmd(pmd))) { 5790 VM_BUG_ON(thp_migration_supported() && 5791 !is_pmd_migration_entry(pmd)); 5792 return ret; 5793 } 5794 page = pmd_page(pmd); 5795 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5796 if (!(mc.flags & MOVE_ANON)) 5797 return ret; 5798 if (page_memcg(page) == mc.from) { 5799 ret = MC_TARGET_PAGE; 5800 if (target) { 5801 get_page(page); 5802 target->page = page; 5803 } 5804 } 5805 return ret; 5806 } 5807 #else 5808 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5809 unsigned long addr, pmd_t pmd, union mc_target *target) 5810 { 5811 return MC_TARGET_NONE; 5812 } 5813 #endif 5814 5815 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5816 unsigned long addr, unsigned long end, 5817 struct mm_walk *walk) 5818 { 5819 struct vm_area_struct *vma = walk->vma; 5820 pte_t *pte; 5821 spinlock_t *ptl; 5822 5823 ptl = pmd_trans_huge_lock(pmd, vma); 5824 if (ptl) { 5825 /* 5826 * Note their can not be MC_TARGET_DEVICE for now as we do not 5827 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5828 * this might change. 5829 */ 5830 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5831 mc.precharge += HPAGE_PMD_NR; 5832 spin_unlock(ptl); 5833 return 0; 5834 } 5835 5836 if (pmd_trans_unstable(pmd)) 5837 return 0; 5838 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5839 for (; addr != end; pte++, addr += PAGE_SIZE) 5840 if (get_mctgt_type(vma, addr, *pte, NULL)) 5841 mc.precharge++; /* increment precharge temporarily */ 5842 pte_unmap_unlock(pte - 1, ptl); 5843 cond_resched(); 5844 5845 return 0; 5846 } 5847 5848 static const struct mm_walk_ops precharge_walk_ops = { 5849 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5850 }; 5851 5852 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5853 { 5854 unsigned long precharge; 5855 5856 mmap_read_lock(mm); 5857 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5858 mmap_read_unlock(mm); 5859 5860 precharge = mc.precharge; 5861 mc.precharge = 0; 5862 5863 return precharge; 5864 } 5865 5866 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5867 { 5868 unsigned long precharge = mem_cgroup_count_precharge(mm); 5869 5870 VM_BUG_ON(mc.moving_task); 5871 mc.moving_task = current; 5872 return mem_cgroup_do_precharge(precharge); 5873 } 5874 5875 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. 
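 * Unused precharge is uncharged from mc.to, charges that were moved but
 * intentionally left charged on the source are uncharged from mc.from, and
 * any moved swap entries have their memsw/memory counters and memcg ID
 * references settled.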
*/ 5876 static void __mem_cgroup_clear_mc(void) 5877 { 5878 struct mem_cgroup *from = mc.from; 5879 struct mem_cgroup *to = mc.to; 5880 5881 /* we must uncharge all the leftover precharges from mc.to */ 5882 if (mc.precharge) { 5883 cancel_charge(mc.to, mc.precharge); 5884 mc.precharge = 0; 5885 } 5886 /* 5887 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5888 * we must uncharge here. 5889 */ 5890 if (mc.moved_charge) { 5891 cancel_charge(mc.from, mc.moved_charge); 5892 mc.moved_charge = 0; 5893 } 5894 /* we must fixup refcnts and charges */ 5895 if (mc.moved_swap) { 5896 /* uncharge swap account from the old cgroup */ 5897 if (!mem_cgroup_is_root(mc.from)) 5898 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5899 5900 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5901 5902 /* 5903 * we charged both to->memory and to->memsw, so we 5904 * should uncharge to->memory. 5905 */ 5906 if (!mem_cgroup_is_root(mc.to)) 5907 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5908 5909 mc.moved_swap = 0; 5910 } 5911 memcg_oom_recover(from); 5912 memcg_oom_recover(to); 5913 wake_up_all(&mc.waitq); 5914 } 5915 5916 static void mem_cgroup_clear_mc(void) 5917 { 5918 struct mm_struct *mm = mc.mm; 5919 5920 /* 5921 * we must clear moving_task before waking up waiters at the end of 5922 * task migration. 5923 */ 5924 mc.moving_task = NULL; 5925 __mem_cgroup_clear_mc(); 5926 spin_lock(&mc.lock); 5927 mc.from = NULL; 5928 mc.to = NULL; 5929 mc.mm = NULL; 5930 spin_unlock(&mc.lock); 5931 5932 mmput(mm); 5933 } 5934 5935 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5936 { 5937 struct cgroup_subsys_state *css; 5938 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5939 struct mem_cgroup *from; 5940 struct task_struct *leader, *p; 5941 struct mm_struct *mm; 5942 unsigned long move_flags; 5943 int ret = 0; 5944 5945 /* charge immigration isn't supported on the default hierarchy */ 5946 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5947 return 0; 5948 5949 /* 5950 * Multi-process migrations only happen on the default hierarchy 5951 * where charge immigration is not used. Perform charge 5952 * immigration if @tset contains a leader and whine if there are 5953 * multiple. 5954 */ 5955 p = NULL; 5956 cgroup_taskset_for_each_leader(leader, css, tset) { 5957 WARN_ON_ONCE(p); 5958 p = leader; 5959 memcg = mem_cgroup_from_css(css); 5960 } 5961 if (!p) 5962 return 0; 5963 5964 /* 5965 * We are now committed to this value whatever it is. Changes in this 5966 * tunable will only affect upcoming migrations, not the current one. 5967 * So we need to save it, and keep it going. 
5968 */ 5969 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5970 if (!move_flags) 5971 return 0; 5972 5973 from = mem_cgroup_from_task(p); 5974 5975 VM_BUG_ON(from == memcg); 5976 5977 mm = get_task_mm(p); 5978 if (!mm) 5979 return 0; 5980 /* We move charges only when we move a owner of the mm */ 5981 if (mm->owner == p) { 5982 VM_BUG_ON(mc.from); 5983 VM_BUG_ON(mc.to); 5984 VM_BUG_ON(mc.precharge); 5985 VM_BUG_ON(mc.moved_charge); 5986 VM_BUG_ON(mc.moved_swap); 5987 5988 spin_lock(&mc.lock); 5989 mc.mm = mm; 5990 mc.from = from; 5991 mc.to = memcg; 5992 mc.flags = move_flags; 5993 spin_unlock(&mc.lock); 5994 /* We set mc.moving_task later */ 5995 5996 ret = mem_cgroup_precharge_mc(mm); 5997 if (ret) 5998 mem_cgroup_clear_mc(); 5999 } else { 6000 mmput(mm); 6001 } 6002 return ret; 6003 } 6004 6005 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6006 { 6007 if (mc.to) 6008 mem_cgroup_clear_mc(); 6009 } 6010 6011 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6012 unsigned long addr, unsigned long end, 6013 struct mm_walk *walk) 6014 { 6015 int ret = 0; 6016 struct vm_area_struct *vma = walk->vma; 6017 pte_t *pte; 6018 spinlock_t *ptl; 6019 enum mc_target_type target_type; 6020 union mc_target target; 6021 struct page *page; 6022 6023 ptl = pmd_trans_huge_lock(pmd, vma); 6024 if (ptl) { 6025 if (mc.precharge < HPAGE_PMD_NR) { 6026 spin_unlock(ptl); 6027 return 0; 6028 } 6029 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6030 if (target_type == MC_TARGET_PAGE) { 6031 page = target.page; 6032 if (!isolate_lru_page(page)) { 6033 if (!mem_cgroup_move_account(page, true, 6034 mc.from, mc.to)) { 6035 mc.precharge -= HPAGE_PMD_NR; 6036 mc.moved_charge += HPAGE_PMD_NR; 6037 } 6038 putback_lru_page(page); 6039 } 6040 put_page(page); 6041 } else if (target_type == MC_TARGET_DEVICE) { 6042 page = target.page; 6043 if (!mem_cgroup_move_account(page, true, 6044 mc.from, mc.to)) { 6045 mc.precharge -= HPAGE_PMD_NR; 6046 mc.moved_charge += HPAGE_PMD_NR; 6047 } 6048 put_page(page); 6049 } 6050 spin_unlock(ptl); 6051 return 0; 6052 } 6053 6054 if (pmd_trans_unstable(pmd)) 6055 return 0; 6056 retry: 6057 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6058 for (; addr != end; addr += PAGE_SIZE) { 6059 pte_t ptent = *(pte++); 6060 bool device = false; 6061 swp_entry_t ent; 6062 6063 if (!mc.precharge) 6064 break; 6065 6066 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6067 case MC_TARGET_DEVICE: 6068 device = true; 6069 fallthrough; 6070 case MC_TARGET_PAGE: 6071 page = target.page; 6072 /* 6073 * We can have a part of the split pmd here. Moving it 6074 * can be done but it would be too convoluted so simply 6075 * ignore such a partial THP and keep it in original 6076 * memcg. There should be somebody mapping the head. 6077 */ 6078 if (PageTransCompound(page)) 6079 goto put; 6080 if (!device && isolate_lru_page(page)) 6081 goto put; 6082 if (!mem_cgroup_move_account(page, false, 6083 mc.from, mc.to)) { 6084 mc.precharge--; 6085 /* we uncharge from mc.from later. */ 6086 mc.moved_charge++; 6087 } 6088 if (!device) 6089 putback_lru_page(page); 6090 put: /* get_mctgt_type() gets the page */ 6091 put_page(page); 6092 break; 6093 case MC_TARGET_SWAP: 6094 ent = target.ent; 6095 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6096 mc.precharge--; 6097 mem_cgroup_id_get_many(mc.to, 1); 6098 /* we fixup other refcnts and charges later. 
*/ 6099 mc.moved_swap++; 6100 } 6101 break; 6102 default: 6103 break; 6104 } 6105 } 6106 pte_unmap_unlock(pte - 1, ptl); 6107 cond_resched(); 6108 6109 if (addr != end) { 6110 /* 6111 * We have consumed all precharges we got in can_attach(). 6112 * We try to charge one by one, but don't do any additional 6113 * charges to mc.to if we have failed to charge once in the 6114 * attach() phase. 6115 */ 6116 ret = mem_cgroup_do_precharge(1); 6117 if (!ret) 6118 goto retry; 6119 } 6120 6121 return ret; 6122 } 6123 6124 static const struct mm_walk_ops charge_walk_ops = { 6125 .pmd_entry = mem_cgroup_move_charge_pte_range, 6126 }; 6127 6128 static void mem_cgroup_move_charge(void) 6129 { 6130 lru_add_drain_all(); 6131 /* 6132 * Signal lock_page_memcg() to take the memcg's move_lock 6133 * while we're moving its pages to another memcg. Then wait 6134 * for already started RCU-only updates to finish. 6135 */ 6136 atomic_inc(&mc.from->moving_account); 6137 synchronize_rcu(); 6138 retry: 6139 if (unlikely(!mmap_read_trylock(mc.mm))) { 6140 /* 6141 * Someone who is holding the mmap_lock might be waiting in 6142 * the waitq. So we cancel all extra charges, wake up all waiters, 6143 * and retry. Because we cancel precharges, we might not be able 6144 * to move enough charges, but moving charge is a best-effort 6145 * feature anyway, so it wouldn't be a big problem. 6146 */ 6147 __mem_cgroup_clear_mc(); 6148 cond_resched(); 6149 goto retry; 6150 } 6151 /* 6152 * When we have consumed all precharges and failed to do an 6153 * additional charge, the page walk just aborts. 6154 */ 6155 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6156 NULL); 6157 6158 mmap_read_unlock(mc.mm); 6159 atomic_dec(&mc.from->moving_account); 6160 } 6161 6162 static void mem_cgroup_move_task(void) 6163 { 6164 if (mc.to) { 6165 mem_cgroup_move_charge(); 6166 mem_cgroup_clear_mc(); 6167 } 6168 } 6169 #else /* !CONFIG_MMU */ 6170 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6171 { 6172 return 0; 6173 } 6174 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6175 { 6176 } 6177 static void mem_cgroup_move_task(void) 6178 { 6179 } 6180 #endif 6181 6182 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6183 { 6184 if (value == PAGE_COUNTER_MAX) 6185 seq_puts(m, "max\n"); 6186 else 6187 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6188 6189 return 0; 6190 } 6191 6192 static u64 memory_current_read(struct cgroup_subsys_state *css, 6193 struct cftype *cft) 6194 { 6195 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6196 6197 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6198 } 6199 6200 static int memory_min_show(struct seq_file *m, void *v) 6201 { 6202 return seq_puts_memcg_tunable(m, 6203 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6204 } 6205 6206 static ssize_t memory_min_write(struct kernfs_open_file *of, 6207 char *buf, size_t nbytes, loff_t off) 6208 { 6209 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6210 unsigned long min; 6211 int err; 6212 6213 buf = strstrip(buf); 6214 err = page_counter_memparse(buf, "max", &min); 6215 if (err) 6216 return err; 6217 6218 page_counter_set_min(&memcg->memory, min); 6219 6220 return nbytes; 6221 } 6222 6223 static int memory_low_show(struct seq_file *m, void *v) 6224 { 6225 return seq_puts_memcg_tunable(m, 6226 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6227 } 6228 6229 static ssize_t memory_low_write(struct kernfs_open_file *of, 6230 char *buf, size_t nbytes, loff_t off) 6231 { 6232
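/*
 * (Editorial note, added for illustration and not present in the original
 * source: as with memory.min above, the value written to memory.low is
 * parsed by page_counter_memparse(), so writing "max" stores
 * PAGE_COUNTER_MAX, while a byte value such as "512M" is converted to
 * pages - 131072 pages with a 4K PAGE_SIZE - before being handed to
 * page_counter_set_low() below.)
 */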
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6233 unsigned long low; 6234 int err; 6235 6236 buf = strstrip(buf); 6237 err = page_counter_memparse(buf, "max", &low); 6238 if (err) 6239 return err; 6240 6241 page_counter_set_low(&memcg->memory, low); 6242 6243 return nbytes; 6244 } 6245 6246 static int memory_high_show(struct seq_file *m, void *v) 6247 { 6248 return seq_puts_memcg_tunable(m, 6249 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6250 } 6251 6252 static ssize_t memory_high_write(struct kernfs_open_file *of, 6253 char *buf, size_t nbytes, loff_t off) 6254 { 6255 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6256 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6257 bool drained = false; 6258 unsigned long high; 6259 int err; 6260 6261 buf = strstrip(buf); 6262 err = page_counter_memparse(buf, "max", &high); 6263 if (err) 6264 return err; 6265 6266 page_counter_set_high(&memcg->memory, high); 6267 6268 for (;;) { 6269 unsigned long nr_pages = page_counter_read(&memcg->memory); 6270 unsigned long reclaimed; 6271 6272 if (nr_pages <= high) 6273 break; 6274 6275 if (signal_pending(current)) 6276 break; 6277 6278 if (!drained) { 6279 drain_all_stock(memcg); 6280 drained = true; 6281 continue; 6282 } 6283 6284 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6285 GFP_KERNEL, true); 6286 6287 if (!reclaimed && !nr_retries--) 6288 break; 6289 } 6290 6291 memcg_wb_domain_size_changed(memcg); 6292 return nbytes; 6293 } 6294 6295 static int memory_max_show(struct seq_file *m, void *v) 6296 { 6297 return seq_puts_memcg_tunable(m, 6298 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6299 } 6300 6301 static ssize_t memory_max_write(struct kernfs_open_file *of, 6302 char *buf, size_t nbytes, loff_t off) 6303 { 6304 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6305 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6306 bool drained = false; 6307 unsigned long max; 6308 int err; 6309 6310 buf = strstrip(buf); 6311 err = page_counter_memparse(buf, "max", &max); 6312 if (err) 6313 return err; 6314 6315 xchg(&memcg->memory.max, max); 6316 6317 for (;;) { 6318 unsigned long nr_pages = page_counter_read(&memcg->memory); 6319 6320 if (nr_pages <= max) 6321 break; 6322 6323 if (signal_pending(current)) 6324 break; 6325 6326 if (!drained) { 6327 drain_all_stock(memcg); 6328 drained = true; 6329 continue; 6330 } 6331 6332 if (nr_reclaims) { 6333 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6334 GFP_KERNEL, true)) 6335 nr_reclaims--; 6336 continue; 6337 } 6338 6339 memcg_memory_event(memcg, MEMCG_OOM); 6340 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6341 break; 6342 } 6343 6344 memcg_wb_domain_size_changed(memcg); 6345 return nbytes; 6346 } 6347 6348 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6349 { 6350 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6351 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6352 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6353 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6354 seq_printf(m, "oom_kill %lu\n", 6355 atomic_long_read(&events[MEMCG_OOM_KILL])); 6356 } 6357 6358 static int memory_events_show(struct seq_file *m, void *v) 6359 { 6360 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6361 6362 __memory_events_show(m, memcg->memory_events); 6363 return 0; 6364 } 6365 6366 static int memory_events_local_show(struct seq_file *m, void *v) 6367 { 6368 struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); 6369 6370 __memory_events_show(m, memcg->memory_events_local); 6371 return 0; 6372 } 6373 6374 static int memory_stat_show(struct seq_file *m, void *v) 6375 { 6376 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6377 char *buf; 6378 6379 buf = memory_stat_format(memcg); 6380 if (!buf) 6381 return -ENOMEM; 6382 seq_puts(m, buf); 6383 kfree(buf); 6384 return 0; 6385 } 6386 6387 #ifdef CONFIG_NUMA 6388 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 6389 int item) 6390 { 6391 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item); 6392 } 6393 6394 static int memory_numa_stat_show(struct seq_file *m, void *v) 6395 { 6396 int i; 6397 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6398 6399 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6400 int nid; 6401 6402 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6403 continue; 6404 6405 seq_printf(m, "%s", memory_stats[i].name); 6406 for_each_node_state(nid, N_MEMORY) { 6407 u64 size; 6408 struct lruvec *lruvec; 6409 6410 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6411 size = lruvec_page_state_output(lruvec, 6412 memory_stats[i].idx); 6413 seq_printf(m, " N%d=%llu", nid, size); 6414 } 6415 seq_putc(m, '\n'); 6416 } 6417 6418 return 0; 6419 } 6420 #endif 6421 6422 static int memory_oom_group_show(struct seq_file *m, void *v) 6423 { 6424 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6425 6426 seq_printf(m, "%d\n", memcg->oom_group); 6427 6428 return 0; 6429 } 6430 6431 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6432 char *buf, size_t nbytes, loff_t off) 6433 { 6434 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6435 int ret, oom_group; 6436 6437 buf = strstrip(buf); 6438 if (!buf) 6439 return -EINVAL; 6440 6441 ret = kstrtoint(buf, 0, &oom_group); 6442 if (ret) 6443 return ret; 6444 6445 if (oom_group != 0 && oom_group != 1) 6446 return -EINVAL; 6447 6448 memcg->oom_group = oom_group; 6449 6450 return nbytes; 6451 } 6452 6453 static struct cftype memory_files[] = { 6454 { 6455 .name = "current", 6456 .flags = CFTYPE_NOT_ON_ROOT, 6457 .read_u64 = memory_current_read, 6458 }, 6459 { 6460 .name = "min", 6461 .flags = CFTYPE_NOT_ON_ROOT, 6462 .seq_show = memory_min_show, 6463 .write = memory_min_write, 6464 }, 6465 { 6466 .name = "low", 6467 .flags = CFTYPE_NOT_ON_ROOT, 6468 .seq_show = memory_low_show, 6469 .write = memory_low_write, 6470 }, 6471 { 6472 .name = "high", 6473 .flags = CFTYPE_NOT_ON_ROOT, 6474 .seq_show = memory_high_show, 6475 .write = memory_high_write, 6476 }, 6477 { 6478 .name = "max", 6479 .flags = CFTYPE_NOT_ON_ROOT, 6480 .seq_show = memory_max_show, 6481 .write = memory_max_write, 6482 }, 6483 { 6484 .name = "events", 6485 .flags = CFTYPE_NOT_ON_ROOT, 6486 .file_offset = offsetof(struct mem_cgroup, events_file), 6487 .seq_show = memory_events_show, 6488 }, 6489 { 6490 .name = "events.local", 6491 .flags = CFTYPE_NOT_ON_ROOT, 6492 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6493 .seq_show = memory_events_local_show, 6494 }, 6495 { 6496 .name = "stat", 6497 .seq_show = memory_stat_show, 6498 }, 6499 #ifdef CONFIG_NUMA 6500 { 6501 .name = "numa_stat", 6502 .seq_show = memory_numa_stat_show, 6503 }, 6504 #endif 6505 { 6506 .name = "oom.group", 6507 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6508 .seq_show = memory_oom_group_show, 6509 .write = memory_oom_group_write, 6510 }, 6511 { } /* terminate */ 6512 }; 6513 6514 struct cgroup_subsys memory_cgrp_subsys = { 6515 .css_alloc = 
mem_cgroup_css_alloc, 6516 .css_online = mem_cgroup_css_online, 6517 .css_offline = mem_cgroup_css_offline, 6518 .css_released = mem_cgroup_css_released, 6519 .css_free = mem_cgroup_css_free, 6520 .css_reset = mem_cgroup_css_reset, 6521 .css_rstat_flush = mem_cgroup_css_rstat_flush, 6522 .can_attach = mem_cgroup_can_attach, 6523 .cancel_attach = mem_cgroup_cancel_attach, 6524 .post_attach = mem_cgroup_move_task, 6525 .dfl_cftypes = memory_files, 6526 .legacy_cftypes = mem_cgroup_legacy_files, 6527 .early_init = 0, 6528 }; 6529 6530 /* 6531 * This function calculates an individual cgroup's effective 6532 * protection which is derived from its own memory.min/low, its 6533 * parent's and siblings' settings, as well as the actual memory 6534 * distribution in the tree. 6535 * 6536 * The following rules apply to the effective protection values: 6537 * 6538 * 1. At the first level of reclaim, effective protection is equal to 6539 * the declared protection in memory.min and memory.low. 6540 * 6541 * 2. To enable safe delegation of the protection configuration, at 6542 * subsequent levels the effective protection is capped to the 6543 * parent's effective protection. 6544 * 6545 * 3. To make complex and dynamic subtrees easier to configure, the 6546 * user is allowed to overcommit the declared protection at a given 6547 * level. If that is the case, the parent's effective protection is 6548 * distributed to the children in proportion to how much protection 6549 * they have declared and how much of it they are utilizing. 6550 * 6551 * This makes distribution proportional, but also work-conserving: 6552 * if one cgroup claims much more protection than the memory it uses, 6553 * the unused remainder is available to its siblings. 6554 * 6555 * 4. Conversely, when the declared protection is undercommitted at a 6556 * given level, the distribution of the larger parental protection 6557 * budget is NOT proportional. A cgroup's protection from a sibling 6558 * is capped to its own memory.min/low setting. 6559 * 6560 * 5. However, to allow protecting recursive subtrees from each other 6561 * without having to declare each individual cgroup's fixed share 6562 * of the ancestor's claim to protection, any unutilized - 6563 * "floating" - protection from up the tree is distributed in 6564 * proportion to each cgroup's *usage*. This makes the protection 6565 * neutral wrt sibling cgroups and lets them compete freely over 6566 * the shared parental protection budget, but it protects the 6567 * subtree as a whole from neighboring subtrees. 6568 * 6569 * Note that 4. and 5. are not in conflict: 4. is about protecting 6570 * against immediate siblings whereas 5. is about protecting against 6571 * neighboring subtrees. 6572 */ 6573 static unsigned long effective_protection(unsigned long usage, 6574 unsigned long parent_usage, 6575 unsigned long setting, 6576 unsigned long parent_effective, 6577 unsigned long siblings_protected) 6578 { 6579 unsigned long protected; 6580 unsigned long ep; 6581 6582 protected = min(usage, setting); 6583 /* 6584 * If all cgroups at this level combined claim and use more 6585 * protection than what the parent affords them, distribute 6586 * shares in proportion to utilization. 6587 * 6588 * We are using actual utilization rather than the statically 6589 * claimed protection in order to be work-conserving: claimed 6590 * but unused protection is available to siblings that would 6591 * otherwise get a smaller chunk than what they claimed.
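 *
 * (Worked example added for illustration, with made-up numbers: if the
 * parent's effective protection is 100 pages and its two children each
 * claim and use 150 pages, siblings_protected is 300, so each child ends
 * up with 150 * 100 / 300 = 50 pages of effective protection and the
 * parent's budget is never oversubscribed.)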
6592 */ 6593 if (siblings_protected > parent_effective) 6594 return protected * parent_effective / siblings_protected; 6595 6596 /* 6597 * Ok, utilized protection of all children is within what the 6598 * parent affords them, so we know whatever this child claims 6599 * and utilizes is effectively protected. 6600 * 6601 * If there is unprotected usage beyond this value, reclaim 6602 * will apply pressure in proportion to that amount. 6603 * 6604 * If there is unutilized protection, the cgroup will be fully 6605 * shielded from reclaim, but we do return a smaller value for 6606 * protection than what the group could enjoy in theory. This 6607 * is okay. With the overcommit distribution above, effective 6608 * protection is always dependent on how memory is actually 6609 * consumed among the siblings anyway. 6610 */ 6611 ep = protected; 6612 6613 /* 6614 * If the children aren't claiming (all of) the protection 6615 * afforded to them by the parent, distribute the remainder in 6616 * proportion to the (unprotected) memory of each cgroup. That 6617 * way, cgroups that aren't explicitly prioritized wrt each 6618 * other compete freely over the allowance, but they are 6619 * collectively protected from neighboring trees. 6620 * 6621 * We're using unprotected memory for the weight so that if 6622 * some cgroups DO claim explicit protection, we don't protect 6623 * the same bytes twice. 6624 * 6625 * Check both usage and parent_usage against the respective 6626 * protected values. One should imply the other, but they 6627 * aren't read atomically - make sure the division is sane. 6628 */ 6629 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6630 return ep; 6631 if (parent_effective > siblings_protected && 6632 parent_usage > siblings_protected && 6633 usage > protected) { 6634 unsigned long unclaimed; 6635 6636 unclaimed = parent_effective - siblings_protected; 6637 unclaimed *= usage - protected; 6638 unclaimed /= parent_usage - siblings_protected; 6639 6640 ep += unclaimed; 6641 } 6642 6643 return ep; 6644 } 6645 6646 /** 6647 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 6648 * @root: the top ancestor of the sub-tree being checked 6649 * @memcg: the memory cgroup to check 6650 * 6651 * WARNING: This function is not stateless! It can only be used as part 6652 * of a top-down tree iteration, not for isolated queries. 6653 */ 6654 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6655 struct mem_cgroup *memcg) 6656 { 6657 unsigned long usage, parent_usage; 6658 struct mem_cgroup *parent; 6659 6660 if (mem_cgroup_disabled()) 6661 return; 6662 6663 if (!root) 6664 root = root_mem_cgroup; 6665 6666 /* 6667 * Effective values of the reclaim targets are ignored so they 6668 * can be stale. Have a look at mem_cgroup_protection for more 6669 * details. 6670 * TODO: calculation should be more robust so that we do not need 6671 * that special casing. 
6672 */ 6673 if (memcg == root) 6674 return; 6675 6676 usage = page_counter_read(&memcg->memory); 6677 if (!usage) 6678 return; 6679 6680 parent = parent_mem_cgroup(memcg); 6681 /* No parent means a non-hierarchical mode on v1 memcg */ 6682 if (!parent) 6683 return; 6684 6685 if (parent == root) { 6686 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6687 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6688 return; 6689 } 6690 6691 parent_usage = page_counter_read(&parent->memory); 6692 6693 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6694 READ_ONCE(memcg->memory.min), 6695 READ_ONCE(parent->memory.emin), 6696 atomic_long_read(&parent->memory.children_min_usage))); 6697 6698 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6699 READ_ONCE(memcg->memory.low), 6700 READ_ONCE(parent->memory.elow), 6701 atomic_long_read(&parent->memory.children_low_usage))); 6702 } 6703 6704 static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg, 6705 gfp_t gfp) 6706 { 6707 unsigned int nr_pages = thp_nr_pages(page); 6708 int ret; 6709 6710 ret = try_charge(memcg, gfp, nr_pages); 6711 if (ret) 6712 goto out; 6713 6714 css_get(&memcg->css); 6715 commit_charge(page, memcg); 6716 6717 local_irq_disable(); 6718 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6719 memcg_check_events(memcg, page); 6720 local_irq_enable(); 6721 out: 6722 return ret; 6723 } 6724 6725 /** 6726 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6727 * @page: page to charge 6728 * @mm: mm context of the victim 6729 * @gfp_mask: reclaim mode 6730 * 6731 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6732 * pages according to @gfp_mask if necessary. if @mm is NULL, try to 6733 * charge to the active memcg. 6734 * 6735 * Do not use this for pages allocated for swapin. 6736 * 6737 * Returns 0 on success. Otherwise, an error code is returned. 6738 */ 6739 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6740 { 6741 struct mem_cgroup *memcg; 6742 int ret; 6743 6744 if (mem_cgroup_disabled()) 6745 return 0; 6746 6747 memcg = get_mem_cgroup_from_mm(mm); 6748 ret = __mem_cgroup_charge(page, memcg, gfp_mask); 6749 css_put(&memcg->css); 6750 6751 return ret; 6752 } 6753 6754 /** 6755 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin 6756 * @page: page to charge 6757 * @mm: mm context of the victim 6758 * @gfp: reclaim mode 6759 * @entry: swap entry for which the page is allocated 6760 * 6761 * This function charges a page allocated for swapin. Please call this before 6762 * adding the page to the swapcache. 6763 * 6764 * Returns 0 on success. Otherwise, an error code is returned. 6765 */ 6766 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, 6767 gfp_t gfp, swp_entry_t entry) 6768 { 6769 struct mem_cgroup *memcg; 6770 unsigned short id; 6771 int ret; 6772 6773 if (mem_cgroup_disabled()) 6774 return 0; 6775 6776 id = lookup_swap_cgroup_id(entry); 6777 rcu_read_lock(); 6778 memcg = mem_cgroup_from_id(id); 6779 if (!memcg || !css_tryget_online(&memcg->css)) 6780 memcg = get_mem_cgroup_from_mm(mm); 6781 rcu_read_unlock(); 6782 6783 ret = __mem_cgroup_charge(page, memcg, gfp); 6784 6785 css_put(&memcg->css); 6786 return ret; 6787 } 6788 6789 /* 6790 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 6791 * @entry: swap entry for which the page is charged 6792 * 6793 * Call this function after successfully adding the charged page to swapcache. 
6794 * 6795 * Note: This function assumes the page for which swap slot is being uncharged 6796 * is order 0 page. 6797 */ 6798 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) 6799 { 6800 /* 6801 * Cgroup1's unified memory+swap counter has been charged with the 6802 * new swapcache page, finish the transfer by uncharging the swap 6803 * slot. The swap slot would also get uncharged when it dies, but 6804 * it can stick around indefinitely and we'd count the page twice 6805 * the entire time. 6806 * 6807 * Cgroup2 has separate resource counters for memory and swap, 6808 * so this is a non-issue here. Memory and swap charge lifetimes 6809 * correspond 1:1 to page and swap slot lifetimes: we charge the 6810 * page to memory here, and uncharge swap when the slot is freed. 6811 */ 6812 if (!mem_cgroup_disabled() && do_memsw_account()) { 6813 /* 6814 * The swap entry might not get freed for a long time, 6815 * let's not wait for it. The page already received a 6816 * memory+swap charge, drop the swap entry duplicate. 6817 */ 6818 mem_cgroup_uncharge_swap(entry, 1); 6819 } 6820 } 6821 6822 struct uncharge_gather { 6823 struct mem_cgroup *memcg; 6824 unsigned long nr_memory; 6825 unsigned long pgpgout; 6826 unsigned long nr_kmem; 6827 struct page *dummy_page; 6828 }; 6829 6830 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6831 { 6832 memset(ug, 0, sizeof(*ug)); 6833 } 6834 6835 static void uncharge_batch(const struct uncharge_gather *ug) 6836 { 6837 unsigned long flags; 6838 6839 if (ug->nr_memory) { 6840 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 6841 if (do_memsw_account()) 6842 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 6843 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6844 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6845 memcg_oom_recover(ug->memcg); 6846 } 6847 6848 local_irq_save(flags); 6849 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6850 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); 6851 memcg_check_events(ug->memcg, ug->dummy_page); 6852 local_irq_restore(flags); 6853 6854 /* drop reference from uncharge_page */ 6855 css_put(&ug->memcg->css); 6856 } 6857 6858 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6859 { 6860 unsigned long nr_pages; 6861 struct mem_cgroup *memcg; 6862 struct obj_cgroup *objcg; 6863 bool use_objcg = PageMemcgKmem(page); 6864 6865 VM_BUG_ON_PAGE(PageLRU(page), page); 6866 6867 /* 6868 * Nobody should be changing or seriously looking at 6869 * page memcg or objcg at this point, we have fully 6870 * exclusive access to the page. 6871 */ 6872 if (use_objcg) { 6873 objcg = __page_objcg(page); 6874 /* 6875 * This get matches the put at the end of the function and 6876 * kmem pages do not hold memcg references anymore. 
6877 */ 6878 memcg = get_mem_cgroup_from_objcg(objcg); 6879 } else { 6880 memcg = __page_memcg(page); 6881 } 6882 6883 if (!memcg) 6884 return; 6885 6886 if (ug->memcg != memcg) { 6887 if (ug->memcg) { 6888 uncharge_batch(ug); 6889 uncharge_gather_clear(ug); 6890 } 6891 ug->memcg = memcg; 6892 ug->dummy_page = page; 6893 6894 /* pairs with css_put in uncharge_batch */ 6895 css_get(&memcg->css); 6896 } 6897 6898 nr_pages = compound_nr(page); 6899 6900 if (use_objcg) { 6901 ug->nr_memory += nr_pages; 6902 ug->nr_kmem += nr_pages; 6903 6904 page->memcg_data = 0; 6905 obj_cgroup_put(objcg); 6906 } else { 6907 /* LRU pages aren't accounted at the root level */ 6908 if (!mem_cgroup_is_root(memcg)) 6909 ug->nr_memory += nr_pages; 6910 ug->pgpgout++; 6911 6912 page->memcg_data = 0; 6913 } 6914 6915 css_put(&memcg->css); 6916 } 6917 6918 /** 6919 * mem_cgroup_uncharge - uncharge a page 6920 * @page: page to uncharge 6921 * 6922 * Uncharge a page previously charged with mem_cgroup_charge(). 6923 */ 6924 void mem_cgroup_uncharge(struct page *page) 6925 { 6926 struct uncharge_gather ug; 6927 6928 if (mem_cgroup_disabled()) 6929 return; 6930 6931 /* Don't touch page->lru of any random page, pre-check: */ 6932 if (!page_memcg(page)) 6933 return; 6934 6935 uncharge_gather_clear(&ug); 6936 uncharge_page(page, &ug); 6937 uncharge_batch(&ug); 6938 } 6939 6940 /** 6941 * mem_cgroup_uncharge_list - uncharge a list of page 6942 * @page_list: list of pages to uncharge 6943 * 6944 * Uncharge a list of pages previously charged with 6945 * mem_cgroup_charge(). 6946 */ 6947 void mem_cgroup_uncharge_list(struct list_head *page_list) 6948 { 6949 struct uncharge_gather ug; 6950 struct page *page; 6951 6952 if (mem_cgroup_disabled()) 6953 return; 6954 6955 uncharge_gather_clear(&ug); 6956 list_for_each_entry(page, page_list, lru) 6957 uncharge_page(page, &ug); 6958 if (ug.memcg) 6959 uncharge_batch(&ug); 6960 } 6961 6962 /** 6963 * mem_cgroup_migrate - charge a page's replacement 6964 * @oldpage: currently circulating page 6965 * @newpage: replacement page 6966 * 6967 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6968 * be uncharged upon free. 6969 * 6970 * Both pages must be locked, @newpage->mapping must be set up. 6971 */ 6972 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6973 { 6974 struct mem_cgroup *memcg; 6975 unsigned int nr_pages; 6976 unsigned long flags; 6977 6978 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6979 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6980 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6981 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6982 newpage); 6983 6984 if (mem_cgroup_disabled()) 6985 return; 6986 6987 /* Page cache replacement: new page already charged? */ 6988 if (page_memcg(newpage)) 6989 return; 6990 6991 memcg = page_memcg(oldpage); 6992 VM_WARN_ON_ONCE_PAGE(!memcg, oldpage); 6993 if (!memcg) 6994 return; 6995 6996 /* Force-charge the new page. 
The old one will be freed soon */ 6997 nr_pages = thp_nr_pages(newpage); 6998 6999 if (!mem_cgroup_is_root(memcg)) { 7000 page_counter_charge(&memcg->memory, nr_pages); 7001 if (do_memsw_account()) 7002 page_counter_charge(&memcg->memsw, nr_pages); 7003 } 7004 7005 css_get(&memcg->css); 7006 commit_charge(newpage, memcg); 7007 7008 local_irq_save(flags); 7009 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 7010 memcg_check_events(memcg, newpage); 7011 local_irq_restore(flags); 7012 } 7013 7014 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7015 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7016 7017 void mem_cgroup_sk_alloc(struct sock *sk) 7018 { 7019 struct mem_cgroup *memcg; 7020 7021 if (!mem_cgroup_sockets_enabled) 7022 return; 7023 7024 /* Do not associate the sock with unrelated interrupted task's memcg. */ 7025 if (in_interrupt()) 7026 return; 7027 7028 rcu_read_lock(); 7029 memcg = mem_cgroup_from_task(current); 7030 if (memcg == root_mem_cgroup) 7031 goto out; 7032 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7033 goto out; 7034 if (css_tryget(&memcg->css)) 7035 sk->sk_memcg = memcg; 7036 out: 7037 rcu_read_unlock(); 7038 } 7039 7040 void mem_cgroup_sk_free(struct sock *sk) 7041 { 7042 if (sk->sk_memcg) 7043 css_put(&sk->sk_memcg->css); 7044 } 7045 7046 /** 7047 * mem_cgroup_charge_skmem - charge socket memory 7048 * @memcg: memcg to charge 7049 * @nr_pages: number of pages to charge 7050 * 7051 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7052 * @memcg's configured limit, %false if the charge had to be forced. 7053 */ 7054 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7055 { 7056 gfp_t gfp_mask = GFP_KERNEL; 7057 7058 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7059 struct page_counter *fail; 7060 7061 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7062 memcg->tcpmem_pressure = 0; 7063 return true; 7064 } 7065 page_counter_charge(&memcg->tcpmem, nr_pages); 7066 memcg->tcpmem_pressure = 1; 7067 return false; 7068 } 7069 7070 /* Don't block in the packet receive path */ 7071 if (in_softirq()) 7072 gfp_mask = GFP_NOWAIT; 7073 7074 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7075 7076 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 7077 return true; 7078 7079 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 7080 return false; 7081 } 7082 7083 /** 7084 * mem_cgroup_uncharge_skmem - uncharge socket memory 7085 * @memcg: memcg to uncharge 7086 * @nr_pages: number of pages to uncharge 7087 */ 7088 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7089 { 7090 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7091 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7092 return; 7093 } 7094 7095 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7096 7097 refill_stock(memcg, nr_pages); 7098 } 7099 7100 static int __init cgroup_memory(char *s) 7101 { 7102 char *token; 7103 7104 while ((token = strsep(&s, ",")) != NULL) { 7105 if (!*token) 7106 continue; 7107 if (!strcmp(token, "nosocket")) 7108 cgroup_memory_nosocket = true; 7109 if (!strcmp(token, "nokmem")) 7110 cgroup_memory_nokmem = true; 7111 } 7112 return 0; 7113 } 7114 __setup("cgroup.memory=", cgroup_memory); 7115 7116 /* 7117 * subsys_initcall() for memory controller. 
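 *
 * (Editorial aside, not part of the original comment: the cgroup_memory()
 * __setup handler just above means that booting with
 * "cgroup.memory=nosocket,nokmem" sets cgroup_memory_nosocket and
 * cgroup_memory_nokmem early during boot.)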
7118 * 7119 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7120 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7121 * basically everything that doesn't depend on a specific mem_cgroup structure 7122 * should be initialized from here. 7123 */ 7124 static int __init mem_cgroup_init(void) 7125 { 7126 int cpu, node; 7127 7128 /* 7129 * Currently s32 type (can refer to struct batched_lruvec_stat) is 7130 * used for per-memcg-per-cpu caching of per-node statistics. In order 7131 * to work fine, we should make sure that the overfill threshold can't 7132 * exceed S32_MAX / PAGE_SIZE. 7133 */ 7134 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 7135 7136 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7137 memcg_hotplug_cpu_dead); 7138 7139 for_each_possible_cpu(cpu) 7140 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7141 drain_local_stock); 7142 7143 for_each_node(node) { 7144 struct mem_cgroup_tree_per_node *rtpn; 7145 7146 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7147 node_online(node) ? node : NUMA_NO_NODE); 7148 7149 rtpn->rb_root = RB_ROOT; 7150 rtpn->rb_rightmost = NULL; 7151 spin_lock_init(&rtpn->lock); 7152 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7153 } 7154 7155 return 0; 7156 } 7157 subsys_initcall(mem_cgroup_init); 7158 7159 #ifdef CONFIG_MEMCG_SWAP 7160 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7161 { 7162 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7163 /* 7164 * The root cgroup cannot be destroyed, so it's refcount must 7165 * always be >= 1. 7166 */ 7167 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7168 VM_BUG_ON(1); 7169 break; 7170 } 7171 memcg = parent_mem_cgroup(memcg); 7172 if (!memcg) 7173 memcg = root_mem_cgroup; 7174 } 7175 return memcg; 7176 } 7177 7178 /** 7179 * mem_cgroup_swapout - transfer a memsw charge to swap 7180 * @page: page whose memsw charge to transfer 7181 * @entry: swap entry to move the charge to 7182 * 7183 * Transfer the memsw charge of @page to @entry. 7184 */ 7185 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7186 { 7187 struct mem_cgroup *memcg, *swap_memcg; 7188 unsigned int nr_entries; 7189 unsigned short oldid; 7190 7191 VM_BUG_ON_PAGE(PageLRU(page), page); 7192 VM_BUG_ON_PAGE(page_count(page), page); 7193 7194 if (mem_cgroup_disabled()) 7195 return; 7196 7197 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7198 return; 7199 7200 memcg = page_memcg(page); 7201 7202 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7203 if (!memcg) 7204 return; 7205 7206 /* 7207 * In case the memcg owning these pages has been offlined and doesn't 7208 * have an ID allocated to it anymore, charge the closest online 7209 * ancestor for the swap instead and transfer the memory+swap charge. 
7210 */ 7211 swap_memcg = mem_cgroup_id_get_online(memcg); 7212 nr_entries = thp_nr_pages(page); 7213 /* Get references for the tail pages, too */ 7214 if (nr_entries > 1) 7215 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7216 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7217 nr_entries); 7218 VM_BUG_ON_PAGE(oldid, page); 7219 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7220 7221 page->memcg_data = 0; 7222 7223 if (!mem_cgroup_is_root(memcg)) 7224 page_counter_uncharge(&memcg->memory, nr_entries); 7225 7226 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7227 if (!mem_cgroup_is_root(swap_memcg)) 7228 page_counter_charge(&swap_memcg->memsw, nr_entries); 7229 page_counter_uncharge(&memcg->memsw, nr_entries); 7230 } 7231 7232 /* 7233 * Interrupts should be disabled here because the caller holds the 7234 * i_pages lock which is taken with interrupts-off. It is 7235 * important here to have the interrupts disabled because it is the 7236 * only synchronisation we have for updating the per-CPU variables. 7237 */ 7238 VM_BUG_ON(!irqs_disabled()); 7239 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7240 memcg_check_events(memcg, page); 7241 7242 css_put(&memcg->css); 7243 } 7244 7245 /** 7246 * mem_cgroup_try_charge_swap - try charging swap space for a page 7247 * @page: page being added to swap 7248 * @entry: swap entry to charge 7249 * 7250 * Try to charge @page's memcg for the swap space at @entry. 7251 * 7252 * Returns 0 on success, -ENOMEM on failure. 7253 */ 7254 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7255 { 7256 unsigned int nr_pages = thp_nr_pages(page); 7257 struct page_counter *counter; 7258 struct mem_cgroup *memcg; 7259 unsigned short oldid; 7260 7261 if (mem_cgroup_disabled()) 7262 return 0; 7263 7264 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7265 return 0; 7266 7267 memcg = page_memcg(page); 7268 7269 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7270 if (!memcg) 7271 return 0; 7272 7273 if (!entry.val) { 7274 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7275 return 0; 7276 } 7277 7278 memcg = mem_cgroup_id_get_online(memcg); 7279 7280 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7281 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7282 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7283 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7284 mem_cgroup_id_put(memcg); 7285 return -ENOMEM; 7286 } 7287 7288 /* Get references for the tail pages, too */ 7289 if (nr_pages > 1) 7290 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7291 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7292 VM_BUG_ON_PAGE(oldid, page); 7293 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7294 7295 return 0; 7296 } 7297 7298 /** 7299 * mem_cgroup_uncharge_swap - uncharge swap space 7300 * @entry: swap entry to uncharge 7301 * @nr_pages: the amount of swap space to uncharge 7302 */ 7303 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7304 { 7305 struct mem_cgroup *memcg; 7306 unsigned short id; 7307 7308 id = swap_cgroup_record(entry, 0, nr_pages); 7309 rcu_read_lock(); 7310 memcg = mem_cgroup_from_id(id); 7311 if (memcg) { 7312 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7313 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7314 page_counter_uncharge(&memcg->swap, nr_pages); 7315 else 7316 page_counter_uncharge(&memcg->memsw, nr_pages); 7317 } 7318 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7319 mem_cgroup_id_put_many(memcg, nr_pages); 7320 } 7321 rcu_read_unlock(); 7322 } 
7323 7324 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7325 { 7326 long nr_swap_pages = get_nr_swap_pages(); 7327 7328 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7329 return nr_swap_pages; 7330 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7331 nr_swap_pages = min_t(long, nr_swap_pages, 7332 READ_ONCE(memcg->swap.max) - 7333 page_counter_read(&memcg->swap)); 7334 return nr_swap_pages; 7335 } 7336 7337 bool mem_cgroup_swap_full(struct page *page) 7338 { 7339 struct mem_cgroup *memcg; 7340 7341 VM_BUG_ON_PAGE(!PageLocked(page), page); 7342 7343 if (vm_swap_full()) 7344 return true; 7345 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7346 return false; 7347 7348 memcg = page_memcg(page); 7349 if (!memcg) 7350 return false; 7351 7352 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7353 unsigned long usage = page_counter_read(&memcg->swap); 7354 7355 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7356 usage * 2 >= READ_ONCE(memcg->swap.max)) 7357 return true; 7358 } 7359 7360 return false; 7361 } 7362 7363 static int __init setup_swap_account(char *s) 7364 { 7365 if (!strcmp(s, "1")) 7366 cgroup_memory_noswap = false; 7367 else if (!strcmp(s, "0")) 7368 cgroup_memory_noswap = true; 7369 return 1; 7370 } 7371 __setup("swapaccount=", setup_swap_account); 7372 7373 static u64 swap_current_read(struct cgroup_subsys_state *css, 7374 struct cftype *cft) 7375 { 7376 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7377 7378 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7379 } 7380 7381 static int swap_high_show(struct seq_file *m, void *v) 7382 { 7383 return seq_puts_memcg_tunable(m, 7384 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7385 } 7386 7387 static ssize_t swap_high_write(struct kernfs_open_file *of, 7388 char *buf, size_t nbytes, loff_t off) 7389 { 7390 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7391 unsigned long high; 7392 int err; 7393 7394 buf = strstrip(buf); 7395 err = page_counter_memparse(buf, "max", &high); 7396 if (err) 7397 return err; 7398 7399 page_counter_set_high(&memcg->swap, high); 7400 7401 return nbytes; 7402 } 7403 7404 static int swap_max_show(struct seq_file *m, void *v) 7405 { 7406 return seq_puts_memcg_tunable(m, 7407 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7408 } 7409 7410 static ssize_t swap_max_write(struct kernfs_open_file *of, 7411 char *buf, size_t nbytes, loff_t off) 7412 { 7413 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7414 unsigned long max; 7415 int err; 7416 7417 buf = strstrip(buf); 7418 err = page_counter_memparse(buf, "max", &max); 7419 if (err) 7420 return err; 7421 7422 xchg(&memcg->swap.max, max); 7423 7424 return nbytes; 7425 } 7426 7427 static int swap_events_show(struct seq_file *m, void *v) 7428 { 7429 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7430 7431 seq_printf(m, "high %lu\n", 7432 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7433 seq_printf(m, "max %lu\n", 7434 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7435 seq_printf(m, "fail %lu\n", 7436 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7437 7438 return 0; 7439 } 7440 7441 static struct cftype swap_files[] = { 7442 { 7443 .name = "swap.current", 7444 .flags = CFTYPE_NOT_ON_ROOT, 7445 .read_u64 = swap_current_read, 7446 }, 7447 { 7448 .name = "swap.high", 7449 .flags = CFTYPE_NOT_ON_ROOT, 7450 .seq_show = swap_high_show, 7451 .write = swap_high_write, 7452 }, 7453 { 7454 .name = 
"swap.max", 7455 .flags = CFTYPE_NOT_ON_ROOT, 7456 .seq_show = swap_max_show, 7457 .write = swap_max_write, 7458 }, 7459 { 7460 .name = "swap.events", 7461 .flags = CFTYPE_NOT_ON_ROOT, 7462 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7463 .seq_show = swap_events_show, 7464 }, 7465 { } /* terminate */ 7466 }; 7467 7468 static struct cftype memsw_files[] = { 7469 { 7470 .name = "memsw.usage_in_bytes", 7471 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7472 .read_u64 = mem_cgroup_read_u64, 7473 }, 7474 { 7475 .name = "memsw.max_usage_in_bytes", 7476 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7477 .write = mem_cgroup_reset, 7478 .read_u64 = mem_cgroup_read_u64, 7479 }, 7480 { 7481 .name = "memsw.limit_in_bytes", 7482 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7483 .write = mem_cgroup_write, 7484 .read_u64 = mem_cgroup_read_u64, 7485 }, 7486 { 7487 .name = "memsw.failcnt", 7488 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7489 .write = mem_cgroup_reset, 7490 .read_u64 = mem_cgroup_read_u64, 7491 }, 7492 { }, /* terminate */ 7493 }; 7494 7495 /* 7496 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7497 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7498 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7499 * boot parameter. This may result in premature OOPS inside 7500 * mem_cgroup_get_nr_swap_pages() function in corner cases. 7501 */ 7502 static int __init mem_cgroup_swap_init(void) 7503 { 7504 /* No memory control -> no swap control */ 7505 if (mem_cgroup_disabled()) 7506 cgroup_memory_noswap = true; 7507 7508 if (cgroup_memory_noswap) 7509 return 0; 7510 7511 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7512 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7513 7514 return 0; 7515 } 7516 core_initcall(mem_cgroup_swap_init); 7517 7518 #endif /* CONFIG_MEMCG_SWAP */ 7519