1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 * 24 * Per memcg lru locking 25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi 26 */ 27 28 #include <linux/page_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/pagewalk.h> 32 #include <linux/sched/mm.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/hugetlb.h> 35 #include <linux/pagemap.h> 36 #include <linux/vm_event_item.h> 37 #include <linux/smp.h> 38 #include <linux/page-flags.h> 39 #include <linux/backing-dev.h> 40 #include <linux/bit_spinlock.h> 41 #include <linux/rcupdate.h> 42 #include <linux/limits.h> 43 #include <linux/export.h> 44 #include <linux/mutex.h> 45 #include <linux/rbtree.h> 46 #include <linux/slab.h> 47 #include <linux/swap.h> 48 #include <linux/swapops.h> 49 #include <linux/spinlock.h> 50 #include <linux/eventfd.h> 51 #include <linux/poll.h> 52 #include <linux/sort.h> 53 #include <linux/fs.h> 54 #include <linux/seq_file.h> 55 #include <linux/vmpressure.h> 56 #include <linux/memremap.h> 57 #include <linux/mm_inline.h> 58 #include <linux/swap_cgroup.h> 59 #include <linux/cpu.h> 60 #include <linux/oom.h> 61 #include <linux/lockdep.h> 62 #include <linux/file.h> 63 #include <linux/resume_user_mode.h> 64 #include <linux/psi.h> 65 #include <linux/seq_buf.h> 66 #include <linux/sched/isolation.h> 67 #include "internal.h" 68 #include <net/sock.h> 69 #include <net/ip.h> 70 #include "slab.h" 71 #include "swap.h" 72 73 #include <linux/uaccess.h> 74 75 #include <trace/events/vmscan.h> 76 77 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 78 EXPORT_SYMBOL(memory_cgrp_subsys); 79 80 struct mem_cgroup *root_mem_cgroup __read_mostly; 81 82 /* Active memory cgroup to use from an interrupt context */ 83 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); 84 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); 85 86 /* Socket memory accounting disabled? */ 87 static bool cgroup_memory_nosocket __ro_after_init; 88 89 /* Kernel memory accounting disabled? */ 90 static bool cgroup_memory_nokmem __ro_after_init; 91 92 /* BPF memory accounting disabled? */ 93 static bool cgroup_memory_nobpf __ro_after_init; 94 95 #ifdef CONFIG_CGROUP_WRITEBACK 96 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); 97 #endif 98 99 /* Whether legacy memory+swap accounting is active */ 100 static bool do_memsw_account(void) 101 { 102 return !cgroup_subsys_on_dfl(memory_cgrp_subsys); 103 } 104 105 #define THRESHOLDS_EVENTS_TARGET 128 106 #define SOFTLIMIT_EVENTS_TARGET 1024 107 108 /* 109 * Cgroups above their limits are maintained in a RB-Tree, independent of 110 * their hierarchy representation 111 */ 112 113 struct mem_cgroup_tree_per_node { 114 struct rb_root rb_root; 115 struct rb_node *rb_rightmost; 116 spinlock_t lock; 117 }; 118 119 struct mem_cgroup_tree { 120 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 121 }; 122 123 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 124 125 /* for OOM */ 126 struct mem_cgroup_eventfd_list { 127 struct list_head list; 128 struct eventfd_ctx *eventfd; 129 }; 130 131 /* 132 * cgroup_event represents events which userspace want to receive. 133 */ 134 struct mem_cgroup_event { 135 /* 136 * memcg which the event belongs to. 137 */ 138 struct mem_cgroup *memcg; 139 /* 140 * eventfd to signal userspace about the event. 141 */ 142 struct eventfd_ctx *eventfd; 143 /* 144 * Each of these stored in a list by the cgroup. 145 */ 146 struct list_head list; 147 /* 148 * register_event() callback will be used to add new userspace 149 * waiter for changes related to this event. Use eventfd_signal() 150 * on eventfd to send notification to userspace. 151 */ 152 int (*register_event)(struct mem_cgroup *memcg, 153 struct eventfd_ctx *eventfd, const char *args); 154 /* 155 * unregister_event() callback will be called when userspace closes 156 * the eventfd or on cgroup removing. This callback must be set, 157 * if you want provide notification functionality. 158 */ 159 void (*unregister_event)(struct mem_cgroup *memcg, 160 struct eventfd_ctx *eventfd); 161 /* 162 * All fields below needed to unregister event when 163 * userspace closes eventfd. 164 */ 165 poll_table pt; 166 wait_queue_head_t *wqh; 167 wait_queue_entry_t wait; 168 struct work_struct remove; 169 }; 170 171 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 172 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 173 174 /* Stuffs for move charges at task migration. */ 175 /* 176 * Types of charges to be moved. 177 */ 178 #define MOVE_ANON 0x1U 179 #define MOVE_FILE 0x2U 180 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 181 182 /* "mc" and its members are protected by cgroup_mutex */ 183 static struct move_charge_struct { 184 spinlock_t lock; /* for from, to */ 185 struct mm_struct *mm; 186 struct mem_cgroup *from; 187 struct mem_cgroup *to; 188 unsigned long flags; 189 unsigned long precharge; 190 unsigned long moved_charge; 191 unsigned long moved_swap; 192 struct task_struct *moving_task; /* a task moving charges */ 193 wait_queue_head_t waitq; /* a waitq for other context */ 194 } mc = { 195 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 196 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 197 }; 198 199 /* 200 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft 201 * limit reclaim to prevent infinite loops, if they ever occur. 202 */ 203 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 204 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 205 206 /* for encoding cft->private value on file */ 207 enum res_type { 208 _MEM, 209 _MEMSWAP, 210 _KMEM, 211 _TCP, 212 }; 213 214 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 215 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 216 #define MEMFILE_ATTR(val) ((val) & 0xffff) 217 218 /* 219 * Iteration constructs for visiting all cgroups (under a tree). If 220 * loops are exited prematurely (break), mem_cgroup_iter_break() must 221 * be used for reference counting. 222 */ 223 #define for_each_mem_cgroup_tree(iter, root) \ 224 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 225 iter != NULL; \ 226 iter = mem_cgroup_iter(root, iter, NULL)) 227 228 #define for_each_mem_cgroup(iter) \ 229 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 230 iter != NULL; \ 231 iter = mem_cgroup_iter(NULL, iter, NULL)) 232 233 static inline bool task_is_dying(void) 234 { 235 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || 236 (current->flags & PF_EXITING); 237 } 238 239 /* Some nice accessors for the vmpressure. */ 240 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 241 { 242 if (!memcg) 243 memcg = root_mem_cgroup; 244 return &memcg->vmpressure; 245 } 246 247 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr) 248 { 249 return container_of(vmpr, struct mem_cgroup, vmpressure); 250 } 251 252 #ifdef CONFIG_MEMCG_KMEM 253 static DEFINE_SPINLOCK(objcg_lock); 254 255 bool mem_cgroup_kmem_disabled(void) 256 { 257 return cgroup_memory_nokmem; 258 } 259 260 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 261 unsigned int nr_pages); 262 263 static void obj_cgroup_release(struct percpu_ref *ref) 264 { 265 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt); 266 unsigned int nr_bytes; 267 unsigned int nr_pages; 268 unsigned long flags; 269 270 /* 271 * At this point all allocated objects are freed, and 272 * objcg->nr_charged_bytes can't have an arbitrary byte value. 273 * However, it can be PAGE_SIZE or (x * PAGE_SIZE). 274 * 275 * The following sequence can lead to it: 276 * 1) CPU0: objcg == stock->cached_objcg 277 * 2) CPU1: we do a small allocation (e.g. 92 bytes), 278 * PAGE_SIZE bytes are charged 279 * 3) CPU1: a process from another memcg is allocating something, 280 * the stock if flushed, 281 * objcg->nr_charged_bytes = PAGE_SIZE - 92 282 * 5) CPU0: we do release this object, 283 * 92 bytes are added to stock->nr_bytes 284 * 6) CPU0: stock is flushed, 285 * 92 bytes are added to objcg->nr_charged_bytes 286 * 287 * In the result, nr_charged_bytes == PAGE_SIZE. 288 * This page will be uncharged in obj_cgroup_release(). 289 */ 290 nr_bytes = atomic_read(&objcg->nr_charged_bytes); 291 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); 292 nr_pages = nr_bytes >> PAGE_SHIFT; 293 294 if (nr_pages) 295 obj_cgroup_uncharge_pages(objcg, nr_pages); 296 297 spin_lock_irqsave(&objcg_lock, flags); 298 list_del(&objcg->list); 299 spin_unlock_irqrestore(&objcg_lock, flags); 300 301 percpu_ref_exit(ref); 302 kfree_rcu(objcg, rcu); 303 } 304 305 static struct obj_cgroup *obj_cgroup_alloc(void) 306 { 307 struct obj_cgroup *objcg; 308 int ret; 309 310 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); 311 if (!objcg) 312 return NULL; 313 314 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, 315 GFP_KERNEL); 316 if (ret) { 317 kfree(objcg); 318 return NULL; 319 } 320 INIT_LIST_HEAD(&objcg->list); 321 return objcg; 322 } 323 324 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, 325 struct mem_cgroup *parent) 326 { 327 struct obj_cgroup *objcg, *iter; 328 329 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); 330 331 spin_lock_irq(&objcg_lock); 332 333 /* 1) Ready to reparent active objcg. */ 334 list_add(&objcg->list, &memcg->objcg_list); 335 /* 2) Reparent active objcg and already reparented objcgs to parent. */ 336 list_for_each_entry(iter, &memcg->objcg_list, list) 337 WRITE_ONCE(iter->memcg, parent); 338 /* 3) Move already reparented objcgs to the parent's list */ 339 list_splice(&memcg->objcg_list, &parent->objcg_list); 340 341 spin_unlock_irq(&objcg_lock); 342 343 percpu_ref_kill(&objcg->refcnt); 344 } 345 346 /* 347 * A lot of the calls to the cache allocation functions are expected to be 348 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are 349 * conditional to this static branch, we'll have to allow modules that does 350 * kmem_cache_alloc and the such to see this symbol as well 351 */ 352 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key); 353 EXPORT_SYMBOL(memcg_kmem_online_key); 354 355 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key); 356 EXPORT_SYMBOL(memcg_bpf_enabled_key); 357 #endif 358 359 /** 360 * mem_cgroup_css_from_folio - css of the memcg associated with a folio 361 * @folio: folio of interest 362 * 363 * If memcg is bound to the default hierarchy, css of the memcg associated 364 * with @folio is returned. The returned css remains associated with @folio 365 * until it is released. 366 * 367 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 368 * is returned. 369 */ 370 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) 371 { 372 struct mem_cgroup *memcg = folio_memcg(folio); 373 374 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 375 memcg = root_mem_cgroup; 376 377 return &memcg->css; 378 } 379 380 /** 381 * page_cgroup_ino - return inode number of the memcg a page is charged to 382 * @page: the page 383 * 384 * Look up the closest online ancestor of the memory cgroup @page is charged to 385 * and return its inode number or 0 if @page is not charged to any cgroup. It 386 * is safe to call this function without holding a reference to @page. 387 * 388 * Note, this function is inherently racy, because there is nothing to prevent 389 * the cgroup inode from getting torn down and potentially reallocated a moment 390 * after page_cgroup_ino() returns, so it only should be used by callers that 391 * do not care (such as procfs interfaces). 392 */ 393 ino_t page_cgroup_ino(struct page *page) 394 { 395 struct mem_cgroup *memcg; 396 unsigned long ino = 0; 397 398 rcu_read_lock(); 399 /* page_folio() is racy here, but the entire function is racy anyway */ 400 memcg = folio_memcg_check(page_folio(page)); 401 402 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 403 memcg = parent_mem_cgroup(memcg); 404 if (memcg) 405 ino = cgroup_ino(memcg->css.cgroup); 406 rcu_read_unlock(); 407 return ino; 408 } 409 410 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 411 struct mem_cgroup_tree_per_node *mctz, 412 unsigned long new_usage_in_excess) 413 { 414 struct rb_node **p = &mctz->rb_root.rb_node; 415 struct rb_node *parent = NULL; 416 struct mem_cgroup_per_node *mz_node; 417 bool rightmost = true; 418 419 if (mz->on_tree) 420 return; 421 422 mz->usage_in_excess = new_usage_in_excess; 423 if (!mz->usage_in_excess) 424 return; 425 while (*p) { 426 parent = *p; 427 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 428 tree_node); 429 if (mz->usage_in_excess < mz_node->usage_in_excess) { 430 p = &(*p)->rb_left; 431 rightmost = false; 432 } else { 433 p = &(*p)->rb_right; 434 } 435 } 436 437 if (rightmost) 438 mctz->rb_rightmost = &mz->tree_node; 439 440 rb_link_node(&mz->tree_node, parent, p); 441 rb_insert_color(&mz->tree_node, &mctz->rb_root); 442 mz->on_tree = true; 443 } 444 445 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 446 struct mem_cgroup_tree_per_node *mctz) 447 { 448 if (!mz->on_tree) 449 return; 450 451 if (&mz->tree_node == mctz->rb_rightmost) 452 mctz->rb_rightmost = rb_prev(&mz->tree_node); 453 454 rb_erase(&mz->tree_node, &mctz->rb_root); 455 mz->on_tree = false; 456 } 457 458 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 459 struct mem_cgroup_tree_per_node *mctz) 460 { 461 unsigned long flags; 462 463 spin_lock_irqsave(&mctz->lock, flags); 464 __mem_cgroup_remove_exceeded(mz, mctz); 465 spin_unlock_irqrestore(&mctz->lock, flags); 466 } 467 468 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 469 { 470 unsigned long nr_pages = page_counter_read(&memcg->memory); 471 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 472 unsigned long excess = 0; 473 474 if (nr_pages > soft_limit) 475 excess = nr_pages - soft_limit; 476 477 return excess; 478 } 479 480 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid) 481 { 482 unsigned long excess; 483 struct mem_cgroup_per_node *mz; 484 struct mem_cgroup_tree_per_node *mctz; 485 486 if (lru_gen_enabled()) { 487 if (soft_limit_excess(memcg)) 488 lru_gen_soft_reclaim(memcg, nid); 489 return; 490 } 491 492 mctz = soft_limit_tree.rb_tree_per_node[nid]; 493 if (!mctz) 494 return; 495 /* 496 * Necessary to update all ancestors when hierarchy is used. 497 * because their event counter is not touched. 498 */ 499 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 500 mz = memcg->nodeinfo[nid]; 501 excess = soft_limit_excess(memcg); 502 /* 503 * We have to update the tree if mz is on RB-tree or 504 * mem is over its softlimit. 505 */ 506 if (excess || mz->on_tree) { 507 unsigned long flags; 508 509 spin_lock_irqsave(&mctz->lock, flags); 510 /* if on-tree, remove it */ 511 if (mz->on_tree) 512 __mem_cgroup_remove_exceeded(mz, mctz); 513 /* 514 * Insert again. mz->usage_in_excess will be updated. 515 * If excess is 0, no tree ops. 516 */ 517 __mem_cgroup_insert_exceeded(mz, mctz, excess); 518 spin_unlock_irqrestore(&mctz->lock, flags); 519 } 520 } 521 } 522 523 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 524 { 525 struct mem_cgroup_tree_per_node *mctz; 526 struct mem_cgroup_per_node *mz; 527 int nid; 528 529 for_each_node(nid) { 530 mz = memcg->nodeinfo[nid]; 531 mctz = soft_limit_tree.rb_tree_per_node[nid]; 532 if (mctz) 533 mem_cgroup_remove_exceeded(mz, mctz); 534 } 535 } 536 537 static struct mem_cgroup_per_node * 538 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 539 { 540 struct mem_cgroup_per_node *mz; 541 542 retry: 543 mz = NULL; 544 if (!mctz->rb_rightmost) 545 goto done; /* Nothing to reclaim from */ 546 547 mz = rb_entry(mctz->rb_rightmost, 548 struct mem_cgroup_per_node, tree_node); 549 /* 550 * Remove the node now but someone else can add it back, 551 * we will to add it back at the end of reclaim to its correct 552 * position in the tree. 553 */ 554 __mem_cgroup_remove_exceeded(mz, mctz); 555 if (!soft_limit_excess(mz->memcg) || 556 !css_tryget(&mz->memcg->css)) 557 goto retry; 558 done: 559 return mz; 560 } 561 562 static struct mem_cgroup_per_node * 563 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 564 { 565 struct mem_cgroup_per_node *mz; 566 567 spin_lock_irq(&mctz->lock); 568 mz = __mem_cgroup_largest_soft_limit_node(mctz); 569 spin_unlock_irq(&mctz->lock); 570 return mz; 571 } 572 573 /* 574 * memcg and lruvec stats flushing 575 * 576 * Many codepaths leading to stats update or read are performance sensitive and 577 * adding stats flushing in such codepaths is not desirable. So, to optimize the 578 * flushing the kernel does: 579 * 580 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let 581 * rstat update tree grow unbounded. 582 * 583 * 2) Flush the stats synchronously on reader side only when there are more than 584 * (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization 585 * will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but 586 * only for 2 seconds due to (1). 587 */ 588 static void flush_memcg_stats_dwork(struct work_struct *w); 589 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork); 590 static DEFINE_PER_CPU(unsigned int, stats_updates); 591 static atomic_t stats_flush_ongoing = ATOMIC_INIT(0); 592 static atomic_t stats_flush_threshold = ATOMIC_INIT(0); 593 static u64 flush_next_time; 594 595 #define FLUSH_TIME (2UL*HZ) 596 597 /* 598 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can 599 * not rely on this as part of an acquired spinlock_t lock. These functions are 600 * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion 601 * is sufficient. 602 */ 603 static void memcg_stats_lock(void) 604 { 605 preempt_disable_nested(); 606 VM_WARN_ON_IRQS_ENABLED(); 607 } 608 609 static void __memcg_stats_lock(void) 610 { 611 preempt_disable_nested(); 612 } 613 614 static void memcg_stats_unlock(void) 615 { 616 preempt_enable_nested(); 617 } 618 619 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) 620 { 621 unsigned int x; 622 623 if (!val) 624 return; 625 626 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); 627 628 x = __this_cpu_add_return(stats_updates, abs(val)); 629 if (x > MEMCG_CHARGE_BATCH) { 630 /* 631 * If stats_flush_threshold exceeds the threshold 632 * (>num_online_cpus()), cgroup stats update will be triggered 633 * in __mem_cgroup_flush_stats(). Increasing this var further 634 * is redundant and simply adds overhead in atomic update. 635 */ 636 if (atomic_read(&stats_flush_threshold) <= num_online_cpus()) 637 atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold); 638 __this_cpu_write(stats_updates, 0); 639 } 640 } 641 642 static void do_flush_stats(void) 643 { 644 /* 645 * We always flush the entire tree, so concurrent flushers can just 646 * skip. This avoids a thundering herd problem on the rstat global lock 647 * from memcg flushers (e.g. reclaim, refault, etc). 648 */ 649 if (atomic_read(&stats_flush_ongoing) || 650 atomic_xchg(&stats_flush_ongoing, 1)) 651 return; 652 653 WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME); 654 655 cgroup_rstat_flush(root_mem_cgroup->css.cgroup); 656 657 atomic_set(&stats_flush_threshold, 0); 658 atomic_set(&stats_flush_ongoing, 0); 659 } 660 661 void mem_cgroup_flush_stats(void) 662 { 663 if (atomic_read(&stats_flush_threshold) > num_online_cpus()) 664 do_flush_stats(); 665 } 666 667 void mem_cgroup_flush_stats_ratelimited(void) 668 { 669 if (time_after64(jiffies_64, READ_ONCE(flush_next_time))) 670 mem_cgroup_flush_stats(); 671 } 672 673 static void flush_memcg_stats_dwork(struct work_struct *w) 674 { 675 /* 676 * Always flush here so that flushing in latency-sensitive paths is 677 * as cheap as possible. 678 */ 679 do_flush_stats(); 680 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME); 681 } 682 683 /* Subset of vm_event_item to report for memcg event stats */ 684 static const unsigned int memcg_vm_event_stat[] = { 685 PGPGIN, 686 PGPGOUT, 687 PGSCAN_KSWAPD, 688 PGSCAN_DIRECT, 689 PGSCAN_KHUGEPAGED, 690 PGSTEAL_KSWAPD, 691 PGSTEAL_DIRECT, 692 PGSTEAL_KHUGEPAGED, 693 PGFAULT, 694 PGMAJFAULT, 695 PGREFILL, 696 PGACTIVATE, 697 PGDEACTIVATE, 698 PGLAZYFREE, 699 PGLAZYFREED, 700 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 701 ZSWPIN, 702 ZSWPOUT, 703 #endif 704 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 705 THP_FAULT_ALLOC, 706 THP_COLLAPSE_ALLOC, 707 #endif 708 }; 709 710 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat) 711 static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly; 712 713 static void init_memcg_events(void) 714 { 715 int i; 716 717 for (i = 0; i < NR_MEMCG_EVENTS; ++i) 718 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1; 719 } 720 721 static inline int memcg_events_index(enum vm_event_item idx) 722 { 723 return mem_cgroup_events_index[idx] - 1; 724 } 725 726 struct memcg_vmstats_percpu { 727 /* Local (CPU and cgroup) page state & events */ 728 long state[MEMCG_NR_STAT]; 729 unsigned long events[NR_MEMCG_EVENTS]; 730 731 /* Delta calculation for lockless upward propagation */ 732 long state_prev[MEMCG_NR_STAT]; 733 unsigned long events_prev[NR_MEMCG_EVENTS]; 734 735 /* Cgroup1: threshold notifications & softlimit tree updates */ 736 unsigned long nr_page_events; 737 unsigned long targets[MEM_CGROUP_NTARGETS]; 738 }; 739 740 struct memcg_vmstats { 741 /* Aggregated (CPU and subtree) page state & events */ 742 long state[MEMCG_NR_STAT]; 743 unsigned long events[NR_MEMCG_EVENTS]; 744 745 /* Non-hierarchical (CPU aggregated) page state & events */ 746 long state_local[MEMCG_NR_STAT]; 747 unsigned long events_local[NR_MEMCG_EVENTS]; 748 749 /* Pending child counts during tree propagation */ 750 long state_pending[MEMCG_NR_STAT]; 751 unsigned long events_pending[NR_MEMCG_EVENTS]; 752 }; 753 754 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) 755 { 756 long x = READ_ONCE(memcg->vmstats->state[idx]); 757 #ifdef CONFIG_SMP 758 if (x < 0) 759 x = 0; 760 #endif 761 return x; 762 } 763 764 /** 765 * __mod_memcg_state - update cgroup memory statistics 766 * @memcg: the memory cgroup 767 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 768 * @val: delta to add to the counter, can be negative 769 */ 770 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 771 { 772 if (mem_cgroup_disabled()) 773 return; 774 775 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); 776 memcg_rstat_updated(memcg, val); 777 } 778 779 /* idx can be of type enum memcg_stat_item or node_stat_item. */ 780 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) 781 { 782 long x = READ_ONCE(memcg->vmstats->state_local[idx]); 783 784 #ifdef CONFIG_SMP 785 if (x < 0) 786 x = 0; 787 #endif 788 return x; 789 } 790 791 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 792 int val) 793 { 794 struct mem_cgroup_per_node *pn; 795 struct mem_cgroup *memcg; 796 797 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 798 memcg = pn->memcg; 799 800 /* 801 * The caller from rmap relay on disabled preemption becase they never 802 * update their counter from in-interrupt context. For these two 803 * counters we check that the update is never performed from an 804 * interrupt context while other caller need to have disabled interrupt. 805 */ 806 __memcg_stats_lock(); 807 if (IS_ENABLED(CONFIG_DEBUG_VM)) { 808 switch (idx) { 809 case NR_ANON_MAPPED: 810 case NR_FILE_MAPPED: 811 case NR_ANON_THPS: 812 case NR_SHMEM_PMDMAPPED: 813 case NR_FILE_PMDMAPPED: 814 WARN_ON_ONCE(!in_task()); 815 break; 816 default: 817 VM_WARN_ON_IRQS_ENABLED(); 818 } 819 } 820 821 /* Update memcg */ 822 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); 823 824 /* Update lruvec */ 825 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); 826 827 memcg_rstat_updated(memcg, val); 828 memcg_stats_unlock(); 829 } 830 831 /** 832 * __mod_lruvec_state - update lruvec memory statistics 833 * @lruvec: the lruvec 834 * @idx: the stat item 835 * @val: delta to add to the counter, can be negative 836 * 837 * The lruvec is the intersection of the NUMA node and a cgroup. This 838 * function updates the all three counters that are affected by a 839 * change of state at this level: per-node, per-cgroup, per-lruvec. 840 */ 841 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 842 int val) 843 { 844 /* Update node */ 845 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 846 847 /* Update memcg and lruvec */ 848 if (!mem_cgroup_disabled()) 849 __mod_memcg_lruvec_state(lruvec, idx, val); 850 } 851 852 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, 853 int val) 854 { 855 struct page *head = compound_head(page); /* rmap on tail pages */ 856 struct mem_cgroup *memcg; 857 pg_data_t *pgdat = page_pgdat(page); 858 struct lruvec *lruvec; 859 860 rcu_read_lock(); 861 memcg = page_memcg(head); 862 /* Untracked pages have no memcg, no lruvec. Update only the node */ 863 if (!memcg) { 864 rcu_read_unlock(); 865 __mod_node_page_state(pgdat, idx, val); 866 return; 867 } 868 869 lruvec = mem_cgroup_lruvec(memcg, pgdat); 870 __mod_lruvec_state(lruvec, idx, val); 871 rcu_read_unlock(); 872 } 873 EXPORT_SYMBOL(__mod_lruvec_page_state); 874 875 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) 876 { 877 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 878 struct mem_cgroup *memcg; 879 struct lruvec *lruvec; 880 881 rcu_read_lock(); 882 memcg = mem_cgroup_from_slab_obj(p); 883 884 /* 885 * Untracked pages have no memcg, no lruvec. Update only the 886 * node. If we reparent the slab objects to the root memcg, 887 * when we free the slab object, we need to update the per-memcg 888 * vmstats to keep it correct for the root memcg. 889 */ 890 if (!memcg) { 891 __mod_node_page_state(pgdat, idx, val); 892 } else { 893 lruvec = mem_cgroup_lruvec(memcg, pgdat); 894 __mod_lruvec_state(lruvec, idx, val); 895 } 896 rcu_read_unlock(); 897 } 898 899 /** 900 * __count_memcg_events - account VM events in a cgroup 901 * @memcg: the memory cgroup 902 * @idx: the event item 903 * @count: the number of events that occurred 904 */ 905 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 906 unsigned long count) 907 { 908 int index = memcg_events_index(idx); 909 910 if (mem_cgroup_disabled() || index < 0) 911 return; 912 913 memcg_stats_lock(); 914 __this_cpu_add(memcg->vmstats_percpu->events[index], count); 915 memcg_rstat_updated(memcg, count); 916 memcg_stats_unlock(); 917 } 918 919 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 920 { 921 int index = memcg_events_index(event); 922 923 if (index < 0) 924 return 0; 925 return READ_ONCE(memcg->vmstats->events[index]); 926 } 927 928 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 929 { 930 int index = memcg_events_index(event); 931 932 if (index < 0) 933 return 0; 934 935 return READ_ONCE(memcg->vmstats->events_local[index]); 936 } 937 938 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 939 int nr_pages) 940 { 941 /* pagein of a big page is an event. So, ignore page size */ 942 if (nr_pages > 0) 943 __count_memcg_events(memcg, PGPGIN, 1); 944 else { 945 __count_memcg_events(memcg, PGPGOUT, 1); 946 nr_pages = -nr_pages; /* for event */ 947 } 948 949 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 950 } 951 952 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 953 enum mem_cgroup_events_target target) 954 { 955 unsigned long val, next; 956 957 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 958 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 959 /* from time_after() in jiffies.h */ 960 if ((long)(next - val) < 0) { 961 switch (target) { 962 case MEM_CGROUP_TARGET_THRESH: 963 next = val + THRESHOLDS_EVENTS_TARGET; 964 break; 965 case MEM_CGROUP_TARGET_SOFTLIMIT: 966 next = val + SOFTLIMIT_EVENTS_TARGET; 967 break; 968 default: 969 break; 970 } 971 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 972 return true; 973 } 974 return false; 975 } 976 977 /* 978 * Check events in order. 979 * 980 */ 981 static void memcg_check_events(struct mem_cgroup *memcg, int nid) 982 { 983 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 984 return; 985 986 /* threshold event is triggered in finer grain than soft limit */ 987 if (unlikely(mem_cgroup_event_ratelimit(memcg, 988 MEM_CGROUP_TARGET_THRESH))) { 989 bool do_softlimit; 990 991 do_softlimit = mem_cgroup_event_ratelimit(memcg, 992 MEM_CGROUP_TARGET_SOFTLIMIT); 993 mem_cgroup_threshold(memcg); 994 if (unlikely(do_softlimit)) 995 mem_cgroup_update_tree(memcg, nid); 996 } 997 } 998 999 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 1000 { 1001 /* 1002 * mm_update_next_owner() may clear mm->owner to NULL 1003 * if it races with swapoff, page migration, etc. 1004 * So this can be called with p == NULL. 1005 */ 1006 if (unlikely(!p)) 1007 return NULL; 1008 1009 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 1010 } 1011 EXPORT_SYMBOL(mem_cgroup_from_task); 1012 1013 static __always_inline struct mem_cgroup *active_memcg(void) 1014 { 1015 if (!in_task()) 1016 return this_cpu_read(int_active_memcg); 1017 else 1018 return current->active_memcg; 1019 } 1020 1021 /** 1022 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 1023 * @mm: mm from which memcg should be extracted. It can be NULL. 1024 * 1025 * Obtain a reference on mm->memcg and returns it if successful. If mm 1026 * is NULL, then the memcg is chosen as follows: 1027 * 1) The active memcg, if set. 1028 * 2) current->mm->memcg, if available 1029 * 3) root memcg 1030 * If mem_cgroup is disabled, NULL is returned. 1031 */ 1032 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 1033 { 1034 struct mem_cgroup *memcg; 1035 1036 if (mem_cgroup_disabled()) 1037 return NULL; 1038 1039 /* 1040 * Page cache insertions can happen without an 1041 * actual mm context, e.g. during disk probing 1042 * on boot, loopback IO, acct() writes etc. 1043 * 1044 * No need to css_get on root memcg as the reference 1045 * counting is disabled on the root level in the 1046 * cgroup core. See CSS_NO_REF. 1047 */ 1048 if (unlikely(!mm)) { 1049 memcg = active_memcg(); 1050 if (unlikely(memcg)) { 1051 /* remote memcg must hold a ref */ 1052 css_get(&memcg->css); 1053 return memcg; 1054 } 1055 mm = current->mm; 1056 if (unlikely(!mm)) 1057 return root_mem_cgroup; 1058 } 1059 1060 rcu_read_lock(); 1061 do { 1062 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1063 if (unlikely(!memcg)) 1064 memcg = root_mem_cgroup; 1065 } while (!css_tryget(&memcg->css)); 1066 rcu_read_unlock(); 1067 return memcg; 1068 } 1069 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 1070 1071 static __always_inline bool memcg_kmem_bypass(void) 1072 { 1073 /* Allow remote memcg charging from any context. */ 1074 if (unlikely(active_memcg())) 1075 return false; 1076 1077 /* Memcg to charge can't be determined. */ 1078 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD)) 1079 return true; 1080 1081 return false; 1082 } 1083 1084 /** 1085 * mem_cgroup_iter - iterate over memory cgroup hierarchy 1086 * @root: hierarchy root 1087 * @prev: previously returned memcg, NULL on first invocation 1088 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1089 * 1090 * Returns references to children of the hierarchy below @root, or 1091 * @root itself, or %NULL after a full round-trip. 1092 * 1093 * Caller must pass the return value in @prev on subsequent 1094 * invocations for reference counting, or use mem_cgroup_iter_break() 1095 * to cancel a hierarchy walk before the round-trip is complete. 1096 * 1097 * Reclaimers can specify a node in @reclaim to divide up the memcgs 1098 * in the hierarchy among all concurrent reclaimers operating on the 1099 * same node. 1100 */ 1101 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1102 struct mem_cgroup *prev, 1103 struct mem_cgroup_reclaim_cookie *reclaim) 1104 { 1105 struct mem_cgroup_reclaim_iter *iter; 1106 struct cgroup_subsys_state *css = NULL; 1107 struct mem_cgroup *memcg = NULL; 1108 struct mem_cgroup *pos = NULL; 1109 1110 if (mem_cgroup_disabled()) 1111 return NULL; 1112 1113 if (!root) 1114 root = root_mem_cgroup; 1115 1116 rcu_read_lock(); 1117 1118 if (reclaim) { 1119 struct mem_cgroup_per_node *mz; 1120 1121 mz = root->nodeinfo[reclaim->pgdat->node_id]; 1122 iter = &mz->iter; 1123 1124 /* 1125 * On start, join the current reclaim iteration cycle. 1126 * Exit when a concurrent walker completes it. 1127 */ 1128 if (!prev) 1129 reclaim->generation = iter->generation; 1130 else if (reclaim->generation != iter->generation) 1131 goto out_unlock; 1132 1133 while (1) { 1134 pos = READ_ONCE(iter->position); 1135 if (!pos || css_tryget(&pos->css)) 1136 break; 1137 /* 1138 * css reference reached zero, so iter->position will 1139 * be cleared by ->css_released. However, we should not 1140 * rely on this happening soon, because ->css_released 1141 * is called from a work queue, and by busy-waiting we 1142 * might block it. So we clear iter->position right 1143 * away. 1144 */ 1145 (void)cmpxchg(&iter->position, pos, NULL); 1146 } 1147 } else if (prev) { 1148 pos = prev; 1149 } 1150 1151 if (pos) 1152 css = &pos->css; 1153 1154 for (;;) { 1155 css = css_next_descendant_pre(css, &root->css); 1156 if (!css) { 1157 /* 1158 * Reclaimers share the hierarchy walk, and a 1159 * new one might jump in right at the end of 1160 * the hierarchy - make sure they see at least 1161 * one group and restart from the beginning. 1162 */ 1163 if (!prev) 1164 continue; 1165 break; 1166 } 1167 1168 /* 1169 * Verify the css and acquire a reference. The root 1170 * is provided by the caller, so we know it's alive 1171 * and kicking, and don't take an extra reference. 1172 */ 1173 if (css == &root->css || css_tryget(css)) { 1174 memcg = mem_cgroup_from_css(css); 1175 break; 1176 } 1177 } 1178 1179 if (reclaim) { 1180 /* 1181 * The position could have already been updated by a competing 1182 * thread, so check that the value hasn't changed since we read 1183 * it to avoid reclaiming from the same cgroup twice. 1184 */ 1185 (void)cmpxchg(&iter->position, pos, memcg); 1186 1187 if (pos) 1188 css_put(&pos->css); 1189 1190 if (!memcg) 1191 iter->generation++; 1192 } 1193 1194 out_unlock: 1195 rcu_read_unlock(); 1196 if (prev && prev != root) 1197 css_put(&prev->css); 1198 1199 return memcg; 1200 } 1201 1202 /** 1203 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1204 * @root: hierarchy root 1205 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1206 */ 1207 void mem_cgroup_iter_break(struct mem_cgroup *root, 1208 struct mem_cgroup *prev) 1209 { 1210 if (!root) 1211 root = root_mem_cgroup; 1212 if (prev && prev != root) 1213 css_put(&prev->css); 1214 } 1215 1216 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1217 struct mem_cgroup *dead_memcg) 1218 { 1219 struct mem_cgroup_reclaim_iter *iter; 1220 struct mem_cgroup_per_node *mz; 1221 int nid; 1222 1223 for_each_node(nid) { 1224 mz = from->nodeinfo[nid]; 1225 iter = &mz->iter; 1226 cmpxchg(&iter->position, dead_memcg, NULL); 1227 } 1228 } 1229 1230 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1231 { 1232 struct mem_cgroup *memcg = dead_memcg; 1233 struct mem_cgroup *last; 1234 1235 do { 1236 __invalidate_reclaim_iterators(memcg, dead_memcg); 1237 last = memcg; 1238 } while ((memcg = parent_mem_cgroup(memcg))); 1239 1240 /* 1241 * When cgroup1 non-hierarchy mode is used, 1242 * parent_mem_cgroup() does not walk all the way up to the 1243 * cgroup root (root_mem_cgroup). So we have to handle 1244 * dead_memcg from cgroup root separately. 1245 */ 1246 if (!mem_cgroup_is_root(last)) 1247 __invalidate_reclaim_iterators(root_mem_cgroup, 1248 dead_memcg); 1249 } 1250 1251 /** 1252 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1253 * @memcg: hierarchy root 1254 * @fn: function to call for each task 1255 * @arg: argument passed to @fn 1256 * 1257 * This function iterates over tasks attached to @memcg or to any of its 1258 * descendants and calls @fn for each task. If @fn returns a non-zero 1259 * value, the function breaks the iteration loop. Otherwise, it will iterate 1260 * over all tasks and return 0. 1261 * 1262 * This function must not be called for the root memory cgroup. 1263 */ 1264 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1265 int (*fn)(struct task_struct *, void *), void *arg) 1266 { 1267 struct mem_cgroup *iter; 1268 int ret = 0; 1269 1270 BUG_ON(mem_cgroup_is_root(memcg)); 1271 1272 for_each_mem_cgroup_tree(iter, memcg) { 1273 struct css_task_iter it; 1274 struct task_struct *task; 1275 1276 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1277 while (!ret && (task = css_task_iter_next(&it))) 1278 ret = fn(task, arg); 1279 css_task_iter_end(&it); 1280 if (ret) { 1281 mem_cgroup_iter_break(memcg, iter); 1282 break; 1283 } 1284 } 1285 } 1286 1287 #ifdef CONFIG_DEBUG_VM 1288 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) 1289 { 1290 struct mem_cgroup *memcg; 1291 1292 if (mem_cgroup_disabled()) 1293 return; 1294 1295 memcg = folio_memcg(folio); 1296 1297 if (!memcg) 1298 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio); 1299 else 1300 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio); 1301 } 1302 #endif 1303 1304 /** 1305 * folio_lruvec_lock - Lock the lruvec for a folio. 1306 * @folio: Pointer to the folio. 1307 * 1308 * These functions are safe to use under any of the following conditions: 1309 * - folio locked 1310 * - folio_test_lru false 1311 * - folio_memcg_lock() 1312 * - folio frozen (refcount of 0) 1313 * 1314 * Return: The lruvec this folio is on with its lock held. 1315 */ 1316 struct lruvec *folio_lruvec_lock(struct folio *folio) 1317 { 1318 struct lruvec *lruvec = folio_lruvec(folio); 1319 1320 spin_lock(&lruvec->lru_lock); 1321 lruvec_memcg_debug(lruvec, folio); 1322 1323 return lruvec; 1324 } 1325 1326 /** 1327 * folio_lruvec_lock_irq - Lock the lruvec for a folio. 1328 * @folio: Pointer to the folio. 1329 * 1330 * These functions are safe to use under any of the following conditions: 1331 * - folio locked 1332 * - folio_test_lru false 1333 * - folio_memcg_lock() 1334 * - folio frozen (refcount of 0) 1335 * 1336 * Return: The lruvec this folio is on with its lock held and interrupts 1337 * disabled. 1338 */ 1339 struct lruvec *folio_lruvec_lock_irq(struct folio *folio) 1340 { 1341 struct lruvec *lruvec = folio_lruvec(folio); 1342 1343 spin_lock_irq(&lruvec->lru_lock); 1344 lruvec_memcg_debug(lruvec, folio); 1345 1346 return lruvec; 1347 } 1348 1349 /** 1350 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio. 1351 * @folio: Pointer to the folio. 1352 * @flags: Pointer to irqsave flags. 1353 * 1354 * These functions are safe to use under any of the following conditions: 1355 * - folio locked 1356 * - folio_test_lru false 1357 * - folio_memcg_lock() 1358 * - folio frozen (refcount of 0) 1359 * 1360 * Return: The lruvec this folio is on with its lock held and interrupts 1361 * disabled. 1362 */ 1363 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, 1364 unsigned long *flags) 1365 { 1366 struct lruvec *lruvec = folio_lruvec(folio); 1367 1368 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1369 lruvec_memcg_debug(lruvec, folio); 1370 1371 return lruvec; 1372 } 1373 1374 /** 1375 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1376 * @lruvec: mem_cgroup per zone lru vector 1377 * @lru: index of lru list the page is sitting on 1378 * @zid: zone id of the accounted pages 1379 * @nr_pages: positive when adding or negative when removing 1380 * 1381 * This function must be called under lru_lock, just before a page is added 1382 * to or just after a page is removed from an lru list. 1383 */ 1384 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1385 int zid, int nr_pages) 1386 { 1387 struct mem_cgroup_per_node *mz; 1388 unsigned long *lru_size; 1389 long size; 1390 1391 if (mem_cgroup_disabled()) 1392 return; 1393 1394 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1395 lru_size = &mz->lru_zone_size[zid][lru]; 1396 1397 if (nr_pages < 0) 1398 *lru_size += nr_pages; 1399 1400 size = *lru_size; 1401 if (WARN_ONCE(size < 0, 1402 "%s(%p, %d, %d): lru_size %ld\n", 1403 __func__, lruvec, lru, nr_pages, size)) { 1404 VM_BUG_ON(1); 1405 *lru_size = 0; 1406 } 1407 1408 if (nr_pages > 0) 1409 *lru_size += nr_pages; 1410 } 1411 1412 /** 1413 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1414 * @memcg: the memory cgroup 1415 * 1416 * Returns the maximum amount of memory @mem can be charged with, in 1417 * pages. 1418 */ 1419 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1420 { 1421 unsigned long margin = 0; 1422 unsigned long count; 1423 unsigned long limit; 1424 1425 count = page_counter_read(&memcg->memory); 1426 limit = READ_ONCE(memcg->memory.max); 1427 if (count < limit) 1428 margin = limit - count; 1429 1430 if (do_memsw_account()) { 1431 count = page_counter_read(&memcg->memsw); 1432 limit = READ_ONCE(memcg->memsw.max); 1433 if (count < limit) 1434 margin = min(margin, limit - count); 1435 else 1436 margin = 0; 1437 } 1438 1439 return margin; 1440 } 1441 1442 /* 1443 * A routine for checking "mem" is under move_account() or not. 1444 * 1445 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1446 * moving cgroups. This is for waiting at high-memory pressure 1447 * caused by "move". 1448 */ 1449 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1450 { 1451 struct mem_cgroup *from; 1452 struct mem_cgroup *to; 1453 bool ret = false; 1454 /* 1455 * Unlike task_move routines, we access mc.to, mc.from not under 1456 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1457 */ 1458 spin_lock(&mc.lock); 1459 from = mc.from; 1460 to = mc.to; 1461 if (!from) 1462 goto unlock; 1463 1464 ret = mem_cgroup_is_descendant(from, memcg) || 1465 mem_cgroup_is_descendant(to, memcg); 1466 unlock: 1467 spin_unlock(&mc.lock); 1468 return ret; 1469 } 1470 1471 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1472 { 1473 if (mc.moving_task && current != mc.moving_task) { 1474 if (mem_cgroup_under_move(memcg)) { 1475 DEFINE_WAIT(wait); 1476 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1477 /* moving charge context might have finished. */ 1478 if (mc.moving_task) 1479 schedule(); 1480 finish_wait(&mc.waitq, &wait); 1481 return true; 1482 } 1483 } 1484 return false; 1485 } 1486 1487 struct memory_stat { 1488 const char *name; 1489 unsigned int idx; 1490 }; 1491 1492 static const struct memory_stat memory_stats[] = { 1493 { "anon", NR_ANON_MAPPED }, 1494 { "file", NR_FILE_PAGES }, 1495 { "kernel", MEMCG_KMEM }, 1496 { "kernel_stack", NR_KERNEL_STACK_KB }, 1497 { "pagetables", NR_PAGETABLE }, 1498 { "sec_pagetables", NR_SECONDARY_PAGETABLE }, 1499 { "percpu", MEMCG_PERCPU_B }, 1500 { "sock", MEMCG_SOCK }, 1501 { "vmalloc", MEMCG_VMALLOC }, 1502 { "shmem", NR_SHMEM }, 1503 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 1504 { "zswap", MEMCG_ZSWAP_B }, 1505 { "zswapped", MEMCG_ZSWAPPED }, 1506 #endif 1507 { "file_mapped", NR_FILE_MAPPED }, 1508 { "file_dirty", NR_FILE_DIRTY }, 1509 { "file_writeback", NR_WRITEBACK }, 1510 #ifdef CONFIG_SWAP 1511 { "swapcached", NR_SWAPCACHE }, 1512 #endif 1513 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1514 { "anon_thp", NR_ANON_THPS }, 1515 { "file_thp", NR_FILE_THPS }, 1516 { "shmem_thp", NR_SHMEM_THPS }, 1517 #endif 1518 { "inactive_anon", NR_INACTIVE_ANON }, 1519 { "active_anon", NR_ACTIVE_ANON }, 1520 { "inactive_file", NR_INACTIVE_FILE }, 1521 { "active_file", NR_ACTIVE_FILE }, 1522 { "unevictable", NR_UNEVICTABLE }, 1523 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, 1524 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, 1525 1526 /* The memory events */ 1527 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON }, 1528 { "workingset_refault_file", WORKINGSET_REFAULT_FILE }, 1529 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON }, 1530 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE }, 1531 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON }, 1532 { "workingset_restore_file", WORKINGSET_RESTORE_FILE }, 1533 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM }, 1534 }; 1535 1536 /* Translate stat items to the correct unit for memory.stat output */ 1537 static int memcg_page_state_unit(int item) 1538 { 1539 switch (item) { 1540 case MEMCG_PERCPU_B: 1541 case MEMCG_ZSWAP_B: 1542 case NR_SLAB_RECLAIMABLE_B: 1543 case NR_SLAB_UNRECLAIMABLE_B: 1544 case WORKINGSET_REFAULT_ANON: 1545 case WORKINGSET_REFAULT_FILE: 1546 case WORKINGSET_ACTIVATE_ANON: 1547 case WORKINGSET_ACTIVATE_FILE: 1548 case WORKINGSET_RESTORE_ANON: 1549 case WORKINGSET_RESTORE_FILE: 1550 case WORKINGSET_NODERECLAIM: 1551 return 1; 1552 case NR_KERNEL_STACK_KB: 1553 return SZ_1K; 1554 default: 1555 return PAGE_SIZE; 1556 } 1557 } 1558 1559 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, 1560 int item) 1561 { 1562 return memcg_page_state(memcg, item) * memcg_page_state_unit(item); 1563 } 1564 1565 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) 1566 { 1567 int i; 1568 1569 /* 1570 * Provide statistics on the state of the memory subsystem as 1571 * well as cumulative event counters that show past behavior. 1572 * 1573 * This list is ordered following a combination of these gradients: 1574 * 1) generic big picture -> specifics and details 1575 * 2) reflecting userspace activity -> reflecting kernel heuristics 1576 * 1577 * Current memory state: 1578 */ 1579 mem_cgroup_flush_stats(); 1580 1581 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1582 u64 size; 1583 1584 size = memcg_page_state_output(memcg, memory_stats[i].idx); 1585 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size); 1586 1587 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1588 size += memcg_page_state_output(memcg, 1589 NR_SLAB_RECLAIMABLE_B); 1590 seq_buf_printf(s, "slab %llu\n", size); 1591 } 1592 } 1593 1594 /* Accumulated memory events */ 1595 seq_buf_printf(s, "pgscan %lu\n", 1596 memcg_events(memcg, PGSCAN_KSWAPD) + 1597 memcg_events(memcg, PGSCAN_DIRECT) + 1598 memcg_events(memcg, PGSCAN_KHUGEPAGED)); 1599 seq_buf_printf(s, "pgsteal %lu\n", 1600 memcg_events(memcg, PGSTEAL_KSWAPD) + 1601 memcg_events(memcg, PGSTEAL_DIRECT) + 1602 memcg_events(memcg, PGSTEAL_KHUGEPAGED)); 1603 1604 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) { 1605 if (memcg_vm_event_stat[i] == PGPGIN || 1606 memcg_vm_event_stat[i] == PGPGOUT) 1607 continue; 1608 1609 seq_buf_printf(s, "%s %lu\n", 1610 vm_event_name(memcg_vm_event_stat[i]), 1611 memcg_events(memcg, memcg_vm_event_stat[i])); 1612 } 1613 1614 /* The above should easily fit into one page */ 1615 WARN_ON_ONCE(seq_buf_has_overflowed(s)); 1616 } 1617 1618 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s); 1619 1620 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) 1621 { 1622 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1623 memcg_stat_format(memcg, s); 1624 else 1625 memcg1_stat_format(memcg, s); 1626 WARN_ON_ONCE(seq_buf_has_overflowed(s)); 1627 } 1628 1629 /** 1630 * mem_cgroup_print_oom_context: Print OOM information relevant to 1631 * memory controller. 1632 * @memcg: The memory cgroup that went over limit 1633 * @p: Task that is going to be killed 1634 * 1635 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1636 * enabled 1637 */ 1638 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1639 { 1640 rcu_read_lock(); 1641 1642 if (memcg) { 1643 pr_cont(",oom_memcg="); 1644 pr_cont_cgroup_path(memcg->css.cgroup); 1645 } else 1646 pr_cont(",global_oom"); 1647 if (p) { 1648 pr_cont(",task_memcg="); 1649 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1650 } 1651 rcu_read_unlock(); 1652 } 1653 1654 /** 1655 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1656 * memory controller. 1657 * @memcg: The memory cgroup that went over limit 1658 */ 1659 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1660 { 1661 /* Use static buffer, for the caller is holding oom_lock. */ 1662 static char buf[PAGE_SIZE]; 1663 struct seq_buf s; 1664 1665 lockdep_assert_held(&oom_lock); 1666 1667 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1668 K((u64)page_counter_read(&memcg->memory)), 1669 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1670 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1671 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1672 K((u64)page_counter_read(&memcg->swap)), 1673 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1674 else { 1675 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1676 K((u64)page_counter_read(&memcg->memsw)), 1677 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1678 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1679 K((u64)page_counter_read(&memcg->kmem)), 1680 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1681 } 1682 1683 pr_info("Memory cgroup stats for "); 1684 pr_cont_cgroup_path(memcg->css.cgroup); 1685 pr_cont(":"); 1686 seq_buf_init(&s, buf, sizeof(buf)); 1687 memory_stat_format(memcg, &s); 1688 seq_buf_do_printk(&s, KERN_INFO); 1689 } 1690 1691 /* 1692 * Return the memory (and swap, if configured) limit for a memcg. 1693 */ 1694 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1695 { 1696 unsigned long max = READ_ONCE(memcg->memory.max); 1697 1698 if (do_memsw_account()) { 1699 if (mem_cgroup_swappiness(memcg)) { 1700 /* Calculate swap excess capacity from memsw limit */ 1701 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1702 1703 max += min(swap, (unsigned long)total_swap_pages); 1704 } 1705 } else { 1706 if (mem_cgroup_swappiness(memcg)) 1707 max += min(READ_ONCE(memcg->swap.max), 1708 (unsigned long)total_swap_pages); 1709 } 1710 return max; 1711 } 1712 1713 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) 1714 { 1715 return page_counter_read(&memcg->memory); 1716 } 1717 1718 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1719 int order) 1720 { 1721 struct oom_control oc = { 1722 .zonelist = NULL, 1723 .nodemask = NULL, 1724 .memcg = memcg, 1725 .gfp_mask = gfp_mask, 1726 .order = order, 1727 }; 1728 bool ret = true; 1729 1730 if (mutex_lock_killable(&oom_lock)) 1731 return true; 1732 1733 if (mem_cgroup_margin(memcg) >= (1 << order)) 1734 goto unlock; 1735 1736 /* 1737 * A few threads which were not waiting at mutex_lock_killable() can 1738 * fail to bail out. Therefore, check again after holding oom_lock. 1739 */ 1740 ret = task_is_dying() || out_of_memory(&oc); 1741 1742 unlock: 1743 mutex_unlock(&oom_lock); 1744 return ret; 1745 } 1746 1747 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1748 pg_data_t *pgdat, 1749 gfp_t gfp_mask, 1750 unsigned long *total_scanned) 1751 { 1752 struct mem_cgroup *victim = NULL; 1753 int total = 0; 1754 int loop = 0; 1755 unsigned long excess; 1756 unsigned long nr_scanned; 1757 struct mem_cgroup_reclaim_cookie reclaim = { 1758 .pgdat = pgdat, 1759 }; 1760 1761 excess = soft_limit_excess(root_memcg); 1762 1763 while (1) { 1764 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1765 if (!victim) { 1766 loop++; 1767 if (loop >= 2) { 1768 /* 1769 * If we have not been able to reclaim 1770 * anything, it might because there are 1771 * no reclaimable pages under this hierarchy 1772 */ 1773 if (!total) 1774 break; 1775 /* 1776 * We want to do more targeted reclaim. 1777 * excess >> 2 is not to excessive so as to 1778 * reclaim too much, nor too less that we keep 1779 * coming back to reclaim from this cgroup 1780 */ 1781 if (total >= (excess >> 2) || 1782 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1783 break; 1784 } 1785 continue; 1786 } 1787 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1788 pgdat, &nr_scanned); 1789 *total_scanned += nr_scanned; 1790 if (!soft_limit_excess(root_memcg)) 1791 break; 1792 } 1793 mem_cgroup_iter_break(root_memcg, victim); 1794 return total; 1795 } 1796 1797 #ifdef CONFIG_LOCKDEP 1798 static struct lockdep_map memcg_oom_lock_dep_map = { 1799 .name = "memcg_oom_lock", 1800 }; 1801 #endif 1802 1803 static DEFINE_SPINLOCK(memcg_oom_lock); 1804 1805 /* 1806 * Check OOM-Killer is already running under our hierarchy. 1807 * If someone is running, return false. 1808 */ 1809 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1810 { 1811 struct mem_cgroup *iter, *failed = NULL; 1812 1813 spin_lock(&memcg_oom_lock); 1814 1815 for_each_mem_cgroup_tree(iter, memcg) { 1816 if (iter->oom_lock) { 1817 /* 1818 * this subtree of our hierarchy is already locked 1819 * so we cannot give a lock. 1820 */ 1821 failed = iter; 1822 mem_cgroup_iter_break(memcg, iter); 1823 break; 1824 } else 1825 iter->oom_lock = true; 1826 } 1827 1828 if (failed) { 1829 /* 1830 * OK, we failed to lock the whole subtree so we have 1831 * to clean up what we set up to the failing subtree 1832 */ 1833 for_each_mem_cgroup_tree(iter, memcg) { 1834 if (iter == failed) { 1835 mem_cgroup_iter_break(memcg, iter); 1836 break; 1837 } 1838 iter->oom_lock = false; 1839 } 1840 } else 1841 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1842 1843 spin_unlock(&memcg_oom_lock); 1844 1845 return !failed; 1846 } 1847 1848 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1849 { 1850 struct mem_cgroup *iter; 1851 1852 spin_lock(&memcg_oom_lock); 1853 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_); 1854 for_each_mem_cgroup_tree(iter, memcg) 1855 iter->oom_lock = false; 1856 spin_unlock(&memcg_oom_lock); 1857 } 1858 1859 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1860 { 1861 struct mem_cgroup *iter; 1862 1863 spin_lock(&memcg_oom_lock); 1864 for_each_mem_cgroup_tree(iter, memcg) 1865 iter->under_oom++; 1866 spin_unlock(&memcg_oom_lock); 1867 } 1868 1869 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1870 { 1871 struct mem_cgroup *iter; 1872 1873 /* 1874 * Be careful about under_oom underflows because a child memcg 1875 * could have been added after mem_cgroup_mark_under_oom. 1876 */ 1877 spin_lock(&memcg_oom_lock); 1878 for_each_mem_cgroup_tree(iter, memcg) 1879 if (iter->under_oom > 0) 1880 iter->under_oom--; 1881 spin_unlock(&memcg_oom_lock); 1882 } 1883 1884 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1885 1886 struct oom_wait_info { 1887 struct mem_cgroup *memcg; 1888 wait_queue_entry_t wait; 1889 }; 1890 1891 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1892 unsigned mode, int sync, void *arg) 1893 { 1894 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1895 struct mem_cgroup *oom_wait_memcg; 1896 struct oom_wait_info *oom_wait_info; 1897 1898 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1899 oom_wait_memcg = oom_wait_info->memcg; 1900 1901 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1902 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1903 return 0; 1904 return autoremove_wake_function(wait, mode, sync, arg); 1905 } 1906 1907 static void memcg_oom_recover(struct mem_cgroup *memcg) 1908 { 1909 /* 1910 * For the following lockless ->under_oom test, the only required 1911 * guarantee is that it must see the state asserted by an OOM when 1912 * this function is called as a result of userland actions 1913 * triggered by the notification of the OOM. This is trivially 1914 * achieved by invoking mem_cgroup_mark_under_oom() before 1915 * triggering notification. 1916 */ 1917 if (memcg && memcg->under_oom) 1918 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1919 } 1920 1921 /* 1922 * Returns true if successfully killed one or more processes. Though in some 1923 * corner cases it can return true even without killing any process. 1924 */ 1925 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1926 { 1927 bool locked, ret; 1928 1929 if (order > PAGE_ALLOC_COSTLY_ORDER) 1930 return false; 1931 1932 memcg_memory_event(memcg, MEMCG_OOM); 1933 1934 /* 1935 * We are in the middle of the charge context here, so we 1936 * don't want to block when potentially sitting on a callstack 1937 * that holds all kinds of filesystem and mm locks. 1938 * 1939 * cgroup1 allows disabling the OOM killer and waiting for outside 1940 * handling until the charge can succeed; remember the context and put 1941 * the task to sleep at the end of the page fault when all locks are 1942 * released. 1943 * 1944 * On the other hand, in-kernel OOM killer allows for an async victim 1945 * memory reclaim (oom_reaper) and that means that we are not solely 1946 * relying on the oom victim to make a forward progress and we can 1947 * invoke the oom killer here. 1948 * 1949 * Please note that mem_cgroup_out_of_memory might fail to find a 1950 * victim and then we have to bail out from the charge path. 1951 */ 1952 if (READ_ONCE(memcg->oom_kill_disable)) { 1953 if (current->in_user_fault) { 1954 css_get(&memcg->css); 1955 current->memcg_in_oom = memcg; 1956 current->memcg_oom_gfp_mask = mask; 1957 current->memcg_oom_order = order; 1958 } 1959 return false; 1960 } 1961 1962 mem_cgroup_mark_under_oom(memcg); 1963 1964 locked = mem_cgroup_oom_trylock(memcg); 1965 1966 if (locked) 1967 mem_cgroup_oom_notify(memcg); 1968 1969 mem_cgroup_unmark_under_oom(memcg); 1970 ret = mem_cgroup_out_of_memory(memcg, mask, order); 1971 1972 if (locked) 1973 mem_cgroup_oom_unlock(memcg); 1974 1975 return ret; 1976 } 1977 1978 /** 1979 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1980 * @handle: actually kill/wait or just clean up the OOM state 1981 * 1982 * This has to be called at the end of a page fault if the memcg OOM 1983 * handler was enabled. 1984 * 1985 * Memcg supports userspace OOM handling where failed allocations must 1986 * sleep on a waitqueue until the userspace task resolves the 1987 * situation. Sleeping directly in the charge context with all kinds 1988 * of locks held is not a good idea, instead we remember an OOM state 1989 * in the task and mem_cgroup_oom_synchronize() has to be called at 1990 * the end of the page fault to complete the OOM handling. 1991 * 1992 * Returns %true if an ongoing memcg OOM situation was detected and 1993 * completed, %false otherwise. 1994 */ 1995 bool mem_cgroup_oom_synchronize(bool handle) 1996 { 1997 struct mem_cgroup *memcg = current->memcg_in_oom; 1998 struct oom_wait_info owait; 1999 bool locked; 2000 2001 /* OOM is global, do not handle */ 2002 if (!memcg) 2003 return false; 2004 2005 if (!handle) 2006 goto cleanup; 2007 2008 owait.memcg = memcg; 2009 owait.wait.flags = 0; 2010 owait.wait.func = memcg_oom_wake_function; 2011 owait.wait.private = current; 2012 INIT_LIST_HEAD(&owait.wait.entry); 2013 2014 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2015 mem_cgroup_mark_under_oom(memcg); 2016 2017 locked = mem_cgroup_oom_trylock(memcg); 2018 2019 if (locked) 2020 mem_cgroup_oom_notify(memcg); 2021 2022 schedule(); 2023 mem_cgroup_unmark_under_oom(memcg); 2024 finish_wait(&memcg_oom_waitq, &owait.wait); 2025 2026 if (locked) 2027 mem_cgroup_oom_unlock(memcg); 2028 cleanup: 2029 current->memcg_in_oom = NULL; 2030 css_put(&memcg->css); 2031 return true; 2032 } 2033 2034 /** 2035 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 2036 * @victim: task to be killed by the OOM killer 2037 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 2038 * 2039 * Returns a pointer to a memory cgroup, which has to be cleaned up 2040 * by killing all belonging OOM-killable tasks. 2041 * 2042 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 2043 */ 2044 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 2045 struct mem_cgroup *oom_domain) 2046 { 2047 struct mem_cgroup *oom_group = NULL; 2048 struct mem_cgroup *memcg; 2049 2050 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2051 return NULL; 2052 2053 if (!oom_domain) 2054 oom_domain = root_mem_cgroup; 2055 2056 rcu_read_lock(); 2057 2058 memcg = mem_cgroup_from_task(victim); 2059 if (mem_cgroup_is_root(memcg)) 2060 goto out; 2061 2062 /* 2063 * If the victim task has been asynchronously moved to a different 2064 * memory cgroup, we might end up killing tasks outside oom_domain. 2065 * In this case it's better to ignore memory.group.oom. 2066 */ 2067 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 2068 goto out; 2069 2070 /* 2071 * Traverse the memory cgroup hierarchy from the victim task's 2072 * cgroup up to the OOMing cgroup (or root) to find the 2073 * highest-level memory cgroup with oom.group set. 2074 */ 2075 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 2076 if (READ_ONCE(memcg->oom_group)) 2077 oom_group = memcg; 2078 2079 if (memcg == oom_domain) 2080 break; 2081 } 2082 2083 if (oom_group) 2084 css_get(&oom_group->css); 2085 out: 2086 rcu_read_unlock(); 2087 2088 return oom_group; 2089 } 2090 2091 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2092 { 2093 pr_info("Tasks in "); 2094 pr_cont_cgroup_path(memcg->css.cgroup); 2095 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2096 } 2097 2098 /** 2099 * folio_memcg_lock - Bind a folio to its memcg. 2100 * @folio: The folio. 2101 * 2102 * This function prevents unlocked LRU folios from being moved to 2103 * another cgroup. 2104 * 2105 * It ensures lifetime of the bound memcg. The caller is responsible 2106 * for the lifetime of the folio. 2107 */ 2108 void folio_memcg_lock(struct folio *folio) 2109 { 2110 struct mem_cgroup *memcg; 2111 unsigned long flags; 2112 2113 /* 2114 * The RCU lock is held throughout the transaction. The fast 2115 * path can get away without acquiring the memcg->move_lock 2116 * because page moving starts with an RCU grace period. 2117 */ 2118 rcu_read_lock(); 2119 2120 if (mem_cgroup_disabled()) 2121 return; 2122 again: 2123 memcg = folio_memcg(folio); 2124 if (unlikely(!memcg)) 2125 return; 2126 2127 #ifdef CONFIG_PROVE_LOCKING 2128 local_irq_save(flags); 2129 might_lock(&memcg->move_lock); 2130 local_irq_restore(flags); 2131 #endif 2132 2133 if (atomic_read(&memcg->moving_account) <= 0) 2134 return; 2135 2136 spin_lock_irqsave(&memcg->move_lock, flags); 2137 if (memcg != folio_memcg(folio)) { 2138 spin_unlock_irqrestore(&memcg->move_lock, flags); 2139 goto again; 2140 } 2141 2142 /* 2143 * When charge migration first begins, we can have multiple 2144 * critical sections holding the fast-path RCU lock and one 2145 * holding the slowpath move_lock. Track the task who has the 2146 * move_lock for folio_memcg_unlock(). 2147 */ 2148 memcg->move_lock_task = current; 2149 memcg->move_lock_flags = flags; 2150 } 2151 2152 static void __folio_memcg_unlock(struct mem_cgroup *memcg) 2153 { 2154 if (memcg && memcg->move_lock_task == current) { 2155 unsigned long flags = memcg->move_lock_flags; 2156 2157 memcg->move_lock_task = NULL; 2158 memcg->move_lock_flags = 0; 2159 2160 spin_unlock_irqrestore(&memcg->move_lock, flags); 2161 } 2162 2163 rcu_read_unlock(); 2164 } 2165 2166 /** 2167 * folio_memcg_unlock - Release the binding between a folio and its memcg. 2168 * @folio: The folio. 2169 * 2170 * This releases the binding created by folio_memcg_lock(). This does 2171 * not change the accounting of this folio to its memcg, but it does 2172 * permit others to change it. 2173 */ 2174 void folio_memcg_unlock(struct folio *folio) 2175 { 2176 __folio_memcg_unlock(folio_memcg(folio)); 2177 } 2178 2179 struct memcg_stock_pcp { 2180 local_lock_t stock_lock; 2181 struct mem_cgroup *cached; /* this never be root cgroup */ 2182 unsigned int nr_pages; 2183 2184 #ifdef CONFIG_MEMCG_KMEM 2185 struct obj_cgroup *cached_objcg; 2186 struct pglist_data *cached_pgdat; 2187 unsigned int nr_bytes; 2188 int nr_slab_reclaimable_b; 2189 int nr_slab_unreclaimable_b; 2190 #endif 2191 2192 struct work_struct work; 2193 unsigned long flags; 2194 #define FLUSHING_CACHED_CHARGE 0 2195 }; 2196 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = { 2197 .stock_lock = INIT_LOCAL_LOCK(stock_lock), 2198 }; 2199 static DEFINE_MUTEX(percpu_charge_mutex); 2200 2201 #ifdef CONFIG_MEMCG_KMEM 2202 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock); 2203 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2204 struct mem_cgroup *root_memcg); 2205 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages); 2206 2207 #else 2208 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) 2209 { 2210 return NULL; 2211 } 2212 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2213 struct mem_cgroup *root_memcg) 2214 { 2215 return false; 2216 } 2217 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages) 2218 { 2219 } 2220 #endif 2221 2222 /** 2223 * consume_stock: Try to consume stocked charge on this cpu. 2224 * @memcg: memcg to consume from. 2225 * @nr_pages: how many pages to charge. 2226 * 2227 * The charges will only happen if @memcg matches the current cpu's memcg 2228 * stock, and at least @nr_pages are available in that stock. Failure to 2229 * service an allocation will refill the stock. 2230 * 2231 * returns true if successful, false otherwise. 2232 */ 2233 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2234 { 2235 struct memcg_stock_pcp *stock; 2236 unsigned long flags; 2237 bool ret = false; 2238 2239 if (nr_pages > MEMCG_CHARGE_BATCH) 2240 return ret; 2241 2242 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2243 2244 stock = this_cpu_ptr(&memcg_stock); 2245 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) { 2246 stock->nr_pages -= nr_pages; 2247 ret = true; 2248 } 2249 2250 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2251 2252 return ret; 2253 } 2254 2255 /* 2256 * Returns stocks cached in percpu and reset cached information. 2257 */ 2258 static void drain_stock(struct memcg_stock_pcp *stock) 2259 { 2260 struct mem_cgroup *old = READ_ONCE(stock->cached); 2261 2262 if (!old) 2263 return; 2264 2265 if (stock->nr_pages) { 2266 page_counter_uncharge(&old->memory, stock->nr_pages); 2267 if (do_memsw_account()) 2268 page_counter_uncharge(&old->memsw, stock->nr_pages); 2269 stock->nr_pages = 0; 2270 } 2271 2272 css_put(&old->css); 2273 WRITE_ONCE(stock->cached, NULL); 2274 } 2275 2276 static void drain_local_stock(struct work_struct *dummy) 2277 { 2278 struct memcg_stock_pcp *stock; 2279 struct obj_cgroup *old = NULL; 2280 unsigned long flags; 2281 2282 /* 2283 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs. 2284 * drain_stock races is that we always operate on local CPU stock 2285 * here with IRQ disabled 2286 */ 2287 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2288 2289 stock = this_cpu_ptr(&memcg_stock); 2290 old = drain_obj_stock(stock); 2291 drain_stock(stock); 2292 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2293 2294 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2295 if (old) 2296 obj_cgroup_put(old); 2297 } 2298 2299 /* 2300 * Cache charges(val) to local per_cpu area. 2301 * This will be consumed by consume_stock() function, later. 2302 */ 2303 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2304 { 2305 struct memcg_stock_pcp *stock; 2306 2307 stock = this_cpu_ptr(&memcg_stock); 2308 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ 2309 drain_stock(stock); 2310 css_get(&memcg->css); 2311 WRITE_ONCE(stock->cached, memcg); 2312 } 2313 stock->nr_pages += nr_pages; 2314 2315 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2316 drain_stock(stock); 2317 } 2318 2319 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2320 { 2321 unsigned long flags; 2322 2323 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2324 __refill_stock(memcg, nr_pages); 2325 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2326 } 2327 2328 /* 2329 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2330 * of the hierarchy under it. 2331 */ 2332 static void drain_all_stock(struct mem_cgroup *root_memcg) 2333 { 2334 int cpu, curcpu; 2335 2336 /* If someone's already draining, avoid adding running more workers. */ 2337 if (!mutex_trylock(&percpu_charge_mutex)) 2338 return; 2339 /* 2340 * Notify other cpus that system-wide "drain" is running 2341 * We do not care about races with the cpu hotplug because cpu down 2342 * as well as workers from this path always operate on the local 2343 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2344 */ 2345 migrate_disable(); 2346 curcpu = smp_processor_id(); 2347 for_each_online_cpu(cpu) { 2348 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2349 struct mem_cgroup *memcg; 2350 bool flush = false; 2351 2352 rcu_read_lock(); 2353 memcg = READ_ONCE(stock->cached); 2354 if (memcg && stock->nr_pages && 2355 mem_cgroup_is_descendant(memcg, root_memcg)) 2356 flush = true; 2357 else if (obj_stock_flush_required(stock, root_memcg)) 2358 flush = true; 2359 rcu_read_unlock(); 2360 2361 if (flush && 2362 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2363 if (cpu == curcpu) 2364 drain_local_stock(&stock->work); 2365 else if (!cpu_is_isolated(cpu)) 2366 schedule_work_on(cpu, &stock->work); 2367 } 2368 } 2369 migrate_enable(); 2370 mutex_unlock(&percpu_charge_mutex); 2371 } 2372 2373 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2374 { 2375 struct memcg_stock_pcp *stock; 2376 2377 stock = &per_cpu(memcg_stock, cpu); 2378 drain_stock(stock); 2379 2380 return 0; 2381 } 2382 2383 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2384 unsigned int nr_pages, 2385 gfp_t gfp_mask) 2386 { 2387 unsigned long nr_reclaimed = 0; 2388 2389 do { 2390 unsigned long pflags; 2391 2392 if (page_counter_read(&memcg->memory) <= 2393 READ_ONCE(memcg->memory.high)) 2394 continue; 2395 2396 memcg_memory_event(memcg, MEMCG_HIGH); 2397 2398 psi_memstall_enter(&pflags); 2399 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2400 gfp_mask, 2401 MEMCG_RECLAIM_MAY_SWAP); 2402 psi_memstall_leave(&pflags); 2403 } while ((memcg = parent_mem_cgroup(memcg)) && 2404 !mem_cgroup_is_root(memcg)); 2405 2406 return nr_reclaimed; 2407 } 2408 2409 static void high_work_func(struct work_struct *work) 2410 { 2411 struct mem_cgroup *memcg; 2412 2413 memcg = container_of(work, struct mem_cgroup, high_work); 2414 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2415 } 2416 2417 /* 2418 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2419 * enough to still cause a significant slowdown in most cases, while still 2420 * allowing diagnostics and tracing to proceed without becoming stuck. 2421 */ 2422 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2423 2424 /* 2425 * When calculating the delay, we use these either side of the exponentiation to 2426 * maintain precision and scale to a reasonable number of jiffies (see the table 2427 * below. 2428 * 2429 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2430 * overage ratio to a delay. 2431 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2432 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2433 * to produce a reasonable delay curve. 2434 * 2435 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2436 * reasonable delay curve compared to precision-adjusted overage, not 2437 * penalising heavily at first, but still making sure that growth beyond the 2438 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2439 * example, with a high of 100 megabytes: 2440 * 2441 * +-------+------------------------+ 2442 * | usage | time to allocate in ms | 2443 * +-------+------------------------+ 2444 * | 100M | 0 | 2445 * | 101M | 6 | 2446 * | 102M | 25 | 2447 * | 103M | 57 | 2448 * | 104M | 102 | 2449 * | 105M | 159 | 2450 * | 106M | 230 | 2451 * | 107M | 313 | 2452 * | 108M | 409 | 2453 * | 109M | 518 | 2454 * | 110M | 639 | 2455 * | 111M | 774 | 2456 * | 112M | 921 | 2457 * | 113M | 1081 | 2458 * | 114M | 1254 | 2459 * | 115M | 1439 | 2460 * | 116M | 1638 | 2461 * | 117M | 1849 | 2462 * | 118M | 2000 | 2463 * | 119M | 2000 | 2464 * | 120M | 2000 | 2465 * +-------+------------------------+ 2466 */ 2467 #define MEMCG_DELAY_PRECISION_SHIFT 20 2468 #define MEMCG_DELAY_SCALING_SHIFT 14 2469 2470 static u64 calculate_overage(unsigned long usage, unsigned long high) 2471 { 2472 u64 overage; 2473 2474 if (usage <= high) 2475 return 0; 2476 2477 /* 2478 * Prevent division by 0 in overage calculation by acting as if 2479 * it was a threshold of 1 page 2480 */ 2481 high = max(high, 1UL); 2482 2483 overage = usage - high; 2484 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2485 return div64_u64(overage, high); 2486 } 2487 2488 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2489 { 2490 u64 overage, max_overage = 0; 2491 2492 do { 2493 overage = calculate_overage(page_counter_read(&memcg->memory), 2494 READ_ONCE(memcg->memory.high)); 2495 max_overage = max(overage, max_overage); 2496 } while ((memcg = parent_mem_cgroup(memcg)) && 2497 !mem_cgroup_is_root(memcg)); 2498 2499 return max_overage; 2500 } 2501 2502 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2503 { 2504 u64 overage, max_overage = 0; 2505 2506 do { 2507 overage = calculate_overage(page_counter_read(&memcg->swap), 2508 READ_ONCE(memcg->swap.high)); 2509 if (overage) 2510 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2511 max_overage = max(overage, max_overage); 2512 } while ((memcg = parent_mem_cgroup(memcg)) && 2513 !mem_cgroup_is_root(memcg)); 2514 2515 return max_overage; 2516 } 2517 2518 /* 2519 * Get the number of jiffies that we should penalise a mischievous cgroup which 2520 * is exceeding its memory.high by checking both it and its ancestors. 2521 */ 2522 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2523 unsigned int nr_pages, 2524 u64 max_overage) 2525 { 2526 unsigned long penalty_jiffies; 2527 2528 if (!max_overage) 2529 return 0; 2530 2531 /* 2532 * We use overage compared to memory.high to calculate the number of 2533 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2534 * fairly lenient on small overages, and increasingly harsh when the 2535 * memcg in question makes it clear that it has no intention of stopping 2536 * its crazy behaviour, so we exponentially increase the delay based on 2537 * overage amount. 2538 */ 2539 penalty_jiffies = max_overage * max_overage * HZ; 2540 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2541 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2542 2543 /* 2544 * Factor in the task's own contribution to the overage, such that four 2545 * N-sized allocations are throttled approximately the same as one 2546 * 4N-sized allocation. 2547 * 2548 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2549 * larger the current charge patch is than that. 2550 */ 2551 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2552 } 2553 2554 /* 2555 * Scheduled by try_charge() to be executed from the userland return path 2556 * and reclaims memory over the high limit. 2557 */ 2558 void mem_cgroup_handle_over_high(void) 2559 { 2560 unsigned long penalty_jiffies; 2561 unsigned long pflags; 2562 unsigned long nr_reclaimed; 2563 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2564 int nr_retries = MAX_RECLAIM_RETRIES; 2565 struct mem_cgroup *memcg; 2566 bool in_retry = false; 2567 2568 if (likely(!nr_pages)) 2569 return; 2570 2571 memcg = get_mem_cgroup_from_mm(current->mm); 2572 current->memcg_nr_pages_over_high = 0; 2573 2574 retry_reclaim: 2575 /* 2576 * The allocating task should reclaim at least the batch size, but for 2577 * subsequent retries we only want to do what's necessary to prevent oom 2578 * or breaching resource isolation. 2579 * 2580 * This is distinct from memory.max or page allocator behaviour because 2581 * memory.high is currently batched, whereas memory.max and the page 2582 * allocator run every time an allocation is made. 2583 */ 2584 nr_reclaimed = reclaim_high(memcg, 2585 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2586 GFP_KERNEL); 2587 2588 /* 2589 * memory.high is breached and reclaim is unable to keep up. Throttle 2590 * allocators proactively to slow down excessive growth. 2591 */ 2592 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2593 mem_find_max_overage(memcg)); 2594 2595 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2596 swap_find_max_overage(memcg)); 2597 2598 /* 2599 * Clamp the max delay per usermode return so as to still keep the 2600 * application moving forwards and also permit diagnostics, albeit 2601 * extremely slowly. 2602 */ 2603 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2604 2605 /* 2606 * Don't sleep if the amount of jiffies this memcg owes us is so low 2607 * that it's not even worth doing, in an attempt to be nice to those who 2608 * go only a small amount over their memory.high value and maybe haven't 2609 * been aggressively reclaimed enough yet. 2610 */ 2611 if (penalty_jiffies <= HZ / 100) 2612 goto out; 2613 2614 /* 2615 * If reclaim is making forward progress but we're still over 2616 * memory.high, we want to encourage that rather than doing allocator 2617 * throttling. 2618 */ 2619 if (nr_reclaimed || nr_retries--) { 2620 in_retry = true; 2621 goto retry_reclaim; 2622 } 2623 2624 /* 2625 * If we exit early, we're guaranteed to die (since 2626 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2627 * need to account for any ill-begotten jiffies to pay them off later. 2628 */ 2629 psi_memstall_enter(&pflags); 2630 schedule_timeout_killable(penalty_jiffies); 2631 psi_memstall_leave(&pflags); 2632 2633 out: 2634 css_put(&memcg->css); 2635 } 2636 2637 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2638 unsigned int nr_pages) 2639 { 2640 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2641 int nr_retries = MAX_RECLAIM_RETRIES; 2642 struct mem_cgroup *mem_over_limit; 2643 struct page_counter *counter; 2644 unsigned long nr_reclaimed; 2645 bool passed_oom = false; 2646 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP; 2647 bool drained = false; 2648 bool raised_max_event = false; 2649 unsigned long pflags; 2650 2651 retry: 2652 if (consume_stock(memcg, nr_pages)) 2653 return 0; 2654 2655 if (!do_memsw_account() || 2656 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2657 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2658 goto done_restock; 2659 if (do_memsw_account()) 2660 page_counter_uncharge(&memcg->memsw, batch); 2661 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2662 } else { 2663 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2664 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP; 2665 } 2666 2667 if (batch > nr_pages) { 2668 batch = nr_pages; 2669 goto retry; 2670 } 2671 2672 /* 2673 * Prevent unbounded recursion when reclaim operations need to 2674 * allocate memory. This might exceed the limits temporarily, 2675 * but we prefer facilitating memory reclaim and getting back 2676 * under the limit over triggering OOM kills in these cases. 2677 */ 2678 if (unlikely(current->flags & PF_MEMALLOC)) 2679 goto force; 2680 2681 if (unlikely(task_in_memcg_oom(current))) 2682 goto nomem; 2683 2684 if (!gfpflags_allow_blocking(gfp_mask)) 2685 goto nomem; 2686 2687 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2688 raised_max_event = true; 2689 2690 psi_memstall_enter(&pflags); 2691 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2692 gfp_mask, reclaim_options); 2693 psi_memstall_leave(&pflags); 2694 2695 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2696 goto retry; 2697 2698 if (!drained) { 2699 drain_all_stock(mem_over_limit); 2700 drained = true; 2701 goto retry; 2702 } 2703 2704 if (gfp_mask & __GFP_NORETRY) 2705 goto nomem; 2706 /* 2707 * Even though the limit is exceeded at this point, reclaim 2708 * may have been able to free some pages. Retry the charge 2709 * before killing the task. 2710 * 2711 * Only for regular pages, though: huge pages are rather 2712 * unlikely to succeed so close to the limit, and we fall back 2713 * to regular pages anyway in case of failure. 2714 */ 2715 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2716 goto retry; 2717 /* 2718 * At task move, charge accounts can be doubly counted. So, it's 2719 * better to wait until the end of task_move if something is going on. 2720 */ 2721 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2722 goto retry; 2723 2724 if (nr_retries--) 2725 goto retry; 2726 2727 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2728 goto nomem; 2729 2730 /* Avoid endless loop for tasks bypassed by the oom killer */ 2731 if (passed_oom && task_is_dying()) 2732 goto nomem; 2733 2734 /* 2735 * keep retrying as long as the memcg oom killer is able to make 2736 * a forward progress or bypass the charge if the oom killer 2737 * couldn't make any progress. 2738 */ 2739 if (mem_cgroup_oom(mem_over_limit, gfp_mask, 2740 get_order(nr_pages * PAGE_SIZE))) { 2741 passed_oom = true; 2742 nr_retries = MAX_RECLAIM_RETRIES; 2743 goto retry; 2744 } 2745 nomem: 2746 /* 2747 * Memcg doesn't have a dedicated reserve for atomic 2748 * allocations. But like the global atomic pool, we need to 2749 * put the burden of reclaim on regular allocation requests 2750 * and let these go through as privileged allocations. 2751 */ 2752 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH))) 2753 return -ENOMEM; 2754 force: 2755 /* 2756 * If the allocation has to be enforced, don't forget to raise 2757 * a MEMCG_MAX event. 2758 */ 2759 if (!raised_max_event) 2760 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2761 2762 /* 2763 * The allocation either can't fail or will lead to more memory 2764 * being freed very soon. Allow memory usage go over the limit 2765 * temporarily by force charging it. 2766 */ 2767 page_counter_charge(&memcg->memory, nr_pages); 2768 if (do_memsw_account()) 2769 page_counter_charge(&memcg->memsw, nr_pages); 2770 2771 return 0; 2772 2773 done_restock: 2774 if (batch > nr_pages) 2775 refill_stock(memcg, batch - nr_pages); 2776 2777 /* 2778 * If the hierarchy is above the normal consumption range, schedule 2779 * reclaim on returning to userland. We can perform reclaim here 2780 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2781 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2782 * not recorded as it most likely matches current's and won't 2783 * change in the meantime. As high limit is checked again before 2784 * reclaim, the cost of mismatch is negligible. 2785 */ 2786 do { 2787 bool mem_high, swap_high; 2788 2789 mem_high = page_counter_read(&memcg->memory) > 2790 READ_ONCE(memcg->memory.high); 2791 swap_high = page_counter_read(&memcg->swap) > 2792 READ_ONCE(memcg->swap.high); 2793 2794 /* Don't bother a random interrupted task */ 2795 if (!in_task()) { 2796 if (mem_high) { 2797 schedule_work(&memcg->high_work); 2798 break; 2799 } 2800 continue; 2801 } 2802 2803 if (mem_high || swap_high) { 2804 /* 2805 * The allocating tasks in this cgroup will need to do 2806 * reclaim or be throttled to prevent further growth 2807 * of the memory or swap footprints. 2808 * 2809 * Target some best-effort fairness between the tasks, 2810 * and distribute reclaim work and delay penalties 2811 * based on how much each task is actually allocating. 2812 */ 2813 current->memcg_nr_pages_over_high += batch; 2814 set_notify_resume(current); 2815 break; 2816 } 2817 } while ((memcg = parent_mem_cgroup(memcg))); 2818 2819 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && 2820 !(current->flags & PF_MEMALLOC) && 2821 gfpflags_allow_blocking(gfp_mask)) { 2822 mem_cgroup_handle_over_high(); 2823 } 2824 return 0; 2825 } 2826 2827 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2828 unsigned int nr_pages) 2829 { 2830 if (mem_cgroup_is_root(memcg)) 2831 return 0; 2832 2833 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2834 } 2835 2836 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2837 { 2838 if (mem_cgroup_is_root(memcg)) 2839 return; 2840 2841 page_counter_uncharge(&memcg->memory, nr_pages); 2842 if (do_memsw_account()) 2843 page_counter_uncharge(&memcg->memsw, nr_pages); 2844 } 2845 2846 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) 2847 { 2848 VM_BUG_ON_FOLIO(folio_memcg(folio), folio); 2849 /* 2850 * Any of the following ensures page's memcg stability: 2851 * 2852 * - the page lock 2853 * - LRU isolation 2854 * - folio_memcg_lock() 2855 * - exclusive reference 2856 * - mem_cgroup_trylock_pages() 2857 */ 2858 folio->memcg_data = (unsigned long)memcg; 2859 } 2860 2861 #ifdef CONFIG_MEMCG_KMEM 2862 /* 2863 * The allocated objcg pointers array is not accounted directly. 2864 * Moreover, it should not come from DMA buffer and is not readily 2865 * reclaimable. So those GFP bits should be masked off. 2866 */ 2867 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) 2868 2869 /* 2870 * mod_objcg_mlstate() may be called with irq enabled, so 2871 * mod_memcg_lruvec_state() should be used. 2872 */ 2873 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 2874 struct pglist_data *pgdat, 2875 enum node_stat_item idx, int nr) 2876 { 2877 struct mem_cgroup *memcg; 2878 struct lruvec *lruvec; 2879 2880 rcu_read_lock(); 2881 memcg = obj_cgroup_memcg(objcg); 2882 lruvec = mem_cgroup_lruvec(memcg, pgdat); 2883 mod_memcg_lruvec_state(lruvec, idx, nr); 2884 rcu_read_unlock(); 2885 } 2886 2887 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s, 2888 gfp_t gfp, bool new_slab) 2889 { 2890 unsigned int objects = objs_per_slab(s, slab); 2891 unsigned long memcg_data; 2892 void *vec; 2893 2894 gfp &= ~OBJCGS_CLEAR_MASK; 2895 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2896 slab_nid(slab)); 2897 if (!vec) 2898 return -ENOMEM; 2899 2900 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; 2901 if (new_slab) { 2902 /* 2903 * If the slab is brand new and nobody can yet access its 2904 * memcg_data, no synchronization is required and memcg_data can 2905 * be simply assigned. 2906 */ 2907 slab->memcg_data = memcg_data; 2908 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) { 2909 /* 2910 * If the slab is already in use, somebody can allocate and 2911 * assign obj_cgroups in parallel. In this case the existing 2912 * objcg vector should be reused. 2913 */ 2914 kfree(vec); 2915 return 0; 2916 } 2917 2918 kmemleak_not_leak(vec); 2919 return 0; 2920 } 2921 2922 static __always_inline 2923 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p) 2924 { 2925 /* 2926 * Slab objects are accounted individually, not per-page. 2927 * Memcg membership data for each individual object is saved in 2928 * slab->memcg_data. 2929 */ 2930 if (folio_test_slab(folio)) { 2931 struct obj_cgroup **objcgs; 2932 struct slab *slab; 2933 unsigned int off; 2934 2935 slab = folio_slab(folio); 2936 objcgs = slab_objcgs(slab); 2937 if (!objcgs) 2938 return NULL; 2939 2940 off = obj_to_index(slab->slab_cache, slab, p); 2941 if (objcgs[off]) 2942 return obj_cgroup_memcg(objcgs[off]); 2943 2944 return NULL; 2945 } 2946 2947 /* 2948 * folio_memcg_check() is used here, because in theory we can encounter 2949 * a folio where the slab flag has been cleared already, but 2950 * slab->memcg_data has not been freed yet 2951 * folio_memcg_check() will guarantee that a proper memory 2952 * cgroup pointer or NULL will be returned. 2953 */ 2954 return folio_memcg_check(folio); 2955 } 2956 2957 /* 2958 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2959 * 2960 * A passed kernel object can be a slab object, vmalloc object or a generic 2961 * kernel page, so different mechanisms for getting the memory cgroup pointer 2962 * should be used. 2963 * 2964 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 2965 * can not know for sure how the kernel object is implemented. 2966 * mem_cgroup_from_obj() can be safely used in such cases. 2967 * 2968 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2969 * cgroup_mutex, etc. 2970 */ 2971 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2972 { 2973 struct folio *folio; 2974 2975 if (mem_cgroup_disabled()) 2976 return NULL; 2977 2978 if (unlikely(is_vmalloc_addr(p))) 2979 folio = page_folio(vmalloc_to_page(p)); 2980 else 2981 folio = virt_to_folio(p); 2982 2983 return mem_cgroup_from_obj_folio(folio, p); 2984 } 2985 2986 /* 2987 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2988 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects, 2989 * allocated using vmalloc(). 2990 * 2991 * A passed kernel object must be a slab object or a generic kernel page. 2992 * 2993 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2994 * cgroup_mutex, etc. 2995 */ 2996 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) 2997 { 2998 if (mem_cgroup_disabled()) 2999 return NULL; 3000 3001 return mem_cgroup_from_obj_folio(virt_to_folio(p), p); 3002 } 3003 3004 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg) 3005 { 3006 struct obj_cgroup *objcg = NULL; 3007 3008 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 3009 objcg = rcu_dereference(memcg->objcg); 3010 if (objcg && obj_cgroup_tryget(objcg)) 3011 break; 3012 objcg = NULL; 3013 } 3014 return objcg; 3015 } 3016 3017 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 3018 { 3019 struct obj_cgroup *objcg = NULL; 3020 struct mem_cgroup *memcg; 3021 3022 if (memcg_kmem_bypass()) 3023 return NULL; 3024 3025 rcu_read_lock(); 3026 if (unlikely(active_memcg())) 3027 memcg = active_memcg(); 3028 else 3029 memcg = mem_cgroup_from_task(current); 3030 objcg = __get_obj_cgroup_from_memcg(memcg); 3031 rcu_read_unlock(); 3032 return objcg; 3033 } 3034 3035 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) 3036 { 3037 struct obj_cgroup *objcg; 3038 3039 if (!memcg_kmem_online()) 3040 return NULL; 3041 3042 if (folio_memcg_kmem(folio)) { 3043 objcg = __folio_objcg(folio); 3044 obj_cgroup_get(objcg); 3045 } else { 3046 struct mem_cgroup *memcg; 3047 3048 rcu_read_lock(); 3049 memcg = __folio_memcg(folio); 3050 if (memcg) 3051 objcg = __get_obj_cgroup_from_memcg(memcg); 3052 else 3053 objcg = NULL; 3054 rcu_read_unlock(); 3055 } 3056 return objcg; 3057 } 3058 3059 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages) 3060 { 3061 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); 3062 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 3063 if (nr_pages > 0) 3064 page_counter_charge(&memcg->kmem, nr_pages); 3065 else 3066 page_counter_uncharge(&memcg->kmem, -nr_pages); 3067 } 3068 } 3069 3070 3071 /* 3072 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 3073 * @objcg: object cgroup to uncharge 3074 * @nr_pages: number of pages to uncharge 3075 */ 3076 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 3077 unsigned int nr_pages) 3078 { 3079 struct mem_cgroup *memcg; 3080 3081 memcg = get_mem_cgroup_from_objcg(objcg); 3082 3083 memcg_account_kmem(memcg, -nr_pages); 3084 refill_stock(memcg, nr_pages); 3085 3086 css_put(&memcg->css); 3087 } 3088 3089 /* 3090 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 3091 * @objcg: object cgroup to charge 3092 * @gfp: reclaim mode 3093 * @nr_pages: number of pages to charge 3094 * 3095 * Returns 0 on success, an error code on failure. 3096 */ 3097 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 3098 unsigned int nr_pages) 3099 { 3100 struct mem_cgroup *memcg; 3101 int ret; 3102 3103 memcg = get_mem_cgroup_from_objcg(objcg); 3104 3105 ret = try_charge_memcg(memcg, gfp, nr_pages); 3106 if (ret) 3107 goto out; 3108 3109 memcg_account_kmem(memcg, nr_pages); 3110 out: 3111 css_put(&memcg->css); 3112 3113 return ret; 3114 } 3115 3116 /** 3117 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3118 * @page: page to charge 3119 * @gfp: reclaim mode 3120 * @order: allocation order 3121 * 3122 * Returns 0 on success, an error code on failure. 3123 */ 3124 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3125 { 3126 struct obj_cgroup *objcg; 3127 int ret = 0; 3128 3129 objcg = get_obj_cgroup_from_current(); 3130 if (objcg) { 3131 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 3132 if (!ret) { 3133 page->memcg_data = (unsigned long)objcg | 3134 MEMCG_DATA_KMEM; 3135 return 0; 3136 } 3137 obj_cgroup_put(objcg); 3138 } 3139 return ret; 3140 } 3141 3142 /** 3143 * __memcg_kmem_uncharge_page: uncharge a kmem page 3144 * @page: page to uncharge 3145 * @order: allocation order 3146 */ 3147 void __memcg_kmem_uncharge_page(struct page *page, int order) 3148 { 3149 struct folio *folio = page_folio(page); 3150 struct obj_cgroup *objcg; 3151 unsigned int nr_pages = 1 << order; 3152 3153 if (!folio_memcg_kmem(folio)) 3154 return; 3155 3156 objcg = __folio_objcg(folio); 3157 obj_cgroup_uncharge_pages(objcg, nr_pages); 3158 folio->memcg_data = 0; 3159 obj_cgroup_put(objcg); 3160 } 3161 3162 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 3163 enum node_stat_item idx, int nr) 3164 { 3165 struct memcg_stock_pcp *stock; 3166 struct obj_cgroup *old = NULL; 3167 unsigned long flags; 3168 int *bytes; 3169 3170 local_lock_irqsave(&memcg_stock.stock_lock, flags); 3171 stock = this_cpu_ptr(&memcg_stock); 3172 3173 /* 3174 * Save vmstat data in stock and skip vmstat array update unless 3175 * accumulating over a page of vmstat data or when pgdat or idx 3176 * changes. 3177 */ 3178 if (READ_ONCE(stock->cached_objcg) != objcg) { 3179 old = drain_obj_stock(stock); 3180 obj_cgroup_get(objcg); 3181 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3182 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3183 WRITE_ONCE(stock->cached_objcg, objcg); 3184 stock->cached_pgdat = pgdat; 3185 } else if (stock->cached_pgdat != pgdat) { 3186 /* Flush the existing cached vmstat data */ 3187 struct pglist_data *oldpg = stock->cached_pgdat; 3188 3189 if (stock->nr_slab_reclaimable_b) { 3190 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 3191 stock->nr_slab_reclaimable_b); 3192 stock->nr_slab_reclaimable_b = 0; 3193 } 3194 if (stock->nr_slab_unreclaimable_b) { 3195 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 3196 stock->nr_slab_unreclaimable_b); 3197 stock->nr_slab_unreclaimable_b = 0; 3198 } 3199 stock->cached_pgdat = pgdat; 3200 } 3201 3202 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 3203 : &stock->nr_slab_unreclaimable_b; 3204 /* 3205 * Even for large object >= PAGE_SIZE, the vmstat data will still be 3206 * cached locally at least once before pushing it out. 3207 */ 3208 if (!*bytes) { 3209 *bytes = nr; 3210 nr = 0; 3211 } else { 3212 *bytes += nr; 3213 if (abs(*bytes) > PAGE_SIZE) { 3214 nr = *bytes; 3215 *bytes = 0; 3216 } else { 3217 nr = 0; 3218 } 3219 } 3220 if (nr) 3221 mod_objcg_mlstate(objcg, pgdat, idx, nr); 3222 3223 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 3224 if (old) 3225 obj_cgroup_put(old); 3226 } 3227 3228 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3229 { 3230 struct memcg_stock_pcp *stock; 3231 unsigned long flags; 3232 bool ret = false; 3233 3234 local_lock_irqsave(&memcg_stock.stock_lock, flags); 3235 3236 stock = this_cpu_ptr(&memcg_stock); 3237 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { 3238 stock->nr_bytes -= nr_bytes; 3239 ret = true; 3240 } 3241 3242 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 3243 3244 return ret; 3245 } 3246 3247 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) 3248 { 3249 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); 3250 3251 if (!old) 3252 return NULL; 3253 3254 if (stock->nr_bytes) { 3255 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3256 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3257 3258 if (nr_pages) { 3259 struct mem_cgroup *memcg; 3260 3261 memcg = get_mem_cgroup_from_objcg(old); 3262 3263 memcg_account_kmem(memcg, -nr_pages); 3264 __refill_stock(memcg, nr_pages); 3265 3266 css_put(&memcg->css); 3267 } 3268 3269 /* 3270 * The leftover is flushed to the centralized per-memcg value. 3271 * On the next attempt to refill obj stock it will be moved 3272 * to a per-cpu stock (probably, on an other CPU), see 3273 * refill_obj_stock(). 3274 * 3275 * How often it's flushed is a trade-off between the memory 3276 * limit enforcement accuracy and potential CPU contention, 3277 * so it might be changed in the future. 3278 */ 3279 atomic_add(nr_bytes, &old->nr_charged_bytes); 3280 stock->nr_bytes = 0; 3281 } 3282 3283 /* 3284 * Flush the vmstat data in current stock 3285 */ 3286 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3287 if (stock->nr_slab_reclaimable_b) { 3288 mod_objcg_mlstate(old, stock->cached_pgdat, 3289 NR_SLAB_RECLAIMABLE_B, 3290 stock->nr_slab_reclaimable_b); 3291 stock->nr_slab_reclaimable_b = 0; 3292 } 3293 if (stock->nr_slab_unreclaimable_b) { 3294 mod_objcg_mlstate(old, stock->cached_pgdat, 3295 NR_SLAB_UNRECLAIMABLE_B, 3296 stock->nr_slab_unreclaimable_b); 3297 stock->nr_slab_unreclaimable_b = 0; 3298 } 3299 stock->cached_pgdat = NULL; 3300 } 3301 3302 WRITE_ONCE(stock->cached_objcg, NULL); 3303 /* 3304 * The `old' objects needs to be released by the caller via 3305 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock. 3306 */ 3307 return old; 3308 } 3309 3310 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3311 struct mem_cgroup *root_memcg) 3312 { 3313 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); 3314 struct mem_cgroup *memcg; 3315 3316 if (objcg) { 3317 memcg = obj_cgroup_memcg(objcg); 3318 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3319 return true; 3320 } 3321 3322 return false; 3323 } 3324 3325 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 3326 bool allow_uncharge) 3327 { 3328 struct memcg_stock_pcp *stock; 3329 struct obj_cgroup *old = NULL; 3330 unsigned long flags; 3331 unsigned int nr_pages = 0; 3332 3333 local_lock_irqsave(&memcg_stock.stock_lock, flags); 3334 3335 stock = this_cpu_ptr(&memcg_stock); 3336 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ 3337 old = drain_obj_stock(stock); 3338 obj_cgroup_get(objcg); 3339 WRITE_ONCE(stock->cached_objcg, objcg); 3340 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3341 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3342 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3343 } 3344 stock->nr_bytes += nr_bytes; 3345 3346 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3347 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3348 stock->nr_bytes &= (PAGE_SIZE - 1); 3349 } 3350 3351 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 3352 if (old) 3353 obj_cgroup_put(old); 3354 3355 if (nr_pages) 3356 obj_cgroup_uncharge_pages(objcg, nr_pages); 3357 } 3358 3359 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3360 { 3361 unsigned int nr_pages, nr_bytes; 3362 int ret; 3363 3364 if (consume_obj_stock(objcg, size)) 3365 return 0; 3366 3367 /* 3368 * In theory, objcg->nr_charged_bytes can have enough 3369 * pre-charged bytes to satisfy the allocation. However, 3370 * flushing objcg->nr_charged_bytes requires two atomic 3371 * operations, and objcg->nr_charged_bytes can't be big. 3372 * The shared objcg->nr_charged_bytes can also become a 3373 * performance bottleneck if all tasks of the same memcg are 3374 * trying to update it. So it's better to ignore it and try 3375 * grab some new pages. The stock's nr_bytes will be flushed to 3376 * objcg->nr_charged_bytes later on when objcg changes. 3377 * 3378 * The stock's nr_bytes may contain enough pre-charged bytes 3379 * to allow one less page from being charged, but we can't rely 3380 * on the pre-charged bytes not being changed outside of 3381 * consume_obj_stock() or refill_obj_stock(). So ignore those 3382 * pre-charged bytes as well when charging pages. To avoid a 3383 * page uncharge right after a page charge, we set the 3384 * allow_uncharge flag to false when calling refill_obj_stock() 3385 * to temporarily allow the pre-charged bytes to exceed the page 3386 * size limit. The maximum reachable value of the pre-charged 3387 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3388 * race. 3389 */ 3390 nr_pages = size >> PAGE_SHIFT; 3391 nr_bytes = size & (PAGE_SIZE - 1); 3392 3393 if (nr_bytes) 3394 nr_pages += 1; 3395 3396 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3397 if (!ret && nr_bytes) 3398 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 3399 3400 return ret; 3401 } 3402 3403 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3404 { 3405 refill_obj_stock(objcg, size, true); 3406 } 3407 3408 #endif /* CONFIG_MEMCG_KMEM */ 3409 3410 /* 3411 * Because page_memcg(head) is not set on tails, set it now. 3412 */ 3413 void split_page_memcg(struct page *head, unsigned int nr) 3414 { 3415 struct folio *folio = page_folio(head); 3416 struct mem_cgroup *memcg = folio_memcg(folio); 3417 int i; 3418 3419 if (mem_cgroup_disabled() || !memcg) 3420 return; 3421 3422 for (i = 1; i < nr; i++) 3423 folio_page(folio, i)->memcg_data = folio->memcg_data; 3424 3425 if (folio_memcg_kmem(folio)) 3426 obj_cgroup_get_many(__folio_objcg(folio), nr - 1); 3427 else 3428 css_get_many(&memcg->css, nr - 1); 3429 } 3430 3431 #ifdef CONFIG_SWAP 3432 /** 3433 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3434 * @entry: swap entry to be moved 3435 * @from: mem_cgroup which the entry is moved from 3436 * @to: mem_cgroup which the entry is moved to 3437 * 3438 * It succeeds only when the swap_cgroup's record for this entry is the same 3439 * as the mem_cgroup's id of @from. 3440 * 3441 * Returns 0 on success, -EINVAL on failure. 3442 * 3443 * The caller must have charged to @to, IOW, called page_counter_charge() about 3444 * both res and memsw, and called css_get(). 3445 */ 3446 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3447 struct mem_cgroup *from, struct mem_cgroup *to) 3448 { 3449 unsigned short old_id, new_id; 3450 3451 old_id = mem_cgroup_id(from); 3452 new_id = mem_cgroup_id(to); 3453 3454 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3455 mod_memcg_state(from, MEMCG_SWAP, -1); 3456 mod_memcg_state(to, MEMCG_SWAP, 1); 3457 return 0; 3458 } 3459 return -EINVAL; 3460 } 3461 #else 3462 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3463 struct mem_cgroup *from, struct mem_cgroup *to) 3464 { 3465 return -EINVAL; 3466 } 3467 #endif 3468 3469 static DEFINE_MUTEX(memcg_max_mutex); 3470 3471 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3472 unsigned long max, bool memsw) 3473 { 3474 bool enlarge = false; 3475 bool drained = false; 3476 int ret; 3477 bool limits_invariant; 3478 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3479 3480 do { 3481 if (signal_pending(current)) { 3482 ret = -EINTR; 3483 break; 3484 } 3485 3486 mutex_lock(&memcg_max_mutex); 3487 /* 3488 * Make sure that the new limit (memsw or memory limit) doesn't 3489 * break our basic invariant rule memory.max <= memsw.max. 3490 */ 3491 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3492 max <= memcg->memsw.max; 3493 if (!limits_invariant) { 3494 mutex_unlock(&memcg_max_mutex); 3495 ret = -EINVAL; 3496 break; 3497 } 3498 if (max > counter->max) 3499 enlarge = true; 3500 ret = page_counter_set_max(counter, max); 3501 mutex_unlock(&memcg_max_mutex); 3502 3503 if (!ret) 3504 break; 3505 3506 if (!drained) { 3507 drain_all_stock(memcg); 3508 drained = true; 3509 continue; 3510 } 3511 3512 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, 3513 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) { 3514 ret = -EBUSY; 3515 break; 3516 } 3517 } while (true); 3518 3519 if (!ret && enlarge) 3520 memcg_oom_recover(memcg); 3521 3522 return ret; 3523 } 3524 3525 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3526 gfp_t gfp_mask, 3527 unsigned long *total_scanned) 3528 { 3529 unsigned long nr_reclaimed = 0; 3530 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3531 unsigned long reclaimed; 3532 int loop = 0; 3533 struct mem_cgroup_tree_per_node *mctz; 3534 unsigned long excess; 3535 3536 if (lru_gen_enabled()) 3537 return 0; 3538 3539 if (order > 0) 3540 return 0; 3541 3542 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id]; 3543 3544 /* 3545 * Do not even bother to check the largest node if the root 3546 * is empty. Do it lockless to prevent lock bouncing. Races 3547 * are acceptable as soft limit is best effort anyway. 3548 */ 3549 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3550 return 0; 3551 3552 /* 3553 * This loop can run a while, specially if mem_cgroup's continuously 3554 * keep exceeding their soft limit and putting the system under 3555 * pressure 3556 */ 3557 do { 3558 if (next_mz) 3559 mz = next_mz; 3560 else 3561 mz = mem_cgroup_largest_soft_limit_node(mctz); 3562 if (!mz) 3563 break; 3564 3565 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3566 gfp_mask, total_scanned); 3567 nr_reclaimed += reclaimed; 3568 spin_lock_irq(&mctz->lock); 3569 3570 /* 3571 * If we failed to reclaim anything from this memory cgroup 3572 * it is time to move on to the next cgroup 3573 */ 3574 next_mz = NULL; 3575 if (!reclaimed) 3576 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3577 3578 excess = soft_limit_excess(mz->memcg); 3579 /* 3580 * One school of thought says that we should not add 3581 * back the node to the tree if reclaim returns 0. 3582 * But our reclaim could return 0, simply because due 3583 * to priority we are exposing a smaller subset of 3584 * memory to reclaim from. Consider this as a longer 3585 * term TODO. 3586 */ 3587 /* If excess == 0, no tree ops */ 3588 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3589 spin_unlock_irq(&mctz->lock); 3590 css_put(&mz->memcg->css); 3591 loop++; 3592 /* 3593 * Could not reclaim anything and there are no more 3594 * mem cgroups to try or we seem to be looping without 3595 * reclaiming anything. 3596 */ 3597 if (!nr_reclaimed && 3598 (next_mz == NULL || 3599 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3600 break; 3601 } while (!nr_reclaimed); 3602 if (next_mz) 3603 css_put(&next_mz->memcg->css); 3604 return nr_reclaimed; 3605 } 3606 3607 /* 3608 * Reclaims as many pages from the given memcg as possible. 3609 * 3610 * Caller is responsible for holding css reference for memcg. 3611 */ 3612 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3613 { 3614 int nr_retries = MAX_RECLAIM_RETRIES; 3615 3616 /* we call try-to-free pages for make this cgroup empty */ 3617 lru_add_drain_all(); 3618 3619 drain_all_stock(memcg); 3620 3621 /* try to free all pages in this cgroup */ 3622 while (nr_retries && page_counter_read(&memcg->memory)) { 3623 if (signal_pending(current)) 3624 return -EINTR; 3625 3626 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, 3627 MEMCG_RECLAIM_MAY_SWAP)) 3628 nr_retries--; 3629 } 3630 3631 return 0; 3632 } 3633 3634 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3635 char *buf, size_t nbytes, 3636 loff_t off) 3637 { 3638 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3639 3640 if (mem_cgroup_is_root(memcg)) 3641 return -EINVAL; 3642 return mem_cgroup_force_empty(memcg) ?: nbytes; 3643 } 3644 3645 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3646 struct cftype *cft) 3647 { 3648 return 1; 3649 } 3650 3651 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3652 struct cftype *cft, u64 val) 3653 { 3654 if (val == 1) 3655 return 0; 3656 3657 pr_warn_once("Non-hierarchical mode is deprecated. " 3658 "Please report your usecase to linux-mm@kvack.org if you " 3659 "depend on this functionality.\n"); 3660 3661 return -EINVAL; 3662 } 3663 3664 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3665 { 3666 unsigned long val; 3667 3668 if (mem_cgroup_is_root(memcg)) { 3669 /* 3670 * Approximate root's usage from global state. This isn't 3671 * perfect, but the root usage was always an approximation. 3672 */ 3673 val = global_node_page_state(NR_FILE_PAGES) + 3674 global_node_page_state(NR_ANON_MAPPED); 3675 if (swap) 3676 val += total_swap_pages - get_nr_swap_pages(); 3677 } else { 3678 if (!swap) 3679 val = page_counter_read(&memcg->memory); 3680 else 3681 val = page_counter_read(&memcg->memsw); 3682 } 3683 return val; 3684 } 3685 3686 enum { 3687 RES_USAGE, 3688 RES_LIMIT, 3689 RES_MAX_USAGE, 3690 RES_FAILCNT, 3691 RES_SOFT_LIMIT, 3692 }; 3693 3694 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3695 struct cftype *cft) 3696 { 3697 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3698 struct page_counter *counter; 3699 3700 switch (MEMFILE_TYPE(cft->private)) { 3701 case _MEM: 3702 counter = &memcg->memory; 3703 break; 3704 case _MEMSWAP: 3705 counter = &memcg->memsw; 3706 break; 3707 case _KMEM: 3708 counter = &memcg->kmem; 3709 break; 3710 case _TCP: 3711 counter = &memcg->tcpmem; 3712 break; 3713 default: 3714 BUG(); 3715 } 3716 3717 switch (MEMFILE_ATTR(cft->private)) { 3718 case RES_USAGE: 3719 if (counter == &memcg->memory) 3720 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3721 if (counter == &memcg->memsw) 3722 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3723 return (u64)page_counter_read(counter) * PAGE_SIZE; 3724 case RES_LIMIT: 3725 return (u64)counter->max * PAGE_SIZE; 3726 case RES_MAX_USAGE: 3727 return (u64)counter->watermark * PAGE_SIZE; 3728 case RES_FAILCNT: 3729 return counter->failcnt; 3730 case RES_SOFT_LIMIT: 3731 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; 3732 default: 3733 BUG(); 3734 } 3735 } 3736 3737 /* 3738 * This function doesn't do anything useful. Its only job is to provide a read 3739 * handler for a file so that cgroup_file_mode() will add read permissions. 3740 */ 3741 static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m, 3742 __always_unused void *v) 3743 { 3744 return -EINVAL; 3745 } 3746 3747 #ifdef CONFIG_MEMCG_KMEM 3748 static int memcg_online_kmem(struct mem_cgroup *memcg) 3749 { 3750 struct obj_cgroup *objcg; 3751 3752 if (mem_cgroup_kmem_disabled()) 3753 return 0; 3754 3755 if (unlikely(mem_cgroup_is_root(memcg))) 3756 return 0; 3757 3758 objcg = obj_cgroup_alloc(); 3759 if (!objcg) 3760 return -ENOMEM; 3761 3762 objcg->memcg = memcg; 3763 rcu_assign_pointer(memcg->objcg, objcg); 3764 3765 static_branch_enable(&memcg_kmem_online_key); 3766 3767 memcg->kmemcg_id = memcg->id.id; 3768 3769 return 0; 3770 } 3771 3772 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3773 { 3774 struct mem_cgroup *parent; 3775 3776 if (mem_cgroup_kmem_disabled()) 3777 return; 3778 3779 if (unlikely(mem_cgroup_is_root(memcg))) 3780 return; 3781 3782 parent = parent_mem_cgroup(memcg); 3783 if (!parent) 3784 parent = root_mem_cgroup; 3785 3786 memcg_reparent_objcgs(memcg, parent); 3787 3788 /* 3789 * After we have finished memcg_reparent_objcgs(), all list_lrus 3790 * corresponding to this cgroup are guaranteed to remain empty. 3791 * The ordering is imposed by list_lru_node->lock taken by 3792 * memcg_reparent_list_lrus(). 3793 */ 3794 memcg_reparent_list_lrus(memcg, parent); 3795 } 3796 #else 3797 static int memcg_online_kmem(struct mem_cgroup *memcg) 3798 { 3799 return 0; 3800 } 3801 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3802 { 3803 } 3804 #endif /* CONFIG_MEMCG_KMEM */ 3805 3806 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3807 { 3808 int ret; 3809 3810 mutex_lock(&memcg_max_mutex); 3811 3812 ret = page_counter_set_max(&memcg->tcpmem, max); 3813 if (ret) 3814 goto out; 3815 3816 if (!memcg->tcpmem_active) { 3817 /* 3818 * The active flag needs to be written after the static_key 3819 * update. This is what guarantees that the socket activation 3820 * function is the last one to run. See mem_cgroup_sk_alloc() 3821 * for details, and note that we don't mark any socket as 3822 * belonging to this memcg until that flag is up. 3823 * 3824 * We need to do this, because static_keys will span multiple 3825 * sites, but we can't control their order. If we mark a socket 3826 * as accounted, but the accounting functions are not patched in 3827 * yet, we'll lose accounting. 3828 * 3829 * We never race with the readers in mem_cgroup_sk_alloc(), 3830 * because when this value change, the code to process it is not 3831 * patched in yet. 3832 */ 3833 static_branch_inc(&memcg_sockets_enabled_key); 3834 memcg->tcpmem_active = true; 3835 } 3836 out: 3837 mutex_unlock(&memcg_max_mutex); 3838 return ret; 3839 } 3840 3841 /* 3842 * The user of this function is... 3843 * RES_LIMIT. 3844 */ 3845 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3846 char *buf, size_t nbytes, loff_t off) 3847 { 3848 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3849 unsigned long nr_pages; 3850 int ret; 3851 3852 buf = strstrip(buf); 3853 ret = page_counter_memparse(buf, "-1", &nr_pages); 3854 if (ret) 3855 return ret; 3856 3857 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3858 case RES_LIMIT: 3859 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3860 ret = -EINVAL; 3861 break; 3862 } 3863 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3864 case _MEM: 3865 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3866 break; 3867 case _MEMSWAP: 3868 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3869 break; 3870 case _TCP: 3871 ret = memcg_update_tcp_max(memcg, nr_pages); 3872 break; 3873 } 3874 break; 3875 case RES_SOFT_LIMIT: 3876 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 3877 ret = -EOPNOTSUPP; 3878 } else { 3879 WRITE_ONCE(memcg->soft_limit, nr_pages); 3880 ret = 0; 3881 } 3882 break; 3883 } 3884 return ret ?: nbytes; 3885 } 3886 3887 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3888 size_t nbytes, loff_t off) 3889 { 3890 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3891 struct page_counter *counter; 3892 3893 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3894 case _MEM: 3895 counter = &memcg->memory; 3896 break; 3897 case _MEMSWAP: 3898 counter = &memcg->memsw; 3899 break; 3900 case _KMEM: 3901 counter = &memcg->kmem; 3902 break; 3903 case _TCP: 3904 counter = &memcg->tcpmem; 3905 break; 3906 default: 3907 BUG(); 3908 } 3909 3910 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3911 case RES_MAX_USAGE: 3912 page_counter_reset_watermark(counter); 3913 break; 3914 case RES_FAILCNT: 3915 counter->failcnt = 0; 3916 break; 3917 default: 3918 BUG(); 3919 } 3920 3921 return nbytes; 3922 } 3923 3924 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3925 struct cftype *cft) 3926 { 3927 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3928 } 3929 3930 #ifdef CONFIG_MMU 3931 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3932 struct cftype *cft, u64 val) 3933 { 3934 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3935 3936 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. " 3937 "Please report your usecase to linux-mm@kvack.org if you " 3938 "depend on this functionality.\n"); 3939 3940 if (val & ~MOVE_MASK) 3941 return -EINVAL; 3942 3943 /* 3944 * No kind of locking is needed in here, because ->can_attach() will 3945 * check this value once in the beginning of the process, and then carry 3946 * on with stale data. This means that changes to this value will only 3947 * affect task migrations starting after the change. 3948 */ 3949 memcg->move_charge_at_immigrate = val; 3950 return 0; 3951 } 3952 #else 3953 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3954 struct cftype *cft, u64 val) 3955 { 3956 return -ENOSYS; 3957 } 3958 #endif 3959 3960 #ifdef CONFIG_NUMA 3961 3962 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3963 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3964 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3965 3966 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3967 int nid, unsigned int lru_mask, bool tree) 3968 { 3969 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3970 unsigned long nr = 0; 3971 enum lru_list lru; 3972 3973 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3974 3975 for_each_lru(lru) { 3976 if (!(BIT(lru) & lru_mask)) 3977 continue; 3978 if (tree) 3979 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3980 else 3981 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3982 } 3983 return nr; 3984 } 3985 3986 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3987 unsigned int lru_mask, 3988 bool tree) 3989 { 3990 unsigned long nr = 0; 3991 enum lru_list lru; 3992 3993 for_each_lru(lru) { 3994 if (!(BIT(lru) & lru_mask)) 3995 continue; 3996 if (tree) 3997 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3998 else 3999 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 4000 } 4001 return nr; 4002 } 4003 4004 static int memcg_numa_stat_show(struct seq_file *m, void *v) 4005 { 4006 struct numa_stat { 4007 const char *name; 4008 unsigned int lru_mask; 4009 }; 4010 4011 static const struct numa_stat stats[] = { 4012 { "total", LRU_ALL }, 4013 { "file", LRU_ALL_FILE }, 4014 { "anon", LRU_ALL_ANON }, 4015 { "unevictable", BIT(LRU_UNEVICTABLE) }, 4016 }; 4017 const struct numa_stat *stat; 4018 int nid; 4019 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4020 4021 mem_cgroup_flush_stats(); 4022 4023 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4024 seq_printf(m, "%s=%lu", stat->name, 4025 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4026 false)); 4027 for_each_node_state(nid, N_MEMORY) 4028 seq_printf(m, " N%d=%lu", nid, 4029 mem_cgroup_node_nr_lru_pages(memcg, nid, 4030 stat->lru_mask, false)); 4031 seq_putc(m, '\n'); 4032 } 4033 4034 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4035 4036 seq_printf(m, "hierarchical_%s=%lu", stat->name, 4037 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4038 true)); 4039 for_each_node_state(nid, N_MEMORY) 4040 seq_printf(m, " N%d=%lu", nid, 4041 mem_cgroup_node_nr_lru_pages(memcg, nid, 4042 stat->lru_mask, true)); 4043 seq_putc(m, '\n'); 4044 } 4045 4046 return 0; 4047 } 4048 #endif /* CONFIG_NUMA */ 4049 4050 static const unsigned int memcg1_stats[] = { 4051 NR_FILE_PAGES, 4052 NR_ANON_MAPPED, 4053 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4054 NR_ANON_THPS, 4055 #endif 4056 NR_SHMEM, 4057 NR_FILE_MAPPED, 4058 NR_FILE_DIRTY, 4059 NR_WRITEBACK, 4060 WORKINGSET_REFAULT_ANON, 4061 WORKINGSET_REFAULT_FILE, 4062 MEMCG_SWAP, 4063 }; 4064 4065 static const char *const memcg1_stat_names[] = { 4066 "cache", 4067 "rss", 4068 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4069 "rss_huge", 4070 #endif 4071 "shmem", 4072 "mapped_file", 4073 "dirty", 4074 "writeback", 4075 "workingset_refault_anon", 4076 "workingset_refault_file", 4077 "swap", 4078 }; 4079 4080 /* Universal VM events cgroup1 shows, original sort order */ 4081 static const unsigned int memcg1_events[] = { 4082 PGPGIN, 4083 PGPGOUT, 4084 PGFAULT, 4085 PGMAJFAULT, 4086 }; 4087 4088 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) 4089 { 4090 unsigned long memory, memsw; 4091 struct mem_cgroup *mi; 4092 unsigned int i; 4093 4094 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4095 4096 mem_cgroup_flush_stats(); 4097 4098 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4099 unsigned long nr; 4100 4101 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4102 continue; 4103 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4104 seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], 4105 nr * memcg_page_state_unit(memcg1_stats[i])); 4106 } 4107 4108 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4109 seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]), 4110 memcg_events_local(memcg, memcg1_events[i])); 4111 4112 for (i = 0; i < NR_LRU_LISTS; i++) 4113 seq_buf_printf(s, "%s %lu\n", lru_list_name(i), 4114 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4115 PAGE_SIZE); 4116 4117 /* Hierarchical information */ 4118 memory = memsw = PAGE_COUNTER_MAX; 4119 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4120 memory = min(memory, READ_ONCE(mi->memory.max)); 4121 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4122 } 4123 seq_buf_printf(s, "hierarchical_memory_limit %llu\n", 4124 (u64)memory * PAGE_SIZE); 4125 if (do_memsw_account()) 4126 seq_buf_printf(s, "hierarchical_memsw_limit %llu\n", 4127 (u64)memsw * PAGE_SIZE); 4128 4129 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4130 unsigned long nr; 4131 4132 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4133 continue; 4134 nr = memcg_page_state(memcg, memcg1_stats[i]); 4135 seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i], 4136 (u64)nr * memcg_page_state_unit(memcg1_stats[i])); 4137 } 4138 4139 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4140 seq_buf_printf(s, "total_%s %llu\n", 4141 vm_event_name(memcg1_events[i]), 4142 (u64)memcg_events(memcg, memcg1_events[i])); 4143 4144 for (i = 0; i < NR_LRU_LISTS; i++) 4145 seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i), 4146 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4147 PAGE_SIZE); 4148 4149 #ifdef CONFIG_DEBUG_VM 4150 { 4151 pg_data_t *pgdat; 4152 struct mem_cgroup_per_node *mz; 4153 unsigned long anon_cost = 0; 4154 unsigned long file_cost = 0; 4155 4156 for_each_online_pgdat(pgdat) { 4157 mz = memcg->nodeinfo[pgdat->node_id]; 4158 4159 anon_cost += mz->lruvec.anon_cost; 4160 file_cost += mz->lruvec.file_cost; 4161 } 4162 seq_buf_printf(s, "anon_cost %lu\n", anon_cost); 4163 seq_buf_printf(s, "file_cost %lu\n", file_cost); 4164 } 4165 #endif 4166 } 4167 4168 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4169 struct cftype *cft) 4170 { 4171 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4172 4173 return mem_cgroup_swappiness(memcg); 4174 } 4175 4176 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4177 struct cftype *cft, u64 val) 4178 { 4179 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4180 4181 if (val > 200) 4182 return -EINVAL; 4183 4184 if (!mem_cgroup_is_root(memcg)) 4185 WRITE_ONCE(memcg->swappiness, val); 4186 else 4187 WRITE_ONCE(vm_swappiness, val); 4188 4189 return 0; 4190 } 4191 4192 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4193 { 4194 struct mem_cgroup_threshold_ary *t; 4195 unsigned long usage; 4196 int i; 4197 4198 rcu_read_lock(); 4199 if (!swap) 4200 t = rcu_dereference(memcg->thresholds.primary); 4201 else 4202 t = rcu_dereference(memcg->memsw_thresholds.primary); 4203 4204 if (!t) 4205 goto unlock; 4206 4207 usage = mem_cgroup_usage(memcg, swap); 4208 4209 /* 4210 * current_threshold points to threshold just below or equal to usage. 4211 * If it's not true, a threshold was crossed after last 4212 * call of __mem_cgroup_threshold(). 4213 */ 4214 i = t->current_threshold; 4215 4216 /* 4217 * Iterate backward over array of thresholds starting from 4218 * current_threshold and check if a threshold is crossed. 4219 * If none of thresholds below usage is crossed, we read 4220 * only one element of the array here. 4221 */ 4222 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4223 eventfd_signal(t->entries[i].eventfd, 1); 4224 4225 /* i = current_threshold + 1 */ 4226 i++; 4227 4228 /* 4229 * Iterate forward over array of thresholds starting from 4230 * current_threshold+1 and check if a threshold is crossed. 4231 * If none of thresholds above usage is crossed, we read 4232 * only one element of the array here. 4233 */ 4234 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4235 eventfd_signal(t->entries[i].eventfd, 1); 4236 4237 /* Update current_threshold */ 4238 t->current_threshold = i - 1; 4239 unlock: 4240 rcu_read_unlock(); 4241 } 4242 4243 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4244 { 4245 while (memcg) { 4246 __mem_cgroup_threshold(memcg, false); 4247 if (do_memsw_account()) 4248 __mem_cgroup_threshold(memcg, true); 4249 4250 memcg = parent_mem_cgroup(memcg); 4251 } 4252 } 4253 4254 static int compare_thresholds(const void *a, const void *b) 4255 { 4256 const struct mem_cgroup_threshold *_a = a; 4257 const struct mem_cgroup_threshold *_b = b; 4258 4259 if (_a->threshold > _b->threshold) 4260 return 1; 4261 4262 if (_a->threshold < _b->threshold) 4263 return -1; 4264 4265 return 0; 4266 } 4267 4268 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4269 { 4270 struct mem_cgroup_eventfd_list *ev; 4271 4272 spin_lock(&memcg_oom_lock); 4273 4274 list_for_each_entry(ev, &memcg->oom_notify, list) 4275 eventfd_signal(ev->eventfd, 1); 4276 4277 spin_unlock(&memcg_oom_lock); 4278 return 0; 4279 } 4280 4281 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4282 { 4283 struct mem_cgroup *iter; 4284 4285 for_each_mem_cgroup_tree(iter, memcg) 4286 mem_cgroup_oom_notify_cb(iter); 4287 } 4288 4289 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4290 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4291 { 4292 struct mem_cgroup_thresholds *thresholds; 4293 struct mem_cgroup_threshold_ary *new; 4294 unsigned long threshold; 4295 unsigned long usage; 4296 int i, size, ret; 4297 4298 ret = page_counter_memparse(args, "-1", &threshold); 4299 if (ret) 4300 return ret; 4301 4302 mutex_lock(&memcg->thresholds_lock); 4303 4304 if (type == _MEM) { 4305 thresholds = &memcg->thresholds; 4306 usage = mem_cgroup_usage(memcg, false); 4307 } else if (type == _MEMSWAP) { 4308 thresholds = &memcg->memsw_thresholds; 4309 usage = mem_cgroup_usage(memcg, true); 4310 } else 4311 BUG(); 4312 4313 /* Check if a threshold crossed before adding a new one */ 4314 if (thresholds->primary) 4315 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4316 4317 size = thresholds->primary ? thresholds->primary->size + 1 : 1; 4318 4319 /* Allocate memory for new array of thresholds */ 4320 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4321 if (!new) { 4322 ret = -ENOMEM; 4323 goto unlock; 4324 } 4325 new->size = size; 4326 4327 /* Copy thresholds (if any) to new array */ 4328 if (thresholds->primary) 4329 memcpy(new->entries, thresholds->primary->entries, 4330 flex_array_size(new, entries, size - 1)); 4331 4332 /* Add new threshold */ 4333 new->entries[size - 1].eventfd = eventfd; 4334 new->entries[size - 1].threshold = threshold; 4335 4336 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4337 sort(new->entries, size, sizeof(*new->entries), 4338 compare_thresholds, NULL); 4339 4340 /* Find current threshold */ 4341 new->current_threshold = -1; 4342 for (i = 0; i < size; i++) { 4343 if (new->entries[i].threshold <= usage) { 4344 /* 4345 * new->current_threshold will not be used until 4346 * rcu_assign_pointer(), so it's safe to increment 4347 * it here. 4348 */ 4349 ++new->current_threshold; 4350 } else 4351 break; 4352 } 4353 4354 /* Free old spare buffer and save old primary buffer as spare */ 4355 kfree(thresholds->spare); 4356 thresholds->spare = thresholds->primary; 4357 4358 rcu_assign_pointer(thresholds->primary, new); 4359 4360 /* To be sure that nobody uses thresholds */ 4361 synchronize_rcu(); 4362 4363 unlock: 4364 mutex_unlock(&memcg->thresholds_lock); 4365 4366 return ret; 4367 } 4368 4369 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4370 struct eventfd_ctx *eventfd, const char *args) 4371 { 4372 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4373 } 4374 4375 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4376 struct eventfd_ctx *eventfd, const char *args) 4377 { 4378 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4379 } 4380 4381 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4382 struct eventfd_ctx *eventfd, enum res_type type) 4383 { 4384 struct mem_cgroup_thresholds *thresholds; 4385 struct mem_cgroup_threshold_ary *new; 4386 unsigned long usage; 4387 int i, j, size, entries; 4388 4389 mutex_lock(&memcg->thresholds_lock); 4390 4391 if (type == _MEM) { 4392 thresholds = &memcg->thresholds; 4393 usage = mem_cgroup_usage(memcg, false); 4394 } else if (type == _MEMSWAP) { 4395 thresholds = &memcg->memsw_thresholds; 4396 usage = mem_cgroup_usage(memcg, true); 4397 } else 4398 BUG(); 4399 4400 if (!thresholds->primary) 4401 goto unlock; 4402 4403 /* Check if a threshold crossed before removing */ 4404 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4405 4406 /* Calculate new number of threshold */ 4407 size = entries = 0; 4408 for (i = 0; i < thresholds->primary->size; i++) { 4409 if (thresholds->primary->entries[i].eventfd != eventfd) 4410 size++; 4411 else 4412 entries++; 4413 } 4414 4415 new = thresholds->spare; 4416 4417 /* If no items related to eventfd have been cleared, nothing to do */ 4418 if (!entries) 4419 goto unlock; 4420 4421 /* Set thresholds array to NULL if we don't have thresholds */ 4422 if (!size) { 4423 kfree(new); 4424 new = NULL; 4425 goto swap_buffers; 4426 } 4427 4428 new->size = size; 4429 4430 /* Copy thresholds and find current threshold */ 4431 new->current_threshold = -1; 4432 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4433 if (thresholds->primary->entries[i].eventfd == eventfd) 4434 continue; 4435 4436 new->entries[j] = thresholds->primary->entries[i]; 4437 if (new->entries[j].threshold <= usage) { 4438 /* 4439 * new->current_threshold will not be used 4440 * until rcu_assign_pointer(), so it's safe to increment 4441 * it here. 4442 */ 4443 ++new->current_threshold; 4444 } 4445 j++; 4446 } 4447 4448 swap_buffers: 4449 /* Swap primary and spare array */ 4450 thresholds->spare = thresholds->primary; 4451 4452 rcu_assign_pointer(thresholds->primary, new); 4453 4454 /* To be sure that nobody uses thresholds */ 4455 synchronize_rcu(); 4456 4457 /* If all events are unregistered, free the spare array */ 4458 if (!new) { 4459 kfree(thresholds->spare); 4460 thresholds->spare = NULL; 4461 } 4462 unlock: 4463 mutex_unlock(&memcg->thresholds_lock); 4464 } 4465 4466 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4467 struct eventfd_ctx *eventfd) 4468 { 4469 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4470 } 4471 4472 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4473 struct eventfd_ctx *eventfd) 4474 { 4475 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4476 } 4477 4478 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4479 struct eventfd_ctx *eventfd, const char *args) 4480 { 4481 struct mem_cgroup_eventfd_list *event; 4482 4483 event = kmalloc(sizeof(*event), GFP_KERNEL); 4484 if (!event) 4485 return -ENOMEM; 4486 4487 spin_lock(&memcg_oom_lock); 4488 4489 event->eventfd = eventfd; 4490 list_add(&event->list, &memcg->oom_notify); 4491 4492 /* already in OOM ? */ 4493 if (memcg->under_oom) 4494 eventfd_signal(eventfd, 1); 4495 spin_unlock(&memcg_oom_lock); 4496 4497 return 0; 4498 } 4499 4500 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4501 struct eventfd_ctx *eventfd) 4502 { 4503 struct mem_cgroup_eventfd_list *ev, *tmp; 4504 4505 spin_lock(&memcg_oom_lock); 4506 4507 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4508 if (ev->eventfd == eventfd) { 4509 list_del(&ev->list); 4510 kfree(ev); 4511 } 4512 } 4513 4514 spin_unlock(&memcg_oom_lock); 4515 } 4516 4517 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4518 { 4519 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4520 4521 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable)); 4522 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4523 seq_printf(sf, "oom_kill %lu\n", 4524 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4525 return 0; 4526 } 4527 4528 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4529 struct cftype *cft, u64 val) 4530 { 4531 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4532 4533 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4534 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) 4535 return -EINVAL; 4536 4537 WRITE_ONCE(memcg->oom_kill_disable, val); 4538 if (!val) 4539 memcg_oom_recover(memcg); 4540 4541 return 0; 4542 } 4543 4544 #ifdef CONFIG_CGROUP_WRITEBACK 4545 4546 #include <trace/events/writeback.h> 4547 4548 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4549 { 4550 return wb_domain_init(&memcg->cgwb_domain, gfp); 4551 } 4552 4553 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4554 { 4555 wb_domain_exit(&memcg->cgwb_domain); 4556 } 4557 4558 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4559 { 4560 wb_domain_size_changed(&memcg->cgwb_domain); 4561 } 4562 4563 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4564 { 4565 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4566 4567 if (!memcg->css.parent) 4568 return NULL; 4569 4570 return &memcg->cgwb_domain; 4571 } 4572 4573 /** 4574 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4575 * @wb: bdi_writeback in question 4576 * @pfilepages: out parameter for number of file pages 4577 * @pheadroom: out parameter for number of allocatable pages according to memcg 4578 * @pdirty: out parameter for number of dirty pages 4579 * @pwriteback: out parameter for number of pages under writeback 4580 * 4581 * Determine the numbers of file, headroom, dirty, and writeback pages in 4582 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4583 * is a bit more involved. 4584 * 4585 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4586 * headroom is calculated as the lowest headroom of itself and the 4587 * ancestors. Note that this doesn't consider the actual amount of 4588 * available memory in the system. The caller should further cap 4589 * *@pheadroom accordingly. 4590 */ 4591 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4592 unsigned long *pheadroom, unsigned long *pdirty, 4593 unsigned long *pwriteback) 4594 { 4595 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4596 struct mem_cgroup *parent; 4597 4598 mem_cgroup_flush_stats(); 4599 4600 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 4601 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 4602 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 4603 memcg_page_state(memcg, NR_ACTIVE_FILE); 4604 4605 *pheadroom = PAGE_COUNTER_MAX; 4606 while ((parent = parent_mem_cgroup(memcg))) { 4607 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4608 READ_ONCE(memcg->memory.high)); 4609 unsigned long used = page_counter_read(&memcg->memory); 4610 4611 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4612 memcg = parent; 4613 } 4614 } 4615 4616 /* 4617 * Foreign dirty flushing 4618 * 4619 * There's an inherent mismatch between memcg and writeback. The former 4620 * tracks ownership per-page while the latter per-inode. This was a 4621 * deliberate design decision because honoring per-page ownership in the 4622 * writeback path is complicated, may lead to higher CPU and IO overheads 4623 * and deemed unnecessary given that write-sharing an inode across 4624 * different cgroups isn't a common use-case. 4625 * 4626 * Combined with inode majority-writer ownership switching, this works well 4627 * enough in most cases but there are some pathological cases. For 4628 * example, let's say there are two cgroups A and B which keep writing to 4629 * different but confined parts of the same inode. B owns the inode and 4630 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4631 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4632 * triggering background writeback. A will be slowed down without a way to 4633 * make writeback of the dirty pages happen. 4634 * 4635 * Conditions like the above can lead to a cgroup getting repeatedly and 4636 * severely throttled after making some progress after each 4637 * dirty_expire_interval while the underlying IO device is almost 4638 * completely idle. 4639 * 4640 * Solving this problem completely requires matching the ownership tracking 4641 * granularities between memcg and writeback in either direction. However, 4642 * the more egregious behaviors can be avoided by simply remembering the 4643 * most recent foreign dirtying events and initiating remote flushes on 4644 * them when local writeback isn't enough to keep the memory clean enough. 4645 * 4646 * The following two functions implement such mechanism. When a foreign 4647 * page - a page whose memcg and writeback ownerships don't match - is 4648 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4649 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4650 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4651 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4652 * foreign bdi_writebacks which haven't expired. Both the numbers of 4653 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4654 * limited to MEMCG_CGWB_FRN_CNT. 4655 * 4656 * The mechanism only remembers IDs and doesn't hold any object references. 4657 * As being wrong occasionally doesn't matter, updates and accesses to the 4658 * records are lockless and racy. 4659 */ 4660 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, 4661 struct bdi_writeback *wb) 4662 { 4663 struct mem_cgroup *memcg = folio_memcg(folio); 4664 struct memcg_cgwb_frn *frn; 4665 u64 now = get_jiffies_64(); 4666 u64 oldest_at = now; 4667 int oldest = -1; 4668 int i; 4669 4670 trace_track_foreign_dirty(folio, wb); 4671 4672 /* 4673 * Pick the slot to use. If there is already a slot for @wb, keep 4674 * using it. If not replace the oldest one which isn't being 4675 * written out. 4676 */ 4677 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4678 frn = &memcg->cgwb_frn[i]; 4679 if (frn->bdi_id == wb->bdi->id && 4680 frn->memcg_id == wb->memcg_css->id) 4681 break; 4682 if (time_before64(frn->at, oldest_at) && 4683 atomic_read(&frn->done.cnt) == 1) { 4684 oldest = i; 4685 oldest_at = frn->at; 4686 } 4687 } 4688 4689 if (i < MEMCG_CGWB_FRN_CNT) { 4690 /* 4691 * Re-using an existing one. Update timestamp lazily to 4692 * avoid making the cacheline hot. We want them to be 4693 * reasonably up-to-date and significantly shorter than 4694 * dirty_expire_interval as that's what expires the record. 4695 * Use the shorter of 1s and dirty_expire_interval / 8. 4696 */ 4697 unsigned long update_intv = 4698 min_t(unsigned long, HZ, 4699 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4700 4701 if (time_before64(frn->at, now - update_intv)) 4702 frn->at = now; 4703 } else if (oldest >= 0) { 4704 /* replace the oldest free one */ 4705 frn = &memcg->cgwb_frn[oldest]; 4706 frn->bdi_id = wb->bdi->id; 4707 frn->memcg_id = wb->memcg_css->id; 4708 frn->at = now; 4709 } 4710 } 4711 4712 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4713 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4714 { 4715 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4716 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4717 u64 now = jiffies_64; 4718 int i; 4719 4720 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4721 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4722 4723 /* 4724 * If the record is older than dirty_expire_interval, 4725 * writeback on it has already started. No need to kick it 4726 * off again. Also, don't start a new one if there's 4727 * already one in flight. 4728 */ 4729 if (time_after64(frn->at, now - intv) && 4730 atomic_read(&frn->done.cnt) == 1) { 4731 frn->at = 0; 4732 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4733 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 4734 WB_REASON_FOREIGN_FLUSH, 4735 &frn->done); 4736 } 4737 } 4738 } 4739 4740 #else /* CONFIG_CGROUP_WRITEBACK */ 4741 4742 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4743 { 4744 return 0; 4745 } 4746 4747 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4748 { 4749 } 4750 4751 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4752 { 4753 } 4754 4755 #endif /* CONFIG_CGROUP_WRITEBACK */ 4756 4757 /* 4758 * DO NOT USE IN NEW FILES. 4759 * 4760 * "cgroup.event_control" implementation. 4761 * 4762 * This is way over-engineered. It tries to support fully configurable 4763 * events for each user. Such level of flexibility is completely 4764 * unnecessary especially in the light of the planned unified hierarchy. 4765 * 4766 * Please deprecate this and replace with something simpler if at all 4767 * possible. 4768 */ 4769 4770 /* 4771 * Unregister event and free resources. 4772 * 4773 * Gets called from workqueue. 4774 */ 4775 static void memcg_event_remove(struct work_struct *work) 4776 { 4777 struct mem_cgroup_event *event = 4778 container_of(work, struct mem_cgroup_event, remove); 4779 struct mem_cgroup *memcg = event->memcg; 4780 4781 remove_wait_queue(event->wqh, &event->wait); 4782 4783 event->unregister_event(memcg, event->eventfd); 4784 4785 /* Notify userspace the event is going away. */ 4786 eventfd_signal(event->eventfd, 1); 4787 4788 eventfd_ctx_put(event->eventfd); 4789 kfree(event); 4790 css_put(&memcg->css); 4791 } 4792 4793 /* 4794 * Gets called on EPOLLHUP on eventfd when user closes it. 4795 * 4796 * Called with wqh->lock held and interrupts disabled. 4797 */ 4798 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4799 int sync, void *key) 4800 { 4801 struct mem_cgroup_event *event = 4802 container_of(wait, struct mem_cgroup_event, wait); 4803 struct mem_cgroup *memcg = event->memcg; 4804 __poll_t flags = key_to_poll(key); 4805 4806 if (flags & EPOLLHUP) { 4807 /* 4808 * If the event has been detached at cgroup removal, we 4809 * can simply return knowing the other side will cleanup 4810 * for us. 4811 * 4812 * We can't race against event freeing since the other 4813 * side will require wqh->lock via remove_wait_queue(), 4814 * which we hold. 4815 */ 4816 spin_lock(&memcg->event_list_lock); 4817 if (!list_empty(&event->list)) { 4818 list_del_init(&event->list); 4819 /* 4820 * We are in atomic context, but cgroup_event_remove() 4821 * may sleep, so we have to call it in workqueue. 4822 */ 4823 schedule_work(&event->remove); 4824 } 4825 spin_unlock(&memcg->event_list_lock); 4826 } 4827 4828 return 0; 4829 } 4830 4831 static void memcg_event_ptable_queue_proc(struct file *file, 4832 wait_queue_head_t *wqh, poll_table *pt) 4833 { 4834 struct mem_cgroup_event *event = 4835 container_of(pt, struct mem_cgroup_event, pt); 4836 4837 event->wqh = wqh; 4838 add_wait_queue(wqh, &event->wait); 4839 } 4840 4841 /* 4842 * DO NOT USE IN NEW FILES. 4843 * 4844 * Parse input and register new cgroup event handler. 4845 * 4846 * Input must be in format '<event_fd> <control_fd> <args>'. 4847 * Interpretation of args is defined by control file implementation. 4848 */ 4849 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4850 char *buf, size_t nbytes, loff_t off) 4851 { 4852 struct cgroup_subsys_state *css = of_css(of); 4853 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4854 struct mem_cgroup_event *event; 4855 struct cgroup_subsys_state *cfile_css; 4856 unsigned int efd, cfd; 4857 struct fd efile; 4858 struct fd cfile; 4859 struct dentry *cdentry; 4860 const char *name; 4861 char *endp; 4862 int ret; 4863 4864 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 4865 return -EOPNOTSUPP; 4866 4867 buf = strstrip(buf); 4868 4869 efd = simple_strtoul(buf, &endp, 10); 4870 if (*endp != ' ') 4871 return -EINVAL; 4872 buf = endp + 1; 4873 4874 cfd = simple_strtoul(buf, &endp, 10); 4875 if ((*endp != ' ') && (*endp != '\0')) 4876 return -EINVAL; 4877 buf = endp + 1; 4878 4879 event = kzalloc(sizeof(*event), GFP_KERNEL); 4880 if (!event) 4881 return -ENOMEM; 4882 4883 event->memcg = memcg; 4884 INIT_LIST_HEAD(&event->list); 4885 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4886 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4887 INIT_WORK(&event->remove, memcg_event_remove); 4888 4889 efile = fdget(efd); 4890 if (!efile.file) { 4891 ret = -EBADF; 4892 goto out_kfree; 4893 } 4894 4895 event->eventfd = eventfd_ctx_fileget(efile.file); 4896 if (IS_ERR(event->eventfd)) { 4897 ret = PTR_ERR(event->eventfd); 4898 goto out_put_efile; 4899 } 4900 4901 cfile = fdget(cfd); 4902 if (!cfile.file) { 4903 ret = -EBADF; 4904 goto out_put_eventfd; 4905 } 4906 4907 /* the process need read permission on control file */ 4908 /* AV: shouldn't we check that it's been opened for read instead? */ 4909 ret = file_permission(cfile.file, MAY_READ); 4910 if (ret < 0) 4911 goto out_put_cfile; 4912 4913 /* 4914 * The control file must be a regular cgroup1 file. As a regular cgroup 4915 * file can't be renamed, it's safe to access its name afterwards. 4916 */ 4917 cdentry = cfile.file->f_path.dentry; 4918 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { 4919 ret = -EINVAL; 4920 goto out_put_cfile; 4921 } 4922 4923 /* 4924 * Determine the event callbacks and set them in @event. This used 4925 * to be done via struct cftype but cgroup core no longer knows 4926 * about these events. The following is crude but the whole thing 4927 * is for compatibility anyway. 4928 * 4929 * DO NOT ADD NEW FILES. 4930 */ 4931 name = cdentry->d_name.name; 4932 4933 if (!strcmp(name, "memory.usage_in_bytes")) { 4934 event->register_event = mem_cgroup_usage_register_event; 4935 event->unregister_event = mem_cgroup_usage_unregister_event; 4936 } else if (!strcmp(name, "memory.oom_control")) { 4937 event->register_event = mem_cgroup_oom_register_event; 4938 event->unregister_event = mem_cgroup_oom_unregister_event; 4939 } else if (!strcmp(name, "memory.pressure_level")) { 4940 event->register_event = vmpressure_register_event; 4941 event->unregister_event = vmpressure_unregister_event; 4942 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4943 event->register_event = memsw_cgroup_usage_register_event; 4944 event->unregister_event = memsw_cgroup_usage_unregister_event; 4945 } else { 4946 ret = -EINVAL; 4947 goto out_put_cfile; 4948 } 4949 4950 /* 4951 * Verify @cfile should belong to @css. Also, remaining events are 4952 * automatically removed on cgroup destruction but the removal is 4953 * asynchronous, so take an extra ref on @css. 4954 */ 4955 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, 4956 &memory_cgrp_subsys); 4957 ret = -EINVAL; 4958 if (IS_ERR(cfile_css)) 4959 goto out_put_cfile; 4960 if (cfile_css != css) { 4961 css_put(cfile_css); 4962 goto out_put_cfile; 4963 } 4964 4965 ret = event->register_event(memcg, event->eventfd, buf); 4966 if (ret) 4967 goto out_put_css; 4968 4969 vfs_poll(efile.file, &event->pt); 4970 4971 spin_lock_irq(&memcg->event_list_lock); 4972 list_add(&event->list, &memcg->event_list); 4973 spin_unlock_irq(&memcg->event_list_lock); 4974 4975 fdput(cfile); 4976 fdput(efile); 4977 4978 return nbytes; 4979 4980 out_put_css: 4981 css_put(css); 4982 out_put_cfile: 4983 fdput(cfile); 4984 out_put_eventfd: 4985 eventfd_ctx_put(event->eventfd); 4986 out_put_efile: 4987 fdput(efile); 4988 out_kfree: 4989 kfree(event); 4990 4991 return ret; 4992 } 4993 4994 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 4995 static int mem_cgroup_slab_show(struct seq_file *m, void *p) 4996 { 4997 /* 4998 * Deprecated. 4999 * Please, take a look at tools/cgroup/memcg_slabinfo.py . 5000 */ 5001 return 0; 5002 } 5003 #endif 5004 5005 static int memory_stat_show(struct seq_file *m, void *v); 5006 5007 static struct cftype mem_cgroup_legacy_files[] = { 5008 { 5009 .name = "usage_in_bytes", 5010 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 5011 .read_u64 = mem_cgroup_read_u64, 5012 }, 5013 { 5014 .name = "max_usage_in_bytes", 5015 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 5016 .write = mem_cgroup_reset, 5017 .read_u64 = mem_cgroup_read_u64, 5018 }, 5019 { 5020 .name = "limit_in_bytes", 5021 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 5022 .write = mem_cgroup_write, 5023 .read_u64 = mem_cgroup_read_u64, 5024 }, 5025 { 5026 .name = "soft_limit_in_bytes", 5027 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 5028 .write = mem_cgroup_write, 5029 .read_u64 = mem_cgroup_read_u64, 5030 }, 5031 { 5032 .name = "failcnt", 5033 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 5034 .write = mem_cgroup_reset, 5035 .read_u64 = mem_cgroup_read_u64, 5036 }, 5037 { 5038 .name = "stat", 5039 .seq_show = memory_stat_show, 5040 }, 5041 { 5042 .name = "force_empty", 5043 .write = mem_cgroup_force_empty_write, 5044 }, 5045 { 5046 .name = "use_hierarchy", 5047 .write_u64 = mem_cgroup_hierarchy_write, 5048 .read_u64 = mem_cgroup_hierarchy_read, 5049 }, 5050 { 5051 .name = "cgroup.event_control", /* XXX: for compat */ 5052 .write = memcg_write_event_control, 5053 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 5054 }, 5055 { 5056 .name = "swappiness", 5057 .read_u64 = mem_cgroup_swappiness_read, 5058 .write_u64 = mem_cgroup_swappiness_write, 5059 }, 5060 { 5061 .name = "move_charge_at_immigrate", 5062 .read_u64 = mem_cgroup_move_charge_read, 5063 .write_u64 = mem_cgroup_move_charge_write, 5064 }, 5065 { 5066 .name = "oom_control", 5067 .seq_show = mem_cgroup_oom_control_read, 5068 .write_u64 = mem_cgroup_oom_control_write, 5069 }, 5070 { 5071 .name = "pressure_level", 5072 .seq_show = mem_cgroup_dummy_seq_show, 5073 }, 5074 #ifdef CONFIG_NUMA 5075 { 5076 .name = "numa_stat", 5077 .seq_show = memcg_numa_stat_show, 5078 }, 5079 #endif 5080 { 5081 .name = "kmem.usage_in_bytes", 5082 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5083 .read_u64 = mem_cgroup_read_u64, 5084 }, 5085 { 5086 .name = "kmem.failcnt", 5087 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5088 .write = mem_cgroup_reset, 5089 .read_u64 = mem_cgroup_read_u64, 5090 }, 5091 { 5092 .name = "kmem.max_usage_in_bytes", 5093 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5094 .write = mem_cgroup_reset, 5095 .read_u64 = mem_cgroup_read_u64, 5096 }, 5097 #if defined(CONFIG_MEMCG_KMEM) && \ 5098 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5099 { 5100 .name = "kmem.slabinfo", 5101 .seq_show = mem_cgroup_slab_show, 5102 }, 5103 #endif 5104 { 5105 .name = "kmem.tcp.limit_in_bytes", 5106 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5107 .write = mem_cgroup_write, 5108 .read_u64 = mem_cgroup_read_u64, 5109 }, 5110 { 5111 .name = "kmem.tcp.usage_in_bytes", 5112 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5113 .read_u64 = mem_cgroup_read_u64, 5114 }, 5115 { 5116 .name = "kmem.tcp.failcnt", 5117 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5118 .write = mem_cgroup_reset, 5119 .read_u64 = mem_cgroup_read_u64, 5120 }, 5121 { 5122 .name = "kmem.tcp.max_usage_in_bytes", 5123 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5124 .write = mem_cgroup_reset, 5125 .read_u64 = mem_cgroup_read_u64, 5126 }, 5127 { }, /* terminate */ 5128 }; 5129 5130 /* 5131 * Private memory cgroup IDR 5132 * 5133 * Swap-out records and page cache shadow entries need to store memcg 5134 * references in constrained space, so we maintain an ID space that is 5135 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5136 * memory-controlled cgroups to 64k. 5137 * 5138 * However, there usually are many references to the offline CSS after 5139 * the cgroup has been destroyed, such as page cache or reclaimable 5140 * slab objects, that don't need to hang on to the ID. We want to keep 5141 * those dead CSS from occupying IDs, or we might quickly exhaust the 5142 * relatively small ID space and prevent the creation of new cgroups 5143 * even when there are much fewer than 64k cgroups - possibly none. 5144 * 5145 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5146 * be freed and recycled when it's no longer needed, which is usually 5147 * when the CSS is offlined. 5148 * 5149 * The only exception to that are records of swapped out tmpfs/shmem 5150 * pages that need to be attributed to live ancestors on swapin. But 5151 * those references are manageable from userspace. 5152 */ 5153 5154 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1) 5155 static DEFINE_IDR(mem_cgroup_idr); 5156 5157 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5158 { 5159 if (memcg->id.id > 0) { 5160 idr_remove(&mem_cgroup_idr, memcg->id.id); 5161 memcg->id.id = 0; 5162 } 5163 } 5164 5165 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5166 unsigned int n) 5167 { 5168 refcount_add(n, &memcg->id.ref); 5169 } 5170 5171 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5172 { 5173 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5174 mem_cgroup_id_remove(memcg); 5175 5176 /* Memcg ID pins CSS */ 5177 css_put(&memcg->css); 5178 } 5179 } 5180 5181 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5182 { 5183 mem_cgroup_id_put_many(memcg, 1); 5184 } 5185 5186 /** 5187 * mem_cgroup_from_id - look up a memcg from a memcg id 5188 * @id: the memcg id to look up 5189 * 5190 * Caller must hold rcu_read_lock(). 5191 */ 5192 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5193 { 5194 WARN_ON_ONCE(!rcu_read_lock_held()); 5195 return idr_find(&mem_cgroup_idr, id); 5196 } 5197 5198 #ifdef CONFIG_SHRINKER_DEBUG 5199 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) 5200 { 5201 struct cgroup *cgrp; 5202 struct cgroup_subsys_state *css; 5203 struct mem_cgroup *memcg; 5204 5205 cgrp = cgroup_get_from_id(ino); 5206 if (IS_ERR(cgrp)) 5207 return ERR_CAST(cgrp); 5208 5209 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys); 5210 if (css) 5211 memcg = container_of(css, struct mem_cgroup, css); 5212 else 5213 memcg = ERR_PTR(-ENOENT); 5214 5215 cgroup_put(cgrp); 5216 5217 return memcg; 5218 } 5219 #endif 5220 5221 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5222 { 5223 struct mem_cgroup_per_node *pn; 5224 5225 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node); 5226 if (!pn) 5227 return 1; 5228 5229 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, 5230 GFP_KERNEL_ACCOUNT); 5231 if (!pn->lruvec_stats_percpu) { 5232 kfree(pn); 5233 return 1; 5234 } 5235 5236 lruvec_init(&pn->lruvec); 5237 pn->memcg = memcg; 5238 5239 memcg->nodeinfo[node] = pn; 5240 return 0; 5241 } 5242 5243 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5244 { 5245 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5246 5247 if (!pn) 5248 return; 5249 5250 free_percpu(pn->lruvec_stats_percpu); 5251 kfree(pn); 5252 } 5253 5254 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5255 { 5256 int node; 5257 5258 for_each_node(node) 5259 free_mem_cgroup_per_node_info(memcg, node); 5260 kfree(memcg->vmstats); 5261 free_percpu(memcg->vmstats_percpu); 5262 kfree(memcg); 5263 } 5264 5265 static void mem_cgroup_free(struct mem_cgroup *memcg) 5266 { 5267 lru_gen_exit_memcg(memcg); 5268 memcg_wb_domain_exit(memcg); 5269 __mem_cgroup_free(memcg); 5270 } 5271 5272 static struct mem_cgroup *mem_cgroup_alloc(void) 5273 { 5274 struct mem_cgroup *memcg; 5275 int node; 5276 int __maybe_unused i; 5277 long error = -ENOMEM; 5278 5279 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL); 5280 if (!memcg) 5281 return ERR_PTR(error); 5282 5283 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5284 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL); 5285 if (memcg->id.id < 0) { 5286 error = memcg->id.id; 5287 goto fail; 5288 } 5289 5290 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL); 5291 if (!memcg->vmstats) 5292 goto fail; 5293 5294 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5295 GFP_KERNEL_ACCOUNT); 5296 if (!memcg->vmstats_percpu) 5297 goto fail; 5298 5299 for_each_node(node) 5300 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5301 goto fail; 5302 5303 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5304 goto fail; 5305 5306 INIT_WORK(&memcg->high_work, high_work_func); 5307 INIT_LIST_HEAD(&memcg->oom_notify); 5308 mutex_init(&memcg->thresholds_lock); 5309 spin_lock_init(&memcg->move_lock); 5310 vmpressure_init(&memcg->vmpressure); 5311 INIT_LIST_HEAD(&memcg->event_list); 5312 spin_lock_init(&memcg->event_list_lock); 5313 memcg->socket_pressure = jiffies; 5314 #ifdef CONFIG_MEMCG_KMEM 5315 memcg->kmemcg_id = -1; 5316 INIT_LIST_HEAD(&memcg->objcg_list); 5317 #endif 5318 #ifdef CONFIG_CGROUP_WRITEBACK 5319 INIT_LIST_HEAD(&memcg->cgwb_list); 5320 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5321 memcg->cgwb_frn[i].done = 5322 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5323 #endif 5324 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5325 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5326 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5327 memcg->deferred_split_queue.split_queue_len = 0; 5328 #endif 5329 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5330 lru_gen_init_memcg(memcg); 5331 return memcg; 5332 fail: 5333 mem_cgroup_id_remove(memcg); 5334 __mem_cgroup_free(memcg); 5335 return ERR_PTR(error); 5336 } 5337 5338 static struct cgroup_subsys_state * __ref 5339 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5340 { 5341 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5342 struct mem_cgroup *memcg, *old_memcg; 5343 5344 old_memcg = set_active_memcg(parent); 5345 memcg = mem_cgroup_alloc(); 5346 set_active_memcg(old_memcg); 5347 if (IS_ERR(memcg)) 5348 return ERR_CAST(memcg); 5349 5350 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5351 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); 5352 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 5353 memcg->zswap_max = PAGE_COUNTER_MAX; 5354 #endif 5355 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5356 if (parent) { 5357 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); 5358 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); 5359 5360 page_counter_init(&memcg->memory, &parent->memory); 5361 page_counter_init(&memcg->swap, &parent->swap); 5362 page_counter_init(&memcg->kmem, &parent->kmem); 5363 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5364 } else { 5365 init_memcg_events(); 5366 page_counter_init(&memcg->memory, NULL); 5367 page_counter_init(&memcg->swap, NULL); 5368 page_counter_init(&memcg->kmem, NULL); 5369 page_counter_init(&memcg->tcpmem, NULL); 5370 5371 root_mem_cgroup = memcg; 5372 return &memcg->css; 5373 } 5374 5375 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5376 static_branch_inc(&memcg_sockets_enabled_key); 5377 5378 #if defined(CONFIG_MEMCG_KMEM) 5379 if (!cgroup_memory_nobpf) 5380 static_branch_inc(&memcg_bpf_enabled_key); 5381 #endif 5382 5383 return &memcg->css; 5384 } 5385 5386 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5387 { 5388 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5389 5390 if (memcg_online_kmem(memcg)) 5391 goto remove_id; 5392 5393 /* 5394 * A memcg must be visible for expand_shrinker_info() 5395 * by the time the maps are allocated. So, we allocate maps 5396 * here, when for_each_mem_cgroup() can't skip it. 5397 */ 5398 if (alloc_shrinker_info(memcg)) 5399 goto offline_kmem; 5400 5401 /* Online state pins memcg ID, memcg ID pins CSS */ 5402 refcount_set(&memcg->id.ref, 1); 5403 css_get(css); 5404 5405 if (unlikely(mem_cgroup_is_root(memcg))) 5406 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 5407 FLUSH_TIME); 5408 lru_gen_online_memcg(memcg); 5409 return 0; 5410 offline_kmem: 5411 memcg_offline_kmem(memcg); 5412 remove_id: 5413 mem_cgroup_id_remove(memcg); 5414 return -ENOMEM; 5415 } 5416 5417 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5418 { 5419 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5420 struct mem_cgroup_event *event, *tmp; 5421 5422 /* 5423 * Unregister events and notify userspace. 5424 * Notify userspace about cgroup removing only after rmdir of cgroup 5425 * directory to avoid race between userspace and kernelspace. 5426 */ 5427 spin_lock_irq(&memcg->event_list_lock); 5428 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5429 list_del_init(&event->list); 5430 schedule_work(&event->remove); 5431 } 5432 spin_unlock_irq(&memcg->event_list_lock); 5433 5434 page_counter_set_min(&memcg->memory, 0); 5435 page_counter_set_low(&memcg->memory, 0); 5436 5437 memcg_offline_kmem(memcg); 5438 reparent_shrinker_deferred(memcg); 5439 wb_memcg_offline(memcg); 5440 lru_gen_offline_memcg(memcg); 5441 5442 drain_all_stock(memcg); 5443 5444 mem_cgroup_id_put(memcg); 5445 } 5446 5447 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5448 { 5449 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5450 5451 invalidate_reclaim_iterators(memcg); 5452 lru_gen_release_memcg(memcg); 5453 } 5454 5455 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5456 { 5457 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5458 int __maybe_unused i; 5459 5460 #ifdef CONFIG_CGROUP_WRITEBACK 5461 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5462 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5463 #endif 5464 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5465 static_branch_dec(&memcg_sockets_enabled_key); 5466 5467 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5468 static_branch_dec(&memcg_sockets_enabled_key); 5469 5470 #if defined(CONFIG_MEMCG_KMEM) 5471 if (!cgroup_memory_nobpf) 5472 static_branch_dec(&memcg_bpf_enabled_key); 5473 #endif 5474 5475 vmpressure_cleanup(&memcg->vmpressure); 5476 cancel_work_sync(&memcg->high_work); 5477 mem_cgroup_remove_from_trees(memcg); 5478 free_shrinker_info(memcg); 5479 mem_cgroup_free(memcg); 5480 } 5481 5482 /** 5483 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5484 * @css: the target css 5485 * 5486 * Reset the states of the mem_cgroup associated with @css. This is 5487 * invoked when the userland requests disabling on the default hierarchy 5488 * but the memcg is pinned through dependency. The memcg should stop 5489 * applying policies and should revert to the vanilla state as it may be 5490 * made visible again. 5491 * 5492 * The current implementation only resets the essential configurations. 5493 * This needs to be expanded to cover all the visible parts. 5494 */ 5495 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5496 { 5497 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5498 5499 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5500 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5501 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5502 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5503 page_counter_set_min(&memcg->memory, 0); 5504 page_counter_set_low(&memcg->memory, 0); 5505 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5506 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); 5507 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5508 memcg_wb_domain_size_changed(memcg); 5509 } 5510 5511 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 5512 { 5513 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5514 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5515 struct memcg_vmstats_percpu *statc; 5516 long delta, delta_cpu, v; 5517 int i, nid; 5518 5519 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5520 5521 for (i = 0; i < MEMCG_NR_STAT; i++) { 5522 /* 5523 * Collect the aggregated propagation counts of groups 5524 * below us. We're in a per-cpu loop here and this is 5525 * a global counter, so the first cycle will get them. 5526 */ 5527 delta = memcg->vmstats->state_pending[i]; 5528 if (delta) 5529 memcg->vmstats->state_pending[i] = 0; 5530 5531 /* Add CPU changes on this level since the last flush */ 5532 delta_cpu = 0; 5533 v = READ_ONCE(statc->state[i]); 5534 if (v != statc->state_prev[i]) { 5535 delta_cpu = v - statc->state_prev[i]; 5536 delta += delta_cpu; 5537 statc->state_prev[i] = v; 5538 } 5539 5540 /* Aggregate counts on this level and propagate upwards */ 5541 if (delta_cpu) 5542 memcg->vmstats->state_local[i] += delta_cpu; 5543 5544 if (delta) { 5545 memcg->vmstats->state[i] += delta; 5546 if (parent) 5547 parent->vmstats->state_pending[i] += delta; 5548 } 5549 } 5550 5551 for (i = 0; i < NR_MEMCG_EVENTS; i++) { 5552 delta = memcg->vmstats->events_pending[i]; 5553 if (delta) 5554 memcg->vmstats->events_pending[i] = 0; 5555 5556 delta_cpu = 0; 5557 v = READ_ONCE(statc->events[i]); 5558 if (v != statc->events_prev[i]) { 5559 delta_cpu = v - statc->events_prev[i]; 5560 delta += delta_cpu; 5561 statc->events_prev[i] = v; 5562 } 5563 5564 if (delta_cpu) 5565 memcg->vmstats->events_local[i] += delta_cpu; 5566 5567 if (delta) { 5568 memcg->vmstats->events[i] += delta; 5569 if (parent) 5570 parent->vmstats->events_pending[i] += delta; 5571 } 5572 } 5573 5574 for_each_node_state(nid, N_MEMORY) { 5575 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 5576 struct mem_cgroup_per_node *ppn = NULL; 5577 struct lruvec_stats_percpu *lstatc; 5578 5579 if (parent) 5580 ppn = parent->nodeinfo[nid]; 5581 5582 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); 5583 5584 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 5585 delta = pn->lruvec_stats.state_pending[i]; 5586 if (delta) 5587 pn->lruvec_stats.state_pending[i] = 0; 5588 5589 delta_cpu = 0; 5590 v = READ_ONCE(lstatc->state[i]); 5591 if (v != lstatc->state_prev[i]) { 5592 delta_cpu = v - lstatc->state_prev[i]; 5593 delta += delta_cpu; 5594 lstatc->state_prev[i] = v; 5595 } 5596 5597 if (delta_cpu) 5598 pn->lruvec_stats.state_local[i] += delta_cpu; 5599 5600 if (delta) { 5601 pn->lruvec_stats.state[i] += delta; 5602 if (ppn) 5603 ppn->lruvec_stats.state_pending[i] += delta; 5604 } 5605 } 5606 } 5607 } 5608 5609 #ifdef CONFIG_MMU 5610 /* Handlers for move charge at task migration. */ 5611 static int mem_cgroup_do_precharge(unsigned long count) 5612 { 5613 int ret; 5614 5615 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5616 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5617 if (!ret) { 5618 mc.precharge += count; 5619 return ret; 5620 } 5621 5622 /* Try charges one by one with reclaim, but do not retry */ 5623 while (count--) { 5624 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5625 if (ret) 5626 return ret; 5627 mc.precharge++; 5628 cond_resched(); 5629 } 5630 return 0; 5631 } 5632 5633 union mc_target { 5634 struct page *page; 5635 swp_entry_t ent; 5636 }; 5637 5638 enum mc_target_type { 5639 MC_TARGET_NONE = 0, 5640 MC_TARGET_PAGE, 5641 MC_TARGET_SWAP, 5642 MC_TARGET_DEVICE, 5643 }; 5644 5645 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5646 unsigned long addr, pte_t ptent) 5647 { 5648 struct page *page = vm_normal_page(vma, addr, ptent); 5649 5650 if (!page) 5651 return NULL; 5652 if (PageAnon(page)) { 5653 if (!(mc.flags & MOVE_ANON)) 5654 return NULL; 5655 } else { 5656 if (!(mc.flags & MOVE_FILE)) 5657 return NULL; 5658 } 5659 get_page(page); 5660 5661 return page; 5662 } 5663 5664 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5665 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5666 pte_t ptent, swp_entry_t *entry) 5667 { 5668 struct page *page = NULL; 5669 swp_entry_t ent = pte_to_swp_entry(ptent); 5670 5671 if (!(mc.flags & MOVE_ANON)) 5672 return NULL; 5673 5674 /* 5675 * Handle device private pages that are not accessible by the CPU, but 5676 * stored as special swap entries in the page table. 5677 */ 5678 if (is_device_private_entry(ent)) { 5679 page = pfn_swap_entry_to_page(ent); 5680 if (!get_page_unless_zero(page)) 5681 return NULL; 5682 return page; 5683 } 5684 5685 if (non_swap_entry(ent)) 5686 return NULL; 5687 5688 /* 5689 * Because swap_cache_get_folio() updates some statistics counter, 5690 * we call find_get_page() with swapper_space directly. 5691 */ 5692 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5693 entry->val = ent.val; 5694 5695 return page; 5696 } 5697 #else 5698 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5699 pte_t ptent, swp_entry_t *entry) 5700 { 5701 return NULL; 5702 } 5703 #endif 5704 5705 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5706 unsigned long addr, pte_t ptent) 5707 { 5708 unsigned long index; 5709 struct folio *folio; 5710 5711 if (!vma->vm_file) /* anonymous vma */ 5712 return NULL; 5713 if (!(mc.flags & MOVE_FILE)) 5714 return NULL; 5715 5716 /* folio is moved even if it's not RSS of this task(page-faulted). */ 5717 /* shmem/tmpfs may report page out on swap: account for that too. */ 5718 index = linear_page_index(vma, addr); 5719 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index); 5720 if (IS_ERR(folio)) 5721 return NULL; 5722 return folio_file_page(folio, index); 5723 } 5724 5725 /** 5726 * mem_cgroup_move_account - move account of the page 5727 * @page: the page 5728 * @compound: charge the page as compound or small page 5729 * @from: mem_cgroup which the page is moved from. 5730 * @to: mem_cgroup which the page is moved to. @from != @to. 5731 * 5732 * The page must be locked and not on the LRU. 5733 * 5734 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5735 * from old cgroup. 5736 */ 5737 static int mem_cgroup_move_account(struct page *page, 5738 bool compound, 5739 struct mem_cgroup *from, 5740 struct mem_cgroup *to) 5741 { 5742 struct folio *folio = page_folio(page); 5743 struct lruvec *from_vec, *to_vec; 5744 struct pglist_data *pgdat; 5745 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1; 5746 int nid, ret; 5747 5748 VM_BUG_ON(from == to); 5749 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 5750 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 5751 VM_BUG_ON(compound && !folio_test_large(folio)); 5752 5753 ret = -EINVAL; 5754 if (folio_memcg(folio) != from) 5755 goto out; 5756 5757 pgdat = folio_pgdat(folio); 5758 from_vec = mem_cgroup_lruvec(from, pgdat); 5759 to_vec = mem_cgroup_lruvec(to, pgdat); 5760 5761 folio_memcg_lock(folio); 5762 5763 if (folio_test_anon(folio)) { 5764 if (folio_mapped(folio)) { 5765 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5766 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5767 if (folio_test_pmd_mappable(folio)) { 5768 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5769 -nr_pages); 5770 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5771 nr_pages); 5772 } 5773 } 5774 } else { 5775 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5776 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5777 5778 if (folio_test_swapbacked(folio)) { 5779 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5780 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5781 } 5782 5783 if (folio_mapped(folio)) { 5784 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5785 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5786 } 5787 5788 if (folio_test_dirty(folio)) { 5789 struct address_space *mapping = folio_mapping(folio); 5790 5791 if (mapping_can_writeback(mapping)) { 5792 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5793 -nr_pages); 5794 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5795 nr_pages); 5796 } 5797 } 5798 } 5799 5800 #ifdef CONFIG_SWAP 5801 if (folio_test_swapcache(folio)) { 5802 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages); 5803 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages); 5804 } 5805 #endif 5806 if (folio_test_writeback(folio)) { 5807 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5808 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5809 } 5810 5811 /* 5812 * All state has been migrated, let's switch to the new memcg. 5813 * 5814 * It is safe to change page's memcg here because the page 5815 * is referenced, charged, isolated, and locked: we can't race 5816 * with (un)charging, migration, LRU putback, or anything else 5817 * that would rely on a stable page's memory cgroup. 5818 * 5819 * Note that folio_memcg_lock is a memcg lock, not a page lock, 5820 * to save space. As soon as we switch page's memory cgroup to a 5821 * new memcg that isn't locked, the above state can change 5822 * concurrently again. Make sure we're truly done with it. 5823 */ 5824 smp_mb(); 5825 5826 css_get(&to->css); 5827 css_put(&from->css); 5828 5829 folio->memcg_data = (unsigned long)to; 5830 5831 __folio_memcg_unlock(from); 5832 5833 ret = 0; 5834 nid = folio_nid(folio); 5835 5836 local_irq_disable(); 5837 mem_cgroup_charge_statistics(to, nr_pages); 5838 memcg_check_events(to, nid); 5839 mem_cgroup_charge_statistics(from, -nr_pages); 5840 memcg_check_events(from, nid); 5841 local_irq_enable(); 5842 out: 5843 return ret; 5844 } 5845 5846 /** 5847 * get_mctgt_type - get target type of moving charge 5848 * @vma: the vma the pte to be checked belongs 5849 * @addr: the address corresponding to the pte to be checked 5850 * @ptent: the pte to be checked 5851 * @target: the pointer the target page or swap ent will be stored(can be NULL) 5852 * 5853 * Context: Called with pte lock held. 5854 * Return: 5855 * * MC_TARGET_NONE - If the pte is not a target for move charge. 5856 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for 5857 * move charge. If @target is not NULL, the page is stored in target->page 5858 * with extra refcnt taken (Caller should release it). 5859 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a 5860 * target for charge migration. If @target is not NULL, the entry is 5861 * stored in target->ent. 5862 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and 5863 * thus not on the lru. For now such page is charged like a regular page 5864 * would be as it is just special memory taking the place of a regular page. 5865 * See Documentations/vm/hmm.txt and include/linux/hmm.h 5866 */ 5867 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 5868 unsigned long addr, pte_t ptent, union mc_target *target) 5869 { 5870 struct page *page = NULL; 5871 enum mc_target_type ret = MC_TARGET_NONE; 5872 swp_entry_t ent = { .val = 0 }; 5873 5874 if (pte_present(ptent)) 5875 page = mc_handle_present_pte(vma, addr, ptent); 5876 else if (pte_none_mostly(ptent)) 5877 /* 5878 * PTE markers should be treated as a none pte here, separated 5879 * from other swap handling below. 5880 */ 5881 page = mc_handle_file_pte(vma, addr, ptent); 5882 else if (is_swap_pte(ptent)) 5883 page = mc_handle_swap_pte(vma, ptent, &ent); 5884 5885 if (target && page) { 5886 if (!trylock_page(page)) { 5887 put_page(page); 5888 return ret; 5889 } 5890 /* 5891 * page_mapped() must be stable during the move. This 5892 * pte is locked, so if it's present, the page cannot 5893 * become unmapped. If it isn't, we have only partial 5894 * control over the mapped state: the page lock will 5895 * prevent new faults against pagecache and swapcache, 5896 * so an unmapped page cannot become mapped. However, 5897 * if the page is already mapped elsewhere, it can 5898 * unmap, and there is nothing we can do about it. 5899 * Alas, skip moving the page in this case. 5900 */ 5901 if (!pte_present(ptent) && page_mapped(page)) { 5902 unlock_page(page); 5903 put_page(page); 5904 return ret; 5905 } 5906 } 5907 5908 if (!page && !ent.val) 5909 return ret; 5910 if (page) { 5911 /* 5912 * Do only loose check w/o serialization. 5913 * mem_cgroup_move_account() checks the page is valid or 5914 * not under LRU exclusion. 5915 */ 5916 if (page_memcg(page) == mc.from) { 5917 ret = MC_TARGET_PAGE; 5918 if (is_device_private_page(page) || 5919 is_device_coherent_page(page)) 5920 ret = MC_TARGET_DEVICE; 5921 if (target) 5922 target->page = page; 5923 } 5924 if (!ret || !target) { 5925 if (target) 5926 unlock_page(page); 5927 put_page(page); 5928 } 5929 } 5930 /* 5931 * There is a swap entry and a page doesn't exist or isn't charged. 5932 * But we cannot move a tail-page in a THP. 5933 */ 5934 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5935 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5936 ret = MC_TARGET_SWAP; 5937 if (target) 5938 target->ent = ent; 5939 } 5940 return ret; 5941 } 5942 5943 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5944 /* 5945 * We don't consider PMD mapped swapping or file mapped pages because THP does 5946 * not support them for now. 5947 * Caller should make sure that pmd_trans_huge(pmd) is true. 5948 */ 5949 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5950 unsigned long addr, pmd_t pmd, union mc_target *target) 5951 { 5952 struct page *page = NULL; 5953 enum mc_target_type ret = MC_TARGET_NONE; 5954 5955 if (unlikely(is_swap_pmd(pmd))) { 5956 VM_BUG_ON(thp_migration_supported() && 5957 !is_pmd_migration_entry(pmd)); 5958 return ret; 5959 } 5960 page = pmd_page(pmd); 5961 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5962 if (!(mc.flags & MOVE_ANON)) 5963 return ret; 5964 if (page_memcg(page) == mc.from) { 5965 ret = MC_TARGET_PAGE; 5966 if (target) { 5967 get_page(page); 5968 if (!trylock_page(page)) { 5969 put_page(page); 5970 return MC_TARGET_NONE; 5971 } 5972 target->page = page; 5973 } 5974 } 5975 return ret; 5976 } 5977 #else 5978 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5979 unsigned long addr, pmd_t pmd, union mc_target *target) 5980 { 5981 return MC_TARGET_NONE; 5982 } 5983 #endif 5984 5985 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5986 unsigned long addr, unsigned long end, 5987 struct mm_walk *walk) 5988 { 5989 struct vm_area_struct *vma = walk->vma; 5990 pte_t *pte; 5991 spinlock_t *ptl; 5992 5993 ptl = pmd_trans_huge_lock(pmd, vma); 5994 if (ptl) { 5995 /* 5996 * Note their can not be MC_TARGET_DEVICE for now as we do not 5997 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5998 * this might change. 5999 */ 6000 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 6001 mc.precharge += HPAGE_PMD_NR; 6002 spin_unlock(ptl); 6003 return 0; 6004 } 6005 6006 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6007 if (!pte) 6008 return 0; 6009 for (; addr != end; pte++, addr += PAGE_SIZE) 6010 if (get_mctgt_type(vma, addr, ptep_get(pte), NULL)) 6011 mc.precharge++; /* increment precharge temporarily */ 6012 pte_unmap_unlock(pte - 1, ptl); 6013 cond_resched(); 6014 6015 return 0; 6016 } 6017 6018 static const struct mm_walk_ops precharge_walk_ops = { 6019 .pmd_entry = mem_cgroup_count_precharge_pte_range, 6020 .walk_lock = PGWALK_RDLOCK, 6021 }; 6022 6023 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 6024 { 6025 unsigned long precharge; 6026 6027 mmap_read_lock(mm); 6028 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL); 6029 mmap_read_unlock(mm); 6030 6031 precharge = mc.precharge; 6032 mc.precharge = 0; 6033 6034 return precharge; 6035 } 6036 6037 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 6038 { 6039 unsigned long precharge = mem_cgroup_count_precharge(mm); 6040 6041 VM_BUG_ON(mc.moving_task); 6042 mc.moving_task = current; 6043 return mem_cgroup_do_precharge(precharge); 6044 } 6045 6046 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 6047 static void __mem_cgroup_clear_mc(void) 6048 { 6049 struct mem_cgroup *from = mc.from; 6050 struct mem_cgroup *to = mc.to; 6051 6052 /* we must uncharge all the leftover precharges from mc.to */ 6053 if (mc.precharge) { 6054 cancel_charge(mc.to, mc.precharge); 6055 mc.precharge = 0; 6056 } 6057 /* 6058 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 6059 * we must uncharge here. 6060 */ 6061 if (mc.moved_charge) { 6062 cancel_charge(mc.from, mc.moved_charge); 6063 mc.moved_charge = 0; 6064 } 6065 /* we must fixup refcnts and charges */ 6066 if (mc.moved_swap) { 6067 /* uncharge swap account from the old cgroup */ 6068 if (!mem_cgroup_is_root(mc.from)) 6069 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 6070 6071 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 6072 6073 /* 6074 * we charged both to->memory and to->memsw, so we 6075 * should uncharge to->memory. 6076 */ 6077 if (!mem_cgroup_is_root(mc.to)) 6078 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 6079 6080 mc.moved_swap = 0; 6081 } 6082 memcg_oom_recover(from); 6083 memcg_oom_recover(to); 6084 wake_up_all(&mc.waitq); 6085 } 6086 6087 static void mem_cgroup_clear_mc(void) 6088 { 6089 struct mm_struct *mm = mc.mm; 6090 6091 /* 6092 * we must clear moving_task before waking up waiters at the end of 6093 * task migration. 6094 */ 6095 mc.moving_task = NULL; 6096 __mem_cgroup_clear_mc(); 6097 spin_lock(&mc.lock); 6098 mc.from = NULL; 6099 mc.to = NULL; 6100 mc.mm = NULL; 6101 spin_unlock(&mc.lock); 6102 6103 mmput(mm); 6104 } 6105 6106 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6107 { 6108 struct cgroup_subsys_state *css; 6109 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 6110 struct mem_cgroup *from; 6111 struct task_struct *leader, *p; 6112 struct mm_struct *mm; 6113 unsigned long move_flags; 6114 int ret = 0; 6115 6116 /* charge immigration isn't supported on the default hierarchy */ 6117 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6118 return 0; 6119 6120 /* 6121 * Multi-process migrations only happen on the default hierarchy 6122 * where charge immigration is not used. Perform charge 6123 * immigration if @tset contains a leader and whine if there are 6124 * multiple. 6125 */ 6126 p = NULL; 6127 cgroup_taskset_for_each_leader(leader, css, tset) { 6128 WARN_ON_ONCE(p); 6129 p = leader; 6130 memcg = mem_cgroup_from_css(css); 6131 } 6132 if (!p) 6133 return 0; 6134 6135 /* 6136 * We are now committed to this value whatever it is. Changes in this 6137 * tunable will only affect upcoming migrations, not the current one. 6138 * So we need to save it, and keep it going. 6139 */ 6140 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 6141 if (!move_flags) 6142 return 0; 6143 6144 from = mem_cgroup_from_task(p); 6145 6146 VM_BUG_ON(from == memcg); 6147 6148 mm = get_task_mm(p); 6149 if (!mm) 6150 return 0; 6151 /* We move charges only when we move a owner of the mm */ 6152 if (mm->owner == p) { 6153 VM_BUG_ON(mc.from); 6154 VM_BUG_ON(mc.to); 6155 VM_BUG_ON(mc.precharge); 6156 VM_BUG_ON(mc.moved_charge); 6157 VM_BUG_ON(mc.moved_swap); 6158 6159 spin_lock(&mc.lock); 6160 mc.mm = mm; 6161 mc.from = from; 6162 mc.to = memcg; 6163 mc.flags = move_flags; 6164 spin_unlock(&mc.lock); 6165 /* We set mc.moving_task later */ 6166 6167 ret = mem_cgroup_precharge_mc(mm); 6168 if (ret) 6169 mem_cgroup_clear_mc(); 6170 } else { 6171 mmput(mm); 6172 } 6173 return ret; 6174 } 6175 6176 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6177 { 6178 if (mc.to) 6179 mem_cgroup_clear_mc(); 6180 } 6181 6182 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6183 unsigned long addr, unsigned long end, 6184 struct mm_walk *walk) 6185 { 6186 int ret = 0; 6187 struct vm_area_struct *vma = walk->vma; 6188 pte_t *pte; 6189 spinlock_t *ptl; 6190 enum mc_target_type target_type; 6191 union mc_target target; 6192 struct page *page; 6193 6194 ptl = pmd_trans_huge_lock(pmd, vma); 6195 if (ptl) { 6196 if (mc.precharge < HPAGE_PMD_NR) { 6197 spin_unlock(ptl); 6198 return 0; 6199 } 6200 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6201 if (target_type == MC_TARGET_PAGE) { 6202 page = target.page; 6203 if (isolate_lru_page(page)) { 6204 if (!mem_cgroup_move_account(page, true, 6205 mc.from, mc.to)) { 6206 mc.precharge -= HPAGE_PMD_NR; 6207 mc.moved_charge += HPAGE_PMD_NR; 6208 } 6209 putback_lru_page(page); 6210 } 6211 unlock_page(page); 6212 put_page(page); 6213 } else if (target_type == MC_TARGET_DEVICE) { 6214 page = target.page; 6215 if (!mem_cgroup_move_account(page, true, 6216 mc.from, mc.to)) { 6217 mc.precharge -= HPAGE_PMD_NR; 6218 mc.moved_charge += HPAGE_PMD_NR; 6219 } 6220 unlock_page(page); 6221 put_page(page); 6222 } 6223 spin_unlock(ptl); 6224 return 0; 6225 } 6226 6227 retry: 6228 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6229 if (!pte) 6230 return 0; 6231 for (; addr != end; addr += PAGE_SIZE) { 6232 pte_t ptent = ptep_get(pte++); 6233 bool device = false; 6234 swp_entry_t ent; 6235 6236 if (!mc.precharge) 6237 break; 6238 6239 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6240 case MC_TARGET_DEVICE: 6241 device = true; 6242 fallthrough; 6243 case MC_TARGET_PAGE: 6244 page = target.page; 6245 /* 6246 * We can have a part of the split pmd here. Moving it 6247 * can be done but it would be too convoluted so simply 6248 * ignore such a partial THP and keep it in original 6249 * memcg. There should be somebody mapping the head. 6250 */ 6251 if (PageTransCompound(page)) 6252 goto put; 6253 if (!device && !isolate_lru_page(page)) 6254 goto put; 6255 if (!mem_cgroup_move_account(page, false, 6256 mc.from, mc.to)) { 6257 mc.precharge--; 6258 /* we uncharge from mc.from later. */ 6259 mc.moved_charge++; 6260 } 6261 if (!device) 6262 putback_lru_page(page); 6263 put: /* get_mctgt_type() gets & locks the page */ 6264 unlock_page(page); 6265 put_page(page); 6266 break; 6267 case MC_TARGET_SWAP: 6268 ent = target.ent; 6269 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6270 mc.precharge--; 6271 mem_cgroup_id_get_many(mc.to, 1); 6272 /* we fixup other refcnts and charges later. */ 6273 mc.moved_swap++; 6274 } 6275 break; 6276 default: 6277 break; 6278 } 6279 } 6280 pte_unmap_unlock(pte - 1, ptl); 6281 cond_resched(); 6282 6283 if (addr != end) { 6284 /* 6285 * We have consumed all precharges we got in can_attach(). 6286 * We try charge one by one, but don't do any additional 6287 * charges to mc.to if we have failed in charge once in attach() 6288 * phase. 6289 */ 6290 ret = mem_cgroup_do_precharge(1); 6291 if (!ret) 6292 goto retry; 6293 } 6294 6295 return ret; 6296 } 6297 6298 static const struct mm_walk_ops charge_walk_ops = { 6299 .pmd_entry = mem_cgroup_move_charge_pte_range, 6300 .walk_lock = PGWALK_RDLOCK, 6301 }; 6302 6303 static void mem_cgroup_move_charge(void) 6304 { 6305 lru_add_drain_all(); 6306 /* 6307 * Signal folio_memcg_lock() to take the memcg's move_lock 6308 * while we're moving its pages to another memcg. Then wait 6309 * for already started RCU-only updates to finish. 6310 */ 6311 atomic_inc(&mc.from->moving_account); 6312 synchronize_rcu(); 6313 retry: 6314 if (unlikely(!mmap_read_trylock(mc.mm))) { 6315 /* 6316 * Someone who are holding the mmap_lock might be waiting in 6317 * waitq. So we cancel all extra charges, wake up all waiters, 6318 * and retry. Because we cancel precharges, we might not be able 6319 * to move enough charges, but moving charge is a best-effort 6320 * feature anyway, so it wouldn't be a big problem. 6321 */ 6322 __mem_cgroup_clear_mc(); 6323 cond_resched(); 6324 goto retry; 6325 } 6326 /* 6327 * When we have consumed all precharges and failed in doing 6328 * additional charge, the page walk just aborts. 6329 */ 6330 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL); 6331 mmap_read_unlock(mc.mm); 6332 atomic_dec(&mc.from->moving_account); 6333 } 6334 6335 static void mem_cgroup_move_task(void) 6336 { 6337 if (mc.to) { 6338 mem_cgroup_move_charge(); 6339 mem_cgroup_clear_mc(); 6340 } 6341 } 6342 #else /* !CONFIG_MMU */ 6343 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6344 { 6345 return 0; 6346 } 6347 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6348 { 6349 } 6350 static void mem_cgroup_move_task(void) 6351 { 6352 } 6353 #endif 6354 6355 #ifdef CONFIG_LRU_GEN 6356 static void mem_cgroup_attach(struct cgroup_taskset *tset) 6357 { 6358 struct task_struct *task; 6359 struct cgroup_subsys_state *css; 6360 6361 /* find the first leader if there is any */ 6362 cgroup_taskset_for_each_leader(task, css, tset) 6363 break; 6364 6365 if (!task) 6366 return; 6367 6368 task_lock(task); 6369 if (task->mm && READ_ONCE(task->mm->owner) == task) 6370 lru_gen_migrate_mm(task->mm); 6371 task_unlock(task); 6372 } 6373 #else 6374 static void mem_cgroup_attach(struct cgroup_taskset *tset) 6375 { 6376 } 6377 #endif /* CONFIG_LRU_GEN */ 6378 6379 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6380 { 6381 if (value == PAGE_COUNTER_MAX) 6382 seq_puts(m, "max\n"); 6383 else 6384 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6385 6386 return 0; 6387 } 6388 6389 static u64 memory_current_read(struct cgroup_subsys_state *css, 6390 struct cftype *cft) 6391 { 6392 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6393 6394 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6395 } 6396 6397 static u64 memory_peak_read(struct cgroup_subsys_state *css, 6398 struct cftype *cft) 6399 { 6400 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6401 6402 return (u64)memcg->memory.watermark * PAGE_SIZE; 6403 } 6404 6405 static int memory_min_show(struct seq_file *m, void *v) 6406 { 6407 return seq_puts_memcg_tunable(m, 6408 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6409 } 6410 6411 static ssize_t memory_min_write(struct kernfs_open_file *of, 6412 char *buf, size_t nbytes, loff_t off) 6413 { 6414 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6415 unsigned long min; 6416 int err; 6417 6418 buf = strstrip(buf); 6419 err = page_counter_memparse(buf, "max", &min); 6420 if (err) 6421 return err; 6422 6423 page_counter_set_min(&memcg->memory, min); 6424 6425 return nbytes; 6426 } 6427 6428 static int memory_low_show(struct seq_file *m, void *v) 6429 { 6430 return seq_puts_memcg_tunable(m, 6431 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6432 } 6433 6434 static ssize_t memory_low_write(struct kernfs_open_file *of, 6435 char *buf, size_t nbytes, loff_t off) 6436 { 6437 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6438 unsigned long low; 6439 int err; 6440 6441 buf = strstrip(buf); 6442 err = page_counter_memparse(buf, "max", &low); 6443 if (err) 6444 return err; 6445 6446 page_counter_set_low(&memcg->memory, low); 6447 6448 return nbytes; 6449 } 6450 6451 static int memory_high_show(struct seq_file *m, void *v) 6452 { 6453 return seq_puts_memcg_tunable(m, 6454 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6455 } 6456 6457 static ssize_t memory_high_write(struct kernfs_open_file *of, 6458 char *buf, size_t nbytes, loff_t off) 6459 { 6460 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6461 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6462 bool drained = false; 6463 unsigned long high; 6464 int err; 6465 6466 buf = strstrip(buf); 6467 err = page_counter_memparse(buf, "max", &high); 6468 if (err) 6469 return err; 6470 6471 page_counter_set_high(&memcg->memory, high); 6472 6473 for (;;) { 6474 unsigned long nr_pages = page_counter_read(&memcg->memory); 6475 unsigned long reclaimed; 6476 6477 if (nr_pages <= high) 6478 break; 6479 6480 if (signal_pending(current)) 6481 break; 6482 6483 if (!drained) { 6484 drain_all_stock(memcg); 6485 drained = true; 6486 continue; 6487 } 6488 6489 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6490 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP); 6491 6492 if (!reclaimed && !nr_retries--) 6493 break; 6494 } 6495 6496 memcg_wb_domain_size_changed(memcg); 6497 return nbytes; 6498 } 6499 6500 static int memory_max_show(struct seq_file *m, void *v) 6501 { 6502 return seq_puts_memcg_tunable(m, 6503 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6504 } 6505 6506 static ssize_t memory_max_write(struct kernfs_open_file *of, 6507 char *buf, size_t nbytes, loff_t off) 6508 { 6509 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6510 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6511 bool drained = false; 6512 unsigned long max; 6513 int err; 6514 6515 buf = strstrip(buf); 6516 err = page_counter_memparse(buf, "max", &max); 6517 if (err) 6518 return err; 6519 6520 xchg(&memcg->memory.max, max); 6521 6522 for (;;) { 6523 unsigned long nr_pages = page_counter_read(&memcg->memory); 6524 6525 if (nr_pages <= max) 6526 break; 6527 6528 if (signal_pending(current)) 6529 break; 6530 6531 if (!drained) { 6532 drain_all_stock(memcg); 6533 drained = true; 6534 continue; 6535 } 6536 6537 if (nr_reclaims) { 6538 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6539 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP)) 6540 nr_reclaims--; 6541 continue; 6542 } 6543 6544 memcg_memory_event(memcg, MEMCG_OOM); 6545 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6546 break; 6547 } 6548 6549 memcg_wb_domain_size_changed(memcg); 6550 return nbytes; 6551 } 6552 6553 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6554 { 6555 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6556 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6557 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6558 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6559 seq_printf(m, "oom_kill %lu\n", 6560 atomic_long_read(&events[MEMCG_OOM_KILL])); 6561 seq_printf(m, "oom_group_kill %lu\n", 6562 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL])); 6563 } 6564 6565 static int memory_events_show(struct seq_file *m, void *v) 6566 { 6567 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6568 6569 __memory_events_show(m, memcg->memory_events); 6570 return 0; 6571 } 6572 6573 static int memory_events_local_show(struct seq_file *m, void *v) 6574 { 6575 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6576 6577 __memory_events_show(m, memcg->memory_events_local); 6578 return 0; 6579 } 6580 6581 static int memory_stat_show(struct seq_file *m, void *v) 6582 { 6583 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6584 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 6585 struct seq_buf s; 6586 6587 if (!buf) 6588 return -ENOMEM; 6589 seq_buf_init(&s, buf, PAGE_SIZE); 6590 memory_stat_format(memcg, &s); 6591 seq_puts(m, buf); 6592 kfree(buf); 6593 return 0; 6594 } 6595 6596 #ifdef CONFIG_NUMA 6597 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 6598 int item) 6599 { 6600 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item); 6601 } 6602 6603 static int memory_numa_stat_show(struct seq_file *m, void *v) 6604 { 6605 int i; 6606 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6607 6608 mem_cgroup_flush_stats(); 6609 6610 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6611 int nid; 6612 6613 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6614 continue; 6615 6616 seq_printf(m, "%s", memory_stats[i].name); 6617 for_each_node_state(nid, N_MEMORY) { 6618 u64 size; 6619 struct lruvec *lruvec; 6620 6621 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6622 size = lruvec_page_state_output(lruvec, 6623 memory_stats[i].idx); 6624 seq_printf(m, " N%d=%llu", nid, size); 6625 } 6626 seq_putc(m, '\n'); 6627 } 6628 6629 return 0; 6630 } 6631 #endif 6632 6633 static int memory_oom_group_show(struct seq_file *m, void *v) 6634 { 6635 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6636 6637 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); 6638 6639 return 0; 6640 } 6641 6642 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6643 char *buf, size_t nbytes, loff_t off) 6644 { 6645 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6646 int ret, oom_group; 6647 6648 buf = strstrip(buf); 6649 if (!buf) 6650 return -EINVAL; 6651 6652 ret = kstrtoint(buf, 0, &oom_group); 6653 if (ret) 6654 return ret; 6655 6656 if (oom_group != 0 && oom_group != 1) 6657 return -EINVAL; 6658 6659 WRITE_ONCE(memcg->oom_group, oom_group); 6660 6661 return nbytes; 6662 } 6663 6664 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, 6665 size_t nbytes, loff_t off) 6666 { 6667 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6668 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6669 unsigned long nr_to_reclaim, nr_reclaimed = 0; 6670 unsigned int reclaim_options; 6671 int err; 6672 6673 buf = strstrip(buf); 6674 err = page_counter_memparse(buf, "", &nr_to_reclaim); 6675 if (err) 6676 return err; 6677 6678 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE; 6679 while (nr_reclaimed < nr_to_reclaim) { 6680 unsigned long reclaimed; 6681 6682 if (signal_pending(current)) 6683 return -EINTR; 6684 6685 /* 6686 * This is the final attempt, drain percpu lru caches in the 6687 * hope of introducing more evictable pages for 6688 * try_to_free_mem_cgroup_pages(). 6689 */ 6690 if (!nr_retries) 6691 lru_add_drain_all(); 6692 6693 reclaimed = try_to_free_mem_cgroup_pages(memcg, 6694 min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX), 6695 GFP_KERNEL, reclaim_options); 6696 6697 if (!reclaimed && !nr_retries--) 6698 return -EAGAIN; 6699 6700 nr_reclaimed += reclaimed; 6701 } 6702 6703 return nbytes; 6704 } 6705 6706 static struct cftype memory_files[] = { 6707 { 6708 .name = "current", 6709 .flags = CFTYPE_NOT_ON_ROOT, 6710 .read_u64 = memory_current_read, 6711 }, 6712 { 6713 .name = "peak", 6714 .flags = CFTYPE_NOT_ON_ROOT, 6715 .read_u64 = memory_peak_read, 6716 }, 6717 { 6718 .name = "min", 6719 .flags = CFTYPE_NOT_ON_ROOT, 6720 .seq_show = memory_min_show, 6721 .write = memory_min_write, 6722 }, 6723 { 6724 .name = "low", 6725 .flags = CFTYPE_NOT_ON_ROOT, 6726 .seq_show = memory_low_show, 6727 .write = memory_low_write, 6728 }, 6729 { 6730 .name = "high", 6731 .flags = CFTYPE_NOT_ON_ROOT, 6732 .seq_show = memory_high_show, 6733 .write = memory_high_write, 6734 }, 6735 { 6736 .name = "max", 6737 .flags = CFTYPE_NOT_ON_ROOT, 6738 .seq_show = memory_max_show, 6739 .write = memory_max_write, 6740 }, 6741 { 6742 .name = "events", 6743 .flags = CFTYPE_NOT_ON_ROOT, 6744 .file_offset = offsetof(struct mem_cgroup, events_file), 6745 .seq_show = memory_events_show, 6746 }, 6747 { 6748 .name = "events.local", 6749 .flags = CFTYPE_NOT_ON_ROOT, 6750 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6751 .seq_show = memory_events_local_show, 6752 }, 6753 { 6754 .name = "stat", 6755 .seq_show = memory_stat_show, 6756 }, 6757 #ifdef CONFIG_NUMA 6758 { 6759 .name = "numa_stat", 6760 .seq_show = memory_numa_stat_show, 6761 }, 6762 #endif 6763 { 6764 .name = "oom.group", 6765 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6766 .seq_show = memory_oom_group_show, 6767 .write = memory_oom_group_write, 6768 }, 6769 { 6770 .name = "reclaim", 6771 .flags = CFTYPE_NS_DELEGATABLE, 6772 .write = memory_reclaim, 6773 }, 6774 { } /* terminate */ 6775 }; 6776 6777 struct cgroup_subsys memory_cgrp_subsys = { 6778 .css_alloc = mem_cgroup_css_alloc, 6779 .css_online = mem_cgroup_css_online, 6780 .css_offline = mem_cgroup_css_offline, 6781 .css_released = mem_cgroup_css_released, 6782 .css_free = mem_cgroup_css_free, 6783 .css_reset = mem_cgroup_css_reset, 6784 .css_rstat_flush = mem_cgroup_css_rstat_flush, 6785 .can_attach = mem_cgroup_can_attach, 6786 .attach = mem_cgroup_attach, 6787 .cancel_attach = mem_cgroup_cancel_attach, 6788 .post_attach = mem_cgroup_move_task, 6789 .dfl_cftypes = memory_files, 6790 .legacy_cftypes = mem_cgroup_legacy_files, 6791 .early_init = 0, 6792 }; 6793 6794 /* 6795 * This function calculates an individual cgroup's effective 6796 * protection which is derived from its own memory.min/low, its 6797 * parent's and siblings' settings, as well as the actual memory 6798 * distribution in the tree. 6799 * 6800 * The following rules apply to the effective protection values: 6801 * 6802 * 1. At the first level of reclaim, effective protection is equal to 6803 * the declared protection in memory.min and memory.low. 6804 * 6805 * 2. To enable safe delegation of the protection configuration, at 6806 * subsequent levels the effective protection is capped to the 6807 * parent's effective protection. 6808 * 6809 * 3. To make complex and dynamic subtrees easier to configure, the 6810 * user is allowed to overcommit the declared protection at a given 6811 * level. If that is the case, the parent's effective protection is 6812 * distributed to the children in proportion to how much protection 6813 * they have declared and how much of it they are utilizing. 6814 * 6815 * This makes distribution proportional, but also work-conserving: 6816 * if one cgroup claims much more protection than it uses memory, 6817 * the unused remainder is available to its siblings. 6818 * 6819 * 4. Conversely, when the declared protection is undercommitted at a 6820 * given level, the distribution of the larger parental protection 6821 * budget is NOT proportional. A cgroup's protection from a sibling 6822 * is capped to its own memory.min/low setting. 6823 * 6824 * 5. However, to allow protecting recursive subtrees from each other 6825 * without having to declare each individual cgroup's fixed share 6826 * of the ancestor's claim to protection, any unutilized - 6827 * "floating" - protection from up the tree is distributed in 6828 * proportion to each cgroup's *usage*. This makes the protection 6829 * neutral wrt sibling cgroups and lets them compete freely over 6830 * the shared parental protection budget, but it protects the 6831 * subtree as a whole from neighboring subtrees. 6832 * 6833 * Note that 4. and 5. are not in conflict: 4. is about protecting 6834 * against immediate siblings whereas 5. is about protecting against 6835 * neighboring subtrees. 6836 */ 6837 static unsigned long effective_protection(unsigned long usage, 6838 unsigned long parent_usage, 6839 unsigned long setting, 6840 unsigned long parent_effective, 6841 unsigned long siblings_protected) 6842 { 6843 unsigned long protected; 6844 unsigned long ep; 6845 6846 protected = min(usage, setting); 6847 /* 6848 * If all cgroups at this level combined claim and use more 6849 * protection than what the parent affords them, distribute 6850 * shares in proportion to utilization. 6851 * 6852 * We are using actual utilization rather than the statically 6853 * claimed protection in order to be work-conserving: claimed 6854 * but unused protection is available to siblings that would 6855 * otherwise get a smaller chunk than what they claimed. 6856 */ 6857 if (siblings_protected > parent_effective) 6858 return protected * parent_effective / siblings_protected; 6859 6860 /* 6861 * Ok, utilized protection of all children is within what the 6862 * parent affords them, so we know whatever this child claims 6863 * and utilizes is effectively protected. 6864 * 6865 * If there is unprotected usage beyond this value, reclaim 6866 * will apply pressure in proportion to that amount. 6867 * 6868 * If there is unutilized protection, the cgroup will be fully 6869 * shielded from reclaim, but we do return a smaller value for 6870 * protection than what the group could enjoy in theory. This 6871 * is okay. With the overcommit distribution above, effective 6872 * protection is always dependent on how memory is actually 6873 * consumed among the siblings anyway. 6874 */ 6875 ep = protected; 6876 6877 /* 6878 * If the children aren't claiming (all of) the protection 6879 * afforded to them by the parent, distribute the remainder in 6880 * proportion to the (unprotected) memory of each cgroup. That 6881 * way, cgroups that aren't explicitly prioritized wrt each 6882 * other compete freely over the allowance, but they are 6883 * collectively protected from neighboring trees. 6884 * 6885 * We're using unprotected memory for the weight so that if 6886 * some cgroups DO claim explicit protection, we don't protect 6887 * the same bytes twice. 6888 * 6889 * Check both usage and parent_usage against the respective 6890 * protected values. One should imply the other, but they 6891 * aren't read atomically - make sure the division is sane. 6892 */ 6893 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6894 return ep; 6895 if (parent_effective > siblings_protected && 6896 parent_usage > siblings_protected && 6897 usage > protected) { 6898 unsigned long unclaimed; 6899 6900 unclaimed = parent_effective - siblings_protected; 6901 unclaimed *= usage - protected; 6902 unclaimed /= parent_usage - siblings_protected; 6903 6904 ep += unclaimed; 6905 } 6906 6907 return ep; 6908 } 6909 6910 /** 6911 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 6912 * @root: the top ancestor of the sub-tree being checked 6913 * @memcg: the memory cgroup to check 6914 * 6915 * WARNING: This function is not stateless! It can only be used as part 6916 * of a top-down tree iteration, not for isolated queries. 6917 */ 6918 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6919 struct mem_cgroup *memcg) 6920 { 6921 unsigned long usage, parent_usage; 6922 struct mem_cgroup *parent; 6923 6924 if (mem_cgroup_disabled()) 6925 return; 6926 6927 if (!root) 6928 root = root_mem_cgroup; 6929 6930 /* 6931 * Effective values of the reclaim targets are ignored so they 6932 * can be stale. Have a look at mem_cgroup_protection for more 6933 * details. 6934 * TODO: calculation should be more robust so that we do not need 6935 * that special casing. 6936 */ 6937 if (memcg == root) 6938 return; 6939 6940 usage = page_counter_read(&memcg->memory); 6941 if (!usage) 6942 return; 6943 6944 parent = parent_mem_cgroup(memcg); 6945 6946 if (parent == root) { 6947 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6948 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6949 return; 6950 } 6951 6952 parent_usage = page_counter_read(&parent->memory); 6953 6954 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6955 READ_ONCE(memcg->memory.min), 6956 READ_ONCE(parent->memory.emin), 6957 atomic_long_read(&parent->memory.children_min_usage))); 6958 6959 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6960 READ_ONCE(memcg->memory.low), 6961 READ_ONCE(parent->memory.elow), 6962 atomic_long_read(&parent->memory.children_low_usage))); 6963 } 6964 6965 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, 6966 gfp_t gfp) 6967 { 6968 long nr_pages = folio_nr_pages(folio); 6969 int ret; 6970 6971 ret = try_charge(memcg, gfp, nr_pages); 6972 if (ret) 6973 goto out; 6974 6975 css_get(&memcg->css); 6976 commit_charge(folio, memcg); 6977 6978 local_irq_disable(); 6979 mem_cgroup_charge_statistics(memcg, nr_pages); 6980 memcg_check_events(memcg, folio_nid(folio)); 6981 local_irq_enable(); 6982 out: 6983 return ret; 6984 } 6985 6986 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) 6987 { 6988 struct mem_cgroup *memcg; 6989 int ret; 6990 6991 memcg = get_mem_cgroup_from_mm(mm); 6992 ret = charge_memcg(folio, memcg, gfp); 6993 css_put(&memcg->css); 6994 6995 return ret; 6996 } 6997 6998 /** 6999 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. 7000 * @folio: folio to charge. 7001 * @mm: mm context of the victim 7002 * @gfp: reclaim mode 7003 * @entry: swap entry for which the folio is allocated 7004 * 7005 * This function charges a folio allocated for swapin. Please call this before 7006 * adding the folio to the swapcache. 7007 * 7008 * Returns 0 on success. Otherwise, an error code is returned. 7009 */ 7010 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, 7011 gfp_t gfp, swp_entry_t entry) 7012 { 7013 struct mem_cgroup *memcg; 7014 unsigned short id; 7015 int ret; 7016 7017 if (mem_cgroup_disabled()) 7018 return 0; 7019 7020 id = lookup_swap_cgroup_id(entry); 7021 rcu_read_lock(); 7022 memcg = mem_cgroup_from_id(id); 7023 if (!memcg || !css_tryget_online(&memcg->css)) 7024 memcg = get_mem_cgroup_from_mm(mm); 7025 rcu_read_unlock(); 7026 7027 ret = charge_memcg(folio, memcg, gfp); 7028 7029 css_put(&memcg->css); 7030 return ret; 7031 } 7032 7033 /* 7034 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 7035 * @entry: swap entry for which the page is charged 7036 * 7037 * Call this function after successfully adding the charged page to swapcache. 7038 * 7039 * Note: This function assumes the page for which swap slot is being uncharged 7040 * is order 0 page. 7041 */ 7042 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) 7043 { 7044 /* 7045 * Cgroup1's unified memory+swap counter has been charged with the 7046 * new swapcache page, finish the transfer by uncharging the swap 7047 * slot. The swap slot would also get uncharged when it dies, but 7048 * it can stick around indefinitely and we'd count the page twice 7049 * the entire time. 7050 * 7051 * Cgroup2 has separate resource counters for memory and swap, 7052 * so this is a non-issue here. Memory and swap charge lifetimes 7053 * correspond 1:1 to page and swap slot lifetimes: we charge the 7054 * page to memory here, and uncharge swap when the slot is freed. 7055 */ 7056 if (!mem_cgroup_disabled() && do_memsw_account()) { 7057 /* 7058 * The swap entry might not get freed for a long time, 7059 * let's not wait for it. The page already received a 7060 * memory+swap charge, drop the swap entry duplicate. 7061 */ 7062 mem_cgroup_uncharge_swap(entry, 1); 7063 } 7064 } 7065 7066 struct uncharge_gather { 7067 struct mem_cgroup *memcg; 7068 unsigned long nr_memory; 7069 unsigned long pgpgout; 7070 unsigned long nr_kmem; 7071 int nid; 7072 }; 7073 7074 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 7075 { 7076 memset(ug, 0, sizeof(*ug)); 7077 } 7078 7079 static void uncharge_batch(const struct uncharge_gather *ug) 7080 { 7081 unsigned long flags; 7082 7083 if (ug->nr_memory) { 7084 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 7085 if (do_memsw_account()) 7086 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 7087 if (ug->nr_kmem) 7088 memcg_account_kmem(ug->memcg, -ug->nr_kmem); 7089 memcg_oom_recover(ug->memcg); 7090 } 7091 7092 local_irq_save(flags); 7093 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 7094 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); 7095 memcg_check_events(ug->memcg, ug->nid); 7096 local_irq_restore(flags); 7097 7098 /* drop reference from uncharge_folio */ 7099 css_put(&ug->memcg->css); 7100 } 7101 7102 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) 7103 { 7104 long nr_pages; 7105 struct mem_cgroup *memcg; 7106 struct obj_cgroup *objcg; 7107 7108 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 7109 7110 /* 7111 * Nobody should be changing or seriously looking at 7112 * folio memcg or objcg at this point, we have fully 7113 * exclusive access to the folio. 7114 */ 7115 if (folio_memcg_kmem(folio)) { 7116 objcg = __folio_objcg(folio); 7117 /* 7118 * This get matches the put at the end of the function and 7119 * kmem pages do not hold memcg references anymore. 7120 */ 7121 memcg = get_mem_cgroup_from_objcg(objcg); 7122 } else { 7123 memcg = __folio_memcg(folio); 7124 } 7125 7126 if (!memcg) 7127 return; 7128 7129 if (ug->memcg != memcg) { 7130 if (ug->memcg) { 7131 uncharge_batch(ug); 7132 uncharge_gather_clear(ug); 7133 } 7134 ug->memcg = memcg; 7135 ug->nid = folio_nid(folio); 7136 7137 /* pairs with css_put in uncharge_batch */ 7138 css_get(&memcg->css); 7139 } 7140 7141 nr_pages = folio_nr_pages(folio); 7142 7143 if (folio_memcg_kmem(folio)) { 7144 ug->nr_memory += nr_pages; 7145 ug->nr_kmem += nr_pages; 7146 7147 folio->memcg_data = 0; 7148 obj_cgroup_put(objcg); 7149 } else { 7150 /* LRU pages aren't accounted at the root level */ 7151 if (!mem_cgroup_is_root(memcg)) 7152 ug->nr_memory += nr_pages; 7153 ug->pgpgout++; 7154 7155 folio->memcg_data = 0; 7156 } 7157 7158 css_put(&memcg->css); 7159 } 7160 7161 void __mem_cgroup_uncharge(struct folio *folio) 7162 { 7163 struct uncharge_gather ug; 7164 7165 /* Don't touch folio->lru of any random page, pre-check: */ 7166 if (!folio_memcg(folio)) 7167 return; 7168 7169 uncharge_gather_clear(&ug); 7170 uncharge_folio(folio, &ug); 7171 uncharge_batch(&ug); 7172 } 7173 7174 /** 7175 * __mem_cgroup_uncharge_list - uncharge a list of page 7176 * @page_list: list of pages to uncharge 7177 * 7178 * Uncharge a list of pages previously charged with 7179 * __mem_cgroup_charge(). 7180 */ 7181 void __mem_cgroup_uncharge_list(struct list_head *page_list) 7182 { 7183 struct uncharge_gather ug; 7184 struct folio *folio; 7185 7186 uncharge_gather_clear(&ug); 7187 list_for_each_entry(folio, page_list, lru) 7188 uncharge_folio(folio, &ug); 7189 if (ug.memcg) 7190 uncharge_batch(&ug); 7191 } 7192 7193 /** 7194 * mem_cgroup_migrate - Charge a folio's replacement. 7195 * @old: Currently circulating folio. 7196 * @new: Replacement folio. 7197 * 7198 * Charge @new as a replacement folio for @old. @old will 7199 * be uncharged upon free. 7200 * 7201 * Both folios must be locked, @new->mapping must be set up. 7202 */ 7203 void mem_cgroup_migrate(struct folio *old, struct folio *new) 7204 { 7205 struct mem_cgroup *memcg; 7206 long nr_pages = folio_nr_pages(new); 7207 unsigned long flags; 7208 7209 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 7210 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 7211 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 7212 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); 7213 7214 if (mem_cgroup_disabled()) 7215 return; 7216 7217 /* Page cache replacement: new folio already charged? */ 7218 if (folio_memcg(new)) 7219 return; 7220 7221 memcg = folio_memcg(old); 7222 VM_WARN_ON_ONCE_FOLIO(!memcg, old); 7223 if (!memcg) 7224 return; 7225 7226 /* Force-charge the new page. The old one will be freed soon */ 7227 if (!mem_cgroup_is_root(memcg)) { 7228 page_counter_charge(&memcg->memory, nr_pages); 7229 if (do_memsw_account()) 7230 page_counter_charge(&memcg->memsw, nr_pages); 7231 } 7232 7233 css_get(&memcg->css); 7234 commit_charge(new, memcg); 7235 7236 local_irq_save(flags); 7237 mem_cgroup_charge_statistics(memcg, nr_pages); 7238 memcg_check_events(memcg, folio_nid(new)); 7239 local_irq_restore(flags); 7240 } 7241 7242 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7243 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7244 7245 void mem_cgroup_sk_alloc(struct sock *sk) 7246 { 7247 struct mem_cgroup *memcg; 7248 7249 if (!mem_cgroup_sockets_enabled) 7250 return; 7251 7252 /* Do not associate the sock with unrelated interrupted task's memcg. */ 7253 if (!in_task()) 7254 return; 7255 7256 rcu_read_lock(); 7257 memcg = mem_cgroup_from_task(current); 7258 if (mem_cgroup_is_root(memcg)) 7259 goto out; 7260 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7261 goto out; 7262 if (css_tryget(&memcg->css)) 7263 sk->sk_memcg = memcg; 7264 out: 7265 rcu_read_unlock(); 7266 } 7267 7268 void mem_cgroup_sk_free(struct sock *sk) 7269 { 7270 if (sk->sk_memcg) 7271 css_put(&sk->sk_memcg->css); 7272 } 7273 7274 /** 7275 * mem_cgroup_charge_skmem - charge socket memory 7276 * @memcg: memcg to charge 7277 * @nr_pages: number of pages to charge 7278 * @gfp_mask: reclaim mode 7279 * 7280 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7281 * @memcg's configured limit, %false if it doesn't. 7282 */ 7283 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, 7284 gfp_t gfp_mask) 7285 { 7286 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7287 struct page_counter *fail; 7288 7289 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7290 memcg->tcpmem_pressure = 0; 7291 return true; 7292 } 7293 memcg->tcpmem_pressure = 1; 7294 if (gfp_mask & __GFP_NOFAIL) { 7295 page_counter_charge(&memcg->tcpmem, nr_pages); 7296 return true; 7297 } 7298 return false; 7299 } 7300 7301 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { 7302 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7303 return true; 7304 } 7305 7306 return false; 7307 } 7308 7309 /** 7310 * mem_cgroup_uncharge_skmem - uncharge socket memory 7311 * @memcg: memcg to uncharge 7312 * @nr_pages: number of pages to uncharge 7313 */ 7314 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7315 { 7316 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7317 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7318 return; 7319 } 7320 7321 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7322 7323 refill_stock(memcg, nr_pages); 7324 } 7325 7326 static int __init cgroup_memory(char *s) 7327 { 7328 char *token; 7329 7330 while ((token = strsep(&s, ",")) != NULL) { 7331 if (!*token) 7332 continue; 7333 if (!strcmp(token, "nosocket")) 7334 cgroup_memory_nosocket = true; 7335 if (!strcmp(token, "nokmem")) 7336 cgroup_memory_nokmem = true; 7337 if (!strcmp(token, "nobpf")) 7338 cgroup_memory_nobpf = true; 7339 } 7340 return 1; 7341 } 7342 __setup("cgroup.memory=", cgroup_memory); 7343 7344 /* 7345 * subsys_initcall() for memory controller. 7346 * 7347 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7348 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7349 * basically everything that doesn't depend on a specific mem_cgroup structure 7350 * should be initialized from here. 7351 */ 7352 static int __init mem_cgroup_init(void) 7353 { 7354 int cpu, node; 7355 7356 /* 7357 * Currently s32 type (can refer to struct batched_lruvec_stat) is 7358 * used for per-memcg-per-cpu caching of per-node statistics. In order 7359 * to work fine, we should make sure that the overfill threshold can't 7360 * exceed S32_MAX / PAGE_SIZE. 7361 */ 7362 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 7363 7364 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7365 memcg_hotplug_cpu_dead); 7366 7367 for_each_possible_cpu(cpu) 7368 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7369 drain_local_stock); 7370 7371 for_each_node(node) { 7372 struct mem_cgroup_tree_per_node *rtpn; 7373 7374 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node); 7375 7376 rtpn->rb_root = RB_ROOT; 7377 rtpn->rb_rightmost = NULL; 7378 spin_lock_init(&rtpn->lock); 7379 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7380 } 7381 7382 return 0; 7383 } 7384 subsys_initcall(mem_cgroup_init); 7385 7386 #ifdef CONFIG_SWAP 7387 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7388 { 7389 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7390 /* 7391 * The root cgroup cannot be destroyed, so it's refcount must 7392 * always be >= 1. 7393 */ 7394 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { 7395 VM_BUG_ON(1); 7396 break; 7397 } 7398 memcg = parent_mem_cgroup(memcg); 7399 if (!memcg) 7400 memcg = root_mem_cgroup; 7401 } 7402 return memcg; 7403 } 7404 7405 /** 7406 * mem_cgroup_swapout - transfer a memsw charge to swap 7407 * @folio: folio whose memsw charge to transfer 7408 * @entry: swap entry to move the charge to 7409 * 7410 * Transfer the memsw charge of @folio to @entry. 7411 */ 7412 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) 7413 { 7414 struct mem_cgroup *memcg, *swap_memcg; 7415 unsigned int nr_entries; 7416 unsigned short oldid; 7417 7418 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 7419 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 7420 7421 if (mem_cgroup_disabled()) 7422 return; 7423 7424 if (!do_memsw_account()) 7425 return; 7426 7427 memcg = folio_memcg(folio); 7428 7429 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 7430 if (!memcg) 7431 return; 7432 7433 /* 7434 * In case the memcg owning these pages has been offlined and doesn't 7435 * have an ID allocated to it anymore, charge the closest online 7436 * ancestor for the swap instead and transfer the memory+swap charge. 7437 */ 7438 swap_memcg = mem_cgroup_id_get_online(memcg); 7439 nr_entries = folio_nr_pages(folio); 7440 /* Get references for the tail pages, too */ 7441 if (nr_entries > 1) 7442 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7443 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7444 nr_entries); 7445 VM_BUG_ON_FOLIO(oldid, folio); 7446 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7447 7448 folio->memcg_data = 0; 7449 7450 if (!mem_cgroup_is_root(memcg)) 7451 page_counter_uncharge(&memcg->memory, nr_entries); 7452 7453 if (memcg != swap_memcg) { 7454 if (!mem_cgroup_is_root(swap_memcg)) 7455 page_counter_charge(&swap_memcg->memsw, nr_entries); 7456 page_counter_uncharge(&memcg->memsw, nr_entries); 7457 } 7458 7459 /* 7460 * Interrupts should be disabled here because the caller holds the 7461 * i_pages lock which is taken with interrupts-off. It is 7462 * important here to have the interrupts disabled because it is the 7463 * only synchronisation we have for updating the per-CPU variables. 7464 */ 7465 memcg_stats_lock(); 7466 mem_cgroup_charge_statistics(memcg, -nr_entries); 7467 memcg_stats_unlock(); 7468 memcg_check_events(memcg, folio_nid(folio)); 7469 7470 css_put(&memcg->css); 7471 } 7472 7473 /** 7474 * __mem_cgroup_try_charge_swap - try charging swap space for a folio 7475 * @folio: folio being added to swap 7476 * @entry: swap entry to charge 7477 * 7478 * Try to charge @folio's memcg for the swap space at @entry. 7479 * 7480 * Returns 0 on success, -ENOMEM on failure. 7481 */ 7482 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) 7483 { 7484 unsigned int nr_pages = folio_nr_pages(folio); 7485 struct page_counter *counter; 7486 struct mem_cgroup *memcg; 7487 unsigned short oldid; 7488 7489 if (do_memsw_account()) 7490 return 0; 7491 7492 memcg = folio_memcg(folio); 7493 7494 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 7495 if (!memcg) 7496 return 0; 7497 7498 if (!entry.val) { 7499 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7500 return 0; 7501 } 7502 7503 memcg = mem_cgroup_id_get_online(memcg); 7504 7505 if (!mem_cgroup_is_root(memcg) && 7506 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7507 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7508 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7509 mem_cgroup_id_put(memcg); 7510 return -ENOMEM; 7511 } 7512 7513 /* Get references for the tail pages, too */ 7514 if (nr_pages > 1) 7515 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7516 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7517 VM_BUG_ON_FOLIO(oldid, folio); 7518 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7519 7520 return 0; 7521 } 7522 7523 /** 7524 * __mem_cgroup_uncharge_swap - uncharge swap space 7525 * @entry: swap entry to uncharge 7526 * @nr_pages: the amount of swap space to uncharge 7527 */ 7528 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7529 { 7530 struct mem_cgroup *memcg; 7531 unsigned short id; 7532 7533 id = swap_cgroup_record(entry, 0, nr_pages); 7534 rcu_read_lock(); 7535 memcg = mem_cgroup_from_id(id); 7536 if (memcg) { 7537 if (!mem_cgroup_is_root(memcg)) { 7538 if (do_memsw_account()) 7539 page_counter_uncharge(&memcg->memsw, nr_pages); 7540 else 7541 page_counter_uncharge(&memcg->swap, nr_pages); 7542 } 7543 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7544 mem_cgroup_id_put_many(memcg, nr_pages); 7545 } 7546 rcu_read_unlock(); 7547 } 7548 7549 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7550 { 7551 long nr_swap_pages = get_nr_swap_pages(); 7552 7553 if (mem_cgroup_disabled() || do_memsw_account()) 7554 return nr_swap_pages; 7555 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) 7556 nr_swap_pages = min_t(long, nr_swap_pages, 7557 READ_ONCE(memcg->swap.max) - 7558 page_counter_read(&memcg->swap)); 7559 return nr_swap_pages; 7560 } 7561 7562 bool mem_cgroup_swap_full(struct folio *folio) 7563 { 7564 struct mem_cgroup *memcg; 7565 7566 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 7567 7568 if (vm_swap_full()) 7569 return true; 7570 if (do_memsw_account()) 7571 return false; 7572 7573 memcg = folio_memcg(folio); 7574 if (!memcg) 7575 return false; 7576 7577 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 7578 unsigned long usage = page_counter_read(&memcg->swap); 7579 7580 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7581 usage * 2 >= READ_ONCE(memcg->swap.max)) 7582 return true; 7583 } 7584 7585 return false; 7586 } 7587 7588 static int __init setup_swap_account(char *s) 7589 { 7590 pr_warn_once("The swapaccount= commandline option is deprecated. " 7591 "Please report your usecase to linux-mm@kvack.org if you " 7592 "depend on this functionality.\n"); 7593 return 1; 7594 } 7595 __setup("swapaccount=", setup_swap_account); 7596 7597 static u64 swap_current_read(struct cgroup_subsys_state *css, 7598 struct cftype *cft) 7599 { 7600 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7601 7602 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7603 } 7604 7605 static u64 swap_peak_read(struct cgroup_subsys_state *css, 7606 struct cftype *cft) 7607 { 7608 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7609 7610 return (u64)memcg->swap.watermark * PAGE_SIZE; 7611 } 7612 7613 static int swap_high_show(struct seq_file *m, void *v) 7614 { 7615 return seq_puts_memcg_tunable(m, 7616 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7617 } 7618 7619 static ssize_t swap_high_write(struct kernfs_open_file *of, 7620 char *buf, size_t nbytes, loff_t off) 7621 { 7622 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7623 unsigned long high; 7624 int err; 7625 7626 buf = strstrip(buf); 7627 err = page_counter_memparse(buf, "max", &high); 7628 if (err) 7629 return err; 7630 7631 page_counter_set_high(&memcg->swap, high); 7632 7633 return nbytes; 7634 } 7635 7636 static int swap_max_show(struct seq_file *m, void *v) 7637 { 7638 return seq_puts_memcg_tunable(m, 7639 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7640 } 7641 7642 static ssize_t swap_max_write(struct kernfs_open_file *of, 7643 char *buf, size_t nbytes, loff_t off) 7644 { 7645 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7646 unsigned long max; 7647 int err; 7648 7649 buf = strstrip(buf); 7650 err = page_counter_memparse(buf, "max", &max); 7651 if (err) 7652 return err; 7653 7654 xchg(&memcg->swap.max, max); 7655 7656 return nbytes; 7657 } 7658 7659 static int swap_events_show(struct seq_file *m, void *v) 7660 { 7661 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7662 7663 seq_printf(m, "high %lu\n", 7664 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7665 seq_printf(m, "max %lu\n", 7666 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7667 seq_printf(m, "fail %lu\n", 7668 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7669 7670 return 0; 7671 } 7672 7673 static struct cftype swap_files[] = { 7674 { 7675 .name = "swap.current", 7676 .flags = CFTYPE_NOT_ON_ROOT, 7677 .read_u64 = swap_current_read, 7678 }, 7679 { 7680 .name = "swap.high", 7681 .flags = CFTYPE_NOT_ON_ROOT, 7682 .seq_show = swap_high_show, 7683 .write = swap_high_write, 7684 }, 7685 { 7686 .name = "swap.max", 7687 .flags = CFTYPE_NOT_ON_ROOT, 7688 .seq_show = swap_max_show, 7689 .write = swap_max_write, 7690 }, 7691 { 7692 .name = "swap.peak", 7693 .flags = CFTYPE_NOT_ON_ROOT, 7694 .read_u64 = swap_peak_read, 7695 }, 7696 { 7697 .name = "swap.events", 7698 .flags = CFTYPE_NOT_ON_ROOT, 7699 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7700 .seq_show = swap_events_show, 7701 }, 7702 { } /* terminate */ 7703 }; 7704 7705 static struct cftype memsw_files[] = { 7706 { 7707 .name = "memsw.usage_in_bytes", 7708 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7709 .read_u64 = mem_cgroup_read_u64, 7710 }, 7711 { 7712 .name = "memsw.max_usage_in_bytes", 7713 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7714 .write = mem_cgroup_reset, 7715 .read_u64 = mem_cgroup_read_u64, 7716 }, 7717 { 7718 .name = "memsw.limit_in_bytes", 7719 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7720 .write = mem_cgroup_write, 7721 .read_u64 = mem_cgroup_read_u64, 7722 }, 7723 { 7724 .name = "memsw.failcnt", 7725 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7726 .write = mem_cgroup_reset, 7727 .read_u64 = mem_cgroup_read_u64, 7728 }, 7729 { }, /* terminate */ 7730 }; 7731 7732 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 7733 /** 7734 * obj_cgroup_may_zswap - check if this cgroup can zswap 7735 * @objcg: the object cgroup 7736 * 7737 * Check if the hierarchical zswap limit has been reached. 7738 * 7739 * This doesn't check for specific headroom, and it is not atomic 7740 * either. But with zswap, the size of the allocation is only known 7741 * once compression has occured, and this optimistic pre-check avoids 7742 * spending cycles on compression when there is already no room left 7743 * or zswap is disabled altogether somewhere in the hierarchy. 7744 */ 7745 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) 7746 { 7747 struct mem_cgroup *memcg, *original_memcg; 7748 bool ret = true; 7749 7750 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7751 return true; 7752 7753 original_memcg = get_mem_cgroup_from_objcg(objcg); 7754 for (memcg = original_memcg; !mem_cgroup_is_root(memcg); 7755 memcg = parent_mem_cgroup(memcg)) { 7756 unsigned long max = READ_ONCE(memcg->zswap_max); 7757 unsigned long pages; 7758 7759 if (max == PAGE_COUNTER_MAX) 7760 continue; 7761 if (max == 0) { 7762 ret = false; 7763 break; 7764 } 7765 7766 cgroup_rstat_flush(memcg->css.cgroup); 7767 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; 7768 if (pages < max) 7769 continue; 7770 ret = false; 7771 break; 7772 } 7773 mem_cgroup_put(original_memcg); 7774 return ret; 7775 } 7776 7777 /** 7778 * obj_cgroup_charge_zswap - charge compression backend memory 7779 * @objcg: the object cgroup 7780 * @size: size of compressed object 7781 * 7782 * This forces the charge after obj_cgroup_may_zswap() allowed 7783 * compression and storage in zwap for this cgroup to go ahead. 7784 */ 7785 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size) 7786 { 7787 struct mem_cgroup *memcg; 7788 7789 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7790 return; 7791 7792 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); 7793 7794 /* PF_MEMALLOC context, charging must succeed */ 7795 if (obj_cgroup_charge(objcg, GFP_KERNEL, size)) 7796 VM_WARN_ON_ONCE(1); 7797 7798 rcu_read_lock(); 7799 memcg = obj_cgroup_memcg(objcg); 7800 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size); 7801 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1); 7802 rcu_read_unlock(); 7803 } 7804 7805 /** 7806 * obj_cgroup_uncharge_zswap - uncharge compression backend memory 7807 * @objcg: the object cgroup 7808 * @size: size of compressed object 7809 * 7810 * Uncharges zswap memory on page in. 7811 */ 7812 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) 7813 { 7814 struct mem_cgroup *memcg; 7815 7816 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7817 return; 7818 7819 obj_cgroup_uncharge(objcg, size); 7820 7821 rcu_read_lock(); 7822 memcg = obj_cgroup_memcg(objcg); 7823 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); 7824 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); 7825 rcu_read_unlock(); 7826 } 7827 7828 static u64 zswap_current_read(struct cgroup_subsys_state *css, 7829 struct cftype *cft) 7830 { 7831 cgroup_rstat_flush(css->cgroup); 7832 return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B); 7833 } 7834 7835 static int zswap_max_show(struct seq_file *m, void *v) 7836 { 7837 return seq_puts_memcg_tunable(m, 7838 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); 7839 } 7840 7841 static ssize_t zswap_max_write(struct kernfs_open_file *of, 7842 char *buf, size_t nbytes, loff_t off) 7843 { 7844 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7845 unsigned long max; 7846 int err; 7847 7848 buf = strstrip(buf); 7849 err = page_counter_memparse(buf, "max", &max); 7850 if (err) 7851 return err; 7852 7853 xchg(&memcg->zswap_max, max); 7854 7855 return nbytes; 7856 } 7857 7858 static struct cftype zswap_files[] = { 7859 { 7860 .name = "zswap.current", 7861 .flags = CFTYPE_NOT_ON_ROOT, 7862 .read_u64 = zswap_current_read, 7863 }, 7864 { 7865 .name = "zswap.max", 7866 .flags = CFTYPE_NOT_ON_ROOT, 7867 .seq_show = zswap_max_show, 7868 .write = zswap_max_write, 7869 }, 7870 { } /* terminate */ 7871 }; 7872 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */ 7873 7874 static int __init mem_cgroup_swap_init(void) 7875 { 7876 if (mem_cgroup_disabled()) 7877 return 0; 7878 7879 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7880 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7881 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 7882 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files)); 7883 #endif 7884 return 0; 7885 } 7886 subsys_initcall(mem_cgroup_swap_init); 7887 7888 #endif /* CONFIG_SWAP */ 7889