1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 * 24 * Per memcg lru locking 25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi 26 */ 27 28 #include <linux/page_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/pagewalk.h> 32 #include <linux/sched/mm.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/hugetlb.h> 35 #include <linux/pagemap.h> 36 #include <linux/vm_event_item.h> 37 #include <linux/smp.h> 38 #include <linux/page-flags.h> 39 #include <linux/backing-dev.h> 40 #include <linux/bit_spinlock.h> 41 #include <linux/rcupdate.h> 42 #include <linux/limits.h> 43 #include <linux/export.h> 44 #include <linux/mutex.h> 45 #include <linux/rbtree.h> 46 #include <linux/slab.h> 47 #include <linux/swap.h> 48 #include <linux/swapops.h> 49 #include <linux/spinlock.h> 50 #include <linux/eventfd.h> 51 #include <linux/poll.h> 52 #include <linux/sort.h> 53 #include <linux/fs.h> 54 #include <linux/seq_file.h> 55 #include <linux/vmpressure.h> 56 #include <linux/memremap.h> 57 #include <linux/mm_inline.h> 58 #include <linux/swap_cgroup.h> 59 #include <linux/cpu.h> 60 #include <linux/oom.h> 61 #include <linux/lockdep.h> 62 #include <linux/file.h> 63 #include <linux/resume_user_mode.h> 64 #include <linux/psi.h> 65 #include <linux/seq_buf.h> 66 #include <linux/sched/isolation.h> 67 #include "internal.h" 68 #include <net/sock.h> 69 #include <net/ip.h> 70 #include "slab.h" 71 #include "swap.h" 72 73 #include <linux/uaccess.h> 74 75 #include <trace/events/vmscan.h> 76 77 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 78 EXPORT_SYMBOL(memory_cgrp_subsys); 79 80 struct mem_cgroup *root_mem_cgroup __read_mostly; 81 82 /* Active memory cgroup to use from an interrupt context */ 83 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); 84 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); 85 86 /* Socket memory accounting disabled? */ 87 static bool cgroup_memory_nosocket __ro_after_init; 88 89 /* Kernel memory accounting disabled? */ 90 static bool cgroup_memory_nokmem __ro_after_init; 91 92 /* BPF memory accounting disabled? 
*/ 93 static bool cgroup_memory_nobpf __ro_after_init; 94 95 #ifdef CONFIG_CGROUP_WRITEBACK 96 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); 97 #endif 98 99 /* Whether legacy memory+swap accounting is active */ 100 static bool do_memsw_account(void) 101 { 102 return !cgroup_subsys_on_dfl(memory_cgrp_subsys); 103 } 104 105 #define THRESHOLDS_EVENTS_TARGET 128 106 #define SOFTLIMIT_EVENTS_TARGET 1024 107 108 /* 109 * Cgroups above their limits are maintained in a RB-Tree, independent of 110 * their hierarchy representation 111 */ 112 113 struct mem_cgroup_tree_per_node { 114 struct rb_root rb_root; 115 struct rb_node *rb_rightmost; 116 spinlock_t lock; 117 }; 118 119 struct mem_cgroup_tree { 120 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 121 }; 122 123 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 124 125 /* for OOM */ 126 struct mem_cgroup_eventfd_list { 127 struct list_head list; 128 struct eventfd_ctx *eventfd; 129 }; 130 131 /* 132 * cgroup_event represents events which userspace want to receive. 133 */ 134 struct mem_cgroup_event { 135 /* 136 * memcg which the event belongs to. 137 */ 138 struct mem_cgroup *memcg; 139 /* 140 * eventfd to signal userspace about the event. 141 */ 142 struct eventfd_ctx *eventfd; 143 /* 144 * Each of these stored in a list by the cgroup. 145 */ 146 struct list_head list; 147 /* 148 * register_event() callback will be used to add new userspace 149 * waiter for changes related to this event. Use eventfd_signal() 150 * on eventfd to send notification to userspace. 151 */ 152 int (*register_event)(struct mem_cgroup *memcg, 153 struct eventfd_ctx *eventfd, const char *args); 154 /* 155 * unregister_event() callback will be called when userspace closes 156 * the eventfd or on cgroup removing. This callback must be set, 157 * if you want provide notification functionality. 158 */ 159 void (*unregister_event)(struct mem_cgroup *memcg, 160 struct eventfd_ctx *eventfd); 161 /* 162 * All fields below needed to unregister event when 163 * userspace closes eventfd. 164 */ 165 poll_table pt; 166 wait_queue_head_t *wqh; 167 wait_queue_entry_t wait; 168 struct work_struct remove; 169 }; 170 171 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 172 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 173 174 /* Stuffs for move charges at task migration. */ 175 /* 176 * Types of charges to be moved. 177 */ 178 #define MOVE_ANON 0x1U 179 #define MOVE_FILE 0x2U 180 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 181 182 /* "mc" and its members are protected by cgroup_mutex */ 183 static struct move_charge_struct { 184 spinlock_t lock; /* for from, to */ 185 struct mm_struct *mm; 186 struct mem_cgroup *from; 187 struct mem_cgroup *to; 188 unsigned long flags; 189 unsigned long precharge; 190 unsigned long moved_charge; 191 unsigned long moved_swap; 192 struct task_struct *moving_task; /* a task moving charges */ 193 wait_queue_head_t waitq; /* a waitq for other context */ 194 } mc = { 195 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 196 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 197 }; 198 199 /* 200 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 201 * limit reclaim to prevent infinite loops, if they ever occur. 
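 *
 * A condensed sketch of how the cap is applied by mem_cgroup_soft_reclaim()
 * further down in this file (progress checks and error handling omitted;
 * the counter only advances when the shared iterator wraps around):
 *
 *	while (1) {
 *		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
 *		if (!victim) {
 *			if (++loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)
 *				break;
 *			continue;
 *		}
 *		total += mem_cgroup_shrink_node(victim, ...);
 *	}
 *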
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#ifdef CONFIG_MEMCG_KMEM
static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
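	 *
	 * Worked numbers for the sequence above, assuming PAGE_SIZE == 4096:
	 * the 92-byte allocation charges one full page and leaves
	 * 4096 - 92 = 4004 in objcg->nr_charged_bytes; when the object is
	 * freed, the 92 bytes eventually flow back, 4004 + 92 == 4096, and
	 * the release path below uncharges 4096 >> PAGE_SHIFT == 1 page.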
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);
#endif

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
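 *
 * For example, the /proc/kpagecgroup interface reports this value per PFN;
 * a minimal sketch of such a caller (the real reader lives in
 * fs/proc/page.c and iterates over a PFN range):
 *
 *	ino_t ino = page_cgroup_ino(pfn_to_page(pfn));
 *	// ino == 0 means the page is not charged to any cgroup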
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	if (lru_gen_enabled()) {
		if (soft_limit_excess(memcg))
			lru_gen_soft_reclaim(&memcg->nodeinfo[nid]->lruvec);
		return;
	}

	mctz = soft_limit_tree.rb_tree_per_node[nid];
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = memcg->nodeinfo[nid];
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
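			 *
			 * Worked example: with usage at 300 pages and the soft
			 * limit at 200 pages, soft_limit_excess() returns 100
			 * and the node is (re)inserted keyed by 100, so the
			 * rightmost node is always the most-exceeding cgroup.
			 * If usage later drops to 180 pages, excess is 0 and
			 * the node is only removed, never re-linked.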
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = memcg->nodeinfo[nid];
		mctz = soft_limit_tree.rb_tree_per_node[nid];
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize
 * flushing, the kernel does the following:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so that
 *    the rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are
 *    more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization
 *    can let the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
 *    update events, but only for 2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static DEFINE_PER_CPU(unsigned int, stats_updates);
static atomic_t stats_flush_ongoing = ATOMIC_INIT(0);
static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
static u64 flush_next_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT, where holding
 * a spinlock_t does not disable preemption. These functions are never used in
 * hardirq context on PREEMPT_RT and therefore disabling preemption
 * is sufficient.
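 *
 * The intended usage pattern, sketched (this is what the stat update
 * helpers below do):
 *
 *	memcg_stats_lock();	// no-op when IRQs are already disabled,
 *				// preempt_disable() on PREEMPT_RT
 *	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 *	memcg_rstat_updated(memcg, val);
 *	memcg_stats_unlock();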
602 */ 603 static void memcg_stats_lock(void) 604 { 605 preempt_disable_nested(); 606 VM_WARN_ON_IRQS_ENABLED(); 607 } 608 609 static void __memcg_stats_lock(void) 610 { 611 preempt_disable_nested(); 612 } 613 614 static void memcg_stats_unlock(void) 615 { 616 preempt_enable_nested(); 617 } 618 619 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) 620 { 621 unsigned int x; 622 623 if (!val) 624 return; 625 626 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); 627 628 x = __this_cpu_add_return(stats_updates, abs(val)); 629 if (x > MEMCG_CHARGE_BATCH) { 630 /* 631 * If stats_flush_threshold exceeds the threshold 632 * (>num_online_cpus()), cgroup stats update will be triggered 633 * in __mem_cgroup_flush_stats(). Increasing this var further 634 * is redundant and simply adds overhead in atomic update. 635 */ 636 if (atomic_read(&stats_flush_threshold) <= num_online_cpus()) 637 atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold); 638 __this_cpu_write(stats_updates, 0); 639 } 640 } 641 642 static void do_flush_stats(bool atomic) 643 { 644 /* 645 * We always flush the entire tree, so concurrent flushers can just 646 * skip. This avoids a thundering herd problem on the rstat global lock 647 * from memcg flushers (e.g. reclaim, refault, etc). 648 */ 649 if (atomic_read(&stats_flush_ongoing) || 650 atomic_xchg(&stats_flush_ongoing, 1)) 651 return; 652 653 WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME); 654 655 if (atomic) 656 cgroup_rstat_flush_atomic(root_mem_cgroup->css.cgroup); 657 else 658 cgroup_rstat_flush(root_mem_cgroup->css.cgroup); 659 660 atomic_set(&stats_flush_threshold, 0); 661 atomic_set(&stats_flush_ongoing, 0); 662 } 663 664 static bool should_flush_stats(void) 665 { 666 return atomic_read(&stats_flush_threshold) > num_online_cpus(); 667 } 668 669 void mem_cgroup_flush_stats(void) 670 { 671 if (should_flush_stats()) 672 do_flush_stats(false); 673 } 674 675 void mem_cgroup_flush_stats_atomic(void) 676 { 677 if (should_flush_stats()) 678 do_flush_stats(true); 679 } 680 681 void mem_cgroup_flush_stats_ratelimited(void) 682 { 683 if (time_after64(jiffies_64, READ_ONCE(flush_next_time))) 684 mem_cgroup_flush_stats(); 685 } 686 687 static void flush_memcg_stats_dwork(struct work_struct *w) 688 { 689 /* 690 * Always flush here so that flushing in latency-sensitive paths is 691 * as cheap as possible. 
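	 *
	 * Rough arithmetic behind the reader-side threshold (assuming
	 * MEMCG_CHARGE_BATCH is 64): each CPU folds its local stats_updates
	 * into stats_flush_threshold once it accumulates 64 events, and
	 * readers only flush once the threshold exceeds num_online_cpus().
	 * On an 8-CPU machine that tolerates roughly 64 * 8 = 512 pending
	 * updates between forced flushes; this periodic worker bounds the
	 * staleness to FLUSH_TIME regardless.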
	 */
	do_flush_stats(false);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
	PGPGIN,
	PGPGOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	ZSWPIN,
	ZSWPOUT,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	int i;

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx] - 1;
}

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_NR_STAT];
	unsigned long		events_prev[NR_MEMCG_EVENTS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long		nr_page_events;
	unsigned long		targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_NR_STAT];
	unsigned long		events_pending[NR_MEMCG_EVENTS];
};

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats->state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
	memcg_rstat_updated(memcg, val);
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * Callers from rmap rely on disabled preemption because they never
	 * update their counters from interrupt context. For those counters
	 * we check that the update is never performed from an interrupt
	 * context, while all other callers need to have interrupts disabled.
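	 *
	 * Concretely, a sketch of the two classes of callers (helper names
	 * as used by rmap and the generic wrappers elsewhere in the tree):
	 *
	 *	// rmap, task context only, disabled preemption is enough:
	 *	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
	 *
	 *	// everybody else must have interrupts disabled:
	 *	local_irq_save(flags);
	 *	__mod_lruvec_state(lruvec, NR_FILE_DIRTY, nr);
	 *	local_irq_restore(flags);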
818 */ 819 __memcg_stats_lock(); 820 if (IS_ENABLED(CONFIG_DEBUG_VM)) { 821 switch (idx) { 822 case NR_ANON_MAPPED: 823 case NR_FILE_MAPPED: 824 case NR_ANON_THPS: 825 case NR_SHMEM_PMDMAPPED: 826 case NR_FILE_PMDMAPPED: 827 WARN_ON_ONCE(!in_task()); 828 break; 829 default: 830 VM_WARN_ON_IRQS_ENABLED(); 831 } 832 } 833 834 /* Update memcg */ 835 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); 836 837 /* Update lruvec */ 838 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); 839 840 memcg_rstat_updated(memcg, val); 841 memcg_stats_unlock(); 842 } 843 844 /** 845 * __mod_lruvec_state - update lruvec memory statistics 846 * @lruvec: the lruvec 847 * @idx: the stat item 848 * @val: delta to add to the counter, can be negative 849 * 850 * The lruvec is the intersection of the NUMA node and a cgroup. This 851 * function updates the all three counters that are affected by a 852 * change of state at this level: per-node, per-cgroup, per-lruvec. 853 */ 854 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 855 int val) 856 { 857 /* Update node */ 858 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 859 860 /* Update memcg and lruvec */ 861 if (!mem_cgroup_disabled()) 862 __mod_memcg_lruvec_state(lruvec, idx, val); 863 } 864 865 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, 866 int val) 867 { 868 struct page *head = compound_head(page); /* rmap on tail pages */ 869 struct mem_cgroup *memcg; 870 pg_data_t *pgdat = page_pgdat(page); 871 struct lruvec *lruvec; 872 873 rcu_read_lock(); 874 memcg = page_memcg(head); 875 /* Untracked pages have no memcg, no lruvec. Update only the node */ 876 if (!memcg) { 877 rcu_read_unlock(); 878 __mod_node_page_state(pgdat, idx, val); 879 return; 880 } 881 882 lruvec = mem_cgroup_lruvec(memcg, pgdat); 883 __mod_lruvec_state(lruvec, idx, val); 884 rcu_read_unlock(); 885 } 886 EXPORT_SYMBOL(__mod_lruvec_page_state); 887 888 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) 889 { 890 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 891 struct mem_cgroup *memcg; 892 struct lruvec *lruvec; 893 894 rcu_read_lock(); 895 memcg = mem_cgroup_from_slab_obj(p); 896 897 /* 898 * Untracked pages have no memcg, no lruvec. Update only the 899 * node. If we reparent the slab objects to the root memcg, 900 * when we free the slab object, we need to update the per-memcg 901 * vmstats to keep it correct for the root memcg. 
902 */ 903 if (!memcg) { 904 __mod_node_page_state(pgdat, idx, val); 905 } else { 906 lruvec = mem_cgroup_lruvec(memcg, pgdat); 907 __mod_lruvec_state(lruvec, idx, val); 908 } 909 rcu_read_unlock(); 910 } 911 912 /** 913 * __count_memcg_events - account VM events in a cgroup 914 * @memcg: the memory cgroup 915 * @idx: the event item 916 * @count: the number of events that occurred 917 */ 918 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 919 unsigned long count) 920 { 921 int index = memcg_events_index(idx); 922 923 if (mem_cgroup_disabled() || index < 0) 924 return; 925 926 memcg_stats_lock(); 927 __this_cpu_add(memcg->vmstats_percpu->events[index], count); 928 memcg_rstat_updated(memcg, count); 929 memcg_stats_unlock(); 930 } 931 932 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 933 { 934 int index = memcg_events_index(event); 935 936 if (index < 0) 937 return 0; 938 return READ_ONCE(memcg->vmstats->events[index]); 939 } 940 941 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 942 { 943 long x = 0; 944 int cpu; 945 int index = memcg_events_index(event); 946 947 if (index < 0) 948 return 0; 949 950 for_each_possible_cpu(cpu) 951 x += per_cpu(memcg->vmstats_percpu->events[index], cpu); 952 return x; 953 } 954 955 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 956 int nr_pages) 957 { 958 /* pagein of a big page is an event. So, ignore page size */ 959 if (nr_pages > 0) 960 __count_memcg_events(memcg, PGPGIN, 1); 961 else { 962 __count_memcg_events(memcg, PGPGOUT, 1); 963 nr_pages = -nr_pages; /* for event */ 964 } 965 966 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 967 } 968 969 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 970 enum mem_cgroup_events_target target) 971 { 972 unsigned long val, next; 973 974 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 975 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 976 /* from time_after() in jiffies.h */ 977 if ((long)(next - val) < 0) { 978 switch (target) { 979 case MEM_CGROUP_TARGET_THRESH: 980 next = val + THRESHOLDS_EVENTS_TARGET; 981 break; 982 case MEM_CGROUP_TARGET_SOFTLIMIT: 983 next = val + SOFTLIMIT_EVENTS_TARGET; 984 break; 985 default: 986 break; 987 } 988 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 989 return true; 990 } 991 return false; 992 } 993 994 /* 995 * Check events in order. 996 * 997 */ 998 static void memcg_check_events(struct mem_cgroup *memcg, int nid) 999 { 1000 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 1001 return; 1002 1003 /* threshold event is triggered in finer grain than soft limit */ 1004 if (unlikely(mem_cgroup_event_ratelimit(memcg, 1005 MEM_CGROUP_TARGET_THRESH))) { 1006 bool do_softlimit; 1007 1008 do_softlimit = mem_cgroup_event_ratelimit(memcg, 1009 MEM_CGROUP_TARGET_SOFTLIMIT); 1010 mem_cgroup_threshold(memcg); 1011 if (unlikely(do_softlimit)) 1012 mem_cgroup_update_tree(memcg, nid); 1013 } 1014 } 1015 1016 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 1017 { 1018 /* 1019 * mm_update_next_owner() may clear mm->owner to NULL 1020 * if it races with swapoff, page migration, etc. 1021 * So this can be called with p == NULL. 
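	 *
	 * A typical caller therefore looks roughly like this (see
	 * get_mem_cgroup_from_mm() below for the full version that also
	 * takes a css reference):
	 *
	 *	rcu_read_lock();
	 *	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	 *	if (!memcg)
	 *		memcg = root_mem_cgroup;
	 *	rcu_read_unlock();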
1022 */ 1023 if (unlikely(!p)) 1024 return NULL; 1025 1026 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 1027 } 1028 EXPORT_SYMBOL(mem_cgroup_from_task); 1029 1030 static __always_inline struct mem_cgroup *active_memcg(void) 1031 { 1032 if (!in_task()) 1033 return this_cpu_read(int_active_memcg); 1034 else 1035 return current->active_memcg; 1036 } 1037 1038 /** 1039 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 1040 * @mm: mm from which memcg should be extracted. It can be NULL. 1041 * 1042 * Obtain a reference on mm->memcg and returns it if successful. If mm 1043 * is NULL, then the memcg is chosen as follows: 1044 * 1) The active memcg, if set. 1045 * 2) current->mm->memcg, if available 1046 * 3) root memcg 1047 * If mem_cgroup is disabled, NULL is returned. 1048 */ 1049 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 1050 { 1051 struct mem_cgroup *memcg; 1052 1053 if (mem_cgroup_disabled()) 1054 return NULL; 1055 1056 /* 1057 * Page cache insertions can happen without an 1058 * actual mm context, e.g. during disk probing 1059 * on boot, loopback IO, acct() writes etc. 1060 * 1061 * No need to css_get on root memcg as the reference 1062 * counting is disabled on the root level in the 1063 * cgroup core. See CSS_NO_REF. 1064 */ 1065 if (unlikely(!mm)) { 1066 memcg = active_memcg(); 1067 if (unlikely(memcg)) { 1068 /* remote memcg must hold a ref */ 1069 css_get(&memcg->css); 1070 return memcg; 1071 } 1072 mm = current->mm; 1073 if (unlikely(!mm)) 1074 return root_mem_cgroup; 1075 } 1076 1077 rcu_read_lock(); 1078 do { 1079 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1080 if (unlikely(!memcg)) 1081 memcg = root_mem_cgroup; 1082 } while (!css_tryget(&memcg->css)); 1083 rcu_read_unlock(); 1084 return memcg; 1085 } 1086 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 1087 1088 static __always_inline bool memcg_kmem_bypass(void) 1089 { 1090 /* Allow remote memcg charging from any context. */ 1091 if (unlikely(active_memcg())) 1092 return false; 1093 1094 /* Memcg to charge can't be determined. */ 1095 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD)) 1096 return true; 1097 1098 return false; 1099 } 1100 1101 /** 1102 * mem_cgroup_iter - iterate over memory cgroup hierarchy 1103 * @root: hierarchy root 1104 * @prev: previously returned memcg, NULL on first invocation 1105 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1106 * 1107 * Returns references to children of the hierarchy below @root, or 1108 * @root itself, or %NULL after a full round-trip. 1109 * 1110 * Caller must pass the return value in @prev on subsequent 1111 * invocations for reference counting, or use mem_cgroup_iter_break() 1112 * to cancel a hierarchy walk before the round-trip is complete. 1113 * 1114 * Reclaimers can specify a node in @reclaim to divide up the memcgs 1115 * in the hierarchy among all concurrent reclaimers operating on the 1116 * same node. 
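 *
 * Example of a full (non-reclaim) walk with early termination; this is
 * essentially what the for_each_mem_cgroup_tree() helper above expands
 * to, plus the mandatory mem_cgroup_iter_break() on early exit
 * (should_stop() is a stand-in for any caller-specific condition):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}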
1117 */ 1118 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1119 struct mem_cgroup *prev, 1120 struct mem_cgroup_reclaim_cookie *reclaim) 1121 { 1122 struct mem_cgroup_reclaim_iter *iter; 1123 struct cgroup_subsys_state *css = NULL; 1124 struct mem_cgroup *memcg = NULL; 1125 struct mem_cgroup *pos = NULL; 1126 1127 if (mem_cgroup_disabled()) 1128 return NULL; 1129 1130 if (!root) 1131 root = root_mem_cgroup; 1132 1133 rcu_read_lock(); 1134 1135 if (reclaim) { 1136 struct mem_cgroup_per_node *mz; 1137 1138 mz = root->nodeinfo[reclaim->pgdat->node_id]; 1139 iter = &mz->iter; 1140 1141 /* 1142 * On start, join the current reclaim iteration cycle. 1143 * Exit when a concurrent walker completes it. 1144 */ 1145 if (!prev) 1146 reclaim->generation = iter->generation; 1147 else if (reclaim->generation != iter->generation) 1148 goto out_unlock; 1149 1150 while (1) { 1151 pos = READ_ONCE(iter->position); 1152 if (!pos || css_tryget(&pos->css)) 1153 break; 1154 /* 1155 * css reference reached zero, so iter->position will 1156 * be cleared by ->css_released. However, we should not 1157 * rely on this happening soon, because ->css_released 1158 * is called from a work queue, and by busy-waiting we 1159 * might block it. So we clear iter->position right 1160 * away. 1161 */ 1162 (void)cmpxchg(&iter->position, pos, NULL); 1163 } 1164 } else if (prev) { 1165 pos = prev; 1166 } 1167 1168 if (pos) 1169 css = &pos->css; 1170 1171 for (;;) { 1172 css = css_next_descendant_pre(css, &root->css); 1173 if (!css) { 1174 /* 1175 * Reclaimers share the hierarchy walk, and a 1176 * new one might jump in right at the end of 1177 * the hierarchy - make sure they see at least 1178 * one group and restart from the beginning. 1179 */ 1180 if (!prev) 1181 continue; 1182 break; 1183 } 1184 1185 /* 1186 * Verify the css and acquire a reference. The root 1187 * is provided by the caller, so we know it's alive 1188 * and kicking, and don't take an extra reference. 1189 */ 1190 if (css == &root->css || css_tryget(css)) { 1191 memcg = mem_cgroup_from_css(css); 1192 break; 1193 } 1194 } 1195 1196 if (reclaim) { 1197 /* 1198 * The position could have already been updated by a competing 1199 * thread, so check that the value hasn't changed since we read 1200 * it to avoid reclaiming from the same cgroup twice. 
1201 */ 1202 (void)cmpxchg(&iter->position, pos, memcg); 1203 1204 if (pos) 1205 css_put(&pos->css); 1206 1207 if (!memcg) 1208 iter->generation++; 1209 } 1210 1211 out_unlock: 1212 rcu_read_unlock(); 1213 if (prev && prev != root) 1214 css_put(&prev->css); 1215 1216 return memcg; 1217 } 1218 1219 /** 1220 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1221 * @root: hierarchy root 1222 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1223 */ 1224 void mem_cgroup_iter_break(struct mem_cgroup *root, 1225 struct mem_cgroup *prev) 1226 { 1227 if (!root) 1228 root = root_mem_cgroup; 1229 if (prev && prev != root) 1230 css_put(&prev->css); 1231 } 1232 1233 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1234 struct mem_cgroup *dead_memcg) 1235 { 1236 struct mem_cgroup_reclaim_iter *iter; 1237 struct mem_cgroup_per_node *mz; 1238 int nid; 1239 1240 for_each_node(nid) { 1241 mz = from->nodeinfo[nid]; 1242 iter = &mz->iter; 1243 cmpxchg(&iter->position, dead_memcg, NULL); 1244 } 1245 } 1246 1247 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1248 { 1249 struct mem_cgroup *memcg = dead_memcg; 1250 struct mem_cgroup *last; 1251 1252 do { 1253 __invalidate_reclaim_iterators(memcg, dead_memcg); 1254 last = memcg; 1255 } while ((memcg = parent_mem_cgroup(memcg))); 1256 1257 /* 1258 * When cgroup1 non-hierarchy mode is used, 1259 * parent_mem_cgroup() does not walk all the way up to the 1260 * cgroup root (root_mem_cgroup). So we have to handle 1261 * dead_memcg from cgroup root separately. 1262 */ 1263 if (!mem_cgroup_is_root(last)) 1264 __invalidate_reclaim_iterators(root_mem_cgroup, 1265 dead_memcg); 1266 } 1267 1268 /** 1269 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1270 * @memcg: hierarchy root 1271 * @fn: function to call for each task 1272 * @arg: argument passed to @fn 1273 * 1274 * This function iterates over tasks attached to @memcg or to any of its 1275 * descendants and calls @fn for each task. If @fn returns a non-zero 1276 * value, the function breaks the iteration loop and returns the value. 1277 * Otherwise, it will iterate over all tasks and return 0. 1278 * 1279 * This function must not be called for the root memory cgroup. 1280 */ 1281 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1282 int (*fn)(struct task_struct *, void *), void *arg) 1283 { 1284 struct mem_cgroup *iter; 1285 int ret = 0; 1286 1287 BUG_ON(mem_cgroup_is_root(memcg)); 1288 1289 for_each_mem_cgroup_tree(iter, memcg) { 1290 struct css_task_iter it; 1291 struct task_struct *task; 1292 1293 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1294 while (!ret && (task = css_task_iter_next(&it))) 1295 ret = fn(task, arg); 1296 css_task_iter_end(&it); 1297 if (ret) { 1298 mem_cgroup_iter_break(memcg, iter); 1299 break; 1300 } 1301 } 1302 return ret; 1303 } 1304 1305 #ifdef CONFIG_DEBUG_VM 1306 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) 1307 { 1308 struct mem_cgroup *memcg; 1309 1310 if (mem_cgroup_disabled()) 1311 return; 1312 1313 memcg = folio_memcg(folio); 1314 1315 if (!memcg) 1316 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio); 1317 else 1318 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio); 1319 } 1320 #endif 1321 1322 /** 1323 * folio_lruvec_lock - Lock the lruvec for a folio. 1324 * @folio: Pointer to the folio. 
1325 * 1326 * These functions are safe to use under any of the following conditions: 1327 * - folio locked 1328 * - folio_test_lru false 1329 * - folio_memcg_lock() 1330 * - folio frozen (refcount of 0) 1331 * 1332 * Return: The lruvec this folio is on with its lock held. 1333 */ 1334 struct lruvec *folio_lruvec_lock(struct folio *folio) 1335 { 1336 struct lruvec *lruvec = folio_lruvec(folio); 1337 1338 spin_lock(&lruvec->lru_lock); 1339 lruvec_memcg_debug(lruvec, folio); 1340 1341 return lruvec; 1342 } 1343 1344 /** 1345 * folio_lruvec_lock_irq - Lock the lruvec for a folio. 1346 * @folio: Pointer to the folio. 1347 * 1348 * These functions are safe to use under any of the following conditions: 1349 * - folio locked 1350 * - folio_test_lru false 1351 * - folio_memcg_lock() 1352 * - folio frozen (refcount of 0) 1353 * 1354 * Return: The lruvec this folio is on with its lock held and interrupts 1355 * disabled. 1356 */ 1357 struct lruvec *folio_lruvec_lock_irq(struct folio *folio) 1358 { 1359 struct lruvec *lruvec = folio_lruvec(folio); 1360 1361 spin_lock_irq(&lruvec->lru_lock); 1362 lruvec_memcg_debug(lruvec, folio); 1363 1364 return lruvec; 1365 } 1366 1367 /** 1368 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio. 1369 * @folio: Pointer to the folio. 1370 * @flags: Pointer to irqsave flags. 1371 * 1372 * These functions are safe to use under any of the following conditions: 1373 * - folio locked 1374 * - folio_test_lru false 1375 * - folio_memcg_lock() 1376 * - folio frozen (refcount of 0) 1377 * 1378 * Return: The lruvec this folio is on with its lock held and interrupts 1379 * disabled. 1380 */ 1381 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, 1382 unsigned long *flags) 1383 { 1384 struct lruvec *lruvec = folio_lruvec(folio); 1385 1386 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1387 lruvec_memcg_debug(lruvec, folio); 1388 1389 return lruvec; 1390 } 1391 1392 /** 1393 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1394 * @lruvec: mem_cgroup per zone lru vector 1395 * @lru: index of lru list the page is sitting on 1396 * @zid: zone id of the accounted pages 1397 * @nr_pages: positive when adding or negative when removing 1398 * 1399 * This function must be called under lru_lock, just before a page is added 1400 * to or just after a page is removed from an lru list. 1401 */ 1402 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1403 int zid, int nr_pages) 1404 { 1405 struct mem_cgroup_per_node *mz; 1406 unsigned long *lru_size; 1407 long size; 1408 1409 if (mem_cgroup_disabled()) 1410 return; 1411 1412 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1413 lru_size = &mz->lru_zone_size[zid][lru]; 1414 1415 if (nr_pages < 0) 1416 *lru_size += nr_pages; 1417 1418 size = *lru_size; 1419 if (WARN_ONCE(size < 0, 1420 "%s(%p, %d, %d): lru_size %ld\n", 1421 __func__, lruvec, lru, nr_pages, size)) { 1422 VM_BUG_ON(1); 1423 *lru_size = 0; 1424 } 1425 1426 if (nr_pages > 0) 1427 *lru_size += nr_pages; 1428 } 1429 1430 /** 1431 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1432 * @memcg: the memory cgroup 1433 * 1434 * Returns the maximum amount of memory @mem can be charged with, in 1435 * pages. 
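 *
 * Worked example: with memory.max at 1000 pages and usage at 900, the
 * margin is 100 pages. Under legacy memory+swap accounting, if memsw.max
 * is 1200 pages and memsw usage is 1150, the margin shrinks to
 * min(100, 50) = 50 pages; once memsw usage reaches its limit the margin
 * is 0 regardless of the plain memory headroom.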
1436 */ 1437 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1438 { 1439 unsigned long margin = 0; 1440 unsigned long count; 1441 unsigned long limit; 1442 1443 count = page_counter_read(&memcg->memory); 1444 limit = READ_ONCE(memcg->memory.max); 1445 if (count < limit) 1446 margin = limit - count; 1447 1448 if (do_memsw_account()) { 1449 count = page_counter_read(&memcg->memsw); 1450 limit = READ_ONCE(memcg->memsw.max); 1451 if (count < limit) 1452 margin = min(margin, limit - count); 1453 else 1454 margin = 0; 1455 } 1456 1457 return margin; 1458 } 1459 1460 /* 1461 * A routine for checking "mem" is under move_account() or not. 1462 * 1463 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1464 * moving cgroups. This is for waiting at high-memory pressure 1465 * caused by "move". 1466 */ 1467 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1468 { 1469 struct mem_cgroup *from; 1470 struct mem_cgroup *to; 1471 bool ret = false; 1472 /* 1473 * Unlike task_move routines, we access mc.to, mc.from not under 1474 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1475 */ 1476 spin_lock(&mc.lock); 1477 from = mc.from; 1478 to = mc.to; 1479 if (!from) 1480 goto unlock; 1481 1482 ret = mem_cgroup_is_descendant(from, memcg) || 1483 mem_cgroup_is_descendant(to, memcg); 1484 unlock: 1485 spin_unlock(&mc.lock); 1486 return ret; 1487 } 1488 1489 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1490 { 1491 if (mc.moving_task && current != mc.moving_task) { 1492 if (mem_cgroup_under_move(memcg)) { 1493 DEFINE_WAIT(wait); 1494 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1495 /* moving charge context might have finished. */ 1496 if (mc.moving_task) 1497 schedule(); 1498 finish_wait(&mc.waitq, &wait); 1499 return true; 1500 } 1501 } 1502 return false; 1503 } 1504 1505 struct memory_stat { 1506 const char *name; 1507 unsigned int idx; 1508 }; 1509 1510 static const struct memory_stat memory_stats[] = { 1511 { "anon", NR_ANON_MAPPED }, 1512 { "file", NR_FILE_PAGES }, 1513 { "kernel", MEMCG_KMEM }, 1514 { "kernel_stack", NR_KERNEL_STACK_KB }, 1515 { "pagetables", NR_PAGETABLE }, 1516 { "sec_pagetables", NR_SECONDARY_PAGETABLE }, 1517 { "percpu", MEMCG_PERCPU_B }, 1518 { "sock", MEMCG_SOCK }, 1519 { "vmalloc", MEMCG_VMALLOC }, 1520 { "shmem", NR_SHMEM }, 1521 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 1522 { "zswap", MEMCG_ZSWAP_B }, 1523 { "zswapped", MEMCG_ZSWAPPED }, 1524 #endif 1525 { "file_mapped", NR_FILE_MAPPED }, 1526 { "file_dirty", NR_FILE_DIRTY }, 1527 { "file_writeback", NR_WRITEBACK }, 1528 #ifdef CONFIG_SWAP 1529 { "swapcached", NR_SWAPCACHE }, 1530 #endif 1531 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1532 { "anon_thp", NR_ANON_THPS }, 1533 { "file_thp", NR_FILE_THPS }, 1534 { "shmem_thp", NR_SHMEM_THPS }, 1535 #endif 1536 { "inactive_anon", NR_INACTIVE_ANON }, 1537 { "active_anon", NR_ACTIVE_ANON }, 1538 { "inactive_file", NR_INACTIVE_FILE }, 1539 { "active_file", NR_ACTIVE_FILE }, 1540 { "unevictable", NR_UNEVICTABLE }, 1541 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, 1542 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, 1543 1544 /* The memory events */ 1545 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON }, 1546 { "workingset_refault_file", WORKINGSET_REFAULT_FILE }, 1547 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON }, 1548 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE }, 1549 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON }, 1550 { "workingset_restore_file", 
WORKINGSET_RESTORE_FILE }, 1551 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM }, 1552 }; 1553 1554 /* Translate stat items to the correct unit for memory.stat output */ 1555 static int memcg_page_state_unit(int item) 1556 { 1557 switch (item) { 1558 case MEMCG_PERCPU_B: 1559 case MEMCG_ZSWAP_B: 1560 case NR_SLAB_RECLAIMABLE_B: 1561 case NR_SLAB_UNRECLAIMABLE_B: 1562 case WORKINGSET_REFAULT_ANON: 1563 case WORKINGSET_REFAULT_FILE: 1564 case WORKINGSET_ACTIVATE_ANON: 1565 case WORKINGSET_ACTIVATE_FILE: 1566 case WORKINGSET_RESTORE_ANON: 1567 case WORKINGSET_RESTORE_FILE: 1568 case WORKINGSET_NODERECLAIM: 1569 return 1; 1570 case NR_KERNEL_STACK_KB: 1571 return SZ_1K; 1572 default: 1573 return PAGE_SIZE; 1574 } 1575 } 1576 1577 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, 1578 int item) 1579 { 1580 return memcg_page_state(memcg, item) * memcg_page_state_unit(item); 1581 } 1582 1583 static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize) 1584 { 1585 struct seq_buf s; 1586 int i; 1587 1588 seq_buf_init(&s, buf, bufsize); 1589 1590 /* 1591 * Provide statistics on the state of the memory subsystem as 1592 * well as cumulative event counters that show past behavior. 1593 * 1594 * This list is ordered following a combination of these gradients: 1595 * 1) generic big picture -> specifics and details 1596 * 2) reflecting userspace activity -> reflecting kernel heuristics 1597 * 1598 * Current memory state: 1599 */ 1600 mem_cgroup_flush_stats(); 1601 1602 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1603 u64 size; 1604 1605 size = memcg_page_state_output(memcg, memory_stats[i].idx); 1606 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); 1607 1608 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1609 size += memcg_page_state_output(memcg, 1610 NR_SLAB_RECLAIMABLE_B); 1611 seq_buf_printf(&s, "slab %llu\n", size); 1612 } 1613 } 1614 1615 /* Accumulated memory events */ 1616 seq_buf_printf(&s, "pgscan %lu\n", 1617 memcg_events(memcg, PGSCAN_KSWAPD) + 1618 memcg_events(memcg, PGSCAN_DIRECT) + 1619 memcg_events(memcg, PGSCAN_KHUGEPAGED)); 1620 seq_buf_printf(&s, "pgsteal %lu\n", 1621 memcg_events(memcg, PGSTEAL_KSWAPD) + 1622 memcg_events(memcg, PGSTEAL_DIRECT) + 1623 memcg_events(memcg, PGSTEAL_KHUGEPAGED)); 1624 1625 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) { 1626 if (memcg_vm_event_stat[i] == PGPGIN || 1627 memcg_vm_event_stat[i] == PGPGOUT) 1628 continue; 1629 1630 seq_buf_printf(&s, "%s %lu\n", 1631 vm_event_name(memcg_vm_event_stat[i]), 1632 memcg_events(memcg, memcg_vm_event_stat[i])); 1633 } 1634 1635 /* The above should easily fit into one page */ 1636 WARN_ON_ONCE(seq_buf_has_overflowed(&s)); 1637 } 1638 1639 #define K(x) ((x) << (PAGE_SHIFT-10)) 1640 /** 1641 * mem_cgroup_print_oom_context: Print OOM information relevant to 1642 * memory controller. 
1643 * @memcg: The memory cgroup that went over limit 1644 * @p: Task that is going to be killed 1645 * 1646 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1647 * enabled 1648 */ 1649 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1650 { 1651 rcu_read_lock(); 1652 1653 if (memcg) { 1654 pr_cont(",oom_memcg="); 1655 pr_cont_cgroup_path(memcg->css.cgroup); 1656 } else 1657 pr_cont(",global_oom"); 1658 if (p) { 1659 pr_cont(",task_memcg="); 1660 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1661 } 1662 rcu_read_unlock(); 1663 } 1664 1665 /** 1666 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1667 * memory controller. 1668 * @memcg: The memory cgroup that went over limit 1669 */ 1670 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1671 { 1672 /* Use static buffer, for the caller is holding oom_lock. */ 1673 static char buf[PAGE_SIZE]; 1674 1675 lockdep_assert_held(&oom_lock); 1676 1677 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1678 K((u64)page_counter_read(&memcg->memory)), 1679 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1680 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1681 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1682 K((u64)page_counter_read(&memcg->swap)), 1683 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1684 else { 1685 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1686 K((u64)page_counter_read(&memcg->memsw)), 1687 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1688 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1689 K((u64)page_counter_read(&memcg->kmem)), 1690 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1691 } 1692 1693 pr_info("Memory cgroup stats for "); 1694 pr_cont_cgroup_path(memcg->css.cgroup); 1695 pr_cont(":"); 1696 memory_stat_format(memcg, buf, sizeof(buf)); 1697 pr_info("%s", buf); 1698 } 1699 1700 /* 1701 * Return the memory (and swap, if configured) limit for a memcg. 1702 */ 1703 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1704 { 1705 unsigned long max = READ_ONCE(memcg->memory.max); 1706 1707 if (do_memsw_account()) { 1708 if (mem_cgroup_swappiness(memcg)) { 1709 /* Calculate swap excess capacity from memsw limit */ 1710 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1711 1712 max += min(swap, (unsigned long)total_swap_pages); 1713 } 1714 } else { 1715 if (mem_cgroup_swappiness(memcg)) 1716 max += min(READ_ONCE(memcg->swap.max), 1717 (unsigned long)total_swap_pages); 1718 } 1719 return max; 1720 } 1721 1722 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) 1723 { 1724 return page_counter_read(&memcg->memory); 1725 } 1726 1727 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1728 int order) 1729 { 1730 struct oom_control oc = { 1731 .zonelist = NULL, 1732 .nodemask = NULL, 1733 .memcg = memcg, 1734 .gfp_mask = gfp_mask, 1735 .order = order, 1736 }; 1737 bool ret = true; 1738 1739 if (mutex_lock_killable(&oom_lock)) 1740 return true; 1741 1742 if (mem_cgroup_margin(memcg) >= (1 << order)) 1743 goto unlock; 1744 1745 /* 1746 * A few threads which were not waiting at mutex_lock_killable() can 1747 * fail to bail out. Therefore, check again after holding oom_lock. 
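	 *
	 * For example, an order-2 charge needs a margin of 1 << 2 = 4 pages;
	 * if a racing task uncharged memory or an OOM kill completed while we
	 * slept on oom_lock, the margin recheck above can already see >= 4
	 * free pages and we return without invoking the OOM killer again.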
	 */
	ret = task_is_dying() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too aggressive, so we do
				 * not reclaim too much, nor so little that we
				 * keep coming back to reclaim from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot grab the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we already set up, up to the failing
		 * subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
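	 *
	 * Example of the race this guards against: mem_cgroup_mark_under_oom()
	 * walks a tree that only contains child A and sets A->under_oom = 1;
	 * child B is created before the matching unmark, so the unmark walk
	 * also visits B. Decrementing unconditionally would underflow B's
	 * counter, hence the "> 0" check below.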
1885 */ 1886 spin_lock(&memcg_oom_lock); 1887 for_each_mem_cgroup_tree(iter, memcg) 1888 if (iter->under_oom > 0) 1889 iter->under_oom--; 1890 spin_unlock(&memcg_oom_lock); 1891 } 1892 1893 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1894 1895 struct oom_wait_info { 1896 struct mem_cgroup *memcg; 1897 wait_queue_entry_t wait; 1898 }; 1899 1900 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1901 unsigned mode, int sync, void *arg) 1902 { 1903 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1904 struct mem_cgroup *oom_wait_memcg; 1905 struct oom_wait_info *oom_wait_info; 1906 1907 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1908 oom_wait_memcg = oom_wait_info->memcg; 1909 1910 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1911 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1912 return 0; 1913 return autoremove_wake_function(wait, mode, sync, arg); 1914 } 1915 1916 static void memcg_oom_recover(struct mem_cgroup *memcg) 1917 { 1918 /* 1919 * For the following lockless ->under_oom test, the only required 1920 * guarantee is that it must see the state asserted by an OOM when 1921 * this function is called as a result of userland actions 1922 * triggered by the notification of the OOM. This is trivially 1923 * achieved by invoking mem_cgroup_mark_under_oom() before 1924 * triggering notification. 1925 */ 1926 if (memcg && memcg->under_oom) 1927 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1928 } 1929 1930 /* 1931 * Returns true if successfully killed one or more processes. Though in some 1932 * corner cases it can return true even without killing any process. 1933 */ 1934 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1935 { 1936 bool locked, ret; 1937 1938 if (order > PAGE_ALLOC_COSTLY_ORDER) 1939 return false; 1940 1941 memcg_memory_event(memcg, MEMCG_OOM); 1942 1943 /* 1944 * We are in the middle of the charge context here, so we 1945 * don't want to block when potentially sitting on a callstack 1946 * that holds all kinds of filesystem and mm locks. 1947 * 1948 * cgroup1 allows disabling the OOM killer and waiting for outside 1949 * handling until the charge can succeed; remember the context and put 1950 * the task to sleep at the end of the page fault when all locks are 1951 * released. 1952 * 1953 * On the other hand, in-kernel OOM killer allows for an async victim 1954 * memory reclaim (oom_reaper) and that means that we are not solely 1955 * relying on the oom victim to make a forward progress and we can 1956 * invoke the oom killer here. 1957 * 1958 * Please note that mem_cgroup_out_of_memory might fail to find a 1959 * victim and then we have to bail out from the charge path. 
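	 *
	 * A rough sketch of the cgroup1 userspace-OOM flow described above
	 * (the fault-exit hook that calls mem_cgroup_oom_synchronize() lives
	 * in the page fault / OOM code outside this file):
	 *
	 *	try_charge()                        // fails, cannot reclaim
	 *	  mem_cgroup_oom()                  // oom_kill_disable set:
	 *	    current->memcg_in_oom = memcg;  // remember and return
	 *	... fault handler unwinds, releases its locks ...
	 *	mem_cgroup_oom_synchronize(true)    // sleep until userspace
	 *	                                    // lifts the limit or kills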
1960 */ 1961 if (READ_ONCE(memcg->oom_kill_disable)) { 1962 if (current->in_user_fault) { 1963 css_get(&memcg->css); 1964 current->memcg_in_oom = memcg; 1965 current->memcg_oom_gfp_mask = mask; 1966 current->memcg_oom_order = order; 1967 } 1968 return false; 1969 } 1970 1971 mem_cgroup_mark_under_oom(memcg); 1972 1973 locked = mem_cgroup_oom_trylock(memcg); 1974 1975 if (locked) 1976 mem_cgroup_oom_notify(memcg); 1977 1978 mem_cgroup_unmark_under_oom(memcg); 1979 ret = mem_cgroup_out_of_memory(memcg, mask, order); 1980 1981 if (locked) 1982 mem_cgroup_oom_unlock(memcg); 1983 1984 return ret; 1985 } 1986 1987 /** 1988 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1989 * @handle: actually kill/wait or just clean up the OOM state 1990 * 1991 * This has to be called at the end of a page fault if the memcg OOM 1992 * handler was enabled. 1993 * 1994 * Memcg supports userspace OOM handling where failed allocations must 1995 * sleep on a waitqueue until the userspace task resolves the 1996 * situation. Sleeping directly in the charge context with all kinds 1997 * of locks held is not a good idea, instead we remember an OOM state 1998 * in the task and mem_cgroup_oom_synchronize() has to be called at 1999 * the end of the page fault to complete the OOM handling. 2000 * 2001 * Returns %true if an ongoing memcg OOM situation was detected and 2002 * completed, %false otherwise. 2003 */ 2004 bool mem_cgroup_oom_synchronize(bool handle) 2005 { 2006 struct mem_cgroup *memcg = current->memcg_in_oom; 2007 struct oom_wait_info owait; 2008 bool locked; 2009 2010 /* OOM is global, do not handle */ 2011 if (!memcg) 2012 return false; 2013 2014 if (!handle) 2015 goto cleanup; 2016 2017 owait.memcg = memcg; 2018 owait.wait.flags = 0; 2019 owait.wait.func = memcg_oom_wake_function; 2020 owait.wait.private = current; 2021 INIT_LIST_HEAD(&owait.wait.entry); 2022 2023 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2024 mem_cgroup_mark_under_oom(memcg); 2025 2026 locked = mem_cgroup_oom_trylock(memcg); 2027 2028 if (locked) 2029 mem_cgroup_oom_notify(memcg); 2030 2031 if (locked && !READ_ONCE(memcg->oom_kill_disable)) { 2032 mem_cgroup_unmark_under_oom(memcg); 2033 finish_wait(&memcg_oom_waitq, &owait.wait); 2034 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 2035 current->memcg_oom_order); 2036 } else { 2037 schedule(); 2038 mem_cgroup_unmark_under_oom(memcg); 2039 finish_wait(&memcg_oom_waitq, &owait.wait); 2040 } 2041 2042 if (locked) { 2043 mem_cgroup_oom_unlock(memcg); 2044 /* 2045 * There is no guarantee that an OOM-lock contender 2046 * sees the wakeups triggered by the OOM kill 2047 * uncharges. Wake any sleepers explicitly. 2048 */ 2049 memcg_oom_recover(memcg); 2050 } 2051 cleanup: 2052 current->memcg_in_oom = NULL; 2053 css_put(&memcg->css); 2054 return true; 2055 } 2056 2057 /** 2058 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 2059 * @victim: task to be killed by the OOM killer 2060 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 2061 * 2062 * Returns a pointer to a memory cgroup, which has to be cleaned up 2063 * by killing all belonging OOM-killable tasks. 2064 * 2065 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 
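 *
 * A minimal usage sketch (this mirrors what the OOM killer does; the
 * snippet is illustrative, not a new interface):
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 *	if (oom_group) {
 *		mem_cgroup_print_oom_group(oom_group);
 *		...kill all OOM-killable tasks belonging to oom_group...
 *		mem_cgroup_put(oom_group);
 *	}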
2066 */ 2067 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 2068 struct mem_cgroup *oom_domain) 2069 { 2070 struct mem_cgroup *oom_group = NULL; 2071 struct mem_cgroup *memcg; 2072 2073 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2074 return NULL; 2075 2076 if (!oom_domain) 2077 oom_domain = root_mem_cgroup; 2078 2079 rcu_read_lock(); 2080 2081 memcg = mem_cgroup_from_task(victim); 2082 if (mem_cgroup_is_root(memcg)) 2083 goto out; 2084 2085 /* 2086 * If the victim task has been asynchronously moved to a different 2087 * memory cgroup, we might end up killing tasks outside oom_domain. 2088 * In this case it's better to ignore memory.group.oom. 2089 */ 2090 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 2091 goto out; 2092 2093 /* 2094 * Traverse the memory cgroup hierarchy from the victim task's 2095 * cgroup up to the OOMing cgroup (or root) to find the 2096 * highest-level memory cgroup with oom.group set. 2097 */ 2098 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 2099 if (READ_ONCE(memcg->oom_group)) 2100 oom_group = memcg; 2101 2102 if (memcg == oom_domain) 2103 break; 2104 } 2105 2106 if (oom_group) 2107 css_get(&oom_group->css); 2108 out: 2109 rcu_read_unlock(); 2110 2111 return oom_group; 2112 } 2113 2114 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2115 { 2116 pr_info("Tasks in "); 2117 pr_cont_cgroup_path(memcg->css.cgroup); 2118 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2119 } 2120 2121 /** 2122 * folio_memcg_lock - Bind a folio to its memcg. 2123 * @folio: The folio. 2124 * 2125 * This function prevents unlocked LRU folios from being moved to 2126 * another cgroup. 2127 * 2128 * It ensures lifetime of the bound memcg. The caller is responsible 2129 * for the lifetime of the folio. 2130 */ 2131 void folio_memcg_lock(struct folio *folio) 2132 { 2133 struct mem_cgroup *memcg; 2134 unsigned long flags; 2135 2136 /* 2137 * The RCU lock is held throughout the transaction. The fast 2138 * path can get away without acquiring the memcg->move_lock 2139 * because page moving starts with an RCU grace period. 2140 */ 2141 rcu_read_lock(); 2142 2143 if (mem_cgroup_disabled()) 2144 return; 2145 again: 2146 memcg = folio_memcg(folio); 2147 if (unlikely(!memcg)) 2148 return; 2149 2150 #ifdef CONFIG_PROVE_LOCKING 2151 local_irq_save(flags); 2152 might_lock(&memcg->move_lock); 2153 local_irq_restore(flags); 2154 #endif 2155 2156 if (atomic_read(&memcg->moving_account) <= 0) 2157 return; 2158 2159 spin_lock_irqsave(&memcg->move_lock, flags); 2160 if (memcg != folio_memcg(folio)) { 2161 spin_unlock_irqrestore(&memcg->move_lock, flags); 2162 goto again; 2163 } 2164 2165 /* 2166 * When charge migration first begins, we can have multiple 2167 * critical sections holding the fast-path RCU lock and one 2168 * holding the slowpath move_lock. Track the task who has the 2169 * move_lock for unlock_page_memcg(). 
2170 */ 2171 memcg->move_lock_task = current; 2172 memcg->move_lock_flags = flags; 2173 } 2174 2175 void lock_page_memcg(struct page *page) 2176 { 2177 folio_memcg_lock(page_folio(page)); 2178 } 2179 2180 static void __folio_memcg_unlock(struct mem_cgroup *memcg) 2181 { 2182 if (memcg && memcg->move_lock_task == current) { 2183 unsigned long flags = memcg->move_lock_flags; 2184 2185 memcg->move_lock_task = NULL; 2186 memcg->move_lock_flags = 0; 2187 2188 spin_unlock_irqrestore(&memcg->move_lock, flags); 2189 } 2190 2191 rcu_read_unlock(); 2192 } 2193 2194 /** 2195 * folio_memcg_unlock - Release the binding between a folio and its memcg. 2196 * @folio: The folio. 2197 * 2198 * This releases the binding created by folio_memcg_lock(). This does 2199 * not change the accounting of this folio to its memcg, but it does 2200 * permit others to change it. 2201 */ 2202 void folio_memcg_unlock(struct folio *folio) 2203 { 2204 __folio_memcg_unlock(folio_memcg(folio)); 2205 } 2206 2207 void unlock_page_memcg(struct page *page) 2208 { 2209 folio_memcg_unlock(page_folio(page)); 2210 } 2211 2212 struct memcg_stock_pcp { 2213 local_lock_t stock_lock; 2214 struct mem_cgroup *cached; /* this never be root cgroup */ 2215 unsigned int nr_pages; 2216 2217 #ifdef CONFIG_MEMCG_KMEM 2218 struct obj_cgroup *cached_objcg; 2219 struct pglist_data *cached_pgdat; 2220 unsigned int nr_bytes; 2221 int nr_slab_reclaimable_b; 2222 int nr_slab_unreclaimable_b; 2223 #endif 2224 2225 struct work_struct work; 2226 unsigned long flags; 2227 #define FLUSHING_CACHED_CHARGE 0 2228 }; 2229 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = { 2230 .stock_lock = INIT_LOCAL_LOCK(stock_lock), 2231 }; 2232 static DEFINE_MUTEX(percpu_charge_mutex); 2233 2234 #ifdef CONFIG_MEMCG_KMEM 2235 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock); 2236 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2237 struct mem_cgroup *root_memcg); 2238 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages); 2239 2240 #else 2241 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) 2242 { 2243 return NULL; 2244 } 2245 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2246 struct mem_cgroup *root_memcg) 2247 { 2248 return false; 2249 } 2250 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages) 2251 { 2252 } 2253 #endif 2254 2255 /** 2256 * consume_stock: Try to consume stocked charge on this cpu. 2257 * @memcg: memcg to consume from. 2258 * @nr_pages: how many pages to charge. 2259 * 2260 * The charges will only happen if @memcg matches the current cpu's memcg 2261 * stock, and at least @nr_pages are available in that stock. Failure to 2262 * service an allocation will refill the stock. 2263 * 2264 * returns true if successful, false otherwise. 2265 */ 2266 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2267 { 2268 struct memcg_stock_pcp *stock; 2269 unsigned long flags; 2270 bool ret = false; 2271 2272 if (nr_pages > MEMCG_CHARGE_BATCH) 2273 return ret; 2274 2275 local_lock_irqsave(&memcg_stock.stock_lock, flags); 2276 2277 stock = this_cpu_ptr(&memcg_stock); 2278 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2279 stock->nr_pages -= nr_pages; 2280 ret = true; 2281 } 2282 2283 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 2284 2285 return ret; 2286 } 2287 2288 /* 2289 * Returns stocks cached in percpu and reset cached information. 
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (!old)
		return;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		stock->nr_pages = 0;
	}

	css_put(&old->css);
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;

	/*
	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
	 * drain_stock races is that we always operate on the local CPU stock
	 * here with IRQs disabled.
	 */
	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	old = drain_obj_stock(stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	if (old)
		obj_cgroup_put(old);
}

/*
 * Cache charges (val) in the local per-CPU area.
 * They will be consumed by consume_stock() later.
 */
static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		css_get(&memcg->css);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);
}

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	unsigned long flags;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	__refill_stock(memcg, nr_pages);
	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
}

/*
 * Drains all per-CPU charge caches for the given root_memcg, i.e. for the
 * whole subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that a system-wide "drain" is running.
	 * We do not care about races with cpu hotplug, because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2377 */ 2378 migrate_disable(); 2379 curcpu = smp_processor_id(); 2380 for_each_online_cpu(cpu) { 2381 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2382 struct mem_cgroup *memcg; 2383 bool flush = false; 2384 2385 rcu_read_lock(); 2386 memcg = stock->cached; 2387 if (memcg && stock->nr_pages && 2388 mem_cgroup_is_descendant(memcg, root_memcg)) 2389 flush = true; 2390 else if (obj_stock_flush_required(stock, root_memcg)) 2391 flush = true; 2392 rcu_read_unlock(); 2393 2394 if (flush && 2395 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2396 if (cpu == curcpu) 2397 drain_local_stock(&stock->work); 2398 else if (!cpu_is_isolated(cpu)) 2399 schedule_work_on(cpu, &stock->work); 2400 } 2401 } 2402 migrate_enable(); 2403 mutex_unlock(&percpu_charge_mutex); 2404 } 2405 2406 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2407 { 2408 struct memcg_stock_pcp *stock; 2409 2410 stock = &per_cpu(memcg_stock, cpu); 2411 drain_stock(stock); 2412 2413 return 0; 2414 } 2415 2416 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2417 unsigned int nr_pages, 2418 gfp_t gfp_mask) 2419 { 2420 unsigned long nr_reclaimed = 0; 2421 2422 do { 2423 unsigned long pflags; 2424 2425 if (page_counter_read(&memcg->memory) <= 2426 READ_ONCE(memcg->memory.high)) 2427 continue; 2428 2429 memcg_memory_event(memcg, MEMCG_HIGH); 2430 2431 psi_memstall_enter(&pflags); 2432 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2433 gfp_mask, 2434 MEMCG_RECLAIM_MAY_SWAP); 2435 psi_memstall_leave(&pflags); 2436 } while ((memcg = parent_mem_cgroup(memcg)) && 2437 !mem_cgroup_is_root(memcg)); 2438 2439 return nr_reclaimed; 2440 } 2441 2442 static void high_work_func(struct work_struct *work) 2443 { 2444 struct mem_cgroup *memcg; 2445 2446 memcg = container_of(work, struct mem_cgroup, high_work); 2447 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2448 } 2449 2450 /* 2451 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2452 * enough to still cause a significant slowdown in most cases, while still 2453 * allowing diagnostics and tracing to proceed without becoming stuck. 2454 */ 2455 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2456 2457 /* 2458 * When calculating the delay, we use these either side of the exponentiation to 2459 * maintain precision and scale to a reasonable number of jiffies (see the table 2460 * below. 2461 * 2462 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2463 * overage ratio to a delay. 2464 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2465 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2466 * to produce a reasonable delay curve. 2467 * 2468 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2469 * reasonable delay curve compared to precision-adjusted overage, not 2470 * penalising heavily at first, but still making sure that growth beyond the 2471 * limit penalises misbehaviour cgroups by slowing them down exponentially. 
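 *
 * Concretely, this is a sketch of the arithmetic performed by
 * calculate_overage() and calculate_high_delay() below (before the
 * per-batch scaling and the clamp applied by the callers):
 *
 *	overage = ((usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high
 *	penalty_jiffies = (overage * overage * HZ)
 *		>> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT)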
 * For example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14

static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in the overage calculation by acting as if
	 * it were a threshold of 1 page.
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies for which we should penalise a mischievous cgroup
 * that is exceeding its memory.high, checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
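	 *
	 * In other words, the sleep below scales linearly with the charge
	 * size: for example, a charge of 2 * MEMCG_CHARGE_BATCH pages is
	 * penalised with twice the computed penalty_jiffies, while half a
	 * batch pays half of it.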
2583 */ 2584 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2585 } 2586 2587 /* 2588 * Scheduled by try_charge() to be executed from the userland return path 2589 * and reclaims memory over the high limit. 2590 */ 2591 void mem_cgroup_handle_over_high(void) 2592 { 2593 unsigned long penalty_jiffies; 2594 unsigned long pflags; 2595 unsigned long nr_reclaimed; 2596 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2597 int nr_retries = MAX_RECLAIM_RETRIES; 2598 struct mem_cgroup *memcg; 2599 bool in_retry = false; 2600 2601 if (likely(!nr_pages)) 2602 return; 2603 2604 memcg = get_mem_cgroup_from_mm(current->mm); 2605 current->memcg_nr_pages_over_high = 0; 2606 2607 retry_reclaim: 2608 /* 2609 * The allocating task should reclaim at least the batch size, but for 2610 * subsequent retries we only want to do what's necessary to prevent oom 2611 * or breaching resource isolation. 2612 * 2613 * This is distinct from memory.max or page allocator behaviour because 2614 * memory.high is currently batched, whereas memory.max and the page 2615 * allocator run every time an allocation is made. 2616 */ 2617 nr_reclaimed = reclaim_high(memcg, 2618 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2619 GFP_KERNEL); 2620 2621 /* 2622 * memory.high is breached and reclaim is unable to keep up. Throttle 2623 * allocators proactively to slow down excessive growth. 2624 */ 2625 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2626 mem_find_max_overage(memcg)); 2627 2628 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2629 swap_find_max_overage(memcg)); 2630 2631 /* 2632 * Clamp the max delay per usermode return so as to still keep the 2633 * application moving forwards and also permit diagnostics, albeit 2634 * extremely slowly. 2635 */ 2636 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2637 2638 /* 2639 * Don't sleep if the amount of jiffies this memcg owes us is so low 2640 * that it's not even worth doing, in an attempt to be nice to those who 2641 * go only a small amount over their memory.high value and maybe haven't 2642 * been aggressively reclaimed enough yet. 2643 */ 2644 if (penalty_jiffies <= HZ / 100) 2645 goto out; 2646 2647 /* 2648 * If reclaim is making forward progress but we're still over 2649 * memory.high, we want to encourage that rather than doing allocator 2650 * throttling. 2651 */ 2652 if (nr_reclaimed || nr_retries--) { 2653 in_retry = true; 2654 goto retry_reclaim; 2655 } 2656 2657 /* 2658 * If we exit early, we're guaranteed to die (since 2659 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2660 * need to account for any ill-begotten jiffies to pay them off later. 
2661 */ 2662 psi_memstall_enter(&pflags); 2663 schedule_timeout_killable(penalty_jiffies); 2664 psi_memstall_leave(&pflags); 2665 2666 out: 2667 css_put(&memcg->css); 2668 } 2669 2670 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2671 unsigned int nr_pages) 2672 { 2673 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2674 int nr_retries = MAX_RECLAIM_RETRIES; 2675 struct mem_cgroup *mem_over_limit; 2676 struct page_counter *counter; 2677 unsigned long nr_reclaimed; 2678 bool passed_oom = false; 2679 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP; 2680 bool drained = false; 2681 bool raised_max_event = false; 2682 unsigned long pflags; 2683 2684 retry: 2685 if (consume_stock(memcg, nr_pages)) 2686 return 0; 2687 2688 if (!do_memsw_account() || 2689 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2690 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2691 goto done_restock; 2692 if (do_memsw_account()) 2693 page_counter_uncharge(&memcg->memsw, batch); 2694 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2695 } else { 2696 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2697 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP; 2698 } 2699 2700 if (batch > nr_pages) { 2701 batch = nr_pages; 2702 goto retry; 2703 } 2704 2705 /* 2706 * Prevent unbounded recursion when reclaim operations need to 2707 * allocate memory. This might exceed the limits temporarily, 2708 * but we prefer facilitating memory reclaim and getting back 2709 * under the limit over triggering OOM kills in these cases. 2710 */ 2711 if (unlikely(current->flags & PF_MEMALLOC)) 2712 goto force; 2713 2714 if (unlikely(task_in_memcg_oom(current))) 2715 goto nomem; 2716 2717 if (!gfpflags_allow_blocking(gfp_mask)) 2718 goto nomem; 2719 2720 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2721 raised_max_event = true; 2722 2723 psi_memstall_enter(&pflags); 2724 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2725 gfp_mask, reclaim_options); 2726 psi_memstall_leave(&pflags); 2727 2728 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2729 goto retry; 2730 2731 if (!drained) { 2732 drain_all_stock(mem_over_limit); 2733 drained = true; 2734 goto retry; 2735 } 2736 2737 if (gfp_mask & __GFP_NORETRY) 2738 goto nomem; 2739 /* 2740 * Even though the limit is exceeded at this point, reclaim 2741 * may have been able to free some pages. Retry the charge 2742 * before killing the task. 2743 * 2744 * Only for regular pages, though: huge pages are rather 2745 * unlikely to succeed so close to the limit, and we fall back 2746 * to regular pages anyway in case of failure. 2747 */ 2748 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2749 goto retry; 2750 /* 2751 * At task move, charge accounts can be doubly counted. So, it's 2752 * better to wait until the end of task_move if something is going on. 2753 */ 2754 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2755 goto retry; 2756 2757 if (nr_retries--) 2758 goto retry; 2759 2760 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2761 goto nomem; 2762 2763 /* Avoid endless loop for tasks bypassed by the oom killer */ 2764 if (passed_oom && task_is_dying()) 2765 goto nomem; 2766 2767 /* 2768 * keep retrying as long as the memcg oom killer is able to make 2769 * a forward progress or bypass the charge if the oom killer 2770 * couldn't make any progress. 
2771 */ 2772 if (mem_cgroup_oom(mem_over_limit, gfp_mask, 2773 get_order(nr_pages * PAGE_SIZE))) { 2774 passed_oom = true; 2775 nr_retries = MAX_RECLAIM_RETRIES; 2776 goto retry; 2777 } 2778 nomem: 2779 /* 2780 * Memcg doesn't have a dedicated reserve for atomic 2781 * allocations. But like the global atomic pool, we need to 2782 * put the burden of reclaim on regular allocation requests 2783 * and let these go through as privileged allocations. 2784 */ 2785 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH))) 2786 return -ENOMEM; 2787 force: 2788 /* 2789 * If the allocation has to be enforced, don't forget to raise 2790 * a MEMCG_MAX event. 2791 */ 2792 if (!raised_max_event) 2793 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2794 2795 /* 2796 * The allocation either can't fail or will lead to more memory 2797 * being freed very soon. Allow memory usage go over the limit 2798 * temporarily by force charging it. 2799 */ 2800 page_counter_charge(&memcg->memory, nr_pages); 2801 if (do_memsw_account()) 2802 page_counter_charge(&memcg->memsw, nr_pages); 2803 2804 return 0; 2805 2806 done_restock: 2807 if (batch > nr_pages) 2808 refill_stock(memcg, batch - nr_pages); 2809 2810 /* 2811 * If the hierarchy is above the normal consumption range, schedule 2812 * reclaim on returning to userland. We can perform reclaim here 2813 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2814 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2815 * not recorded as it most likely matches current's and won't 2816 * change in the meantime. As high limit is checked again before 2817 * reclaim, the cost of mismatch is negligible. 2818 */ 2819 do { 2820 bool mem_high, swap_high; 2821 2822 mem_high = page_counter_read(&memcg->memory) > 2823 READ_ONCE(memcg->memory.high); 2824 swap_high = page_counter_read(&memcg->swap) > 2825 READ_ONCE(memcg->swap.high); 2826 2827 /* Don't bother a random interrupted task */ 2828 if (!in_task()) { 2829 if (mem_high) { 2830 schedule_work(&memcg->high_work); 2831 break; 2832 } 2833 continue; 2834 } 2835 2836 if (mem_high || swap_high) { 2837 /* 2838 * The allocating tasks in this cgroup will need to do 2839 * reclaim or be throttled to prevent further growth 2840 * of the memory or swap footprints. 2841 * 2842 * Target some best-effort fairness between the tasks, 2843 * and distribute reclaim work and delay penalties 2844 * based on how much each task is actually allocating. 
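			 *
			 * (set_notify_resume() below only flags the task; the
			 * actual reclaim/throttling then happens in
			 * mem_cgroup_handle_over_high(), which is run from the
			 * return-to-userspace path, see resume_user_mode_work().)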
2845 */ 2846 current->memcg_nr_pages_over_high += batch; 2847 set_notify_resume(current); 2848 break; 2849 } 2850 } while ((memcg = parent_mem_cgroup(memcg))); 2851 2852 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && 2853 !(current->flags & PF_MEMALLOC) && 2854 gfpflags_allow_blocking(gfp_mask)) { 2855 mem_cgroup_handle_over_high(); 2856 } 2857 return 0; 2858 } 2859 2860 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2861 unsigned int nr_pages) 2862 { 2863 if (mem_cgroup_is_root(memcg)) 2864 return 0; 2865 2866 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2867 } 2868 2869 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2870 { 2871 if (mem_cgroup_is_root(memcg)) 2872 return; 2873 2874 page_counter_uncharge(&memcg->memory, nr_pages); 2875 if (do_memsw_account()) 2876 page_counter_uncharge(&memcg->memsw, nr_pages); 2877 } 2878 2879 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) 2880 { 2881 VM_BUG_ON_FOLIO(folio_memcg(folio), folio); 2882 /* 2883 * Any of the following ensures page's memcg stability: 2884 * 2885 * - the page lock 2886 * - LRU isolation 2887 * - lock_page_memcg() 2888 * - exclusive reference 2889 * - mem_cgroup_trylock_pages() 2890 */ 2891 folio->memcg_data = (unsigned long)memcg; 2892 } 2893 2894 #ifdef CONFIG_MEMCG_KMEM 2895 /* 2896 * The allocated objcg pointers array is not accounted directly. 2897 * Moreover, it should not come from DMA buffer and is not readily 2898 * reclaimable. So those GFP bits should be masked off. 2899 */ 2900 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) 2901 2902 /* 2903 * mod_objcg_mlstate() may be called with irq enabled, so 2904 * mod_memcg_lruvec_state() should be used. 2905 */ 2906 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 2907 struct pglist_data *pgdat, 2908 enum node_stat_item idx, int nr) 2909 { 2910 struct mem_cgroup *memcg; 2911 struct lruvec *lruvec; 2912 2913 rcu_read_lock(); 2914 memcg = obj_cgroup_memcg(objcg); 2915 lruvec = mem_cgroup_lruvec(memcg, pgdat); 2916 mod_memcg_lruvec_state(lruvec, idx, nr); 2917 rcu_read_unlock(); 2918 } 2919 2920 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s, 2921 gfp_t gfp, bool new_slab) 2922 { 2923 unsigned int objects = objs_per_slab(s, slab); 2924 unsigned long memcg_data; 2925 void *vec; 2926 2927 gfp &= ~OBJCGS_CLEAR_MASK; 2928 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2929 slab_nid(slab)); 2930 if (!vec) 2931 return -ENOMEM; 2932 2933 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; 2934 if (new_slab) { 2935 /* 2936 * If the slab is brand new and nobody can yet access its 2937 * memcg_data, no synchronization is required and memcg_data can 2938 * be simply assigned. 2939 */ 2940 slab->memcg_data = memcg_data; 2941 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) { 2942 /* 2943 * If the slab is already in use, somebody can allocate and 2944 * assign obj_cgroups in parallel. In this case the existing 2945 * objcg vector should be reused. 2946 */ 2947 kfree(vec); 2948 return 0; 2949 } 2950 2951 kmemleak_not_leak(vec); 2952 return 0; 2953 } 2954 2955 static __always_inline 2956 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p) 2957 { 2958 /* 2959 * Slab objects are accounted individually, not per-page. 2960 * Memcg membership data for each individual object is saved in 2961 * slab->memcg_data. 
 */
	if (folio_test_slab(folio)) {
		struct obj_cgroup **objcgs;
		struct slab *slab;
		unsigned int off;

		slab = folio_slab(folio);
		objcgs = slab_objcgs(slab);
		if (!objcgs)
			return NULL;

		off = obj_to_index(slab->slab_cache, slab, p);
		if (objcgs[off])
			return obj_cgroup_memcg(objcgs[off]);

		return NULL;
	}

	/*
	 * folio_memcg_check() is used here, because in theory we can encounter
	 * a folio where the slab flag has been cleared already, but
	 * slab->memcg_data has not been freed yet.
	 * folio_memcg_check() will guarantee that a proper memory
	 * cgroup pointer or NULL will be returned.
	 */
	return folio_memcg_check(folio);
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 *
 * A passed kernel object can be a slab object, vmalloc object or a generic
 * kernel page, so different mechanisms for getting the memory cgroup pointer
 * should be used.
 *
 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
 * cannot know for sure how the kernel object is implemented.
 * mem_cgroup_from_obj() can be safely used in such cases.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	struct folio *folio;

	if (mem_cgroup_disabled())
		return NULL;

	if (unlikely(is_vmalloc_addr(p)))
		folio = page_folio(vmalloc_to_page(p));
	else
		folio = virt_to_folio(p);

	return mem_cgroup_from_obj_folio(folio, p);
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
 * allocated using vmalloc().
 *
 * A passed kernel object must be a slab object or a generic kernel page.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
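 *
 * A minimal usage sketch (illustrative only):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_slab_obj(p);
 *	if (memcg)
 *		...memcg remains valid for the duration of the RCU
 *		   read-side critical section...
 *	rcu_read_unlock();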
3028 */ 3029 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) 3030 { 3031 if (mem_cgroup_disabled()) 3032 return NULL; 3033 3034 return mem_cgroup_from_obj_folio(virt_to_folio(p), p); 3035 } 3036 3037 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg) 3038 { 3039 struct obj_cgroup *objcg = NULL; 3040 3041 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 3042 objcg = rcu_dereference(memcg->objcg); 3043 if (objcg && obj_cgroup_tryget(objcg)) 3044 break; 3045 objcg = NULL; 3046 } 3047 return objcg; 3048 } 3049 3050 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 3051 { 3052 struct obj_cgroup *objcg = NULL; 3053 struct mem_cgroup *memcg; 3054 3055 if (memcg_kmem_bypass()) 3056 return NULL; 3057 3058 rcu_read_lock(); 3059 if (unlikely(active_memcg())) 3060 memcg = active_memcg(); 3061 else 3062 memcg = mem_cgroup_from_task(current); 3063 objcg = __get_obj_cgroup_from_memcg(memcg); 3064 rcu_read_unlock(); 3065 return objcg; 3066 } 3067 3068 struct obj_cgroup *get_obj_cgroup_from_page(struct page *page) 3069 { 3070 struct obj_cgroup *objcg; 3071 3072 if (!memcg_kmem_online()) 3073 return NULL; 3074 3075 if (PageMemcgKmem(page)) { 3076 objcg = __folio_objcg(page_folio(page)); 3077 obj_cgroup_get(objcg); 3078 } else { 3079 struct mem_cgroup *memcg; 3080 3081 rcu_read_lock(); 3082 memcg = __folio_memcg(page_folio(page)); 3083 if (memcg) 3084 objcg = __get_obj_cgroup_from_memcg(memcg); 3085 else 3086 objcg = NULL; 3087 rcu_read_unlock(); 3088 } 3089 return objcg; 3090 } 3091 3092 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages) 3093 { 3094 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); 3095 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 3096 if (nr_pages > 0) 3097 page_counter_charge(&memcg->kmem, nr_pages); 3098 else 3099 page_counter_uncharge(&memcg->kmem, -nr_pages); 3100 } 3101 } 3102 3103 3104 /* 3105 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 3106 * @objcg: object cgroup to uncharge 3107 * @nr_pages: number of pages to uncharge 3108 */ 3109 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 3110 unsigned int nr_pages) 3111 { 3112 struct mem_cgroup *memcg; 3113 3114 memcg = get_mem_cgroup_from_objcg(objcg); 3115 3116 memcg_account_kmem(memcg, -nr_pages); 3117 refill_stock(memcg, nr_pages); 3118 3119 css_put(&memcg->css); 3120 } 3121 3122 /* 3123 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 3124 * @objcg: object cgroup to charge 3125 * @gfp: reclaim mode 3126 * @nr_pages: number of pages to charge 3127 * 3128 * Returns 0 on success, an error code on failure. 3129 */ 3130 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 3131 unsigned int nr_pages) 3132 { 3133 struct mem_cgroup *memcg; 3134 int ret; 3135 3136 memcg = get_mem_cgroup_from_objcg(objcg); 3137 3138 ret = try_charge_memcg(memcg, gfp, nr_pages); 3139 if (ret) 3140 goto out; 3141 3142 memcg_account_kmem(memcg, nr_pages); 3143 out: 3144 css_put(&memcg->css); 3145 3146 return ret; 3147 } 3148 3149 /** 3150 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3151 * @page: page to charge 3152 * @gfp: reclaim mode 3153 * @order: allocation order 3154 * 3155 * Returns 0 on success, an error code on failure. 
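 *
 * Sketch of how the page allocator uses this (simplified from the
 * __GFP_ACCOUNT handling in the allocation slow path; not a new API):
 *
 *	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) &&
 *	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}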
3156 */ 3157 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3158 { 3159 struct obj_cgroup *objcg; 3160 int ret = 0; 3161 3162 objcg = get_obj_cgroup_from_current(); 3163 if (objcg) { 3164 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 3165 if (!ret) { 3166 page->memcg_data = (unsigned long)objcg | 3167 MEMCG_DATA_KMEM; 3168 return 0; 3169 } 3170 obj_cgroup_put(objcg); 3171 } 3172 return ret; 3173 } 3174 3175 /** 3176 * __memcg_kmem_uncharge_page: uncharge a kmem page 3177 * @page: page to uncharge 3178 * @order: allocation order 3179 */ 3180 void __memcg_kmem_uncharge_page(struct page *page, int order) 3181 { 3182 struct folio *folio = page_folio(page); 3183 struct obj_cgroup *objcg; 3184 unsigned int nr_pages = 1 << order; 3185 3186 if (!folio_memcg_kmem(folio)) 3187 return; 3188 3189 objcg = __folio_objcg(folio); 3190 obj_cgroup_uncharge_pages(objcg, nr_pages); 3191 folio->memcg_data = 0; 3192 obj_cgroup_put(objcg); 3193 } 3194 3195 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 3196 enum node_stat_item idx, int nr) 3197 { 3198 struct memcg_stock_pcp *stock; 3199 struct obj_cgroup *old = NULL; 3200 unsigned long flags; 3201 int *bytes; 3202 3203 local_lock_irqsave(&memcg_stock.stock_lock, flags); 3204 stock = this_cpu_ptr(&memcg_stock); 3205 3206 /* 3207 * Save vmstat data in stock and skip vmstat array update unless 3208 * accumulating over a page of vmstat data or when pgdat or idx 3209 * changes. 3210 */ 3211 if (stock->cached_objcg != objcg) { 3212 old = drain_obj_stock(stock); 3213 obj_cgroup_get(objcg); 3214 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3215 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3216 stock->cached_objcg = objcg; 3217 stock->cached_pgdat = pgdat; 3218 } else if (stock->cached_pgdat != pgdat) { 3219 /* Flush the existing cached vmstat data */ 3220 struct pglist_data *oldpg = stock->cached_pgdat; 3221 3222 if (stock->nr_slab_reclaimable_b) { 3223 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 3224 stock->nr_slab_reclaimable_b); 3225 stock->nr_slab_reclaimable_b = 0; 3226 } 3227 if (stock->nr_slab_unreclaimable_b) { 3228 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 3229 stock->nr_slab_unreclaimable_b); 3230 stock->nr_slab_unreclaimable_b = 0; 3231 } 3232 stock->cached_pgdat = pgdat; 3233 } 3234 3235 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 3236 : &stock->nr_slab_unreclaimable_b; 3237 /* 3238 * Even for large object >= PAGE_SIZE, the vmstat data will still be 3239 * cached locally at least once before pushing it out. 
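	 *
	 * For instance, a stream of small +64 byte updates for the same
	 * objcg/pgdat/idx only accumulates in *bytes here; nothing is
	 * pushed to mod_objcg_mlstate() until the accumulated magnitude
	 * exceeds PAGE_SIZE (or the stock is drained or retargeted).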
3240 */ 3241 if (!*bytes) { 3242 *bytes = nr; 3243 nr = 0; 3244 } else { 3245 *bytes += nr; 3246 if (abs(*bytes) > PAGE_SIZE) { 3247 nr = *bytes; 3248 *bytes = 0; 3249 } else { 3250 nr = 0; 3251 } 3252 } 3253 if (nr) 3254 mod_objcg_mlstate(objcg, pgdat, idx, nr); 3255 3256 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 3257 if (old) 3258 obj_cgroup_put(old); 3259 } 3260 3261 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3262 { 3263 struct memcg_stock_pcp *stock; 3264 unsigned long flags; 3265 bool ret = false; 3266 3267 local_lock_irqsave(&memcg_stock.stock_lock, flags); 3268 3269 stock = this_cpu_ptr(&memcg_stock); 3270 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3271 stock->nr_bytes -= nr_bytes; 3272 ret = true; 3273 } 3274 3275 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 3276 3277 return ret; 3278 } 3279 3280 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) 3281 { 3282 struct obj_cgroup *old = stock->cached_objcg; 3283 3284 if (!old) 3285 return NULL; 3286 3287 if (stock->nr_bytes) { 3288 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3289 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3290 3291 if (nr_pages) { 3292 struct mem_cgroup *memcg; 3293 3294 memcg = get_mem_cgroup_from_objcg(old); 3295 3296 memcg_account_kmem(memcg, -nr_pages); 3297 __refill_stock(memcg, nr_pages); 3298 3299 css_put(&memcg->css); 3300 } 3301 3302 /* 3303 * The leftover is flushed to the centralized per-memcg value. 3304 * On the next attempt to refill obj stock it will be moved 3305 * to a per-cpu stock (probably, on an other CPU), see 3306 * refill_obj_stock(). 3307 * 3308 * How often it's flushed is a trade-off between the memory 3309 * limit enforcement accuracy and potential CPU contention, 3310 * so it might be changed in the future. 3311 */ 3312 atomic_add(nr_bytes, &old->nr_charged_bytes); 3313 stock->nr_bytes = 0; 3314 } 3315 3316 /* 3317 * Flush the vmstat data in current stock 3318 */ 3319 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3320 if (stock->nr_slab_reclaimable_b) { 3321 mod_objcg_mlstate(old, stock->cached_pgdat, 3322 NR_SLAB_RECLAIMABLE_B, 3323 stock->nr_slab_reclaimable_b); 3324 stock->nr_slab_reclaimable_b = 0; 3325 } 3326 if (stock->nr_slab_unreclaimable_b) { 3327 mod_objcg_mlstate(old, stock->cached_pgdat, 3328 NR_SLAB_UNRECLAIMABLE_B, 3329 stock->nr_slab_unreclaimable_b); 3330 stock->nr_slab_unreclaimable_b = 0; 3331 } 3332 stock->cached_pgdat = NULL; 3333 } 3334 3335 stock->cached_objcg = NULL; 3336 /* 3337 * The `old' objects needs to be released by the caller via 3338 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock. 
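	 *
	 * The expected caller pattern (as in drain_local_stock() above) is:
	 *
	 *	old = drain_obj_stock(stock);
	 *	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	 *	if (old)
	 *		obj_cgroup_put(old);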
3339 */ 3340 return old; 3341 } 3342 3343 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3344 struct mem_cgroup *root_memcg) 3345 { 3346 struct mem_cgroup *memcg; 3347 3348 if (stock->cached_objcg) { 3349 memcg = obj_cgroup_memcg(stock->cached_objcg); 3350 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3351 return true; 3352 } 3353 3354 return false; 3355 } 3356 3357 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 3358 bool allow_uncharge) 3359 { 3360 struct memcg_stock_pcp *stock; 3361 struct obj_cgroup *old = NULL; 3362 unsigned long flags; 3363 unsigned int nr_pages = 0; 3364 3365 local_lock_irqsave(&memcg_stock.stock_lock, flags); 3366 3367 stock = this_cpu_ptr(&memcg_stock); 3368 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3369 old = drain_obj_stock(stock); 3370 obj_cgroup_get(objcg); 3371 stock->cached_objcg = objcg; 3372 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3373 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3374 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3375 } 3376 stock->nr_bytes += nr_bytes; 3377 3378 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3379 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3380 stock->nr_bytes &= (PAGE_SIZE - 1); 3381 } 3382 3383 local_unlock_irqrestore(&memcg_stock.stock_lock, flags); 3384 if (old) 3385 obj_cgroup_put(old); 3386 3387 if (nr_pages) 3388 obj_cgroup_uncharge_pages(objcg, nr_pages); 3389 } 3390 3391 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3392 { 3393 unsigned int nr_pages, nr_bytes; 3394 int ret; 3395 3396 if (consume_obj_stock(objcg, size)) 3397 return 0; 3398 3399 /* 3400 * In theory, objcg->nr_charged_bytes can have enough 3401 * pre-charged bytes to satisfy the allocation. However, 3402 * flushing objcg->nr_charged_bytes requires two atomic 3403 * operations, and objcg->nr_charged_bytes can't be big. 3404 * The shared objcg->nr_charged_bytes can also become a 3405 * performance bottleneck if all tasks of the same memcg are 3406 * trying to update it. So it's better to ignore it and try 3407 * grab some new pages. The stock's nr_bytes will be flushed to 3408 * objcg->nr_charged_bytes later on when objcg changes. 3409 * 3410 * The stock's nr_bytes may contain enough pre-charged bytes 3411 * to allow one less page from being charged, but we can't rely 3412 * on the pre-charged bytes not being changed outside of 3413 * consume_obj_stock() or refill_obj_stock(). So ignore those 3414 * pre-charged bytes as well when charging pages. To avoid a 3415 * page uncharge right after a page charge, we set the 3416 * allow_uncharge flag to false when calling refill_obj_stock() 3417 * to temporarily allow the pre-charged bytes to exceed the page 3418 * size limit. The maximum reachable value of the pre-charged 3419 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3420 * race. 3421 */ 3422 nr_pages = size >> PAGE_SHIFT; 3423 nr_bytes = size & (PAGE_SIZE - 1); 3424 3425 if (nr_bytes) 3426 nr_pages += 1; 3427 3428 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3429 if (!ret && nr_bytes) 3430 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 3431 3432 return ret; 3433 } 3434 3435 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3436 { 3437 refill_obj_stock(objcg, size, true); 3438 } 3439 3440 #endif /* CONFIG_MEMCG_KMEM */ 3441 3442 /* 3443 * Because page_memcg(head) is not set on tails, set it now. 
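 * For example, when a compound page is split, every tail page must carry the
 * same memcg_data as the head so that a later uncharge of an individual tail
 * page finds the right memcg; the extra objcg/css references taken below keep
 * that ownership valid.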
3444 */ 3445 void split_page_memcg(struct page *head, unsigned int nr) 3446 { 3447 struct folio *folio = page_folio(head); 3448 struct mem_cgroup *memcg = folio_memcg(folio); 3449 int i; 3450 3451 if (mem_cgroup_disabled() || !memcg) 3452 return; 3453 3454 for (i = 1; i < nr; i++) 3455 folio_page(folio, i)->memcg_data = folio->memcg_data; 3456 3457 if (folio_memcg_kmem(folio)) 3458 obj_cgroup_get_many(__folio_objcg(folio), nr - 1); 3459 else 3460 css_get_many(&memcg->css, nr - 1); 3461 } 3462 3463 #ifdef CONFIG_SWAP 3464 /** 3465 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3466 * @entry: swap entry to be moved 3467 * @from: mem_cgroup which the entry is moved from 3468 * @to: mem_cgroup which the entry is moved to 3469 * 3470 * It succeeds only when the swap_cgroup's record for this entry is the same 3471 * as the mem_cgroup's id of @from. 3472 * 3473 * Returns 0 on success, -EINVAL on failure. 3474 * 3475 * The caller must have charged to @to, IOW, called page_counter_charge() about 3476 * both res and memsw, and called css_get(). 3477 */ 3478 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3479 struct mem_cgroup *from, struct mem_cgroup *to) 3480 { 3481 unsigned short old_id, new_id; 3482 3483 old_id = mem_cgroup_id(from); 3484 new_id = mem_cgroup_id(to); 3485 3486 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3487 mod_memcg_state(from, MEMCG_SWAP, -1); 3488 mod_memcg_state(to, MEMCG_SWAP, 1); 3489 return 0; 3490 } 3491 return -EINVAL; 3492 } 3493 #else 3494 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3495 struct mem_cgroup *from, struct mem_cgroup *to) 3496 { 3497 return -EINVAL; 3498 } 3499 #endif 3500 3501 static DEFINE_MUTEX(memcg_max_mutex); 3502 3503 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3504 unsigned long max, bool memsw) 3505 { 3506 bool enlarge = false; 3507 bool drained = false; 3508 int ret; 3509 bool limits_invariant; 3510 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3511 3512 do { 3513 if (signal_pending(current)) { 3514 ret = -EINTR; 3515 break; 3516 } 3517 3518 mutex_lock(&memcg_max_mutex); 3519 /* 3520 * Make sure that the new limit (memsw or memory limit) doesn't 3521 * break our basic invariant rule memory.max <= memsw.max. 3522 */ 3523 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3524 max <= memcg->memsw.max; 3525 if (!limits_invariant) { 3526 mutex_unlock(&memcg_max_mutex); 3527 ret = -EINVAL; 3528 break; 3529 } 3530 if (max > counter->max) 3531 enlarge = true; 3532 ret = page_counter_set_max(counter, max); 3533 mutex_unlock(&memcg_max_mutex); 3534 3535 if (!ret) 3536 break; 3537 3538 if (!drained) { 3539 drain_all_stock(memcg); 3540 drained = true; 3541 continue; 3542 } 3543 3544 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, 3545 memsw ? 
					0 : MEMCG_RECLAIM_MAY_SWAP)) {
			ret = -EBUSY;
			break;
		}
	} while (true);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_node *mctz;
	unsigned long excess;

	if (lru_gen_enabled())
		return 0;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
		return 0;

	/*
	 * This loop can run for a while, especially if mem_cgroups
	 * continuously keep exceeding their soft limit and putting the
	 * system under pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
						    gfp_mask, total_scanned);
		nr_reclaimed += reclaimed;
		spin_lock_irq(&mctz->lock);

		/*
		 * If we failed to reclaim anything from this memory cgroup,
		 * it is time to move on to the next cgroup.
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0 simply because, due
		 * to the reclaim priority, we are exposing a smaller
		 * subset of memory to reclaim from. Consider this as a
		 * longer term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Bail out if we could not reclaim anything and there are
		 * no more mem cgroups to try, or if we seem to be looping
		 * without reclaiming anything.
		 */
		if (!nr_reclaimed &&
		    (next_mz == NULL ||
		     loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding a css reference on the memcg.
3643 */ 3644 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3645 { 3646 int nr_retries = MAX_RECLAIM_RETRIES; 3647 3648 /* we call try-to-free pages for make this cgroup empty */ 3649 lru_add_drain_all(); 3650 3651 drain_all_stock(memcg); 3652 3653 /* try to free all pages in this cgroup */ 3654 while (nr_retries && page_counter_read(&memcg->memory)) { 3655 if (signal_pending(current)) 3656 return -EINTR; 3657 3658 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, 3659 MEMCG_RECLAIM_MAY_SWAP)) 3660 nr_retries--; 3661 } 3662 3663 return 0; 3664 } 3665 3666 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3667 char *buf, size_t nbytes, 3668 loff_t off) 3669 { 3670 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3671 3672 if (mem_cgroup_is_root(memcg)) 3673 return -EINVAL; 3674 return mem_cgroup_force_empty(memcg) ?: nbytes; 3675 } 3676 3677 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3678 struct cftype *cft) 3679 { 3680 return 1; 3681 } 3682 3683 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3684 struct cftype *cft, u64 val) 3685 { 3686 if (val == 1) 3687 return 0; 3688 3689 pr_warn_once("Non-hierarchical mode is deprecated. " 3690 "Please report your usecase to linux-mm@kvack.org if you " 3691 "depend on this functionality.\n"); 3692 3693 return -EINVAL; 3694 } 3695 3696 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3697 { 3698 unsigned long val; 3699 3700 if (mem_cgroup_is_root(memcg)) { 3701 /* 3702 * We can reach here from irq context through: 3703 * uncharge_batch() 3704 * |--memcg_check_events() 3705 * |--mem_cgroup_threshold() 3706 * |--__mem_cgroup_threshold() 3707 * |--mem_cgroup_usage 3708 * 3709 * rstat flushing is an expensive operation that should not be 3710 * done from irq context; use stale stats in this case. 3711 * Arguably, usage threshold events are not reliable on the root 3712 * memcg anyway since its usage is ill-defined. 3713 * 3714 * Additionally, other call paths through memcg_check_events() 3715 * disable irqs, so make sure we are flushing stats atomically. 
3716 */ 3717 if (in_task()) 3718 mem_cgroup_flush_stats_atomic(); 3719 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3720 memcg_page_state(memcg, NR_ANON_MAPPED); 3721 if (swap) 3722 val += memcg_page_state(memcg, MEMCG_SWAP); 3723 } else { 3724 if (!swap) 3725 val = page_counter_read(&memcg->memory); 3726 else 3727 val = page_counter_read(&memcg->memsw); 3728 } 3729 return val; 3730 } 3731 3732 enum { 3733 RES_USAGE, 3734 RES_LIMIT, 3735 RES_MAX_USAGE, 3736 RES_FAILCNT, 3737 RES_SOFT_LIMIT, 3738 }; 3739 3740 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3741 struct cftype *cft) 3742 { 3743 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3744 struct page_counter *counter; 3745 3746 switch (MEMFILE_TYPE(cft->private)) { 3747 case _MEM: 3748 counter = &memcg->memory; 3749 break; 3750 case _MEMSWAP: 3751 counter = &memcg->memsw; 3752 break; 3753 case _KMEM: 3754 counter = &memcg->kmem; 3755 break; 3756 case _TCP: 3757 counter = &memcg->tcpmem; 3758 break; 3759 default: 3760 BUG(); 3761 } 3762 3763 switch (MEMFILE_ATTR(cft->private)) { 3764 case RES_USAGE: 3765 if (counter == &memcg->memory) 3766 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3767 if (counter == &memcg->memsw) 3768 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3769 return (u64)page_counter_read(counter) * PAGE_SIZE; 3770 case RES_LIMIT: 3771 return (u64)counter->max * PAGE_SIZE; 3772 case RES_MAX_USAGE: 3773 return (u64)counter->watermark * PAGE_SIZE; 3774 case RES_FAILCNT: 3775 return counter->failcnt; 3776 case RES_SOFT_LIMIT: 3777 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; 3778 default: 3779 BUG(); 3780 } 3781 } 3782 3783 /* 3784 * This function doesn't do anything useful. Its only job is to provide a read 3785 * handler for a file so that cgroup_file_mode() will add read permissions. 3786 */ 3787 static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m, 3788 __always_unused void *v) 3789 { 3790 return -EINVAL; 3791 } 3792 3793 #ifdef CONFIG_MEMCG_KMEM 3794 static int memcg_online_kmem(struct mem_cgroup *memcg) 3795 { 3796 struct obj_cgroup *objcg; 3797 3798 if (mem_cgroup_kmem_disabled()) 3799 return 0; 3800 3801 if (unlikely(mem_cgroup_is_root(memcg))) 3802 return 0; 3803 3804 objcg = obj_cgroup_alloc(); 3805 if (!objcg) 3806 return -ENOMEM; 3807 3808 objcg->memcg = memcg; 3809 rcu_assign_pointer(memcg->objcg, objcg); 3810 3811 static_branch_enable(&memcg_kmem_online_key); 3812 3813 memcg->kmemcg_id = memcg->id.id; 3814 3815 return 0; 3816 } 3817 3818 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3819 { 3820 struct mem_cgroup *parent; 3821 3822 if (mem_cgroup_kmem_disabled()) 3823 return; 3824 3825 if (unlikely(mem_cgroup_is_root(memcg))) 3826 return; 3827 3828 parent = parent_mem_cgroup(memcg); 3829 if (!parent) 3830 parent = root_mem_cgroup; 3831 3832 memcg_reparent_objcgs(memcg, parent); 3833 3834 /* 3835 * After we have finished memcg_reparent_objcgs(), all list_lrus 3836 * corresponding to this cgroup are guaranteed to remain empty. 3837 * The ordering is imposed by list_lru_node->lock taken by 3838 * memcg_reparent_list_lrus(). 
3839 */ 3840 memcg_reparent_list_lrus(memcg, parent); 3841 } 3842 #else 3843 static int memcg_online_kmem(struct mem_cgroup *memcg) 3844 { 3845 return 0; 3846 } 3847 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3848 { 3849 } 3850 #endif /* CONFIG_MEMCG_KMEM */ 3851 3852 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3853 { 3854 int ret; 3855 3856 mutex_lock(&memcg_max_mutex); 3857 3858 ret = page_counter_set_max(&memcg->tcpmem, max); 3859 if (ret) 3860 goto out; 3861 3862 if (!memcg->tcpmem_active) { 3863 /* 3864 * The active flag needs to be written after the static_key 3865 * update. This is what guarantees that the socket activation 3866 * function is the last one to run. See mem_cgroup_sk_alloc() 3867 * for details, and note that we don't mark any socket as 3868 * belonging to this memcg until that flag is up. 3869 * 3870 * We need to do this, because static_keys will span multiple 3871 * sites, but we can't control their order. If we mark a socket 3872 * as accounted, but the accounting functions are not patched in 3873 * yet, we'll lose accounting. 3874 * 3875 * We never race with the readers in mem_cgroup_sk_alloc(), 3876 * because when this value change, the code to process it is not 3877 * patched in yet. 3878 */ 3879 static_branch_inc(&memcg_sockets_enabled_key); 3880 memcg->tcpmem_active = true; 3881 } 3882 out: 3883 mutex_unlock(&memcg_max_mutex); 3884 return ret; 3885 } 3886 3887 /* 3888 * The user of this function is... 3889 * RES_LIMIT. 3890 */ 3891 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3892 char *buf, size_t nbytes, loff_t off) 3893 { 3894 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3895 unsigned long nr_pages; 3896 int ret; 3897 3898 buf = strstrip(buf); 3899 ret = page_counter_memparse(buf, "-1", &nr_pages); 3900 if (ret) 3901 return ret; 3902 3903 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3904 case RES_LIMIT: 3905 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3906 ret = -EINVAL; 3907 break; 3908 } 3909 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3910 case _MEM: 3911 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3912 break; 3913 case _MEMSWAP: 3914 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3915 break; 3916 case _KMEM: 3917 /* kmem.limit_in_bytes is deprecated. 
*/ 3918 ret = -EOPNOTSUPP; 3919 break; 3920 case _TCP: 3921 ret = memcg_update_tcp_max(memcg, nr_pages); 3922 break; 3923 } 3924 break; 3925 case RES_SOFT_LIMIT: 3926 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 3927 ret = -EOPNOTSUPP; 3928 } else { 3929 WRITE_ONCE(memcg->soft_limit, nr_pages); 3930 ret = 0; 3931 } 3932 break; 3933 } 3934 return ret ?: nbytes; 3935 } 3936 3937 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3938 size_t nbytes, loff_t off) 3939 { 3940 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3941 struct page_counter *counter; 3942 3943 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3944 case _MEM: 3945 counter = &memcg->memory; 3946 break; 3947 case _MEMSWAP: 3948 counter = &memcg->memsw; 3949 break; 3950 case _KMEM: 3951 counter = &memcg->kmem; 3952 break; 3953 case _TCP: 3954 counter = &memcg->tcpmem; 3955 break; 3956 default: 3957 BUG(); 3958 } 3959 3960 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3961 case RES_MAX_USAGE: 3962 page_counter_reset_watermark(counter); 3963 break; 3964 case RES_FAILCNT: 3965 counter->failcnt = 0; 3966 break; 3967 default: 3968 BUG(); 3969 } 3970 3971 return nbytes; 3972 } 3973 3974 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3975 struct cftype *cft) 3976 { 3977 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3978 } 3979 3980 #ifdef CONFIG_MMU 3981 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3982 struct cftype *cft, u64 val) 3983 { 3984 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3985 3986 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. " 3987 "Please report your usecase to linux-mm@kvack.org if you " 3988 "depend on this functionality.\n"); 3989 3990 if (val & ~MOVE_MASK) 3991 return -EINVAL; 3992 3993 /* 3994 * No kind of locking is needed in here, because ->can_attach() will 3995 * check this value once in the beginning of the process, and then carry 3996 * on with stale data. This means that changes to this value will only 3997 * affect task migrations starting after the change. 
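	 *
	 * For example, writing (MOVE_ANON | MOVE_FILE), i.e. 3, requests that
	 * both anonymous and file charges follow tasks migrated into this
	 * cgroup from then on, while writing 0 disables charge moving for
	 * future migrations.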
3998 */ 3999 memcg->move_charge_at_immigrate = val; 4000 return 0; 4001 } 4002 #else 4003 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 4004 struct cftype *cft, u64 val) 4005 { 4006 return -ENOSYS; 4007 } 4008 #endif 4009 4010 #ifdef CONFIG_NUMA 4011 4012 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 4013 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 4014 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 4015 4016 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 4017 int nid, unsigned int lru_mask, bool tree) 4018 { 4019 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 4020 unsigned long nr = 0; 4021 enum lru_list lru; 4022 4023 VM_BUG_ON((unsigned)nid >= nr_node_ids); 4024 4025 for_each_lru(lru) { 4026 if (!(BIT(lru) & lru_mask)) 4027 continue; 4028 if (tree) 4029 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 4030 else 4031 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 4032 } 4033 return nr; 4034 } 4035 4036 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 4037 unsigned int lru_mask, 4038 bool tree) 4039 { 4040 unsigned long nr = 0; 4041 enum lru_list lru; 4042 4043 for_each_lru(lru) { 4044 if (!(BIT(lru) & lru_mask)) 4045 continue; 4046 if (tree) 4047 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 4048 else 4049 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 4050 } 4051 return nr; 4052 } 4053 4054 static int memcg_numa_stat_show(struct seq_file *m, void *v) 4055 { 4056 struct numa_stat { 4057 const char *name; 4058 unsigned int lru_mask; 4059 }; 4060 4061 static const struct numa_stat stats[] = { 4062 { "total", LRU_ALL }, 4063 { "file", LRU_ALL_FILE }, 4064 { "anon", LRU_ALL_ANON }, 4065 { "unevictable", BIT(LRU_UNEVICTABLE) }, 4066 }; 4067 const struct numa_stat *stat; 4068 int nid; 4069 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4070 4071 mem_cgroup_flush_stats(); 4072 4073 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4074 seq_printf(m, "%s=%lu", stat->name, 4075 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4076 false)); 4077 for_each_node_state(nid, N_MEMORY) 4078 seq_printf(m, " N%d=%lu", nid, 4079 mem_cgroup_node_nr_lru_pages(memcg, nid, 4080 stat->lru_mask, false)); 4081 seq_putc(m, '\n'); 4082 } 4083 4084 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4085 4086 seq_printf(m, "hierarchical_%s=%lu", stat->name, 4087 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4088 true)); 4089 for_each_node_state(nid, N_MEMORY) 4090 seq_printf(m, " N%d=%lu", nid, 4091 mem_cgroup_node_nr_lru_pages(memcg, nid, 4092 stat->lru_mask, true)); 4093 seq_putc(m, '\n'); 4094 } 4095 4096 return 0; 4097 } 4098 #endif /* CONFIG_NUMA */ 4099 4100 static const unsigned int memcg1_stats[] = { 4101 NR_FILE_PAGES, 4102 NR_ANON_MAPPED, 4103 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4104 NR_ANON_THPS, 4105 #endif 4106 NR_SHMEM, 4107 NR_FILE_MAPPED, 4108 NR_FILE_DIRTY, 4109 NR_WRITEBACK, 4110 WORKINGSET_REFAULT_ANON, 4111 WORKINGSET_REFAULT_FILE, 4112 MEMCG_SWAP, 4113 }; 4114 4115 static const char *const memcg1_stat_names[] = { 4116 "cache", 4117 "rss", 4118 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4119 "rss_huge", 4120 #endif 4121 "shmem", 4122 "mapped_file", 4123 "dirty", 4124 "writeback", 4125 "workingset_refault_anon", 4126 "workingset_refault_file", 4127 "swap", 4128 }; 4129 4130 /* Universal VM events cgroup1 shows, original sort order */ 4131 static const unsigned int memcg1_events[] = { 4132 PGPGIN, 4133 PGPGOUT, 4134 
PGFAULT, 4135 PGMAJFAULT, 4136 }; 4137 4138 static int memcg_stat_show(struct seq_file *m, void *v) 4139 { 4140 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4141 unsigned long memory, memsw; 4142 struct mem_cgroup *mi; 4143 unsigned int i; 4144 4145 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4146 4147 mem_cgroup_flush_stats(); 4148 4149 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4150 unsigned long nr; 4151 4152 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4153 continue; 4154 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4155 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 4156 nr * memcg_page_state_unit(memcg1_stats[i])); 4157 } 4158 4159 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4160 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4161 memcg_events_local(memcg, memcg1_events[i])); 4162 4163 for (i = 0; i < NR_LRU_LISTS; i++) 4164 seq_printf(m, "%s %lu\n", lru_list_name(i), 4165 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4166 PAGE_SIZE); 4167 4168 /* Hierarchical information */ 4169 memory = memsw = PAGE_COUNTER_MAX; 4170 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4171 memory = min(memory, READ_ONCE(mi->memory.max)); 4172 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4173 } 4174 seq_printf(m, "hierarchical_memory_limit %llu\n", 4175 (u64)memory * PAGE_SIZE); 4176 if (do_memsw_account()) 4177 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4178 (u64)memsw * PAGE_SIZE); 4179 4180 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4181 unsigned long nr; 4182 4183 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4184 continue; 4185 nr = memcg_page_state(memcg, memcg1_stats[i]); 4186 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4187 (u64)nr * memcg_page_state_unit(memcg1_stats[i])); 4188 } 4189 4190 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4191 seq_printf(m, "total_%s %llu\n", 4192 vm_event_name(memcg1_events[i]), 4193 (u64)memcg_events(memcg, memcg1_events[i])); 4194 4195 for (i = 0; i < NR_LRU_LISTS; i++) 4196 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4197 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4198 PAGE_SIZE); 4199 4200 #ifdef CONFIG_DEBUG_VM 4201 { 4202 pg_data_t *pgdat; 4203 struct mem_cgroup_per_node *mz; 4204 unsigned long anon_cost = 0; 4205 unsigned long file_cost = 0; 4206 4207 for_each_online_pgdat(pgdat) { 4208 mz = memcg->nodeinfo[pgdat->node_id]; 4209 4210 anon_cost += mz->lruvec.anon_cost; 4211 file_cost += mz->lruvec.file_cost; 4212 } 4213 seq_printf(m, "anon_cost %lu\n", anon_cost); 4214 seq_printf(m, "file_cost %lu\n", file_cost); 4215 } 4216 #endif 4217 4218 return 0; 4219 } 4220 4221 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4222 struct cftype *cft) 4223 { 4224 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4225 4226 return mem_cgroup_swappiness(memcg); 4227 } 4228 4229 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4230 struct cftype *cft, u64 val) 4231 { 4232 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4233 4234 if (val > 200) 4235 return -EINVAL; 4236 4237 if (!mem_cgroup_is_root(memcg)) 4238 WRITE_ONCE(memcg->swappiness, val); 4239 else 4240 WRITE_ONCE(vm_swappiness, val); 4241 4242 return 0; 4243 } 4244 4245 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4246 { 4247 struct mem_cgroup_threshold_ary *t; 4248 unsigned long usage; 4249 int i; 4250 4251 rcu_read_lock(); 4252 if (!swap) 4253 t = rcu_dereference(memcg->thresholds.primary); 4254 else 
4255 t = rcu_dereference(memcg->memsw_thresholds.primary); 4256 4257 if (!t) 4258 goto unlock; 4259 4260 usage = mem_cgroup_usage(memcg, swap); 4261 4262 /* 4263 * current_threshold points to threshold just below or equal to usage. 4264 * If it's not true, a threshold was crossed after last 4265 * call of __mem_cgroup_threshold(). 4266 */ 4267 i = t->current_threshold; 4268 4269 /* 4270 * Iterate backward over array of thresholds starting from 4271 * current_threshold and check if a threshold is crossed. 4272 * If none of thresholds below usage is crossed, we read 4273 * only one element of the array here. 4274 */ 4275 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4276 eventfd_signal(t->entries[i].eventfd, 1); 4277 4278 /* i = current_threshold + 1 */ 4279 i++; 4280 4281 /* 4282 * Iterate forward over array of thresholds starting from 4283 * current_threshold+1 and check if a threshold is crossed. 4284 * If none of thresholds above usage is crossed, we read 4285 * only one element of the array here. 4286 */ 4287 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4288 eventfd_signal(t->entries[i].eventfd, 1); 4289 4290 /* Update current_threshold */ 4291 t->current_threshold = i - 1; 4292 unlock: 4293 rcu_read_unlock(); 4294 } 4295 4296 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4297 { 4298 while (memcg) { 4299 __mem_cgroup_threshold(memcg, false); 4300 if (do_memsw_account()) 4301 __mem_cgroup_threshold(memcg, true); 4302 4303 memcg = parent_mem_cgroup(memcg); 4304 } 4305 } 4306 4307 static int compare_thresholds(const void *a, const void *b) 4308 { 4309 const struct mem_cgroup_threshold *_a = a; 4310 const struct mem_cgroup_threshold *_b = b; 4311 4312 if (_a->threshold > _b->threshold) 4313 return 1; 4314 4315 if (_a->threshold < _b->threshold) 4316 return -1; 4317 4318 return 0; 4319 } 4320 4321 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4322 { 4323 struct mem_cgroup_eventfd_list *ev; 4324 4325 spin_lock(&memcg_oom_lock); 4326 4327 list_for_each_entry(ev, &memcg->oom_notify, list) 4328 eventfd_signal(ev->eventfd, 1); 4329 4330 spin_unlock(&memcg_oom_lock); 4331 return 0; 4332 } 4333 4334 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4335 { 4336 struct mem_cgroup *iter; 4337 4338 for_each_mem_cgroup_tree(iter, memcg) 4339 mem_cgroup_oom_notify_cb(iter); 4340 } 4341 4342 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4343 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4344 { 4345 struct mem_cgroup_thresholds *thresholds; 4346 struct mem_cgroup_threshold_ary *new; 4347 unsigned long threshold; 4348 unsigned long usage; 4349 int i, size, ret; 4350 4351 ret = page_counter_memparse(args, "-1", &threshold); 4352 if (ret) 4353 return ret; 4354 4355 mutex_lock(&memcg->thresholds_lock); 4356 4357 if (type == _MEM) { 4358 thresholds = &memcg->thresholds; 4359 usage = mem_cgroup_usage(memcg, false); 4360 } else if (type == _MEMSWAP) { 4361 thresholds = &memcg->memsw_thresholds; 4362 usage = mem_cgroup_usage(memcg, true); 4363 } else 4364 BUG(); 4365 4366 /* Check if a threshold crossed before adding a new one */ 4367 if (thresholds->primary) 4368 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4369 4370 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4371 4372 /* Allocate memory for new array of thresholds */ 4373 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4374 if (!new) { 4375 ret = -ENOMEM; 4376 goto unlock; 4377 } 4378 new->size = size; 4379 4380 /* Copy thresholds (if any) to new array */ 4381 if (thresholds->primary) 4382 memcpy(new->entries, thresholds->primary->entries, 4383 flex_array_size(new, entries, size - 1)); 4384 4385 /* Add new threshold */ 4386 new->entries[size - 1].eventfd = eventfd; 4387 new->entries[size - 1].threshold = threshold; 4388 4389 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4390 sort(new->entries, size, sizeof(*new->entries), 4391 compare_thresholds, NULL); 4392 4393 /* Find current threshold */ 4394 new->current_threshold = -1; 4395 for (i = 0; i < size; i++) { 4396 if (new->entries[i].threshold <= usage) { 4397 /* 4398 * new->current_threshold will not be used until 4399 * rcu_assign_pointer(), so it's safe to increment 4400 * it here. 4401 */ 4402 ++new->current_threshold; 4403 } else 4404 break; 4405 } 4406 4407 /* Free old spare buffer and save old primary buffer as spare */ 4408 kfree(thresholds->spare); 4409 thresholds->spare = thresholds->primary; 4410 4411 rcu_assign_pointer(thresholds->primary, new); 4412 4413 /* To be sure that nobody uses thresholds */ 4414 synchronize_rcu(); 4415 4416 unlock: 4417 mutex_unlock(&memcg->thresholds_lock); 4418 4419 return ret; 4420 } 4421 4422 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4423 struct eventfd_ctx *eventfd, const char *args) 4424 { 4425 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4426 } 4427 4428 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4429 struct eventfd_ctx *eventfd, const char *args) 4430 { 4431 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4432 } 4433 4434 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4435 struct eventfd_ctx *eventfd, enum res_type type) 4436 { 4437 struct mem_cgroup_thresholds *thresholds; 4438 struct mem_cgroup_threshold_ary *new; 4439 unsigned long usage; 4440 int i, j, size, entries; 4441 4442 mutex_lock(&memcg->thresholds_lock); 4443 4444 if (type == _MEM) { 4445 thresholds = &memcg->thresholds; 4446 usage = mem_cgroup_usage(memcg, false); 4447 } else if (type == _MEMSWAP) { 4448 thresholds = &memcg->memsw_thresholds; 4449 usage = mem_cgroup_usage(memcg, true); 4450 } else 4451 BUG(); 4452 4453 if (!thresholds->primary) 4454 goto unlock; 4455 4456 /* Check if a threshold crossed before removing */ 4457 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4458 4459 /* Calculate new number of threshold */ 4460 size = entries = 0; 4461 for (i = 0; i < thresholds->primary->size; i++) { 4462 if (thresholds->primary->entries[i].eventfd != eventfd) 4463 size++; 4464 else 4465 entries++; 4466 } 4467 4468 new = thresholds->spare; 4469 4470 /* If no items related to eventfd have been cleared, nothing to do */ 4471 if (!entries) 4472 goto unlock; 4473 4474 /* Set thresholds array to NULL if we don't have thresholds */ 4475 if (!size) { 4476 kfree(new); 4477 new = NULL; 4478 goto swap_buffers; 4479 } 4480 4481 new->size = size; 4482 4483 /* Copy thresholds and find current threshold */ 4484 new->current_threshold = -1; 4485 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4486 if (thresholds->primary->entries[i].eventfd == eventfd) 4487 continue; 4488 4489 new->entries[j] = thresholds->primary->entries[i]; 4490 if 
(new->entries[j].threshold <= usage) { 4491 /* 4492 * new->current_threshold will not be used 4493 * until rcu_assign_pointer(), so it's safe to increment 4494 * it here. 4495 */ 4496 ++new->current_threshold; 4497 } 4498 j++; 4499 } 4500 4501 swap_buffers: 4502 /* Swap primary and spare array */ 4503 thresholds->spare = thresholds->primary; 4504 4505 rcu_assign_pointer(thresholds->primary, new); 4506 4507 /* To be sure that nobody uses thresholds */ 4508 synchronize_rcu(); 4509 4510 /* If all events are unregistered, free the spare array */ 4511 if (!new) { 4512 kfree(thresholds->spare); 4513 thresholds->spare = NULL; 4514 } 4515 unlock: 4516 mutex_unlock(&memcg->thresholds_lock); 4517 } 4518 4519 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4520 struct eventfd_ctx *eventfd) 4521 { 4522 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4523 } 4524 4525 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4526 struct eventfd_ctx *eventfd) 4527 { 4528 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4529 } 4530 4531 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4532 struct eventfd_ctx *eventfd, const char *args) 4533 { 4534 struct mem_cgroup_eventfd_list *event; 4535 4536 event = kmalloc(sizeof(*event), GFP_KERNEL); 4537 if (!event) 4538 return -ENOMEM; 4539 4540 spin_lock(&memcg_oom_lock); 4541 4542 event->eventfd = eventfd; 4543 list_add(&event->list, &memcg->oom_notify); 4544 4545 /* already in OOM ? */ 4546 if (memcg->under_oom) 4547 eventfd_signal(eventfd, 1); 4548 spin_unlock(&memcg_oom_lock); 4549 4550 return 0; 4551 } 4552 4553 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4554 struct eventfd_ctx *eventfd) 4555 { 4556 struct mem_cgroup_eventfd_list *ev, *tmp; 4557 4558 spin_lock(&memcg_oom_lock); 4559 4560 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4561 if (ev->eventfd == eventfd) { 4562 list_del(&ev->list); 4563 kfree(ev); 4564 } 4565 } 4566 4567 spin_unlock(&memcg_oom_lock); 4568 } 4569 4570 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4571 { 4572 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4573 4574 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable)); 4575 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4576 seq_printf(sf, "oom_kill %lu\n", 4577 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4578 return 0; 4579 } 4580 4581 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4582 struct cftype *cft, u64 val) 4583 { 4584 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4585 4586 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4587 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) 4588 return -EINVAL; 4589 4590 WRITE_ONCE(memcg->oom_kill_disable, val); 4591 if (!val) 4592 memcg_oom_recover(memcg); 4593 4594 return 0; 4595 } 4596 4597 #ifdef CONFIG_CGROUP_WRITEBACK 4598 4599 #include <trace/events/writeback.h> 4600 4601 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4602 { 4603 return wb_domain_init(&memcg->cgwb_domain, gfp); 4604 } 4605 4606 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4607 { 4608 wb_domain_exit(&memcg->cgwb_domain); 4609 } 4610 4611 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4612 { 4613 wb_domain_size_changed(&memcg->cgwb_domain); 4614 } 4615 4616 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4617 { 4618 struct 
mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4619 4620 if (!memcg->css.parent) 4621 return NULL; 4622 4623 return &memcg->cgwb_domain; 4624 } 4625 4626 /** 4627 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4628 * @wb: bdi_writeback in question 4629 * @pfilepages: out parameter for number of file pages 4630 * @pheadroom: out parameter for number of allocatable pages according to memcg 4631 * @pdirty: out parameter for number of dirty pages 4632 * @pwriteback: out parameter for number of pages under writeback 4633 * 4634 * Determine the numbers of file, headroom, dirty, and writeback pages in 4635 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4636 * is a bit more involved. 4637 * 4638 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4639 * headroom is calculated as the lowest headroom of itself and the 4640 * ancestors. Note that this doesn't consider the actual amount of 4641 * available memory in the system. The caller should further cap 4642 * *@pheadroom accordingly. 4643 */ 4644 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4645 unsigned long *pheadroom, unsigned long *pdirty, 4646 unsigned long *pwriteback) 4647 { 4648 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4649 struct mem_cgroup *parent; 4650 4651 /* 4652 * wb_writeback() takes a spinlock and calls 4653 * wb_over_bg_thresh()->mem_cgroup_wb_stats(). Do not sleep. 4654 */ 4655 mem_cgroup_flush_stats_atomic(); 4656 4657 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 4658 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 4659 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 4660 memcg_page_state(memcg, NR_ACTIVE_FILE); 4661 4662 *pheadroom = PAGE_COUNTER_MAX; 4663 while ((parent = parent_mem_cgroup(memcg))) { 4664 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4665 READ_ONCE(memcg->memory.high)); 4666 unsigned long used = page_counter_read(&memcg->memory); 4667 4668 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4669 memcg = parent; 4670 } 4671 } 4672 4673 /* 4674 * Foreign dirty flushing 4675 * 4676 * There's an inherent mismatch between memcg and writeback. The former 4677 * tracks ownership per-page while the latter per-inode. This was a 4678 * deliberate design decision because honoring per-page ownership in the 4679 * writeback path is complicated, may lead to higher CPU and IO overheads 4680 * and deemed unnecessary given that write-sharing an inode across 4681 * different cgroups isn't a common use-case. 4682 * 4683 * Combined with inode majority-writer ownership switching, this works well 4684 * enough in most cases but there are some pathological cases. For 4685 * example, let's say there are two cgroups A and B which keep writing to 4686 * different but confined parts of the same inode. B owns the inode and 4687 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4688 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4689 * triggering background writeback. A will be slowed down without a way to 4690 * make writeback of the dirty pages happen. 4691 * 4692 * Conditions like the above can lead to a cgroup getting repeatedly and 4693 * severely throttled after making some progress after each 4694 * dirty_expire_interval while the underlying IO device is almost 4695 * completely idle. 
4696 * 4697 * Solving this problem completely requires matching the ownership tracking 4698 * granularities between memcg and writeback in either direction. However, 4699 * the more egregious behaviors can be avoided by simply remembering the 4700 * most recent foreign dirtying events and initiating remote flushes on 4701 * them when local writeback isn't enough to keep the memory clean enough. 4702 * 4703 * The following two functions implement such mechanism. When a foreign 4704 * page - a page whose memcg and writeback ownerships don't match - is 4705 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4706 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4707 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4708 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4709 * foreign bdi_writebacks which haven't expired. Both the numbers of 4710 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4711 * limited to MEMCG_CGWB_FRN_CNT. 4712 * 4713 * The mechanism only remembers IDs and doesn't hold any object references. 4714 * As being wrong occasionally doesn't matter, updates and accesses to the 4715 * records are lockless and racy. 4716 */ 4717 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, 4718 struct bdi_writeback *wb) 4719 { 4720 struct mem_cgroup *memcg = folio_memcg(folio); 4721 struct memcg_cgwb_frn *frn; 4722 u64 now = get_jiffies_64(); 4723 u64 oldest_at = now; 4724 int oldest = -1; 4725 int i; 4726 4727 trace_track_foreign_dirty(folio, wb); 4728 4729 /* 4730 * Pick the slot to use. If there is already a slot for @wb, keep 4731 * using it. If not replace the oldest one which isn't being 4732 * written out. 4733 */ 4734 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4735 frn = &memcg->cgwb_frn[i]; 4736 if (frn->bdi_id == wb->bdi->id && 4737 frn->memcg_id == wb->memcg_css->id) 4738 break; 4739 if (time_before64(frn->at, oldest_at) && 4740 atomic_read(&frn->done.cnt) == 1) { 4741 oldest = i; 4742 oldest_at = frn->at; 4743 } 4744 } 4745 4746 if (i < MEMCG_CGWB_FRN_CNT) { 4747 /* 4748 * Re-using an existing one. Update timestamp lazily to 4749 * avoid making the cacheline hot. We want them to be 4750 * reasonably up-to-date and significantly shorter than 4751 * dirty_expire_interval as that's what expires the record. 4752 * Use the shorter of 1s and dirty_expire_interval / 8. 4753 */ 4754 unsigned long update_intv = 4755 min_t(unsigned long, HZ, 4756 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4757 4758 if (time_before64(frn->at, now - update_intv)) 4759 frn->at = now; 4760 } else if (oldest >= 0) { 4761 /* replace the oldest free one */ 4762 frn = &memcg->cgwb_frn[oldest]; 4763 frn->bdi_id = wb->bdi->id; 4764 frn->memcg_id = wb->memcg_css->id; 4765 frn->at = now; 4766 } 4767 } 4768 4769 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4770 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4771 { 4772 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4773 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4774 u64 now = jiffies_64; 4775 int i; 4776 4777 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4778 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4779 4780 /* 4781 * If the record is older than dirty_expire_interval, 4782 * writeback on it has already started. No need to kick it 4783 * off again. Also, don't start a new one if there's 4784 * already one in flight. 
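		 * A record is considered "in flight" while frn->done.cnt is
		 * above 1, i.e. while a flush previously queued on &frn->done
		 * has not completed yet.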
4785 */ 4786 if (time_after64(frn->at, now - intv) && 4787 atomic_read(&frn->done.cnt) == 1) { 4788 frn->at = 0; 4789 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4790 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 4791 WB_REASON_FOREIGN_FLUSH, 4792 &frn->done); 4793 } 4794 } 4795 } 4796 4797 #else /* CONFIG_CGROUP_WRITEBACK */ 4798 4799 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4800 { 4801 return 0; 4802 } 4803 4804 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4805 { 4806 } 4807 4808 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4809 { 4810 } 4811 4812 #endif /* CONFIG_CGROUP_WRITEBACK */ 4813 4814 /* 4815 * DO NOT USE IN NEW FILES. 4816 * 4817 * "cgroup.event_control" implementation. 4818 * 4819 * This is way over-engineered. It tries to support fully configurable 4820 * events for each user. Such level of flexibility is completely 4821 * unnecessary especially in the light of the planned unified hierarchy. 4822 * 4823 * Please deprecate this and replace with something simpler if at all 4824 * possible. 4825 */ 4826 4827 /* 4828 * Unregister event and free resources. 4829 * 4830 * Gets called from workqueue. 4831 */ 4832 static void memcg_event_remove(struct work_struct *work) 4833 { 4834 struct mem_cgroup_event *event = 4835 container_of(work, struct mem_cgroup_event, remove); 4836 struct mem_cgroup *memcg = event->memcg; 4837 4838 remove_wait_queue(event->wqh, &event->wait); 4839 4840 event->unregister_event(memcg, event->eventfd); 4841 4842 /* Notify userspace the event is going away. */ 4843 eventfd_signal(event->eventfd, 1); 4844 4845 eventfd_ctx_put(event->eventfd); 4846 kfree(event); 4847 css_put(&memcg->css); 4848 } 4849 4850 /* 4851 * Gets called on EPOLLHUP on eventfd when user closes it. 4852 * 4853 * Called with wqh->lock held and interrupts disabled. 4854 */ 4855 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4856 int sync, void *key) 4857 { 4858 struct mem_cgroup_event *event = 4859 container_of(wait, struct mem_cgroup_event, wait); 4860 struct mem_cgroup *memcg = event->memcg; 4861 __poll_t flags = key_to_poll(key); 4862 4863 if (flags & EPOLLHUP) { 4864 /* 4865 * If the event has been detached at cgroup removal, we 4866 * can simply return knowing the other side will cleanup 4867 * for us. 4868 * 4869 * We can't race against event freeing since the other 4870 * side will require wqh->lock via remove_wait_queue(), 4871 * which we hold. 4872 */ 4873 spin_lock(&memcg->event_list_lock); 4874 if (!list_empty(&event->list)) { 4875 list_del_init(&event->list); 4876 /* 4877 * We are in atomic context, but cgroup_event_remove() 4878 * may sleep, so we have to call it in workqueue. 4879 */ 4880 schedule_work(&event->remove); 4881 } 4882 spin_unlock(&memcg->event_list_lock); 4883 } 4884 4885 return 0; 4886 } 4887 4888 static void memcg_event_ptable_queue_proc(struct file *file, 4889 wait_queue_head_t *wqh, poll_table *pt) 4890 { 4891 struct mem_cgroup_event *event = 4892 container_of(pt, struct mem_cgroup_event, pt); 4893 4894 event->wqh = wqh; 4895 add_wait_queue(wqh, &event->wait); 4896 } 4897 4898 /* 4899 * DO NOT USE IN NEW FILES. 4900 * 4901 * Parse input and register new cgroup event handler. 4902 * 4903 * Input must be in format '<event_fd> <control_fd> <args>'. 4904 * Interpretation of args is defined by control file implementation. 
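 *
 * A rough userspace sketch for registering a usage threshold (the fd
 * numbers, relative paths and the 50M value are illustrative only, and
 * error handling is omitted):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int ctl = open("cgroup.event_control", O_WRONLY);
 *	uint64_t ticks;
 *
 *	dprintf(ctl, "%d %d 50M", efd, cfd);
 *	read(efd, &ticks, sizeof(ticks));
 *
 * The read() on the eventfd returns once usage crosses the 50M threshold.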
4905 */ 4906 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4907 char *buf, size_t nbytes, loff_t off) 4908 { 4909 struct cgroup_subsys_state *css = of_css(of); 4910 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4911 struct mem_cgroup_event *event; 4912 struct cgroup_subsys_state *cfile_css; 4913 unsigned int efd, cfd; 4914 struct fd efile; 4915 struct fd cfile; 4916 struct dentry *cdentry; 4917 const char *name; 4918 char *endp; 4919 int ret; 4920 4921 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 4922 return -EOPNOTSUPP; 4923 4924 buf = strstrip(buf); 4925 4926 efd = simple_strtoul(buf, &endp, 10); 4927 if (*endp != ' ') 4928 return -EINVAL; 4929 buf = endp + 1; 4930 4931 cfd = simple_strtoul(buf, &endp, 10); 4932 if ((*endp != ' ') && (*endp != '\0')) 4933 return -EINVAL; 4934 buf = endp + 1; 4935 4936 event = kzalloc(sizeof(*event), GFP_KERNEL); 4937 if (!event) 4938 return -ENOMEM; 4939 4940 event->memcg = memcg; 4941 INIT_LIST_HEAD(&event->list); 4942 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4943 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4944 INIT_WORK(&event->remove, memcg_event_remove); 4945 4946 efile = fdget(efd); 4947 if (!efile.file) { 4948 ret = -EBADF; 4949 goto out_kfree; 4950 } 4951 4952 event->eventfd = eventfd_ctx_fileget(efile.file); 4953 if (IS_ERR(event->eventfd)) { 4954 ret = PTR_ERR(event->eventfd); 4955 goto out_put_efile; 4956 } 4957 4958 cfile = fdget(cfd); 4959 if (!cfile.file) { 4960 ret = -EBADF; 4961 goto out_put_eventfd; 4962 } 4963 4964 /* the process need read permission on control file */ 4965 /* AV: shouldn't we check that it's been opened for read instead? */ 4966 ret = file_permission(cfile.file, MAY_READ); 4967 if (ret < 0) 4968 goto out_put_cfile; 4969 4970 /* 4971 * The control file must be a regular cgroup1 file. As a regular cgroup 4972 * file can't be renamed, it's safe to access its name afterwards. 4973 */ 4974 cdentry = cfile.file->f_path.dentry; 4975 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { 4976 ret = -EINVAL; 4977 goto out_put_cfile; 4978 } 4979 4980 /* 4981 * Determine the event callbacks and set them in @event. This used 4982 * to be done via struct cftype but cgroup core no longer knows 4983 * about these events. The following is crude but the whole thing 4984 * is for compatibility anyway. 4985 * 4986 * DO NOT ADD NEW FILES. 4987 */ 4988 name = cdentry->d_name.name; 4989 4990 if (!strcmp(name, "memory.usage_in_bytes")) { 4991 event->register_event = mem_cgroup_usage_register_event; 4992 event->unregister_event = mem_cgroup_usage_unregister_event; 4993 } else if (!strcmp(name, "memory.oom_control")) { 4994 event->register_event = mem_cgroup_oom_register_event; 4995 event->unregister_event = mem_cgroup_oom_unregister_event; 4996 } else if (!strcmp(name, "memory.pressure_level")) { 4997 event->register_event = vmpressure_register_event; 4998 event->unregister_event = vmpressure_unregister_event; 4999 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 5000 event->register_event = memsw_cgroup_usage_register_event; 5001 event->unregister_event = memsw_cgroup_usage_unregister_event; 5002 } else { 5003 ret = -EINVAL; 5004 goto out_put_cfile; 5005 } 5006 5007 /* 5008 * Verify @cfile should belong to @css. Also, remaining events are 5009 * automatically removed on cgroup destruction but the removal is 5010 * asynchronous, so take an extra ref on @css. 
5011 */ 5012 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, 5013 &memory_cgrp_subsys); 5014 ret = -EINVAL; 5015 if (IS_ERR(cfile_css)) 5016 goto out_put_cfile; 5017 if (cfile_css != css) { 5018 css_put(cfile_css); 5019 goto out_put_cfile; 5020 } 5021 5022 ret = event->register_event(memcg, event->eventfd, buf); 5023 if (ret) 5024 goto out_put_css; 5025 5026 vfs_poll(efile.file, &event->pt); 5027 5028 spin_lock_irq(&memcg->event_list_lock); 5029 list_add(&event->list, &memcg->event_list); 5030 spin_unlock_irq(&memcg->event_list_lock); 5031 5032 fdput(cfile); 5033 fdput(efile); 5034 5035 return nbytes; 5036 5037 out_put_css: 5038 css_put(css); 5039 out_put_cfile: 5040 fdput(cfile); 5041 out_put_eventfd: 5042 eventfd_ctx_put(event->eventfd); 5043 out_put_efile: 5044 fdput(efile); 5045 out_kfree: 5046 kfree(event); 5047 5048 return ret; 5049 } 5050 5051 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5052 static int mem_cgroup_slab_show(struct seq_file *m, void *p) 5053 { 5054 /* 5055 * Deprecated. 5056 * Please, take a look at tools/cgroup/memcg_slabinfo.py . 5057 */ 5058 return 0; 5059 } 5060 #endif 5061 5062 static struct cftype mem_cgroup_legacy_files[] = { 5063 { 5064 .name = "usage_in_bytes", 5065 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 5066 .read_u64 = mem_cgroup_read_u64, 5067 }, 5068 { 5069 .name = "max_usage_in_bytes", 5070 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 5071 .write = mem_cgroup_reset, 5072 .read_u64 = mem_cgroup_read_u64, 5073 }, 5074 { 5075 .name = "limit_in_bytes", 5076 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 5077 .write = mem_cgroup_write, 5078 .read_u64 = mem_cgroup_read_u64, 5079 }, 5080 { 5081 .name = "soft_limit_in_bytes", 5082 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 5083 .write = mem_cgroup_write, 5084 .read_u64 = mem_cgroup_read_u64, 5085 }, 5086 { 5087 .name = "failcnt", 5088 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 5089 .write = mem_cgroup_reset, 5090 .read_u64 = mem_cgroup_read_u64, 5091 }, 5092 { 5093 .name = "stat", 5094 .seq_show = memcg_stat_show, 5095 }, 5096 { 5097 .name = "force_empty", 5098 .write = mem_cgroup_force_empty_write, 5099 }, 5100 { 5101 .name = "use_hierarchy", 5102 .write_u64 = mem_cgroup_hierarchy_write, 5103 .read_u64 = mem_cgroup_hierarchy_read, 5104 }, 5105 { 5106 .name = "cgroup.event_control", /* XXX: for compat */ 5107 .write = memcg_write_event_control, 5108 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 5109 }, 5110 { 5111 .name = "swappiness", 5112 .read_u64 = mem_cgroup_swappiness_read, 5113 .write_u64 = mem_cgroup_swappiness_write, 5114 }, 5115 { 5116 .name = "move_charge_at_immigrate", 5117 .read_u64 = mem_cgroup_move_charge_read, 5118 .write_u64 = mem_cgroup_move_charge_write, 5119 }, 5120 { 5121 .name = "oom_control", 5122 .seq_show = mem_cgroup_oom_control_read, 5123 .write_u64 = mem_cgroup_oom_control_write, 5124 }, 5125 { 5126 .name = "pressure_level", 5127 .seq_show = mem_cgroup_dummy_seq_show, 5128 }, 5129 #ifdef CONFIG_NUMA 5130 { 5131 .name = "numa_stat", 5132 .seq_show = memcg_numa_stat_show, 5133 }, 5134 #endif 5135 { 5136 .name = "kmem.limit_in_bytes", 5137 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 5138 .write = mem_cgroup_write, 5139 .read_u64 = mem_cgroup_read_u64, 5140 }, 5141 { 5142 .name = "kmem.usage_in_bytes", 5143 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5144 .read_u64 = mem_cgroup_read_u64, 5145 }, 5146 { 5147 .name = "kmem.failcnt", 5148 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5149 .write = 
mem_cgroup_reset, 5150 .read_u64 = mem_cgroup_read_u64, 5151 }, 5152 { 5153 .name = "kmem.max_usage_in_bytes", 5154 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5155 .write = mem_cgroup_reset, 5156 .read_u64 = mem_cgroup_read_u64, 5157 }, 5158 #if defined(CONFIG_MEMCG_KMEM) && \ 5159 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5160 { 5161 .name = "kmem.slabinfo", 5162 .seq_show = mem_cgroup_slab_show, 5163 }, 5164 #endif 5165 { 5166 .name = "kmem.tcp.limit_in_bytes", 5167 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5168 .write = mem_cgroup_write, 5169 .read_u64 = mem_cgroup_read_u64, 5170 }, 5171 { 5172 .name = "kmem.tcp.usage_in_bytes", 5173 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5174 .read_u64 = mem_cgroup_read_u64, 5175 }, 5176 { 5177 .name = "kmem.tcp.failcnt", 5178 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5179 .write = mem_cgroup_reset, 5180 .read_u64 = mem_cgroup_read_u64, 5181 }, 5182 { 5183 .name = "kmem.tcp.max_usage_in_bytes", 5184 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5185 .write = mem_cgroup_reset, 5186 .read_u64 = mem_cgroup_read_u64, 5187 }, 5188 { }, /* terminate */ 5189 }; 5190 5191 /* 5192 * Private memory cgroup IDR 5193 * 5194 * Swap-out records and page cache shadow entries need to store memcg 5195 * references in constrained space, so we maintain an ID space that is 5196 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5197 * memory-controlled cgroups to 64k. 5198 * 5199 * However, there usually are many references to the offline CSS after 5200 * the cgroup has been destroyed, such as page cache or reclaimable 5201 * slab objects, that don't need to hang on to the ID. We want to keep 5202 * those dead CSS from occupying IDs, or we might quickly exhaust the 5203 * relatively small ID space and prevent the creation of new cgroups 5204 * even when there are much fewer than 64k cgroups - possibly none. 5205 * 5206 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5207 * be freed and recycled when it's no longer needed, which is usually 5208 * when the CSS is offlined. 5209 * 5210 * The only exception to that are records of swapped out tmpfs/shmem 5211 * pages that need to be attributed to live ancestors on swapin. But 5212 * those references are manageable from userspace. 5213 */ 5214 5215 static DEFINE_IDR(mem_cgroup_idr); 5216 5217 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5218 { 5219 if (memcg->id.id > 0) { 5220 idr_remove(&mem_cgroup_idr, memcg->id.id); 5221 memcg->id.id = 0; 5222 } 5223 } 5224 5225 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5226 unsigned int n) 5227 { 5228 refcount_add(n, &memcg->id.ref); 5229 } 5230 5231 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5232 { 5233 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5234 mem_cgroup_id_remove(memcg); 5235 5236 /* Memcg ID pins CSS */ 5237 css_put(&memcg->css); 5238 } 5239 } 5240 5241 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5242 { 5243 mem_cgroup_id_put_many(memcg, 1); 5244 } 5245 5246 /** 5247 * mem_cgroup_from_id - look up a memcg from a memcg id 5248 * @id: the memcg id to look up 5249 * 5250 * Caller must hold rcu_read_lock(). 
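 *
 * A typical lookup that wants to keep the memcg alive beyond the RCU
 * section therefore looks roughly like this (sketch):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();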
5251 */ 5252 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5253 { 5254 WARN_ON_ONCE(!rcu_read_lock_held()); 5255 return idr_find(&mem_cgroup_idr, id); 5256 } 5257 5258 #ifdef CONFIG_SHRINKER_DEBUG 5259 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) 5260 { 5261 struct cgroup *cgrp; 5262 struct cgroup_subsys_state *css; 5263 struct mem_cgroup *memcg; 5264 5265 cgrp = cgroup_get_from_id(ino); 5266 if (IS_ERR(cgrp)) 5267 return ERR_CAST(cgrp); 5268 5269 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys); 5270 if (css) 5271 memcg = container_of(css, struct mem_cgroup, css); 5272 else 5273 memcg = ERR_PTR(-ENOENT); 5274 5275 cgroup_put(cgrp); 5276 5277 return memcg; 5278 } 5279 #endif 5280 5281 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5282 { 5283 struct mem_cgroup_per_node *pn; 5284 5285 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node); 5286 if (!pn) 5287 return 1; 5288 5289 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, 5290 GFP_KERNEL_ACCOUNT); 5291 if (!pn->lruvec_stats_percpu) { 5292 kfree(pn); 5293 return 1; 5294 } 5295 5296 lruvec_init(&pn->lruvec); 5297 pn->memcg = memcg; 5298 5299 memcg->nodeinfo[node] = pn; 5300 return 0; 5301 } 5302 5303 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5304 { 5305 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5306 5307 if (!pn) 5308 return; 5309 5310 free_percpu(pn->lruvec_stats_percpu); 5311 kfree(pn); 5312 } 5313 5314 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5315 { 5316 int node; 5317 5318 for_each_node(node) 5319 free_mem_cgroup_per_node_info(memcg, node); 5320 kfree(memcg->vmstats); 5321 free_percpu(memcg->vmstats_percpu); 5322 kfree(memcg); 5323 } 5324 5325 static void mem_cgroup_free(struct mem_cgroup *memcg) 5326 { 5327 lru_gen_exit_memcg(memcg); 5328 memcg_wb_domain_exit(memcg); 5329 __mem_cgroup_free(memcg); 5330 } 5331 5332 static struct mem_cgroup *mem_cgroup_alloc(void) 5333 { 5334 struct mem_cgroup *memcg; 5335 int node; 5336 int __maybe_unused i; 5337 long error = -ENOMEM; 5338 5339 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL); 5340 if (!memcg) 5341 return ERR_PTR(error); 5342 5343 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5344 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL); 5345 if (memcg->id.id < 0) { 5346 error = memcg->id.id; 5347 goto fail; 5348 } 5349 5350 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL); 5351 if (!memcg->vmstats) 5352 goto fail; 5353 5354 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5355 GFP_KERNEL_ACCOUNT); 5356 if (!memcg->vmstats_percpu) 5357 goto fail; 5358 5359 for_each_node(node) 5360 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5361 goto fail; 5362 5363 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5364 goto fail; 5365 5366 INIT_WORK(&memcg->high_work, high_work_func); 5367 INIT_LIST_HEAD(&memcg->oom_notify); 5368 mutex_init(&memcg->thresholds_lock); 5369 spin_lock_init(&memcg->move_lock); 5370 vmpressure_init(&memcg->vmpressure); 5371 INIT_LIST_HEAD(&memcg->event_list); 5372 spin_lock_init(&memcg->event_list_lock); 5373 memcg->socket_pressure = jiffies; 5374 #ifdef CONFIG_MEMCG_KMEM 5375 memcg->kmemcg_id = -1; 5376 INIT_LIST_HEAD(&memcg->objcg_list); 5377 #endif 5378 #ifdef CONFIG_CGROUP_WRITEBACK 5379 INIT_LIST_HEAD(&memcg->cgwb_list); 5380 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5381 memcg->cgwb_frn[i].done = 5382 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5383 #endif 5384 #ifdef 
CONFIG_TRANSPARENT_HUGEPAGE 5385 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5386 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5387 memcg->deferred_split_queue.split_queue_len = 0; 5388 #endif 5389 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5390 lru_gen_init_memcg(memcg); 5391 return memcg; 5392 fail: 5393 mem_cgroup_id_remove(memcg); 5394 __mem_cgroup_free(memcg); 5395 return ERR_PTR(error); 5396 } 5397 5398 static struct cgroup_subsys_state * __ref 5399 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5400 { 5401 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5402 struct mem_cgroup *memcg, *old_memcg; 5403 5404 old_memcg = set_active_memcg(parent); 5405 memcg = mem_cgroup_alloc(); 5406 set_active_memcg(old_memcg); 5407 if (IS_ERR(memcg)) 5408 return ERR_CAST(memcg); 5409 5410 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5411 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); 5412 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 5413 memcg->zswap_max = PAGE_COUNTER_MAX; 5414 #endif 5415 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5416 if (parent) { 5417 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); 5418 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); 5419 5420 page_counter_init(&memcg->memory, &parent->memory); 5421 page_counter_init(&memcg->swap, &parent->swap); 5422 page_counter_init(&memcg->kmem, &parent->kmem); 5423 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5424 } else { 5425 init_memcg_events(); 5426 page_counter_init(&memcg->memory, NULL); 5427 page_counter_init(&memcg->swap, NULL); 5428 page_counter_init(&memcg->kmem, NULL); 5429 page_counter_init(&memcg->tcpmem, NULL); 5430 5431 root_mem_cgroup = memcg; 5432 return &memcg->css; 5433 } 5434 5435 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5436 static_branch_inc(&memcg_sockets_enabled_key); 5437 5438 #if defined(CONFIG_MEMCG_KMEM) 5439 if (!cgroup_memory_nobpf) 5440 static_branch_inc(&memcg_bpf_enabled_key); 5441 #endif 5442 5443 return &memcg->css; 5444 } 5445 5446 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5447 { 5448 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5449 5450 if (memcg_online_kmem(memcg)) 5451 goto remove_id; 5452 5453 /* 5454 * A memcg must be visible for expand_shrinker_info() 5455 * by the time the maps are allocated. So, we allocate maps 5456 * here, when for_each_mem_cgroup() can't skip it. 5457 */ 5458 if (alloc_shrinker_info(memcg)) 5459 goto offline_kmem; 5460 5461 /* Online state pins memcg ID, memcg ID pins CSS */ 5462 refcount_set(&memcg->id.ref, 1); 5463 css_get(css); 5464 5465 if (unlikely(mem_cgroup_is_root(memcg))) 5466 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 5467 2UL*HZ); 5468 lru_gen_online_memcg(memcg); 5469 return 0; 5470 offline_kmem: 5471 memcg_offline_kmem(memcg); 5472 remove_id: 5473 mem_cgroup_id_remove(memcg); 5474 return -ENOMEM; 5475 } 5476 5477 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5478 { 5479 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5480 struct mem_cgroup_event *event, *tmp; 5481 5482 /* 5483 * Unregister events and notify userspace. 5484 * Notify userspace about cgroup removing only after rmdir of cgroup 5485 * directory to avoid race between userspace and kernelspace. 
5486 */ 5487 spin_lock_irq(&memcg->event_list_lock); 5488 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5489 list_del_init(&event->list); 5490 schedule_work(&event->remove); 5491 } 5492 spin_unlock_irq(&memcg->event_list_lock); 5493 5494 page_counter_set_min(&memcg->memory, 0); 5495 page_counter_set_low(&memcg->memory, 0); 5496 5497 memcg_offline_kmem(memcg); 5498 reparent_shrinker_deferred(memcg); 5499 wb_memcg_offline(memcg); 5500 lru_gen_offline_memcg(memcg); 5501 5502 drain_all_stock(memcg); 5503 5504 mem_cgroup_id_put(memcg); 5505 } 5506 5507 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5508 { 5509 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5510 5511 invalidate_reclaim_iterators(memcg); 5512 lru_gen_release_memcg(memcg); 5513 } 5514 5515 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5516 { 5517 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5518 int __maybe_unused i; 5519 5520 #ifdef CONFIG_CGROUP_WRITEBACK 5521 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5522 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5523 #endif 5524 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5525 static_branch_dec(&memcg_sockets_enabled_key); 5526 5527 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5528 static_branch_dec(&memcg_sockets_enabled_key); 5529 5530 #if defined(CONFIG_MEMCG_KMEM) 5531 if (!cgroup_memory_nobpf) 5532 static_branch_dec(&memcg_bpf_enabled_key); 5533 #endif 5534 5535 vmpressure_cleanup(&memcg->vmpressure); 5536 cancel_work_sync(&memcg->high_work); 5537 mem_cgroup_remove_from_trees(memcg); 5538 free_shrinker_info(memcg); 5539 mem_cgroup_free(memcg); 5540 } 5541 5542 /** 5543 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5544 * @css: the target css 5545 * 5546 * Reset the states of the mem_cgroup associated with @css. This is 5547 * invoked when the userland requests disabling on the default hierarchy 5548 * but the memcg is pinned through dependency. The memcg should stop 5549 * applying policies and should revert to the vanilla state as it may be 5550 * made visible again. 5551 * 5552 * The current implementation only resets the essential configurations. 5553 * This needs to be expanded to cover all the visible parts. 5554 */ 5555 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5556 { 5557 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5558 5559 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5560 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5561 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5562 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5563 page_counter_set_min(&memcg->memory, 0); 5564 page_counter_set_low(&memcg->memory, 0); 5565 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5566 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); 5567 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5568 memcg_wb_domain_size_changed(memcg); 5569 } 5570 5571 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 5572 { 5573 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5574 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5575 struct memcg_vmstats_percpu *statc; 5576 long delta, v; 5577 int i, nid; 5578 5579 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5580 5581 for (i = 0; i < MEMCG_NR_STAT; i++) { 5582 /* 5583 * Collect the aggregated propagation counts of groups 5584 * below us. 
We're in a per-cpu loop here and this is 5585 * a global counter, so the first cycle will get them. 5586 */ 5587 delta = memcg->vmstats->state_pending[i]; 5588 if (delta) 5589 memcg->vmstats->state_pending[i] = 0; 5590 5591 /* Add CPU changes on this level since the last flush */ 5592 v = READ_ONCE(statc->state[i]); 5593 if (v != statc->state_prev[i]) { 5594 delta += v - statc->state_prev[i]; 5595 statc->state_prev[i] = v; 5596 } 5597 5598 if (!delta) 5599 continue; 5600 5601 /* Aggregate counts on this level and propagate upwards */ 5602 memcg->vmstats->state[i] += delta; 5603 if (parent) 5604 parent->vmstats->state_pending[i] += delta; 5605 } 5606 5607 for (i = 0; i < NR_MEMCG_EVENTS; i++) { 5608 delta = memcg->vmstats->events_pending[i]; 5609 if (delta) 5610 memcg->vmstats->events_pending[i] = 0; 5611 5612 v = READ_ONCE(statc->events[i]); 5613 if (v != statc->events_prev[i]) { 5614 delta += v - statc->events_prev[i]; 5615 statc->events_prev[i] = v; 5616 } 5617 5618 if (!delta) 5619 continue; 5620 5621 memcg->vmstats->events[i] += delta; 5622 if (parent) 5623 parent->vmstats->events_pending[i] += delta; 5624 } 5625 5626 for_each_node_state(nid, N_MEMORY) { 5627 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 5628 struct mem_cgroup_per_node *ppn = NULL; 5629 struct lruvec_stats_percpu *lstatc; 5630 5631 if (parent) 5632 ppn = parent->nodeinfo[nid]; 5633 5634 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); 5635 5636 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 5637 delta = pn->lruvec_stats.state_pending[i]; 5638 if (delta) 5639 pn->lruvec_stats.state_pending[i] = 0; 5640 5641 v = READ_ONCE(lstatc->state[i]); 5642 if (v != lstatc->state_prev[i]) { 5643 delta += v - lstatc->state_prev[i]; 5644 lstatc->state_prev[i] = v; 5645 } 5646 5647 if (!delta) 5648 continue; 5649 5650 pn->lruvec_stats.state[i] += delta; 5651 if (ppn) 5652 ppn->lruvec_stats.state_pending[i] += delta; 5653 } 5654 } 5655 } 5656 5657 #ifdef CONFIG_MMU 5658 /* Handlers for move charge at task migration. 
*/ 5659 static int mem_cgroup_do_precharge(unsigned long count) 5660 { 5661 int ret; 5662 5663 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5664 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5665 if (!ret) { 5666 mc.precharge += count; 5667 return ret; 5668 } 5669 5670 /* Try charges one by one with reclaim, but do not retry */ 5671 while (count--) { 5672 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5673 if (ret) 5674 return ret; 5675 mc.precharge++; 5676 cond_resched(); 5677 } 5678 return 0; 5679 } 5680 5681 union mc_target { 5682 struct page *page; 5683 swp_entry_t ent; 5684 }; 5685 5686 enum mc_target_type { 5687 MC_TARGET_NONE = 0, 5688 MC_TARGET_PAGE, 5689 MC_TARGET_SWAP, 5690 MC_TARGET_DEVICE, 5691 }; 5692 5693 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5694 unsigned long addr, pte_t ptent) 5695 { 5696 struct page *page = vm_normal_page(vma, addr, ptent); 5697 5698 if (!page || !page_mapped(page)) 5699 return NULL; 5700 if (PageAnon(page)) { 5701 if (!(mc.flags & MOVE_ANON)) 5702 return NULL; 5703 } else { 5704 if (!(mc.flags & MOVE_FILE)) 5705 return NULL; 5706 } 5707 if (!get_page_unless_zero(page)) 5708 return NULL; 5709 5710 return page; 5711 } 5712 5713 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5714 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5715 pte_t ptent, swp_entry_t *entry) 5716 { 5717 struct page *page = NULL; 5718 swp_entry_t ent = pte_to_swp_entry(ptent); 5719 5720 if (!(mc.flags & MOVE_ANON)) 5721 return NULL; 5722 5723 /* 5724 * Handle device private pages that are not accessible by the CPU, but 5725 * stored as special swap entries in the page table. 5726 */ 5727 if (is_device_private_entry(ent)) { 5728 page = pfn_swap_entry_to_page(ent); 5729 if (!get_page_unless_zero(page)) 5730 return NULL; 5731 return page; 5732 } 5733 5734 if (non_swap_entry(ent)) 5735 return NULL; 5736 5737 /* 5738 * Because swap_cache_get_folio() updates some statistics counter, 5739 * we call find_get_page() with swapper_space directly. 5740 */ 5741 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5742 entry->val = ent.val; 5743 5744 return page; 5745 } 5746 #else 5747 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5748 pte_t ptent, swp_entry_t *entry) 5749 { 5750 return NULL; 5751 } 5752 #endif 5753 5754 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5755 unsigned long addr, pte_t ptent) 5756 { 5757 unsigned long index; 5758 struct folio *folio; 5759 5760 if (!vma->vm_file) /* anonymous vma */ 5761 return NULL; 5762 if (!(mc.flags & MOVE_FILE)) 5763 return NULL; 5764 5765 /* folio is moved even if it's not RSS of this task(page-faulted). */ 5766 /* shmem/tmpfs may report page out on swap: account for that too. */ 5767 index = linear_page_index(vma, addr); 5768 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index); 5769 if (IS_ERR(folio)) 5770 return NULL; 5771 return folio_file_page(folio, index); 5772 } 5773 5774 /** 5775 * mem_cgroup_move_account - move account of the page 5776 * @page: the page 5777 * @compound: charge the page as compound or small page 5778 * @from: mem_cgroup which the page is moved from. 5779 * @to: mem_cgroup which the page is moved to. @from != @to. 5780 * 5781 * The page must be locked and not on the LRU. 5782 * 5783 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5784 * from old cgroup. 
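 *
 * Returns 0 on success, or -EINVAL if the folio is no longer charged to
 * @from.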
5785 */ 5786 static int mem_cgroup_move_account(struct page *page, 5787 bool compound, 5788 struct mem_cgroup *from, 5789 struct mem_cgroup *to) 5790 { 5791 struct folio *folio = page_folio(page); 5792 struct lruvec *from_vec, *to_vec; 5793 struct pglist_data *pgdat; 5794 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1; 5795 int nid, ret; 5796 5797 VM_BUG_ON(from == to); 5798 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 5799 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 5800 VM_BUG_ON(compound && !folio_test_large(folio)); 5801 5802 ret = -EINVAL; 5803 if (folio_memcg(folio) != from) 5804 goto out; 5805 5806 pgdat = folio_pgdat(folio); 5807 from_vec = mem_cgroup_lruvec(from, pgdat); 5808 to_vec = mem_cgroup_lruvec(to, pgdat); 5809 5810 folio_memcg_lock(folio); 5811 5812 if (folio_test_anon(folio)) { 5813 if (folio_mapped(folio)) { 5814 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5815 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5816 if (folio_test_transhuge(folio)) { 5817 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5818 -nr_pages); 5819 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5820 nr_pages); 5821 } 5822 } 5823 } else { 5824 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5825 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5826 5827 if (folio_test_swapbacked(folio)) { 5828 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5829 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5830 } 5831 5832 if (folio_mapped(folio)) { 5833 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5834 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5835 } 5836 5837 if (folio_test_dirty(folio)) { 5838 struct address_space *mapping = folio_mapping(folio); 5839 5840 if (mapping_can_writeback(mapping)) { 5841 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5842 -nr_pages); 5843 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5844 nr_pages); 5845 } 5846 } 5847 } 5848 5849 #ifdef CONFIG_SWAP 5850 if (folio_test_swapcache(folio)) { 5851 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages); 5852 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages); 5853 } 5854 #endif 5855 if (folio_test_writeback(folio)) { 5856 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5857 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5858 } 5859 5860 /* 5861 * All state has been migrated, let's switch to the new memcg. 5862 * 5863 * It is safe to change page's memcg here because the page 5864 * is referenced, charged, isolated, and locked: we can't race 5865 * with (un)charging, migration, LRU putback, or anything else 5866 * that would rely on a stable page's memory cgroup. 5867 * 5868 * Note that lock_page_memcg is a memcg lock, not a page lock, 5869 * to save space. As soon as we switch page's memory cgroup to a 5870 * new memcg that isn't locked, the above state can change 5871 * concurrently again. Make sure we're truly done with it. 
5872  */
5873 	smp_mb();
5874 
5875 	css_get(&to->css);
5876 	css_put(&from->css);
5877 
5878 	folio->memcg_data = (unsigned long)to;
5879 
5880 	__folio_memcg_unlock(from);
5881 
5882 	ret = 0;
5883 	nid = folio_nid(folio);
5884 
5885 	local_irq_disable();
5886 	mem_cgroup_charge_statistics(to, nr_pages);
5887 	memcg_check_events(to, nid);
5888 	mem_cgroup_charge_statistics(from, -nr_pages);
5889 	memcg_check_events(from, nid);
5890 	local_irq_enable();
5891 out:
5892 	return ret;
5893 }
5894 
5895 /**
5896  * get_mctgt_type - get target type of moving charge
5897  * @vma: the vma that the pte to be checked belongs to
5898  * @addr: the address corresponding to the pte to be checked
5899  * @ptent: the pte to be checked
5900  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5901  *
5902  * Returns
5903  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5904  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5905  *     move charge. If @target is not NULL, the page is stored in target->page
5906  *     with an extra refcount taken (callers should handle it).
5907  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5908  *     target for charge migration. If @target is not NULL, the entry is stored
5909  *     in target->ent.
5910  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is device memory and
5911  *     thus not on the LRU.
5912  *     For now such a page is charged like a regular page would be, as for all
5913  *     intents and purposes it is just special memory taking the place of a
5914  *     regular page.
5915  *
5916  * See Documentation/mm/hmm.rst and include/linux/hmm.h
5917  *
5918  * Called with pte lock held.
5919  */
5920 
5921 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5922 		unsigned long addr, pte_t ptent, union mc_target *target)
5923 {
5924 	struct page *page = NULL;
5925 	enum mc_target_type ret = MC_TARGET_NONE;
5926 	swp_entry_t ent = { .val = 0 };
5927 
5928 	if (pte_present(ptent))
5929 		page = mc_handle_present_pte(vma, addr, ptent);
5930 	else if (pte_none_mostly(ptent))
5931 		/*
5932 		 * PTE markers should be treated as a none pte here, separated
5933 		 * from other swap handling below.
5934 		 */
5935 		page = mc_handle_file_pte(vma, addr, ptent);
5936 	else if (is_swap_pte(ptent))
5937 		page = mc_handle_swap_pte(vma, ptent, &ent);
5938 
5939 	if (target && page) {
5940 		if (!trylock_page(page)) {
5941 			put_page(page);
5942 			return ret;
5943 		}
5944 		/*
5945 		 * page_mapped() must be stable during the move. This
5946 		 * pte is locked, so if it's present, the page cannot
5947 		 * become unmapped. If it isn't, we have only partial
5948 		 * control over the mapped state: the page lock will
5949 		 * prevent new faults against pagecache and swapcache,
5950 		 * so an unmapped page cannot become mapped. However,
5951 		 * if the page is already mapped elsewhere, it can
5952 		 * unmap, and there is nothing we can do about it.
5953 		 * Alas, skip moving the page in this case.
5954 		 */
5955 		if (!pte_present(ptent) && page_mapped(page)) {
5956 			unlock_page(page);
5957 			put_page(page);
5958 			return ret;
5959 		}
5960 	}
5961 
5962 	if (!page && !ent.val)
5963 		return ret;
5964 	if (page) {
5965 		/*
5966 		 * Only do a loose check, without serialization.
5967 		 * mem_cgroup_move_account() checks whether the page is
5968 		 * valid under LRU exclusion.
5969 */ 5970 if (page_memcg(page) == mc.from) { 5971 ret = MC_TARGET_PAGE; 5972 if (is_device_private_page(page) || 5973 is_device_coherent_page(page)) 5974 ret = MC_TARGET_DEVICE; 5975 if (target) 5976 target->page = page; 5977 } 5978 if (!ret || !target) { 5979 if (target) 5980 unlock_page(page); 5981 put_page(page); 5982 } 5983 } 5984 /* 5985 * There is a swap entry and a page doesn't exist or isn't charged. 5986 * But we cannot move a tail-page in a THP. 5987 */ 5988 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5989 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5990 ret = MC_TARGET_SWAP; 5991 if (target) 5992 target->ent = ent; 5993 } 5994 return ret; 5995 } 5996 5997 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5998 /* 5999 * We don't consider PMD mapped swapping or file mapped pages because THP does 6000 * not support them for now. 6001 * Caller should make sure that pmd_trans_huge(pmd) is true. 6002 */ 6003 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 6004 unsigned long addr, pmd_t pmd, union mc_target *target) 6005 { 6006 struct page *page = NULL; 6007 enum mc_target_type ret = MC_TARGET_NONE; 6008 6009 if (unlikely(is_swap_pmd(pmd))) { 6010 VM_BUG_ON(thp_migration_supported() && 6011 !is_pmd_migration_entry(pmd)); 6012 return ret; 6013 } 6014 page = pmd_page(pmd); 6015 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 6016 if (!(mc.flags & MOVE_ANON)) 6017 return ret; 6018 if (page_memcg(page) == mc.from) { 6019 ret = MC_TARGET_PAGE; 6020 if (target) { 6021 get_page(page); 6022 if (!trylock_page(page)) { 6023 put_page(page); 6024 return MC_TARGET_NONE; 6025 } 6026 target->page = page; 6027 } 6028 } 6029 return ret; 6030 } 6031 #else 6032 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 6033 unsigned long addr, pmd_t pmd, union mc_target *target) 6034 { 6035 return MC_TARGET_NONE; 6036 } 6037 #endif 6038 6039 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 6040 unsigned long addr, unsigned long end, 6041 struct mm_walk *walk) 6042 { 6043 struct vm_area_struct *vma = walk->vma; 6044 pte_t *pte; 6045 spinlock_t *ptl; 6046 6047 ptl = pmd_trans_huge_lock(pmd, vma); 6048 if (ptl) { 6049 /* 6050 * Note their can not be MC_TARGET_DEVICE for now as we do not 6051 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 6052 * this might change. 
6053 */ 6054 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 6055 mc.precharge += HPAGE_PMD_NR; 6056 spin_unlock(ptl); 6057 return 0; 6058 } 6059 6060 if (pmd_trans_unstable(pmd)) 6061 return 0; 6062 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6063 for (; addr != end; pte++, addr += PAGE_SIZE) 6064 if (get_mctgt_type(vma, addr, *pte, NULL)) 6065 mc.precharge++; /* increment precharge temporarily */ 6066 pte_unmap_unlock(pte - 1, ptl); 6067 cond_resched(); 6068 6069 return 0; 6070 } 6071 6072 static const struct mm_walk_ops precharge_walk_ops = { 6073 .pmd_entry = mem_cgroup_count_precharge_pte_range, 6074 }; 6075 6076 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 6077 { 6078 unsigned long precharge; 6079 6080 mmap_read_lock(mm); 6081 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL); 6082 mmap_read_unlock(mm); 6083 6084 precharge = mc.precharge; 6085 mc.precharge = 0; 6086 6087 return precharge; 6088 } 6089 6090 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 6091 { 6092 unsigned long precharge = mem_cgroup_count_precharge(mm); 6093 6094 VM_BUG_ON(mc.moving_task); 6095 mc.moving_task = current; 6096 return mem_cgroup_do_precharge(precharge); 6097 } 6098 6099 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 6100 static void __mem_cgroup_clear_mc(void) 6101 { 6102 struct mem_cgroup *from = mc.from; 6103 struct mem_cgroup *to = mc.to; 6104 6105 /* we must uncharge all the leftover precharges from mc.to */ 6106 if (mc.precharge) { 6107 cancel_charge(mc.to, mc.precharge); 6108 mc.precharge = 0; 6109 } 6110 /* 6111 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 6112 * we must uncharge here. 6113 */ 6114 if (mc.moved_charge) { 6115 cancel_charge(mc.from, mc.moved_charge); 6116 mc.moved_charge = 0; 6117 } 6118 /* we must fixup refcnts and charges */ 6119 if (mc.moved_swap) { 6120 /* uncharge swap account from the old cgroup */ 6121 if (!mem_cgroup_is_root(mc.from)) 6122 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 6123 6124 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 6125 6126 /* 6127 * we charged both to->memory and to->memsw, so we 6128 * should uncharge to->memory. 6129 */ 6130 if (!mem_cgroup_is_root(mc.to)) 6131 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 6132 6133 mc.moved_swap = 0; 6134 } 6135 memcg_oom_recover(from); 6136 memcg_oom_recover(to); 6137 wake_up_all(&mc.waitq); 6138 } 6139 6140 static void mem_cgroup_clear_mc(void) 6141 { 6142 struct mm_struct *mm = mc.mm; 6143 6144 /* 6145 * we must clear moving_task before waking up waiters at the end of 6146 * task migration. 6147 */ 6148 mc.moving_task = NULL; 6149 __mem_cgroup_clear_mc(); 6150 spin_lock(&mc.lock); 6151 mc.from = NULL; 6152 mc.to = NULL; 6153 mc.mm = NULL; 6154 spin_unlock(&mc.lock); 6155 6156 mmput(mm); 6157 } 6158 6159 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6160 { 6161 struct cgroup_subsys_state *css; 6162 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 6163 struct mem_cgroup *from; 6164 struct task_struct *leader, *p; 6165 struct mm_struct *mm; 6166 unsigned long move_flags; 6167 int ret = 0; 6168 6169 /* charge immigration isn't supported on the default hierarchy */ 6170 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6171 return 0; 6172 6173 /* 6174 * Multi-process migrations only happen on the default hierarchy 6175 * where charge immigration is not used. 
Perform charge 6176 * immigration if @tset contains a leader and whine if there are 6177 * multiple. 6178 */ 6179 p = NULL; 6180 cgroup_taskset_for_each_leader(leader, css, tset) { 6181 WARN_ON_ONCE(p); 6182 p = leader; 6183 memcg = mem_cgroup_from_css(css); 6184 } 6185 if (!p) 6186 return 0; 6187 6188 /* 6189 * We are now committed to this value whatever it is. Changes in this 6190 * tunable will only affect upcoming migrations, not the current one. 6191 * So we need to save it, and keep it going. 6192 */ 6193 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 6194 if (!move_flags) 6195 return 0; 6196 6197 from = mem_cgroup_from_task(p); 6198 6199 VM_BUG_ON(from == memcg); 6200 6201 mm = get_task_mm(p); 6202 if (!mm) 6203 return 0; 6204 /* We move charges only when we move a owner of the mm */ 6205 if (mm->owner == p) { 6206 VM_BUG_ON(mc.from); 6207 VM_BUG_ON(mc.to); 6208 VM_BUG_ON(mc.precharge); 6209 VM_BUG_ON(mc.moved_charge); 6210 VM_BUG_ON(mc.moved_swap); 6211 6212 spin_lock(&mc.lock); 6213 mc.mm = mm; 6214 mc.from = from; 6215 mc.to = memcg; 6216 mc.flags = move_flags; 6217 spin_unlock(&mc.lock); 6218 /* We set mc.moving_task later */ 6219 6220 ret = mem_cgroup_precharge_mc(mm); 6221 if (ret) 6222 mem_cgroup_clear_mc(); 6223 } else { 6224 mmput(mm); 6225 } 6226 return ret; 6227 } 6228 6229 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6230 { 6231 if (mc.to) 6232 mem_cgroup_clear_mc(); 6233 } 6234 6235 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6236 unsigned long addr, unsigned long end, 6237 struct mm_walk *walk) 6238 { 6239 int ret = 0; 6240 struct vm_area_struct *vma = walk->vma; 6241 pte_t *pte; 6242 spinlock_t *ptl; 6243 enum mc_target_type target_type; 6244 union mc_target target; 6245 struct page *page; 6246 6247 ptl = pmd_trans_huge_lock(pmd, vma); 6248 if (ptl) { 6249 if (mc.precharge < HPAGE_PMD_NR) { 6250 spin_unlock(ptl); 6251 return 0; 6252 } 6253 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6254 if (target_type == MC_TARGET_PAGE) { 6255 page = target.page; 6256 if (isolate_lru_page(page)) { 6257 if (!mem_cgroup_move_account(page, true, 6258 mc.from, mc.to)) { 6259 mc.precharge -= HPAGE_PMD_NR; 6260 mc.moved_charge += HPAGE_PMD_NR; 6261 } 6262 putback_lru_page(page); 6263 } 6264 unlock_page(page); 6265 put_page(page); 6266 } else if (target_type == MC_TARGET_DEVICE) { 6267 page = target.page; 6268 if (!mem_cgroup_move_account(page, true, 6269 mc.from, mc.to)) { 6270 mc.precharge -= HPAGE_PMD_NR; 6271 mc.moved_charge += HPAGE_PMD_NR; 6272 } 6273 unlock_page(page); 6274 put_page(page); 6275 } 6276 spin_unlock(ptl); 6277 return 0; 6278 } 6279 6280 if (pmd_trans_unstable(pmd)) 6281 return 0; 6282 retry: 6283 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6284 for (; addr != end; addr += PAGE_SIZE) { 6285 pte_t ptent = *(pte++); 6286 bool device = false; 6287 swp_entry_t ent; 6288 6289 if (!mc.precharge) 6290 break; 6291 6292 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6293 case MC_TARGET_DEVICE: 6294 device = true; 6295 fallthrough; 6296 case MC_TARGET_PAGE: 6297 page = target.page; 6298 /* 6299 * We can have a part of the split pmd here. Moving it 6300 * can be done but it would be too convoluted so simply 6301 * ignore such a partial THP and keep it in original 6302 * memcg. There should be somebody mapping the head. 
6303 */ 6304 if (PageTransCompound(page)) 6305 goto put; 6306 if (!device && !isolate_lru_page(page)) 6307 goto put; 6308 if (!mem_cgroup_move_account(page, false, 6309 mc.from, mc.to)) { 6310 mc.precharge--; 6311 /* we uncharge from mc.from later. */ 6312 mc.moved_charge++; 6313 } 6314 if (!device) 6315 putback_lru_page(page); 6316 put: /* get_mctgt_type() gets & locks the page */ 6317 unlock_page(page); 6318 put_page(page); 6319 break; 6320 case MC_TARGET_SWAP: 6321 ent = target.ent; 6322 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6323 mc.precharge--; 6324 mem_cgroup_id_get_many(mc.to, 1); 6325 /* we fixup other refcnts and charges later. */ 6326 mc.moved_swap++; 6327 } 6328 break; 6329 default: 6330 break; 6331 } 6332 } 6333 pte_unmap_unlock(pte - 1, ptl); 6334 cond_resched(); 6335 6336 if (addr != end) { 6337 /* 6338 * We have consumed all precharges we got in can_attach(). 6339 * We try charge one by one, but don't do any additional 6340 * charges to mc.to if we have failed in charge once in attach() 6341 * phase. 6342 */ 6343 ret = mem_cgroup_do_precharge(1); 6344 if (!ret) 6345 goto retry; 6346 } 6347 6348 return ret; 6349 } 6350 6351 static const struct mm_walk_ops charge_walk_ops = { 6352 .pmd_entry = mem_cgroup_move_charge_pte_range, 6353 }; 6354 6355 static void mem_cgroup_move_charge(void) 6356 { 6357 lru_add_drain_all(); 6358 /* 6359 * Signal lock_page_memcg() to take the memcg's move_lock 6360 * while we're moving its pages to another memcg. Then wait 6361 * for already started RCU-only updates to finish. 6362 */ 6363 atomic_inc(&mc.from->moving_account); 6364 synchronize_rcu(); 6365 retry: 6366 if (unlikely(!mmap_read_trylock(mc.mm))) { 6367 /* 6368 * Someone who are holding the mmap_lock might be waiting in 6369 * waitq. So we cancel all extra charges, wake up all waiters, 6370 * and retry. Because we cancel precharges, we might not be able 6371 * to move enough charges, but moving charge is a best-effort 6372 * feature anyway, so it wouldn't be a big problem. 6373 */ 6374 __mem_cgroup_clear_mc(); 6375 cond_resched(); 6376 goto retry; 6377 } 6378 /* 6379 * When we have consumed all precharges and failed in doing 6380 * additional charge, the page walk just aborts. 
6381 */ 6382 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL); 6383 mmap_read_unlock(mc.mm); 6384 atomic_dec(&mc.from->moving_account); 6385 } 6386 6387 static void mem_cgroup_move_task(void) 6388 { 6389 if (mc.to) { 6390 mem_cgroup_move_charge(); 6391 mem_cgroup_clear_mc(); 6392 } 6393 } 6394 #else /* !CONFIG_MMU */ 6395 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6396 { 6397 return 0; 6398 } 6399 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6400 { 6401 } 6402 static void mem_cgroup_move_task(void) 6403 { 6404 } 6405 #endif 6406 6407 #ifdef CONFIG_LRU_GEN 6408 static void mem_cgroup_attach(struct cgroup_taskset *tset) 6409 { 6410 struct task_struct *task; 6411 struct cgroup_subsys_state *css; 6412 6413 /* find the first leader if there is any */ 6414 cgroup_taskset_for_each_leader(task, css, tset) 6415 break; 6416 6417 if (!task) 6418 return; 6419 6420 task_lock(task); 6421 if (task->mm && READ_ONCE(task->mm->owner) == task) 6422 lru_gen_migrate_mm(task->mm); 6423 task_unlock(task); 6424 } 6425 #else 6426 static void mem_cgroup_attach(struct cgroup_taskset *tset) 6427 { 6428 } 6429 #endif /* CONFIG_LRU_GEN */ 6430 6431 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6432 { 6433 if (value == PAGE_COUNTER_MAX) 6434 seq_puts(m, "max\n"); 6435 else 6436 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6437 6438 return 0; 6439 } 6440 6441 static u64 memory_current_read(struct cgroup_subsys_state *css, 6442 struct cftype *cft) 6443 { 6444 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6445 6446 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6447 } 6448 6449 static u64 memory_peak_read(struct cgroup_subsys_state *css, 6450 struct cftype *cft) 6451 { 6452 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6453 6454 return (u64)memcg->memory.watermark * PAGE_SIZE; 6455 } 6456 6457 static int memory_min_show(struct seq_file *m, void *v) 6458 { 6459 return seq_puts_memcg_tunable(m, 6460 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6461 } 6462 6463 static ssize_t memory_min_write(struct kernfs_open_file *of, 6464 char *buf, size_t nbytes, loff_t off) 6465 { 6466 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6467 unsigned long min; 6468 int err; 6469 6470 buf = strstrip(buf); 6471 err = page_counter_memparse(buf, "max", &min); 6472 if (err) 6473 return err; 6474 6475 page_counter_set_min(&memcg->memory, min); 6476 6477 return nbytes; 6478 } 6479 6480 static int memory_low_show(struct seq_file *m, void *v) 6481 { 6482 return seq_puts_memcg_tunable(m, 6483 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6484 } 6485 6486 static ssize_t memory_low_write(struct kernfs_open_file *of, 6487 char *buf, size_t nbytes, loff_t off) 6488 { 6489 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6490 unsigned long low; 6491 int err; 6492 6493 buf = strstrip(buf); 6494 err = page_counter_memparse(buf, "max", &low); 6495 if (err) 6496 return err; 6497 6498 page_counter_set_low(&memcg->memory, low); 6499 6500 return nbytes; 6501 } 6502 6503 static int memory_high_show(struct seq_file *m, void *v) 6504 { 6505 return seq_puts_memcg_tunable(m, 6506 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6507 } 6508 6509 static ssize_t memory_high_write(struct kernfs_open_file *of, 6510 char *buf, size_t nbytes, loff_t off) 6511 { 6512 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6513 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6514 bool drained = false; 6515 unsigned long high; 6516 int 
err; 6517 6518 buf = strstrip(buf); 6519 err = page_counter_memparse(buf, "max", &high); 6520 if (err) 6521 return err; 6522 6523 page_counter_set_high(&memcg->memory, high); 6524 6525 for (;;) { 6526 unsigned long nr_pages = page_counter_read(&memcg->memory); 6527 unsigned long reclaimed; 6528 6529 if (nr_pages <= high) 6530 break; 6531 6532 if (signal_pending(current)) 6533 break; 6534 6535 if (!drained) { 6536 drain_all_stock(memcg); 6537 drained = true; 6538 continue; 6539 } 6540 6541 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6542 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP); 6543 6544 if (!reclaimed && !nr_retries--) 6545 break; 6546 } 6547 6548 memcg_wb_domain_size_changed(memcg); 6549 return nbytes; 6550 } 6551 6552 static int memory_max_show(struct seq_file *m, void *v) 6553 { 6554 return seq_puts_memcg_tunable(m, 6555 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6556 } 6557 6558 static ssize_t memory_max_write(struct kernfs_open_file *of, 6559 char *buf, size_t nbytes, loff_t off) 6560 { 6561 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6562 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6563 bool drained = false; 6564 unsigned long max; 6565 int err; 6566 6567 buf = strstrip(buf); 6568 err = page_counter_memparse(buf, "max", &max); 6569 if (err) 6570 return err; 6571 6572 xchg(&memcg->memory.max, max); 6573 6574 for (;;) { 6575 unsigned long nr_pages = page_counter_read(&memcg->memory); 6576 6577 if (nr_pages <= max) 6578 break; 6579 6580 if (signal_pending(current)) 6581 break; 6582 6583 if (!drained) { 6584 drain_all_stock(memcg); 6585 drained = true; 6586 continue; 6587 } 6588 6589 if (nr_reclaims) { 6590 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6591 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP)) 6592 nr_reclaims--; 6593 continue; 6594 } 6595 6596 memcg_memory_event(memcg, MEMCG_OOM); 6597 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6598 break; 6599 } 6600 6601 memcg_wb_domain_size_changed(memcg); 6602 return nbytes; 6603 } 6604 6605 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6606 { 6607 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6608 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6609 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6610 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6611 seq_printf(m, "oom_kill %lu\n", 6612 atomic_long_read(&events[MEMCG_OOM_KILL])); 6613 seq_printf(m, "oom_group_kill %lu\n", 6614 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL])); 6615 } 6616 6617 static int memory_events_show(struct seq_file *m, void *v) 6618 { 6619 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6620 6621 __memory_events_show(m, memcg->memory_events); 6622 return 0; 6623 } 6624 6625 static int memory_events_local_show(struct seq_file *m, void *v) 6626 { 6627 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6628 6629 __memory_events_show(m, memcg->memory_events_local); 6630 return 0; 6631 } 6632 6633 static int memory_stat_show(struct seq_file *m, void *v) 6634 { 6635 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6636 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 6637 6638 if (!buf) 6639 return -ENOMEM; 6640 memory_stat_format(memcg, buf, PAGE_SIZE); 6641 seq_puts(m, buf); 6642 kfree(buf); 6643 return 0; 6644 } 6645 6646 #ifdef CONFIG_NUMA 6647 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 6648 int item) 6649 { 6650 return lruvec_page_state(lruvec, item) * 
memcg_page_state_unit(item); 6651 } 6652 6653 static int memory_numa_stat_show(struct seq_file *m, void *v) 6654 { 6655 int i; 6656 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6657 6658 mem_cgroup_flush_stats(); 6659 6660 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6661 int nid; 6662 6663 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6664 continue; 6665 6666 seq_printf(m, "%s", memory_stats[i].name); 6667 for_each_node_state(nid, N_MEMORY) { 6668 u64 size; 6669 struct lruvec *lruvec; 6670 6671 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6672 size = lruvec_page_state_output(lruvec, 6673 memory_stats[i].idx); 6674 seq_printf(m, " N%d=%llu", nid, size); 6675 } 6676 seq_putc(m, '\n'); 6677 } 6678 6679 return 0; 6680 } 6681 #endif 6682 6683 static int memory_oom_group_show(struct seq_file *m, void *v) 6684 { 6685 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6686 6687 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); 6688 6689 return 0; 6690 } 6691 6692 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6693 char *buf, size_t nbytes, loff_t off) 6694 { 6695 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6696 int ret, oom_group; 6697 6698 buf = strstrip(buf); 6699 if (!buf) 6700 return -EINVAL; 6701 6702 ret = kstrtoint(buf, 0, &oom_group); 6703 if (ret) 6704 return ret; 6705 6706 if (oom_group != 0 && oom_group != 1) 6707 return -EINVAL; 6708 6709 WRITE_ONCE(memcg->oom_group, oom_group); 6710 6711 return nbytes; 6712 } 6713 6714 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, 6715 size_t nbytes, loff_t off) 6716 { 6717 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6718 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6719 unsigned long nr_to_reclaim, nr_reclaimed = 0; 6720 unsigned int reclaim_options; 6721 int err; 6722 6723 buf = strstrip(buf); 6724 err = page_counter_memparse(buf, "", &nr_to_reclaim); 6725 if (err) 6726 return err; 6727 6728 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE; 6729 while (nr_reclaimed < nr_to_reclaim) { 6730 unsigned long reclaimed; 6731 6732 if (signal_pending(current)) 6733 return -EINTR; 6734 6735 /* 6736 * This is the final attempt, drain percpu lru caches in the 6737 * hope of introducing more evictable pages for 6738 * try_to_free_mem_cgroup_pages(). 
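	 *
	 * (This loop backs the cgroup2 "memory.reclaim" interface: writing
	 *  a size such as "512M" requests that much proactive reclaim from
	 *  the cgroup; the write fails with -EAGAIN if the target cannot
	 *  be met, or with -EINTR if a signal is pending.)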
6739 */ 6740 if (!nr_retries) 6741 lru_add_drain_all(); 6742 6743 reclaimed = try_to_free_mem_cgroup_pages(memcg, 6744 nr_to_reclaim - nr_reclaimed, 6745 GFP_KERNEL, reclaim_options); 6746 6747 if (!reclaimed && !nr_retries--) 6748 return -EAGAIN; 6749 6750 nr_reclaimed += reclaimed; 6751 } 6752 6753 return nbytes; 6754 } 6755 6756 static struct cftype memory_files[] = { 6757 { 6758 .name = "current", 6759 .flags = CFTYPE_NOT_ON_ROOT, 6760 .read_u64 = memory_current_read, 6761 }, 6762 { 6763 .name = "peak", 6764 .flags = CFTYPE_NOT_ON_ROOT, 6765 .read_u64 = memory_peak_read, 6766 }, 6767 { 6768 .name = "min", 6769 .flags = CFTYPE_NOT_ON_ROOT, 6770 .seq_show = memory_min_show, 6771 .write = memory_min_write, 6772 }, 6773 { 6774 .name = "low", 6775 .flags = CFTYPE_NOT_ON_ROOT, 6776 .seq_show = memory_low_show, 6777 .write = memory_low_write, 6778 }, 6779 { 6780 .name = "high", 6781 .flags = CFTYPE_NOT_ON_ROOT, 6782 .seq_show = memory_high_show, 6783 .write = memory_high_write, 6784 }, 6785 { 6786 .name = "max", 6787 .flags = CFTYPE_NOT_ON_ROOT, 6788 .seq_show = memory_max_show, 6789 .write = memory_max_write, 6790 }, 6791 { 6792 .name = "events", 6793 .flags = CFTYPE_NOT_ON_ROOT, 6794 .file_offset = offsetof(struct mem_cgroup, events_file), 6795 .seq_show = memory_events_show, 6796 }, 6797 { 6798 .name = "events.local", 6799 .flags = CFTYPE_NOT_ON_ROOT, 6800 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6801 .seq_show = memory_events_local_show, 6802 }, 6803 { 6804 .name = "stat", 6805 .seq_show = memory_stat_show, 6806 }, 6807 #ifdef CONFIG_NUMA 6808 { 6809 .name = "numa_stat", 6810 .seq_show = memory_numa_stat_show, 6811 }, 6812 #endif 6813 { 6814 .name = "oom.group", 6815 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6816 .seq_show = memory_oom_group_show, 6817 .write = memory_oom_group_write, 6818 }, 6819 { 6820 .name = "reclaim", 6821 .flags = CFTYPE_NS_DELEGATABLE, 6822 .write = memory_reclaim, 6823 }, 6824 { } /* terminate */ 6825 }; 6826 6827 struct cgroup_subsys memory_cgrp_subsys = { 6828 .css_alloc = mem_cgroup_css_alloc, 6829 .css_online = mem_cgroup_css_online, 6830 .css_offline = mem_cgroup_css_offline, 6831 .css_released = mem_cgroup_css_released, 6832 .css_free = mem_cgroup_css_free, 6833 .css_reset = mem_cgroup_css_reset, 6834 .css_rstat_flush = mem_cgroup_css_rstat_flush, 6835 .can_attach = mem_cgroup_can_attach, 6836 .attach = mem_cgroup_attach, 6837 .cancel_attach = mem_cgroup_cancel_attach, 6838 .post_attach = mem_cgroup_move_task, 6839 .dfl_cftypes = memory_files, 6840 .legacy_cftypes = mem_cgroup_legacy_files, 6841 .early_init = 0, 6842 }; 6843 6844 /* 6845 * This function calculates an individual cgroup's effective 6846 * protection which is derived from its own memory.min/low, its 6847 * parent's and siblings' settings, as well as the actual memory 6848 * distribution in the tree. 6849 * 6850 * The following rules apply to the effective protection values: 6851 * 6852 * 1. At the first level of reclaim, effective protection is equal to 6853 * the declared protection in memory.min and memory.low. 6854 * 6855 * 2. To enable safe delegation of the protection configuration, at 6856 * subsequent levels the effective protection is capped to the 6857 * parent's effective protection. 6858 * 6859 * 3. To make complex and dynamic subtrees easier to configure, the 6860 * user is allowed to overcommit the declared protection at a given 6861 * level. 
If that is the case, the parent's effective protection is
6862  *    distributed to the children in proportion to how much protection
6863  *    they have declared and how much of it they are utilizing.
6864  *
6865  *    This makes distribution proportional, but also work-conserving:
6866  *    if one cgroup claims much more protection than it uses memory,
6867  *    the unused remainder is available to its siblings.
6868  *
6869  * 4. Conversely, when the declared protection is undercommitted at a
6870  *    given level, the distribution of the larger parental protection
6871  *    budget is NOT proportional. A cgroup's protection from a sibling
6872  *    is capped to its own memory.min/low setting.
6873  *
6874  * 5. However, to allow protecting recursive subtrees from each other
6875  *    without having to declare each individual cgroup's fixed share
6876  *    of the ancestor's claim to protection, any unutilized -
6877  *    "floating" - protection from up the tree is distributed in
6878  *    proportion to each cgroup's *usage*. This makes the protection
6879  *    neutral wrt sibling cgroups and lets them compete freely over
6880  *    the shared parental protection budget, but it protects the
6881  *    subtree as a whole from neighboring subtrees.
6882  *
6883  * Note that 4. and 5. are not in conflict: 4. is about protecting
6884  * against immediate siblings whereas 5. is about protecting against
6885  * neighboring subtrees.
6886  */
6887 static unsigned long effective_protection(unsigned long usage,
6888 					  unsigned long parent_usage,
6889 					  unsigned long setting,
6890 					  unsigned long parent_effective,
6891 					  unsigned long siblings_protected)
6892 {
6893 	unsigned long protected;
6894 	unsigned long ep;
6895 
6896 	protected = min(usage, setting);
6897 	/*
6898 	 * If all cgroups at this level combined claim and use more
6899 	 * protection than what the parent affords them, distribute
6900 	 * shares in proportion to utilization.
6901 	 *
6902 	 * We are using actual utilization rather than the statically
6903 	 * claimed protection in order to be work-conserving: claimed
6904 	 * but unused protection is available to siblings that would
6905 	 * otherwise get a smaller chunk than what they claimed.
6906 	 */
6907 	if (siblings_protected > parent_effective)
6908 		return protected * parent_effective / siblings_protected;
6909 
6910 	/*
6911 	 * Ok, utilized protection of all children is within what the
6912 	 * parent affords them, so we know whatever this child claims
6913 	 * and utilizes is effectively protected.
6914 	 *
6915 	 * If there is unprotected usage beyond this value, reclaim
6916 	 * will apply pressure in proportion to that amount.
6917 	 *
6918 	 * If there is unutilized protection, the cgroup will be fully
6919 	 * shielded from reclaim, but we do return a smaller value for
6920 	 * protection than what the group could enjoy in theory. This
6921 	 * is okay. With the overcommit distribution above, effective
6922 	 * protection is always dependent on how memory is actually
6923 	 * consumed among the siblings anyway.
6924 	 */
6925 	ep = protected;
6926 
6927 	/*
6928 	 * If the children aren't claiming (all of) the protection
6929 	 * afforded to them by the parent, distribute the remainder in
6930 	 * proportion to the (unprotected) memory of each cgroup. That
6931 	 * way, cgroups that aren't explicitly prioritized wrt each
6932 	 * other compete freely over the allowance, but they are
6933 	 * collectively protected from neighboring trees.
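	 *
	 * As an illustration with made-up numbers (only when the recursive
	 * protection distribution below is enabled, see the
	 * CGRP_ROOT_MEMORY_RECURSIVE_PROT check): if the parent's effective
	 * protection is 100 pages, the children together claim and use only
	 * 60 of it, the parent's usage is 160 pages, and this child uses 50
	 * pages of which 20 are protected by its own setting, then the child
	 * additionally receives (100 - 60) * (50 - 20) / (160 - 60) = 12
	 * pages of floating protection, for an effective protection of 32
	 * pages.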
6934 * 6935 * We're using unprotected memory for the weight so that if 6936 * some cgroups DO claim explicit protection, we don't protect 6937 * the same bytes twice. 6938 * 6939 * Check both usage and parent_usage against the respective 6940 * protected values. One should imply the other, but they 6941 * aren't read atomically - make sure the division is sane. 6942 */ 6943 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6944 return ep; 6945 if (parent_effective > siblings_protected && 6946 parent_usage > siblings_protected && 6947 usage > protected) { 6948 unsigned long unclaimed; 6949 6950 unclaimed = parent_effective - siblings_protected; 6951 unclaimed *= usage - protected; 6952 unclaimed /= parent_usage - siblings_protected; 6953 6954 ep += unclaimed; 6955 } 6956 6957 return ep; 6958 } 6959 6960 /** 6961 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 6962 * @root: the top ancestor of the sub-tree being checked 6963 * @memcg: the memory cgroup to check 6964 * 6965 * WARNING: This function is not stateless! It can only be used as part 6966 * of a top-down tree iteration, not for isolated queries. 6967 */ 6968 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6969 struct mem_cgroup *memcg) 6970 { 6971 unsigned long usage, parent_usage; 6972 struct mem_cgroup *parent; 6973 6974 if (mem_cgroup_disabled()) 6975 return; 6976 6977 if (!root) 6978 root = root_mem_cgroup; 6979 6980 /* 6981 * Effective values of the reclaim targets are ignored so they 6982 * can be stale. Have a look at mem_cgroup_protection for more 6983 * details. 6984 * TODO: calculation should be more robust so that we do not need 6985 * that special casing. 6986 */ 6987 if (memcg == root) 6988 return; 6989 6990 usage = page_counter_read(&memcg->memory); 6991 if (!usage) 6992 return; 6993 6994 parent = parent_mem_cgroup(memcg); 6995 6996 if (parent == root) { 6997 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6998 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6999 return; 7000 } 7001 7002 parent_usage = page_counter_read(&parent->memory); 7003 7004 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 7005 READ_ONCE(memcg->memory.min), 7006 READ_ONCE(parent->memory.emin), 7007 atomic_long_read(&parent->memory.children_min_usage))); 7008 7009 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 7010 READ_ONCE(memcg->memory.low), 7011 READ_ONCE(parent->memory.elow), 7012 atomic_long_read(&parent->memory.children_low_usage))); 7013 } 7014 7015 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, 7016 gfp_t gfp) 7017 { 7018 long nr_pages = folio_nr_pages(folio); 7019 int ret; 7020 7021 ret = try_charge(memcg, gfp, nr_pages); 7022 if (ret) 7023 goto out; 7024 7025 css_get(&memcg->css); 7026 commit_charge(folio, memcg); 7027 7028 local_irq_disable(); 7029 mem_cgroup_charge_statistics(memcg, nr_pages); 7030 memcg_check_events(memcg, folio_nid(folio)); 7031 local_irq_enable(); 7032 out: 7033 return ret; 7034 } 7035 7036 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) 7037 { 7038 struct mem_cgroup *memcg; 7039 int ret; 7040 7041 memcg = get_mem_cgroup_from_mm(mm); 7042 ret = charge_memcg(folio, memcg, gfp); 7043 css_put(&memcg->css); 7044 7045 return ret; 7046 } 7047 7048 /** 7049 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. 7050 * @folio: folio to charge. 
7051 * @mm: mm context of the victim 7052 * @gfp: reclaim mode 7053 * @entry: swap entry for which the folio is allocated 7054 * 7055 * This function charges a folio allocated for swapin. Please call this before 7056 * adding the folio to the swapcache. 7057 * 7058 * Returns 0 on success. Otherwise, an error code is returned. 7059 */ 7060 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, 7061 gfp_t gfp, swp_entry_t entry) 7062 { 7063 struct mem_cgroup *memcg; 7064 unsigned short id; 7065 int ret; 7066 7067 if (mem_cgroup_disabled()) 7068 return 0; 7069 7070 id = lookup_swap_cgroup_id(entry); 7071 rcu_read_lock(); 7072 memcg = mem_cgroup_from_id(id); 7073 if (!memcg || !css_tryget_online(&memcg->css)) 7074 memcg = get_mem_cgroup_from_mm(mm); 7075 rcu_read_unlock(); 7076 7077 ret = charge_memcg(folio, memcg, gfp); 7078 7079 css_put(&memcg->css); 7080 return ret; 7081 } 7082 7083 /* 7084 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 7085 * @entry: swap entry for which the page is charged 7086 * 7087 * Call this function after successfully adding the charged page to swapcache. 7088 * 7089 * Note: This function assumes the page for which swap slot is being uncharged 7090 * is order 0 page. 7091 */ 7092 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) 7093 { 7094 /* 7095 * Cgroup1's unified memory+swap counter has been charged with the 7096 * new swapcache page, finish the transfer by uncharging the swap 7097 * slot. The swap slot would also get uncharged when it dies, but 7098 * it can stick around indefinitely and we'd count the page twice 7099 * the entire time. 7100 * 7101 * Cgroup2 has separate resource counters for memory and swap, 7102 * so this is a non-issue here. Memory and swap charge lifetimes 7103 * correspond 1:1 to page and swap slot lifetimes: we charge the 7104 * page to memory here, and uncharge swap when the slot is freed. 7105 */ 7106 if (!mem_cgroup_disabled() && do_memsw_account()) { 7107 /* 7108 * The swap entry might not get freed for a long time, 7109 * let's not wait for it. The page already received a 7110 * memory+swap charge, drop the swap entry duplicate. 
7111 */ 7112 mem_cgroup_uncharge_swap(entry, 1); 7113 } 7114 } 7115 7116 struct uncharge_gather { 7117 struct mem_cgroup *memcg; 7118 unsigned long nr_memory; 7119 unsigned long pgpgout; 7120 unsigned long nr_kmem; 7121 int nid; 7122 }; 7123 7124 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 7125 { 7126 memset(ug, 0, sizeof(*ug)); 7127 } 7128 7129 static void uncharge_batch(const struct uncharge_gather *ug) 7130 { 7131 unsigned long flags; 7132 7133 if (ug->nr_memory) { 7134 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 7135 if (do_memsw_account()) 7136 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 7137 if (ug->nr_kmem) 7138 memcg_account_kmem(ug->memcg, -ug->nr_kmem); 7139 memcg_oom_recover(ug->memcg); 7140 } 7141 7142 local_irq_save(flags); 7143 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 7144 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); 7145 memcg_check_events(ug->memcg, ug->nid); 7146 local_irq_restore(flags); 7147 7148 /* drop reference from uncharge_folio */ 7149 css_put(&ug->memcg->css); 7150 } 7151 7152 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) 7153 { 7154 long nr_pages; 7155 struct mem_cgroup *memcg; 7156 struct obj_cgroup *objcg; 7157 7158 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 7159 7160 /* 7161 * Nobody should be changing or seriously looking at 7162 * folio memcg or objcg at this point, we have fully 7163 * exclusive access to the folio. 7164 */ 7165 if (folio_memcg_kmem(folio)) { 7166 objcg = __folio_objcg(folio); 7167 /* 7168 * This get matches the put at the end of the function and 7169 * kmem pages do not hold memcg references anymore. 7170 */ 7171 memcg = get_mem_cgroup_from_objcg(objcg); 7172 } else { 7173 memcg = __folio_memcg(folio); 7174 } 7175 7176 if (!memcg) 7177 return; 7178 7179 if (ug->memcg != memcg) { 7180 if (ug->memcg) { 7181 uncharge_batch(ug); 7182 uncharge_gather_clear(ug); 7183 } 7184 ug->memcg = memcg; 7185 ug->nid = folio_nid(folio); 7186 7187 /* pairs with css_put in uncharge_batch */ 7188 css_get(&memcg->css); 7189 } 7190 7191 nr_pages = folio_nr_pages(folio); 7192 7193 if (folio_memcg_kmem(folio)) { 7194 ug->nr_memory += nr_pages; 7195 ug->nr_kmem += nr_pages; 7196 7197 folio->memcg_data = 0; 7198 obj_cgroup_put(objcg); 7199 } else { 7200 /* LRU pages aren't accounted at the root level */ 7201 if (!mem_cgroup_is_root(memcg)) 7202 ug->nr_memory += nr_pages; 7203 ug->pgpgout++; 7204 7205 folio->memcg_data = 0; 7206 } 7207 7208 css_put(&memcg->css); 7209 } 7210 7211 void __mem_cgroup_uncharge(struct folio *folio) 7212 { 7213 struct uncharge_gather ug; 7214 7215 /* Don't touch folio->lru of any random page, pre-check: */ 7216 if (!folio_memcg(folio)) 7217 return; 7218 7219 uncharge_gather_clear(&ug); 7220 uncharge_folio(folio, &ug); 7221 uncharge_batch(&ug); 7222 } 7223 7224 /** 7225 * __mem_cgroup_uncharge_list - uncharge a list of page 7226 * @page_list: list of pages to uncharge 7227 * 7228 * Uncharge a list of pages previously charged with 7229 * __mem_cgroup_charge(). 7230 */ 7231 void __mem_cgroup_uncharge_list(struct list_head *page_list) 7232 { 7233 struct uncharge_gather ug; 7234 struct folio *folio; 7235 7236 uncharge_gather_clear(&ug); 7237 list_for_each_entry(folio, page_list, lru) 7238 uncharge_folio(folio, &ug); 7239 if (ug.memcg) 7240 uncharge_batch(&ug); 7241 } 7242 7243 /** 7244 * mem_cgroup_migrate - Charge a folio's replacement. 7245 * @old: Currently circulating folio. 7246 * @new: Replacement folio. 
7247 * 7248 * Charge @new as a replacement folio for @old. @old will 7249 * be uncharged upon free. 7250 * 7251 * Both folios must be locked, @new->mapping must be set up. 7252 */ 7253 void mem_cgroup_migrate(struct folio *old, struct folio *new) 7254 { 7255 struct mem_cgroup *memcg; 7256 long nr_pages = folio_nr_pages(new); 7257 unsigned long flags; 7258 7259 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 7260 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 7261 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 7262 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); 7263 7264 if (mem_cgroup_disabled()) 7265 return; 7266 7267 /* Page cache replacement: new folio already charged? */ 7268 if (folio_memcg(new)) 7269 return; 7270 7271 memcg = folio_memcg(old); 7272 VM_WARN_ON_ONCE_FOLIO(!memcg, old); 7273 if (!memcg) 7274 return; 7275 7276 /* Force-charge the new page. The old one will be freed soon */ 7277 if (!mem_cgroup_is_root(memcg)) { 7278 page_counter_charge(&memcg->memory, nr_pages); 7279 if (do_memsw_account()) 7280 page_counter_charge(&memcg->memsw, nr_pages); 7281 } 7282 7283 css_get(&memcg->css); 7284 commit_charge(new, memcg); 7285 7286 local_irq_save(flags); 7287 mem_cgroup_charge_statistics(memcg, nr_pages); 7288 memcg_check_events(memcg, folio_nid(new)); 7289 local_irq_restore(flags); 7290 } 7291 7292 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7293 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7294 7295 void mem_cgroup_sk_alloc(struct sock *sk) 7296 { 7297 struct mem_cgroup *memcg; 7298 7299 if (!mem_cgroup_sockets_enabled) 7300 return; 7301 7302 /* Do not associate the sock with unrelated interrupted task's memcg. */ 7303 if (!in_task()) 7304 return; 7305 7306 rcu_read_lock(); 7307 memcg = mem_cgroup_from_task(current); 7308 if (mem_cgroup_is_root(memcg)) 7309 goto out; 7310 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7311 goto out; 7312 if (css_tryget(&memcg->css)) 7313 sk->sk_memcg = memcg; 7314 out: 7315 rcu_read_unlock(); 7316 } 7317 7318 void mem_cgroup_sk_free(struct sock *sk) 7319 { 7320 if (sk->sk_memcg) 7321 css_put(&sk->sk_memcg->css); 7322 } 7323 7324 /** 7325 * mem_cgroup_charge_skmem - charge socket memory 7326 * @memcg: memcg to charge 7327 * @nr_pages: number of pages to charge 7328 * @gfp_mask: reclaim mode 7329 * 7330 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7331 * @memcg's configured limit, %false if it doesn't. 
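 *
 * On the legacy (cgroup1) hierarchy the charge goes to the separate
 * tcpmem counter, and is forced through even over the limit when
 * __GFP_NOFAIL is set; on the default hierarchy it is charged to the
 * unified memory counter and accounted under MEMCG_SOCK.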
7332 */ 7333 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, 7334 gfp_t gfp_mask) 7335 { 7336 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7337 struct page_counter *fail; 7338 7339 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7340 memcg->tcpmem_pressure = 0; 7341 return true; 7342 } 7343 memcg->tcpmem_pressure = 1; 7344 if (gfp_mask & __GFP_NOFAIL) { 7345 page_counter_charge(&memcg->tcpmem, nr_pages); 7346 return true; 7347 } 7348 return false; 7349 } 7350 7351 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { 7352 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7353 return true; 7354 } 7355 7356 return false; 7357 } 7358 7359 /** 7360 * mem_cgroup_uncharge_skmem - uncharge socket memory 7361 * @memcg: memcg to uncharge 7362 * @nr_pages: number of pages to uncharge 7363 */ 7364 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7365 { 7366 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7367 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7368 return; 7369 } 7370 7371 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7372 7373 refill_stock(memcg, nr_pages); 7374 } 7375 7376 static int __init cgroup_memory(char *s) 7377 { 7378 char *token; 7379 7380 while ((token = strsep(&s, ",")) != NULL) { 7381 if (!*token) 7382 continue; 7383 if (!strcmp(token, "nosocket")) 7384 cgroup_memory_nosocket = true; 7385 if (!strcmp(token, "nokmem")) 7386 cgroup_memory_nokmem = true; 7387 if (!strcmp(token, "nobpf")) 7388 cgroup_memory_nobpf = true; 7389 } 7390 return 1; 7391 } 7392 __setup("cgroup.memory=", cgroup_memory); 7393 7394 /* 7395 * subsys_initcall() for memory controller. 7396 * 7397 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7398 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7399 * basically everything that doesn't depend on a specific mem_cgroup structure 7400 * should be initialized from here. 7401 */ 7402 static int __init mem_cgroup_init(void) 7403 { 7404 int cpu, node; 7405 7406 /* 7407 * Currently s32 type (can refer to struct batched_lruvec_stat) is 7408 * used for per-memcg-per-cpu caching of per-node statistics. In order 7409 * to work fine, we should make sure that the overfill threshold can't 7410 * exceed S32_MAX / PAGE_SIZE. 7411 */ 7412 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 7413 7414 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7415 memcg_hotplug_cpu_dead); 7416 7417 for_each_possible_cpu(cpu) 7418 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7419 drain_local_stock); 7420 7421 for_each_node(node) { 7422 struct mem_cgroup_tree_per_node *rtpn; 7423 7424 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7425 node_online(node) ? node : NUMA_NO_NODE); 7426 7427 rtpn->rb_root = RB_ROOT; 7428 rtpn->rb_rightmost = NULL; 7429 spin_lock_init(&rtpn->lock); 7430 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7431 } 7432 7433 return 0; 7434 } 7435 subsys_initcall(mem_cgroup_init); 7436 7437 #ifdef CONFIG_SWAP 7438 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7439 { 7440 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7441 /* 7442 * The root cgroup cannot be destroyed, so it's refcount must 7443 * always be >= 1. 
7444 */ 7445 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { 7446 VM_BUG_ON(1); 7447 break; 7448 } 7449 memcg = parent_mem_cgroup(memcg); 7450 if (!memcg) 7451 memcg = root_mem_cgroup; 7452 } 7453 return memcg; 7454 } 7455 7456 /** 7457 * mem_cgroup_swapout - transfer a memsw charge to swap 7458 * @folio: folio whose memsw charge to transfer 7459 * @entry: swap entry to move the charge to 7460 * 7461 * Transfer the memsw charge of @folio to @entry. 7462 */ 7463 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) 7464 { 7465 struct mem_cgroup *memcg, *swap_memcg; 7466 unsigned int nr_entries; 7467 unsigned short oldid; 7468 7469 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 7470 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 7471 7472 if (mem_cgroup_disabled()) 7473 return; 7474 7475 if (!do_memsw_account()) 7476 return; 7477 7478 memcg = folio_memcg(folio); 7479 7480 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 7481 if (!memcg) 7482 return; 7483 7484 /* 7485 * In case the memcg owning these pages has been offlined and doesn't 7486 * have an ID allocated to it anymore, charge the closest online 7487 * ancestor for the swap instead and transfer the memory+swap charge. 7488 */ 7489 swap_memcg = mem_cgroup_id_get_online(memcg); 7490 nr_entries = folio_nr_pages(folio); 7491 /* Get references for the tail pages, too */ 7492 if (nr_entries > 1) 7493 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7494 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7495 nr_entries); 7496 VM_BUG_ON_FOLIO(oldid, folio); 7497 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7498 7499 folio->memcg_data = 0; 7500 7501 if (!mem_cgroup_is_root(memcg)) 7502 page_counter_uncharge(&memcg->memory, nr_entries); 7503 7504 if (memcg != swap_memcg) { 7505 if (!mem_cgroup_is_root(swap_memcg)) 7506 page_counter_charge(&swap_memcg->memsw, nr_entries); 7507 page_counter_uncharge(&memcg->memsw, nr_entries); 7508 } 7509 7510 /* 7511 * Interrupts should be disabled here because the caller holds the 7512 * i_pages lock which is taken with interrupts-off. It is 7513 * important here to have the interrupts disabled because it is the 7514 * only synchronisation we have for updating the per-CPU variables. 7515 */ 7516 memcg_stats_lock(); 7517 mem_cgroup_charge_statistics(memcg, -nr_entries); 7518 memcg_stats_unlock(); 7519 memcg_check_events(memcg, folio_nid(folio)); 7520 7521 css_put(&memcg->css); 7522 } 7523 7524 /** 7525 * __mem_cgroup_try_charge_swap - try charging swap space for a folio 7526 * @folio: folio being added to swap 7527 * @entry: swap entry to charge 7528 * 7529 * Try to charge @folio's memcg for the swap space at @entry. 7530 * 7531 * Returns 0 on success, -ENOMEM on failure. 
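 *
 * Note that with the legacy memory+swap (memsw) accounting mode this is
 * a no-op returning 0, since swap space is already covered by the
 * combined memory+swap charge.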
7532 */ 7533 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) 7534 { 7535 unsigned int nr_pages = folio_nr_pages(folio); 7536 struct page_counter *counter; 7537 struct mem_cgroup *memcg; 7538 unsigned short oldid; 7539 7540 if (do_memsw_account()) 7541 return 0; 7542 7543 memcg = folio_memcg(folio); 7544 7545 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); 7546 if (!memcg) 7547 return 0; 7548 7549 if (!entry.val) { 7550 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7551 return 0; 7552 } 7553 7554 memcg = mem_cgroup_id_get_online(memcg); 7555 7556 if (!mem_cgroup_is_root(memcg) && 7557 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7558 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7559 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7560 mem_cgroup_id_put(memcg); 7561 return -ENOMEM; 7562 } 7563 7564 /* Get references for the tail pages, too */ 7565 if (nr_pages > 1) 7566 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7567 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7568 VM_BUG_ON_FOLIO(oldid, folio); 7569 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7570 7571 return 0; 7572 } 7573 7574 /** 7575 * __mem_cgroup_uncharge_swap - uncharge swap space 7576 * @entry: swap entry to uncharge 7577 * @nr_pages: the amount of swap space to uncharge 7578 */ 7579 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7580 { 7581 struct mem_cgroup *memcg; 7582 unsigned short id; 7583 7584 if (mem_cgroup_disabled()) 7585 return; 7586 7587 id = swap_cgroup_record(entry, 0, nr_pages); 7588 rcu_read_lock(); 7589 memcg = mem_cgroup_from_id(id); 7590 if (memcg) { 7591 if (!mem_cgroup_is_root(memcg)) { 7592 if (do_memsw_account()) 7593 page_counter_uncharge(&memcg->memsw, nr_pages); 7594 else 7595 page_counter_uncharge(&memcg->swap, nr_pages); 7596 } 7597 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7598 mem_cgroup_id_put_many(memcg, nr_pages); 7599 } 7600 rcu_read_unlock(); 7601 } 7602 7603 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7604 { 7605 long nr_swap_pages = get_nr_swap_pages(); 7606 7607 if (mem_cgroup_disabled() || do_memsw_account()) 7608 return nr_swap_pages; 7609 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) 7610 nr_swap_pages = min_t(long, nr_swap_pages, 7611 READ_ONCE(memcg->swap.max) - 7612 page_counter_read(&memcg->swap)); 7613 return nr_swap_pages; 7614 } 7615 7616 bool mem_cgroup_swap_full(struct folio *folio) 7617 { 7618 struct mem_cgroup *memcg; 7619 7620 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 7621 7622 if (vm_swap_full()) 7623 return true; 7624 if (do_memsw_account()) 7625 return false; 7626 7627 memcg = folio_memcg(folio); 7628 if (!memcg) 7629 return false; 7630 7631 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { 7632 unsigned long usage = page_counter_read(&memcg->swap); 7633 7634 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7635 usage * 2 >= READ_ONCE(memcg->swap.max)) 7636 return true; 7637 } 7638 7639 return false; 7640 } 7641 7642 static int __init setup_swap_account(char *s) 7643 { 7644 pr_warn_once("The swapaccount= commandline option is deprecated. 
" 7645 "Please report your usecase to linux-mm@kvack.org if you " 7646 "depend on this functionality.\n"); 7647 return 1; 7648 } 7649 __setup("swapaccount=", setup_swap_account); 7650 7651 static u64 swap_current_read(struct cgroup_subsys_state *css, 7652 struct cftype *cft) 7653 { 7654 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7655 7656 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7657 } 7658 7659 static int swap_high_show(struct seq_file *m, void *v) 7660 { 7661 return seq_puts_memcg_tunable(m, 7662 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7663 } 7664 7665 static ssize_t swap_high_write(struct kernfs_open_file *of, 7666 char *buf, size_t nbytes, loff_t off) 7667 { 7668 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7669 unsigned long high; 7670 int err; 7671 7672 buf = strstrip(buf); 7673 err = page_counter_memparse(buf, "max", &high); 7674 if (err) 7675 return err; 7676 7677 page_counter_set_high(&memcg->swap, high); 7678 7679 return nbytes; 7680 } 7681 7682 static int swap_max_show(struct seq_file *m, void *v) 7683 { 7684 return seq_puts_memcg_tunable(m, 7685 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7686 } 7687 7688 static ssize_t swap_max_write(struct kernfs_open_file *of, 7689 char *buf, size_t nbytes, loff_t off) 7690 { 7691 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7692 unsigned long max; 7693 int err; 7694 7695 buf = strstrip(buf); 7696 err = page_counter_memparse(buf, "max", &max); 7697 if (err) 7698 return err; 7699 7700 xchg(&memcg->swap.max, max); 7701 7702 return nbytes; 7703 } 7704 7705 static int swap_events_show(struct seq_file *m, void *v) 7706 { 7707 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7708 7709 seq_printf(m, "high %lu\n", 7710 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7711 seq_printf(m, "max %lu\n", 7712 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7713 seq_printf(m, "fail %lu\n", 7714 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7715 7716 return 0; 7717 } 7718 7719 static struct cftype swap_files[] = { 7720 { 7721 .name = "swap.current", 7722 .flags = CFTYPE_NOT_ON_ROOT, 7723 .read_u64 = swap_current_read, 7724 }, 7725 { 7726 .name = "swap.high", 7727 .flags = CFTYPE_NOT_ON_ROOT, 7728 .seq_show = swap_high_show, 7729 .write = swap_high_write, 7730 }, 7731 { 7732 .name = "swap.max", 7733 .flags = CFTYPE_NOT_ON_ROOT, 7734 .seq_show = swap_max_show, 7735 .write = swap_max_write, 7736 }, 7737 { 7738 .name = "swap.events", 7739 .flags = CFTYPE_NOT_ON_ROOT, 7740 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7741 .seq_show = swap_events_show, 7742 }, 7743 { } /* terminate */ 7744 }; 7745 7746 static struct cftype memsw_files[] = { 7747 { 7748 .name = "memsw.usage_in_bytes", 7749 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7750 .read_u64 = mem_cgroup_read_u64, 7751 }, 7752 { 7753 .name = "memsw.max_usage_in_bytes", 7754 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7755 .write = mem_cgroup_reset, 7756 .read_u64 = mem_cgroup_read_u64, 7757 }, 7758 { 7759 .name = "memsw.limit_in_bytes", 7760 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7761 .write = mem_cgroup_write, 7762 .read_u64 = mem_cgroup_read_u64, 7763 }, 7764 { 7765 .name = "memsw.failcnt", 7766 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7767 .write = mem_cgroup_reset, 7768 .read_u64 = mem_cgroup_read_u64, 7769 }, 7770 { }, /* terminate */ 7771 }; 7772 7773 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 7774 /** 7775 * obj_cgroup_may_zswap - check if 
this cgroup can zswap
7776  * @objcg: the object cgroup
7777  *
7778  * Check if the hierarchical zswap limit has been reached.
7779  *
7780  * This doesn't check for specific headroom, and it is not atomic
7781  * either. But with zswap, the size of the allocation is only known
7782  * once compression has occurred, and this optimistic pre-check avoids
7783  * spending cycles on compression when there is already no room left
7784  * or zswap is disabled altogether somewhere in the hierarchy.
7785  */
7786 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7787 {
7788 	struct mem_cgroup *memcg, *original_memcg;
7789 	bool ret = true;
7790 
7791 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7792 		return true;
7793 
7794 	original_memcg = get_mem_cgroup_from_objcg(objcg);
7795 	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
7796 	     memcg = parent_mem_cgroup(memcg)) {
7797 		unsigned long max = READ_ONCE(memcg->zswap_max);
7798 		unsigned long pages;
7799 
7800 		if (max == PAGE_COUNTER_MAX)
7801 			continue;
7802 		if (max == 0) {
7803 			ret = false;
7804 			break;
7805 		}
7806 
7807 		cgroup_rstat_flush(memcg->css.cgroup);
7808 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7809 		if (pages < max)
7810 			continue;
7811 		ret = false;
7812 		break;
7813 	}
7814 	mem_cgroup_put(original_memcg);
7815 	return ret;
7816 }
7817 
7818 /**
7819  * obj_cgroup_charge_zswap - charge compression backend memory
7820  * @objcg: the object cgroup
7821  * @size: size of compressed object
7822  *
7823  * This forces the charge after obj_cgroup_may_zswap() allowed
7824  * compression and storage in zswap for this cgroup to go ahead.
7825  */
7826 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7827 {
7828 	struct mem_cgroup *memcg;
7829 
7830 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7831 		return;
7832 
7833 	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7834 
7835 	/* PF_MEMALLOC context, charging must succeed */
7836 	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7837 		VM_WARN_ON_ONCE(1);
7838 
7839 	rcu_read_lock();
7840 	memcg = obj_cgroup_memcg(objcg);
7841 	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7842 	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7843 	rcu_read_unlock();
7844 }
7845 
7846 /**
7847  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7848  * @objcg: the object cgroup
7849  * @size: size of compressed object
7850  *
7851  * Uncharges zswap memory on page in.
7852 */ 7853 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) 7854 { 7855 struct mem_cgroup *memcg; 7856 7857 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7858 return; 7859 7860 obj_cgroup_uncharge(objcg, size); 7861 7862 rcu_read_lock(); 7863 memcg = obj_cgroup_memcg(objcg); 7864 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); 7865 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); 7866 rcu_read_unlock(); 7867 } 7868 7869 static u64 zswap_current_read(struct cgroup_subsys_state *css, 7870 struct cftype *cft) 7871 { 7872 cgroup_rstat_flush(css->cgroup); 7873 return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B); 7874 } 7875 7876 static int zswap_max_show(struct seq_file *m, void *v) 7877 { 7878 return seq_puts_memcg_tunable(m, 7879 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); 7880 } 7881 7882 static ssize_t zswap_max_write(struct kernfs_open_file *of, 7883 char *buf, size_t nbytes, loff_t off) 7884 { 7885 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7886 unsigned long max; 7887 int err; 7888 7889 buf = strstrip(buf); 7890 err = page_counter_memparse(buf, "max", &max); 7891 if (err) 7892 return err; 7893 7894 xchg(&memcg->zswap_max, max); 7895 7896 return nbytes; 7897 } 7898 7899 static struct cftype zswap_files[] = { 7900 { 7901 .name = "zswap.current", 7902 .flags = CFTYPE_NOT_ON_ROOT, 7903 .read_u64 = zswap_current_read, 7904 }, 7905 { 7906 .name = "zswap.max", 7907 .flags = CFTYPE_NOT_ON_ROOT, 7908 .seq_show = zswap_max_show, 7909 .write = zswap_max_write, 7910 }, 7911 { } /* terminate */ 7912 }; 7913 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */ 7914 7915 static int __init mem_cgroup_swap_init(void) 7916 { 7917 if (mem_cgroup_disabled()) 7918 return 0; 7919 7920 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7921 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7922 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) 7923 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files)); 7924 #endif 7925 return 0; 7926 } 7927 subsys_initcall(mem_cgroup_swap_init); 7928 7929 #endif /* CONFIG_SWAP */ 7930