1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 * 24 * Per memcg lru locking 25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi 26 */ 27 28 #include <linux/page_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/pagewalk.h> 32 #include <linux/sched/mm.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/hugetlb.h> 35 #include <linux/pagemap.h> 36 #include <linux/vm_event_item.h> 37 #include <linux/smp.h> 38 #include <linux/page-flags.h> 39 #include <linux/backing-dev.h> 40 #include <linux/bit_spinlock.h> 41 #include <linux/rcupdate.h> 42 #include <linux/limits.h> 43 #include <linux/export.h> 44 #include <linux/mutex.h> 45 #include <linux/rbtree.h> 46 #include <linux/slab.h> 47 #include <linux/swap.h> 48 #include <linux/swapops.h> 49 #include <linux/spinlock.h> 50 #include <linux/eventfd.h> 51 #include <linux/poll.h> 52 #include <linux/sort.h> 53 #include <linux/fs.h> 54 #include <linux/seq_file.h> 55 #include <linux/vmpressure.h> 56 #include <linux/mm_inline.h> 57 #include <linux/swap_cgroup.h> 58 #include <linux/cpu.h> 59 #include <linux/oom.h> 60 #include <linux/lockdep.h> 61 #include <linux/file.h> 62 #include <linux/tracehook.h> 63 #include <linux/psi.h> 64 #include <linux/seq_buf.h> 65 #include "internal.h" 66 #include <net/sock.h> 67 #include <net/ip.h> 68 #include "slab.h" 69 70 #include <linux/uaccess.h> 71 72 #include <trace/events/vmscan.h> 73 74 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 75 EXPORT_SYMBOL(memory_cgrp_subsys); 76 77 struct mem_cgroup *root_mem_cgroup __read_mostly; 78 79 /* Active memory cgroup to use from an interrupt context */ 80 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); 81 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); 82 83 /* Socket memory accounting disabled? */ 84 static bool cgroup_memory_nosocket __ro_after_init; 85 86 /* Kernel memory accounting disabled? 
*/ 87 bool cgroup_memory_nokmem __ro_after_init; 88 89 /* Whether the swap controller is active */ 90 #ifdef CONFIG_MEMCG_SWAP 91 bool cgroup_memory_noswap __ro_after_init; 92 #else 93 #define cgroup_memory_noswap 1 94 #endif 95 96 #ifdef CONFIG_CGROUP_WRITEBACK 97 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); 98 #endif 99 100 /* Whether legacy memory+swap accounting is active */ 101 static bool do_memsw_account(void) 102 { 103 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap; 104 } 105 106 #define THRESHOLDS_EVENTS_TARGET 128 107 #define SOFTLIMIT_EVENTS_TARGET 1024 108 109 /* 110 * Cgroups above their limits are maintained in a RB-Tree, independent of 111 * their hierarchy representation 112 */ 113 114 struct mem_cgroup_tree_per_node { 115 struct rb_root rb_root; 116 struct rb_node *rb_rightmost; 117 spinlock_t lock; 118 }; 119 120 struct mem_cgroup_tree { 121 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES]; 122 }; 123 124 static struct mem_cgroup_tree soft_limit_tree __read_mostly; 125 126 /* for OOM */ 127 struct mem_cgroup_eventfd_list { 128 struct list_head list; 129 struct eventfd_ctx *eventfd; 130 }; 131 132 /* 133 * cgroup_event represents events which userspace want to receive. 134 */ 135 struct mem_cgroup_event { 136 /* 137 * memcg which the event belongs to. 138 */ 139 struct mem_cgroup *memcg; 140 /* 141 * eventfd to signal userspace about the event. 142 */ 143 struct eventfd_ctx *eventfd; 144 /* 145 * Each of these stored in a list by the cgroup. 146 */ 147 struct list_head list; 148 /* 149 * register_event() callback will be used to add new userspace 150 * waiter for changes related to this event. Use eventfd_signal() 151 * on eventfd to send notification to userspace. 152 */ 153 int (*register_event)(struct mem_cgroup *memcg, 154 struct eventfd_ctx *eventfd, const char *args); 155 /* 156 * unregister_event() callback will be called when userspace closes 157 * the eventfd or on cgroup removing. This callback must be set, 158 * if you want provide notification functionality. 159 */ 160 void (*unregister_event)(struct mem_cgroup *memcg, 161 struct eventfd_ctx *eventfd); 162 /* 163 * All fields below needed to unregister event when 164 * userspace closes eventfd. 165 */ 166 poll_table pt; 167 wait_queue_head_t *wqh; 168 wait_queue_entry_t wait; 169 struct work_struct remove; 170 }; 171 172 static void mem_cgroup_threshold(struct mem_cgroup *memcg); 173 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); 174 175 /* Stuffs for move charges at task migration. */ 176 /* 177 * Types of charges to be moved. 178 */ 179 #define MOVE_ANON 0x1U 180 #define MOVE_FILE 0x2U 181 #define MOVE_MASK (MOVE_ANON | MOVE_FILE) 182 183 /* "mc" and its members are protected by cgroup_mutex */ 184 static struct move_charge_struct { 185 spinlock_t lock; /* for from, to */ 186 struct mm_struct *mm; 187 struct mem_cgroup *from; 188 struct mem_cgroup *to; 189 unsigned long flags; 190 unsigned long precharge; 191 unsigned long moved_charge; 192 unsigned long moved_swap; 193 struct task_struct *moving_task; /* a task moving charges */ 194 wait_queue_head_t waitq; /* a waitq for other context */ 195 } mc = { 196 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 197 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 198 }; 199 200 /* 201 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 202 * limit reclaim to prevent infinite loops, if they ever occur. 
203 */ 204 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 205 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2 206 207 /* for encoding cft->private value on file */ 208 enum res_type { 209 _MEM, 210 _MEMSWAP, 211 _OOM_TYPE, 212 _KMEM, 213 _TCP, 214 }; 215 216 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) 217 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) 218 #define MEMFILE_ATTR(val) ((val) & 0xffff) 219 /* Used for OOM notifier */ 220 #define OOM_CONTROL (0) 221 222 /* 223 * Iteration constructs for visiting all cgroups (under a tree). If 224 * loops are exited prematurely (break), mem_cgroup_iter_break() must 225 * be used for reference counting. 226 */ 227 #define for_each_mem_cgroup_tree(iter, root) \ 228 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 229 iter != NULL; \ 230 iter = mem_cgroup_iter(root, iter, NULL)) 231 232 #define for_each_mem_cgroup(iter) \ 233 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 234 iter != NULL; \ 235 iter = mem_cgroup_iter(NULL, iter, NULL)) 236 237 static inline bool task_is_dying(void) 238 { 239 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || 240 (current->flags & PF_EXITING); 241 } 242 243 /* Some nice accessors for the vmpressure. */ 244 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) 245 { 246 if (!memcg) 247 memcg = root_mem_cgroup; 248 return &memcg->vmpressure; 249 } 250 251 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr) 252 { 253 return container_of(vmpr, struct mem_cgroup, vmpressure); 254 } 255 256 #ifdef CONFIG_MEMCG_KMEM 257 extern spinlock_t css_set_lock; 258 259 bool mem_cgroup_kmem_disabled(void) 260 { 261 return cgroup_memory_nokmem; 262 } 263 264 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 265 unsigned int nr_pages); 266 267 static void obj_cgroup_release(struct percpu_ref *ref) 268 { 269 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt); 270 unsigned int nr_bytes; 271 unsigned int nr_pages; 272 unsigned long flags; 273 274 /* 275 * At this point all allocated objects are freed, and 276 * objcg->nr_charged_bytes can't have an arbitrary byte value. 277 * However, it can be PAGE_SIZE or (x * PAGE_SIZE). 278 * 279 * The following sequence can lead to it: 280 * 1) CPU0: objcg == stock->cached_objcg 281 * 2) CPU1: we do a small allocation (e.g. 92 bytes), 282 * PAGE_SIZE bytes are charged 283 * 3) CPU1: a process from another memcg is allocating something, 284 * the stock is flushed, 285 * objcg->nr_charged_bytes = PAGE_SIZE - 92 286 * 4) CPU0: we do release this object, 287 * 92 bytes are added to stock->nr_bytes 288 * 5) CPU0: stock is flushed, 289 * 92 bytes are added to objcg->nr_charged_bytes 290 * 291 * As a result, nr_charged_bytes == PAGE_SIZE. 292 * This page will be uncharged in obj_cgroup_release().
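 * (As a worked check of the sequence above, assuming a 4096-byte PAGE_SIZE:
 * the objcg ends up with (4096 - 92) + 92 == 4096 charged bytes, i.e.
 * exactly the one whole page that gets uncharged below.)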
293 */ 294 nr_bytes = atomic_read(&objcg->nr_charged_bytes); 295 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); 296 nr_pages = nr_bytes >> PAGE_SHIFT; 297 298 if (nr_pages) 299 obj_cgroup_uncharge_pages(objcg, nr_pages); 300 301 spin_lock_irqsave(&css_set_lock, flags); 302 list_del(&objcg->list); 303 spin_unlock_irqrestore(&css_set_lock, flags); 304 305 percpu_ref_exit(ref); 306 kfree_rcu(objcg, rcu); 307 } 308 309 static struct obj_cgroup *obj_cgroup_alloc(void) 310 { 311 struct obj_cgroup *objcg; 312 int ret; 313 314 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); 315 if (!objcg) 316 return NULL; 317 318 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, 319 GFP_KERNEL); 320 if (ret) { 321 kfree(objcg); 322 return NULL; 323 } 324 INIT_LIST_HEAD(&objcg->list); 325 return objcg; 326 } 327 328 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, 329 struct mem_cgroup *parent) 330 { 331 struct obj_cgroup *objcg, *iter; 332 333 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); 334 335 spin_lock_irq(&css_set_lock); 336 337 /* 1) Ready to reparent active objcg. */ 338 list_add(&objcg->list, &memcg->objcg_list); 339 /* 2) Reparent active objcg and already reparented objcgs to parent. */ 340 list_for_each_entry(iter, &memcg->objcg_list, list) 341 WRITE_ONCE(iter->memcg, parent); 342 /* 3) Move already reparented objcgs to the parent's list */ 343 list_splice(&memcg->objcg_list, &parent->objcg_list); 344 345 spin_unlock_irq(&css_set_lock); 346 347 percpu_ref_kill(&objcg->refcnt); 348 } 349 350 /* 351 * This will be used as a shrinker list's index. 352 * The main reason for not using cgroup id for this: 353 * this works better in sparse environments, where we have a lot of memcgs, 354 * but only a few kmem-limited. Or also, if we have, for instance, 200 355 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 356 * 200 entry array for that. 357 * 358 * The current size of the caches array is stored in memcg_nr_cache_ids. It 359 * will double each time we have to increase it. 360 */ 361 static DEFINE_IDA(memcg_cache_ida); 362 int memcg_nr_cache_ids; 363 364 /* Protects memcg_nr_cache_ids */ 365 static DECLARE_RWSEM(memcg_cache_ids_sem); 366 367 void memcg_get_cache_ids(void) 368 { 369 down_read(&memcg_cache_ids_sem); 370 } 371 372 void memcg_put_cache_ids(void) 373 { 374 up_read(&memcg_cache_ids_sem); 375 } 376 377 /* 378 * MIN_SIZE is different than 1, because we would like to avoid going through 379 * the alloc/free process all the time. In a small machine, 4 kmem-limited 380 * cgroups is a reasonable guess. In the future, it could be a parameter or 381 * tunable, but that is strictly not necessary. 382 * 383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 384 * this constant directly from cgroup, but it is understandable that this is 385 * better kept as an internal representation in cgroup.c. In any case, the 386 * cgrp_id space is not getting any smaller, and we don't have to necessarily 387 * increase ours as well if it increases. 388 */ 389 #define MEMCG_CACHES_MIN_SIZE 4 390 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 391 392 /* 393 * A lot of the calls to the cache allocation functions are expected to be 394 * inlined by the compiler. 
Since the calls to memcg_slab_pre_alloc_hook() are 395 * conditional to this static branch, we'll have to allow modules that does 396 * kmem_cache_alloc and the such to see this symbol as well 397 */ 398 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); 399 EXPORT_SYMBOL(memcg_kmem_enabled_key); 400 #endif 401 402 /** 403 * mem_cgroup_css_from_page - css of the memcg associated with a page 404 * @page: page of interest 405 * 406 * If memcg is bound to the default hierarchy, css of the memcg associated 407 * with @page is returned. The returned css remains associated with @page 408 * until it is released. 409 * 410 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 411 * is returned. 412 */ 413 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 414 { 415 struct mem_cgroup *memcg; 416 417 memcg = page_memcg(page); 418 419 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 420 memcg = root_mem_cgroup; 421 422 return &memcg->css; 423 } 424 425 /** 426 * page_cgroup_ino - return inode number of the memcg a page is charged to 427 * @page: the page 428 * 429 * Look up the closest online ancestor of the memory cgroup @page is charged to 430 * and return its inode number or 0 if @page is not charged to any cgroup. It 431 * is safe to call this function without holding a reference to @page. 432 * 433 * Note, this function is inherently racy, because there is nothing to prevent 434 * the cgroup inode from getting torn down and potentially reallocated a moment 435 * after page_cgroup_ino() returns, so it only should be used by callers that 436 * do not care (such as procfs interfaces). 437 */ 438 ino_t page_cgroup_ino(struct page *page) 439 { 440 struct mem_cgroup *memcg; 441 unsigned long ino = 0; 442 443 rcu_read_lock(); 444 memcg = page_memcg_check(page); 445 446 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 447 memcg = parent_mem_cgroup(memcg); 448 if (memcg) 449 ino = cgroup_ino(memcg->css.cgroup); 450 rcu_read_unlock(); 451 return ino; 452 } 453 454 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 455 struct mem_cgroup_tree_per_node *mctz, 456 unsigned long new_usage_in_excess) 457 { 458 struct rb_node **p = &mctz->rb_root.rb_node; 459 struct rb_node *parent = NULL; 460 struct mem_cgroup_per_node *mz_node; 461 bool rightmost = true; 462 463 if (mz->on_tree) 464 return; 465 466 mz->usage_in_excess = new_usage_in_excess; 467 if (!mz->usage_in_excess) 468 return; 469 while (*p) { 470 parent = *p; 471 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 472 tree_node); 473 if (mz->usage_in_excess < mz_node->usage_in_excess) { 474 p = &(*p)->rb_left; 475 rightmost = false; 476 } else { 477 p = &(*p)->rb_right; 478 } 479 } 480 481 if (rightmost) 482 mctz->rb_rightmost = &mz->tree_node; 483 484 rb_link_node(&mz->tree_node, parent, p); 485 rb_insert_color(&mz->tree_node, &mctz->rb_root); 486 mz->on_tree = true; 487 } 488 489 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 490 struct mem_cgroup_tree_per_node *mctz) 491 { 492 if (!mz->on_tree) 493 return; 494 495 if (&mz->tree_node == mctz->rb_rightmost) 496 mctz->rb_rightmost = rb_prev(&mz->tree_node); 497 498 rb_erase(&mz->tree_node, &mctz->rb_root); 499 mz->on_tree = false; 500 } 501 502 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 503 struct mem_cgroup_tree_per_node *mctz) 504 { 505 unsigned long flags; 506 507 spin_lock_irqsave(&mctz->lock, flags); 508 __mem_cgroup_remove_exceeded(mz, mctz); 509 
spin_unlock_irqrestore(&mctz->lock, flags); 510 } 511 512 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 513 { 514 unsigned long nr_pages = page_counter_read(&memcg->memory); 515 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 516 unsigned long excess = 0; 517 518 if (nr_pages > soft_limit) 519 excess = nr_pages - soft_limit; 520 521 return excess; 522 } 523 524 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid) 525 { 526 unsigned long excess; 527 struct mem_cgroup_per_node *mz; 528 struct mem_cgroup_tree_per_node *mctz; 529 530 mctz = soft_limit_tree.rb_tree_per_node[nid]; 531 if (!mctz) 532 return; 533 /* 534 * Necessary to update all ancestors when hierarchy is used, 535 * because their event counter is not touched. 536 */ 537 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 538 mz = memcg->nodeinfo[nid]; 539 excess = soft_limit_excess(memcg); 540 /* 541 * We have to update the tree if mz is on RB-tree or 542 * mem is over its softlimit. 543 */ 544 if (excess || mz->on_tree) { 545 unsigned long flags; 546 547 spin_lock_irqsave(&mctz->lock, flags); 548 /* if on-tree, remove it */ 549 if (mz->on_tree) 550 __mem_cgroup_remove_exceeded(mz, mctz); 551 /* 552 * Insert again. mz->usage_in_excess will be updated. 553 * If excess is 0, no tree ops. 554 */ 555 __mem_cgroup_insert_exceeded(mz, mctz, excess); 556 spin_unlock_irqrestore(&mctz->lock, flags); 557 } 558 } 559 } 560 561 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 562 { 563 struct mem_cgroup_tree_per_node *mctz; 564 struct mem_cgroup_per_node *mz; 565 int nid; 566 567 for_each_node(nid) { 568 mz = memcg->nodeinfo[nid]; 569 mctz = soft_limit_tree.rb_tree_per_node[nid]; 570 if (mctz) 571 mem_cgroup_remove_exceeded(mz, mctz); 572 } 573 } 574 575 static struct mem_cgroup_per_node * 576 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 577 { 578 struct mem_cgroup_per_node *mz; 579 580 retry: 581 mz = NULL; 582 if (!mctz->rb_rightmost) 583 goto done; /* Nothing to reclaim from */ 584 585 mz = rb_entry(mctz->rb_rightmost, 586 struct mem_cgroup_per_node, tree_node); 587 /* 588 * Remove the node now but someone else can add it back, 589 * we will add it back at the end of reclaim to its correct 590 * position in the tree. 591 */ 592 __mem_cgroup_remove_exceeded(mz, mctz); 593 if (!soft_limit_excess(mz->memcg) || 594 !css_tryget(&mz->memcg->css)) 595 goto retry; 596 done: 597 return mz; 598 } 599 600 static struct mem_cgroup_per_node * 601 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 602 { 603 struct mem_cgroup_per_node *mz; 604 605 spin_lock_irq(&mctz->lock); 606 mz = __mem_cgroup_largest_soft_limit_node(mctz); 607 spin_unlock_irq(&mctz->lock); 608 return mz; 609 } 610 611 /* 612 * memcg and lruvec stats flushing 613 * 614 * Many codepaths leading to stats update or read are performance sensitive and 615 * adding stats flushing in such codepaths is not desirable. So, to optimize the 616 * flushing, the kernel does: 617 * 618 * 1) Periodically and asynchronously flush the stats every 2 seconds so that the 619 * rstat update tree does not grow unbounded. 620 * 621 * 2) Flush the stats synchronously on the reader side only when there are more than 622 * (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization 623 * can leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but 624 * only for 2 seconds due to (1).
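 *
 * As a rough worked example (numbers are illustrative, not taken from this
 * tree): if MEMCG_CHARGE_BATCH were 32 and the machine had 8 online CPUs,
 * a reader would tolerate up to 32 * 8 = 256 pending updates before forcing
 * a synchronous flush, while the periodic worker in (1) still bounds the
 * staleness to about 2 seconds.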
625 */ 626 static void flush_memcg_stats_dwork(struct work_struct *w); 627 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork); 628 static DEFINE_SPINLOCK(stats_flush_lock); 629 static DEFINE_PER_CPU(unsigned int, stats_updates); 630 static atomic_t stats_flush_threshold = ATOMIC_INIT(0); 631 632 static inline void memcg_rstat_updated(struct mem_cgroup *memcg) 633 { 634 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); 635 if (!(__this_cpu_inc_return(stats_updates) % MEMCG_CHARGE_BATCH)) 636 atomic_inc(&stats_flush_threshold); 637 } 638 639 static void __mem_cgroup_flush_stats(void) 640 { 641 unsigned long flag; 642 643 if (!spin_trylock_irqsave(&stats_flush_lock, flag)) 644 return; 645 646 cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup); 647 atomic_set(&stats_flush_threshold, 0); 648 spin_unlock_irqrestore(&stats_flush_lock, flag); 649 } 650 651 void mem_cgroup_flush_stats(void) 652 { 653 if (atomic_read(&stats_flush_threshold) > num_online_cpus()) 654 __mem_cgroup_flush_stats(); 655 } 656 657 static void flush_memcg_stats_dwork(struct work_struct *w) 658 { 659 mem_cgroup_flush_stats(); 660 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ); 661 } 662 663 /** 664 * __mod_memcg_state - update cgroup memory statistics 665 * @memcg: the memory cgroup 666 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 667 * @val: delta to add to the counter, can be negative 668 */ 669 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 670 { 671 if (mem_cgroup_disabled()) 672 return; 673 674 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); 675 memcg_rstat_updated(memcg); 676 } 677 678 /* idx can be of type enum memcg_stat_item or node_stat_item. */ 679 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) 680 { 681 long x = 0; 682 int cpu; 683 684 for_each_possible_cpu(cpu) 685 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu); 686 #ifdef CONFIG_SMP 687 if (x < 0) 688 x = 0; 689 #endif 690 return x; 691 } 692 693 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 694 int val) 695 { 696 struct mem_cgroup_per_node *pn; 697 struct mem_cgroup *memcg; 698 699 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 700 memcg = pn->memcg; 701 702 /* Update memcg */ 703 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); 704 705 /* Update lruvec */ 706 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); 707 708 memcg_rstat_updated(memcg); 709 } 710 711 /** 712 * __mod_lruvec_state - update lruvec memory statistics 713 * @lruvec: the lruvec 714 * @idx: the stat item 715 * @val: delta to add to the counter, can be negative 716 * 717 * The lruvec is the intersection of the NUMA node and a cgroup. This 718 * function updates the all three counters that are affected by a 719 * change of state at this level: per-node, per-cgroup, per-lruvec. 
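 *
 * For example (illustrative only), dirtying a file page typically reaches
 * this path as __mod_lruvec_state(lruvec, NR_FILE_DIRTY, 1), which bumps
 * the per-node, per-memcg and per-lruvec copies of NR_FILE_DIRTY together.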
720 */ 721 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 722 int val) 723 { 724 /* Update node */ 725 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 726 727 /* Update memcg and lruvec */ 728 if (!mem_cgroup_disabled()) 729 __mod_memcg_lruvec_state(lruvec, idx, val); 730 } 731 732 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, 733 int val) 734 { 735 struct page *head = compound_head(page); /* rmap on tail pages */ 736 struct mem_cgroup *memcg; 737 pg_data_t *pgdat = page_pgdat(page); 738 struct lruvec *lruvec; 739 740 rcu_read_lock(); 741 memcg = page_memcg(head); 742 /* Untracked pages have no memcg, no lruvec. Update only the node */ 743 if (!memcg) { 744 rcu_read_unlock(); 745 __mod_node_page_state(pgdat, idx, val); 746 return; 747 } 748 749 lruvec = mem_cgroup_lruvec(memcg, pgdat); 750 __mod_lruvec_state(lruvec, idx, val); 751 rcu_read_unlock(); 752 } 753 EXPORT_SYMBOL(__mod_lruvec_page_state); 754 755 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) 756 { 757 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 758 struct mem_cgroup *memcg; 759 struct lruvec *lruvec; 760 761 rcu_read_lock(); 762 memcg = mem_cgroup_from_obj(p); 763 764 /* 765 * Untracked pages have no memcg, no lruvec. Update only the 766 * node. If we reparent the slab objects to the root memcg, 767 * when we free the slab object, we need to update the per-memcg 768 * vmstats to keep it correct for the root memcg. 769 */ 770 if (!memcg) { 771 __mod_node_page_state(pgdat, idx, val); 772 } else { 773 lruvec = mem_cgroup_lruvec(memcg, pgdat); 774 __mod_lruvec_state(lruvec, idx, val); 775 } 776 rcu_read_unlock(); 777 } 778 779 /* 780 * mod_objcg_mlstate() may be called with irq enabled, so 781 * mod_memcg_lruvec_state() should be used. 782 */ 783 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 784 struct pglist_data *pgdat, 785 enum node_stat_item idx, int nr) 786 { 787 struct mem_cgroup *memcg; 788 struct lruvec *lruvec; 789 790 rcu_read_lock(); 791 memcg = obj_cgroup_memcg(objcg); 792 lruvec = mem_cgroup_lruvec(memcg, pgdat); 793 mod_memcg_lruvec_state(lruvec, idx, nr); 794 rcu_read_unlock(); 795 } 796 797 /** 798 * __count_memcg_events - account VM events in a cgroup 799 * @memcg: the memory cgroup 800 * @idx: the event item 801 * @count: the number of events that occurred 802 */ 803 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 804 unsigned long count) 805 { 806 if (mem_cgroup_disabled()) 807 return; 808 809 __this_cpu_add(memcg->vmstats_percpu->events[idx], count); 810 memcg_rstat_updated(memcg); 811 } 812 813 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 814 { 815 return READ_ONCE(memcg->vmstats.events[event]); 816 } 817 818 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 819 { 820 long x = 0; 821 int cpu; 822 823 for_each_possible_cpu(cpu) 824 x += per_cpu(memcg->vmstats_percpu->events[event], cpu); 825 return x; 826 } 827 828 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 829 int nr_pages) 830 { 831 /* pagein of a big page is an event. 
So, ignore page size */ 832 if (nr_pages > 0) 833 __count_memcg_events(memcg, PGPGIN, 1); 834 else { 835 __count_memcg_events(memcg, PGPGOUT, 1); 836 nr_pages = -nr_pages; /* for event */ 837 } 838 839 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 840 } 841 842 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 843 enum mem_cgroup_events_target target) 844 { 845 unsigned long val, next; 846 847 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 848 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 849 /* from time_after() in jiffies.h */ 850 if ((long)(next - val) < 0) { 851 switch (target) { 852 case MEM_CGROUP_TARGET_THRESH: 853 next = val + THRESHOLDS_EVENTS_TARGET; 854 break; 855 case MEM_CGROUP_TARGET_SOFTLIMIT: 856 next = val + SOFTLIMIT_EVENTS_TARGET; 857 break; 858 default: 859 break; 860 } 861 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 862 return true; 863 } 864 return false; 865 } 866 867 /* 868 * Check events in order. 869 * 870 */ 871 static void memcg_check_events(struct mem_cgroup *memcg, int nid) 872 { 873 /* threshold event is triggered in finer grain than soft limit */ 874 if (unlikely(mem_cgroup_event_ratelimit(memcg, 875 MEM_CGROUP_TARGET_THRESH))) { 876 bool do_softlimit; 877 878 do_softlimit = mem_cgroup_event_ratelimit(memcg, 879 MEM_CGROUP_TARGET_SOFTLIMIT); 880 mem_cgroup_threshold(memcg); 881 if (unlikely(do_softlimit)) 882 mem_cgroup_update_tree(memcg, nid); 883 } 884 } 885 886 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 887 { 888 /* 889 * mm_update_next_owner() may clear mm->owner to NULL 890 * if it races with swapoff, page migration, etc. 891 * So this can be called with p == NULL. 892 */ 893 if (unlikely(!p)) 894 return NULL; 895 896 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 897 } 898 EXPORT_SYMBOL(mem_cgroup_from_task); 899 900 static __always_inline struct mem_cgroup *active_memcg(void) 901 { 902 if (!in_task()) 903 return this_cpu_read(int_active_memcg); 904 else 905 return current->active_memcg; 906 } 907 908 /** 909 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 910 * @mm: mm from which memcg should be extracted. It can be NULL. 911 * 912 * Obtain a reference on mm->memcg and returns it if successful. If mm 913 * is NULL, then the memcg is chosen as follows: 914 * 1) The active memcg, if set. 915 * 2) current->mm->memcg, if available 916 * 3) root memcg 917 * If mem_cgroup is disabled, NULL is returned. 918 */ 919 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 920 { 921 struct mem_cgroup *memcg; 922 923 if (mem_cgroup_disabled()) 924 return NULL; 925 926 /* 927 * Page cache insertions can happen without an 928 * actual mm context, e.g. during disk probing 929 * on boot, loopback IO, acct() writes etc. 930 * 931 * No need to css_get on root memcg as the reference 932 * counting is disabled on the root level in the 933 * cgroup core. See CSS_NO_REF. 
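 *
 * Callers own the reference returned below and are expected to drop it
 * with css_put(&memcg->css) (or mem_cgroup_put()) when done; dropping a
 * root reference is harmless for the same CSS_NO_REF reason. A minimal,
 * illustrative usage pattern:
 *
 *	memcg = get_mem_cgroup_from_mm(mm);
 *	... charge or inspect something against memcg ...
 *	css_put(&memcg->css);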
934 */ 935 if (unlikely(!mm)) { 936 memcg = active_memcg(); 937 if (unlikely(memcg)) { 938 /* remote memcg must hold a ref */ 939 css_get(&memcg->css); 940 return memcg; 941 } 942 mm = current->mm; 943 if (unlikely(!mm)) 944 return root_mem_cgroup; 945 } 946 947 rcu_read_lock(); 948 do { 949 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 950 if (unlikely(!memcg)) 951 memcg = root_mem_cgroup; 952 } while (!css_tryget(&memcg->css)); 953 rcu_read_unlock(); 954 return memcg; 955 } 956 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 957 958 static __always_inline bool memcg_kmem_bypass(void) 959 { 960 /* Allow remote memcg charging from any context. */ 961 if (unlikely(active_memcg())) 962 return false; 963 964 /* Memcg to charge can't be determined. */ 965 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD)) 966 return true; 967 968 return false; 969 } 970 971 /** 972 * mem_cgroup_iter - iterate over memory cgroup hierarchy 973 * @root: hierarchy root 974 * @prev: previously returned memcg, NULL on first invocation 975 * @reclaim: cookie for shared reclaim walks, NULL for full walks 976 * 977 * Returns references to children of the hierarchy below @root, or 978 * @root itself, or %NULL after a full round-trip. 979 * 980 * Caller must pass the return value in @prev on subsequent 981 * invocations for reference counting, or use mem_cgroup_iter_break() 982 * to cancel a hierarchy walk before the round-trip is complete. 983 * 984 * Reclaimers can specify a node in @reclaim to divide up the memcgs 985 * in the hierarchy among all concurrent reclaimers operating on the 986 * same node. 987 */ 988 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 989 struct mem_cgroup *prev, 990 struct mem_cgroup_reclaim_cookie *reclaim) 991 { 992 struct mem_cgroup_reclaim_iter *iter; 993 struct cgroup_subsys_state *css = NULL; 994 struct mem_cgroup *memcg = NULL; 995 struct mem_cgroup *pos = NULL; 996 997 if (mem_cgroup_disabled()) 998 return NULL; 999 1000 if (!root) 1001 root = root_mem_cgroup; 1002 1003 if (prev && !reclaim) 1004 pos = prev; 1005 1006 rcu_read_lock(); 1007 1008 if (reclaim) { 1009 struct mem_cgroup_per_node *mz; 1010 1011 mz = root->nodeinfo[reclaim->pgdat->node_id]; 1012 iter = &mz->iter; 1013 1014 if (prev && reclaim->generation != iter->generation) 1015 goto out_unlock; 1016 1017 while (1) { 1018 pos = READ_ONCE(iter->position); 1019 if (!pos || css_tryget(&pos->css)) 1020 break; 1021 /* 1022 * css reference reached zero, so iter->position will 1023 * be cleared by ->css_released. However, we should not 1024 * rely on this happening soon, because ->css_released 1025 * is called from a work queue, and by busy-waiting we 1026 * might block it. So we clear iter->position right 1027 * away. 1028 */ 1029 (void)cmpxchg(&iter->position, pos, NULL); 1030 } 1031 } 1032 1033 if (pos) 1034 css = &pos->css; 1035 1036 for (;;) { 1037 css = css_next_descendant_pre(css, &root->css); 1038 if (!css) { 1039 /* 1040 * Reclaimers share the hierarchy walk, and a 1041 * new one might jump in right at the end of 1042 * the hierarchy - make sure they see at least 1043 * one group and restart from the beginning. 1044 */ 1045 if (!prev) 1046 continue; 1047 break; 1048 } 1049 1050 /* 1051 * Verify the css and acquire a reference. The root 1052 * is provided by the caller, so we know it's alive 1053 * and kicking, and don't take an extra reference. 
1054 */ 1055 memcg = mem_cgroup_from_css(css); 1056 1057 if (css == &root->css) 1058 break; 1059 1060 if (css_tryget(css)) 1061 break; 1062 1063 memcg = NULL; 1064 } 1065 1066 if (reclaim) { 1067 /* 1068 * The position could have already been updated by a competing 1069 * thread, so check that the value hasn't changed since we read 1070 * it to avoid reclaiming from the same cgroup twice. 1071 */ 1072 (void)cmpxchg(&iter->position, pos, memcg); 1073 1074 if (pos) 1075 css_put(&pos->css); 1076 1077 if (!memcg) 1078 iter->generation++; 1079 else if (!prev) 1080 reclaim->generation = iter->generation; 1081 } 1082 1083 out_unlock: 1084 rcu_read_unlock(); 1085 if (prev && prev != root) 1086 css_put(&prev->css); 1087 1088 return memcg; 1089 } 1090 1091 /** 1092 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1093 * @root: hierarchy root 1094 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1095 */ 1096 void mem_cgroup_iter_break(struct mem_cgroup *root, 1097 struct mem_cgroup *prev) 1098 { 1099 if (!root) 1100 root = root_mem_cgroup; 1101 if (prev && prev != root) 1102 css_put(&prev->css); 1103 } 1104 1105 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1106 struct mem_cgroup *dead_memcg) 1107 { 1108 struct mem_cgroup_reclaim_iter *iter; 1109 struct mem_cgroup_per_node *mz; 1110 int nid; 1111 1112 for_each_node(nid) { 1113 mz = from->nodeinfo[nid]; 1114 iter = &mz->iter; 1115 cmpxchg(&iter->position, dead_memcg, NULL); 1116 } 1117 } 1118 1119 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1120 { 1121 struct mem_cgroup *memcg = dead_memcg; 1122 struct mem_cgroup *last; 1123 1124 do { 1125 __invalidate_reclaim_iterators(memcg, dead_memcg); 1126 last = memcg; 1127 } while ((memcg = parent_mem_cgroup(memcg))); 1128 1129 /* 1130 * When cgruop1 non-hierarchy mode is used, 1131 * parent_mem_cgroup() does not walk all the way up to the 1132 * cgroup root (root_mem_cgroup). So we have to handle 1133 * dead_memcg from cgroup root separately. 1134 */ 1135 if (last != root_mem_cgroup) 1136 __invalidate_reclaim_iterators(root_mem_cgroup, 1137 dead_memcg); 1138 } 1139 1140 /** 1141 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1142 * @memcg: hierarchy root 1143 * @fn: function to call for each task 1144 * @arg: argument passed to @fn 1145 * 1146 * This function iterates over tasks attached to @memcg or to any of its 1147 * descendants and calls @fn for each task. If @fn returns a non-zero 1148 * value, the function breaks the iteration loop and returns the value. 1149 * Otherwise, it will iterate over all tasks and return 0. 1150 * 1151 * This function must not be called for the root memory cgroup. 
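 *
 * A minimal illustrative caller (names are hypothetical); returning 0 from
 * the callback keeps the iteration going, any non-zero value stops it:
 *
 *	static int count_one(struct task_struct *task, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_one, &nr);	(memcg must not be root)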
1152 */ 1153 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1154 int (*fn)(struct task_struct *, void *), void *arg) 1155 { 1156 struct mem_cgroup *iter; 1157 int ret = 0; 1158 1159 BUG_ON(memcg == root_mem_cgroup); 1160 1161 for_each_mem_cgroup_tree(iter, memcg) { 1162 struct css_task_iter it; 1163 struct task_struct *task; 1164 1165 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1166 while (!ret && (task = css_task_iter_next(&it))) 1167 ret = fn(task, arg); 1168 css_task_iter_end(&it); 1169 if (ret) { 1170 mem_cgroup_iter_break(memcg, iter); 1171 break; 1172 } 1173 } 1174 return ret; 1175 } 1176 1177 #ifdef CONFIG_DEBUG_VM 1178 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) 1179 { 1180 struct mem_cgroup *memcg; 1181 1182 if (mem_cgroup_disabled()) 1183 return; 1184 1185 memcg = folio_memcg(folio); 1186 1187 if (!memcg) 1188 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio); 1189 else 1190 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio); 1191 } 1192 #endif 1193 1194 /** 1195 * folio_lruvec_lock - Lock the lruvec for a folio. 1196 * @folio: Pointer to the folio. 1197 * 1198 * These functions are safe to use under any of the following conditions: 1199 * - folio locked 1200 * - folio_test_lru false 1201 * - folio_memcg_lock() 1202 * - folio frozen (refcount of 0) 1203 * 1204 * Return: The lruvec this folio is on with its lock held. 1205 */ 1206 struct lruvec *folio_lruvec_lock(struct folio *folio) 1207 { 1208 struct lruvec *lruvec = folio_lruvec(folio); 1209 1210 spin_lock(&lruvec->lru_lock); 1211 lruvec_memcg_debug(lruvec, folio); 1212 1213 return lruvec; 1214 } 1215 1216 /** 1217 * folio_lruvec_lock_irq - Lock the lruvec for a folio. 1218 * @folio: Pointer to the folio. 1219 * 1220 * These functions are safe to use under any of the following conditions: 1221 * - folio locked 1222 * - folio_test_lru false 1223 * - folio_memcg_lock() 1224 * - folio frozen (refcount of 0) 1225 * 1226 * Return: The lruvec this folio is on with its lock held and interrupts 1227 * disabled. 1228 */ 1229 struct lruvec *folio_lruvec_lock_irq(struct folio *folio) 1230 { 1231 struct lruvec *lruvec = folio_lruvec(folio); 1232 1233 spin_lock_irq(&lruvec->lru_lock); 1234 lruvec_memcg_debug(lruvec, folio); 1235 1236 return lruvec; 1237 } 1238 1239 /** 1240 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio. 1241 * @folio: Pointer to the folio. 1242 * @flags: Pointer to irqsave flags. 1243 * 1244 * These functions are safe to use under any of the following conditions: 1245 * - folio locked 1246 * - folio_test_lru false 1247 * - folio_memcg_lock() 1248 * - folio frozen (refcount of 0) 1249 * 1250 * Return: The lruvec this folio is on with its lock held and interrupts 1251 * disabled. 
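 *
 * The caller is expected to drop the lock with the matching irqrestore
 * helper (unlock_page_lruvec_irqrestore() in this tree, assuming that
 * inline from memcontrol.h), passing back the flags saved here.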
1252 */ 1253 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, 1254 unsigned long *flags) 1255 { 1256 struct lruvec *lruvec = folio_lruvec(folio); 1257 1258 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1259 lruvec_memcg_debug(lruvec, folio); 1260 1261 return lruvec; 1262 } 1263 1264 /** 1265 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1266 * @lruvec: mem_cgroup per zone lru vector 1267 * @lru: index of lru list the page is sitting on 1268 * @zid: zone id of the accounted pages 1269 * @nr_pages: positive when adding or negative when removing 1270 * 1271 * This function must be called under lru_lock, just before a page is added 1272 * to or just after a page is removed from an lru list (that ordering being 1273 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1274 */ 1275 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1276 int zid, int nr_pages) 1277 { 1278 struct mem_cgroup_per_node *mz; 1279 unsigned long *lru_size; 1280 long size; 1281 1282 if (mem_cgroup_disabled()) 1283 return; 1284 1285 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1286 lru_size = &mz->lru_zone_size[zid][lru]; 1287 1288 if (nr_pages < 0) 1289 *lru_size += nr_pages; 1290 1291 size = *lru_size; 1292 if (WARN_ONCE(size < 0, 1293 "%s(%p, %d, %d): lru_size %ld\n", 1294 __func__, lruvec, lru, nr_pages, size)) { 1295 VM_BUG_ON(1); 1296 *lru_size = 0; 1297 } 1298 1299 if (nr_pages > 0) 1300 *lru_size += nr_pages; 1301 } 1302 1303 /** 1304 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1305 * @memcg: the memory cgroup 1306 * 1307 * Returns the maximum amount of memory @mem can be charged with, in 1308 * pages. 1309 */ 1310 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1311 { 1312 unsigned long margin = 0; 1313 unsigned long count; 1314 unsigned long limit; 1315 1316 count = page_counter_read(&memcg->memory); 1317 limit = READ_ONCE(memcg->memory.max); 1318 if (count < limit) 1319 margin = limit - count; 1320 1321 if (do_memsw_account()) { 1322 count = page_counter_read(&memcg->memsw); 1323 limit = READ_ONCE(memcg->memsw.max); 1324 if (count < limit) 1325 margin = min(margin, limit - count); 1326 else 1327 margin = 0; 1328 } 1329 1330 return margin; 1331 } 1332 1333 /* 1334 * A routine for checking "mem" is under move_account() or not. 1335 * 1336 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1337 * moving cgroups. This is for waiting at high-memory pressure 1338 * caused by "move". 1339 */ 1340 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1341 { 1342 struct mem_cgroup *from; 1343 struct mem_cgroup *to; 1344 bool ret = false; 1345 /* 1346 * Unlike task_move routines, we access mc.to, mc.from not under 1347 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1348 */ 1349 spin_lock(&mc.lock); 1350 from = mc.from; 1351 to = mc.to; 1352 if (!from) 1353 goto unlock; 1354 1355 ret = mem_cgroup_is_descendant(from, memcg) || 1356 mem_cgroup_is_descendant(to, memcg); 1357 unlock: 1358 spin_unlock(&mc.lock); 1359 return ret; 1360 } 1361 1362 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1363 { 1364 if (mc.moving_task && current != mc.moving_task) { 1365 if (mem_cgroup_under_move(memcg)) { 1366 DEFINE_WAIT(wait); 1367 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1368 /* moving charge context might have finished. 
*/ 1369 if (mc.moving_task) 1370 schedule(); 1371 finish_wait(&mc.waitq, &wait); 1372 return true; 1373 } 1374 } 1375 return false; 1376 } 1377 1378 struct memory_stat { 1379 const char *name; 1380 unsigned int idx; 1381 }; 1382 1383 static const struct memory_stat memory_stats[] = { 1384 { "anon", NR_ANON_MAPPED }, 1385 { "file", NR_FILE_PAGES }, 1386 { "kernel_stack", NR_KERNEL_STACK_KB }, 1387 { "pagetables", NR_PAGETABLE }, 1388 { "percpu", MEMCG_PERCPU_B }, 1389 { "sock", MEMCG_SOCK }, 1390 { "shmem", NR_SHMEM }, 1391 { "file_mapped", NR_FILE_MAPPED }, 1392 { "file_dirty", NR_FILE_DIRTY }, 1393 { "file_writeback", NR_WRITEBACK }, 1394 #ifdef CONFIG_SWAP 1395 { "swapcached", NR_SWAPCACHE }, 1396 #endif 1397 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1398 { "anon_thp", NR_ANON_THPS }, 1399 { "file_thp", NR_FILE_THPS }, 1400 { "shmem_thp", NR_SHMEM_THPS }, 1401 #endif 1402 { "inactive_anon", NR_INACTIVE_ANON }, 1403 { "active_anon", NR_ACTIVE_ANON }, 1404 { "inactive_file", NR_INACTIVE_FILE }, 1405 { "active_file", NR_ACTIVE_FILE }, 1406 { "unevictable", NR_UNEVICTABLE }, 1407 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, 1408 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, 1409 1410 /* The memory events */ 1411 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON }, 1412 { "workingset_refault_file", WORKINGSET_REFAULT_FILE }, 1413 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON }, 1414 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE }, 1415 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON }, 1416 { "workingset_restore_file", WORKINGSET_RESTORE_FILE }, 1417 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM }, 1418 }; 1419 1420 /* Translate stat items to the correct unit for memory.stat output */ 1421 static int memcg_page_state_unit(int item) 1422 { 1423 switch (item) { 1424 case MEMCG_PERCPU_B: 1425 case NR_SLAB_RECLAIMABLE_B: 1426 case NR_SLAB_UNRECLAIMABLE_B: 1427 case WORKINGSET_REFAULT_ANON: 1428 case WORKINGSET_REFAULT_FILE: 1429 case WORKINGSET_ACTIVATE_ANON: 1430 case WORKINGSET_ACTIVATE_FILE: 1431 case WORKINGSET_RESTORE_ANON: 1432 case WORKINGSET_RESTORE_FILE: 1433 case WORKINGSET_NODERECLAIM: 1434 return 1; 1435 case NR_KERNEL_STACK_KB: 1436 return SZ_1K; 1437 default: 1438 return PAGE_SIZE; 1439 } 1440 } 1441 1442 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, 1443 int item) 1444 { 1445 return memcg_page_state(memcg, item) * memcg_page_state_unit(item); 1446 } 1447 1448 static char *memory_stat_format(struct mem_cgroup *memcg) 1449 { 1450 struct seq_buf s; 1451 int i; 1452 1453 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); 1454 if (!s.buffer) 1455 return NULL; 1456 1457 /* 1458 * Provide statistics on the state of the memory subsystem as 1459 * well as cumulative event counters that show past behavior. 
1460 * 1461 * This list is ordered following a combination of these gradients: 1462 * 1) generic big picture -> specifics and details 1463 * 2) reflecting userspace activity -> reflecting kernel heuristics 1464 * 1465 * Current memory state: 1466 */ 1467 mem_cgroup_flush_stats(); 1468 1469 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1470 u64 size; 1471 1472 size = memcg_page_state_output(memcg, memory_stats[i].idx); 1473 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); 1474 1475 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1476 size += memcg_page_state_output(memcg, 1477 NR_SLAB_RECLAIMABLE_B); 1478 seq_buf_printf(&s, "slab %llu\n", size); 1479 } 1480 } 1481 1482 /* Accumulated memory events */ 1483 1484 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT), 1485 memcg_events(memcg, PGFAULT)); 1486 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), 1487 memcg_events(memcg, PGMAJFAULT)); 1488 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL), 1489 memcg_events(memcg, PGREFILL)); 1490 seq_buf_printf(&s, "pgscan %lu\n", 1491 memcg_events(memcg, PGSCAN_KSWAPD) + 1492 memcg_events(memcg, PGSCAN_DIRECT)); 1493 seq_buf_printf(&s, "pgsteal %lu\n", 1494 memcg_events(memcg, PGSTEAL_KSWAPD) + 1495 memcg_events(memcg, PGSTEAL_DIRECT)); 1496 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE), 1497 memcg_events(memcg, PGACTIVATE)); 1498 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE), 1499 memcg_events(memcg, PGDEACTIVATE)); 1500 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE), 1501 memcg_events(memcg, PGLAZYFREE)); 1502 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED), 1503 memcg_events(memcg, PGLAZYFREED)); 1504 1505 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1506 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC), 1507 memcg_events(memcg, THP_FAULT_ALLOC)); 1508 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC), 1509 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 1510 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1511 1512 /* The above should easily fit into one page */ 1513 WARN_ON_ONCE(seq_buf_has_overflowed(&s)); 1514 1515 return s.buffer; 1516 } 1517 1518 #define K(x) ((x) << (PAGE_SHIFT-10)) 1519 /** 1520 * mem_cgroup_print_oom_context: Print OOM information relevant to 1521 * memory controller. 1522 * @memcg: The memory cgroup that went over limit 1523 * @p: Task that is going to be killed 1524 * 1525 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1526 * enabled 1527 */ 1528 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1529 { 1530 rcu_read_lock(); 1531 1532 if (memcg) { 1533 pr_cont(",oom_memcg="); 1534 pr_cont_cgroup_path(memcg->css.cgroup); 1535 } else 1536 pr_cont(",global_oom"); 1537 if (p) { 1538 pr_cont(",task_memcg="); 1539 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1540 } 1541 rcu_read_unlock(); 1542 } 1543 1544 /** 1545 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1546 * memory controller. 
1547 * @memcg: The memory cgroup that went over limit 1548 */ 1549 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1550 { 1551 char *buf; 1552 1553 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1554 K((u64)page_counter_read(&memcg->memory)), 1555 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1556 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1557 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1558 K((u64)page_counter_read(&memcg->swap)), 1559 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1560 else { 1561 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1562 K((u64)page_counter_read(&memcg->memsw)), 1563 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1564 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1565 K((u64)page_counter_read(&memcg->kmem)), 1566 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1567 } 1568 1569 pr_info("Memory cgroup stats for "); 1570 pr_cont_cgroup_path(memcg->css.cgroup); 1571 pr_cont(":"); 1572 buf = memory_stat_format(memcg); 1573 if (!buf) 1574 return; 1575 pr_info("%s", buf); 1576 kfree(buf); 1577 } 1578 1579 /* 1580 * Return the memory (and swap, if configured) limit for a memcg. 1581 */ 1582 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1583 { 1584 unsigned long max = READ_ONCE(memcg->memory.max); 1585 1586 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 1587 if (mem_cgroup_swappiness(memcg)) 1588 max += min(READ_ONCE(memcg->swap.max), 1589 (unsigned long)total_swap_pages); 1590 } else { /* v1 */ 1591 if (mem_cgroup_swappiness(memcg)) { 1592 /* Calculate swap excess capacity from memsw limit */ 1593 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1594 1595 max += min(swap, (unsigned long)total_swap_pages); 1596 } 1597 } 1598 return max; 1599 } 1600 1601 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) 1602 { 1603 return page_counter_read(&memcg->memory); 1604 } 1605 1606 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1607 int order) 1608 { 1609 struct oom_control oc = { 1610 .zonelist = NULL, 1611 .nodemask = NULL, 1612 .memcg = memcg, 1613 .gfp_mask = gfp_mask, 1614 .order = order, 1615 }; 1616 bool ret = true; 1617 1618 if (mutex_lock_killable(&oom_lock)) 1619 return true; 1620 1621 if (mem_cgroup_margin(memcg) >= (1 << order)) 1622 goto unlock; 1623 1624 /* 1625 * A few threads which were not waiting at mutex_lock_killable() can 1626 * fail to bail out. Therefore, check again after holding oom_lock. 1627 */ 1628 ret = task_is_dying() || out_of_memory(&oc); 1629 1630 unlock: 1631 mutex_unlock(&oom_lock); 1632 return ret; 1633 } 1634 1635 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1636 pg_data_t *pgdat, 1637 gfp_t gfp_mask, 1638 unsigned long *total_scanned) 1639 { 1640 struct mem_cgroup *victim = NULL; 1641 int total = 0; 1642 int loop = 0; 1643 unsigned long excess; 1644 unsigned long nr_scanned; 1645 struct mem_cgroup_reclaim_cookie reclaim = { 1646 .pgdat = pgdat, 1647 }; 1648 1649 excess = soft_limit_excess(root_memcg); 1650 1651 while (1) { 1652 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1653 if (!victim) { 1654 loop++; 1655 if (loop >= 2) { 1656 /* 1657 * If we have not been able to reclaim 1658 * anything, it might because there are 1659 * no reclaimable pages under this hierarchy 1660 */ 1661 if (!total) 1662 break; 1663 /* 1664 * We want to do more targeted reclaim. 
1665 * excess >> 2 is not so excessive as to 1666 * reclaim too much, nor so small that we keep 1667 * coming back to reclaim from this cgroup 1668 */ 1669 if (total >= (excess >> 2) || 1670 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1671 break; 1672 } 1673 continue; 1674 } 1675 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1676 pgdat, &nr_scanned); 1677 *total_scanned += nr_scanned; 1678 if (!soft_limit_excess(root_memcg)) 1679 break; 1680 } 1681 mem_cgroup_iter_break(root_memcg, victim); 1682 return total; 1683 } 1684 1685 #ifdef CONFIG_LOCKDEP 1686 static struct lockdep_map memcg_oom_lock_dep_map = { 1687 .name = "memcg_oom_lock", 1688 }; 1689 #endif 1690 1691 static DEFINE_SPINLOCK(memcg_oom_lock); 1692 1693 /* 1694 * Check whether the OOM killer is already running under our hierarchy. 1695 * If someone else is running it, return false. 1696 */ 1697 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1698 { 1699 struct mem_cgroup *iter, *failed = NULL; 1700 1701 spin_lock(&memcg_oom_lock); 1702 1703 for_each_mem_cgroup_tree(iter, memcg) { 1704 if (iter->oom_lock) { 1705 /* 1706 * This subtree of our hierarchy is already locked, 1707 * so we cannot grant the lock. 1708 */ 1709 failed = iter; 1710 mem_cgroup_iter_break(memcg, iter); 1711 break; 1712 } else 1713 iter->oom_lock = true; 1714 } 1715 1716 if (failed) { 1717 /* 1718 * OK, we failed to lock the whole subtree, so we have 1719 * to clean up what we already set up, up to the failing subtree 1720 */ 1721 for_each_mem_cgroup_tree(iter, memcg) { 1722 if (iter == failed) { 1723 mem_cgroup_iter_break(memcg, iter); 1724 break; 1725 } 1726 iter->oom_lock = false; 1727 } 1728 } else 1729 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1730 1731 spin_unlock(&memcg_oom_lock); 1732 1733 return !failed; 1734 } 1735 1736 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1737 { 1738 struct mem_cgroup *iter; 1739 1740 spin_lock(&memcg_oom_lock); 1741 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_); 1742 for_each_mem_cgroup_tree(iter, memcg) 1743 iter->oom_lock = false; 1744 spin_unlock(&memcg_oom_lock); 1745 } 1746 1747 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1748 { 1749 struct mem_cgroup *iter; 1750 1751 spin_lock(&memcg_oom_lock); 1752 for_each_mem_cgroup_tree(iter, memcg) 1753 iter->under_oom++; 1754 spin_unlock(&memcg_oom_lock); 1755 } 1756 1757 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1758 { 1759 struct mem_cgroup *iter; 1760 1761 /* 1762 * Be careful about under_oom underflows because a child memcg 1763 * could have been added after mem_cgroup_mark_under_oom.
1764 */ 1765 spin_lock(&memcg_oom_lock); 1766 for_each_mem_cgroup_tree(iter, memcg) 1767 if (iter->under_oom > 0) 1768 iter->under_oom--; 1769 spin_unlock(&memcg_oom_lock); 1770 } 1771 1772 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1773 1774 struct oom_wait_info { 1775 struct mem_cgroup *memcg; 1776 wait_queue_entry_t wait; 1777 }; 1778 1779 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1780 unsigned mode, int sync, void *arg) 1781 { 1782 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1783 struct mem_cgroup *oom_wait_memcg; 1784 struct oom_wait_info *oom_wait_info; 1785 1786 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1787 oom_wait_memcg = oom_wait_info->memcg; 1788 1789 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1790 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1791 return 0; 1792 return autoremove_wake_function(wait, mode, sync, arg); 1793 } 1794 1795 static void memcg_oom_recover(struct mem_cgroup *memcg) 1796 { 1797 /* 1798 * For the following lockless ->under_oom test, the only required 1799 * guarantee is that it must see the state asserted by an OOM when 1800 * this function is called as a result of userland actions 1801 * triggered by the notification of the OOM. This is trivially 1802 * achieved by invoking mem_cgroup_mark_under_oom() before 1803 * triggering notification. 1804 */ 1805 if (memcg && memcg->under_oom) 1806 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1807 } 1808 1809 enum oom_status { 1810 OOM_SUCCESS, 1811 OOM_FAILED, 1812 OOM_ASYNC, 1813 OOM_SKIPPED 1814 }; 1815 1816 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1817 { 1818 enum oom_status ret; 1819 bool locked; 1820 1821 if (order > PAGE_ALLOC_COSTLY_ORDER) 1822 return OOM_SKIPPED; 1823 1824 memcg_memory_event(memcg, MEMCG_OOM); 1825 1826 /* 1827 * We are in the middle of the charge context here, so we 1828 * don't want to block when potentially sitting on a callstack 1829 * that holds all kinds of filesystem and mm locks. 1830 * 1831 * cgroup1 allows disabling the OOM killer and waiting for outside 1832 * handling until the charge can succeed; remember the context and put 1833 * the task to sleep at the end of the page fault when all locks are 1834 * released. 1835 * 1836 * On the other hand, in-kernel OOM killer allows for an async victim 1837 * memory reclaim (oom_reaper) and that means that we are not solely 1838 * relying on the oom victim to make a forward progress and we can 1839 * invoke the oom killer here. 1840 * 1841 * Please note that mem_cgroup_out_of_memory might fail to find a 1842 * victim and then we have to bail out from the charge path. 
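 *
 * To summarize the cases handled around this comment: a costly-order
 * allocation (checked above), or oom_kill_disable outside of a user fault,
 * skips the killer entirely (OOM_SKIPPED); oom_kill_disable during a user
 * fault records the OOM state and defers to mem_cgroup_oom_synchronize()
 * at the end of the page fault (OOM_ASYNC); otherwise the killer is invoked
 * right here and the outcome is reported as OOM_SUCCESS or OOM_FAILED.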
1843 */ 1844 if (memcg->oom_kill_disable) { 1845 if (!current->in_user_fault) 1846 return OOM_SKIPPED; 1847 css_get(&memcg->css); 1848 current->memcg_in_oom = memcg; 1849 current->memcg_oom_gfp_mask = mask; 1850 current->memcg_oom_order = order; 1851 1852 return OOM_ASYNC; 1853 } 1854 1855 mem_cgroup_mark_under_oom(memcg); 1856 1857 locked = mem_cgroup_oom_trylock(memcg); 1858 1859 if (locked) 1860 mem_cgroup_oom_notify(memcg); 1861 1862 mem_cgroup_unmark_under_oom(memcg); 1863 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1864 ret = OOM_SUCCESS; 1865 else 1866 ret = OOM_FAILED; 1867 1868 if (locked) 1869 mem_cgroup_oom_unlock(memcg); 1870 1871 return ret; 1872 } 1873 1874 /** 1875 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1876 * @handle: actually kill/wait or just clean up the OOM state 1877 * 1878 * This has to be called at the end of a page fault if the memcg OOM 1879 * handler was enabled. 1880 * 1881 * Memcg supports userspace OOM handling where failed allocations must 1882 * sleep on a waitqueue until the userspace task resolves the 1883 * situation. Sleeping directly in the charge context with all kinds 1884 * of locks held is not a good idea, instead we remember an OOM state 1885 * in the task and mem_cgroup_oom_synchronize() has to be called at 1886 * the end of the page fault to complete the OOM handling. 1887 * 1888 * Returns %true if an ongoing memcg OOM situation was detected and 1889 * completed, %false otherwise. 1890 */ 1891 bool mem_cgroup_oom_synchronize(bool handle) 1892 { 1893 struct mem_cgroup *memcg = current->memcg_in_oom; 1894 struct oom_wait_info owait; 1895 bool locked; 1896 1897 /* OOM is global, do not handle */ 1898 if (!memcg) 1899 return false; 1900 1901 if (!handle) 1902 goto cleanup; 1903 1904 owait.memcg = memcg; 1905 owait.wait.flags = 0; 1906 owait.wait.func = memcg_oom_wake_function; 1907 owait.wait.private = current; 1908 INIT_LIST_HEAD(&owait.wait.entry); 1909 1910 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 1911 mem_cgroup_mark_under_oom(memcg); 1912 1913 locked = mem_cgroup_oom_trylock(memcg); 1914 1915 if (locked) 1916 mem_cgroup_oom_notify(memcg); 1917 1918 if (locked && !memcg->oom_kill_disable) { 1919 mem_cgroup_unmark_under_oom(memcg); 1920 finish_wait(&memcg_oom_waitq, &owait.wait); 1921 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 1922 current->memcg_oom_order); 1923 } else { 1924 schedule(); 1925 mem_cgroup_unmark_under_oom(memcg); 1926 finish_wait(&memcg_oom_waitq, &owait.wait); 1927 } 1928 1929 if (locked) { 1930 mem_cgroup_oom_unlock(memcg); 1931 /* 1932 * There is no guarantee that an OOM-lock contender 1933 * sees the wakeups triggered by the OOM kill 1934 * uncharges. Wake any sleepers explicitly. 1935 */ 1936 memcg_oom_recover(memcg); 1937 } 1938 cleanup: 1939 current->memcg_in_oom = NULL; 1940 css_put(&memcg->css); 1941 return true; 1942 } 1943 1944 /** 1945 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 1946 * @victim: task to be killed by the OOM killer 1947 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 1948 * 1949 * Returns a pointer to a memory cgroup, which has to be cleaned up 1950 * by killing all belonging OOM-killable tasks. 1951 * 1952 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 
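 *
 * Roughly how the OOM killer is expected to use this (an illustrative
 * sketch, not a verbatim copy of the caller):
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 *	if (oom_group) {
 *		mem_cgroup_print_oom_group(oom_group);
 *		... kill every eligible task in oom_group ...
 *		mem_cgroup_put(oom_group);
 *	}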
1953 */ 1954 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 1955 struct mem_cgroup *oom_domain) 1956 { 1957 struct mem_cgroup *oom_group = NULL; 1958 struct mem_cgroup *memcg; 1959 1960 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1961 return NULL; 1962 1963 if (!oom_domain) 1964 oom_domain = root_mem_cgroup; 1965 1966 rcu_read_lock(); 1967 1968 memcg = mem_cgroup_from_task(victim); 1969 if (memcg == root_mem_cgroup) 1970 goto out; 1971 1972 /* 1973 * If the victim task has been asynchronously moved to a different 1974 * memory cgroup, we might end up killing tasks outside oom_domain. 1975 * In this case it's better to ignore memory.group.oom. 1976 */ 1977 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 1978 goto out; 1979 1980 /* 1981 * Traverse the memory cgroup hierarchy from the victim task's 1982 * cgroup up to the OOMing cgroup (or root) to find the 1983 * highest-level memory cgroup with oom.group set. 1984 */ 1985 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1986 if (memcg->oom_group) 1987 oom_group = memcg; 1988 1989 if (memcg == oom_domain) 1990 break; 1991 } 1992 1993 if (oom_group) 1994 css_get(&oom_group->css); 1995 out: 1996 rcu_read_unlock(); 1997 1998 return oom_group; 1999 } 2000 2001 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2002 { 2003 pr_info("Tasks in "); 2004 pr_cont_cgroup_path(memcg->css.cgroup); 2005 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2006 } 2007 2008 /** 2009 * folio_memcg_lock - Bind a folio to its memcg. 2010 * @folio: The folio. 2011 * 2012 * This function prevents unlocked LRU folios from being moved to 2013 * another cgroup. 2014 * 2015 * It ensures lifetime of the bound memcg. The caller is responsible 2016 * for the lifetime of the folio. 2017 */ 2018 void folio_memcg_lock(struct folio *folio) 2019 { 2020 struct mem_cgroup *memcg; 2021 unsigned long flags; 2022 2023 /* 2024 * The RCU lock is held throughout the transaction. The fast 2025 * path can get away without acquiring the memcg->move_lock 2026 * because page moving starts with an RCU grace period. 2027 */ 2028 rcu_read_lock(); 2029 2030 if (mem_cgroup_disabled()) 2031 return; 2032 again: 2033 memcg = folio_memcg(folio); 2034 if (unlikely(!memcg)) 2035 return; 2036 2037 #ifdef CONFIG_PROVE_LOCKING 2038 local_irq_save(flags); 2039 might_lock(&memcg->move_lock); 2040 local_irq_restore(flags); 2041 #endif 2042 2043 if (atomic_read(&memcg->moving_account) <= 0) 2044 return; 2045 2046 spin_lock_irqsave(&memcg->move_lock, flags); 2047 if (memcg != folio_memcg(folio)) { 2048 spin_unlock_irqrestore(&memcg->move_lock, flags); 2049 goto again; 2050 } 2051 2052 /* 2053 * When charge migration first begins, we can have multiple 2054 * critical sections holding the fast-path RCU lock and one 2055 * holding the slowpath move_lock. Track the task who has the 2056 * move_lock for unlock_page_memcg(). 
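 *
 * Callers pair this function with folio_memcg_unlock() (or with the
 * page-based lock_page_memcg()/unlock_page_memcg() wrappers below):
 *
 *	folio_memcg_lock(folio);
 *	... update memcg-accounted folio state ...
 *	folio_memcg_unlock(folio);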
2057 */ 2058 memcg->move_lock_task = current; 2059 memcg->move_lock_flags = flags; 2060 } 2061 EXPORT_SYMBOL(folio_memcg_lock); 2062 2063 void lock_page_memcg(struct page *page) 2064 { 2065 folio_memcg_lock(page_folio(page)); 2066 } 2067 EXPORT_SYMBOL(lock_page_memcg); 2068 2069 static void __folio_memcg_unlock(struct mem_cgroup *memcg) 2070 { 2071 if (memcg && memcg->move_lock_task == current) { 2072 unsigned long flags = memcg->move_lock_flags; 2073 2074 memcg->move_lock_task = NULL; 2075 memcg->move_lock_flags = 0; 2076 2077 spin_unlock_irqrestore(&memcg->move_lock, flags); 2078 } 2079 2080 rcu_read_unlock(); 2081 } 2082 2083 /** 2084 * folio_memcg_unlock - Release the binding between a folio and its memcg. 2085 * @folio: The folio. 2086 * 2087 * This releases the binding created by folio_memcg_lock(). This does 2088 * not change the accounting of this folio to its memcg, but it does 2089 * permit others to change it. 2090 */ 2091 void folio_memcg_unlock(struct folio *folio) 2092 { 2093 __folio_memcg_unlock(folio_memcg(folio)); 2094 } 2095 EXPORT_SYMBOL(folio_memcg_unlock); 2096 2097 void unlock_page_memcg(struct page *page) 2098 { 2099 folio_memcg_unlock(page_folio(page)); 2100 } 2101 EXPORT_SYMBOL(unlock_page_memcg); 2102 2103 struct obj_stock { 2104 #ifdef CONFIG_MEMCG_KMEM 2105 struct obj_cgroup *cached_objcg; 2106 struct pglist_data *cached_pgdat; 2107 unsigned int nr_bytes; 2108 int nr_slab_reclaimable_b; 2109 int nr_slab_unreclaimable_b; 2110 #else 2111 int dummy[0]; 2112 #endif 2113 }; 2114 2115 struct memcg_stock_pcp { 2116 struct mem_cgroup *cached; /* this never be root cgroup */ 2117 unsigned int nr_pages; 2118 struct obj_stock task_obj; 2119 struct obj_stock irq_obj; 2120 2121 struct work_struct work; 2122 unsigned long flags; 2123 #define FLUSHING_CACHED_CHARGE 0 2124 }; 2125 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2126 static DEFINE_MUTEX(percpu_charge_mutex); 2127 2128 #ifdef CONFIG_MEMCG_KMEM 2129 static void drain_obj_stock(struct obj_stock *stock); 2130 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2131 struct mem_cgroup *root_memcg); 2132 2133 #else 2134 static inline void drain_obj_stock(struct obj_stock *stock) 2135 { 2136 } 2137 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2138 struct mem_cgroup *root_memcg) 2139 { 2140 return false; 2141 } 2142 #endif 2143 2144 /* 2145 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable 2146 * sequence used in this case to access content from object stock is slow. 2147 * To optimize for user context access, there are now two object stocks for 2148 * task context and interrupt context access respectively. 2149 * 2150 * The task context object stock can be accessed by disabling preemption only 2151 * which is cheap in non-preempt kernel. The interrupt context object stock 2152 * can only be accessed after disabling interrupt. User context code can 2153 * access interrupt object stock, but not vice versa. 
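 *
 * The pair below is always used the same way in this file, e.g.:
 *
 *	unsigned long flags;
 *	struct obj_stock *stock = get_obj_stock(&flags);
 *
 *	... manipulate *stock ...
 *
 *	put_obj_stock(flags);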
2154 */ 2155 static inline struct obj_stock *get_obj_stock(unsigned long *pflags) 2156 { 2157 struct memcg_stock_pcp *stock; 2158 2159 if (likely(in_task())) { 2160 *pflags = 0UL; 2161 preempt_disable(); 2162 stock = this_cpu_ptr(&memcg_stock); 2163 return &stock->task_obj; 2164 } 2165 2166 local_irq_save(*pflags); 2167 stock = this_cpu_ptr(&memcg_stock); 2168 return &stock->irq_obj; 2169 } 2170 2171 static inline void put_obj_stock(unsigned long flags) 2172 { 2173 if (likely(in_task())) 2174 preempt_enable(); 2175 else 2176 local_irq_restore(flags); 2177 } 2178 2179 /** 2180 * consume_stock: Try to consume stocked charge on this cpu. 2181 * @memcg: memcg to consume from. 2182 * @nr_pages: how many pages to charge. 2183 * 2184 * The charges will only happen if @memcg matches the current cpu's memcg 2185 * stock, and at least @nr_pages are available in that stock. Failure to 2186 * service an allocation will refill the stock. 2187 * 2188 * returns true if successful, false otherwise. 2189 */ 2190 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2191 { 2192 struct memcg_stock_pcp *stock; 2193 unsigned long flags; 2194 bool ret = false; 2195 2196 if (nr_pages > MEMCG_CHARGE_BATCH) 2197 return ret; 2198 2199 local_irq_save(flags); 2200 2201 stock = this_cpu_ptr(&memcg_stock); 2202 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2203 stock->nr_pages -= nr_pages; 2204 ret = true; 2205 } 2206 2207 local_irq_restore(flags); 2208 2209 return ret; 2210 } 2211 2212 /* 2213 * Returns stocks cached in percpu and reset cached information. 2214 */ 2215 static void drain_stock(struct memcg_stock_pcp *stock) 2216 { 2217 struct mem_cgroup *old = stock->cached; 2218 2219 if (!old) 2220 return; 2221 2222 if (stock->nr_pages) { 2223 page_counter_uncharge(&old->memory, stock->nr_pages); 2224 if (do_memsw_account()) 2225 page_counter_uncharge(&old->memsw, stock->nr_pages); 2226 stock->nr_pages = 0; 2227 } 2228 2229 css_put(&old->css); 2230 stock->cached = NULL; 2231 } 2232 2233 static void drain_local_stock(struct work_struct *dummy) 2234 { 2235 struct memcg_stock_pcp *stock; 2236 unsigned long flags; 2237 2238 /* 2239 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs. 2240 * drain_stock races is that we always operate on local CPU stock 2241 * here with IRQ disabled 2242 */ 2243 local_irq_save(flags); 2244 2245 stock = this_cpu_ptr(&memcg_stock); 2246 drain_obj_stock(&stock->irq_obj); 2247 if (in_task()) 2248 drain_obj_stock(&stock->task_obj); 2249 drain_stock(stock); 2250 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2251 2252 local_irq_restore(flags); 2253 } 2254 2255 /* 2256 * Cache charges(val) to local per_cpu area. 2257 * This will be consumed by consume_stock() function, later. 2258 */ 2259 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2260 { 2261 struct memcg_stock_pcp *stock; 2262 unsigned long flags; 2263 2264 local_irq_save(flags); 2265 2266 stock = this_cpu_ptr(&memcg_stock); 2267 if (stock->cached != memcg) { /* reset if necessary */ 2268 drain_stock(stock); 2269 css_get(&memcg->css); 2270 stock->cached = memcg; 2271 } 2272 stock->nr_pages += nr_pages; 2273 2274 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2275 drain_stock(stock); 2276 2277 local_irq_restore(flags); 2278 } 2279 2280 /* 2281 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2282 * of the hierarchy under it. 
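 *
 * Callers include the charge slow path (try_charge_memcg()), limit
 * shrinking (mem_cgroup_resize_max()) and mem_cgroup_force_empty():
 * cases where per-cpu cached charges may be what stands between the
 * current usage and the limit.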
2283 */ 2284 static void drain_all_stock(struct mem_cgroup *root_memcg) 2285 { 2286 int cpu, curcpu; 2287 2288 /* If someone's already draining, avoid adding running more workers. */ 2289 if (!mutex_trylock(&percpu_charge_mutex)) 2290 return; 2291 /* 2292 * Notify other cpus that system-wide "drain" is running 2293 * We do not care about races with the cpu hotplug because cpu down 2294 * as well as workers from this path always operate on the local 2295 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2296 */ 2297 curcpu = get_cpu(); 2298 for_each_online_cpu(cpu) { 2299 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2300 struct mem_cgroup *memcg; 2301 bool flush = false; 2302 2303 rcu_read_lock(); 2304 memcg = stock->cached; 2305 if (memcg && stock->nr_pages && 2306 mem_cgroup_is_descendant(memcg, root_memcg)) 2307 flush = true; 2308 else if (obj_stock_flush_required(stock, root_memcg)) 2309 flush = true; 2310 rcu_read_unlock(); 2311 2312 if (flush && 2313 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2314 if (cpu == curcpu) 2315 drain_local_stock(&stock->work); 2316 else 2317 schedule_work_on(cpu, &stock->work); 2318 } 2319 } 2320 put_cpu(); 2321 mutex_unlock(&percpu_charge_mutex); 2322 } 2323 2324 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2325 { 2326 struct memcg_stock_pcp *stock; 2327 2328 stock = &per_cpu(memcg_stock, cpu); 2329 drain_stock(stock); 2330 2331 return 0; 2332 } 2333 2334 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2335 unsigned int nr_pages, 2336 gfp_t gfp_mask) 2337 { 2338 unsigned long nr_reclaimed = 0; 2339 2340 do { 2341 unsigned long pflags; 2342 2343 if (page_counter_read(&memcg->memory) <= 2344 READ_ONCE(memcg->memory.high)) 2345 continue; 2346 2347 memcg_memory_event(memcg, MEMCG_HIGH); 2348 2349 psi_memstall_enter(&pflags); 2350 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2351 gfp_mask, true); 2352 psi_memstall_leave(&pflags); 2353 } while ((memcg = parent_mem_cgroup(memcg)) && 2354 !mem_cgroup_is_root(memcg)); 2355 2356 return nr_reclaimed; 2357 } 2358 2359 static void high_work_func(struct work_struct *work) 2360 { 2361 struct mem_cgroup *memcg; 2362 2363 memcg = container_of(work, struct mem_cgroup, high_work); 2364 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2365 } 2366 2367 /* 2368 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2369 * enough to still cause a significant slowdown in most cases, while still 2370 * allowing diagnostics and tracing to proceed without becoming stuck. 2371 */ 2372 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2373 2374 /* 2375 * When calculating the delay, we use these either side of the exponentiation to 2376 * maintain precision and scale to a reasonable number of jiffies (see the table 2377 * below. 2378 * 2379 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2380 * overage ratio to a delay. 2381 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2382 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2383 * to produce a reasonable delay curve. 2384 * 2385 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2386 * reasonable delay curve compared to precision-adjusted overage, not 2387 * penalising heavily at first, but still making sure that growth beyond the 2388 * limit penalises misbehaviour cgroups by slowing them down exponentially. 
For 2389 * example, with a high of 100 megabytes: 2390 * 2391 * +-------+------------------------+ 2392 * | usage | time to allocate in ms | 2393 * +-------+------------------------+ 2394 * | 100M | 0 | 2395 * | 101M | 6 | 2396 * | 102M | 25 | 2397 * | 103M | 57 | 2398 * | 104M | 102 | 2399 * | 105M | 159 | 2400 * | 106M | 230 | 2401 * | 107M | 313 | 2402 * | 108M | 409 | 2403 * | 109M | 518 | 2404 * | 110M | 639 | 2405 * | 111M | 774 | 2406 * | 112M | 921 | 2407 * | 113M | 1081 | 2408 * | 114M | 1254 | 2409 * | 115M | 1439 | 2410 * | 116M | 1638 | 2411 * | 117M | 1849 | 2412 * | 118M | 2000 | 2413 * | 119M | 2000 | 2414 * | 120M | 2000 | 2415 * +-------+------------------------+ 2416 */ 2417 #define MEMCG_DELAY_PRECISION_SHIFT 20 2418 #define MEMCG_DELAY_SCALING_SHIFT 14 2419 2420 static u64 calculate_overage(unsigned long usage, unsigned long high) 2421 { 2422 u64 overage; 2423 2424 if (usage <= high) 2425 return 0; 2426 2427 /* 2428 * Prevent division by 0 in overage calculation by acting as if 2429 * it was a threshold of 1 page 2430 */ 2431 high = max(high, 1UL); 2432 2433 overage = usage - high; 2434 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2435 return div64_u64(overage, high); 2436 } 2437 2438 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2439 { 2440 u64 overage, max_overage = 0; 2441 2442 do { 2443 overage = calculate_overage(page_counter_read(&memcg->memory), 2444 READ_ONCE(memcg->memory.high)); 2445 max_overage = max(overage, max_overage); 2446 } while ((memcg = parent_mem_cgroup(memcg)) && 2447 !mem_cgroup_is_root(memcg)); 2448 2449 return max_overage; 2450 } 2451 2452 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2453 { 2454 u64 overage, max_overage = 0; 2455 2456 do { 2457 overage = calculate_overage(page_counter_read(&memcg->swap), 2458 READ_ONCE(memcg->swap.high)); 2459 if (overage) 2460 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2461 max_overage = max(overage, max_overage); 2462 } while ((memcg = parent_mem_cgroup(memcg)) && 2463 !mem_cgroup_is_root(memcg)); 2464 2465 return max_overage; 2466 } 2467 2468 /* 2469 * Get the number of jiffies that we should penalise a mischievous cgroup which 2470 * is exceeding its memory.high by checking both it and its ancestors. 2471 */ 2472 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2473 unsigned int nr_pages, 2474 u64 max_overage) 2475 { 2476 unsigned long penalty_jiffies; 2477 2478 if (!max_overage) 2479 return 0; 2480 2481 /* 2482 * We use overage compared to memory.high to calculate the number of 2483 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2484 * fairly lenient on small overages, and increasingly harsh when the 2485 * memcg in question makes it clear that it has no intention of stopping 2486 * its crazy behaviour, so we exponentially increase the delay based on 2487 * overage amount. 2488 */ 2489 penalty_jiffies = max_overage * max_overage * HZ; 2490 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2491 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2492 2493 /* 2494 * Factor in the task's own contribution to the overage, such that four 2495 * N-sized allocations are throttled approximately the same as one 2496 * 4N-sized allocation. 2497 * 2498 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2499 * larger the current charge batch is than that.
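 *
 * Worked example (a sketch; exact values depend on integer rounding):
 * with high = 100M and usage = 110M, calculate_overage() returns about
 * 0.1 << MEMCG_DELAY_PRECISION_SHIFT ~= 104857, so
 *
 *	penalty_jiffies ~= 104857 * 104857 * HZ >> 20 >> 14 ~= 0.64 * HZ
 *
 * i.e. roughly 640ms, matching the 110M row in the table above. The
 * scaling below then halves that delay for a task whose own charge
 * batch was only half of MEMCG_CHARGE_BATCH.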
2500 */ 2501 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2502 } 2503 2504 /* 2505 * Scheduled by try_charge() to be executed from the userland return path 2506 * and reclaims memory over the high limit. 2507 */ 2508 void mem_cgroup_handle_over_high(void) 2509 { 2510 unsigned long penalty_jiffies; 2511 unsigned long pflags; 2512 unsigned long nr_reclaimed; 2513 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2514 int nr_retries = MAX_RECLAIM_RETRIES; 2515 struct mem_cgroup *memcg; 2516 bool in_retry = false; 2517 2518 if (likely(!nr_pages)) 2519 return; 2520 2521 memcg = get_mem_cgroup_from_mm(current->mm); 2522 current->memcg_nr_pages_over_high = 0; 2523 2524 retry_reclaim: 2525 /* 2526 * The allocating task should reclaim at least the batch size, but for 2527 * subsequent retries we only want to do what's necessary to prevent oom 2528 * or breaching resource isolation. 2529 * 2530 * This is distinct from memory.max or page allocator behaviour because 2531 * memory.high is currently batched, whereas memory.max and the page 2532 * allocator run every time an allocation is made. 2533 */ 2534 nr_reclaimed = reclaim_high(memcg, 2535 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2536 GFP_KERNEL); 2537 2538 /* 2539 * memory.high is breached and reclaim is unable to keep up. Throttle 2540 * allocators proactively to slow down excessive growth. 2541 */ 2542 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2543 mem_find_max_overage(memcg)); 2544 2545 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2546 swap_find_max_overage(memcg)); 2547 2548 /* 2549 * Clamp the max delay per usermode return so as to still keep the 2550 * application moving forwards and also permit diagnostics, albeit 2551 * extremely slowly. 2552 */ 2553 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2554 2555 /* 2556 * Don't sleep if the amount of jiffies this memcg owes us is so low 2557 * that it's not even worth doing, in an attempt to be nice to those who 2558 * go only a small amount over their memory.high value and maybe haven't 2559 * been aggressively reclaimed enough yet. 2560 */ 2561 if (penalty_jiffies <= HZ / 100) 2562 goto out; 2563 2564 /* 2565 * If reclaim is making forward progress but we're still over 2566 * memory.high, we want to encourage that rather than doing allocator 2567 * throttling. 2568 */ 2569 if (nr_reclaimed || nr_retries--) { 2570 in_retry = true; 2571 goto retry_reclaim; 2572 } 2573 2574 /* 2575 * If we exit early, we're guaranteed to die (since 2576 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2577 * need to account for any ill-begotten jiffies to pay them off later. 
2578 */ 2579 psi_memstall_enter(&pflags); 2580 schedule_timeout_killable(penalty_jiffies); 2581 psi_memstall_leave(&pflags); 2582 2583 out: 2584 css_put(&memcg->css); 2585 } 2586 2587 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, 2588 unsigned int nr_pages) 2589 { 2590 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2591 int nr_retries = MAX_RECLAIM_RETRIES; 2592 struct mem_cgroup *mem_over_limit; 2593 struct page_counter *counter; 2594 enum oom_status oom_status; 2595 unsigned long nr_reclaimed; 2596 bool passed_oom = false; 2597 bool may_swap = true; 2598 bool drained = false; 2599 unsigned long pflags; 2600 2601 retry: 2602 if (consume_stock(memcg, nr_pages)) 2603 return 0; 2604 2605 if (!do_memsw_account() || 2606 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2607 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2608 goto done_restock; 2609 if (do_memsw_account()) 2610 page_counter_uncharge(&memcg->memsw, batch); 2611 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2612 } else { 2613 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2614 may_swap = false; 2615 } 2616 2617 if (batch > nr_pages) { 2618 batch = nr_pages; 2619 goto retry; 2620 } 2621 2622 /* 2623 * Memcg doesn't have a dedicated reserve for atomic 2624 * allocations. But like the global atomic pool, we need to 2625 * put the burden of reclaim on regular allocation requests 2626 * and let these go through as privileged allocations. 2627 */ 2628 if (gfp_mask & __GFP_ATOMIC) 2629 goto force; 2630 2631 /* 2632 * Prevent unbounded recursion when reclaim operations need to 2633 * allocate memory. This might exceed the limits temporarily, 2634 * but we prefer facilitating memory reclaim and getting back 2635 * under the limit over triggering OOM kills in these cases. 2636 */ 2637 if (unlikely(current->flags & PF_MEMALLOC)) 2638 goto force; 2639 2640 if (unlikely(task_in_memcg_oom(current))) 2641 goto nomem; 2642 2643 if (!gfpflags_allow_blocking(gfp_mask)) 2644 goto nomem; 2645 2646 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2647 2648 psi_memstall_enter(&pflags); 2649 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2650 gfp_mask, may_swap); 2651 psi_memstall_leave(&pflags); 2652 2653 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2654 goto retry; 2655 2656 if (!drained) { 2657 drain_all_stock(mem_over_limit); 2658 drained = true; 2659 goto retry; 2660 } 2661 2662 if (gfp_mask & __GFP_NORETRY) 2663 goto nomem; 2664 /* 2665 * Even though the limit is exceeded at this point, reclaim 2666 * may have been able to free some pages. Retry the charge 2667 * before killing the task. 2668 * 2669 * Only for regular pages, though: huge pages are rather 2670 * unlikely to succeed so close to the limit, and we fall back 2671 * to regular pages anyway in case of failure. 2672 */ 2673 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2674 goto retry; 2675 /* 2676 * At task move, charge accounts can be doubly counted. So, it's 2677 * better to wait until the end of task_move if something is going on. 
2678 */ 2679 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2680 goto retry; 2681 2682 if (nr_retries--) 2683 goto retry; 2684 2685 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2686 goto nomem; 2687 2688 /* Avoid endless loop for tasks bypassed by the oom killer */ 2689 if (passed_oom && task_is_dying()) 2690 goto nomem; 2691 2692 /* 2693 * keep retrying as long as the memcg oom killer is able to make 2694 * a forward progress or bypass the charge if the oom killer 2695 * couldn't make any progress. 2696 */ 2697 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2698 get_order(nr_pages * PAGE_SIZE)); 2699 if (oom_status == OOM_SUCCESS) { 2700 passed_oom = true; 2701 nr_retries = MAX_RECLAIM_RETRIES; 2702 goto retry; 2703 } 2704 nomem: 2705 if (!(gfp_mask & __GFP_NOFAIL)) 2706 return -ENOMEM; 2707 force: 2708 /* 2709 * The allocation either can't fail or will lead to more memory 2710 * being freed very soon. Allow memory usage go over the limit 2711 * temporarily by force charging it. 2712 */ 2713 page_counter_charge(&memcg->memory, nr_pages); 2714 if (do_memsw_account()) 2715 page_counter_charge(&memcg->memsw, nr_pages); 2716 2717 return 0; 2718 2719 done_restock: 2720 if (batch > nr_pages) 2721 refill_stock(memcg, batch - nr_pages); 2722 2723 /* 2724 * If the hierarchy is above the normal consumption range, schedule 2725 * reclaim on returning to userland. We can perform reclaim here 2726 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2727 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2728 * not recorded as it most likely matches current's and won't 2729 * change in the meantime. As high limit is checked again before 2730 * reclaim, the cost of mismatch is negligible. 2731 */ 2732 do { 2733 bool mem_high, swap_high; 2734 2735 mem_high = page_counter_read(&memcg->memory) > 2736 READ_ONCE(memcg->memory.high); 2737 swap_high = page_counter_read(&memcg->swap) > 2738 READ_ONCE(memcg->swap.high); 2739 2740 /* Don't bother a random interrupted task */ 2741 if (in_interrupt()) { 2742 if (mem_high) { 2743 schedule_work(&memcg->high_work); 2744 break; 2745 } 2746 continue; 2747 } 2748 2749 if (mem_high || swap_high) { 2750 /* 2751 * The allocating tasks in this cgroup will need to do 2752 * reclaim or be throttled to prevent further growth 2753 * of the memory or swap footprints. 2754 * 2755 * Target some best-effort fairness between the tasks, 2756 * and distribute reclaim work and delay penalties 2757 * based on how much each task is actually allocating. 
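 *
 * The count accumulated below is consumed by
 * mem_cgroup_handle_over_high() on the way back to userspace, which
 * does the actual reclaim and, if that cannot keep up, the throttling
 * sleep computed by calculate_high_delay().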
2758 */ 2759 current->memcg_nr_pages_over_high += batch; 2760 set_notify_resume(current); 2761 break; 2762 } 2763 } while ((memcg = parent_mem_cgroup(memcg))); 2764 2765 return 0; 2766 } 2767 2768 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2769 unsigned int nr_pages) 2770 { 2771 if (mem_cgroup_is_root(memcg)) 2772 return 0; 2773 2774 return try_charge_memcg(memcg, gfp_mask, nr_pages); 2775 } 2776 2777 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2778 { 2779 if (mem_cgroup_is_root(memcg)) 2780 return; 2781 2782 page_counter_uncharge(&memcg->memory, nr_pages); 2783 if (do_memsw_account()) 2784 page_counter_uncharge(&memcg->memsw, nr_pages); 2785 } 2786 2787 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) 2788 { 2789 VM_BUG_ON_FOLIO(folio_memcg(folio), folio); 2790 /* 2791 * Any of the following ensures page's memcg stability: 2792 * 2793 * - the page lock 2794 * - LRU isolation 2795 * - lock_page_memcg() 2796 * - exclusive reference 2797 */ 2798 folio->memcg_data = (unsigned long)memcg; 2799 } 2800 2801 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) 2802 { 2803 struct mem_cgroup *memcg; 2804 2805 rcu_read_lock(); 2806 retry: 2807 memcg = obj_cgroup_memcg(objcg); 2808 if (unlikely(!css_tryget(&memcg->css))) 2809 goto retry; 2810 rcu_read_unlock(); 2811 2812 return memcg; 2813 } 2814 2815 #ifdef CONFIG_MEMCG_KMEM 2816 /* 2817 * The allocated objcg pointers array is not accounted directly. 2818 * Moreover, it should not come from DMA buffer and is not readily 2819 * reclaimable. So those GFP bits should be masked off. 2820 */ 2821 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) 2822 2823 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2824 gfp_t gfp, bool new_page) 2825 { 2826 unsigned int objects = objs_per_slab_page(s, page); 2827 unsigned long memcg_data; 2828 void *vec; 2829 2830 gfp &= ~OBJCGS_CLEAR_MASK; 2831 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2832 page_to_nid(page)); 2833 if (!vec) 2834 return -ENOMEM; 2835 2836 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; 2837 if (new_page) { 2838 /* 2839 * If the slab page is brand new and nobody can yet access 2840 * it's memcg_data, no synchronization is required and 2841 * memcg_data can be simply assigned. 2842 */ 2843 page->memcg_data = memcg_data; 2844 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) { 2845 /* 2846 * If the slab page is already in use, somebody can allocate 2847 * and assign obj_cgroups in parallel. In this case the existing 2848 * objcg vector should be reused. 2849 */ 2850 kfree(vec); 2851 return 0; 2852 } 2853 2854 kmemleak_not_leak(vec); 2855 return 0; 2856 } 2857 2858 /* 2859 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2860 * 2861 * A passed kernel object can be a slab object or a generic kernel page, so 2862 * different mechanisms for getting the memory cgroup pointer should be used. 2863 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 2864 * can not know for sure how the kernel object is implemented. 2865 * mem_cgroup_from_obj() can be safely used in such cases. 2866 * 2867 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2868 * cgroup_mutex, etc. 
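 *
 * A minimal usage sketch under RCU (illustrative only):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(p);
 *	if (memcg)
 *		... read-only access to memcg ...
 *	rcu_read_unlock();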
2869 */ 2870 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2871 { 2872 struct page *page; 2873 2874 if (mem_cgroup_disabled()) 2875 return NULL; 2876 2877 page = virt_to_head_page(p); 2878 2879 /* 2880 * Slab objects are accounted individually, not per-page. 2881 * Memcg membership data for each individual object is saved in 2882 * the page->obj_cgroups. 2883 */ 2884 if (page_objcgs_check(page)) { 2885 struct obj_cgroup *objcg; 2886 unsigned int off; 2887 2888 off = obj_to_index(page->slab_cache, page, p); 2889 objcg = page_objcgs(page)[off]; 2890 if (objcg) 2891 return obj_cgroup_memcg(objcg); 2892 2893 return NULL; 2894 } 2895 2896 /* 2897 * page_memcg_check() is used here, because page_has_obj_cgroups() 2898 * check above could fail because the object cgroups vector wasn't set 2899 * at that moment, but it can be set concurrently. 2900 * page_memcg_check(page) will guarantee that a proper memory 2901 * cgroup pointer or NULL will be returned. 2902 */ 2903 return page_memcg_check(page); 2904 } 2905 2906 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2907 { 2908 struct obj_cgroup *objcg = NULL; 2909 struct mem_cgroup *memcg; 2910 2911 if (memcg_kmem_bypass()) 2912 return NULL; 2913 2914 rcu_read_lock(); 2915 if (unlikely(active_memcg())) 2916 memcg = active_memcg(); 2917 else 2918 memcg = mem_cgroup_from_task(current); 2919 2920 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2921 objcg = rcu_dereference(memcg->objcg); 2922 if (objcg && obj_cgroup_tryget(objcg)) 2923 break; 2924 objcg = NULL; 2925 } 2926 rcu_read_unlock(); 2927 2928 return objcg; 2929 } 2930 2931 static int memcg_alloc_cache_id(void) 2932 { 2933 int id, size; 2934 int err; 2935 2936 id = ida_simple_get(&memcg_cache_ida, 2937 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2938 if (id < 0) 2939 return id; 2940 2941 if (id < memcg_nr_cache_ids) 2942 return id; 2943 2944 /* 2945 * There's no space for the new id in memcg_caches arrays, 2946 * so we have to grow them. 2947 */ 2948 down_write(&memcg_cache_ids_sem); 2949 2950 size = 2 * (id + 1); 2951 if (size < MEMCG_CACHES_MIN_SIZE) 2952 size = MEMCG_CACHES_MIN_SIZE; 2953 else if (size > MEMCG_CACHES_MAX_SIZE) 2954 size = MEMCG_CACHES_MAX_SIZE; 2955 2956 err = memcg_update_all_list_lrus(size); 2957 if (!err) 2958 memcg_nr_cache_ids = size; 2959 2960 up_write(&memcg_cache_ids_sem); 2961 2962 if (err) { 2963 ida_simple_remove(&memcg_cache_ida, id); 2964 return err; 2965 } 2966 return id; 2967 } 2968 2969 static void memcg_free_cache_id(int id) 2970 { 2971 ida_simple_remove(&memcg_cache_ida, id); 2972 } 2973 2974 /* 2975 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg 2976 * @objcg: object cgroup to uncharge 2977 * @nr_pages: number of pages to uncharge 2978 */ 2979 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, 2980 unsigned int nr_pages) 2981 { 2982 struct mem_cgroup *memcg; 2983 2984 memcg = get_mem_cgroup_from_objcg(objcg); 2985 2986 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2987 page_counter_uncharge(&memcg->kmem, nr_pages); 2988 refill_stock(memcg, nr_pages); 2989 2990 css_put(&memcg->css); 2991 } 2992 2993 /* 2994 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg 2995 * @objcg: object cgroup to charge 2996 * @gfp: reclaim mode 2997 * @nr_pages: number of pages to charge 2998 * 2999 * Returns 0 on success, an error code on failure. 
3000 */ 3001 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, 3002 unsigned int nr_pages) 3003 { 3004 struct mem_cgroup *memcg; 3005 int ret; 3006 3007 memcg = get_mem_cgroup_from_objcg(objcg); 3008 3009 ret = try_charge_memcg(memcg, gfp, nr_pages); 3010 if (ret) 3011 goto out; 3012 3013 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 3014 page_counter_charge(&memcg->kmem, nr_pages); 3015 out: 3016 css_put(&memcg->css); 3017 3018 return ret; 3019 } 3020 3021 /** 3022 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3023 * @page: page to charge 3024 * @gfp: reclaim mode 3025 * @order: allocation order 3026 * 3027 * Returns 0 on success, an error code on failure. 3028 */ 3029 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3030 { 3031 struct obj_cgroup *objcg; 3032 int ret = 0; 3033 3034 objcg = get_obj_cgroup_from_current(); 3035 if (objcg) { 3036 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); 3037 if (!ret) { 3038 page->memcg_data = (unsigned long)objcg | 3039 MEMCG_DATA_KMEM; 3040 return 0; 3041 } 3042 obj_cgroup_put(objcg); 3043 } 3044 return ret; 3045 } 3046 3047 /** 3048 * __memcg_kmem_uncharge_page: uncharge a kmem page 3049 * @page: page to uncharge 3050 * @order: allocation order 3051 */ 3052 void __memcg_kmem_uncharge_page(struct page *page, int order) 3053 { 3054 struct folio *folio = page_folio(page); 3055 struct obj_cgroup *objcg; 3056 unsigned int nr_pages = 1 << order; 3057 3058 if (!folio_memcg_kmem(folio)) 3059 return; 3060 3061 objcg = __folio_objcg(folio); 3062 obj_cgroup_uncharge_pages(objcg, nr_pages); 3063 folio->memcg_data = 0; 3064 obj_cgroup_put(objcg); 3065 } 3066 3067 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, 3068 enum node_stat_item idx, int nr) 3069 { 3070 unsigned long flags; 3071 struct obj_stock *stock = get_obj_stock(&flags); 3072 int *bytes; 3073 3074 /* 3075 * Save vmstat data in stock and skip vmstat array update unless 3076 * accumulating over a page of vmstat data or when pgdat or idx 3077 * changes. 3078 */ 3079 if (stock->cached_objcg != objcg) { 3080 drain_obj_stock(stock); 3081 obj_cgroup_get(objcg); 3082 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3083 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3084 stock->cached_objcg = objcg; 3085 stock->cached_pgdat = pgdat; 3086 } else if (stock->cached_pgdat != pgdat) { 3087 /* Flush the existing cached vmstat data */ 3088 struct pglist_data *oldpg = stock->cached_pgdat; 3089 3090 if (stock->nr_slab_reclaimable_b) { 3091 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, 3092 stock->nr_slab_reclaimable_b); 3093 stock->nr_slab_reclaimable_b = 0; 3094 } 3095 if (stock->nr_slab_unreclaimable_b) { 3096 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, 3097 stock->nr_slab_unreclaimable_b); 3098 stock->nr_slab_unreclaimable_b = 0; 3099 } 3100 stock->cached_pgdat = pgdat; 3101 } 3102 3103 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b 3104 : &stock->nr_slab_unreclaimable_b; 3105 /* 3106 * Even for large object >= PAGE_SIZE, the vmstat data will still be 3107 * cached locally at least once before pushing it out. 
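 *
 * For example, a stream of +64 byte updates for the same objcg, pgdat
 * and idx only accumulates in *bytes below; mod_objcg_mlstate() is not
 * called until the cached delta exceeds PAGE_SIZE in either direction
 * (or until the cached objcg/pgdat changes, handled above).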
3108 */ 3109 if (!*bytes) { 3110 *bytes = nr; 3111 nr = 0; 3112 } else { 3113 *bytes += nr; 3114 if (abs(*bytes) > PAGE_SIZE) { 3115 nr = *bytes; 3116 *bytes = 0; 3117 } else { 3118 nr = 0; 3119 } 3120 } 3121 if (nr) 3122 mod_objcg_mlstate(objcg, pgdat, idx, nr); 3123 3124 put_obj_stock(flags); 3125 } 3126 3127 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3128 { 3129 unsigned long flags; 3130 struct obj_stock *stock = get_obj_stock(&flags); 3131 bool ret = false; 3132 3133 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3134 stock->nr_bytes -= nr_bytes; 3135 ret = true; 3136 } 3137 3138 put_obj_stock(flags); 3139 3140 return ret; 3141 } 3142 3143 static void drain_obj_stock(struct obj_stock *stock) 3144 { 3145 struct obj_cgroup *old = stock->cached_objcg; 3146 3147 if (!old) 3148 return; 3149 3150 if (stock->nr_bytes) { 3151 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3152 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3153 3154 if (nr_pages) 3155 obj_cgroup_uncharge_pages(old, nr_pages); 3156 3157 /* 3158 * The leftover is flushed to the centralized per-memcg value. 3159 * On the next attempt to refill obj stock it will be moved 3160 * to a per-cpu stock (probably, on an other CPU), see 3161 * refill_obj_stock(). 3162 * 3163 * How often it's flushed is a trade-off between the memory 3164 * limit enforcement accuracy and potential CPU contention, 3165 * so it might be changed in the future. 3166 */ 3167 atomic_add(nr_bytes, &old->nr_charged_bytes); 3168 stock->nr_bytes = 0; 3169 } 3170 3171 /* 3172 * Flush the vmstat data in current stock 3173 */ 3174 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { 3175 if (stock->nr_slab_reclaimable_b) { 3176 mod_objcg_mlstate(old, stock->cached_pgdat, 3177 NR_SLAB_RECLAIMABLE_B, 3178 stock->nr_slab_reclaimable_b); 3179 stock->nr_slab_reclaimable_b = 0; 3180 } 3181 if (stock->nr_slab_unreclaimable_b) { 3182 mod_objcg_mlstate(old, stock->cached_pgdat, 3183 NR_SLAB_UNRECLAIMABLE_B, 3184 stock->nr_slab_unreclaimable_b); 3185 stock->nr_slab_unreclaimable_b = 0; 3186 } 3187 stock->cached_pgdat = NULL; 3188 } 3189 3190 obj_cgroup_put(old); 3191 stock->cached_objcg = NULL; 3192 } 3193 3194 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3195 struct mem_cgroup *root_memcg) 3196 { 3197 struct mem_cgroup *memcg; 3198 3199 if (in_task() && stock->task_obj.cached_objcg) { 3200 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg); 3201 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3202 return true; 3203 } 3204 if (stock->irq_obj.cached_objcg) { 3205 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg); 3206 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3207 return true; 3208 } 3209 3210 return false; 3211 } 3212 3213 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, 3214 bool allow_uncharge) 3215 { 3216 unsigned long flags; 3217 struct obj_stock *stock = get_obj_stock(&flags); 3218 unsigned int nr_pages = 0; 3219 3220 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3221 drain_obj_stock(stock); 3222 obj_cgroup_get(objcg); 3223 stock->cached_objcg = objcg; 3224 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) 3225 ? 
atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; 3226 allow_uncharge = true; /* Allow uncharge when objcg changes */ 3227 } 3228 stock->nr_bytes += nr_bytes; 3229 3230 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { 3231 nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3232 stock->nr_bytes &= (PAGE_SIZE - 1); 3233 } 3234 3235 put_obj_stock(flags); 3236 3237 if (nr_pages) 3238 obj_cgroup_uncharge_pages(objcg, nr_pages); 3239 } 3240 3241 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3242 { 3243 unsigned int nr_pages, nr_bytes; 3244 int ret; 3245 3246 if (consume_obj_stock(objcg, size)) 3247 return 0; 3248 3249 /* 3250 * In theory, objcg->nr_charged_bytes can have enough 3251 * pre-charged bytes to satisfy the allocation. However, 3252 * flushing objcg->nr_charged_bytes requires two atomic 3253 * operations, and objcg->nr_charged_bytes can't be big. 3254 * The shared objcg->nr_charged_bytes can also become a 3255 * performance bottleneck if all tasks of the same memcg are 3256 * trying to update it. So it's better to ignore it and try 3257 * grab some new pages. The stock's nr_bytes will be flushed to 3258 * objcg->nr_charged_bytes later on when objcg changes. 3259 * 3260 * The stock's nr_bytes may contain enough pre-charged bytes 3261 * to allow one less page from being charged, but we can't rely 3262 * on the pre-charged bytes not being changed outside of 3263 * consume_obj_stock() or refill_obj_stock(). So ignore those 3264 * pre-charged bytes as well when charging pages. To avoid a 3265 * page uncharge right after a page charge, we set the 3266 * allow_uncharge flag to false when calling refill_obj_stock() 3267 * to temporarily allow the pre-charged bytes to exceed the page 3268 * size limit. The maximum reachable value of the pre-charged 3269 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data 3270 * race. 3271 */ 3272 nr_pages = size >> PAGE_SHIFT; 3273 nr_bytes = size & (PAGE_SIZE - 1); 3274 3275 if (nr_bytes) 3276 nr_pages += 1; 3277 3278 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); 3279 if (!ret && nr_bytes) 3280 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); 3281 3282 return ret; 3283 } 3284 3285 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3286 { 3287 refill_obj_stock(objcg, size, true); 3288 } 3289 3290 #endif /* CONFIG_MEMCG_KMEM */ 3291 3292 /* 3293 * Because page_memcg(head) is not set on tails, set it now. 3294 */ 3295 void split_page_memcg(struct page *head, unsigned int nr) 3296 { 3297 struct folio *folio = page_folio(head); 3298 struct mem_cgroup *memcg = folio_memcg(folio); 3299 int i; 3300 3301 if (mem_cgroup_disabled() || !memcg) 3302 return; 3303 3304 for (i = 1; i < nr; i++) 3305 folio_page(folio, i)->memcg_data = folio->memcg_data; 3306 3307 if (folio_memcg_kmem(folio)) 3308 obj_cgroup_get_many(__folio_objcg(folio), nr - 1); 3309 else 3310 css_get_many(&memcg->css, nr - 1); 3311 } 3312 3313 #ifdef CONFIG_MEMCG_SWAP 3314 /** 3315 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3316 * @entry: swap entry to be moved 3317 * @from: mem_cgroup which the entry is moved from 3318 * @to: mem_cgroup which the entry is moved to 3319 * 3320 * It succeeds only when the swap_cgroup's record for this entry is the same 3321 * as the mem_cgroup's id of @from. 3322 * 3323 * Returns 0 on success, -EINVAL on failure. 3324 * 3325 * The caller must have charged to @to, IOW, called page_counter_charge() about 3326 * both res and memsw, and called css_get(). 
3327 */ 3328 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3329 struct mem_cgroup *from, struct mem_cgroup *to) 3330 { 3331 unsigned short old_id, new_id; 3332 3333 old_id = mem_cgroup_id(from); 3334 new_id = mem_cgroup_id(to); 3335 3336 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3337 mod_memcg_state(from, MEMCG_SWAP, -1); 3338 mod_memcg_state(to, MEMCG_SWAP, 1); 3339 return 0; 3340 } 3341 return -EINVAL; 3342 } 3343 #else 3344 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3345 struct mem_cgroup *from, struct mem_cgroup *to) 3346 { 3347 return -EINVAL; 3348 } 3349 #endif 3350 3351 static DEFINE_MUTEX(memcg_max_mutex); 3352 3353 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3354 unsigned long max, bool memsw) 3355 { 3356 bool enlarge = false; 3357 bool drained = false; 3358 int ret; 3359 bool limits_invariant; 3360 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3361 3362 do { 3363 if (signal_pending(current)) { 3364 ret = -EINTR; 3365 break; 3366 } 3367 3368 mutex_lock(&memcg_max_mutex); 3369 /* 3370 * Make sure that the new limit (memsw or memory limit) doesn't 3371 * break our basic invariant rule memory.max <= memsw.max. 3372 */ 3373 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3374 max <= memcg->memsw.max; 3375 if (!limits_invariant) { 3376 mutex_unlock(&memcg_max_mutex); 3377 ret = -EINVAL; 3378 break; 3379 } 3380 if (max > counter->max) 3381 enlarge = true; 3382 ret = page_counter_set_max(counter, max); 3383 mutex_unlock(&memcg_max_mutex); 3384 3385 if (!ret) 3386 break; 3387 3388 if (!drained) { 3389 drain_all_stock(memcg); 3390 drained = true; 3391 continue; 3392 } 3393 3394 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3395 GFP_KERNEL, !memsw)) { 3396 ret = -EBUSY; 3397 break; 3398 } 3399 } while (true); 3400 3401 if (!ret && enlarge) 3402 memcg_oom_recover(memcg); 3403 3404 return ret; 3405 } 3406 3407 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3408 gfp_t gfp_mask, 3409 unsigned long *total_scanned) 3410 { 3411 unsigned long nr_reclaimed = 0; 3412 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3413 unsigned long reclaimed; 3414 int loop = 0; 3415 struct mem_cgroup_tree_per_node *mctz; 3416 unsigned long excess; 3417 unsigned long nr_scanned; 3418 3419 if (order > 0) 3420 return 0; 3421 3422 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id]; 3423 3424 /* 3425 * Do not even bother to check the largest node if the root 3426 * is empty. Do it lockless to prevent lock bouncing. Races 3427 * are acceptable as soft limit is best effort anyway. 
3428 */ 3429 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3430 return 0; 3431 3432 /* 3433 * This loop can run a while, specially if mem_cgroup's continuously 3434 * keep exceeding their soft limit and putting the system under 3435 * pressure 3436 */ 3437 do { 3438 if (next_mz) 3439 mz = next_mz; 3440 else 3441 mz = mem_cgroup_largest_soft_limit_node(mctz); 3442 if (!mz) 3443 break; 3444 3445 nr_scanned = 0; 3446 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3447 gfp_mask, &nr_scanned); 3448 nr_reclaimed += reclaimed; 3449 *total_scanned += nr_scanned; 3450 spin_lock_irq(&mctz->lock); 3451 __mem_cgroup_remove_exceeded(mz, mctz); 3452 3453 /* 3454 * If we failed to reclaim anything from this memory cgroup 3455 * it is time to move on to the next cgroup 3456 */ 3457 next_mz = NULL; 3458 if (!reclaimed) 3459 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3460 3461 excess = soft_limit_excess(mz->memcg); 3462 /* 3463 * One school of thought says that we should not add 3464 * back the node to the tree if reclaim returns 0. 3465 * But our reclaim could return 0, simply because due 3466 * to priority we are exposing a smaller subset of 3467 * memory to reclaim from. Consider this as a longer 3468 * term TODO. 3469 */ 3470 /* If excess == 0, no tree ops */ 3471 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3472 spin_unlock_irq(&mctz->lock); 3473 css_put(&mz->memcg->css); 3474 loop++; 3475 /* 3476 * Could not reclaim anything and there are no more 3477 * mem cgroups to try or we seem to be looping without 3478 * reclaiming anything. 3479 */ 3480 if (!nr_reclaimed && 3481 (next_mz == NULL || 3482 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3483 break; 3484 } while (!nr_reclaimed); 3485 if (next_mz) 3486 css_put(&next_mz->memcg->css); 3487 return nr_reclaimed; 3488 } 3489 3490 /* 3491 * Reclaims as many pages from the given memcg as possible. 3492 * 3493 * Caller is responsible for holding css reference for memcg. 3494 */ 3495 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3496 { 3497 int nr_retries = MAX_RECLAIM_RETRIES; 3498 3499 /* we call try-to-free pages for make this cgroup empty */ 3500 lru_add_drain_all(); 3501 3502 drain_all_stock(memcg); 3503 3504 /* try to free all pages in this cgroup */ 3505 while (nr_retries && page_counter_read(&memcg->memory)) { 3506 if (signal_pending(current)) 3507 return -EINTR; 3508 3509 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true)) 3510 nr_retries--; 3511 } 3512 3513 return 0; 3514 } 3515 3516 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3517 char *buf, size_t nbytes, 3518 loff_t off) 3519 { 3520 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3521 3522 if (mem_cgroup_is_root(memcg)) 3523 return -EINVAL; 3524 return mem_cgroup_force_empty(memcg) ?: nbytes; 3525 } 3526 3527 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3528 struct cftype *cft) 3529 { 3530 return 1; 3531 } 3532 3533 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3534 struct cftype *cft, u64 val) 3535 { 3536 if (val == 1) 3537 return 0; 3538 3539 pr_warn_once("Non-hierarchical mode is deprecated. 
" 3540 "Please report your usecase to linux-mm@kvack.org if you " 3541 "depend on this functionality.\n"); 3542 3543 return -EINVAL; 3544 } 3545 3546 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3547 { 3548 unsigned long val; 3549 3550 if (mem_cgroup_is_root(memcg)) { 3551 mem_cgroup_flush_stats(); 3552 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3553 memcg_page_state(memcg, NR_ANON_MAPPED); 3554 if (swap) 3555 val += memcg_page_state(memcg, MEMCG_SWAP); 3556 } else { 3557 if (!swap) 3558 val = page_counter_read(&memcg->memory); 3559 else 3560 val = page_counter_read(&memcg->memsw); 3561 } 3562 return val; 3563 } 3564 3565 enum { 3566 RES_USAGE, 3567 RES_LIMIT, 3568 RES_MAX_USAGE, 3569 RES_FAILCNT, 3570 RES_SOFT_LIMIT, 3571 }; 3572 3573 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3574 struct cftype *cft) 3575 { 3576 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3577 struct page_counter *counter; 3578 3579 switch (MEMFILE_TYPE(cft->private)) { 3580 case _MEM: 3581 counter = &memcg->memory; 3582 break; 3583 case _MEMSWAP: 3584 counter = &memcg->memsw; 3585 break; 3586 case _KMEM: 3587 counter = &memcg->kmem; 3588 break; 3589 case _TCP: 3590 counter = &memcg->tcpmem; 3591 break; 3592 default: 3593 BUG(); 3594 } 3595 3596 switch (MEMFILE_ATTR(cft->private)) { 3597 case RES_USAGE: 3598 if (counter == &memcg->memory) 3599 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3600 if (counter == &memcg->memsw) 3601 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3602 return (u64)page_counter_read(counter) * PAGE_SIZE; 3603 case RES_LIMIT: 3604 return (u64)counter->max * PAGE_SIZE; 3605 case RES_MAX_USAGE: 3606 return (u64)counter->watermark * PAGE_SIZE; 3607 case RES_FAILCNT: 3608 return counter->failcnt; 3609 case RES_SOFT_LIMIT: 3610 return (u64)memcg->soft_limit * PAGE_SIZE; 3611 default: 3612 BUG(); 3613 } 3614 } 3615 3616 #ifdef CONFIG_MEMCG_KMEM 3617 static int memcg_online_kmem(struct mem_cgroup *memcg) 3618 { 3619 struct obj_cgroup *objcg; 3620 int memcg_id; 3621 3622 if (cgroup_memory_nokmem) 3623 return 0; 3624 3625 BUG_ON(memcg->kmemcg_id >= 0); 3626 3627 memcg_id = memcg_alloc_cache_id(); 3628 if (memcg_id < 0) 3629 return memcg_id; 3630 3631 objcg = obj_cgroup_alloc(); 3632 if (!objcg) { 3633 memcg_free_cache_id(memcg_id); 3634 return -ENOMEM; 3635 } 3636 objcg->memcg = memcg; 3637 rcu_assign_pointer(memcg->objcg, objcg); 3638 3639 static_branch_enable(&memcg_kmem_enabled_key); 3640 3641 memcg->kmemcg_id = memcg_id; 3642 3643 return 0; 3644 } 3645 3646 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3647 { 3648 struct mem_cgroup *parent; 3649 int kmemcg_id; 3650 3651 if (memcg->kmemcg_id == -1) 3652 return; 3653 3654 parent = parent_mem_cgroup(memcg); 3655 if (!parent) 3656 parent = root_mem_cgroup; 3657 3658 memcg_reparent_objcgs(memcg, parent); 3659 3660 kmemcg_id = memcg->kmemcg_id; 3661 BUG_ON(kmemcg_id < 0); 3662 3663 /* 3664 * After we have finished memcg_reparent_objcgs(), all list_lrus 3665 * corresponding to this cgroup are guaranteed to remain empty. 3666 * The ordering is imposed by list_lru_node->lock taken by 3667 * memcg_drain_all_list_lrus(). 
3668 */ 3669 memcg_drain_all_list_lrus(kmemcg_id, parent); 3670 3671 memcg_free_cache_id(kmemcg_id); 3672 memcg->kmemcg_id = -1; 3673 } 3674 #else 3675 static int memcg_online_kmem(struct mem_cgroup *memcg) 3676 { 3677 return 0; 3678 } 3679 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3680 { 3681 } 3682 #endif /* CONFIG_MEMCG_KMEM */ 3683 3684 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3685 { 3686 int ret; 3687 3688 mutex_lock(&memcg_max_mutex); 3689 3690 ret = page_counter_set_max(&memcg->tcpmem, max); 3691 if (ret) 3692 goto out; 3693 3694 if (!memcg->tcpmem_active) { 3695 /* 3696 * The active flag needs to be written after the static_key 3697 * update. This is what guarantees that the socket activation 3698 * function is the last one to run. See mem_cgroup_sk_alloc() 3699 * for details, and note that we don't mark any socket as 3700 * belonging to this memcg until that flag is up. 3701 * 3702 * We need to do this, because static_keys will span multiple 3703 * sites, but we can't control their order. If we mark a socket 3704 * as accounted, but the accounting functions are not patched in 3705 * yet, we'll lose accounting. 3706 * 3707 * We never race with the readers in mem_cgroup_sk_alloc(), 3708 * because when this value change, the code to process it is not 3709 * patched in yet. 3710 */ 3711 static_branch_inc(&memcg_sockets_enabled_key); 3712 memcg->tcpmem_active = true; 3713 } 3714 out: 3715 mutex_unlock(&memcg_max_mutex); 3716 return ret; 3717 } 3718 3719 /* 3720 * The user of this function is... 3721 * RES_LIMIT. 3722 */ 3723 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3724 char *buf, size_t nbytes, loff_t off) 3725 { 3726 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3727 unsigned long nr_pages; 3728 int ret; 3729 3730 buf = strstrip(buf); 3731 ret = page_counter_memparse(buf, "-1", &nr_pages); 3732 if (ret) 3733 return ret; 3734 3735 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3736 case RES_LIMIT: 3737 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3738 ret = -EINVAL; 3739 break; 3740 } 3741 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3742 case _MEM: 3743 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3744 break; 3745 case _MEMSWAP: 3746 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3747 break; 3748 case _KMEM: 3749 /* kmem.limit_in_bytes is deprecated. 
*/ 3750 ret = -EOPNOTSUPP; 3751 break; 3752 case _TCP: 3753 ret = memcg_update_tcp_max(memcg, nr_pages); 3754 break; 3755 } 3756 break; 3757 case RES_SOFT_LIMIT: 3758 memcg->soft_limit = nr_pages; 3759 ret = 0; 3760 break; 3761 } 3762 return ret ?: nbytes; 3763 } 3764 3765 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3766 size_t nbytes, loff_t off) 3767 { 3768 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3769 struct page_counter *counter; 3770 3771 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3772 case _MEM: 3773 counter = &memcg->memory; 3774 break; 3775 case _MEMSWAP: 3776 counter = &memcg->memsw; 3777 break; 3778 case _KMEM: 3779 counter = &memcg->kmem; 3780 break; 3781 case _TCP: 3782 counter = &memcg->tcpmem; 3783 break; 3784 default: 3785 BUG(); 3786 } 3787 3788 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3789 case RES_MAX_USAGE: 3790 page_counter_reset_watermark(counter); 3791 break; 3792 case RES_FAILCNT: 3793 counter->failcnt = 0; 3794 break; 3795 default: 3796 BUG(); 3797 } 3798 3799 return nbytes; 3800 } 3801 3802 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3803 struct cftype *cft) 3804 { 3805 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3806 } 3807 3808 #ifdef CONFIG_MMU 3809 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3810 struct cftype *cft, u64 val) 3811 { 3812 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3813 3814 if (val & ~MOVE_MASK) 3815 return -EINVAL; 3816 3817 /* 3818 * No kind of locking is needed in here, because ->can_attach() will 3819 * check this value once in the beginning of the process, and then carry 3820 * on with stale data. This means that changes to this value will only 3821 * affect task migrations starting after the change. 
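 *
 * @val is a bitmask of MOVE_ANON and MOVE_FILE; writing 3 (both bits)
 * to the cgroup1 memory.move_charge_at_immigrate file therefore asks
 * for both anonymous and file charges to follow tasks that are
 * migrated into this memcg.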
3822 */ 3823 memcg->move_charge_at_immigrate = val; 3824 return 0; 3825 } 3826 #else 3827 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3828 struct cftype *cft, u64 val) 3829 { 3830 return -ENOSYS; 3831 } 3832 #endif 3833 3834 #ifdef CONFIG_NUMA 3835 3836 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3837 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3838 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3839 3840 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3841 int nid, unsigned int lru_mask, bool tree) 3842 { 3843 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3844 unsigned long nr = 0; 3845 enum lru_list lru; 3846 3847 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3848 3849 for_each_lru(lru) { 3850 if (!(BIT(lru) & lru_mask)) 3851 continue; 3852 if (tree) 3853 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3854 else 3855 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3856 } 3857 return nr; 3858 } 3859 3860 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3861 unsigned int lru_mask, 3862 bool tree) 3863 { 3864 unsigned long nr = 0; 3865 enum lru_list lru; 3866 3867 for_each_lru(lru) { 3868 if (!(BIT(lru) & lru_mask)) 3869 continue; 3870 if (tree) 3871 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3872 else 3873 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3874 } 3875 return nr; 3876 } 3877 3878 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3879 { 3880 struct numa_stat { 3881 const char *name; 3882 unsigned int lru_mask; 3883 }; 3884 3885 static const struct numa_stat stats[] = { 3886 { "total", LRU_ALL }, 3887 { "file", LRU_ALL_FILE }, 3888 { "anon", LRU_ALL_ANON }, 3889 { "unevictable", BIT(LRU_UNEVICTABLE) }, 3890 }; 3891 const struct numa_stat *stat; 3892 int nid; 3893 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 3894 3895 mem_cgroup_flush_stats(); 3896 3897 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3898 seq_printf(m, "%s=%lu", stat->name, 3899 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3900 false)); 3901 for_each_node_state(nid, N_MEMORY) 3902 seq_printf(m, " N%d=%lu", nid, 3903 mem_cgroup_node_nr_lru_pages(memcg, nid, 3904 stat->lru_mask, false)); 3905 seq_putc(m, '\n'); 3906 } 3907 3908 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 3909 3910 seq_printf(m, "hierarchical_%s=%lu", stat->name, 3911 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 3912 true)); 3913 for_each_node_state(nid, N_MEMORY) 3914 seq_printf(m, " N%d=%lu", nid, 3915 mem_cgroup_node_nr_lru_pages(memcg, nid, 3916 stat->lru_mask, true)); 3917 seq_putc(m, '\n'); 3918 } 3919 3920 return 0; 3921 } 3922 #endif /* CONFIG_NUMA */ 3923 3924 static const unsigned int memcg1_stats[] = { 3925 NR_FILE_PAGES, 3926 NR_ANON_MAPPED, 3927 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3928 NR_ANON_THPS, 3929 #endif 3930 NR_SHMEM, 3931 NR_FILE_MAPPED, 3932 NR_FILE_DIRTY, 3933 NR_WRITEBACK, 3934 MEMCG_SWAP, 3935 }; 3936 3937 static const char *const memcg1_stat_names[] = { 3938 "cache", 3939 "rss", 3940 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3941 "rss_huge", 3942 #endif 3943 "shmem", 3944 "mapped_file", 3945 "dirty", 3946 "writeback", 3947 "swap", 3948 }; 3949 3950 /* Universal VM events cgroup1 shows, original sort order */ 3951 static const unsigned int memcg1_events[] = { 3952 PGPGIN, 3953 PGPGOUT, 3954 PGFAULT, 3955 PGMAJFAULT, 3956 }; 3957 3958 static int memcg_stat_show(struct seq_file *m, void *v) 3959 { 3960 struct 
mem_cgroup *memcg = mem_cgroup_from_seq(m); 3961 unsigned long memory, memsw; 3962 struct mem_cgroup *mi; 3963 unsigned int i; 3964 3965 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3966 3967 mem_cgroup_flush_stats(); 3968 3969 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3970 unsigned long nr; 3971 3972 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3973 continue; 3974 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 3975 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 3976 } 3977 3978 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3979 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 3980 memcg_events_local(memcg, memcg1_events[i])); 3981 3982 for (i = 0; i < NR_LRU_LISTS; i++) 3983 seq_printf(m, "%s %lu\n", lru_list_name(i), 3984 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 3985 PAGE_SIZE); 3986 3987 /* Hierarchical information */ 3988 memory = memsw = PAGE_COUNTER_MAX; 3989 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 3990 memory = min(memory, READ_ONCE(mi->memory.max)); 3991 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 3992 } 3993 seq_printf(m, "hierarchical_memory_limit %llu\n", 3994 (u64)memory * PAGE_SIZE); 3995 if (do_memsw_account()) 3996 seq_printf(m, "hierarchical_memsw_limit %llu\n", 3997 (u64)memsw * PAGE_SIZE); 3998 3999 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4000 unsigned long nr; 4001 4002 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4003 continue; 4004 nr = memcg_page_state(memcg, memcg1_stats[i]); 4005 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4006 (u64)nr * PAGE_SIZE); 4007 } 4008 4009 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4010 seq_printf(m, "total_%s %llu\n", 4011 vm_event_name(memcg1_events[i]), 4012 (u64)memcg_events(memcg, memcg1_events[i])); 4013 4014 for (i = 0; i < NR_LRU_LISTS; i++) 4015 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4016 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4017 PAGE_SIZE); 4018 4019 #ifdef CONFIG_DEBUG_VM 4020 { 4021 pg_data_t *pgdat; 4022 struct mem_cgroup_per_node *mz; 4023 unsigned long anon_cost = 0; 4024 unsigned long file_cost = 0; 4025 4026 for_each_online_pgdat(pgdat) { 4027 mz = memcg->nodeinfo[pgdat->node_id]; 4028 4029 anon_cost += mz->lruvec.anon_cost; 4030 file_cost += mz->lruvec.file_cost; 4031 } 4032 seq_printf(m, "anon_cost %lu\n", anon_cost); 4033 seq_printf(m, "file_cost %lu\n", file_cost); 4034 } 4035 #endif 4036 4037 return 0; 4038 } 4039 4040 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4041 struct cftype *cft) 4042 { 4043 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4044 4045 return mem_cgroup_swappiness(memcg); 4046 } 4047 4048 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4049 struct cftype *cft, u64 val) 4050 { 4051 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4052 4053 if (val > 200) 4054 return -EINVAL; 4055 4056 if (!mem_cgroup_is_root(memcg)) 4057 memcg->swappiness = val; 4058 else 4059 vm_swappiness = val; 4060 4061 return 0; 4062 } 4063 4064 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4065 { 4066 struct mem_cgroup_threshold_ary *t; 4067 unsigned long usage; 4068 int i; 4069 4070 rcu_read_lock(); 4071 if (!swap) 4072 t = rcu_dereference(memcg->thresholds.primary); 4073 else 4074 t = rcu_dereference(memcg->memsw_thresholds.primary); 4075 4076 if (!t) 4077 goto unlock; 4078 4079 usage = mem_cgroup_usage(memcg, swap); 4080 4081 /* 4082 * current_threshold points to threshold 
just below or equal to usage. 4083 * If it's not true, a threshold was crossed after last 4084 * call of __mem_cgroup_threshold(). 4085 */ 4086 i = t->current_threshold; 4087 4088 /* 4089 * Iterate backward over array of thresholds starting from 4090 * current_threshold and check if a threshold is crossed. 4091 * If none of thresholds below usage is crossed, we read 4092 * only one element of the array here. 4093 */ 4094 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4095 eventfd_signal(t->entries[i].eventfd, 1); 4096 4097 /* i = current_threshold + 1 */ 4098 i++; 4099 4100 /* 4101 * Iterate forward over array of thresholds starting from 4102 * current_threshold+1 and check if a threshold is crossed. 4103 * If none of thresholds above usage is crossed, we read 4104 * only one element of the array here. 4105 */ 4106 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4107 eventfd_signal(t->entries[i].eventfd, 1); 4108 4109 /* Update current_threshold */ 4110 t->current_threshold = i - 1; 4111 unlock: 4112 rcu_read_unlock(); 4113 } 4114 4115 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4116 { 4117 while (memcg) { 4118 __mem_cgroup_threshold(memcg, false); 4119 if (do_memsw_account()) 4120 __mem_cgroup_threshold(memcg, true); 4121 4122 memcg = parent_mem_cgroup(memcg); 4123 } 4124 } 4125 4126 static int compare_thresholds(const void *a, const void *b) 4127 { 4128 const struct mem_cgroup_threshold *_a = a; 4129 const struct mem_cgroup_threshold *_b = b; 4130 4131 if (_a->threshold > _b->threshold) 4132 return 1; 4133 4134 if (_a->threshold < _b->threshold) 4135 return -1; 4136 4137 return 0; 4138 } 4139 4140 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4141 { 4142 struct mem_cgroup_eventfd_list *ev; 4143 4144 spin_lock(&memcg_oom_lock); 4145 4146 list_for_each_entry(ev, &memcg->oom_notify, list) 4147 eventfd_signal(ev->eventfd, 1); 4148 4149 spin_unlock(&memcg_oom_lock); 4150 return 0; 4151 } 4152 4153 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4154 { 4155 struct mem_cgroup *iter; 4156 4157 for_each_mem_cgroup_tree(iter, memcg) 4158 mem_cgroup_oom_notify_cb(iter); 4159 } 4160 4161 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4162 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4163 { 4164 struct mem_cgroup_thresholds *thresholds; 4165 struct mem_cgroup_threshold_ary *new; 4166 unsigned long threshold; 4167 unsigned long usage; 4168 int i, size, ret; 4169 4170 ret = page_counter_memparse(args, "-1", &threshold); 4171 if (ret) 4172 return ret; 4173 4174 mutex_lock(&memcg->thresholds_lock); 4175 4176 if (type == _MEM) { 4177 thresholds = &memcg->thresholds; 4178 usage = mem_cgroup_usage(memcg, false); 4179 } else if (type == _MEMSWAP) { 4180 thresholds = &memcg->memsw_thresholds; 4181 usage = mem_cgroup_usage(memcg, true); 4182 } else 4183 BUG(); 4184 4185 /* Check if a threshold crossed before adding a new one */ 4186 if (thresholds->primary) 4187 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4188 4189 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4190 4191 /* Allocate memory for new array of thresholds */ 4192 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4193 if (!new) { 4194 ret = -ENOMEM; 4195 goto unlock; 4196 } 4197 new->size = size; 4198 4199 /* Copy thresholds (if any) to new array */ 4200 if (thresholds->primary) 4201 memcpy(new->entries, thresholds->primary->entries, 4202 flex_array_size(new, entries, size - 1)); 4203 4204 /* Add new threshold */ 4205 new->entries[size - 1].eventfd = eventfd; 4206 new->entries[size - 1].threshold = threshold; 4207 4208 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4209 sort(new->entries, size, sizeof(*new->entries), 4210 compare_thresholds, NULL); 4211 4212 /* Find current threshold */ 4213 new->current_threshold = -1; 4214 for (i = 0; i < size; i++) { 4215 if (new->entries[i].threshold <= usage) { 4216 /* 4217 * new->current_threshold will not be used until 4218 * rcu_assign_pointer(), so it's safe to increment 4219 * it here. 4220 */ 4221 ++new->current_threshold; 4222 } else 4223 break; 4224 } 4225 4226 /* Free old spare buffer and save old primary buffer as spare */ 4227 kfree(thresholds->spare); 4228 thresholds->spare = thresholds->primary; 4229 4230 rcu_assign_pointer(thresholds->primary, new); 4231 4232 /* To be sure that nobody uses thresholds */ 4233 synchronize_rcu(); 4234 4235 unlock: 4236 mutex_unlock(&memcg->thresholds_lock); 4237 4238 return ret; 4239 } 4240 4241 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4242 struct eventfd_ctx *eventfd, const char *args) 4243 { 4244 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4245 } 4246 4247 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4248 struct eventfd_ctx *eventfd, const char *args) 4249 { 4250 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4251 } 4252 4253 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4254 struct eventfd_ctx *eventfd, enum res_type type) 4255 { 4256 struct mem_cgroup_thresholds *thresholds; 4257 struct mem_cgroup_threshold_ary *new; 4258 unsigned long usage; 4259 int i, j, size, entries; 4260 4261 mutex_lock(&memcg->thresholds_lock); 4262 4263 if (type == _MEM) { 4264 thresholds = &memcg->thresholds; 4265 usage = mem_cgroup_usage(memcg, false); 4266 } else if (type == _MEMSWAP) { 4267 thresholds = &memcg->memsw_thresholds; 4268 usage = mem_cgroup_usage(memcg, true); 4269 } else 4270 BUG(); 4271 4272 if (!thresholds->primary) 4273 goto unlock; 4274 4275 /* Check if a threshold crossed before removing */ 4276 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4277 4278 /* Calculate new number of threshold */ 4279 size = entries = 0; 4280 for (i = 0; i < thresholds->primary->size; i++) { 4281 if (thresholds->primary->entries[i].eventfd != eventfd) 4282 size++; 4283 else 4284 entries++; 4285 } 4286 4287 new = thresholds->spare; 4288 4289 /* If no items related to eventfd have been cleared, nothing to do */ 4290 if (!entries) 4291 goto unlock; 4292 4293 /* Set thresholds array to NULL if we don't have thresholds */ 4294 if (!size) { 4295 kfree(new); 4296 new = NULL; 4297 goto swap_buffers; 4298 } 4299 4300 new->size = size; 4301 4302 /* Copy thresholds and find current threshold */ 4303 new->current_threshold = -1; 4304 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4305 if (thresholds->primary->entries[i].eventfd == eventfd) 4306 continue; 4307 4308 new->entries[j] = thresholds->primary->entries[i]; 4309 if 
(new->entries[j].threshold <= usage) { 4310 /* 4311 * new->current_threshold will not be used 4312 * until rcu_assign_pointer(), so it's safe to increment 4313 * it here. 4314 */ 4315 ++new->current_threshold; 4316 } 4317 j++; 4318 } 4319 4320 swap_buffers: 4321 /* Swap primary and spare array */ 4322 thresholds->spare = thresholds->primary; 4323 4324 rcu_assign_pointer(thresholds->primary, new); 4325 4326 /* To be sure that nobody uses thresholds */ 4327 synchronize_rcu(); 4328 4329 /* If all events are unregistered, free the spare array */ 4330 if (!new) { 4331 kfree(thresholds->spare); 4332 thresholds->spare = NULL; 4333 } 4334 unlock: 4335 mutex_unlock(&memcg->thresholds_lock); 4336 } 4337 4338 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4339 struct eventfd_ctx *eventfd) 4340 { 4341 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4342 } 4343 4344 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4345 struct eventfd_ctx *eventfd) 4346 { 4347 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4348 } 4349 4350 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4351 struct eventfd_ctx *eventfd, const char *args) 4352 { 4353 struct mem_cgroup_eventfd_list *event; 4354 4355 event = kmalloc(sizeof(*event), GFP_KERNEL); 4356 if (!event) 4357 return -ENOMEM; 4358 4359 spin_lock(&memcg_oom_lock); 4360 4361 event->eventfd = eventfd; 4362 list_add(&event->list, &memcg->oom_notify); 4363 4364 /* already in OOM ? */ 4365 if (memcg->under_oom) 4366 eventfd_signal(eventfd, 1); 4367 spin_unlock(&memcg_oom_lock); 4368 4369 return 0; 4370 } 4371 4372 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4373 struct eventfd_ctx *eventfd) 4374 { 4375 struct mem_cgroup_eventfd_list *ev, *tmp; 4376 4377 spin_lock(&memcg_oom_lock); 4378 4379 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4380 if (ev->eventfd == eventfd) { 4381 list_del(&ev->list); 4382 kfree(ev); 4383 } 4384 } 4385 4386 spin_unlock(&memcg_oom_lock); 4387 } 4388 4389 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4390 { 4391 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4392 4393 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4394 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4395 seq_printf(sf, "oom_kill %lu\n", 4396 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4397 return 0; 4398 } 4399 4400 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4401 struct cftype *cft, u64 val) 4402 { 4403 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4404 4405 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4406 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) 4407 return -EINVAL; 4408 4409 memcg->oom_kill_disable = val; 4410 if (!val) 4411 memcg_oom_recover(memcg); 4412 4413 return 0; 4414 } 4415 4416 #ifdef CONFIG_CGROUP_WRITEBACK 4417 4418 #include <trace/events/writeback.h> 4419 4420 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4421 { 4422 return wb_domain_init(&memcg->cgwb_domain, gfp); 4423 } 4424 4425 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4426 { 4427 wb_domain_exit(&memcg->cgwb_domain); 4428 } 4429 4430 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4431 { 4432 wb_domain_size_changed(&memcg->cgwb_domain); 4433 } 4434 4435 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4436 { 4437 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css); 4438 4439 if (!memcg->css.parent) 4440 return NULL; 4441 4442 return &memcg->cgwb_domain; 4443 } 4444 4445 /** 4446 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4447 * @wb: bdi_writeback in question 4448 * @pfilepages: out parameter for number of file pages 4449 * @pheadroom: out parameter for number of allocatable pages according to memcg 4450 * @pdirty: out parameter for number of dirty pages 4451 * @pwriteback: out parameter for number of pages under writeback 4452 * 4453 * Determine the numbers of file, headroom, dirty, and writeback pages in 4454 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4455 * is a bit more involved. 4456 * 4457 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4458 * headroom is calculated as the lowest headroom of itself and the 4459 * ancestors. Note that this doesn't consider the actual amount of 4460 * available memory in the system. The caller should further cap 4461 * *@pheadroom accordingly. 4462 */ 4463 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4464 unsigned long *pheadroom, unsigned long *pdirty, 4465 unsigned long *pwriteback) 4466 { 4467 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4468 struct mem_cgroup *parent; 4469 4470 mem_cgroup_flush_stats(); 4471 4472 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); 4473 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); 4474 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + 4475 memcg_page_state(memcg, NR_ACTIVE_FILE); 4476 4477 *pheadroom = PAGE_COUNTER_MAX; 4478 while ((parent = parent_mem_cgroup(memcg))) { 4479 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4480 READ_ONCE(memcg->memory.high)); 4481 unsigned long used = page_counter_read(&memcg->memory); 4482 4483 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4484 memcg = parent; 4485 } 4486 } 4487 4488 /* 4489 * Foreign dirty flushing 4490 * 4491 * There's an inherent mismatch between memcg and writeback. The former 4492 * tracks ownership per-page while the latter per-inode. This was a 4493 * deliberate design decision because honoring per-page ownership in the 4494 * writeback path is complicated, may lead to higher CPU and IO overheads 4495 * and deemed unnecessary given that write-sharing an inode across 4496 * different cgroups isn't a common use-case. 4497 * 4498 * Combined with inode majority-writer ownership switching, this works well 4499 * enough in most cases but there are some pathological cases. For 4500 * example, let's say there are two cgroups A and B which keep writing to 4501 * different but confined parts of the same inode. B owns the inode and 4502 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4503 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4504 * triggering background writeback. A will be slowed down without a way to 4505 * make writeback of the dirty pages happen. 4506 * 4507 * Conditions like the above can lead to a cgroup getting repeatedly and 4508 * severely throttled after making some progress after each 4509 * dirty_expire_interval while the underlying IO device is almost 4510 * completely idle. 4511 * 4512 * Solving this problem completely requires matching the ownership tracking 4513 * granularities between memcg and writeback in either direction. 
However, 4514 * the more egregious behaviors can be avoided by simply remembering the 4515 * most recent foreign dirtying events and initiating remote flushes on 4516 * them when local writeback isn't enough to keep the memory clean enough. 4517 * 4518 * The following two functions implement such mechanism. When a foreign 4519 * page - a page whose memcg and writeback ownerships don't match - is 4520 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning 4521 * bdi_writeback on the page owning memcg. When balance_dirty_pages() 4522 * decides that the memcg needs to sleep due to high dirty ratio, it calls 4523 * mem_cgroup_flush_foreign() which queues writeback on the recorded 4524 * foreign bdi_writebacks which haven't expired. Both the numbers of 4525 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are 4526 * limited to MEMCG_CGWB_FRN_CNT. 4527 * 4528 * The mechanism only remembers IDs and doesn't hold any object references. 4529 * As being wrong occasionally doesn't matter, updates and accesses to the 4530 * records are lockless and racy. 4531 */ 4532 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, 4533 struct bdi_writeback *wb) 4534 { 4535 struct mem_cgroup *memcg = folio_memcg(folio); 4536 struct memcg_cgwb_frn *frn; 4537 u64 now = get_jiffies_64(); 4538 u64 oldest_at = now; 4539 int oldest = -1; 4540 int i; 4541 4542 trace_track_foreign_dirty(folio, wb); 4543 4544 /* 4545 * Pick the slot to use. If there is already a slot for @wb, keep 4546 * using it. If not replace the oldest one which isn't being 4547 * written out. 4548 */ 4549 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4550 frn = &memcg->cgwb_frn[i]; 4551 if (frn->bdi_id == wb->bdi->id && 4552 frn->memcg_id == wb->memcg_css->id) 4553 break; 4554 if (time_before64(frn->at, oldest_at) && 4555 atomic_read(&frn->done.cnt) == 1) { 4556 oldest = i; 4557 oldest_at = frn->at; 4558 } 4559 } 4560 4561 if (i < MEMCG_CGWB_FRN_CNT) { 4562 /* 4563 * Re-using an existing one. Update timestamp lazily to 4564 * avoid making the cacheline hot. We want them to be 4565 * reasonably up-to-date and significantly shorter than 4566 * dirty_expire_interval as that's what expires the record. 4567 * Use the shorter of 1s and dirty_expire_interval / 8. 4568 */ 4569 unsigned long update_intv = 4570 min_t(unsigned long, HZ, 4571 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4572 4573 if (time_before64(frn->at, now - update_intv)) 4574 frn->at = now; 4575 } else if (oldest >= 0) { 4576 /* replace the oldest free one */ 4577 frn = &memcg->cgwb_frn[oldest]; 4578 frn->bdi_id = wb->bdi->id; 4579 frn->memcg_id = wb->memcg_css->id; 4580 frn->at = now; 4581 } 4582 } 4583 4584 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4585 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4586 { 4587 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4588 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4589 u64 now = jiffies_64; 4590 int i; 4591 4592 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4593 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4594 4595 /* 4596 * If the record is older than dirty_expire_interval, 4597 * writeback on it has already started. No need to kick it 4598 * off again. Also, don't start a new one if there's 4599 * already one in flight. 
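* (A done.cnt of 1 means no queued writeback work still holds a reference on this record's wb_completion, i.e. nothing is in flight for it.)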
4600 */ 4601 if (time_after64(frn->at, now - intv) && 4602 atomic_read(&frn->done.cnt) == 1) { 4603 frn->at = 0; 4604 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4605 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 4606 WB_REASON_FOREIGN_FLUSH, 4607 &frn->done); 4608 } 4609 } 4610 } 4611 4612 #else /* CONFIG_CGROUP_WRITEBACK */ 4613 4614 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4615 { 4616 return 0; 4617 } 4618 4619 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4620 { 4621 } 4622 4623 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4624 { 4625 } 4626 4627 #endif /* CONFIG_CGROUP_WRITEBACK */ 4628 4629 /* 4630 * DO NOT USE IN NEW FILES. 4631 * 4632 * "cgroup.event_control" implementation. 4633 * 4634 * This is way over-engineered. It tries to support fully configurable 4635 * events for each user. Such level of flexibility is completely 4636 * unnecessary especially in the light of the planned unified hierarchy. 4637 * 4638 * Please deprecate this and replace with something simpler if at all 4639 * possible. 4640 */ 4641 4642 /* 4643 * Unregister event and free resources. 4644 * 4645 * Gets called from workqueue. 4646 */ 4647 static void memcg_event_remove(struct work_struct *work) 4648 { 4649 struct mem_cgroup_event *event = 4650 container_of(work, struct mem_cgroup_event, remove); 4651 struct mem_cgroup *memcg = event->memcg; 4652 4653 remove_wait_queue(event->wqh, &event->wait); 4654 4655 event->unregister_event(memcg, event->eventfd); 4656 4657 /* Notify userspace the event is going away. */ 4658 eventfd_signal(event->eventfd, 1); 4659 4660 eventfd_ctx_put(event->eventfd); 4661 kfree(event); 4662 css_put(&memcg->css); 4663 } 4664 4665 /* 4666 * Gets called on EPOLLHUP on eventfd when user closes it. 4667 * 4668 * Called with wqh->lock held and interrupts disabled. 4669 */ 4670 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4671 int sync, void *key) 4672 { 4673 struct mem_cgroup_event *event = 4674 container_of(wait, struct mem_cgroup_event, wait); 4675 struct mem_cgroup *memcg = event->memcg; 4676 __poll_t flags = key_to_poll(key); 4677 4678 if (flags & EPOLLHUP) { 4679 /* 4680 * If the event has been detached at cgroup removal, we 4681 * can simply return knowing the other side will cleanup 4682 * for us. 4683 * 4684 * We can't race against event freeing since the other 4685 * side will require wqh->lock via remove_wait_queue(), 4686 * which we hold. 4687 */ 4688 spin_lock(&memcg->event_list_lock); 4689 if (!list_empty(&event->list)) { 4690 list_del_init(&event->list); 4691 /* 4692 * We are in atomic context, but cgroup_event_remove() 4693 * may sleep, so we have to call it in workqueue. 4694 */ 4695 schedule_work(&event->remove); 4696 } 4697 spin_unlock(&memcg->event_list_lock); 4698 } 4699 4700 return 0; 4701 } 4702 4703 static void memcg_event_ptable_queue_proc(struct file *file, 4704 wait_queue_head_t *wqh, poll_table *pt) 4705 { 4706 struct mem_cgroup_event *event = 4707 container_of(pt, struct mem_cgroup_event, pt); 4708 4709 event->wqh = wqh; 4710 add_wait_queue(wqh, &event->wait); 4711 } 4712 4713 /* 4714 * DO NOT USE IN NEW FILES. 4715 * 4716 * Parse input and register new cgroup event handler. 4717 * 4718 * Input must be in format '<event_fd> <control_fd> <args>'. 4719 * Interpretation of args is defined by control file implementation. 
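* For example, to be notified when usage crosses 1G, userspace creates an eventfd, opens memory.usage_in_bytes, and writes "<event_fd> <control_fd> 1073741824" to cgroup.event_control; the threshold argument is parsed with page_counter_memparse(), so suffixed values such as "1G" are also accepted.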
4720 */ 4721 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4722 char *buf, size_t nbytes, loff_t off) 4723 { 4724 struct cgroup_subsys_state *css = of_css(of); 4725 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4726 struct mem_cgroup_event *event; 4727 struct cgroup_subsys_state *cfile_css; 4728 unsigned int efd, cfd; 4729 struct fd efile; 4730 struct fd cfile; 4731 const char *name; 4732 char *endp; 4733 int ret; 4734 4735 buf = strstrip(buf); 4736 4737 efd = simple_strtoul(buf, &endp, 10); 4738 if (*endp != ' ') 4739 return -EINVAL; 4740 buf = endp + 1; 4741 4742 cfd = simple_strtoul(buf, &endp, 10); 4743 if ((*endp != ' ') && (*endp != '\0')) 4744 return -EINVAL; 4745 buf = endp + 1; 4746 4747 event = kzalloc(sizeof(*event), GFP_KERNEL); 4748 if (!event) 4749 return -ENOMEM; 4750 4751 event->memcg = memcg; 4752 INIT_LIST_HEAD(&event->list); 4753 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4754 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4755 INIT_WORK(&event->remove, memcg_event_remove); 4756 4757 efile = fdget(efd); 4758 if (!efile.file) { 4759 ret = -EBADF; 4760 goto out_kfree; 4761 } 4762 4763 event->eventfd = eventfd_ctx_fileget(efile.file); 4764 if (IS_ERR(event->eventfd)) { 4765 ret = PTR_ERR(event->eventfd); 4766 goto out_put_efile; 4767 } 4768 4769 cfile = fdget(cfd); 4770 if (!cfile.file) { 4771 ret = -EBADF; 4772 goto out_put_eventfd; 4773 } 4774 4775 /* the process need read permission on control file */ 4776 /* AV: shouldn't we check that it's been opened for read instead? */ 4777 ret = file_permission(cfile.file, MAY_READ); 4778 if (ret < 0) 4779 goto out_put_cfile; 4780 4781 /* 4782 * Determine the event callbacks and set them in @event. This used 4783 * to be done via struct cftype but cgroup core no longer knows 4784 * about these events. The following is crude but the whole thing 4785 * is for compatibility anyway. 4786 * 4787 * DO NOT ADD NEW FILES. 4788 */ 4789 name = cfile.file->f_path.dentry->d_name.name; 4790 4791 if (!strcmp(name, "memory.usage_in_bytes")) { 4792 event->register_event = mem_cgroup_usage_register_event; 4793 event->unregister_event = mem_cgroup_usage_unregister_event; 4794 } else if (!strcmp(name, "memory.oom_control")) { 4795 event->register_event = mem_cgroup_oom_register_event; 4796 event->unregister_event = mem_cgroup_oom_unregister_event; 4797 } else if (!strcmp(name, "memory.pressure_level")) { 4798 event->register_event = vmpressure_register_event; 4799 event->unregister_event = vmpressure_unregister_event; 4800 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4801 event->register_event = memsw_cgroup_usage_register_event; 4802 event->unregister_event = memsw_cgroup_usage_unregister_event; 4803 } else { 4804 ret = -EINVAL; 4805 goto out_put_cfile; 4806 } 4807 4808 /* 4809 * Verify @cfile should belong to @css. Also, remaining events are 4810 * automatically removed on cgroup destruction but the removal is 4811 * asynchronous, so take an extra ref on @css. 
4812 */ 4813 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4814 &memory_cgrp_subsys); 4815 ret = -EINVAL; 4816 if (IS_ERR(cfile_css)) 4817 goto out_put_cfile; 4818 if (cfile_css != css) { 4819 css_put(cfile_css); 4820 goto out_put_cfile; 4821 } 4822 4823 ret = event->register_event(memcg, event->eventfd, buf); 4824 if (ret) 4825 goto out_put_css; 4826 4827 vfs_poll(efile.file, &event->pt); 4828 4829 spin_lock_irq(&memcg->event_list_lock); 4830 list_add(&event->list, &memcg->event_list); 4831 spin_unlock_irq(&memcg->event_list_lock); 4832 4833 fdput(cfile); 4834 fdput(efile); 4835 4836 return nbytes; 4837 4838 out_put_css: 4839 css_put(css); 4840 out_put_cfile: 4841 fdput(cfile); 4842 out_put_eventfd: 4843 eventfd_ctx_put(event->eventfd); 4844 out_put_efile: 4845 fdput(efile); 4846 out_kfree: 4847 kfree(event); 4848 4849 return ret; 4850 } 4851 4852 static struct cftype mem_cgroup_legacy_files[] = { 4853 { 4854 .name = "usage_in_bytes", 4855 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4856 .read_u64 = mem_cgroup_read_u64, 4857 }, 4858 { 4859 .name = "max_usage_in_bytes", 4860 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4861 .write = mem_cgroup_reset, 4862 .read_u64 = mem_cgroup_read_u64, 4863 }, 4864 { 4865 .name = "limit_in_bytes", 4866 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4867 .write = mem_cgroup_write, 4868 .read_u64 = mem_cgroup_read_u64, 4869 }, 4870 { 4871 .name = "soft_limit_in_bytes", 4872 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4873 .write = mem_cgroup_write, 4874 .read_u64 = mem_cgroup_read_u64, 4875 }, 4876 { 4877 .name = "failcnt", 4878 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4879 .write = mem_cgroup_reset, 4880 .read_u64 = mem_cgroup_read_u64, 4881 }, 4882 { 4883 .name = "stat", 4884 .seq_show = memcg_stat_show, 4885 }, 4886 { 4887 .name = "force_empty", 4888 .write = mem_cgroup_force_empty_write, 4889 }, 4890 { 4891 .name = "use_hierarchy", 4892 .write_u64 = mem_cgroup_hierarchy_write, 4893 .read_u64 = mem_cgroup_hierarchy_read, 4894 }, 4895 { 4896 .name = "cgroup.event_control", /* XXX: for compat */ 4897 .write = memcg_write_event_control, 4898 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4899 }, 4900 { 4901 .name = "swappiness", 4902 .read_u64 = mem_cgroup_swappiness_read, 4903 .write_u64 = mem_cgroup_swappiness_write, 4904 }, 4905 { 4906 .name = "move_charge_at_immigrate", 4907 .read_u64 = mem_cgroup_move_charge_read, 4908 .write_u64 = mem_cgroup_move_charge_write, 4909 }, 4910 { 4911 .name = "oom_control", 4912 .seq_show = mem_cgroup_oom_control_read, 4913 .write_u64 = mem_cgroup_oom_control_write, 4914 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4915 }, 4916 { 4917 .name = "pressure_level", 4918 }, 4919 #ifdef CONFIG_NUMA 4920 { 4921 .name = "numa_stat", 4922 .seq_show = memcg_numa_stat_show, 4923 }, 4924 #endif 4925 { 4926 .name = "kmem.limit_in_bytes", 4927 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4928 .write = mem_cgroup_write, 4929 .read_u64 = mem_cgroup_read_u64, 4930 }, 4931 { 4932 .name = "kmem.usage_in_bytes", 4933 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4934 .read_u64 = mem_cgroup_read_u64, 4935 }, 4936 { 4937 .name = "kmem.failcnt", 4938 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4939 .write = mem_cgroup_reset, 4940 .read_u64 = mem_cgroup_read_u64, 4941 }, 4942 { 4943 .name = "kmem.max_usage_in_bytes", 4944 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4945 .write = mem_cgroup_reset, 4946 .read_u64 = mem_cgroup_read_u64, 4947 }, 4948 #if defined(CONFIG_MEMCG_KMEM) && 
\ 4949 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 4950 { 4951 .name = "kmem.slabinfo", 4952 .seq_show = memcg_slab_show, 4953 }, 4954 #endif 4955 { 4956 .name = "kmem.tcp.limit_in_bytes", 4957 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 4958 .write = mem_cgroup_write, 4959 .read_u64 = mem_cgroup_read_u64, 4960 }, 4961 { 4962 .name = "kmem.tcp.usage_in_bytes", 4963 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 4964 .read_u64 = mem_cgroup_read_u64, 4965 }, 4966 { 4967 .name = "kmem.tcp.failcnt", 4968 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 4969 .write = mem_cgroup_reset, 4970 .read_u64 = mem_cgroup_read_u64, 4971 }, 4972 { 4973 .name = "kmem.tcp.max_usage_in_bytes", 4974 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 4975 .write = mem_cgroup_reset, 4976 .read_u64 = mem_cgroup_read_u64, 4977 }, 4978 { }, /* terminate */ 4979 }; 4980 4981 /* 4982 * Private memory cgroup IDR 4983 * 4984 * Swap-out records and page cache shadow entries need to store memcg 4985 * references in constrained space, so we maintain an ID space that is 4986 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 4987 * memory-controlled cgroups to 64k. 4988 * 4989 * However, there usually are many references to the offline CSS after 4990 * the cgroup has been destroyed, such as page cache or reclaimable 4991 * slab objects, that don't need to hang on to the ID. We want to keep 4992 * those dead CSS from occupying IDs, or we might quickly exhaust the 4993 * relatively small ID space and prevent the creation of new cgroups 4994 * even when there are much fewer than 64k cgroups - possibly none. 4995 * 4996 * Maintain a private 16-bit ID space for memcg, and allow the ID to 4997 * be freed and recycled when it's no longer needed, which is usually 4998 * when the CSS is offlined. 4999 * 5000 * The only exception to that are records of swapped out tmpfs/shmem 5001 * pages that need to be attributed to live ancestors on swapin. But 5002 * those references are manageable from userspace. 5003 */ 5004 5005 static DEFINE_IDR(mem_cgroup_idr); 5006 5007 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5008 { 5009 if (memcg->id.id > 0) { 5010 idr_remove(&mem_cgroup_idr, memcg->id.id); 5011 memcg->id.id = 0; 5012 } 5013 } 5014 5015 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5016 unsigned int n) 5017 { 5018 refcount_add(n, &memcg->id.ref); 5019 } 5020 5021 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5022 { 5023 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5024 mem_cgroup_id_remove(memcg); 5025 5026 /* Memcg ID pins CSS */ 5027 css_put(&memcg->css); 5028 } 5029 } 5030 5031 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5032 { 5033 mem_cgroup_id_put_many(memcg, 1); 5034 } 5035 5036 /** 5037 * mem_cgroup_from_id - look up a memcg from a memcg id 5038 * @id: the memcg id to look up 5039 * 5040 * Caller must hold rcu_read_lock(). 5041 */ 5042 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5043 { 5044 WARN_ON_ONCE(!rcu_read_lock_held()); 5045 return idr_find(&mem_cgroup_idr, id); 5046 } 5047 5048 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5049 { 5050 struct mem_cgroup_per_node *pn; 5051 int tmp = node; 5052 /* 5053 * This routine is called against possible nodes. 5054 * But it's BUG to call kmalloc() against offline node. 5055 * 5056 * TODO: this routine can waste much memory for nodes which will 5057 * never be onlined. 
It's better to use memory hotplug callback 5058 * function. 5059 */ 5060 if (!node_state(node, N_NORMAL_MEMORY)) 5061 tmp = -1; 5062 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5063 if (!pn) 5064 return 1; 5065 5066 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, 5067 GFP_KERNEL_ACCOUNT); 5068 if (!pn->lruvec_stats_percpu) { 5069 kfree(pn); 5070 return 1; 5071 } 5072 5073 lruvec_init(&pn->lruvec); 5074 pn->usage_in_excess = 0; 5075 pn->on_tree = false; 5076 pn->memcg = memcg; 5077 5078 memcg->nodeinfo[node] = pn; 5079 return 0; 5080 } 5081 5082 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5083 { 5084 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5085 5086 if (!pn) 5087 return; 5088 5089 free_percpu(pn->lruvec_stats_percpu); 5090 kfree(pn); 5091 } 5092 5093 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5094 { 5095 int node; 5096 5097 for_each_node(node) 5098 free_mem_cgroup_per_node_info(memcg, node); 5099 free_percpu(memcg->vmstats_percpu); 5100 kfree(memcg); 5101 } 5102 5103 static void mem_cgroup_free(struct mem_cgroup *memcg) 5104 { 5105 memcg_wb_domain_exit(memcg); 5106 __mem_cgroup_free(memcg); 5107 } 5108 5109 static struct mem_cgroup *mem_cgroup_alloc(void) 5110 { 5111 struct mem_cgroup *memcg; 5112 unsigned int size; 5113 int node; 5114 int __maybe_unused i; 5115 long error = -ENOMEM; 5116 5117 size = sizeof(struct mem_cgroup); 5118 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5119 5120 memcg = kzalloc(size, GFP_KERNEL); 5121 if (!memcg) 5122 return ERR_PTR(error); 5123 5124 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5125 1, MEM_CGROUP_ID_MAX, 5126 GFP_KERNEL); 5127 if (memcg->id.id < 0) { 5128 error = memcg->id.id; 5129 goto fail; 5130 } 5131 5132 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5133 GFP_KERNEL_ACCOUNT); 5134 if (!memcg->vmstats_percpu) 5135 goto fail; 5136 5137 for_each_node(node) 5138 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5139 goto fail; 5140 5141 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5142 goto fail; 5143 5144 INIT_WORK(&memcg->high_work, high_work_func); 5145 INIT_LIST_HEAD(&memcg->oom_notify); 5146 mutex_init(&memcg->thresholds_lock); 5147 spin_lock_init(&memcg->move_lock); 5148 vmpressure_init(&memcg->vmpressure); 5149 INIT_LIST_HEAD(&memcg->event_list); 5150 spin_lock_init(&memcg->event_list_lock); 5151 memcg->socket_pressure = jiffies; 5152 #ifdef CONFIG_MEMCG_KMEM 5153 memcg->kmemcg_id = -1; 5154 INIT_LIST_HEAD(&memcg->objcg_list); 5155 #endif 5156 #ifdef CONFIG_CGROUP_WRITEBACK 5157 INIT_LIST_HEAD(&memcg->cgwb_list); 5158 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5159 memcg->cgwb_frn[i].done = 5160 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5161 #endif 5162 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5163 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5164 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5165 memcg->deferred_split_queue.split_queue_len = 0; 5166 #endif 5167 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5168 return memcg; 5169 fail: 5170 mem_cgroup_id_remove(memcg); 5171 __mem_cgroup_free(memcg); 5172 return ERR_PTR(error); 5173 } 5174 5175 static struct cgroup_subsys_state * __ref 5176 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5177 { 5178 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5179 struct mem_cgroup *memcg, *old_memcg; 5180 long error = -ENOMEM; 5181 5182 old_memcg = set_active_memcg(parent); 5183 memcg = mem_cgroup_alloc(); 5184 
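/* mem_cgroup_alloc() uses GFP_KERNEL_ACCOUNT for its percpu and per-node allocations, so they were charged to the parent made active above; restore the previously active memcg. */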
set_active_memcg(old_memcg); 5185 if (IS_ERR(memcg)) 5186 return ERR_CAST(memcg); 5187 5188 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5189 memcg->soft_limit = PAGE_COUNTER_MAX; 5190 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5191 if (parent) { 5192 memcg->swappiness = mem_cgroup_swappiness(parent); 5193 memcg->oom_kill_disable = parent->oom_kill_disable; 5194 5195 page_counter_init(&memcg->memory, &parent->memory); 5196 page_counter_init(&memcg->swap, &parent->swap); 5197 page_counter_init(&memcg->kmem, &parent->kmem); 5198 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5199 } else { 5200 page_counter_init(&memcg->memory, NULL); 5201 page_counter_init(&memcg->swap, NULL); 5202 page_counter_init(&memcg->kmem, NULL); 5203 page_counter_init(&memcg->tcpmem, NULL); 5204 5205 root_mem_cgroup = memcg; 5206 return &memcg->css; 5207 } 5208 5209 /* The following stuff does not apply to the root */ 5210 error = memcg_online_kmem(memcg); 5211 if (error) 5212 goto fail; 5213 5214 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5215 static_branch_inc(&memcg_sockets_enabled_key); 5216 5217 return &memcg->css; 5218 fail: 5219 mem_cgroup_id_remove(memcg); 5220 mem_cgroup_free(memcg); 5221 return ERR_PTR(error); 5222 } 5223 5224 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5225 { 5226 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5227 5228 /* 5229 * A memcg must be visible for expand_shrinker_info() 5230 * by the time the maps are allocated. So, we allocate maps 5231 * here, when for_each_mem_cgroup() can't skip it. 5232 */ 5233 if (alloc_shrinker_info(memcg)) { 5234 mem_cgroup_id_remove(memcg); 5235 return -ENOMEM; 5236 } 5237 5238 /* Online state pins memcg ID, memcg ID pins CSS */ 5239 refcount_set(&memcg->id.ref, 1); 5240 css_get(css); 5241 5242 if (unlikely(mem_cgroup_is_root(memcg))) 5243 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 5244 2UL*HZ); 5245 return 0; 5246 } 5247 5248 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5249 { 5250 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5251 struct mem_cgroup_event *event, *tmp; 5252 5253 /* 5254 * Unregister events and notify userspace. 5255 * Notify userspace about cgroup removing only after rmdir of cgroup 5256 * directory to avoid race between userspace and kernelspace. 
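* The actual unregistration and cleanup are deferred to memcg_event_remove() via the schedule_work() call below, one per registered event.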
5257 */ 5258 spin_lock_irq(&memcg->event_list_lock); 5259 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5260 list_del_init(&event->list); 5261 schedule_work(&event->remove); 5262 } 5263 spin_unlock_irq(&memcg->event_list_lock); 5264 5265 page_counter_set_min(&memcg->memory, 0); 5266 page_counter_set_low(&memcg->memory, 0); 5267 5268 memcg_offline_kmem(memcg); 5269 reparent_shrinker_deferred(memcg); 5270 wb_memcg_offline(memcg); 5271 5272 drain_all_stock(memcg); 5273 5274 mem_cgroup_id_put(memcg); 5275 } 5276 5277 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5278 { 5279 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5280 5281 invalidate_reclaim_iterators(memcg); 5282 } 5283 5284 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5285 { 5286 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5287 int __maybe_unused i; 5288 5289 #ifdef CONFIG_CGROUP_WRITEBACK 5290 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5291 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5292 #endif 5293 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5294 static_branch_dec(&memcg_sockets_enabled_key); 5295 5296 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5297 static_branch_dec(&memcg_sockets_enabled_key); 5298 5299 vmpressure_cleanup(&memcg->vmpressure); 5300 cancel_work_sync(&memcg->high_work); 5301 mem_cgroup_remove_from_trees(memcg); 5302 free_shrinker_info(memcg); 5303 5304 /* Need to offline kmem if online_css() fails */ 5305 memcg_offline_kmem(memcg); 5306 mem_cgroup_free(memcg); 5307 } 5308 5309 /** 5310 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5311 * @css: the target css 5312 * 5313 * Reset the states of the mem_cgroup associated with @css. This is 5314 * invoked when the userland requests disabling on the default hierarchy 5315 * but the memcg is pinned through dependency. The memcg should stop 5316 * applying policies and should revert to the vanilla state as it may be 5317 * made visible again. 5318 * 5319 * The current implementation only resets the essential configurations. 5320 * This needs to be expanded to cover all the visible parts. 5321 */ 5322 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5323 { 5324 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5325 5326 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5327 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5328 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5329 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5330 page_counter_set_min(&memcg->memory, 0); 5331 page_counter_set_low(&memcg->memory, 0); 5332 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5333 memcg->soft_limit = PAGE_COUNTER_MAX; 5334 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5335 memcg_wb_domain_size_changed(memcg); 5336 } 5337 5338 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu) 5339 { 5340 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5341 struct mem_cgroup *parent = parent_mem_cgroup(memcg); 5342 struct memcg_vmstats_percpu *statc; 5343 long delta, v; 5344 int i, nid; 5345 5346 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); 5347 5348 for (i = 0; i < MEMCG_NR_STAT; i++) { 5349 /* 5350 * Collect the aggregated propagation counts of groups 5351 * below us. We're in a per-cpu loop here and this is 5352 * a global counter, so the first cycle will get them. 
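* state_pending[] holds deltas already flushed by our descendants; they are folded into this level's totals and then forwarded to the parent below.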
5353 */ 5354 delta = memcg->vmstats.state_pending[i]; 5355 if (delta) 5356 memcg->vmstats.state_pending[i] = 0; 5357 5358 /* Add CPU changes on this level since the last flush */ 5359 v = READ_ONCE(statc->state[i]); 5360 if (v != statc->state_prev[i]) { 5361 delta += v - statc->state_prev[i]; 5362 statc->state_prev[i] = v; 5363 } 5364 5365 if (!delta) 5366 continue; 5367 5368 /* Aggregate counts on this level and propagate upwards */ 5369 memcg->vmstats.state[i] += delta; 5370 if (parent) 5371 parent->vmstats.state_pending[i] += delta; 5372 } 5373 5374 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 5375 delta = memcg->vmstats.events_pending[i]; 5376 if (delta) 5377 memcg->vmstats.events_pending[i] = 0; 5378 5379 v = READ_ONCE(statc->events[i]); 5380 if (v != statc->events_prev[i]) { 5381 delta += v - statc->events_prev[i]; 5382 statc->events_prev[i] = v; 5383 } 5384 5385 if (!delta) 5386 continue; 5387 5388 memcg->vmstats.events[i] += delta; 5389 if (parent) 5390 parent->vmstats.events_pending[i] += delta; 5391 } 5392 5393 for_each_node_state(nid, N_MEMORY) { 5394 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; 5395 struct mem_cgroup_per_node *ppn = NULL; 5396 struct lruvec_stats_percpu *lstatc; 5397 5398 if (parent) 5399 ppn = parent->nodeinfo[nid]; 5400 5401 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); 5402 5403 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { 5404 delta = pn->lruvec_stats.state_pending[i]; 5405 if (delta) 5406 pn->lruvec_stats.state_pending[i] = 0; 5407 5408 v = READ_ONCE(lstatc->state[i]); 5409 if (v != lstatc->state_prev[i]) { 5410 delta += v - lstatc->state_prev[i]; 5411 lstatc->state_prev[i] = v; 5412 } 5413 5414 if (!delta) 5415 continue; 5416 5417 pn->lruvec_stats.state[i] += delta; 5418 if (ppn) 5419 ppn->lruvec_stats.state_pending[i] += delta; 5420 } 5421 } 5422 } 5423 5424 #ifdef CONFIG_MMU 5425 /* Handlers for move charge at task migration. 
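* mem_cgroup_do_precharge() below charges mc.to up front for the pages that will be moved; mem_cgroup_move_account() itself neither charges the destination nor uncharges the source.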
*/
5426 static int mem_cgroup_do_precharge(unsigned long count)
5427 {
5428 int ret;
5429
5430 /* Try a single bulk charge without reclaim first, kswapd may wake */
5431 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5432 if (!ret) {
5433 mc.precharge += count;
5434 return ret;
5435 }
5436
5437 /* Try charges one by one with reclaim, but do not retry */
5438 while (count--) {
5439 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5440 if (ret)
5441 return ret;
5442 mc.precharge++;
5443 cond_resched();
5444 }
5445 return 0;
5446 }
5447
5448 union mc_target {
5449 struct page *page;
5450 swp_entry_t ent;
5451 };
5452
5453 enum mc_target_type {
5454 MC_TARGET_NONE = 0,
5455 MC_TARGET_PAGE,
5456 MC_TARGET_SWAP,
5457 MC_TARGET_DEVICE,
5458 };
5459
5460 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5461 unsigned long addr, pte_t ptent)
5462 {
5463 struct page *page = vm_normal_page(vma, addr, ptent);
5464
5465 if (!page || !page_mapped(page))
5466 return NULL;
5467 if (PageAnon(page)) {
5468 if (!(mc.flags & MOVE_ANON))
5469 return NULL;
5470 } else {
5471 if (!(mc.flags & MOVE_FILE))
5472 return NULL;
5473 }
5474 if (!get_page_unless_zero(page))
5475 return NULL;
5476
5477 return page;
5478 }
5479
5480 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5481 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5482 pte_t ptent, swp_entry_t *entry)
5483 {
5484 struct page *page = NULL;
5485 swp_entry_t ent = pte_to_swp_entry(ptent);
5486
5487 if (!(mc.flags & MOVE_ANON))
5488 return NULL;
5489
5490 /*
5491 * Handle MEMORY_DEVICE_PRIVATE entries: these are ZONE_DEVICE pages
5492 * belonging to a device and, because they are not accessible by the
5493 * CPU, they are stored as special swap entries in the CPU page table.
5494 */
5495 if (is_device_private_entry(ent)) {
5496 page = pfn_swap_entry_to_page(ent);
5497 /*
5498 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5499 * a refcount of 1 when free (unlike a normal page).
5500 */
5501 if (!page_ref_add_unless(page, 1, 1))
5502 return NULL;
5503 return page;
5504 }
5505
5506 if (non_swap_entry(ent))
5507 return NULL;
5508
5509 /*
5510 * Because lookup_swap_cache() updates some statistics counters,
5511 * we call find_get_page() with swapper_space directly.
5512 */
5513 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5514 entry->val = ent.val;
5515
5516 return page;
5517 }
5518 #else
5519 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5520 pte_t ptent, swp_entry_t *entry)
5521 {
5522 return NULL;
5523 }
5524 #endif
5525
5526 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5527 unsigned long addr, pte_t ptent)
5528 {
5529 if (!vma->vm_file) /* anonymous vma */
5530 return NULL;
5531 if (!(mc.flags & MOVE_FILE))
5532 return NULL;
5533
5534 /* The page is moved even if it's not RSS of this task (page-faulted). */
5535 /* shmem/tmpfs may report page out on swap: account for that too. */
5536 return find_get_incore_page(vma->vm_file->f_mapping,
5537 linear_page_index(vma, addr));
5538 }
5539
5540 /**
5541 * mem_cgroup_move_account - move account of the page
5542 * @page: the page
5543 * @compound: charge the page as compound or small page
5544 * @from: mem_cgroup which the page is moved from.
5545 * @to: mem_cgroup which the page is moved to. @from != @to.
5546 *
5547 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5548 * 5549 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5550 * from old cgroup. 5551 */ 5552 static int mem_cgroup_move_account(struct page *page, 5553 bool compound, 5554 struct mem_cgroup *from, 5555 struct mem_cgroup *to) 5556 { 5557 struct folio *folio = page_folio(page); 5558 struct lruvec *from_vec, *to_vec; 5559 struct pglist_data *pgdat; 5560 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1; 5561 int nid, ret; 5562 5563 VM_BUG_ON(from == to); 5564 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 5565 VM_BUG_ON(compound && !folio_test_multi(folio)); 5566 5567 /* 5568 * Prevent mem_cgroup_migrate() from looking at 5569 * page's memory cgroup of its source page while we change it. 5570 */ 5571 ret = -EBUSY; 5572 if (!folio_trylock(folio)) 5573 goto out; 5574 5575 ret = -EINVAL; 5576 if (folio_memcg(folio) != from) 5577 goto out_unlock; 5578 5579 pgdat = folio_pgdat(folio); 5580 from_vec = mem_cgroup_lruvec(from, pgdat); 5581 to_vec = mem_cgroup_lruvec(to, pgdat); 5582 5583 folio_memcg_lock(folio); 5584 5585 if (folio_test_anon(folio)) { 5586 if (folio_mapped(folio)) { 5587 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5588 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5589 if (folio_test_transhuge(folio)) { 5590 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5591 -nr_pages); 5592 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5593 nr_pages); 5594 } 5595 } 5596 } else { 5597 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5598 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5599 5600 if (folio_test_swapbacked(folio)) { 5601 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5602 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5603 } 5604 5605 if (folio_mapped(folio)) { 5606 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5607 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5608 } 5609 5610 if (folio_test_dirty(folio)) { 5611 struct address_space *mapping = folio_mapping(folio); 5612 5613 if (mapping_can_writeback(mapping)) { 5614 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5615 -nr_pages); 5616 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5617 nr_pages); 5618 } 5619 } 5620 } 5621 5622 if (folio_test_writeback(folio)) { 5623 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5624 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5625 } 5626 5627 /* 5628 * All state has been migrated, let's switch to the new memcg. 5629 * 5630 * It is safe to change page's memcg here because the page 5631 * is referenced, charged, isolated, and locked: we can't race 5632 * with (un)charging, migration, LRU putback, or anything else 5633 * that would rely on a stable page's memory cgroup. 5634 * 5635 * Note that lock_page_memcg is a memcg lock, not a page lock, 5636 * to save space. As soon as we switch page's memory cgroup to a 5637 * new memcg that isn't locked, the above state can change 5638 * concurrently again. Make sure we're truly done with it. 
5639 */
5640 smp_mb();
5641
5642 css_get(&to->css);
5643 css_put(&from->css);
5644
5645 folio->memcg_data = (unsigned long)to;
5646
5647 __folio_memcg_unlock(from);
5648
5649 ret = 0;
5650 nid = folio_nid(folio);
5651
5652 local_irq_disable();
5653 mem_cgroup_charge_statistics(to, nr_pages);
5654 memcg_check_events(to, nid);
5655 mem_cgroup_charge_statistics(from, -nr_pages);
5656 memcg_check_events(from, nid);
5657 local_irq_enable();
5658 out_unlock:
5659 folio_unlock(folio);
5660 out:
5661 return ret;
5662 }
5663
5664 /**
5665 * get_mctgt_type - get target type of moving charge
5666 * @vma: the vma the pte to be checked belongs
5667 * @addr: the address corresponding to the pte to be checked
5668 * @ptent: the pte to be checked
5669 * @target: the pointer in which the target page or swap entry will be stored (can be NULL)
5670 *
5671 * Returns
5672 * 0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5673 * 1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5674 * move charge. If @target is not NULL, the page is stored in target->page
5675 * with an extra refcount taken (callers should handle it).
5676 * 2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5677 * target for charge migration. If @target is not NULL, the entry is stored
5678 * in target->ent.
5679 * 3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5680 * (so a ZONE_DEVICE page and thus not on the lru).
5681 * For now such a page is charged like a regular page would be, as for all
5682 * intents and purposes it is just special memory taking the place of a
5683 * regular page.
5684 *
5685 * See Documentation/vm/hmm.rst and include/linux/hmm.h
5686 *
5687 * Called with pte lock held.
5688 */
5689
5690 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5691 unsigned long addr, pte_t ptent, union mc_target *target)
5692 {
5693 struct page *page = NULL;
5694 enum mc_target_type ret = MC_TARGET_NONE;
5695 swp_entry_t ent = { .val = 0 };
5696
5697 if (pte_present(ptent))
5698 page = mc_handle_present_pte(vma, addr, ptent);
5699 else if (is_swap_pte(ptent))
5700 page = mc_handle_swap_pte(vma, ptent, &ent);
5701 else if (pte_none(ptent))
5702 page = mc_handle_file_pte(vma, addr, ptent);
5703
5704 if (!page && !ent.val)
5705 return ret;
5706 if (page) {
5707 /*
5708 * Do only a loose check w/o serialization.
5709 * mem_cgroup_move_account() checks whether the page is valid
5710 * or not under LRU exclusion.
5711 */
5712 if (page_memcg(page) == mc.from) {
5713 ret = MC_TARGET_PAGE;
5714 if (is_device_private_page(page))
5715 ret = MC_TARGET_DEVICE;
5716 if (target)
5717 target->page = page;
5718 }
5719 if (!ret || !target)
5720 put_page(page);
5721 }
5722 /*
5723 * There is a swap entry and a page doesn't exist or isn't charged.
5724 * But we cannot move a tail-page in a THP.
5725 */
5726 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5727 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5728 ret = MC_TARGET_SWAP;
5729 if (target)
5730 target->ent = ent;
5731 }
5732 return ret;
5733 }
5734
5735 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5736 /*
5737 * We don't consider PMD mapped swapping or file mapped pages because THP does
5738 * not support them for now.
5739 * Caller should make sure that pmd_trans_huge(pmd) is true.
5740 */
5741 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5742 unsigned long addr, pmd_t pmd, union mc_target *target)
5743 {
5744 struct page *page = NULL;
5745 enum mc_target_type ret = MC_TARGET_NONE;
5746
5747 if (unlikely(is_swap_pmd(pmd))) {
5748 VM_BUG_ON(thp_migration_supported() &&
5749 !is_pmd_migration_entry(pmd));
5750 return ret;
5751 }
5752 page = pmd_page(pmd);
5753 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5754 if (!(mc.flags & MOVE_ANON))
5755 return ret;
5756 if (page_memcg(page) == mc.from) {
5757 ret = MC_TARGET_PAGE;
5758 if (target) {
5759 get_page(page);
5760 target->page = page;
5761 }
5762 }
5763 return ret;
5764 }
5765 #else
5766 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5767 unsigned long addr, pmd_t pmd, union mc_target *target)
5768 {
5769 return MC_TARGET_NONE;
5770 }
5771 #endif
5772
5773 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5774 unsigned long addr, unsigned long end,
5775 struct mm_walk *walk)
5776 {
5777 struct vm_area_struct *vma = walk->vma;
5778 pte_t *pte;
5779 spinlock_t *ptl;
5780
5781 ptl = pmd_trans_huge_lock(pmd, vma);
5782 if (ptl) {
5783 /*
5784 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
5785 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5786 * this might change.
5787 */
5788 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5789 mc.precharge += HPAGE_PMD_NR;
5790 spin_unlock(ptl);
5791 return 0;
5792 }
5793
5794 if (pmd_trans_unstable(pmd))
5795 return 0;
5796 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5797 for (; addr != end; pte++, addr += PAGE_SIZE)
5798 if (get_mctgt_type(vma, addr, *pte, NULL))
5799 mc.precharge++; /* increment precharge temporarily */
5800 pte_unmap_unlock(pte - 1, ptl);
5801 cond_resched();
5802
5803 return 0;
5804 }
5805
5806 static const struct mm_walk_ops precharge_walk_ops = {
5807 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5808 };
5809
5810 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5811 {
5812 unsigned long precharge;
5813
5814 mmap_read_lock(mm);
5815 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5816 mmap_read_unlock(mm);
5817
5818 precharge = mc.precharge;
5819 mc.precharge = 0;
5820
5821 return precharge;
5822 }
5823
5824 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5825 {
5826 unsigned long precharge = mem_cgroup_count_precharge(mm);
5827
5828 VM_BUG_ON(mc.moving_task);
5829 mc.moving_task = current;
5830 return mem_cgroup_do_precharge(precharge);
5831 }
5832
5833 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5834 static void __mem_cgroup_clear_mc(void)
5835 {
5836 struct mem_cgroup *from = mc.from;
5837 struct mem_cgroup *to = mc.to;
5838
5839 /* we must uncharge all the leftover precharges from mc.to */
5840 if (mc.precharge) {
5841 cancel_charge(mc.to, mc.precharge);
5842 mc.precharge = 0;
5843 }
5844 /*
5845 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5846 * we must uncharge here.
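* (mc.moved_charge counts pages whose memcg has already been switched to mc.to; their charge on mc.from was never dropped, hence the cancel_charge() below.)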
5847 */ 5848 if (mc.moved_charge) { 5849 cancel_charge(mc.from, mc.moved_charge); 5850 mc.moved_charge = 0; 5851 } 5852 /* we must fixup refcnts and charges */ 5853 if (mc.moved_swap) { 5854 /* uncharge swap account from the old cgroup */ 5855 if (!mem_cgroup_is_root(mc.from)) 5856 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5857 5858 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5859 5860 /* 5861 * we charged both to->memory and to->memsw, so we 5862 * should uncharge to->memory. 5863 */ 5864 if (!mem_cgroup_is_root(mc.to)) 5865 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5866 5867 mc.moved_swap = 0; 5868 } 5869 memcg_oom_recover(from); 5870 memcg_oom_recover(to); 5871 wake_up_all(&mc.waitq); 5872 } 5873 5874 static void mem_cgroup_clear_mc(void) 5875 { 5876 struct mm_struct *mm = mc.mm; 5877 5878 /* 5879 * we must clear moving_task before waking up waiters at the end of 5880 * task migration. 5881 */ 5882 mc.moving_task = NULL; 5883 __mem_cgroup_clear_mc(); 5884 spin_lock(&mc.lock); 5885 mc.from = NULL; 5886 mc.to = NULL; 5887 mc.mm = NULL; 5888 spin_unlock(&mc.lock); 5889 5890 mmput(mm); 5891 } 5892 5893 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5894 { 5895 struct cgroup_subsys_state *css; 5896 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5897 struct mem_cgroup *from; 5898 struct task_struct *leader, *p; 5899 struct mm_struct *mm; 5900 unsigned long move_flags; 5901 int ret = 0; 5902 5903 /* charge immigration isn't supported on the default hierarchy */ 5904 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5905 return 0; 5906 5907 /* 5908 * Multi-process migrations only happen on the default hierarchy 5909 * where charge immigration is not used. Perform charge 5910 * immigration if @tset contains a leader and whine if there are 5911 * multiple. 5912 */ 5913 p = NULL; 5914 cgroup_taskset_for_each_leader(leader, css, tset) { 5915 WARN_ON_ONCE(p); 5916 p = leader; 5917 memcg = mem_cgroup_from_css(css); 5918 } 5919 if (!p) 5920 return 0; 5921 5922 /* 5923 * We are now committed to this value whatever it is. Changes in this 5924 * tunable will only affect upcoming migrations, not the current one. 5925 * So we need to save it, and keep it going. 
5926 */ 5927 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 5928 if (!move_flags) 5929 return 0; 5930 5931 from = mem_cgroup_from_task(p); 5932 5933 VM_BUG_ON(from == memcg); 5934 5935 mm = get_task_mm(p); 5936 if (!mm) 5937 return 0; 5938 /* We move charges only when we move the owner of the mm */ 5939 if (mm->owner == p) { 5940 VM_BUG_ON(mc.from); 5941 VM_BUG_ON(mc.to); 5942 VM_BUG_ON(mc.precharge); 5943 VM_BUG_ON(mc.moved_charge); 5944 VM_BUG_ON(mc.moved_swap); 5945 5946 spin_lock(&mc.lock); 5947 mc.mm = mm; 5948 mc.from = from; 5949 mc.to = memcg; 5950 mc.flags = move_flags; 5951 spin_unlock(&mc.lock); 5952 /* We set mc.moving_task later */ 5953 5954 ret = mem_cgroup_precharge_mc(mm); 5955 if (ret) 5956 mem_cgroup_clear_mc(); 5957 } else { 5958 mmput(mm); 5959 } 5960 return ret; 5961 } 5962 5963 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 5964 { 5965 if (mc.to) 5966 mem_cgroup_clear_mc(); 5967 } 5968 5969 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 5970 unsigned long addr, unsigned long end, 5971 struct mm_walk *walk) 5972 { 5973 int ret = 0; 5974 struct vm_area_struct *vma = walk->vma; 5975 pte_t *pte; 5976 spinlock_t *ptl; 5977 enum mc_target_type target_type; 5978 union mc_target target; 5979 struct page *page; 5980 5981 ptl = pmd_trans_huge_lock(pmd, vma); 5982 if (ptl) { 5983 if (mc.precharge < HPAGE_PMD_NR) { 5984 spin_unlock(ptl); 5985 return 0; 5986 } 5987 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 5988 if (target_type == MC_TARGET_PAGE) { 5989 page = target.page; 5990 if (!isolate_lru_page(page)) { 5991 if (!mem_cgroup_move_account(page, true, 5992 mc.from, mc.to)) { 5993 mc.precharge -= HPAGE_PMD_NR; 5994 mc.moved_charge += HPAGE_PMD_NR; 5995 } 5996 putback_lru_page(page); 5997 } 5998 put_page(page); 5999 } else if (target_type == MC_TARGET_DEVICE) { 6000 page = target.page; 6001 if (!mem_cgroup_move_account(page, true, 6002 mc.from, mc.to)) { 6003 mc.precharge -= HPAGE_PMD_NR; 6004 mc.moved_charge += HPAGE_PMD_NR; 6005 } 6006 put_page(page); 6007 } 6008 spin_unlock(ptl); 6009 return 0; 6010 } 6011 6012 if (pmd_trans_unstable(pmd)) 6013 return 0; 6014 retry: 6015 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6016 for (; addr != end; addr += PAGE_SIZE) { 6017 pte_t ptent = *(pte++); 6018 bool device = false; 6019 swp_entry_t ent; 6020 6021 if (!mc.precharge) 6022 break; 6023 6024 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6025 case MC_TARGET_DEVICE: 6026 device = true; 6027 fallthrough; 6028 case MC_TARGET_PAGE: 6029 page = target.page; 6030 /* 6031 * We can have a part of the split pmd here. Moving it 6032 * can be done but it would be too convoluted so simply 6033 * ignore such a partial THP and keep it in the original 6034 * memcg. There should be somebody mapping the head. 6035 */ 6036 if (PageTransCompound(page)) 6037 goto put; 6038 if (!device && isolate_lru_page(page)) 6039 goto put; 6040 if (!mem_cgroup_move_account(page, false, 6041 mc.from, mc.to)) { 6042 mc.precharge--; 6043 /* we uncharge from mc.from later. */ 6044 mc.moved_charge++; 6045 } 6046 if (!device) 6047 putback_lru_page(page); 6048 put: /* get_mctgt_type() gets the page */ 6049 put_page(page); 6050 break; 6051 case MC_TARGET_SWAP: 6052 ent = target.ent; 6053 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6054 mc.precharge--; 6055 mem_cgroup_id_get_many(mc.to, 1); 6056 /* we fixup other refcnts and charges later.
*/ 6057 mc.moved_swap++; 6058 } 6059 break; 6060 default: 6061 break; 6062 } 6063 } 6064 pte_unmap_unlock(pte - 1, ptl); 6065 cond_resched(); 6066 6067 if (addr != end) { 6068 /* 6069 * We have consumed all precharges we got in can_attach(). 6070 * We try to charge one by one, but don't do any additional 6071 * charges to mc.to if we have failed to charge once in the 6072 * attach() phase. 6073 */ 6074 ret = mem_cgroup_do_precharge(1); 6075 if (!ret) 6076 goto retry; 6077 } 6078 6079 return ret; 6080 } 6081 6082 static const struct mm_walk_ops charge_walk_ops = { 6083 .pmd_entry = mem_cgroup_move_charge_pte_range, 6084 }; 6085 6086 static void mem_cgroup_move_charge(void) 6087 { 6088 lru_add_drain_all(); 6089 /* 6090 * Signal lock_page_memcg() to take the memcg's move_lock 6091 * while we're moving its pages to another memcg. Then wait 6092 * for already started RCU-only updates to finish. 6093 */ 6094 atomic_inc(&mc.from->moving_account); 6095 synchronize_rcu(); 6096 retry: 6097 if (unlikely(!mmap_read_trylock(mc.mm))) { 6098 /* 6099 * Someone who is holding the mmap_lock might be waiting in 6100 * the waitq. So we cancel all extra charges, wake up all waiters, 6101 * and retry. Because we cancel precharges, we might not be able 6102 * to move enough charges, but moving charge is a best-effort 6103 * feature anyway, so it wouldn't be a big problem. 6104 */ 6105 __mem_cgroup_clear_mc(); 6106 cond_resched(); 6107 goto retry; 6108 } 6109 /* 6110 * When we have consumed all precharges and failed in doing 6111 * additional charge, the page walk just aborts. 6112 */ 6113 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6114 NULL); 6115 6116 mmap_read_unlock(mc.mm); 6117 atomic_dec(&mc.from->moving_account); 6118 } 6119 6120 static void mem_cgroup_move_task(void) 6121 { 6122 if (mc.to) { 6123 mem_cgroup_move_charge(); 6124 mem_cgroup_clear_mc(); 6125 } 6126 } 6127 #else /* !CONFIG_MMU */ 6128 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6129 { 6130 return 0; 6131 } 6132 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6133 { 6134 } 6135 static void mem_cgroup_move_task(void) 6136 { 6137 } 6138 #endif 6139 6140 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6141 { 6142 if (value == PAGE_COUNTER_MAX) 6143 seq_puts(m, "max\n"); 6144 else 6145 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6146 6147 return 0; 6148 } 6149 6150 static u64 memory_current_read(struct cgroup_subsys_state *css, 6151 struct cftype *cft) 6152 { 6153 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6154 6155 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6156 } 6157 6158 static int memory_min_show(struct seq_file *m, void *v) 6159 { 6160 return seq_puts_memcg_tunable(m, 6161 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6162 } 6163 6164 static ssize_t memory_min_write(struct kernfs_open_file *of, 6165 char *buf, size_t nbytes, loff_t off) 6166 { 6167 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6168 unsigned long min; 6169 int err; 6170 6171 buf = strstrip(buf); 6172 err = page_counter_memparse(buf, "max", &min); 6173 if (err) 6174 return err; 6175 6176 page_counter_set_min(&memcg->memory, min); 6177 6178 return nbytes; 6179 } 6180 6181 static int memory_low_show(struct seq_file *m, void *v) 6182 { 6183 return seq_puts_memcg_tunable(m, 6184 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6185 } 6186 6187 static ssize_t memory_low_write(struct kernfs_open_file *of, 6188 char *buf, size_t nbytes, loff_t off) 6189 { 6190
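/* Illustrative usage note (not part of the original comments): the value written to memory.low is either "max" or a byte count, with memparse suffixes such as K, M and G accepted, so a hypothetical "echo 512M > memory.low" from userspace arrives at page_counter_set_low() below as 512M / PAGE_SIZE pages. */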
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6191 unsigned long low; 6192 int err; 6193 6194 buf = strstrip(buf); 6195 err = page_counter_memparse(buf, "max", &low); 6196 if (err) 6197 return err; 6198 6199 page_counter_set_low(&memcg->memory, low); 6200 6201 return nbytes; 6202 } 6203 6204 static int memory_high_show(struct seq_file *m, void *v) 6205 { 6206 return seq_puts_memcg_tunable(m, 6207 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6208 } 6209 6210 static ssize_t memory_high_write(struct kernfs_open_file *of, 6211 char *buf, size_t nbytes, loff_t off) 6212 { 6213 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6214 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6215 bool drained = false; 6216 unsigned long high; 6217 int err; 6218 6219 buf = strstrip(buf); 6220 err = page_counter_memparse(buf, "max", &high); 6221 if (err) 6222 return err; 6223 6224 page_counter_set_high(&memcg->memory, high); 6225 6226 for (;;) { 6227 unsigned long nr_pages = page_counter_read(&memcg->memory); 6228 unsigned long reclaimed; 6229 6230 if (nr_pages <= high) 6231 break; 6232 6233 if (signal_pending(current)) 6234 break; 6235 6236 if (!drained) { 6237 drain_all_stock(memcg); 6238 drained = true; 6239 continue; 6240 } 6241 6242 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6243 GFP_KERNEL, true); 6244 6245 if (!reclaimed && !nr_retries--) 6246 break; 6247 } 6248 6249 memcg_wb_domain_size_changed(memcg); 6250 return nbytes; 6251 } 6252 6253 static int memory_max_show(struct seq_file *m, void *v) 6254 { 6255 return seq_puts_memcg_tunable(m, 6256 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6257 } 6258 6259 static ssize_t memory_max_write(struct kernfs_open_file *of, 6260 char *buf, size_t nbytes, loff_t off) 6261 { 6262 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6263 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6264 bool drained = false; 6265 unsigned long max; 6266 int err; 6267 6268 buf = strstrip(buf); 6269 err = page_counter_memparse(buf, "max", &max); 6270 if (err) 6271 return err; 6272 6273 xchg(&memcg->memory.max, max); 6274 6275 for (;;) { 6276 unsigned long nr_pages = page_counter_read(&memcg->memory); 6277 6278 if (nr_pages <= max) 6279 break; 6280 6281 if (signal_pending(current)) 6282 break; 6283 6284 if (!drained) { 6285 drain_all_stock(memcg); 6286 drained = true; 6287 continue; 6288 } 6289 6290 if (nr_reclaims) { 6291 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6292 GFP_KERNEL, true)) 6293 nr_reclaims--; 6294 continue; 6295 } 6296 6297 memcg_memory_event(memcg, MEMCG_OOM); 6298 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6299 break; 6300 } 6301 6302 memcg_wb_domain_size_changed(memcg); 6303 return nbytes; 6304 } 6305 6306 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6307 { 6308 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6309 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6310 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6311 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6312 seq_printf(m, "oom_kill %lu\n", 6313 atomic_long_read(&events[MEMCG_OOM_KILL])); 6314 } 6315 6316 static int memory_events_show(struct seq_file *m, void *v) 6317 { 6318 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6319 6320 __memory_events_show(m, memcg->memory_events); 6321 return 0; 6322 } 6323 6324 static int memory_events_local_show(struct seq_file *m, void *v) 6325 { 6326 struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); 6327 6328 __memory_events_show(m, memcg->memory_events_local); 6329 return 0; 6330 } 6331 6332 static int memory_stat_show(struct seq_file *m, void *v) 6333 { 6334 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6335 char *buf; 6336 6337 buf = memory_stat_format(memcg); 6338 if (!buf) 6339 return -ENOMEM; 6340 seq_puts(m, buf); 6341 kfree(buf); 6342 return 0; 6343 } 6344 6345 #ifdef CONFIG_NUMA 6346 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec, 6347 int item) 6348 { 6349 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item); 6350 } 6351 6352 static int memory_numa_stat_show(struct seq_file *m, void *v) 6353 { 6354 int i; 6355 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6356 6357 mem_cgroup_flush_stats(); 6358 6359 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6360 int nid; 6361 6362 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6363 continue; 6364 6365 seq_printf(m, "%s", memory_stats[i].name); 6366 for_each_node_state(nid, N_MEMORY) { 6367 u64 size; 6368 struct lruvec *lruvec; 6369 6370 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6371 size = lruvec_page_state_output(lruvec, 6372 memory_stats[i].idx); 6373 seq_printf(m, " N%d=%llu", nid, size); 6374 } 6375 seq_putc(m, '\n'); 6376 } 6377 6378 return 0; 6379 } 6380 #endif 6381 6382 static int memory_oom_group_show(struct seq_file *m, void *v) 6383 { 6384 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6385 6386 seq_printf(m, "%d\n", memcg->oom_group); 6387 6388 return 0; 6389 } 6390 6391 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6392 char *buf, size_t nbytes, loff_t off) 6393 { 6394 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6395 int ret, oom_group; 6396 6397 buf = strstrip(buf); 6398 if (!buf) 6399 return -EINVAL; 6400 6401 ret = kstrtoint(buf, 0, &oom_group); 6402 if (ret) 6403 return ret; 6404 6405 if (oom_group != 0 && oom_group != 1) 6406 return -EINVAL; 6407 6408 memcg->oom_group = oom_group; 6409 6410 return nbytes; 6411 } 6412 6413 static struct cftype memory_files[] = { 6414 { 6415 .name = "current", 6416 .flags = CFTYPE_NOT_ON_ROOT, 6417 .read_u64 = memory_current_read, 6418 }, 6419 { 6420 .name = "min", 6421 .flags = CFTYPE_NOT_ON_ROOT, 6422 .seq_show = memory_min_show, 6423 .write = memory_min_write, 6424 }, 6425 { 6426 .name = "low", 6427 .flags = CFTYPE_NOT_ON_ROOT, 6428 .seq_show = memory_low_show, 6429 .write = memory_low_write, 6430 }, 6431 { 6432 .name = "high", 6433 .flags = CFTYPE_NOT_ON_ROOT, 6434 .seq_show = memory_high_show, 6435 .write = memory_high_write, 6436 }, 6437 { 6438 .name = "max", 6439 .flags = CFTYPE_NOT_ON_ROOT, 6440 .seq_show = memory_max_show, 6441 .write = memory_max_write, 6442 }, 6443 { 6444 .name = "events", 6445 .flags = CFTYPE_NOT_ON_ROOT, 6446 .file_offset = offsetof(struct mem_cgroup, events_file), 6447 .seq_show = memory_events_show, 6448 }, 6449 { 6450 .name = "events.local", 6451 .flags = CFTYPE_NOT_ON_ROOT, 6452 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6453 .seq_show = memory_events_local_show, 6454 }, 6455 { 6456 .name = "stat", 6457 .seq_show = memory_stat_show, 6458 }, 6459 #ifdef CONFIG_NUMA 6460 { 6461 .name = "numa_stat", 6462 .seq_show = memory_numa_stat_show, 6463 }, 6464 #endif 6465 { 6466 .name = "oom.group", 6467 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6468 .seq_show = memory_oom_group_show, 6469 .write = memory_oom_group_write, 6470 }, 6471 { } /* terminate */ 6472 }; 6473 6474 struct cgroup_subsys 
memory_cgrp_subsys = { 6475 .css_alloc = mem_cgroup_css_alloc, 6476 .css_online = mem_cgroup_css_online, 6477 .css_offline = mem_cgroup_css_offline, 6478 .css_released = mem_cgroup_css_released, 6479 .css_free = mem_cgroup_css_free, 6480 .css_reset = mem_cgroup_css_reset, 6481 .css_rstat_flush = mem_cgroup_css_rstat_flush, 6482 .can_attach = mem_cgroup_can_attach, 6483 .cancel_attach = mem_cgroup_cancel_attach, 6484 .post_attach = mem_cgroup_move_task, 6485 .dfl_cftypes = memory_files, 6486 .legacy_cftypes = mem_cgroup_legacy_files, 6487 .early_init = 0, 6488 }; 6489 6490 /* 6491 * This function calculates an individual cgroup's effective 6492 * protection which is derived from its own memory.min/low, its 6493 * parent's and siblings' settings, as well as the actual memory 6494 * distribution in the tree. 6495 * 6496 * The following rules apply to the effective protection values: 6497 * 6498 * 1. At the first level of reclaim, effective protection is equal to 6499 * the declared protection in memory.min and memory.low. 6500 * 6501 * 2. To enable safe delegation of the protection configuration, at 6502 * subsequent levels the effective protection is capped to the 6503 * parent's effective protection. 6504 * 6505 * 3. To make complex and dynamic subtrees easier to configure, the 6506 * user is allowed to overcommit the declared protection at a given 6507 * level. If that is the case, the parent's effective protection is 6508 * distributed to the children in proportion to how much protection 6509 * they have declared and how much of it they are utilizing. 6510 * 6511 * This makes distribution proportional, but also work-conserving: 6512 * if one cgroup claims much more protection than the memory it uses, 6513 * the unused remainder is available to its siblings. 6514 * 6515 * 4. Conversely, when the declared protection is undercommitted at a 6516 * given level, the distribution of the larger parental protection 6517 * budget is NOT proportional. A cgroup's protection from a sibling 6518 * is capped to its own memory.min/low setting. 6519 * 6520 * 5. However, to allow protecting recursive subtrees from each other 6521 * without having to declare each individual cgroup's fixed share 6522 * of the ancestor's claim to protection, any unutilized - 6523 * "floating" - protection from up the tree is distributed in 6524 * proportion to each cgroup's *usage*. This makes the protection 6525 * neutral wrt sibling cgroups and lets them compete freely over 6526 * the shared parental protection budget, but it protects the 6527 * subtree as a whole from neighboring subtrees. 6528 * 6529 * Note that 4. and 5. are not in conflict: 4. is about protecting 6530 * against immediate siblings whereas 5. is about protecting against 6531 * neighboring subtrees. 6532 */ 6533 static unsigned long effective_protection(unsigned long usage, 6534 unsigned long parent_usage, 6535 unsigned long setting, 6536 unsigned long parent_effective, 6537 unsigned long siblings_protected) 6538 { 6539 unsigned long protected; 6540 unsigned long ep; 6541 6542 protected = min(usage, setting); 6543 /* 6544 * If all cgroups at this level combined claim and use more 6545 * protection than what the parent affords them, distribute 6546 * shares in proportion to utilization. 6547 * 6548 * We are using actual utilization rather than the statically 6549 * claimed protection in order to be work-conserving: claimed 6550 * but unused protection is available to siblings that would 6551 * otherwise get a smaller chunk than what they claimed.
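 * * A worked example with hypothetical numbers (not taken from any real configuration): if the parent's effective protection is 10G and its two children each claim and use 8G, siblings_protected comes to 16G and is overcommitted, so the proportional distribution below gives each child 8G * 10G / 16G = 5G of effective protection.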
6552 */ 6553 if (siblings_protected > parent_effective) 6554 return protected * parent_effective / siblings_protected; 6555 6556 /* 6557 * Ok, utilized protection of all children is within what the 6558 * parent affords them, so we know whatever this child claims 6559 * and utilizes is effectively protected. 6560 * 6561 * If there is unprotected usage beyond this value, reclaim 6562 * will apply pressure in proportion to that amount. 6563 * 6564 * If there is unutilized protection, the cgroup will be fully 6565 * shielded from reclaim, but we do return a smaller value for 6566 * protection than what the group could enjoy in theory. This 6567 * is okay. With the overcommit distribution above, effective 6568 * protection is always dependent on how memory is actually 6569 * consumed among the siblings anyway. 6570 */ 6571 ep = protected; 6572 6573 /* 6574 * If the children aren't claiming (all of) the protection 6575 * afforded to them by the parent, distribute the remainder in 6576 * proportion to the (unprotected) memory of each cgroup. That 6577 * way, cgroups that aren't explicitly prioritized wrt each 6578 * other compete freely over the allowance, but they are 6579 * collectively protected from neighboring trees. 6580 * 6581 * We're using unprotected memory for the weight so that if 6582 * some cgroups DO claim explicit protection, we don't protect 6583 * the same bytes twice. 6584 * 6585 * Check both usage and parent_usage against the respective 6586 * protected values. One should imply the other, but they 6587 * aren't read atomically - make sure the division is sane. 6588 */ 6589 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6590 return ep; 6591 if (parent_effective > siblings_protected && 6592 parent_usage > siblings_protected && 6593 usage > protected) { 6594 unsigned long unclaimed; 6595 6596 unclaimed = parent_effective - siblings_protected; 6597 unclaimed *= usage - protected; 6598 unclaimed /= parent_usage - siblings_protected; 6599 6600 ep += unclaimed; 6601 } 6602 6603 return ep; 6604 } 6605 6606 /** 6607 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range 6608 * @root: the top ancestor of the sub-tree being checked 6609 * @memcg: the memory cgroup to check 6610 * 6611 * WARNING: This function is not stateless! It can only be used as part 6612 * of a top-down tree iteration, not for isolated queries. 6613 */ 6614 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6615 struct mem_cgroup *memcg) 6616 { 6617 unsigned long usage, parent_usage; 6618 struct mem_cgroup *parent; 6619 6620 if (mem_cgroup_disabled()) 6621 return; 6622 6623 if (!root) 6624 root = root_mem_cgroup; 6625 6626 /* 6627 * Effective values of the reclaim targets are ignored so they 6628 * can be stale. Have a look at mem_cgroup_protection for more 6629 * details. 6630 * TODO: calculation should be more robust so that we do not need 6631 * that special casing. 
6632 */ 6633 if (memcg == root) 6634 return; 6635 6636 usage = page_counter_read(&memcg->memory); 6637 if (!usage) 6638 return; 6639 6640 parent = parent_mem_cgroup(memcg); 6641 /* No parent means a non-hierarchical mode on v1 memcg */ 6642 if (!parent) 6643 return; 6644 6645 if (parent == root) { 6646 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6647 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6648 return; 6649 } 6650 6651 parent_usage = page_counter_read(&parent->memory); 6652 6653 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6654 READ_ONCE(memcg->memory.min), 6655 READ_ONCE(parent->memory.emin), 6656 atomic_long_read(&parent->memory.children_min_usage))); 6657 6658 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6659 READ_ONCE(memcg->memory.low), 6660 READ_ONCE(parent->memory.elow), 6661 atomic_long_read(&parent->memory.children_low_usage))); 6662 } 6663 6664 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, 6665 gfp_t gfp) 6666 { 6667 long nr_pages = folio_nr_pages(folio); 6668 int ret; 6669 6670 ret = try_charge(memcg, gfp, nr_pages); 6671 if (ret) 6672 goto out; 6673 6674 css_get(&memcg->css); 6675 commit_charge(folio, memcg); 6676 6677 local_irq_disable(); 6678 mem_cgroup_charge_statistics(memcg, nr_pages); 6679 memcg_check_events(memcg, folio_nid(folio)); 6680 local_irq_enable(); 6681 out: 6682 return ret; 6683 } 6684 6685 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) 6686 { 6687 struct mem_cgroup *memcg; 6688 int ret; 6689 6690 memcg = get_mem_cgroup_from_mm(mm); 6691 ret = charge_memcg(folio, memcg, gfp); 6692 css_put(&memcg->css); 6693 6694 return ret; 6695 } 6696 6697 /** 6698 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin 6699 * @page: page to charge 6700 * @mm: mm context of the victim 6701 * @gfp: reclaim mode 6702 * @entry: swap entry for which the page is allocated 6703 * 6704 * This function charges a page allocated for swapin. Please call this before 6705 * adding the page to the swapcache. 6706 * 6707 * Returns 0 on success. Otherwise, an error code is returned. 6708 */ 6709 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, 6710 gfp_t gfp, swp_entry_t entry) 6711 { 6712 struct folio *folio = page_folio(page); 6713 struct mem_cgroup *memcg; 6714 unsigned short id; 6715 int ret; 6716 6717 if (mem_cgroup_disabled()) 6718 return 0; 6719 6720 id = lookup_swap_cgroup_id(entry); 6721 rcu_read_lock(); 6722 memcg = mem_cgroup_from_id(id); 6723 if (!memcg || !css_tryget_online(&memcg->css)) 6724 memcg = get_mem_cgroup_from_mm(mm); 6725 rcu_read_unlock(); 6726 6727 ret = charge_memcg(folio, memcg, gfp); 6728 6729 css_put(&memcg->css); 6730 return ret; 6731 } 6732 6733 /* 6734 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot 6735 * @entry: swap entry for which the page is charged 6736 * 6737 * Call this function after successfully adding the charged page to the swapcache. 6738 * 6739 * Note: This function assumes the page for which the swap slot is being 6740 * uncharged is an order-0 page. 6741 */ 6742 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) 6743 { 6744 /* 6745 * Cgroup1's unified memory+swap counter has been charged with the 6746 * new swapcache page, finish the transfer by uncharging the swap 6747 * slot. The swap slot would also get uncharged when it dies, but 6748 * it can stick around indefinitely and we'd count the page twice 6749 * the entire time.
6750 * 6751 * Cgroup2 has separate resource counters for memory and swap, 6752 * so this is a non-issue here. Memory and swap charge lifetimes 6753 * correspond 1:1 to page and swap slot lifetimes: we charge the 6754 * page to memory here, and uncharge swap when the slot is freed. 6755 */ 6756 if (!mem_cgroup_disabled() && do_memsw_account()) { 6757 /* 6758 * The swap entry might not get freed for a long time, 6759 * let's not wait for it. The page already received a 6760 * memory+swap charge, drop the swap entry duplicate. 6761 */ 6762 mem_cgroup_uncharge_swap(entry, 1); 6763 } 6764 } 6765 6766 struct uncharge_gather { 6767 struct mem_cgroup *memcg; 6768 unsigned long nr_memory; 6769 unsigned long pgpgout; 6770 unsigned long nr_kmem; 6771 int nid; 6772 }; 6773 6774 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6775 { 6776 memset(ug, 0, sizeof(*ug)); 6777 } 6778 6779 static void uncharge_batch(const struct uncharge_gather *ug) 6780 { 6781 unsigned long flags; 6782 6783 if (ug->nr_memory) { 6784 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); 6785 if (do_memsw_account()) 6786 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); 6787 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6788 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6789 memcg_oom_recover(ug->memcg); 6790 } 6791 6792 local_irq_save(flags); 6793 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6794 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); 6795 memcg_check_events(ug->memcg, ug->nid); 6796 local_irq_restore(flags); 6797 6798 /* drop reference from uncharge_folio */ 6799 css_put(&ug->memcg->css); 6800 } 6801 6802 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) 6803 { 6804 long nr_pages; 6805 struct mem_cgroup *memcg; 6806 struct obj_cgroup *objcg; 6807 bool use_objcg = folio_memcg_kmem(folio); 6808 6809 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 6810 6811 /* 6812 * Nobody should be changing or seriously looking at 6813 * folio memcg or objcg at this point, we have fully 6814 * exclusive access to the folio. 6815 */ 6816 if (use_objcg) { 6817 objcg = __folio_objcg(folio); 6818 /* 6819 * This get matches the put at the end of the function and 6820 * kmem pages do not hold memcg references anymore. 
6821 */ 6822 memcg = get_mem_cgroup_from_objcg(objcg); 6823 } else { 6824 memcg = __folio_memcg(folio); 6825 } 6826 6827 if (!memcg) 6828 return; 6829 6830 if (ug->memcg != memcg) { 6831 if (ug->memcg) { 6832 uncharge_batch(ug); 6833 uncharge_gather_clear(ug); 6834 } 6835 ug->memcg = memcg; 6836 ug->nid = folio_nid(folio); 6837 6838 /* pairs with css_put in uncharge_batch */ 6839 css_get(&memcg->css); 6840 } 6841 6842 nr_pages = folio_nr_pages(folio); 6843 6844 if (use_objcg) { 6845 ug->nr_memory += nr_pages; 6846 ug->nr_kmem += nr_pages; 6847 6848 folio->memcg_data = 0; 6849 obj_cgroup_put(objcg); 6850 } else { 6851 /* LRU pages aren't accounted at the root level */ 6852 if (!mem_cgroup_is_root(memcg)) 6853 ug->nr_memory += nr_pages; 6854 ug->pgpgout++; 6855 6856 folio->memcg_data = 0; 6857 } 6858 6859 css_put(&memcg->css); 6860 } 6861 6862 void __mem_cgroup_uncharge(struct folio *folio) 6863 { 6864 struct uncharge_gather ug; 6865 6866 /* Don't touch folio->lru of any random page, pre-check: */ 6867 if (!folio_memcg(folio)) 6868 return; 6869 6870 uncharge_gather_clear(&ug); 6871 uncharge_folio(folio, &ug); 6872 uncharge_batch(&ug); 6873 } 6874 6875 /** 6876 * __mem_cgroup_uncharge_list - uncharge a list of pages 6877 * @page_list: list of pages to uncharge 6878 * 6879 * Uncharge a list of pages previously charged with 6880 * __mem_cgroup_charge(). 6881 */ 6882 void __mem_cgroup_uncharge_list(struct list_head *page_list) 6883 { 6884 struct uncharge_gather ug; 6885 struct folio *folio; 6886 6887 uncharge_gather_clear(&ug); 6888 list_for_each_entry(folio, page_list, lru) 6889 uncharge_folio(folio, &ug); 6890 if (ug.memcg) 6891 uncharge_batch(&ug); 6892 } 6893 6894 /** 6895 * mem_cgroup_migrate - Charge a folio's replacement. 6896 * @old: Currently circulating folio. 6897 * @new: Replacement folio. 6898 * 6899 * Charge @new as a replacement folio for @old. @old will 6900 * be uncharged upon free. 6901 * 6902 * Both folios must be locked, @new->mapping must be set up. 6903 */ 6904 void mem_cgroup_migrate(struct folio *old, struct folio *new) 6905 { 6906 struct mem_cgroup *memcg; 6907 long nr_pages = folio_nr_pages(new); 6908 unsigned long flags; 6909 6910 VM_BUG_ON_FOLIO(!folio_test_locked(old), old); 6911 VM_BUG_ON_FOLIO(!folio_test_locked(new), new); 6912 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); 6913 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); 6914 6915 if (mem_cgroup_disabled()) 6916 return; 6917 6918 /* Page cache replacement: new folio already charged? */ 6919 if (folio_memcg(new)) 6920 return; 6921 6922 memcg = folio_memcg(old); 6923 VM_WARN_ON_ONCE_FOLIO(!memcg, old); 6924 if (!memcg) 6925 return; 6926 6927 /* Force-charge the new page. The old one will be freed soon */ 6928 if (!mem_cgroup_is_root(memcg)) { 6929 page_counter_charge(&memcg->memory, nr_pages); 6930 if (do_memsw_account()) 6931 page_counter_charge(&memcg->memsw, nr_pages); 6932 } 6933 6934 css_get(&memcg->css); 6935 commit_charge(new, memcg); 6936 6937 local_irq_save(flags); 6938 mem_cgroup_charge_statistics(memcg, nr_pages); 6939 memcg_check_events(memcg, folio_nid(new)); 6940 local_irq_restore(flags); 6941 } 6942 6943 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 6944 EXPORT_SYMBOL(memcg_sockets_enabled_key); 6945 6946 void mem_cgroup_sk_alloc(struct sock *sk) 6947 { 6948 struct mem_cgroup *memcg; 6949 6950 if (!mem_cgroup_sockets_enabled) 6951 return; 6952 6953 /* Do not associate the sock with an unrelated interrupted task's memcg.
*/ 6954 if (in_interrupt()) 6955 return; 6956 6957 rcu_read_lock(); 6958 memcg = mem_cgroup_from_task(current); 6959 if (memcg == root_mem_cgroup) 6960 goto out; 6961 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 6962 goto out; 6963 if (css_tryget(&memcg->css)) 6964 sk->sk_memcg = memcg; 6965 out: 6966 rcu_read_unlock(); 6967 } 6968 6969 void mem_cgroup_sk_free(struct sock *sk) 6970 { 6971 if (sk->sk_memcg) 6972 css_put(&sk->sk_memcg->css); 6973 } 6974 6975 /** 6976 * mem_cgroup_charge_skmem - charge socket memory 6977 * @memcg: memcg to charge 6978 * @nr_pages: number of pages to charge 6979 * @gfp_mask: reclaim mode 6980 * 6981 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 6982 * @memcg's configured limit, %false if it doesn't. 6983 */ 6984 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, 6985 gfp_t gfp_mask) 6986 { 6987 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 6988 struct page_counter *fail; 6989 6990 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 6991 memcg->tcpmem_pressure = 0; 6992 return true; 6993 } 6994 memcg->tcpmem_pressure = 1; 6995 if (gfp_mask & __GFP_NOFAIL) { 6996 page_counter_charge(&memcg->tcpmem, nr_pages); 6997 return true; 6998 } 6999 return false; 7000 } 7001 7002 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { 7003 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7004 return true; 7005 } 7006 7007 return false; 7008 } 7009 7010 /** 7011 * mem_cgroup_uncharge_skmem - uncharge socket memory 7012 * @memcg: memcg to uncharge 7013 * @nr_pages: number of pages to uncharge 7014 */ 7015 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7016 { 7017 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7018 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7019 return; 7020 } 7021 7022 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7023 7024 refill_stock(memcg, nr_pages); 7025 } 7026 7027 static int __init cgroup_memory(char *s) 7028 { 7029 char *token; 7030 7031 while ((token = strsep(&s, ",")) != NULL) { 7032 if (!*token) 7033 continue; 7034 if (!strcmp(token, "nosocket")) 7035 cgroup_memory_nosocket = true; 7036 if (!strcmp(token, "nokmem")) 7037 cgroup_memory_nokmem = true; 7038 } 7039 return 0; 7040 } 7041 __setup("cgroup.memory=", cgroup_memory); 7042 7043 /* 7044 * subsys_initcall() for memory controller. 7045 * 7046 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7047 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7048 * basically everything that doesn't depend on a specific mem_cgroup structure 7049 * should be initialized from here. 7050 */ 7051 static int __init mem_cgroup_init(void) 7052 { 7053 int cpu, node; 7054 7055 /* 7056 * Currently s32 type (can refer to struct batched_lruvec_stat) is 7057 * used for per-memcg-per-cpu caching of per-node statistics. In order 7058 * to work fine, we should make sure that the overfill threshold can't 7059 * exceed S32_MAX / PAGE_SIZE. 7060 */ 7061 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); 7062 7063 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7064 memcg_hotplug_cpu_dead); 7065 7066 for_each_possible_cpu(cpu) 7067 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7068 drain_local_stock); 7069 7070 for_each_node(node) { 7071 struct mem_cgroup_tree_per_node *rtpn; 7072 7073 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7074 node_online(node) ? 
node : NUMA_NO_NODE); 7075 7076 rtpn->rb_root = RB_ROOT; 7077 rtpn->rb_rightmost = NULL; 7078 spin_lock_init(&rtpn->lock); 7079 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7080 } 7081 7082 return 0; 7083 } 7084 subsys_initcall(mem_cgroup_init); 7085 7086 #ifdef CONFIG_MEMCG_SWAP 7087 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7088 { 7089 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7090 /* 7091 * The root cgroup cannot be destroyed, so its refcount must 7092 * always be >= 1. 7093 */ 7094 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7095 VM_BUG_ON(1); 7096 break; 7097 } 7098 memcg = parent_mem_cgroup(memcg); 7099 if (!memcg) 7100 memcg = root_mem_cgroup; 7101 } 7102 return memcg; 7103 } 7104 7105 /** 7106 * mem_cgroup_swapout - transfer a memsw charge to swap 7107 * @page: page whose memsw charge to transfer 7108 * @entry: swap entry to move the charge to 7109 * 7110 * Transfer the memsw charge of @page to @entry. 7111 */ 7112 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7113 { 7114 struct mem_cgroup *memcg, *swap_memcg; 7115 unsigned int nr_entries; 7116 unsigned short oldid; 7117 7118 VM_BUG_ON_PAGE(PageLRU(page), page); 7119 VM_BUG_ON_PAGE(page_count(page), page); 7120 7121 if (mem_cgroup_disabled()) 7122 return; 7123 7124 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7125 return; 7126 7127 memcg = page_memcg(page); 7128 7129 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7130 if (!memcg) 7131 return; 7132 7133 /* 7134 * In case the memcg owning these pages has been offlined and doesn't 7135 * have an ID allocated to it anymore, charge the closest online 7136 * ancestor for the swap instead and transfer the memory+swap charge. 7137 */ 7138 swap_memcg = mem_cgroup_id_get_online(memcg); 7139 nr_entries = thp_nr_pages(page); 7140 /* Get references for the tail pages, too */ 7141 if (nr_entries > 1) 7142 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7143 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7144 nr_entries); 7145 VM_BUG_ON_PAGE(oldid, page); 7146 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7147 7148 page->memcg_data = 0; 7149 7150 if (!mem_cgroup_is_root(memcg)) 7151 page_counter_uncharge(&memcg->memory, nr_entries); 7152 7153 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7154 if (!mem_cgroup_is_root(swap_memcg)) 7155 page_counter_charge(&swap_memcg->memsw, nr_entries); 7156 page_counter_uncharge(&memcg->memsw, nr_entries); 7157 } 7158 7159 /* 7160 * Interrupts should be disabled here because the caller holds the 7161 * i_pages lock which is taken with interrupts-off. It is 7162 * important here to have the interrupts disabled because it is the 7163 * only synchronisation we have for updating the per-CPU variables. 7164 */ 7165 VM_BUG_ON(!irqs_disabled()); 7166 mem_cgroup_charge_statistics(memcg, -nr_entries); 7167 memcg_check_events(memcg, page_to_nid(page)); 7168 7169 css_put(&memcg->css); 7170 } 7171 7172 /** 7173 * __mem_cgroup_try_charge_swap - try charging swap space for a page 7174 * @page: page being added to swap 7175 * @entry: swap entry to charge 7176 * 7177 * Try to charge @page's memcg for the swap space at @entry. 7178 * 7179 * Returns 0 on success, -ENOMEM on failure.
7180 */ 7181 int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7182 { 7183 unsigned int nr_pages = thp_nr_pages(page); 7184 struct page_counter *counter; 7185 struct mem_cgroup *memcg; 7186 unsigned short oldid; 7187 7188 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7189 return 0; 7190 7191 memcg = page_memcg(page); 7192 7193 VM_WARN_ON_ONCE_PAGE(!memcg, page); 7194 if (!memcg) 7195 return 0; 7196 7197 if (!entry.val) { 7198 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7199 return 0; 7200 } 7201 7202 memcg = mem_cgroup_id_get_online(memcg); 7203 7204 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7205 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7206 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7207 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7208 mem_cgroup_id_put(memcg); 7209 return -ENOMEM; 7210 } 7211 7212 /* Get references for the tail pages, too */ 7213 if (nr_pages > 1) 7214 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7215 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7216 VM_BUG_ON_PAGE(oldid, page); 7217 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7218 7219 return 0; 7220 } 7221 7222 /** 7223 * __mem_cgroup_uncharge_swap - uncharge swap space 7224 * @entry: swap entry to uncharge 7225 * @nr_pages: the amount of swap space to uncharge 7226 */ 7227 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7228 { 7229 struct mem_cgroup *memcg; 7230 unsigned short id; 7231 7232 id = swap_cgroup_record(entry, 0, nr_pages); 7233 rcu_read_lock(); 7234 memcg = mem_cgroup_from_id(id); 7235 if (memcg) { 7236 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7237 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7238 page_counter_uncharge(&memcg->swap, nr_pages); 7239 else 7240 page_counter_uncharge(&memcg->memsw, nr_pages); 7241 } 7242 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7243 mem_cgroup_id_put_many(memcg, nr_pages); 7244 } 7245 rcu_read_unlock(); 7246 } 7247 7248 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7249 { 7250 long nr_swap_pages = get_nr_swap_pages(); 7251 7252 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7253 return nr_swap_pages; 7254 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7255 nr_swap_pages = min_t(long, nr_swap_pages, 7256 READ_ONCE(memcg->swap.max) - 7257 page_counter_read(&memcg->swap)); 7258 return nr_swap_pages; 7259 } 7260 7261 bool mem_cgroup_swap_full(struct page *page) 7262 { 7263 struct mem_cgroup *memcg; 7264 7265 VM_BUG_ON_PAGE(!PageLocked(page), page); 7266 7267 if (vm_swap_full()) 7268 return true; 7269 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7270 return false; 7271 7272 memcg = page_memcg(page); 7273 if (!memcg) 7274 return false; 7275 7276 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7277 unsigned long usage = page_counter_read(&memcg->swap); 7278 7279 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7280 usage * 2 >= READ_ONCE(memcg->swap.max)) 7281 return true; 7282 } 7283 7284 return false; 7285 } 7286 7287 static int __init setup_swap_account(char *s) 7288 { 7289 if (!strcmp(s, "1")) 7290 cgroup_memory_noswap = false; 7291 else if (!strcmp(s, "0")) 7292 cgroup_memory_noswap = true; 7293 return 1; 7294 } 7295 __setup("swapaccount=", setup_swap_account); 7296 7297 static u64 swap_current_read(struct cgroup_subsys_state *css, 7298 struct cftype *cft) 7299 { 7300 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7301 7302 return 
(u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7303 } 7304 7305 static int swap_high_show(struct seq_file *m, void *v) 7306 { 7307 return seq_puts_memcg_tunable(m, 7308 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7309 } 7310 7311 static ssize_t swap_high_write(struct kernfs_open_file *of, 7312 char *buf, size_t nbytes, loff_t off) 7313 { 7314 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7315 unsigned long high; 7316 int err; 7317 7318 buf = strstrip(buf); 7319 err = page_counter_memparse(buf, "max", &high); 7320 if (err) 7321 return err; 7322 7323 page_counter_set_high(&memcg->swap, high); 7324 7325 return nbytes; 7326 } 7327 7328 static int swap_max_show(struct seq_file *m, void *v) 7329 { 7330 return seq_puts_memcg_tunable(m, 7331 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7332 } 7333 7334 static ssize_t swap_max_write(struct kernfs_open_file *of, 7335 char *buf, size_t nbytes, loff_t off) 7336 { 7337 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7338 unsigned long max; 7339 int err; 7340 7341 buf = strstrip(buf); 7342 err = page_counter_memparse(buf, "max", &max); 7343 if (err) 7344 return err; 7345 7346 xchg(&memcg->swap.max, max); 7347 7348 return nbytes; 7349 } 7350 7351 static int swap_events_show(struct seq_file *m, void *v) 7352 { 7353 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7354 7355 seq_printf(m, "high %lu\n", 7356 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7357 seq_printf(m, "max %lu\n", 7358 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7359 seq_printf(m, "fail %lu\n", 7360 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7361 7362 return 0; 7363 } 7364 7365 static struct cftype swap_files[] = { 7366 { 7367 .name = "swap.current", 7368 .flags = CFTYPE_NOT_ON_ROOT, 7369 .read_u64 = swap_current_read, 7370 }, 7371 { 7372 .name = "swap.high", 7373 .flags = CFTYPE_NOT_ON_ROOT, 7374 .seq_show = swap_high_show, 7375 .write = swap_high_write, 7376 }, 7377 { 7378 .name = "swap.max", 7379 .flags = CFTYPE_NOT_ON_ROOT, 7380 .seq_show = swap_max_show, 7381 .write = swap_max_write, 7382 }, 7383 { 7384 .name = "swap.events", 7385 .flags = CFTYPE_NOT_ON_ROOT, 7386 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7387 .seq_show = swap_events_show, 7388 }, 7389 { } /* terminate */ 7390 }; 7391 7392 static struct cftype memsw_files[] = { 7393 { 7394 .name = "memsw.usage_in_bytes", 7395 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7396 .read_u64 = mem_cgroup_read_u64, 7397 }, 7398 { 7399 .name = "memsw.max_usage_in_bytes", 7400 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7401 .write = mem_cgroup_reset, 7402 .read_u64 = mem_cgroup_read_u64, 7403 }, 7404 { 7405 .name = "memsw.limit_in_bytes", 7406 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7407 .write = mem_cgroup_write, 7408 .read_u64 = mem_cgroup_read_u64, 7409 }, 7410 { 7411 .name = "memsw.failcnt", 7412 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7413 .write = mem_cgroup_reset, 7414 .read_u64 = mem_cgroup_read_u64, 7415 }, 7416 { }, /* terminate */ 7417 }; 7418 7419 /* 7420 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7421 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7422 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7423 * boot parameter. This may result in premature OOPS inside 7424 * mem_cgroup_get_nr_swap_pages() function in corner cases. 
7425 */ 7426 static int __init mem_cgroup_swap_init(void) 7427 { 7428 /* No memory control -> no swap control */ 7429 if (mem_cgroup_disabled()) 7430 cgroup_memory_noswap = true; 7431 7432 if (cgroup_memory_noswap) 7433 return 0; 7434 7435 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7436 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7437 7438 return 0; 7439 } 7440 core_initcall(mem_cgroup_swap_init); 7441 7442 #endif /* CONFIG_MEMCG_SWAP */ 7443
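/* Illustrative usage note (not part of the original file): of the boot parameters handled above, "cgroup.memory=nosocket,nokmem" makes cgroup_memory() set cgroup_memory_nosocket and cgroup_memory_nokmem, and "swapaccount=0" makes setup_swap_account() set cgroup_memory_noswap to true, in which case mem_cgroup_swap_init() skips registering the swap and memsw control files. */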