// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap 1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
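 * (MEM_CGROUP_MAX_RECLAIM_LOOPS bounds the victim walk in
 * mem_cgroup_soft_reclaim() below.)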
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	struct mem_cgroup *memcg;
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we do release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
282 */ 283 nr_bytes = atomic_read(&objcg->nr_charged_bytes); 284 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); 285 nr_pages = nr_bytes >> PAGE_SHIFT; 286 287 spin_lock_irqsave(&css_set_lock, flags); 288 memcg = obj_cgroup_memcg(objcg); 289 if (nr_pages) 290 __memcg_kmem_uncharge(memcg, nr_pages); 291 list_del(&objcg->list); 292 mem_cgroup_put(memcg); 293 spin_unlock_irqrestore(&css_set_lock, flags); 294 295 percpu_ref_exit(ref); 296 kfree_rcu(objcg, rcu); 297 } 298 299 static struct obj_cgroup *obj_cgroup_alloc(void) 300 { 301 struct obj_cgroup *objcg; 302 int ret; 303 304 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); 305 if (!objcg) 306 return NULL; 307 308 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, 309 GFP_KERNEL); 310 if (ret) { 311 kfree(objcg); 312 return NULL; 313 } 314 INIT_LIST_HEAD(&objcg->list); 315 return objcg; 316 } 317 318 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, 319 struct mem_cgroup *parent) 320 { 321 struct obj_cgroup *objcg, *iter; 322 323 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); 324 325 spin_lock_irq(&css_set_lock); 326 327 /* Move active objcg to the parent's list */ 328 xchg(&objcg->memcg, parent); 329 css_get(&parent->css); 330 list_add(&objcg->list, &parent->objcg_list); 331 332 /* Move already reparented objcgs to the parent's list */ 333 list_for_each_entry(iter, &memcg->objcg_list, list) { 334 css_get(&parent->css); 335 xchg(&iter->memcg, parent); 336 css_put(&memcg->css); 337 } 338 list_splice(&memcg->objcg_list, &parent->objcg_list); 339 340 spin_unlock_irq(&css_set_lock); 341 342 percpu_ref_kill(&objcg->refcnt); 343 } 344 345 /* 346 * This will be used as a shrinker list's index. 347 * The main reason for not using cgroup id for this: 348 * this works better in sparse environments, where we have a lot of memcgs, 349 * but only a few kmem-limited. Or also, if we have, for instance, 200 350 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 351 * 200 entry array for that. 352 * 353 * The current size of the caches array is stored in memcg_nr_cache_ids. It 354 * will double each time we have to increase it. 355 */ 356 static DEFINE_IDA(memcg_cache_ida); 357 int memcg_nr_cache_ids; 358 359 /* Protects memcg_nr_cache_ids */ 360 static DECLARE_RWSEM(memcg_cache_ids_sem); 361 362 void memcg_get_cache_ids(void) 363 { 364 down_read(&memcg_cache_ids_sem); 365 } 366 367 void memcg_put_cache_ids(void) 368 { 369 up_read(&memcg_cache_ids_sem); 370 } 371 372 /* 373 * MIN_SIZE is different than 1, because we would like to avoid going through 374 * the alloc/free process all the time. In a small machine, 4 kmem-limited 375 * cgroups is a reasonable guess. In the future, it could be a parameter or 376 * tunable, but that is strictly not necessary. 377 * 378 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 379 * this constant directly from cgroup, but it is understandable that this is 380 * better kept as an internal representation in cgroup.c. In any case, the 381 * cgrp_id space is not getting any smaller, and we don't have to necessarily 382 * increase ours as well if it increases. 383 */ 384 #define MEMCG_CACHES_MIN_SIZE 4 385 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 386 387 /* 388 * A lot of the calls to the cache allocation functions are expected to be 389 * inlined by the compiler. 
 * Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}
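
/*
 * Illustrative sketch of a (hypothetical) caller, not code in this file:
 * after the first object is queued on a per-memcg LRU, its owner would
 * typically do something like
 *
 *	memcg_set_shrinker_bit(memcg, page_to_nid(page), shrinker->id);
 *
 * so that shrink_slab() visits this memcg/node pair on its next scan.
 * "page" and "shrinker" above are stand-ins for the caller's own objects.
 */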

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = page->mem_cgroup;

	/*
	 * The lowest bit set means that memcg isn't a valid
	 * memcg pointer, but an obj_cgroups pointer.
	 * In this case the page is shared and doesn't belong
	 * to any specific memory cgroup.
	 */
	if ((unsigned long) memcg & 0x1UL)
		memcg = NULL;

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
746 */ 747 __mem_cgroup_remove_exceeded(mz, mctz); 748 if (!soft_limit_excess(mz->memcg) || 749 !css_tryget(&mz->memcg->css)) 750 goto retry; 751 done: 752 return mz; 753 } 754 755 static struct mem_cgroup_per_node * 756 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 757 { 758 struct mem_cgroup_per_node *mz; 759 760 spin_lock_irq(&mctz->lock); 761 mz = __mem_cgroup_largest_soft_limit_node(mctz); 762 spin_unlock_irq(&mctz->lock); 763 return mz; 764 } 765 766 /** 767 * __mod_memcg_state - update cgroup memory statistics 768 * @memcg: the memory cgroup 769 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 770 * @val: delta to add to the counter, can be negative 771 */ 772 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 773 { 774 long x, threshold = MEMCG_CHARGE_BATCH; 775 776 if (mem_cgroup_disabled()) 777 return; 778 779 if (memcg_stat_item_in_bytes(idx)) 780 threshold <<= PAGE_SHIFT; 781 782 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); 783 if (unlikely(abs(x) > threshold)) { 784 struct mem_cgroup *mi; 785 786 /* 787 * Batch local counters to keep them in sync with 788 * the hierarchical ones. 789 */ 790 __this_cpu_add(memcg->vmstats_local->stat[idx], x); 791 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 792 atomic_long_add(x, &mi->vmstats[idx]); 793 x = 0; 794 } 795 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); 796 } 797 798 static struct mem_cgroup_per_node * 799 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) 800 { 801 struct mem_cgroup *parent; 802 803 parent = parent_mem_cgroup(pn->memcg); 804 if (!parent) 805 return NULL; 806 return mem_cgroup_nodeinfo(parent, nid); 807 } 808 809 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 810 int val) 811 { 812 struct mem_cgroup_per_node *pn; 813 struct mem_cgroup *memcg; 814 long x, threshold = MEMCG_CHARGE_BATCH; 815 816 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 817 memcg = pn->memcg; 818 819 /* Update memcg */ 820 __mod_memcg_state(memcg, idx, val); 821 822 /* Update lruvec */ 823 __this_cpu_add(pn->lruvec_stat_local->count[idx], val); 824 825 if (vmstat_item_in_bytes(idx)) 826 threshold <<= PAGE_SHIFT; 827 828 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 829 if (unlikely(abs(x) > threshold)) { 830 pg_data_t *pgdat = lruvec_pgdat(lruvec); 831 struct mem_cgroup_per_node *pi; 832 833 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 834 atomic_long_add(x, &pi->lruvec_stat[idx]); 835 x = 0; 836 } 837 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 838 } 839 840 /** 841 * __mod_lruvec_state - update lruvec memory statistics 842 * @lruvec: the lruvec 843 * @idx: the stat item 844 * @val: delta to add to the counter, can be negative 845 * 846 * The lruvec is the intersection of the NUMA node and a cgroup. This 847 * function updates the all three counters that are affected by a 848 * change of state at this level: per-node, per-cgroup, per-lruvec. 
849 */ 850 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 851 int val) 852 { 853 /* Update node */ 854 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 855 856 /* Update memcg and lruvec */ 857 if (!mem_cgroup_disabled()) 858 __mod_memcg_lruvec_state(lruvec, idx, val); 859 } 860 861 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) 862 { 863 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 864 struct mem_cgroup *memcg; 865 struct lruvec *lruvec; 866 867 rcu_read_lock(); 868 memcg = mem_cgroup_from_obj(p); 869 870 /* Untracked pages have no memcg, no lruvec. Update only the node */ 871 if (!memcg || memcg == root_mem_cgroup) { 872 __mod_node_page_state(pgdat, idx, val); 873 } else { 874 lruvec = mem_cgroup_lruvec(memcg, pgdat); 875 __mod_lruvec_state(lruvec, idx, val); 876 } 877 rcu_read_unlock(); 878 } 879 880 void mod_memcg_obj_state(void *p, int idx, int val) 881 { 882 struct mem_cgroup *memcg; 883 884 rcu_read_lock(); 885 memcg = mem_cgroup_from_obj(p); 886 if (memcg) 887 mod_memcg_state(memcg, idx, val); 888 rcu_read_unlock(); 889 } 890 891 /** 892 * __count_memcg_events - account VM events in a cgroup 893 * @memcg: the memory cgroup 894 * @idx: the event item 895 * @count: the number of events that occured 896 */ 897 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 898 unsigned long count) 899 { 900 unsigned long x; 901 902 if (mem_cgroup_disabled()) 903 return; 904 905 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); 906 if (unlikely(x > MEMCG_CHARGE_BATCH)) { 907 struct mem_cgroup *mi; 908 909 /* 910 * Batch local counters to keep them in sync with 911 * the hierarchical ones. 912 */ 913 __this_cpu_add(memcg->vmstats_local->events[idx], x); 914 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 915 atomic_long_add(x, &mi->vmevents[idx]); 916 x = 0; 917 } 918 __this_cpu_write(memcg->vmstats_percpu->events[idx], x); 919 } 920 921 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 922 { 923 return atomic_long_read(&memcg->vmevents[event]); 924 } 925 926 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 927 { 928 long x = 0; 929 int cpu; 930 931 for_each_possible_cpu(cpu) 932 x += per_cpu(memcg->vmstats_local->events[event], cpu); 933 return x; 934 } 935 936 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 937 struct page *page, 938 int nr_pages) 939 { 940 /* pagein of a big page is an event. So, ignore page size */ 941 if (nr_pages > 0) 942 __count_memcg_events(memcg, PGPGIN, 1); 943 else { 944 __count_memcg_events(memcg, PGPGOUT, 1); 945 nr_pages = -nr_pages; /* for event */ 946 } 947 948 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 949 } 950 951 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 952 enum mem_cgroup_events_target target) 953 { 954 unsigned long val, next; 955 956 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 957 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 958 /* from time_after() in jiffies.h */ 959 if ((long)(next - val) < 0) { 960 switch (target) { 961 case MEM_CGROUP_TARGET_THRESH: 962 next = val + THRESHOLDS_EVENTS_TARGET; 963 break; 964 case MEM_CGROUP_TARGET_SOFTLIMIT: 965 next = val + SOFTLIMIT_EVENTS_TARGET; 966 break; 967 default: 968 break; 969 } 970 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 971 return true; 972 } 973 return false; 974 } 975 976 /* 977 * Check events in order. 
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtains a reference on mm->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtains a reference on page->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (in_interrupt())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

static __always_inline struct mem_cgroup *get_active_memcg(void)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = active_memcg();
	if (memcg) {
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
			memcg = root_mem_cgroup;
		else
			memcg = current->active_memcg;
	}
	rcu_read_unlock();

	return memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/**
 * If active memcg is set, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (memcg_kmem_bypass())
		return NULL;

	if (unlikely(active_memcg()))
		return get_active_memcg();

	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
1209 */ 1210 memcg = mem_cgroup_from_css(css); 1211 1212 if (css == &root->css) 1213 break; 1214 1215 if (css_tryget(css)) 1216 break; 1217 1218 memcg = NULL; 1219 } 1220 1221 if (reclaim) { 1222 /* 1223 * The position could have already been updated by a competing 1224 * thread, so check that the value hasn't changed since we read 1225 * it to avoid reclaiming from the same cgroup twice. 1226 */ 1227 (void)cmpxchg(&iter->position, pos, memcg); 1228 1229 if (pos) 1230 css_put(&pos->css); 1231 1232 if (!memcg) 1233 iter->generation++; 1234 else if (!prev) 1235 reclaim->generation = iter->generation; 1236 } 1237 1238 out_unlock: 1239 rcu_read_unlock(); 1240 out: 1241 if (prev && prev != root) 1242 css_put(&prev->css); 1243 1244 return memcg; 1245 } 1246 1247 /** 1248 * mem_cgroup_iter_break - abort a hierarchy walk prematurely 1249 * @root: hierarchy root 1250 * @prev: last visited hierarchy member as returned by mem_cgroup_iter() 1251 */ 1252 void mem_cgroup_iter_break(struct mem_cgroup *root, 1253 struct mem_cgroup *prev) 1254 { 1255 if (!root) 1256 root = root_mem_cgroup; 1257 if (prev && prev != root) 1258 css_put(&prev->css); 1259 } 1260 1261 static void __invalidate_reclaim_iterators(struct mem_cgroup *from, 1262 struct mem_cgroup *dead_memcg) 1263 { 1264 struct mem_cgroup_reclaim_iter *iter; 1265 struct mem_cgroup_per_node *mz; 1266 int nid; 1267 1268 for_each_node(nid) { 1269 mz = mem_cgroup_nodeinfo(from, nid); 1270 iter = &mz->iter; 1271 cmpxchg(&iter->position, dead_memcg, NULL); 1272 } 1273 } 1274 1275 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1276 { 1277 struct mem_cgroup *memcg = dead_memcg; 1278 struct mem_cgroup *last; 1279 1280 do { 1281 __invalidate_reclaim_iterators(memcg, dead_memcg); 1282 last = memcg; 1283 } while ((memcg = parent_mem_cgroup(memcg))); 1284 1285 /* 1286 * When cgruop1 non-hierarchy mode is used, 1287 * parent_mem_cgroup() does not walk all the way up to the 1288 * cgroup root (root_mem_cgroup). So we have to handle 1289 * dead_memcg from cgroup root separately. 1290 */ 1291 if (last != root_mem_cgroup) 1292 __invalidate_reclaim_iterators(root_mem_cgroup, 1293 dead_memcg); 1294 } 1295 1296 /** 1297 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1298 * @memcg: hierarchy root 1299 * @fn: function to call for each task 1300 * @arg: argument passed to @fn 1301 * 1302 * This function iterates over tasks attached to @memcg or to any of its 1303 * descendants and calls @fn for each task. If @fn returns a non-zero 1304 * value, the function breaks the iteration loop and returns the value. 1305 * Otherwise, it will iterate over all tasks and return 0. 1306 * 1307 * This function must not be called for the root memory cgroup. 
1308 */ 1309 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, 1310 int (*fn)(struct task_struct *, void *), void *arg) 1311 { 1312 struct mem_cgroup *iter; 1313 int ret = 0; 1314 1315 BUG_ON(memcg == root_mem_cgroup); 1316 1317 for_each_mem_cgroup_tree(iter, memcg) { 1318 struct css_task_iter it; 1319 struct task_struct *task; 1320 1321 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); 1322 while (!ret && (task = css_task_iter_next(&it))) 1323 ret = fn(task, arg); 1324 css_task_iter_end(&it); 1325 if (ret) { 1326 mem_cgroup_iter_break(memcg, iter); 1327 break; 1328 } 1329 } 1330 return ret; 1331 } 1332 1333 /** 1334 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page 1335 * @page: the page 1336 * @pgdat: pgdat of the page 1337 * 1338 * This function relies on page->mem_cgroup being stable - see the 1339 * access rules in commit_charge(). 1340 */ 1341 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat) 1342 { 1343 struct mem_cgroup_per_node *mz; 1344 struct mem_cgroup *memcg; 1345 struct lruvec *lruvec; 1346 1347 if (mem_cgroup_disabled()) { 1348 lruvec = &pgdat->__lruvec; 1349 goto out; 1350 } 1351 1352 memcg = page->mem_cgroup; 1353 /* 1354 * Swapcache readahead pages are added to the LRU - and 1355 * possibly migrated - before they are charged. 1356 */ 1357 if (!memcg) 1358 memcg = root_mem_cgroup; 1359 1360 mz = mem_cgroup_page_nodeinfo(memcg, page); 1361 lruvec = &mz->lruvec; 1362 out: 1363 /* 1364 * Since a node can be onlined after the mem_cgroup was created, 1365 * we have to be prepared to initialize lruvec->zone here; 1366 * and if offlined then reonlined, we need to reinitialize it. 1367 */ 1368 if (unlikely(lruvec->pgdat != pgdat)) 1369 lruvec->pgdat = pgdat; 1370 return lruvec; 1371 } 1372 1373 /** 1374 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1375 * @lruvec: mem_cgroup per zone lru vector 1376 * @lru: index of lru list the page is sitting on 1377 * @zid: zone id of the accounted pages 1378 * @nr_pages: positive when adding or negative when removing 1379 * 1380 * This function must be called under lru_lock, just before a page is added 1381 * to or just after a page is removed from an lru list (that ordering being 1382 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1383 */ 1384 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1385 int zid, int nr_pages) 1386 { 1387 struct mem_cgroup_per_node *mz; 1388 unsigned long *lru_size; 1389 long size; 1390 1391 if (mem_cgroup_disabled()) 1392 return; 1393 1394 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1395 lru_size = &mz->lru_zone_size[zid][lru]; 1396 1397 if (nr_pages < 0) 1398 *lru_size += nr_pages; 1399 1400 size = *lru_size; 1401 if (WARN_ONCE(size < 0, 1402 "%s(%p, %d, %d): lru_size %ld\n", 1403 __func__, lruvec, lru, nr_pages, size)) { 1404 VM_BUG_ON(1); 1405 *lru_size = 0; 1406 } 1407 1408 if (nr_pages > 0) 1409 *lru_size += nr_pages; 1410 } 1411 1412 /** 1413 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1414 * @memcg: the memory cgroup 1415 * 1416 * Returns the maximum amount of memory @mem can be charged with, in 1417 * pages. 
1418 */ 1419 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1420 { 1421 unsigned long margin = 0; 1422 unsigned long count; 1423 unsigned long limit; 1424 1425 count = page_counter_read(&memcg->memory); 1426 limit = READ_ONCE(memcg->memory.max); 1427 if (count < limit) 1428 margin = limit - count; 1429 1430 if (do_memsw_account()) { 1431 count = page_counter_read(&memcg->memsw); 1432 limit = READ_ONCE(memcg->memsw.max); 1433 if (count < limit) 1434 margin = min(margin, limit - count); 1435 else 1436 margin = 0; 1437 } 1438 1439 return margin; 1440 } 1441 1442 /* 1443 * A routine for checking "mem" is under move_account() or not. 1444 * 1445 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1446 * moving cgroups. This is for waiting at high-memory pressure 1447 * caused by "move". 1448 */ 1449 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1450 { 1451 struct mem_cgroup *from; 1452 struct mem_cgroup *to; 1453 bool ret = false; 1454 /* 1455 * Unlike task_move routines, we access mc.to, mc.from not under 1456 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1457 */ 1458 spin_lock(&mc.lock); 1459 from = mc.from; 1460 to = mc.to; 1461 if (!from) 1462 goto unlock; 1463 1464 ret = mem_cgroup_is_descendant(from, memcg) || 1465 mem_cgroup_is_descendant(to, memcg); 1466 unlock: 1467 spin_unlock(&mc.lock); 1468 return ret; 1469 } 1470 1471 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1472 { 1473 if (mc.moving_task && current != mc.moving_task) { 1474 if (mem_cgroup_under_move(memcg)) { 1475 DEFINE_WAIT(wait); 1476 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1477 /* moving charge context might have finished. */ 1478 if (mc.moving_task) 1479 schedule(); 1480 finish_wait(&mc.waitq, &wait); 1481 return true; 1482 } 1483 } 1484 return false; 1485 } 1486 1487 struct memory_stat { 1488 const char *name; 1489 unsigned int ratio; 1490 unsigned int idx; 1491 }; 1492 1493 static struct memory_stat memory_stats[] = { 1494 { "anon", PAGE_SIZE, NR_ANON_MAPPED }, 1495 { "file", PAGE_SIZE, NR_FILE_PAGES }, 1496 { "kernel_stack", 1024, NR_KERNEL_STACK_KB }, 1497 { "percpu", 1, MEMCG_PERCPU_B }, 1498 { "sock", PAGE_SIZE, MEMCG_SOCK }, 1499 { "shmem", PAGE_SIZE, NR_SHMEM }, 1500 { "file_mapped", PAGE_SIZE, NR_FILE_MAPPED }, 1501 { "file_dirty", PAGE_SIZE, NR_FILE_DIRTY }, 1502 { "file_writeback", PAGE_SIZE, NR_WRITEBACK }, 1503 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1504 /* 1505 * The ratio will be initialized in memory_stats_init(). Because 1506 * on some architectures, the macro of HPAGE_PMD_SIZE is not 1507 * constant(e.g. powerpc). 1508 */ 1509 { "anon_thp", 0, NR_ANON_THPS }, 1510 #endif 1511 { "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON }, 1512 { "active_anon", PAGE_SIZE, NR_ACTIVE_ANON }, 1513 { "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE }, 1514 { "active_file", PAGE_SIZE, NR_ACTIVE_FILE }, 1515 { "unevictable", PAGE_SIZE, NR_UNEVICTABLE }, 1516 1517 /* 1518 * Note: The slab_reclaimable and slab_unreclaimable must be 1519 * together and slab_reclaimable must be in front. 
1520 */ 1521 { "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B }, 1522 { "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B }, 1523 1524 /* The memory events */ 1525 { "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON }, 1526 { "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE }, 1527 { "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON }, 1528 { "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE }, 1529 { "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON }, 1530 { "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE }, 1531 { "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM }, 1532 }; 1533 1534 static int __init memory_stats_init(void) 1535 { 1536 int i; 1537 1538 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1539 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1540 if (memory_stats[i].idx == NR_ANON_THPS) 1541 memory_stats[i].ratio = HPAGE_PMD_SIZE; 1542 #endif 1543 VM_BUG_ON(!memory_stats[i].ratio); 1544 VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT); 1545 } 1546 1547 return 0; 1548 } 1549 pure_initcall(memory_stats_init); 1550 1551 static char *memory_stat_format(struct mem_cgroup *memcg) 1552 { 1553 struct seq_buf s; 1554 int i; 1555 1556 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); 1557 if (!s.buffer) 1558 return NULL; 1559 1560 /* 1561 * Provide statistics on the state of the memory subsystem as 1562 * well as cumulative event counters that show past behavior. 1563 * 1564 * This list is ordered following a combination of these gradients: 1565 * 1) generic big picture -> specifics and details 1566 * 2) reflecting userspace activity -> reflecting kernel heuristics 1567 * 1568 * Current memory state: 1569 */ 1570 1571 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1572 u64 size; 1573 1574 size = memcg_page_state(memcg, memory_stats[i].idx); 1575 size *= memory_stats[i].ratio; 1576 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); 1577 1578 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1579 size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + 1580 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B); 1581 seq_buf_printf(&s, "slab %llu\n", size); 1582 } 1583 } 1584 1585 /* Accumulated memory events */ 1586 1587 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT), 1588 memcg_events(memcg, PGFAULT)); 1589 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), 1590 memcg_events(memcg, PGMAJFAULT)); 1591 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL), 1592 memcg_events(memcg, PGREFILL)); 1593 seq_buf_printf(&s, "pgscan %lu\n", 1594 memcg_events(memcg, PGSCAN_KSWAPD) + 1595 memcg_events(memcg, PGSCAN_DIRECT)); 1596 seq_buf_printf(&s, "pgsteal %lu\n", 1597 memcg_events(memcg, PGSTEAL_KSWAPD) + 1598 memcg_events(memcg, PGSTEAL_DIRECT)); 1599 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE), 1600 memcg_events(memcg, PGACTIVATE)); 1601 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE), 1602 memcg_events(memcg, PGDEACTIVATE)); 1603 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE), 1604 memcg_events(memcg, PGLAZYFREE)); 1605 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED), 1606 memcg_events(memcg, PGLAZYFREED)); 1607 1608 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1609 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC), 1610 memcg_events(memcg, THP_FAULT_ALLOC)); 1611 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC), 1612 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 1613 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1614 1615 /* The above should easily fit into one page */ 1616 
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	} else { /* v1 */
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
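	 * should_force_charge() short-circuits the call to out_of_memory()
	 * when the current task is itself dying (an OOM victim, fatally
	 * signalled or exiting).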
1730 */ 1731 ret = should_force_charge() || out_of_memory(&oc); 1732 1733 unlock: 1734 mutex_unlock(&oom_lock); 1735 return ret; 1736 } 1737 1738 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, 1739 pg_data_t *pgdat, 1740 gfp_t gfp_mask, 1741 unsigned long *total_scanned) 1742 { 1743 struct mem_cgroup *victim = NULL; 1744 int total = 0; 1745 int loop = 0; 1746 unsigned long excess; 1747 unsigned long nr_scanned; 1748 struct mem_cgroup_reclaim_cookie reclaim = { 1749 .pgdat = pgdat, 1750 }; 1751 1752 excess = soft_limit_excess(root_memcg); 1753 1754 while (1) { 1755 victim = mem_cgroup_iter(root_memcg, victim, &reclaim); 1756 if (!victim) { 1757 loop++; 1758 if (loop >= 2) { 1759 /* 1760 * If we have not been able to reclaim 1761 * anything, it might because there are 1762 * no reclaimable pages under this hierarchy 1763 */ 1764 if (!total) 1765 break; 1766 /* 1767 * We want to do more targeted reclaim. 1768 * excess >> 2 is not to excessive so as to 1769 * reclaim too much, nor too less that we keep 1770 * coming back to reclaim from this cgroup 1771 */ 1772 if (total >= (excess >> 2) || 1773 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) 1774 break; 1775 } 1776 continue; 1777 } 1778 total += mem_cgroup_shrink_node(victim, gfp_mask, false, 1779 pgdat, &nr_scanned); 1780 *total_scanned += nr_scanned; 1781 if (!soft_limit_excess(root_memcg)) 1782 break; 1783 } 1784 mem_cgroup_iter_break(root_memcg, victim); 1785 return total; 1786 } 1787 1788 #ifdef CONFIG_LOCKDEP 1789 static struct lockdep_map memcg_oom_lock_dep_map = { 1790 .name = "memcg_oom_lock", 1791 }; 1792 #endif 1793 1794 static DEFINE_SPINLOCK(memcg_oom_lock); 1795 1796 /* 1797 * Check OOM-Killer is already running under our hierarchy. 1798 * If someone is running, return false. 1799 */ 1800 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) 1801 { 1802 struct mem_cgroup *iter, *failed = NULL; 1803 1804 spin_lock(&memcg_oom_lock); 1805 1806 for_each_mem_cgroup_tree(iter, memcg) { 1807 if (iter->oom_lock) { 1808 /* 1809 * this subtree of our hierarchy is already locked 1810 * so we cannot give a lock. 1811 */ 1812 failed = iter; 1813 mem_cgroup_iter_break(memcg, iter); 1814 break; 1815 } else 1816 iter->oom_lock = true; 1817 } 1818 1819 if (failed) { 1820 /* 1821 * OK, we failed to lock the whole subtree so we have 1822 * to clean up what we set up to the failing subtree 1823 */ 1824 for_each_mem_cgroup_tree(iter, memcg) { 1825 if (iter == failed) { 1826 mem_cgroup_iter_break(memcg, iter); 1827 break; 1828 } 1829 iter->oom_lock = false; 1830 } 1831 } else 1832 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_); 1833 1834 spin_unlock(&memcg_oom_lock); 1835 1836 return !failed; 1837 } 1838 1839 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) 1840 { 1841 struct mem_cgroup *iter; 1842 1843 spin_lock(&memcg_oom_lock); 1844 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_); 1845 for_each_mem_cgroup_tree(iter, memcg) 1846 iter->oom_lock = false; 1847 spin_unlock(&memcg_oom_lock); 1848 } 1849 1850 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) 1851 { 1852 struct mem_cgroup *iter; 1853 1854 spin_lock(&memcg_oom_lock); 1855 for_each_mem_cgroup_tree(iter, memcg) 1856 iter->under_oom++; 1857 spin_unlock(&memcg_oom_lock); 1858 } 1859 1860 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) 1861 { 1862 struct mem_cgroup *iter; 1863 1864 /* 1865 * Be careful about under_oom underflows becase a child memcg 1866 * could have been added after mem_cgroup_mark_under_oom. 
1867 */ 1868 spin_lock(&memcg_oom_lock); 1869 for_each_mem_cgroup_tree(iter, memcg) 1870 if (iter->under_oom > 0) 1871 iter->under_oom--; 1872 spin_unlock(&memcg_oom_lock); 1873 } 1874 1875 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1876 1877 struct oom_wait_info { 1878 struct mem_cgroup *memcg; 1879 wait_queue_entry_t wait; 1880 }; 1881 1882 static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1883 unsigned mode, int sync, void *arg) 1884 { 1885 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; 1886 struct mem_cgroup *oom_wait_memcg; 1887 struct oom_wait_info *oom_wait_info; 1888 1889 oom_wait_info = container_of(wait, struct oom_wait_info, wait); 1890 oom_wait_memcg = oom_wait_info->memcg; 1891 1892 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && 1893 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) 1894 return 0; 1895 return autoremove_wake_function(wait, mode, sync, arg); 1896 } 1897 1898 static void memcg_oom_recover(struct mem_cgroup *memcg) 1899 { 1900 /* 1901 * For the following lockless ->under_oom test, the only required 1902 * guarantee is that it must see the state asserted by an OOM when 1903 * this function is called as a result of userland actions 1904 * triggered by the notification of the OOM. This is trivially 1905 * achieved by invoking mem_cgroup_mark_under_oom() before 1906 * triggering notification. 1907 */ 1908 if (memcg && memcg->under_oom) 1909 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); 1910 } 1911 1912 enum oom_status { 1913 OOM_SUCCESS, 1914 OOM_FAILED, 1915 OOM_ASYNC, 1916 OOM_SKIPPED 1917 }; 1918 1919 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1920 { 1921 enum oom_status ret; 1922 bool locked; 1923 1924 if (order > PAGE_ALLOC_COSTLY_ORDER) 1925 return OOM_SKIPPED; 1926 1927 memcg_memory_event(memcg, MEMCG_OOM); 1928 1929 /* 1930 * We are in the middle of the charge context here, so we 1931 * don't want to block when potentially sitting on a callstack 1932 * that holds all kinds of filesystem and mm locks. 1933 * 1934 * cgroup1 allows disabling the OOM killer and waiting for outside 1935 * handling until the charge can succeed; remember the context and put 1936 * the task to sleep at the end of the page fault when all locks are 1937 * released. 1938 * 1939 * On the other hand, in-kernel OOM killer allows for an async victim 1940 * memory reclaim (oom_reaper) and that means that we are not solely 1941 * relying on the oom victim to make a forward progress and we can 1942 * invoke the oom killer here. 1943 * 1944 * Please note that mem_cgroup_out_of_memory might fail to find a 1945 * victim and then we have to bail out from the charge path. 
1946 */ 1947 if (memcg->oom_kill_disable) { 1948 if (!current->in_user_fault) 1949 return OOM_SKIPPED; 1950 css_get(&memcg->css); 1951 current->memcg_in_oom = memcg; 1952 current->memcg_oom_gfp_mask = mask; 1953 current->memcg_oom_order = order; 1954 1955 return OOM_ASYNC; 1956 } 1957 1958 mem_cgroup_mark_under_oom(memcg); 1959 1960 locked = mem_cgroup_oom_trylock(memcg); 1961 1962 if (locked) 1963 mem_cgroup_oom_notify(memcg); 1964 1965 mem_cgroup_unmark_under_oom(memcg); 1966 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1967 ret = OOM_SUCCESS; 1968 else 1969 ret = OOM_FAILED; 1970 1971 if (locked) 1972 mem_cgroup_oom_unlock(memcg); 1973 1974 return ret; 1975 } 1976 1977 /** 1978 * mem_cgroup_oom_synchronize - complete memcg OOM handling 1979 * @handle: actually kill/wait or just clean up the OOM state 1980 * 1981 * This has to be called at the end of a page fault if the memcg OOM 1982 * handler was enabled. 1983 * 1984 * Memcg supports userspace OOM handling where failed allocations must 1985 * sleep on a waitqueue until the userspace task resolves the 1986 * situation. Sleeping directly in the charge context with all kinds 1987 * of locks held is not a good idea, instead we remember an OOM state 1988 * in the task and mem_cgroup_oom_synchronize() has to be called at 1989 * the end of the page fault to complete the OOM handling. 1990 * 1991 * Returns %true if an ongoing memcg OOM situation was detected and 1992 * completed, %false otherwise. 1993 */ 1994 bool mem_cgroup_oom_synchronize(bool handle) 1995 { 1996 struct mem_cgroup *memcg = current->memcg_in_oom; 1997 struct oom_wait_info owait; 1998 bool locked; 1999 2000 /* OOM is global, do not handle */ 2001 if (!memcg) 2002 return false; 2003 2004 if (!handle) 2005 goto cleanup; 2006 2007 owait.memcg = memcg; 2008 owait.wait.flags = 0; 2009 owait.wait.func = memcg_oom_wake_function; 2010 owait.wait.private = current; 2011 INIT_LIST_HEAD(&owait.wait.entry); 2012 2013 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2014 mem_cgroup_mark_under_oom(memcg); 2015 2016 locked = mem_cgroup_oom_trylock(memcg); 2017 2018 if (locked) 2019 mem_cgroup_oom_notify(memcg); 2020 2021 if (locked && !memcg->oom_kill_disable) { 2022 mem_cgroup_unmark_under_oom(memcg); 2023 finish_wait(&memcg_oom_waitq, &owait.wait); 2024 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 2025 current->memcg_oom_order); 2026 } else { 2027 schedule(); 2028 mem_cgroup_unmark_under_oom(memcg); 2029 finish_wait(&memcg_oom_waitq, &owait.wait); 2030 } 2031 2032 if (locked) { 2033 mem_cgroup_oom_unlock(memcg); 2034 /* 2035 * There is no guarantee that an OOM-lock contender 2036 * sees the wakeups triggered by the OOM kill 2037 * uncharges. Wake any sleepers explicitely. 2038 */ 2039 memcg_oom_recover(memcg); 2040 } 2041 cleanup: 2042 current->memcg_in_oom = NULL; 2043 css_put(&memcg->css); 2044 return true; 2045 } 2046 2047 /** 2048 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 2049 * @victim: task to be killed by the OOM killer 2050 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 2051 * 2052 * Returns a pointer to a memory cgroup, which has to be cleaned up 2053 * by killing all belonging OOM-killable tasks. 2054 * 2055 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 
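 *
 * A typical caller pairs it roughly like this (a sketch only; the real
 * user lives in the OOM killer code):
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oom_domain);
 *	if (oom_group) {
 *		mem_cgroup_print_oom_group(oom_group);
 *		... kill every task belonging to oom_group ...
 *		mem_cgroup_put(oom_group);
 *	}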
2056 */ 2057 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 2058 struct mem_cgroup *oom_domain) 2059 { 2060 struct mem_cgroup *oom_group = NULL; 2061 struct mem_cgroup *memcg; 2062 2063 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2064 return NULL; 2065 2066 if (!oom_domain) 2067 oom_domain = root_mem_cgroup; 2068 2069 rcu_read_lock(); 2070 2071 memcg = mem_cgroup_from_task(victim); 2072 if (memcg == root_mem_cgroup) 2073 goto out; 2074 2075 /* 2076 * If the victim task has been asynchronously moved to a different 2077 * memory cgroup, we might end up killing tasks outside oom_domain. 2078 * In this case it's better to ignore memory.group.oom. 2079 */ 2080 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 2081 goto out; 2082 2083 /* 2084 * Traverse the memory cgroup hierarchy from the victim task's 2085 * cgroup up to the OOMing cgroup (or root) to find the 2086 * highest-level memory cgroup with oom.group set. 2087 */ 2088 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 2089 if (memcg->oom_group) 2090 oom_group = memcg; 2091 2092 if (memcg == oom_domain) 2093 break; 2094 } 2095 2096 if (oom_group) 2097 css_get(&oom_group->css); 2098 out: 2099 rcu_read_unlock(); 2100 2101 return oom_group; 2102 } 2103 2104 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2105 { 2106 pr_info("Tasks in "); 2107 pr_cont_cgroup_path(memcg->css.cgroup); 2108 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2109 } 2110 2111 /** 2112 * lock_page_memcg - lock a page->mem_cgroup binding 2113 * @page: the page 2114 * 2115 * This function protects unlocked LRU pages from being moved to 2116 * another cgroup. 2117 * 2118 * It ensures lifetime of the returned memcg. Caller is responsible 2119 * for the lifetime of the page; __unlock_page_memcg() is available 2120 * when @page might get freed inside the locked section. 2121 */ 2122 struct mem_cgroup *lock_page_memcg(struct page *page) 2123 { 2124 struct page *head = compound_head(page); /* rmap on tail pages */ 2125 struct mem_cgroup *memcg; 2126 unsigned long flags; 2127 2128 /* 2129 * The RCU lock is held throughout the transaction. The fast 2130 * path can get away without acquiring the memcg->move_lock 2131 * because page moving starts with an RCU grace period. 2132 * 2133 * The RCU lock also protects the memcg from being freed when 2134 * the page state that is going to change is the only thing 2135 * preventing the page itself from being freed. E.g. writeback 2136 * doesn't hold a page reference and relies on PG_writeback to 2137 * keep off truncation, migration and so forth. 2138 */ 2139 rcu_read_lock(); 2140 2141 if (mem_cgroup_disabled()) 2142 return NULL; 2143 again: 2144 memcg = head->mem_cgroup; 2145 if (unlikely(!memcg)) 2146 return NULL; 2147 2148 if (atomic_read(&memcg->moving_account) <= 0) 2149 return memcg; 2150 2151 spin_lock_irqsave(&memcg->move_lock, flags); 2152 if (memcg != head->mem_cgroup) { 2153 spin_unlock_irqrestore(&memcg->move_lock, flags); 2154 goto again; 2155 } 2156 2157 /* 2158 * When charge migration first begins, we can have locked and 2159 * unlocked page stat updates happening concurrently. Track 2160 * the task who has the lock for unlock_page_memcg(). 2161 */ 2162 memcg->move_lock_task = current; 2163 memcg->move_lock_flags = flags; 2164 2165 return memcg; 2166 } 2167 EXPORT_SYMBOL(lock_page_memcg); 2168 2169 /** 2170 * __unlock_page_memcg - unlock and unpin a memcg 2171 * @memcg: the memcg 2172 * 2173 * Unlock and unpin a memcg returned by lock_page_memcg(). 
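 *
 * For example, a caller that might free @page inside the locked section
 * keeps the memcg pointer itself (an illustrative sketch):
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state; the page may get freed here ...
 *	__unlock_page_memcg(memcg);
 *
 * as opposed to the common lock_page_memcg(page) / unlock_page_memcg(page)
 * pairing.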
2174 */ 2175 void __unlock_page_memcg(struct mem_cgroup *memcg) 2176 { 2177 if (memcg && memcg->move_lock_task == current) { 2178 unsigned long flags = memcg->move_lock_flags; 2179 2180 memcg->move_lock_task = NULL; 2181 memcg->move_lock_flags = 0; 2182 2183 spin_unlock_irqrestore(&memcg->move_lock, flags); 2184 } 2185 2186 rcu_read_unlock(); 2187 } 2188 2189 /** 2190 * unlock_page_memcg - unlock a page->mem_cgroup binding 2191 * @page: the page 2192 */ 2193 void unlock_page_memcg(struct page *page) 2194 { 2195 struct page *head = compound_head(page); 2196 2197 __unlock_page_memcg(head->mem_cgroup); 2198 } 2199 EXPORT_SYMBOL(unlock_page_memcg); 2200 2201 struct memcg_stock_pcp { 2202 struct mem_cgroup *cached; /* this never be root cgroup */ 2203 unsigned int nr_pages; 2204 2205 #ifdef CONFIG_MEMCG_KMEM 2206 struct obj_cgroup *cached_objcg; 2207 unsigned int nr_bytes; 2208 #endif 2209 2210 struct work_struct work; 2211 unsigned long flags; 2212 #define FLUSHING_CACHED_CHARGE 0 2213 }; 2214 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2215 static DEFINE_MUTEX(percpu_charge_mutex); 2216 2217 #ifdef CONFIG_MEMCG_KMEM 2218 static void drain_obj_stock(struct memcg_stock_pcp *stock); 2219 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2220 struct mem_cgroup *root_memcg); 2221 2222 #else 2223 static inline void drain_obj_stock(struct memcg_stock_pcp *stock) 2224 { 2225 } 2226 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2227 struct mem_cgroup *root_memcg) 2228 { 2229 return false; 2230 } 2231 #endif 2232 2233 /** 2234 * consume_stock: Try to consume stocked charge on this cpu. 2235 * @memcg: memcg to consume from. 2236 * @nr_pages: how many pages to charge. 2237 * 2238 * The charges will only happen if @memcg matches the current cpu's memcg 2239 * stock, and at least @nr_pages are available in that stock. Failure to 2240 * service an allocation will refill the stock. 2241 * 2242 * returns true if successful, false otherwise. 2243 */ 2244 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2245 { 2246 struct memcg_stock_pcp *stock; 2247 unsigned long flags; 2248 bool ret = false; 2249 2250 if (nr_pages > MEMCG_CHARGE_BATCH) 2251 return ret; 2252 2253 local_irq_save(flags); 2254 2255 stock = this_cpu_ptr(&memcg_stock); 2256 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2257 stock->nr_pages -= nr_pages; 2258 ret = true; 2259 } 2260 2261 local_irq_restore(flags); 2262 2263 return ret; 2264 } 2265 2266 /* 2267 * Returns stocks cached in percpu and reset cached information. 2268 */ 2269 static void drain_stock(struct memcg_stock_pcp *stock) 2270 { 2271 struct mem_cgroup *old = stock->cached; 2272 2273 if (!old) 2274 return; 2275 2276 if (stock->nr_pages) { 2277 page_counter_uncharge(&old->memory, stock->nr_pages); 2278 if (do_memsw_account()) 2279 page_counter_uncharge(&old->memsw, stock->nr_pages); 2280 stock->nr_pages = 0; 2281 } 2282 2283 css_put(&old->css); 2284 stock->cached = NULL; 2285 } 2286 2287 static void drain_local_stock(struct work_struct *dummy) 2288 { 2289 struct memcg_stock_pcp *stock; 2290 unsigned long flags; 2291 2292 /* 2293 * The only protection from memory hotplug vs. 
drain_stock races is 2294 * that we always operate on local CPU stock here with IRQ disabled 2295 */ 2296 local_irq_save(flags); 2297 2298 stock = this_cpu_ptr(&memcg_stock); 2299 drain_obj_stock(stock); 2300 drain_stock(stock); 2301 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2302 2303 local_irq_restore(flags); 2304 } 2305 2306 /* 2307 * Cache charges(val) to local per_cpu area. 2308 * This will be consumed by consume_stock() function, later. 2309 */ 2310 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2311 { 2312 struct memcg_stock_pcp *stock; 2313 unsigned long flags; 2314 2315 local_irq_save(flags); 2316 2317 stock = this_cpu_ptr(&memcg_stock); 2318 if (stock->cached != memcg) { /* reset if necessary */ 2319 drain_stock(stock); 2320 css_get(&memcg->css); 2321 stock->cached = memcg; 2322 } 2323 stock->nr_pages += nr_pages; 2324 2325 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2326 drain_stock(stock); 2327 2328 local_irq_restore(flags); 2329 } 2330 2331 /* 2332 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2333 * of the hierarchy under it. 2334 */ 2335 static void drain_all_stock(struct mem_cgroup *root_memcg) 2336 { 2337 int cpu, curcpu; 2338 2339 /* If someone's already draining, avoid adding running more workers. */ 2340 if (!mutex_trylock(&percpu_charge_mutex)) 2341 return; 2342 /* 2343 * Notify other cpus that system-wide "drain" is running 2344 * We do not care about races with the cpu hotplug because cpu down 2345 * as well as workers from this path always operate on the local 2346 * per-cpu data. CPU up doesn't touch memcg_stock at all. 2347 */ 2348 curcpu = get_cpu(); 2349 for_each_online_cpu(cpu) { 2350 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2351 struct mem_cgroup *memcg; 2352 bool flush = false; 2353 2354 rcu_read_lock(); 2355 memcg = stock->cached; 2356 if (memcg && stock->nr_pages && 2357 mem_cgroup_is_descendant(memcg, root_memcg)) 2358 flush = true; 2359 if (obj_stock_flush_required(stock, root_memcg)) 2360 flush = true; 2361 rcu_read_unlock(); 2362 2363 if (flush && 2364 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2365 if (cpu == curcpu) 2366 drain_local_stock(&stock->work); 2367 else 2368 schedule_work_on(cpu, &stock->work); 2369 } 2370 } 2371 put_cpu(); 2372 mutex_unlock(&percpu_charge_mutex); 2373 } 2374 2375 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2376 { 2377 struct memcg_stock_pcp *stock; 2378 struct mem_cgroup *memcg, *mi; 2379 2380 stock = &per_cpu(memcg_stock, cpu); 2381 drain_stock(stock); 2382 2383 for_each_mem_cgroup(memcg) { 2384 int i; 2385 2386 for (i = 0; i < MEMCG_NR_STAT; i++) { 2387 int nid; 2388 long x; 2389 2390 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2391 if (x) 2392 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2393 atomic_long_add(x, &memcg->vmstats[i]); 2394 2395 if (i >= NR_VM_NODE_STAT_ITEMS) 2396 continue; 2397 2398 for_each_node(nid) { 2399 struct mem_cgroup_per_node *pn; 2400 2401 pn = mem_cgroup_nodeinfo(memcg, nid); 2402 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2403 if (x) 2404 do { 2405 atomic_long_add(x, &pn->lruvec_stat[i]); 2406 } while ((pn = parent_nodeinfo(pn, nid))); 2407 } 2408 } 2409 2410 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2411 long x; 2412 2413 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2414 if (x) 2415 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2416 atomic_long_add(x, &memcg->vmevents[i]); 2417 } 2418 } 2419 2420 return 0; 2421 } 2422 2423 static unsigned long 
reclaim_high(struct mem_cgroup *memcg, 2424 unsigned int nr_pages, 2425 gfp_t gfp_mask) 2426 { 2427 unsigned long nr_reclaimed = 0; 2428 2429 do { 2430 unsigned long pflags; 2431 2432 if (page_counter_read(&memcg->memory) <= 2433 READ_ONCE(memcg->memory.high)) 2434 continue; 2435 2436 memcg_memory_event(memcg, MEMCG_HIGH); 2437 2438 psi_memstall_enter(&pflags); 2439 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2440 gfp_mask, true); 2441 psi_memstall_leave(&pflags); 2442 } while ((memcg = parent_mem_cgroup(memcg)) && 2443 !mem_cgroup_is_root(memcg)); 2444 2445 return nr_reclaimed; 2446 } 2447 2448 static void high_work_func(struct work_struct *work) 2449 { 2450 struct mem_cgroup *memcg; 2451 2452 memcg = container_of(work, struct mem_cgroup, high_work); 2453 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2454 } 2455 2456 /* 2457 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2458 * enough to still cause a significant slowdown in most cases, while still 2459 * allowing diagnostics and tracing to proceed without becoming stuck. 2460 */ 2461 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2462 2463 /* 2464 * When calculating the delay, we use these on either side of the exponentiation 2465 * to maintain precision and scale to a reasonable number of jiffies (see the 2466 * table below). 2467 * 2468 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2469 * overage ratio to a delay. 2470 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2471 * proposed penalty in order to reduce it to a reasonable number of jiffies, and 2472 * to produce a reasonable delay curve. 2473 * 2474 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2475 * reasonable delay curve compared to precision-adjusted overage, not 2476 * penalising heavily at first, but still making sure that growth beyond the 2477 * limit penalises misbehaving cgroups by slowing them down exponentially.
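 *
 * Putting the two shifts together, the throttling computed by the code
 * below works out to roughly (a condensed restatement, not a new formula):
 *
 *	overage = worst, over memcg and its ancestors, of
 *		  ((usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high
 *	penalty = (overage * overage * HZ)
 *		  >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT)
 *	delay   = penalty * nr_pages / MEMCG_CHARGE_BATCH
 *
 * with the final sleep clamped to MEMCG_MAX_HIGH_DELAY_JIFFIES in
 * mem_cgroup_handle_over_high().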
For 2478 * example, with a high of 100 megabytes: 2479 * 2480 * +-------+------------------------+ 2481 * | usage | time to allocate in ms | 2482 * +-------+------------------------+ 2483 * | 100M | 0 | 2484 * | 101M | 6 | 2485 * | 102M | 25 | 2486 * | 103M | 57 | 2487 * | 104M | 102 | 2488 * | 105M | 159 | 2489 * | 106M | 230 | 2490 * | 107M | 313 | 2491 * | 108M | 409 | 2492 * | 109M | 518 | 2493 * | 110M | 639 | 2494 * | 111M | 774 | 2495 * | 112M | 921 | 2496 * | 113M | 1081 | 2497 * | 114M | 1254 | 2498 * | 115M | 1439 | 2499 * | 116M | 1638 | 2500 * | 117M | 1849 | 2501 * | 118M | 2000 | 2502 * | 119M | 2000 | 2503 * | 120M | 2000 | 2504 * +-------+------------------------+ 2505 */ 2506 #define MEMCG_DELAY_PRECISION_SHIFT 20 2507 #define MEMCG_DELAY_SCALING_SHIFT 14 2508 2509 static u64 calculate_overage(unsigned long usage, unsigned long high) 2510 { 2511 u64 overage; 2512 2513 if (usage <= high) 2514 return 0; 2515 2516 /* 2517 * Prevent division by 0 in overage calculation by acting as if 2518 * it was a threshold of 1 page 2519 */ 2520 high = max(high, 1UL); 2521 2522 overage = usage - high; 2523 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2524 return div64_u64(overage, high); 2525 } 2526 2527 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2528 { 2529 u64 overage, max_overage = 0; 2530 2531 do { 2532 overage = calculate_overage(page_counter_read(&memcg->memory), 2533 READ_ONCE(memcg->memory.high)); 2534 max_overage = max(overage, max_overage); 2535 } while ((memcg = parent_mem_cgroup(memcg)) && 2536 !mem_cgroup_is_root(memcg)); 2537 2538 return max_overage; 2539 } 2540 2541 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2542 { 2543 u64 overage, max_overage = 0; 2544 2545 do { 2546 overage = calculate_overage(page_counter_read(&memcg->swap), 2547 READ_ONCE(memcg->swap.high)); 2548 if (overage) 2549 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2550 max_overage = max(overage, max_overage); 2551 } while ((memcg = parent_mem_cgroup(memcg)) && 2552 !mem_cgroup_is_root(memcg)); 2553 2554 return max_overage; 2555 } 2556 2557 /* 2558 * Get the number of jiffies that we should penalise a mischievous cgroup which 2559 * is exceeding its memory.high by checking both it and its ancestors. 2560 */ 2561 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2562 unsigned int nr_pages, 2563 u64 max_overage) 2564 { 2565 unsigned long penalty_jiffies; 2566 2567 if (!max_overage) 2568 return 0; 2569 2570 /* 2571 * We use overage compared to memory.high to calculate the number of 2572 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2573 * fairly lenient on small overages, and increasingly harsh when the 2574 * memcg in question makes it clear that it has no intention of stopping 2575 * its crazy behaviour, so we exponentially increase the delay based on 2576 * overage amount. 2577 */ 2578 penalty_jiffies = max_overage * max_overage * HZ; 2579 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2580 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2581 2582 /* 2583 * Factor in the task's own contribution to the overage, such that four 2584 * N-sized allocations are throttled approximately the same as one 2585 * 4N-sized allocation. 2586 * 2587 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2588 * larger the current charge patch is than that. 
2589 */ 2590 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2591 } 2592 2593 /* 2594 * Scheduled by try_charge() to be executed from the userland return path 2595 * and reclaims memory over the high limit. 2596 */ 2597 void mem_cgroup_handle_over_high(void) 2598 { 2599 unsigned long penalty_jiffies; 2600 unsigned long pflags; 2601 unsigned long nr_reclaimed; 2602 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2603 int nr_retries = MAX_RECLAIM_RETRIES; 2604 struct mem_cgroup *memcg; 2605 bool in_retry = false; 2606 2607 if (likely(!nr_pages)) 2608 return; 2609 2610 memcg = get_mem_cgroup_from_mm(current->mm); 2611 current->memcg_nr_pages_over_high = 0; 2612 2613 retry_reclaim: 2614 /* 2615 * The allocating task should reclaim at least the batch size, but for 2616 * subsequent retries we only want to do what's necessary to prevent oom 2617 * or breaching resource isolation. 2618 * 2619 * This is distinct from memory.max or page allocator behaviour because 2620 * memory.high is currently batched, whereas memory.max and the page 2621 * allocator run every time an allocation is made. 2622 */ 2623 nr_reclaimed = reclaim_high(memcg, 2624 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2625 GFP_KERNEL); 2626 2627 /* 2628 * memory.high is breached and reclaim is unable to keep up. Throttle 2629 * allocators proactively to slow down excessive growth. 2630 */ 2631 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2632 mem_find_max_overage(memcg)); 2633 2634 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2635 swap_find_max_overage(memcg)); 2636 2637 /* 2638 * Clamp the max delay per usermode return so as to still keep the 2639 * application moving forwards and also permit diagnostics, albeit 2640 * extremely slowly. 2641 */ 2642 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2643 2644 /* 2645 * Don't sleep if the amount of jiffies this memcg owes us is so low 2646 * that it's not even worth doing, in an attempt to be nice to those who 2647 * go only a small amount over their memory.high value and maybe haven't 2648 * been aggressively reclaimed enough yet. 2649 */ 2650 if (penalty_jiffies <= HZ / 100) 2651 goto out; 2652 2653 /* 2654 * If reclaim is making forward progress but we're still over 2655 * memory.high, we want to encourage that rather than doing allocator 2656 * throttling. 2657 */ 2658 if (nr_reclaimed || nr_retries--) { 2659 in_retry = true; 2660 goto retry_reclaim; 2661 } 2662 2663 /* 2664 * If we exit early, we're guaranteed to die (since 2665 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2666 * need to account for any ill-begotten jiffies to pay them off later. 
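 *
 * Condensed view of the path that led here (a recap of the code above,
 * not additional behaviour):
 *
 *	nr_reclaimed = reclaim_high(memcg, nr_pages, GFP_KERNEL);
 *	penalty = high delay for memory overage + high delay for swap overage;
 *	penalty = min(penalty, MEMCG_MAX_HIGH_DELAY_JIFFIES);
 *	penalty <= HZ / 100 (10ms)         -> return without sleeping
 *	reclaim progressed or retries left -> retry reclaim instead
 *	otherwise                          -> sleep for penalty jiffies below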
2667 */ 2668 psi_memstall_enter(&pflags); 2669 schedule_timeout_killable(penalty_jiffies); 2670 psi_memstall_leave(&pflags); 2671 2672 out: 2673 css_put(&memcg->css); 2674 } 2675 2676 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2677 unsigned int nr_pages) 2678 { 2679 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2680 int nr_retries = MAX_RECLAIM_RETRIES; 2681 struct mem_cgroup *mem_over_limit; 2682 struct page_counter *counter; 2683 enum oom_status oom_status; 2684 unsigned long nr_reclaimed; 2685 bool may_swap = true; 2686 bool drained = false; 2687 unsigned long pflags; 2688 2689 if (mem_cgroup_is_root(memcg)) 2690 return 0; 2691 retry: 2692 if (consume_stock(memcg, nr_pages)) 2693 return 0; 2694 2695 if (!do_memsw_account() || 2696 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2697 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2698 goto done_restock; 2699 if (do_memsw_account()) 2700 page_counter_uncharge(&memcg->memsw, batch); 2701 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2702 } else { 2703 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2704 may_swap = false; 2705 } 2706 2707 if (batch > nr_pages) { 2708 batch = nr_pages; 2709 goto retry; 2710 } 2711 2712 /* 2713 * Memcg doesn't have a dedicated reserve for atomic 2714 * allocations. But like the global atomic pool, we need to 2715 * put the burden of reclaim on regular allocation requests 2716 * and let these go through as privileged allocations. 2717 */ 2718 if (gfp_mask & __GFP_ATOMIC) 2719 goto force; 2720 2721 /* 2722 * Unlike in global OOM situations, memcg is not in a physical 2723 * memory shortage. Allow dying and OOM-killed tasks to 2724 * bypass the last charges so that they can exit quickly and 2725 * free their memory. 2726 */ 2727 if (unlikely(should_force_charge())) 2728 goto force; 2729 2730 /* 2731 * Prevent unbounded recursion when reclaim operations need to 2732 * allocate memory. This might exceed the limits temporarily, 2733 * but we prefer facilitating memory reclaim and getting back 2734 * under the limit over triggering OOM kills in these cases. 2735 */ 2736 if (unlikely(current->flags & PF_MEMALLOC)) 2737 goto force; 2738 2739 if (unlikely(task_in_memcg_oom(current))) 2740 goto nomem; 2741 2742 if (!gfpflags_allow_blocking(gfp_mask)) 2743 goto nomem; 2744 2745 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2746 2747 psi_memstall_enter(&pflags); 2748 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2749 gfp_mask, may_swap); 2750 psi_memstall_leave(&pflags); 2751 2752 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2753 goto retry; 2754 2755 if (!drained) { 2756 drain_all_stock(mem_over_limit); 2757 drained = true; 2758 goto retry; 2759 } 2760 2761 if (gfp_mask & __GFP_NORETRY) 2762 goto nomem; 2763 /* 2764 * Even though the limit is exceeded at this point, reclaim 2765 * may have been able to free some pages. Retry the charge 2766 * before killing the task. 2767 * 2768 * Only for regular pages, though: huge pages are rather 2769 * unlikely to succeed so close to the limit, and we fall back 2770 * to regular pages anyway in case of failure. 2771 */ 2772 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2773 goto retry; 2774 /* 2775 * At task move, charge accounts can be doubly counted. So, it's 2776 * better to wait until the end of task_move if something is going on. 
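 *
 * At this point the charge attempt has gone through, roughly in order
 * (a condensed recap of the code above, not new behaviour):
 *
 *	1. consume_stock()           - per-cpu cached charge
 *	2. page_counter_try_charge() - batched first, then exact nr_pages
 *	3. forced-charge shortcuts   - __GFP_ATOMIC, dying tasks, PF_MEMALLOC
 *	4. direct reclaim            - try_to_free_mem_cgroup_pages()
 *	5. drain_all_stock()         - flush other CPUs' cached charges
 *
 * and the retries below end in mem_cgroup_oom(), a forced charge or
 * -ENOMEM.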
2777 */ 2778 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2779 goto retry; 2780 2781 if (nr_retries--) 2782 goto retry; 2783 2784 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2785 goto nomem; 2786 2787 if (gfp_mask & __GFP_NOFAIL) 2788 goto force; 2789 2790 if (fatal_signal_pending(current)) 2791 goto force; 2792 2793 /* 2794 * keep retrying as long as the memcg oom killer is able to make 2795 * a forward progress or bypass the charge if the oom killer 2796 * couldn't make any progress. 2797 */ 2798 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2799 get_order(nr_pages * PAGE_SIZE)); 2800 switch (oom_status) { 2801 case OOM_SUCCESS: 2802 nr_retries = MAX_RECLAIM_RETRIES; 2803 goto retry; 2804 case OOM_FAILED: 2805 goto force; 2806 default: 2807 goto nomem; 2808 } 2809 nomem: 2810 if (!(gfp_mask & __GFP_NOFAIL)) 2811 return -ENOMEM; 2812 force: 2813 /* 2814 * The allocation either can't fail or will lead to more memory 2815 * being freed very soon. Allow memory usage go over the limit 2816 * temporarily by force charging it. 2817 */ 2818 page_counter_charge(&memcg->memory, nr_pages); 2819 if (do_memsw_account()) 2820 page_counter_charge(&memcg->memsw, nr_pages); 2821 2822 return 0; 2823 2824 done_restock: 2825 if (batch > nr_pages) 2826 refill_stock(memcg, batch - nr_pages); 2827 2828 /* 2829 * If the hierarchy is above the normal consumption range, schedule 2830 * reclaim on returning to userland. We can perform reclaim here 2831 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2832 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2833 * not recorded as it most likely matches current's and won't 2834 * change in the meantime. As high limit is checked again before 2835 * reclaim, the cost of mismatch is negligible. 2836 */ 2837 do { 2838 bool mem_high, swap_high; 2839 2840 mem_high = page_counter_read(&memcg->memory) > 2841 READ_ONCE(memcg->memory.high); 2842 swap_high = page_counter_read(&memcg->swap) > 2843 READ_ONCE(memcg->swap.high); 2844 2845 /* Don't bother a random interrupted task */ 2846 if (in_interrupt()) { 2847 if (mem_high) { 2848 schedule_work(&memcg->high_work); 2849 break; 2850 } 2851 continue; 2852 } 2853 2854 if (mem_high || swap_high) { 2855 /* 2856 * The allocating tasks in this cgroup will need to do 2857 * reclaim or be throttled to prevent further growth 2858 * of the memory or swap footprints. 2859 * 2860 * Target some best-effort fairness between the tasks, 2861 * and distribute reclaim work and delay penalties 2862 * based on how much each task is actually allocating. 
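 *
 * As an illustration (made-up numbers): if task A charges four batches
 * and task B charges one batch while the cgroup is above memory.high,
 * A accumulates four times the memcg_nr_pages_over_high of B and will
 * therefore do roughly four times as much reclaim work, and sleep
 * roughly four times as long, on its next return to userland.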
2863 */ 2864 current->memcg_nr_pages_over_high += batch; 2865 set_notify_resume(current); 2866 break; 2867 } 2868 } while ((memcg = parent_mem_cgroup(memcg))); 2869 2870 return 0; 2871 } 2872 2873 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2874 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2875 { 2876 if (mem_cgroup_is_root(memcg)) 2877 return; 2878 2879 page_counter_uncharge(&memcg->memory, nr_pages); 2880 if (do_memsw_account()) 2881 page_counter_uncharge(&memcg->memsw, nr_pages); 2882 } 2883 #endif 2884 2885 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2886 { 2887 VM_BUG_ON_PAGE(page->mem_cgroup, page); 2888 /* 2889 * Any of the following ensures page->mem_cgroup stability: 2890 * 2891 * - the page lock 2892 * - LRU isolation 2893 * - lock_page_memcg() 2894 * - exclusive reference 2895 */ 2896 page->mem_cgroup = memcg; 2897 } 2898 2899 #ifdef CONFIG_MEMCG_KMEM 2900 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2901 gfp_t gfp) 2902 { 2903 unsigned int objects = objs_per_slab_page(s, page); 2904 void *vec; 2905 2906 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2907 page_to_nid(page)); 2908 if (!vec) 2909 return -ENOMEM; 2910 2911 if (cmpxchg(&page->obj_cgroups, NULL, 2912 (struct obj_cgroup **) ((unsigned long)vec | 0x1UL))) 2913 kfree(vec); 2914 else 2915 kmemleak_not_leak(vec); 2916 2917 return 0; 2918 } 2919 2920 /* 2921 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2922 * 2923 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 2924 * cgroup_mutex, etc. 2925 */ 2926 struct mem_cgroup *mem_cgroup_from_obj(void *p) 2927 { 2928 struct page *page; 2929 2930 if (mem_cgroup_disabled()) 2931 return NULL; 2932 2933 page = virt_to_head_page(p); 2934 2935 /* 2936 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer 2937 * or a pointer to obj_cgroup vector. In the latter case the lowest 2938 * bit of the pointer is set. 2939 * The page->mem_cgroup pointer can be asynchronously changed 2940 * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed 2941 * from a valid memcg pointer to objcg vector or back. 2942 */ 2943 if (!page->mem_cgroup) 2944 return NULL; 2945 2946 /* 2947 * Slab objects are accounted individually, not per-page. 2948 * Memcg membership data for each individual object is saved in 2949 * the page->obj_cgroups. 
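 *
 * The encoding tested below uses the lowest bit of the pointer (see the
 * cmpxchg() in memcg_alloc_page_obj_cgroups() above), roughly:
 *
 *	page->mem_cgroup == NULL	not accounted
 *	bit 0 clear			struct mem_cgroup * of the page
 *	bit 0 set			struct obj_cgroup ** vector with one
 *					slot per slab object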
2950 */ 2951 if (page_has_obj_cgroups(page)) { 2952 struct obj_cgroup *objcg; 2953 unsigned int off; 2954 2955 off = obj_to_index(page->slab_cache, page, p); 2956 objcg = page_obj_cgroups(page)[off]; 2957 if (objcg) 2958 return obj_cgroup_memcg(objcg); 2959 2960 return NULL; 2961 } 2962 2963 /* All other pages use page->mem_cgroup */ 2964 return page->mem_cgroup; 2965 } 2966 2967 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 2968 { 2969 struct obj_cgroup *objcg = NULL; 2970 struct mem_cgroup *memcg; 2971 2972 if (memcg_kmem_bypass()) 2973 return NULL; 2974 2975 rcu_read_lock(); 2976 if (unlikely(active_memcg())) 2977 memcg = active_memcg(); 2978 else 2979 memcg = mem_cgroup_from_task(current); 2980 2981 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 2982 objcg = rcu_dereference(memcg->objcg); 2983 if (objcg && obj_cgroup_tryget(objcg)) 2984 break; 2985 } 2986 rcu_read_unlock(); 2987 2988 return objcg; 2989 } 2990 2991 static int memcg_alloc_cache_id(void) 2992 { 2993 int id, size; 2994 int err; 2995 2996 id = ida_simple_get(&memcg_cache_ida, 2997 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 2998 if (id < 0) 2999 return id; 3000 3001 if (id < memcg_nr_cache_ids) 3002 return id; 3003 3004 /* 3005 * There's no space for the new id in memcg_caches arrays, 3006 * so we have to grow them. 3007 */ 3008 down_write(&memcg_cache_ids_sem); 3009 3010 size = 2 * (id + 1); 3011 if (size < MEMCG_CACHES_MIN_SIZE) 3012 size = MEMCG_CACHES_MIN_SIZE; 3013 else if (size > MEMCG_CACHES_MAX_SIZE) 3014 size = MEMCG_CACHES_MAX_SIZE; 3015 3016 err = memcg_update_all_list_lrus(size); 3017 if (!err) 3018 memcg_nr_cache_ids = size; 3019 3020 up_write(&memcg_cache_ids_sem); 3021 3022 if (err) { 3023 ida_simple_remove(&memcg_cache_ida, id); 3024 return err; 3025 } 3026 return id; 3027 } 3028 3029 static void memcg_free_cache_id(int id) 3030 { 3031 ida_simple_remove(&memcg_cache_ida, id); 3032 } 3033 3034 /** 3035 * __memcg_kmem_charge: charge a number of kernel pages to a memcg 3036 * @memcg: memory cgroup to charge 3037 * @gfp: reclaim mode 3038 * @nr_pages: number of pages to charge 3039 * 3040 * Returns 0 on success, an error code on failure. 3041 */ 3042 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, 3043 unsigned int nr_pages) 3044 { 3045 struct page_counter *counter; 3046 int ret; 3047 3048 ret = try_charge(memcg, gfp, nr_pages); 3049 if (ret) 3050 return ret; 3051 3052 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3053 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3054 3055 /* 3056 * Enforce __GFP_NOFAIL allocation because callers are not 3057 * prepared to see failures and likely do not have any failure 3058 * handling code. 
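 *
 * Outcomes of the kmem counter check below, summarized:
 *
 *	cgroup2, or kmem counter fits      -> return 0
 *	kmem over limit, __GFP_NOFAIL set  -> force the kmem charge, return 0
 *	kmem over limit otherwise          -> cancel_charge(), return -ENOMEM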
3059 */ 3060 if (gfp & __GFP_NOFAIL) { 3061 page_counter_charge(&memcg->kmem, nr_pages); 3062 return 0; 3063 } 3064 cancel_charge(memcg, nr_pages); 3065 return -ENOMEM; 3066 } 3067 return 0; 3068 } 3069 3070 /** 3071 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg 3072 * @memcg: memcg to uncharge 3073 * @nr_pages: number of pages to uncharge 3074 */ 3075 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) 3076 { 3077 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 3078 page_counter_uncharge(&memcg->kmem, nr_pages); 3079 3080 page_counter_uncharge(&memcg->memory, nr_pages); 3081 if (do_memsw_account()) 3082 page_counter_uncharge(&memcg->memsw, nr_pages); 3083 } 3084 3085 /** 3086 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3087 * @page: page to charge 3088 * @gfp: reclaim mode 3089 * @order: allocation order 3090 * 3091 * Returns 0 on success, an error code on failure. 3092 */ 3093 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3094 { 3095 struct mem_cgroup *memcg; 3096 int ret = 0; 3097 3098 memcg = get_mem_cgroup_from_current(); 3099 if (memcg && !mem_cgroup_is_root(memcg)) { 3100 ret = __memcg_kmem_charge(memcg, gfp, 1 << order); 3101 if (!ret) { 3102 page->mem_cgroup = memcg; 3103 __SetPageKmemcg(page); 3104 return 0; 3105 } 3106 css_put(&memcg->css); 3107 } 3108 return ret; 3109 } 3110 3111 /** 3112 * __memcg_kmem_uncharge_page: uncharge a kmem page 3113 * @page: page to uncharge 3114 * @order: allocation order 3115 */ 3116 void __memcg_kmem_uncharge_page(struct page *page, int order) 3117 { 3118 struct mem_cgroup *memcg = page->mem_cgroup; 3119 unsigned int nr_pages = 1 << order; 3120 3121 if (!memcg) 3122 return; 3123 3124 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 3125 __memcg_kmem_uncharge(memcg, nr_pages); 3126 page->mem_cgroup = NULL; 3127 css_put(&memcg->css); 3128 3129 /* slab pages do not have PageKmemcg flag set */ 3130 if (PageKmemcg(page)) 3131 __ClearPageKmemcg(page); 3132 } 3133 3134 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3135 { 3136 struct memcg_stock_pcp *stock; 3137 unsigned long flags; 3138 bool ret = false; 3139 3140 local_irq_save(flags); 3141 3142 stock = this_cpu_ptr(&memcg_stock); 3143 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3144 stock->nr_bytes -= nr_bytes; 3145 ret = true; 3146 } 3147 3148 local_irq_restore(flags); 3149 3150 return ret; 3151 } 3152 3153 static void drain_obj_stock(struct memcg_stock_pcp *stock) 3154 { 3155 struct obj_cgroup *old = stock->cached_objcg; 3156 3157 if (!old) 3158 return; 3159 3160 if (stock->nr_bytes) { 3161 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3162 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3163 3164 if (nr_pages) { 3165 rcu_read_lock(); 3166 __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages); 3167 rcu_read_unlock(); 3168 } 3169 3170 /* 3171 * The leftover is flushed to the centralized per-memcg value. 3172 * On the next attempt to refill obj stock it will be moved 3173 * to a per-cpu stock (probably, on an other CPU), see 3174 * refill_obj_stock(). 3175 * 3176 * How often it's flushed is a trade-off between the memory 3177 * limit enforcement accuracy and potential CPU contention, 3178 * so it might be changed in the future. 
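 *
 * Worked example (illustrative numbers): with 5 pages plus 100 bytes
 * stocked, nr_pages == 5 worth of charge is returned to the page
 * counters above, and the remaining 100 bytes are parked in
 * old->nr_charged_bytes by the atomic_add() below.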
3179 */ 3180 atomic_add(nr_bytes, &old->nr_charged_bytes); 3181 stock->nr_bytes = 0; 3182 } 3183 3184 obj_cgroup_put(old); 3185 stock->cached_objcg = NULL; 3186 } 3187 3188 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3189 struct mem_cgroup *root_memcg) 3190 { 3191 struct mem_cgroup *memcg; 3192 3193 if (stock->cached_objcg) { 3194 memcg = obj_cgroup_memcg(stock->cached_objcg); 3195 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3196 return true; 3197 } 3198 3199 return false; 3200 } 3201 3202 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3203 { 3204 struct memcg_stock_pcp *stock; 3205 unsigned long flags; 3206 3207 local_irq_save(flags); 3208 3209 stock = this_cpu_ptr(&memcg_stock); 3210 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3211 drain_obj_stock(stock); 3212 obj_cgroup_get(objcg); 3213 stock->cached_objcg = objcg; 3214 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0); 3215 } 3216 stock->nr_bytes += nr_bytes; 3217 3218 if (stock->nr_bytes > PAGE_SIZE) 3219 drain_obj_stock(stock); 3220 3221 local_irq_restore(flags); 3222 } 3223 3224 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3225 { 3226 struct mem_cgroup *memcg; 3227 unsigned int nr_pages, nr_bytes; 3228 int ret; 3229 3230 if (consume_obj_stock(objcg, size)) 3231 return 0; 3232 3233 /* 3234 * In theory, memcg->nr_charged_bytes can have enough 3235 * pre-charged bytes to satisfy the allocation. However, 3236 * flushing memcg->nr_charged_bytes requires two atomic 3237 * operations, and memcg->nr_charged_bytes can't be big, 3238 * so it's better to ignore it and try grab some new pages. 3239 * memcg->nr_charged_bytes will be flushed in 3240 * refill_obj_stock(), called from this function or 3241 * independently later. 3242 */ 3243 rcu_read_lock(); 3244 memcg = obj_cgroup_memcg(objcg); 3245 css_get(&memcg->css); 3246 rcu_read_unlock(); 3247 3248 nr_pages = size >> PAGE_SHIFT; 3249 nr_bytes = size & (PAGE_SIZE - 1); 3250 3251 if (nr_bytes) 3252 nr_pages += 1; 3253 3254 ret = __memcg_kmem_charge(memcg, gfp, nr_pages); 3255 if (!ret && nr_bytes) 3256 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes); 3257 3258 css_put(&memcg->css); 3259 return ret; 3260 } 3261 3262 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3263 { 3264 refill_obj_stock(objcg, size); 3265 } 3266 3267 #endif /* CONFIG_MEMCG_KMEM */ 3268 3269 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3270 3271 /* 3272 * Because tail pages are not marked as "used", set it. We're under 3273 * pgdat->lru_lock and migration entries setup in all page mappings. 3274 */ 3275 void mem_cgroup_split_huge_fixup(struct page *head) 3276 { 3277 struct mem_cgroup *memcg = head->mem_cgroup; 3278 int i; 3279 3280 if (mem_cgroup_disabled()) 3281 return; 3282 3283 for (i = 1; i < HPAGE_PMD_NR; i++) { 3284 css_get(&memcg->css); 3285 head[i].mem_cgroup = memcg; 3286 } 3287 } 3288 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3289 3290 #ifdef CONFIG_MEMCG_SWAP 3291 /** 3292 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3293 * @entry: swap entry to be moved 3294 * @from: mem_cgroup which the entry is moved from 3295 * @to: mem_cgroup which the entry is moved to 3296 * 3297 * It succeeds only when the swap_cgroup's record for this entry is the same 3298 * as the mem_cgroup's id of @from. 3299 * 3300 * Returns 0 on success, -EINVAL on failure. 
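 *
 * Conceptually, the update below is:
 *
 *	if (swap_cgroup record for @entry == id of @from)
 *		record = id of @to; MEMCG_SWAP: @from -= 1, @to += 1; return 0
 *	else
 *		return -EINVAL (nothing changed)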
3301 * 3302 * The caller must have charged to @to, IOW, called page_counter_charge() about 3303 * both res and memsw, and called css_get(). 3304 */ 3305 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3306 struct mem_cgroup *from, struct mem_cgroup *to) 3307 { 3308 unsigned short old_id, new_id; 3309 3310 old_id = mem_cgroup_id(from); 3311 new_id = mem_cgroup_id(to); 3312 3313 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3314 mod_memcg_state(from, MEMCG_SWAP, -1); 3315 mod_memcg_state(to, MEMCG_SWAP, 1); 3316 return 0; 3317 } 3318 return -EINVAL; 3319 } 3320 #else 3321 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3322 struct mem_cgroup *from, struct mem_cgroup *to) 3323 { 3324 return -EINVAL; 3325 } 3326 #endif 3327 3328 static DEFINE_MUTEX(memcg_max_mutex); 3329 3330 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3331 unsigned long max, bool memsw) 3332 { 3333 bool enlarge = false; 3334 bool drained = false; 3335 int ret; 3336 bool limits_invariant; 3337 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3338 3339 do { 3340 if (signal_pending(current)) { 3341 ret = -EINTR; 3342 break; 3343 } 3344 3345 mutex_lock(&memcg_max_mutex); 3346 /* 3347 * Make sure that the new limit (memsw or memory limit) doesn't 3348 * break our basic invariant rule memory.max <= memsw.max. 3349 */ 3350 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3351 max <= memcg->memsw.max; 3352 if (!limits_invariant) { 3353 mutex_unlock(&memcg_max_mutex); 3354 ret = -EINVAL; 3355 break; 3356 } 3357 if (max > counter->max) 3358 enlarge = true; 3359 ret = page_counter_set_max(counter, max); 3360 mutex_unlock(&memcg_max_mutex); 3361 3362 if (!ret) 3363 break; 3364 3365 if (!drained) { 3366 drain_all_stock(memcg); 3367 drained = true; 3368 continue; 3369 } 3370 3371 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3372 GFP_KERNEL, !memsw)) { 3373 ret = -EBUSY; 3374 break; 3375 } 3376 } while (true); 3377 3378 if (!ret && enlarge) 3379 memcg_oom_recover(memcg); 3380 3381 return ret; 3382 } 3383 3384 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3385 gfp_t gfp_mask, 3386 unsigned long *total_scanned) 3387 { 3388 unsigned long nr_reclaimed = 0; 3389 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3390 unsigned long reclaimed; 3391 int loop = 0; 3392 struct mem_cgroup_tree_per_node *mctz; 3393 unsigned long excess; 3394 unsigned long nr_scanned; 3395 3396 if (order > 0) 3397 return 0; 3398 3399 mctz = soft_limit_tree_node(pgdat->node_id); 3400 3401 /* 3402 * Do not even bother to check the largest node if the root 3403 * is empty. Do it lockless to prevent lock bouncing. Races 3404 * are acceptable as soft limit is best effort anyway. 
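 *
 * The loop below then repeatedly picks the worst offender (a sketch of
 * the control flow, not extra behaviour):
 *
 *	mz = largest-excess node in mctz;
 *	reclaim from mz->memcg via mem_cgroup_soft_reclaim();
 *	re-insert mz keyed by its remaining excess;
 *	stop once something was reclaimed, or when no candidates remain,
 *	or after too many fruitless iterations.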
3405 */ 3406 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3407 return 0; 3408 3409 /* 3410 * This loop can run for a while, especially if mem_cgroups continuously 3411 * keep exceeding their soft limit and putting the system under 3412 * pressure 3413 */ 3414 do { 3415 if (next_mz) 3416 mz = next_mz; 3417 else 3418 mz = mem_cgroup_largest_soft_limit_node(mctz); 3419 if (!mz) 3420 break; 3421 3422 nr_scanned = 0; 3423 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3424 gfp_mask, &nr_scanned); 3425 nr_reclaimed += reclaimed; 3426 *total_scanned += nr_scanned; 3427 spin_lock_irq(&mctz->lock); 3428 __mem_cgroup_remove_exceeded(mz, mctz); 3429 3430 /* 3431 * If we failed to reclaim anything from this memory cgroup 3432 * it is time to move on to the next cgroup 3433 */ 3434 next_mz = NULL; 3435 if (!reclaimed) 3436 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3437 3438 excess = soft_limit_excess(mz->memcg); 3439 /* 3440 * One school of thought says that we should not add 3441 * back the node to the tree if reclaim returns 0. 3442 * But our reclaim could return 0 simply because, due 3443 * to priority, we are exposing a smaller subset of 3444 * memory to reclaim from. Consider this as a longer 3445 * term TODO. 3446 */ 3447 /* If excess == 0, no tree ops */ 3448 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3449 spin_unlock_irq(&mctz->lock); 3450 css_put(&mz->memcg->css); 3451 loop++; 3452 /* 3453 * Stop if we could not reclaim anything and there are no more 3454 * mem cgroups to try, or if we seem to be looping without 3455 * reclaiming anything. 3456 */ 3457 if (!nr_reclaimed && 3458 (next_mz == NULL || 3459 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3460 break; 3461 } while (!nr_reclaimed); 3462 if (next_mz) 3463 css_put(&next_mz->memcg->css); 3464 return nr_reclaimed; 3465 } 3466 3467 /* 3468 * Test whether @memcg has children, dead or alive. Note that this 3469 * function doesn't care whether @memcg has use_hierarchy enabled and 3470 * returns %true if there are child csses according to the cgroup 3471 * hierarchy. Testing use_hierarchy is the caller's responsibility. 3472 */ 3473 static inline bool memcg_has_children(struct mem_cgroup *memcg) 3474 { 3475 bool ret; 3476 3477 rcu_read_lock(); 3478 ret = css_next_child(NULL, &memcg->css); 3479 rcu_read_unlock(); 3480 return ret; 3481 } 3482 3483 /* 3484 * Reclaims as many pages from the given memcg as possible. 3485 * 3486 * Caller is responsible for holding a css reference for memcg.
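 *
 * This backs the cgroup1 "memory.force_empty" knob, reached via
 * mem_cgroup_force_empty_write() below, e.g. (illustrative path):
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty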
3487 */ 3488 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3489 { 3490 int nr_retries = MAX_RECLAIM_RETRIES; 3491 3492 /* we call try-to-free pages to make this cgroup empty */ 3493 lru_add_drain_all(); 3494 3495 drain_all_stock(memcg); 3496 3497 /* try to free all pages in this cgroup */ 3498 while (nr_retries && page_counter_read(&memcg->memory)) { 3499 int progress; 3500 3501 if (signal_pending(current)) 3502 return -EINTR; 3503 3504 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3505 GFP_KERNEL, true); 3506 if (!progress) { 3507 nr_retries--; 3508 /* maybe some writeback is necessary */ 3509 congestion_wait(BLK_RW_ASYNC, HZ/10); 3510 } 3511 3512 } 3513 3514 return 0; 3515 } 3516 3517 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3518 char *buf, size_t nbytes, 3519 loff_t off) 3520 { 3521 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3522 3523 if (mem_cgroup_is_root(memcg)) 3524 return -EINVAL; 3525 return mem_cgroup_force_empty(memcg) ?: nbytes; 3526 } 3527 3528 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3529 struct cftype *cft) 3530 { 3531 return mem_cgroup_from_css(css)->use_hierarchy; 3532 } 3533 3534 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3535 struct cftype *cft, u64 val) 3536 { 3537 int retval = 0; 3538 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3539 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); 3540 3541 if (memcg->use_hierarchy == val) 3542 return 0; 3543 3544 /* 3545 * If the parent's use_hierarchy is set, we can't make any modifications 3546 * in the child subtrees. If it is unset, then the change can 3547 * occur, provided the current cgroup has no children. 3548 * 3549 * For the root cgroup, parent_memcg is NULL; we allow the value to be 3550 * set if there are no children.
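 *
 * Summarizing the cases handled below:
 *
 *	parent has use_hierarchy set     -> -EINVAL
 *	val is neither 0 nor 1           -> -EINVAL
 *	otherwise, cgroup has children   -> -EBUSY
 *	otherwise, no children           -> use_hierarchy = val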
3551 */ 3552 if ((!parent_memcg || !parent_memcg->use_hierarchy) && 3553 (val == 1 || val == 0)) { 3554 if (!memcg_has_children(memcg)) 3555 memcg->use_hierarchy = val; 3556 else 3557 retval = -EBUSY; 3558 } else 3559 retval = -EINVAL; 3560 3561 return retval; 3562 } 3563 3564 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3565 { 3566 unsigned long val; 3567 3568 if (mem_cgroup_is_root(memcg)) { 3569 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3570 memcg_page_state(memcg, NR_ANON_MAPPED); 3571 if (swap) 3572 val += memcg_page_state(memcg, MEMCG_SWAP); 3573 } else { 3574 if (!swap) 3575 val = page_counter_read(&memcg->memory); 3576 else 3577 val = page_counter_read(&memcg->memsw); 3578 } 3579 return val; 3580 } 3581 3582 enum { 3583 RES_USAGE, 3584 RES_LIMIT, 3585 RES_MAX_USAGE, 3586 RES_FAILCNT, 3587 RES_SOFT_LIMIT, 3588 }; 3589 3590 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3591 struct cftype *cft) 3592 { 3593 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3594 struct page_counter *counter; 3595 3596 switch (MEMFILE_TYPE(cft->private)) { 3597 case _MEM: 3598 counter = &memcg->memory; 3599 break; 3600 case _MEMSWAP: 3601 counter = &memcg->memsw; 3602 break; 3603 case _KMEM: 3604 counter = &memcg->kmem; 3605 break; 3606 case _TCP: 3607 counter = &memcg->tcpmem; 3608 break; 3609 default: 3610 BUG(); 3611 } 3612 3613 switch (MEMFILE_ATTR(cft->private)) { 3614 case RES_USAGE: 3615 if (counter == &memcg->memory) 3616 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3617 if (counter == &memcg->memsw) 3618 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3619 return (u64)page_counter_read(counter) * PAGE_SIZE; 3620 case RES_LIMIT: 3621 return (u64)counter->max * PAGE_SIZE; 3622 case RES_MAX_USAGE: 3623 return (u64)counter->watermark * PAGE_SIZE; 3624 case RES_FAILCNT: 3625 return counter->failcnt; 3626 case RES_SOFT_LIMIT: 3627 return (u64)memcg->soft_limit * PAGE_SIZE; 3628 default: 3629 BUG(); 3630 } 3631 } 3632 3633 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) 3634 { 3635 unsigned long stat[MEMCG_NR_STAT] = {0}; 3636 struct mem_cgroup *mi; 3637 int node, cpu, i; 3638 3639 for_each_online_cpu(cpu) 3640 for (i = 0; i < MEMCG_NR_STAT; i++) 3641 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); 3642 3643 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3644 for (i = 0; i < MEMCG_NR_STAT; i++) 3645 atomic_long_add(stat[i], &mi->vmstats[i]); 3646 3647 for_each_node(node) { 3648 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 3649 struct mem_cgroup_per_node *pi; 3650 3651 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3652 stat[i] = 0; 3653 3654 for_each_online_cpu(cpu) 3655 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3656 stat[i] += per_cpu( 3657 pn->lruvec_stat_cpu->count[i], cpu); 3658 3659 for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) 3660 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3661 atomic_long_add(stat[i], &pi->lruvec_stat[i]); 3662 } 3663 } 3664 3665 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) 3666 { 3667 unsigned long events[NR_VM_EVENT_ITEMS]; 3668 struct mem_cgroup *mi; 3669 int cpu, i; 3670 3671 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3672 events[i] = 0; 3673 3674 for_each_online_cpu(cpu) 3675 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3676 events[i] += per_cpu(memcg->vmstats_percpu->events[i], 3677 cpu); 3678 3679 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3680 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3681 atomic_long_add(events[i], &mi->vmevents[i]); 
3682 } 3683 3684 #ifdef CONFIG_MEMCG_KMEM 3685 static int memcg_online_kmem(struct mem_cgroup *memcg) 3686 { 3687 struct obj_cgroup *objcg; 3688 int memcg_id; 3689 3690 if (cgroup_memory_nokmem) 3691 return 0; 3692 3693 BUG_ON(memcg->kmemcg_id >= 0); 3694 BUG_ON(memcg->kmem_state); 3695 3696 memcg_id = memcg_alloc_cache_id(); 3697 if (memcg_id < 0) 3698 return memcg_id; 3699 3700 objcg = obj_cgroup_alloc(); 3701 if (!objcg) { 3702 memcg_free_cache_id(memcg_id); 3703 return -ENOMEM; 3704 } 3705 objcg->memcg = memcg; 3706 rcu_assign_pointer(memcg->objcg, objcg); 3707 3708 static_branch_enable(&memcg_kmem_enabled_key); 3709 3710 /* 3711 * A memory cgroup is considered kmem-online as soon as it gets 3712 * kmemcg_id. Setting the id after enabling static branching will 3713 * guarantee no one starts accounting before all call sites are 3714 * patched. 3715 */ 3716 memcg->kmemcg_id = memcg_id; 3717 memcg->kmem_state = KMEM_ONLINE; 3718 3719 return 0; 3720 } 3721 3722 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3723 { 3724 struct cgroup_subsys_state *css; 3725 struct mem_cgroup *parent, *child; 3726 int kmemcg_id; 3727 3728 if (memcg->kmem_state != KMEM_ONLINE) 3729 return; 3730 3731 memcg->kmem_state = KMEM_ALLOCATED; 3732 3733 parent = parent_mem_cgroup(memcg); 3734 if (!parent) 3735 parent = root_mem_cgroup; 3736 3737 memcg_reparent_objcgs(memcg, parent); 3738 3739 kmemcg_id = memcg->kmemcg_id; 3740 BUG_ON(kmemcg_id < 0); 3741 3742 /* 3743 * Change kmemcg_id of this cgroup and all its descendants to the 3744 * parent's id, and then move all entries from this cgroup's list_lrus 3745 * to ones of the parent. After we have finished, all list_lrus 3746 * corresponding to this cgroup are guaranteed to remain empty. The 3747 * ordering is imposed by list_lru_node->lock taken by 3748 * memcg_drain_all_list_lrus(). 3749 */ 3750 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3751 css_for_each_descendant_pre(css, &memcg->css) { 3752 child = mem_cgroup_from_css(css); 3753 BUG_ON(child->kmemcg_id != kmemcg_id); 3754 child->kmemcg_id = parent->kmemcg_id; 3755 if (!memcg->use_hierarchy) 3756 break; 3757 } 3758 rcu_read_unlock(); 3759 3760 memcg_drain_all_list_lrus(kmemcg_id, parent); 3761 3762 memcg_free_cache_id(kmemcg_id); 3763 } 3764 3765 static void memcg_free_kmem(struct mem_cgroup *memcg) 3766 { 3767 /* css_alloc() failed, offlining didn't happen */ 3768 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3769 memcg_offline_kmem(memcg); 3770 } 3771 #else 3772 static int memcg_online_kmem(struct mem_cgroup *memcg) 3773 { 3774 return 0; 3775 } 3776 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3777 { 3778 } 3779 static void memcg_free_kmem(struct mem_cgroup *memcg) 3780 { 3781 } 3782 #endif /* CONFIG_MEMCG_KMEM */ 3783 3784 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3785 unsigned long max) 3786 { 3787 int ret; 3788 3789 mutex_lock(&memcg_max_mutex); 3790 ret = page_counter_set_max(&memcg->kmem, max); 3791 mutex_unlock(&memcg_max_mutex); 3792 return ret; 3793 } 3794 3795 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3796 { 3797 int ret; 3798 3799 mutex_lock(&memcg_max_mutex); 3800 3801 ret = page_counter_set_max(&memcg->tcpmem, max); 3802 if (ret) 3803 goto out; 3804 3805 if (!memcg->tcpmem_active) { 3806 /* 3807 * The active flag needs to be written after the static_key 3808 * update. This is what guarantees that the socket activation 3809 * function is the last one to run. 
See mem_cgroup_sk_alloc() 3810 * for details, and note that we don't mark any socket as 3811 * belonging to this memcg until that flag is up. 3812 * 3813 * We need to do this, because static_keys will span multiple 3814 * sites, but we can't control their order. If we mark a socket 3815 * as accounted, but the accounting functions are not patched in 3816 * yet, we'll lose accounting. 3817 * 3818 * We never race with the readers in mem_cgroup_sk_alloc(), 3819 * because when this value change, the code to process it is not 3820 * patched in yet. 3821 */ 3822 static_branch_inc(&memcg_sockets_enabled_key); 3823 memcg->tcpmem_active = true; 3824 } 3825 out: 3826 mutex_unlock(&memcg_max_mutex); 3827 return ret; 3828 } 3829 3830 /* 3831 * The user of this function is... 3832 * RES_LIMIT. 3833 */ 3834 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3835 char *buf, size_t nbytes, loff_t off) 3836 { 3837 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3838 unsigned long nr_pages; 3839 int ret; 3840 3841 buf = strstrip(buf); 3842 ret = page_counter_memparse(buf, "-1", &nr_pages); 3843 if (ret) 3844 return ret; 3845 3846 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3847 case RES_LIMIT: 3848 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3849 ret = -EINVAL; 3850 break; 3851 } 3852 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3853 case _MEM: 3854 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3855 break; 3856 case _MEMSWAP: 3857 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3858 break; 3859 case _KMEM: 3860 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. " 3861 "Please report your usecase to linux-mm@kvack.org if you " 3862 "depend on this functionality.\n"); 3863 ret = memcg_update_kmem_max(memcg, nr_pages); 3864 break; 3865 case _TCP: 3866 ret = memcg_update_tcp_max(memcg, nr_pages); 3867 break; 3868 } 3869 break; 3870 case RES_SOFT_LIMIT: 3871 memcg->soft_limit = nr_pages; 3872 ret = 0; 3873 break; 3874 } 3875 return ret ?: nbytes; 3876 } 3877 3878 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3879 size_t nbytes, loff_t off) 3880 { 3881 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3882 struct page_counter *counter; 3883 3884 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3885 case _MEM: 3886 counter = &memcg->memory; 3887 break; 3888 case _MEMSWAP: 3889 counter = &memcg->memsw; 3890 break; 3891 case _KMEM: 3892 counter = &memcg->kmem; 3893 break; 3894 case _TCP: 3895 counter = &memcg->tcpmem; 3896 break; 3897 default: 3898 BUG(); 3899 } 3900 3901 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3902 case RES_MAX_USAGE: 3903 page_counter_reset_watermark(counter); 3904 break; 3905 case RES_FAILCNT: 3906 counter->failcnt = 0; 3907 break; 3908 default: 3909 BUG(); 3910 } 3911 3912 return nbytes; 3913 } 3914 3915 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3916 struct cftype *cft) 3917 { 3918 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3919 } 3920 3921 #ifdef CONFIG_MMU 3922 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3923 struct cftype *cft, u64 val) 3924 { 3925 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3926 3927 if (val & ~MOVE_MASK) 3928 return -EINVAL; 3929 3930 /* 3931 * No kind of locking is needed in here, because ->can_attach() will 3932 * check this value once in the beginning of the process, and then carry 3933 * on with stale data. 
This means that changes to this value will only 3934 * affect task migrations starting after the change. 3935 */ 3936 memcg->move_charge_at_immigrate = val; 3937 return 0; 3938 } 3939 #else 3940 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3941 struct cftype *cft, u64 val) 3942 { 3943 return -ENOSYS; 3944 } 3945 #endif 3946 3947 #ifdef CONFIG_NUMA 3948 3949 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3950 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3951 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3952 3953 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3954 int nid, unsigned int lru_mask, bool tree) 3955 { 3956 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3957 unsigned long nr = 0; 3958 enum lru_list lru; 3959 3960 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3961 3962 for_each_lru(lru) { 3963 if (!(BIT(lru) & lru_mask)) 3964 continue; 3965 if (tree) 3966 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 3967 else 3968 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 3969 } 3970 return nr; 3971 } 3972 3973 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 3974 unsigned int lru_mask, 3975 bool tree) 3976 { 3977 unsigned long nr = 0; 3978 enum lru_list lru; 3979 3980 for_each_lru(lru) { 3981 if (!(BIT(lru) & lru_mask)) 3982 continue; 3983 if (tree) 3984 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 3985 else 3986 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 3987 } 3988 return nr; 3989 } 3990 3991 static int memcg_numa_stat_show(struct seq_file *m, void *v) 3992 { 3993 struct numa_stat { 3994 const char *name; 3995 unsigned int lru_mask; 3996 }; 3997 3998 static const struct numa_stat stats[] = { 3999 { "total", LRU_ALL }, 4000 { "file", LRU_ALL_FILE }, 4001 { "anon", LRU_ALL_ANON }, 4002 { "unevictable", BIT(LRU_UNEVICTABLE) }, 4003 }; 4004 const struct numa_stat *stat; 4005 int nid; 4006 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4007 4008 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4009 seq_printf(m, "%s=%lu", stat->name, 4010 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4011 false)); 4012 for_each_node_state(nid, N_MEMORY) 4013 seq_printf(m, " N%d=%lu", nid, 4014 mem_cgroup_node_nr_lru_pages(memcg, nid, 4015 stat->lru_mask, false)); 4016 seq_putc(m, '\n'); 4017 } 4018 4019 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4020 4021 seq_printf(m, "hierarchical_%s=%lu", stat->name, 4022 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4023 true)); 4024 for_each_node_state(nid, N_MEMORY) 4025 seq_printf(m, " N%d=%lu", nid, 4026 mem_cgroup_node_nr_lru_pages(memcg, nid, 4027 stat->lru_mask, true)); 4028 seq_putc(m, '\n'); 4029 } 4030 4031 return 0; 4032 } 4033 #endif /* CONFIG_NUMA */ 4034 4035 static const unsigned int memcg1_stats[] = { 4036 NR_FILE_PAGES, 4037 NR_ANON_MAPPED, 4038 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4039 NR_ANON_THPS, 4040 #endif 4041 NR_SHMEM, 4042 NR_FILE_MAPPED, 4043 NR_FILE_DIRTY, 4044 NR_WRITEBACK, 4045 MEMCG_SWAP, 4046 }; 4047 4048 static const char *const memcg1_stat_names[] = { 4049 "cache", 4050 "rss", 4051 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4052 "rss_huge", 4053 #endif 4054 "shmem", 4055 "mapped_file", 4056 "dirty", 4057 "writeback", 4058 "swap", 4059 }; 4060 4061 /* Universal VM events cgroup1 shows, original sort order */ 4062 static const unsigned int memcg1_events[] = { 4063 PGPGIN, 4064 PGPGOUT, 4065 PGFAULT, 4066 PGMAJFAULT, 4067 }; 4068 4069 static int 
memcg_stat_show(struct seq_file *m, void *v) 4070 { 4071 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4072 unsigned long memory, memsw; 4073 struct mem_cgroup *mi; 4074 unsigned int i; 4075 4076 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4077 4078 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4079 unsigned long nr; 4080 4081 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4082 continue; 4083 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4084 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4085 if (memcg1_stats[i] == NR_ANON_THPS) 4086 nr *= HPAGE_PMD_NR; 4087 #endif 4088 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 4089 } 4090 4091 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4092 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4093 memcg_events_local(memcg, memcg1_events[i])); 4094 4095 for (i = 0; i < NR_LRU_LISTS; i++) 4096 seq_printf(m, "%s %lu\n", lru_list_name(i), 4097 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4098 PAGE_SIZE); 4099 4100 /* Hierarchical information */ 4101 memory = memsw = PAGE_COUNTER_MAX; 4102 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4103 memory = min(memory, READ_ONCE(mi->memory.max)); 4104 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4105 } 4106 seq_printf(m, "hierarchical_memory_limit %llu\n", 4107 (u64)memory * PAGE_SIZE); 4108 if (do_memsw_account()) 4109 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4110 (u64)memsw * PAGE_SIZE); 4111 4112 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4113 unsigned long nr; 4114 4115 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4116 continue; 4117 nr = memcg_page_state(memcg, memcg1_stats[i]); 4118 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4119 if (memcg1_stats[i] == NR_ANON_THPS) 4120 nr *= HPAGE_PMD_NR; 4121 #endif 4122 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4123 (u64)nr * PAGE_SIZE); 4124 } 4125 4126 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4127 seq_printf(m, "total_%s %llu\n", 4128 vm_event_name(memcg1_events[i]), 4129 (u64)memcg_events(memcg, memcg1_events[i])); 4130 4131 for (i = 0; i < NR_LRU_LISTS; i++) 4132 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4133 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4134 PAGE_SIZE); 4135 4136 #ifdef CONFIG_DEBUG_VM 4137 { 4138 pg_data_t *pgdat; 4139 struct mem_cgroup_per_node *mz; 4140 unsigned long anon_cost = 0; 4141 unsigned long file_cost = 0; 4142 4143 for_each_online_pgdat(pgdat) { 4144 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 4145 4146 anon_cost += mz->lruvec.anon_cost; 4147 file_cost += mz->lruvec.file_cost; 4148 } 4149 seq_printf(m, "anon_cost %lu\n", anon_cost); 4150 seq_printf(m, "file_cost %lu\n", file_cost); 4151 } 4152 #endif 4153 4154 return 0; 4155 } 4156 4157 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4158 struct cftype *cft) 4159 { 4160 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4161 4162 return mem_cgroup_swappiness(memcg); 4163 } 4164 4165 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4166 struct cftype *cft, u64 val) 4167 { 4168 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4169 4170 if (val > 100) 4171 return -EINVAL; 4172 4173 if (css->parent) 4174 memcg->swappiness = val; 4175 else 4176 vm_swappiness = val; 4177 4178 return 0; 4179 } 4180 4181 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4182 { 4183 struct mem_cgroup_threshold_ary *t; 4184 unsigned long usage; 4185 int i; 4186 4187 rcu_read_lock(); 4188 if (!swap) 4189 t 
= rcu_dereference(memcg->thresholds.primary); 4190 else 4191 t = rcu_dereference(memcg->memsw_thresholds.primary); 4192 4193 if (!t) 4194 goto unlock; 4195 4196 usage = mem_cgroup_usage(memcg, swap); 4197 4198 /* 4199 * current_threshold points to threshold just below or equal to usage. 4200 * If it's not true, a threshold was crossed after last 4201 * call of __mem_cgroup_threshold(). 4202 */ 4203 i = t->current_threshold; 4204 4205 /* 4206 * Iterate backward over array of thresholds starting from 4207 * current_threshold and check if a threshold is crossed. 4208 * If none of thresholds below usage is crossed, we read 4209 * only one element of the array here. 4210 */ 4211 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4212 eventfd_signal(t->entries[i].eventfd, 1); 4213 4214 /* i = current_threshold + 1 */ 4215 i++; 4216 4217 /* 4218 * Iterate forward over array of thresholds starting from 4219 * current_threshold+1 and check if a threshold is crossed. 4220 * If none of thresholds above usage is crossed, we read 4221 * only one element of the array here. 4222 */ 4223 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4224 eventfd_signal(t->entries[i].eventfd, 1); 4225 4226 /* Update current_threshold */ 4227 t->current_threshold = i - 1; 4228 unlock: 4229 rcu_read_unlock(); 4230 } 4231 4232 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4233 { 4234 while (memcg) { 4235 __mem_cgroup_threshold(memcg, false); 4236 if (do_memsw_account()) 4237 __mem_cgroup_threshold(memcg, true); 4238 4239 memcg = parent_mem_cgroup(memcg); 4240 } 4241 } 4242 4243 static int compare_thresholds(const void *a, const void *b) 4244 { 4245 const struct mem_cgroup_threshold *_a = a; 4246 const struct mem_cgroup_threshold *_b = b; 4247 4248 if (_a->threshold > _b->threshold) 4249 return 1; 4250 4251 if (_a->threshold < _b->threshold) 4252 return -1; 4253 4254 return 0; 4255 } 4256 4257 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4258 { 4259 struct mem_cgroup_eventfd_list *ev; 4260 4261 spin_lock(&memcg_oom_lock); 4262 4263 list_for_each_entry(ev, &memcg->oom_notify, list) 4264 eventfd_signal(ev->eventfd, 1); 4265 4266 spin_unlock(&memcg_oom_lock); 4267 return 0; 4268 } 4269 4270 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4271 { 4272 struct mem_cgroup *iter; 4273 4274 for_each_mem_cgroup_tree(iter, memcg) 4275 mem_cgroup_oom_notify_cb(iter); 4276 } 4277 4278 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4279 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4280 { 4281 struct mem_cgroup_thresholds *thresholds; 4282 struct mem_cgroup_threshold_ary *new; 4283 unsigned long threshold; 4284 unsigned long usage; 4285 int i, size, ret; 4286 4287 ret = page_counter_memparse(args, "-1", &threshold); 4288 if (ret) 4289 return ret; 4290 4291 mutex_lock(&memcg->thresholds_lock); 4292 4293 if (type == _MEM) { 4294 thresholds = &memcg->thresholds; 4295 usage = mem_cgroup_usage(memcg, false); 4296 } else if (type == _MEMSWAP) { 4297 thresholds = &memcg->memsw_thresholds; 4298 usage = mem_cgroup_usage(memcg, true); 4299 } else 4300 BUG(); 4301 4302 /* Check if a threshold crossed before adding a new one */ 4303 if (thresholds->primary) 4304 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4305 4306 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4307 4308 /* Allocate memory for new array of thresholds */ 4309 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4310 if (!new) { 4311 ret = -ENOMEM; 4312 goto unlock; 4313 } 4314 new->size = size; 4315 4316 /* Copy thresholds (if any) to new array */ 4317 if (thresholds->primary) 4318 memcpy(new->entries, thresholds->primary->entries, 4319 flex_array_size(new, entries, size - 1)); 4320 4321 /* Add new threshold */ 4322 new->entries[size - 1].eventfd = eventfd; 4323 new->entries[size - 1].threshold = threshold; 4324 4325 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4326 sort(new->entries, size, sizeof(*new->entries), 4327 compare_thresholds, NULL); 4328 4329 /* Find current threshold */ 4330 new->current_threshold = -1; 4331 for (i = 0; i < size; i++) { 4332 if (new->entries[i].threshold <= usage) { 4333 /* 4334 * new->current_threshold will not be used until 4335 * rcu_assign_pointer(), so it's safe to increment 4336 * it here. 4337 */ 4338 ++new->current_threshold; 4339 } else 4340 break; 4341 } 4342 4343 /* Free old spare buffer and save old primary buffer as spare */ 4344 kfree(thresholds->spare); 4345 thresholds->spare = thresholds->primary; 4346 4347 rcu_assign_pointer(thresholds->primary, new); 4348 4349 /* To be sure that nobody uses thresholds */ 4350 synchronize_rcu(); 4351 4352 unlock: 4353 mutex_unlock(&memcg->thresholds_lock); 4354 4355 return ret; 4356 } 4357 4358 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4359 struct eventfd_ctx *eventfd, const char *args) 4360 { 4361 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4362 } 4363 4364 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4365 struct eventfd_ctx *eventfd, const char *args) 4366 { 4367 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4368 } 4369 4370 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4371 struct eventfd_ctx *eventfd, enum res_type type) 4372 { 4373 struct mem_cgroup_thresholds *thresholds; 4374 struct mem_cgroup_threshold_ary *new; 4375 unsigned long usage; 4376 int i, j, size, entries; 4377 4378 mutex_lock(&memcg->thresholds_lock); 4379 4380 if (type == _MEM) { 4381 thresholds = &memcg->thresholds; 4382 usage = mem_cgroup_usage(memcg, false); 4383 } else if (type == _MEMSWAP) { 4384 thresholds = &memcg->memsw_thresholds; 4385 usage = mem_cgroup_usage(memcg, true); 4386 } else 4387 BUG(); 4388 4389 if (!thresholds->primary) 4390 goto unlock; 4391 4392 /* Check if a threshold crossed before removing */ 4393 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4394 4395 /* Calculate new number of threshold */ 4396 size = entries = 0; 4397 for (i = 0; i < thresholds->primary->size; i++) { 4398 if (thresholds->primary->entries[i].eventfd != eventfd) 4399 size++; 4400 else 4401 entries++; 4402 } 4403 4404 new = thresholds->spare; 4405 4406 /* If no items related to eventfd have been cleared, nothing to do */ 4407 if (!entries) 4408 goto unlock; 4409 4410 /* Set thresholds array to NULL if we don't have thresholds */ 4411 if (!size) { 4412 kfree(new); 4413 new = NULL; 4414 goto swap_buffers; 4415 } 4416 4417 new->size = size; 4418 4419 /* Copy thresholds and find current threshold */ 4420 new->current_threshold = -1; 4421 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4422 if (thresholds->primary->entries[i].eventfd == eventfd) 4423 continue; 4424 4425 new->entries[j] = thresholds->primary->entries[i]; 4426 if 
(new->entries[j].threshold <= usage) { 4427 /* 4428 * new->current_threshold will not be used 4429 * until rcu_assign_pointer(), so it's safe to increment 4430 * it here. 4431 */ 4432 ++new->current_threshold; 4433 } 4434 j++; 4435 } 4436 4437 swap_buffers: 4438 /* Swap primary and spare array */ 4439 thresholds->spare = thresholds->primary; 4440 4441 rcu_assign_pointer(thresholds->primary, new); 4442 4443 /* To be sure that nobody uses thresholds */ 4444 synchronize_rcu(); 4445 4446 /* If all events are unregistered, free the spare array */ 4447 if (!new) { 4448 kfree(thresholds->spare); 4449 thresholds->spare = NULL; 4450 } 4451 unlock: 4452 mutex_unlock(&memcg->thresholds_lock); 4453 } 4454 4455 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4456 struct eventfd_ctx *eventfd) 4457 { 4458 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4459 } 4460 4461 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4462 struct eventfd_ctx *eventfd) 4463 { 4464 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4465 } 4466 4467 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4468 struct eventfd_ctx *eventfd, const char *args) 4469 { 4470 struct mem_cgroup_eventfd_list *event; 4471 4472 event = kmalloc(sizeof(*event), GFP_KERNEL); 4473 if (!event) 4474 return -ENOMEM; 4475 4476 spin_lock(&memcg_oom_lock); 4477 4478 event->eventfd = eventfd; 4479 list_add(&event->list, &memcg->oom_notify); 4480 4481 /* already in OOM ? */ 4482 if (memcg->under_oom) 4483 eventfd_signal(eventfd, 1); 4484 spin_unlock(&memcg_oom_lock); 4485 4486 return 0; 4487 } 4488 4489 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4490 struct eventfd_ctx *eventfd) 4491 { 4492 struct mem_cgroup_eventfd_list *ev, *tmp; 4493 4494 spin_lock(&memcg_oom_lock); 4495 4496 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4497 if (ev->eventfd == eventfd) { 4498 list_del(&ev->list); 4499 kfree(ev); 4500 } 4501 } 4502 4503 spin_unlock(&memcg_oom_lock); 4504 } 4505 4506 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4507 { 4508 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4509 4510 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4511 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4512 seq_printf(sf, "oom_kill %lu\n", 4513 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4514 return 0; 4515 } 4516 4517 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4518 struct cftype *cft, u64 val) 4519 { 4520 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4521 4522 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4523 if (!css->parent || !((val == 0) || (val == 1))) 4524 return -EINVAL; 4525 4526 memcg->oom_kill_disable = val; 4527 if (!val) 4528 memcg_oom_recover(memcg); 4529 4530 return 0; 4531 } 4532 4533 #ifdef CONFIG_CGROUP_WRITEBACK 4534 4535 #include <trace/events/writeback.h> 4536 4537 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4538 { 4539 return wb_domain_init(&memcg->cgwb_domain, gfp); 4540 } 4541 4542 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4543 { 4544 wb_domain_exit(&memcg->cgwb_domain); 4545 } 4546 4547 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4548 { 4549 wb_domain_size_changed(&memcg->cgwb_domain); 4550 } 4551 4552 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4553 { 4554 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css);
4555
4556 if (!memcg->css.parent)
4557 return NULL;
4558
4559 return &memcg->cgwb_domain;
4560 }
4561
4562 /*
4563 * idx can be of type enum memcg_stat_item or node_stat_item.
4564 * Keep in sync with memcg_exact_page().
4565 */
4566 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4567 {
4568 long x = atomic_long_read(&memcg->vmstats[idx]);
4569 int cpu;
4570
4571 for_each_online_cpu(cpu)
4572 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4573 if (x < 0)
4574 x = 0;
4575 return x;
4576 }
4577
4578 /**
4579 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4580 * @wb: bdi_writeback in question
4581 * @pfilepages: out parameter for number of file pages
4582 * @pheadroom: out parameter for number of allocatable pages according to memcg
4583 * @pdirty: out parameter for number of dirty pages
4584 * @pwriteback: out parameter for number of pages under writeback
4585 *
4586 * Determine the numbers of file, headroom, dirty, and writeback pages in
4587 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4588 * is a bit more involved.
4589 *
4590 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4591 * headroom is calculated as the lowest headroom of itself and the
4592 * ancestors. Note that this doesn't consider the actual amount of
4593 * available memory in the system. The caller should further cap
4594 * *@pheadroom accordingly.
4595 */
4596 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4597 unsigned long *pheadroom, unsigned long *pdirty,
4598 unsigned long *pwriteback)
4599 {
4600 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4601 struct mem_cgroup *parent;
4602
4603 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4604
4605 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4606 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4607 memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4608 *pheadroom = PAGE_COUNTER_MAX;
4609
4610 while ((parent = parent_mem_cgroup(memcg))) {
4611 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4612 READ_ONCE(memcg->memory.high));
4613 unsigned long used = page_counter_read(&memcg->memory);
4614
4615 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4616 memcg = parent;
4617 }
4618 }
4619
4620 /*
4621 * Foreign dirty flushing
4622 *
4623 * There's an inherent mismatch between memcg and writeback. The former
4624 * tracks ownership per-page while the latter per-inode. This was a
4625 * deliberate design decision because honoring per-page ownership in the
4626 * writeback path is complicated, may lead to higher CPU and IO overheads,
4627 * and was deemed unnecessary given that write-sharing an inode across
4628 * different cgroups isn't a common use-case.
4629 *
4630 * Combined with inode majority-writer ownership switching, this works well
4631 * enough in most cases but there are some pathological cases. For
4632 * example, let's say there are two cgroups A and B which keep writing to
4633 * different but confined parts of the same inode. B owns the inode and
4634 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4635 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4636 * triggering background writeback. A will be slowed down without a way to
4637 * make writeback of the dirty pages happen.
4638 *
4639 * Conditions like the above can lead to a cgroup getting repeatedly and
4640 * severely throttled after making some progress after each
4641 * dirty_expire_interval while the underlying IO device is almost
4642 * completely idle.
4643 *
4644 * Solving this problem completely requires matching the ownership tracking
4645 * granularities between memcg and writeback in either direction. However,
4646 * the more egregious behaviors can be avoided by simply remembering the
4647 * most recent foreign dirtying events and initiating remote flushes on
4648 * them when local writeback isn't enough to keep the memory clean enough.
4649 *
4650 * The following two functions implement such a mechanism. When a foreign
4651 * page - a page whose memcg and writeback ownerships don't match - is
4652 * dirtied, mem_cgroup_track_foreign_dirty() records the inode's owning
4653 * bdi_writeback on the page's owning memcg. When balance_dirty_pages()
4654 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4655 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4656 * foreign bdi_writebacks which haven't expired. Both the numbers of
4657 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4658 * limited to MEMCG_CGWB_FRN_CNT.
4659 *
4660 * The mechanism only remembers IDs and doesn't hold any object references.
4661 * As being wrong occasionally doesn't matter, updates and accesses to the
4662 * records are lockless and racy.
4663 */
4664 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4665 struct bdi_writeback *wb)
4666 {
4667 struct mem_cgroup *memcg = page->mem_cgroup;
4668 struct memcg_cgwb_frn *frn;
4669 u64 now = get_jiffies_64();
4670 u64 oldest_at = now;
4671 int oldest = -1;
4672 int i;
4673
4674 trace_track_foreign_dirty(page, wb);
4675
4676 /*
4677 * Pick the slot to use. If there is already a slot for @wb, keep
4678 * using it. If not, replace the oldest one which isn't being
4679 * written out.
4680 */
4681 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4682 frn = &memcg->cgwb_frn[i];
4683 if (frn->bdi_id == wb->bdi->id &&
4684 frn->memcg_id == wb->memcg_css->id)
4685 break;
4686 if (time_before64(frn->at, oldest_at) &&
4687 atomic_read(&frn->done.cnt) == 1) {
4688 oldest = i;
4689 oldest_at = frn->at;
4690 }
4691 }
4692
4693 if (i < MEMCG_CGWB_FRN_CNT) {
4694 /*
4695 * Re-using an existing one. Update timestamp lazily to
4696 * avoid making the cacheline hot. We want them to be
4697 * reasonably up-to-date and significantly shorter than
4698 * dirty_expire_interval as that's what expires the record.
4699 * Use the shorter of 1s and dirty_expire_interval / 8.
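*
* Worked example (illustrative, assuming the default dirty_expire_interval
* of 30s, i.e. 3000 centisecs): dirty_expire_interval * 10 is 30000ms,
* msecs_to_jiffies(30000) / 8 is roughly 3.75s worth of jiffies, so the
* min_t() below clamps update_intv to HZ, i.e. one second.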
4700 */ 4701 unsigned long update_intv = 4702 min_t(unsigned long, HZ, 4703 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4704 4705 if (time_before64(frn->at, now - update_intv)) 4706 frn->at = now; 4707 } else if (oldest >= 0) { 4708 /* replace the oldest free one */ 4709 frn = &memcg->cgwb_frn[oldest]; 4710 frn->bdi_id = wb->bdi->id; 4711 frn->memcg_id = wb->memcg_css->id; 4712 frn->at = now; 4713 } 4714 } 4715 4716 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4717 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4718 { 4719 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4720 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4721 u64 now = jiffies_64; 4722 int i; 4723 4724 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4725 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4726 4727 /* 4728 * If the record is older than dirty_expire_interval, 4729 * writeback on it has already started. No need to kick it 4730 * off again. Also, don't start a new one if there's 4731 * already one in flight. 4732 */ 4733 if (time_after64(frn->at, now - intv) && 4734 atomic_read(&frn->done.cnt) == 1) { 4735 frn->at = 0; 4736 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4737 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4738 WB_REASON_FOREIGN_FLUSH, 4739 &frn->done); 4740 } 4741 } 4742 } 4743 4744 #else /* CONFIG_CGROUP_WRITEBACK */ 4745 4746 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4747 { 4748 return 0; 4749 } 4750 4751 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4752 { 4753 } 4754 4755 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4756 { 4757 } 4758 4759 #endif /* CONFIG_CGROUP_WRITEBACK */ 4760 4761 /* 4762 * DO NOT USE IN NEW FILES. 4763 * 4764 * "cgroup.event_control" implementation. 4765 * 4766 * This is way over-engineered. It tries to support fully configurable 4767 * events for each user. Such level of flexibility is completely 4768 * unnecessary especially in the light of the planned unified hierarchy. 4769 * 4770 * Please deprecate this and replace with something simpler if at all 4771 * possible. 4772 */ 4773 4774 /* 4775 * Unregister event and free resources. 4776 * 4777 * Gets called from workqueue. 4778 */ 4779 static void memcg_event_remove(struct work_struct *work) 4780 { 4781 struct mem_cgroup_event *event = 4782 container_of(work, struct mem_cgroup_event, remove); 4783 struct mem_cgroup *memcg = event->memcg; 4784 4785 remove_wait_queue(event->wqh, &event->wait); 4786 4787 event->unregister_event(memcg, event->eventfd); 4788 4789 /* Notify userspace the event is going away. */ 4790 eventfd_signal(event->eventfd, 1); 4791 4792 eventfd_ctx_put(event->eventfd); 4793 kfree(event); 4794 css_put(&memcg->css); 4795 } 4796 4797 /* 4798 * Gets called on EPOLLHUP on eventfd when user closes it. 4799 * 4800 * Called with wqh->lock held and interrupts disabled. 4801 */ 4802 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4803 int sync, void *key) 4804 { 4805 struct mem_cgroup_event *event = 4806 container_of(wait, struct mem_cgroup_event, wait); 4807 struct mem_cgroup *memcg = event->memcg; 4808 __poll_t flags = key_to_poll(key); 4809 4810 if (flags & EPOLLHUP) { 4811 /* 4812 * If the event has been detached at cgroup removal, we 4813 * can simply return knowing the other side will cleanup 4814 * for us. 
4815 * 4816 * We can't race against event freeing since the other 4817 * side will require wqh->lock via remove_wait_queue(), 4818 * which we hold. 4819 */ 4820 spin_lock(&memcg->event_list_lock); 4821 if (!list_empty(&event->list)) { 4822 list_del_init(&event->list); 4823 /* 4824 * We are in atomic context, but cgroup_event_remove() 4825 * may sleep, so we have to call it in workqueue. 4826 */ 4827 schedule_work(&event->remove); 4828 } 4829 spin_unlock(&memcg->event_list_lock); 4830 } 4831 4832 return 0; 4833 } 4834 4835 static void memcg_event_ptable_queue_proc(struct file *file, 4836 wait_queue_head_t *wqh, poll_table *pt) 4837 { 4838 struct mem_cgroup_event *event = 4839 container_of(pt, struct mem_cgroup_event, pt); 4840 4841 event->wqh = wqh; 4842 add_wait_queue(wqh, &event->wait); 4843 } 4844 4845 /* 4846 * DO NOT USE IN NEW FILES. 4847 * 4848 * Parse input and register new cgroup event handler. 4849 * 4850 * Input must be in format '<event_fd> <control_fd> <args>'. 4851 * Interpretation of args is defined by control file implementation. 4852 */ 4853 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4854 char *buf, size_t nbytes, loff_t off) 4855 { 4856 struct cgroup_subsys_state *css = of_css(of); 4857 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4858 struct mem_cgroup_event *event; 4859 struct cgroup_subsys_state *cfile_css; 4860 unsigned int efd, cfd; 4861 struct fd efile; 4862 struct fd cfile; 4863 const char *name; 4864 char *endp; 4865 int ret; 4866 4867 buf = strstrip(buf); 4868 4869 efd = simple_strtoul(buf, &endp, 10); 4870 if (*endp != ' ') 4871 return -EINVAL; 4872 buf = endp + 1; 4873 4874 cfd = simple_strtoul(buf, &endp, 10); 4875 if ((*endp != ' ') && (*endp != '\0')) 4876 return -EINVAL; 4877 buf = endp + 1; 4878 4879 event = kzalloc(sizeof(*event), GFP_KERNEL); 4880 if (!event) 4881 return -ENOMEM; 4882 4883 event->memcg = memcg; 4884 INIT_LIST_HEAD(&event->list); 4885 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4886 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4887 INIT_WORK(&event->remove, memcg_event_remove); 4888 4889 efile = fdget(efd); 4890 if (!efile.file) { 4891 ret = -EBADF; 4892 goto out_kfree; 4893 } 4894 4895 event->eventfd = eventfd_ctx_fileget(efile.file); 4896 if (IS_ERR(event->eventfd)) { 4897 ret = PTR_ERR(event->eventfd); 4898 goto out_put_efile; 4899 } 4900 4901 cfile = fdget(cfd); 4902 if (!cfile.file) { 4903 ret = -EBADF; 4904 goto out_put_eventfd; 4905 } 4906 4907 /* the process need read permission on control file */ 4908 /* AV: shouldn't we check that it's been opened for read instead? */ 4909 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4910 if (ret < 0) 4911 goto out_put_cfile; 4912 4913 /* 4914 * Determine the event callbacks and set them in @event. This used 4915 * to be done via struct cftype but cgroup core no longer knows 4916 * about these events. The following is crude but the whole thing 4917 * is for compatibility anyway. 4918 * 4919 * DO NOT ADD NEW FILES. 
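*
* Usage sketch (not part of the original comment): to be notified when
* memory usage crosses 64M, userspace creates an eventfd, opens
* memory.usage_in_bytes, and writes "<event_fd> <control_fd> 64M" to
* cgroup.event_control; the strcmp() chain below then wires the request
* to mem_cgroup_usage_register_event().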
4920 */ 4921 name = cfile.file->f_path.dentry->d_name.name; 4922 4923 if (!strcmp(name, "memory.usage_in_bytes")) { 4924 event->register_event = mem_cgroup_usage_register_event; 4925 event->unregister_event = mem_cgroup_usage_unregister_event; 4926 } else if (!strcmp(name, "memory.oom_control")) { 4927 event->register_event = mem_cgroup_oom_register_event; 4928 event->unregister_event = mem_cgroup_oom_unregister_event; 4929 } else if (!strcmp(name, "memory.pressure_level")) { 4930 event->register_event = vmpressure_register_event; 4931 event->unregister_event = vmpressure_unregister_event; 4932 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4933 event->register_event = memsw_cgroup_usage_register_event; 4934 event->unregister_event = memsw_cgroup_usage_unregister_event; 4935 } else { 4936 ret = -EINVAL; 4937 goto out_put_cfile; 4938 } 4939 4940 /* 4941 * Verify @cfile should belong to @css. Also, remaining events are 4942 * automatically removed on cgroup destruction but the removal is 4943 * asynchronous, so take an extra ref on @css. 4944 */ 4945 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4946 &memory_cgrp_subsys); 4947 ret = -EINVAL; 4948 if (IS_ERR(cfile_css)) 4949 goto out_put_cfile; 4950 if (cfile_css != css) { 4951 css_put(cfile_css); 4952 goto out_put_cfile; 4953 } 4954 4955 ret = event->register_event(memcg, event->eventfd, buf); 4956 if (ret) 4957 goto out_put_css; 4958 4959 vfs_poll(efile.file, &event->pt); 4960 4961 spin_lock(&memcg->event_list_lock); 4962 list_add(&event->list, &memcg->event_list); 4963 spin_unlock(&memcg->event_list_lock); 4964 4965 fdput(cfile); 4966 fdput(efile); 4967 4968 return nbytes; 4969 4970 out_put_css: 4971 css_put(css); 4972 out_put_cfile: 4973 fdput(cfile); 4974 out_put_eventfd: 4975 eventfd_ctx_put(event->eventfd); 4976 out_put_efile: 4977 fdput(efile); 4978 out_kfree: 4979 kfree(event); 4980 4981 return ret; 4982 } 4983 4984 static struct cftype mem_cgroup_legacy_files[] = { 4985 { 4986 .name = "usage_in_bytes", 4987 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4988 .read_u64 = mem_cgroup_read_u64, 4989 }, 4990 { 4991 .name = "max_usage_in_bytes", 4992 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4993 .write = mem_cgroup_reset, 4994 .read_u64 = mem_cgroup_read_u64, 4995 }, 4996 { 4997 .name = "limit_in_bytes", 4998 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4999 .write = mem_cgroup_write, 5000 .read_u64 = mem_cgroup_read_u64, 5001 }, 5002 { 5003 .name = "soft_limit_in_bytes", 5004 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 5005 .write = mem_cgroup_write, 5006 .read_u64 = mem_cgroup_read_u64, 5007 }, 5008 { 5009 .name = "failcnt", 5010 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 5011 .write = mem_cgroup_reset, 5012 .read_u64 = mem_cgroup_read_u64, 5013 }, 5014 { 5015 .name = "stat", 5016 .seq_show = memcg_stat_show, 5017 }, 5018 { 5019 .name = "force_empty", 5020 .write = mem_cgroup_force_empty_write, 5021 }, 5022 { 5023 .name = "use_hierarchy", 5024 .write_u64 = mem_cgroup_hierarchy_write, 5025 .read_u64 = mem_cgroup_hierarchy_read, 5026 }, 5027 { 5028 .name = "cgroup.event_control", /* XXX: for compat */ 5029 .write = memcg_write_event_control, 5030 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 5031 }, 5032 { 5033 .name = "swappiness", 5034 .read_u64 = mem_cgroup_swappiness_read, 5035 .write_u64 = mem_cgroup_swappiness_write, 5036 }, 5037 { 5038 .name = "move_charge_at_immigrate", 5039 .read_u64 = mem_cgroup_move_charge_read, 5040 .write_u64 = mem_cgroup_move_charge_write, 5041 
}, 5042 { 5043 .name = "oom_control", 5044 .seq_show = mem_cgroup_oom_control_read, 5045 .write_u64 = mem_cgroup_oom_control_write, 5046 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 5047 }, 5048 { 5049 .name = "pressure_level", 5050 }, 5051 #ifdef CONFIG_NUMA 5052 { 5053 .name = "numa_stat", 5054 .seq_show = memcg_numa_stat_show, 5055 }, 5056 #endif 5057 { 5058 .name = "kmem.limit_in_bytes", 5059 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 5060 .write = mem_cgroup_write, 5061 .read_u64 = mem_cgroup_read_u64, 5062 }, 5063 { 5064 .name = "kmem.usage_in_bytes", 5065 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5066 .read_u64 = mem_cgroup_read_u64, 5067 }, 5068 { 5069 .name = "kmem.failcnt", 5070 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5071 .write = mem_cgroup_reset, 5072 .read_u64 = mem_cgroup_read_u64, 5073 }, 5074 { 5075 .name = "kmem.max_usage_in_bytes", 5076 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5077 .write = mem_cgroup_reset, 5078 .read_u64 = mem_cgroup_read_u64, 5079 }, 5080 #if defined(CONFIG_MEMCG_KMEM) && \ 5081 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5082 { 5083 .name = "kmem.slabinfo", 5084 .seq_show = memcg_slab_show, 5085 }, 5086 #endif 5087 { 5088 .name = "kmem.tcp.limit_in_bytes", 5089 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5090 .write = mem_cgroup_write, 5091 .read_u64 = mem_cgroup_read_u64, 5092 }, 5093 { 5094 .name = "kmem.tcp.usage_in_bytes", 5095 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5096 .read_u64 = mem_cgroup_read_u64, 5097 }, 5098 { 5099 .name = "kmem.tcp.failcnt", 5100 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5101 .write = mem_cgroup_reset, 5102 .read_u64 = mem_cgroup_read_u64, 5103 }, 5104 { 5105 .name = "kmem.tcp.max_usage_in_bytes", 5106 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5107 .write = mem_cgroup_reset, 5108 .read_u64 = mem_cgroup_read_u64, 5109 }, 5110 { }, /* terminate */ 5111 }; 5112 5113 /* 5114 * Private memory cgroup IDR 5115 * 5116 * Swap-out records and page cache shadow entries need to store memcg 5117 * references in constrained space, so we maintain an ID space that is 5118 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5119 * memory-controlled cgroups to 64k. 5120 * 5121 * However, there usually are many references to the offline CSS after 5122 * the cgroup has been destroyed, such as page cache or reclaimable 5123 * slab objects, that don't need to hang on to the ID. We want to keep 5124 * those dead CSS from occupying IDs, or we might quickly exhaust the 5125 * relatively small ID space and prevent the creation of new cgroups 5126 * even when there are much fewer than 64k cgroups - possibly none. 5127 * 5128 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5129 * be freed and recycled when it's no longer needed, which is usually 5130 * when the CSS is offlined. 5131 * 5132 * The only exception to that are records of swapped out tmpfs/shmem 5133 * pages that need to be attributed to live ancestors on swapin. But 5134 * those references are manageable from userspace. 
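*
* As a sketch of the lifetime rules implemented below: the ID refcount
* is set to 1 in mem_cgroup_css_online(), dropped in
* mem_cgroup_css_offline(), and swap-out records take additional
* references via mem_cgroup_id_get_many() so swapped-out pages can still
* be attributed after the cgroup itself is gone.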
5135 */ 5136 5137 static DEFINE_IDR(mem_cgroup_idr); 5138 5139 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5140 { 5141 if (memcg->id.id > 0) { 5142 idr_remove(&mem_cgroup_idr, memcg->id.id); 5143 memcg->id.id = 0; 5144 } 5145 } 5146 5147 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5148 unsigned int n) 5149 { 5150 refcount_add(n, &memcg->id.ref); 5151 } 5152 5153 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5154 { 5155 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5156 mem_cgroup_id_remove(memcg); 5157 5158 /* Memcg ID pins CSS */ 5159 css_put(&memcg->css); 5160 } 5161 } 5162 5163 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5164 { 5165 mem_cgroup_id_put_many(memcg, 1); 5166 } 5167 5168 /** 5169 * mem_cgroup_from_id - look up a memcg from a memcg id 5170 * @id: the memcg id to look up 5171 * 5172 * Caller must hold rcu_read_lock(). 5173 */ 5174 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5175 { 5176 WARN_ON_ONCE(!rcu_read_lock_held()); 5177 return idr_find(&mem_cgroup_idr, id); 5178 } 5179 5180 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5181 { 5182 struct mem_cgroup_per_node *pn; 5183 int tmp = node; 5184 /* 5185 * This routine is called against possible nodes. 5186 * But it's BUG to call kmalloc() against offline node. 5187 * 5188 * TODO: this routine can waste much memory for nodes which will 5189 * never be onlined. It's better to use memory hotplug callback 5190 * function. 5191 */ 5192 if (!node_state(node, N_NORMAL_MEMORY)) 5193 tmp = -1; 5194 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5195 if (!pn) 5196 return 1; 5197 5198 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5199 GFP_KERNEL_ACCOUNT); 5200 if (!pn->lruvec_stat_local) { 5201 kfree(pn); 5202 return 1; 5203 } 5204 5205 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat, 5206 GFP_KERNEL_ACCOUNT); 5207 if (!pn->lruvec_stat_cpu) { 5208 free_percpu(pn->lruvec_stat_local); 5209 kfree(pn); 5210 return 1; 5211 } 5212 5213 lruvec_init(&pn->lruvec); 5214 pn->usage_in_excess = 0; 5215 pn->on_tree = false; 5216 pn->memcg = memcg; 5217 5218 memcg->nodeinfo[node] = pn; 5219 return 0; 5220 } 5221 5222 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5223 { 5224 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5225 5226 if (!pn) 5227 return; 5228 5229 free_percpu(pn->lruvec_stat_cpu); 5230 free_percpu(pn->lruvec_stat_local); 5231 kfree(pn); 5232 } 5233 5234 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5235 { 5236 int node; 5237 5238 for_each_node(node) 5239 free_mem_cgroup_per_node_info(memcg, node); 5240 free_percpu(memcg->vmstats_percpu); 5241 free_percpu(memcg->vmstats_local); 5242 kfree(memcg); 5243 } 5244 5245 static void mem_cgroup_free(struct mem_cgroup *memcg) 5246 { 5247 memcg_wb_domain_exit(memcg); 5248 /* 5249 * Flush percpu vmstats and vmevents to guarantee the value correctness 5250 * on parent's and all ancestor levels. 
5251 */ 5252 memcg_flush_percpu_vmstats(memcg); 5253 memcg_flush_percpu_vmevents(memcg); 5254 __mem_cgroup_free(memcg); 5255 } 5256 5257 static struct mem_cgroup *mem_cgroup_alloc(void) 5258 { 5259 struct mem_cgroup *memcg; 5260 unsigned int size; 5261 int node; 5262 int __maybe_unused i; 5263 long error = -ENOMEM; 5264 5265 size = sizeof(struct mem_cgroup); 5266 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5267 5268 memcg = kzalloc(size, GFP_KERNEL); 5269 if (!memcg) 5270 return ERR_PTR(error); 5271 5272 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5273 1, MEM_CGROUP_ID_MAX, 5274 GFP_KERNEL); 5275 if (memcg->id.id < 0) { 5276 error = memcg->id.id; 5277 goto fail; 5278 } 5279 5280 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5281 GFP_KERNEL_ACCOUNT); 5282 if (!memcg->vmstats_local) 5283 goto fail; 5284 5285 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5286 GFP_KERNEL_ACCOUNT); 5287 if (!memcg->vmstats_percpu) 5288 goto fail; 5289 5290 for_each_node(node) 5291 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5292 goto fail; 5293 5294 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5295 goto fail; 5296 5297 INIT_WORK(&memcg->high_work, high_work_func); 5298 INIT_LIST_HEAD(&memcg->oom_notify); 5299 mutex_init(&memcg->thresholds_lock); 5300 spin_lock_init(&memcg->move_lock); 5301 vmpressure_init(&memcg->vmpressure); 5302 INIT_LIST_HEAD(&memcg->event_list); 5303 spin_lock_init(&memcg->event_list_lock); 5304 memcg->socket_pressure = jiffies; 5305 #ifdef CONFIG_MEMCG_KMEM 5306 memcg->kmemcg_id = -1; 5307 INIT_LIST_HEAD(&memcg->objcg_list); 5308 #endif 5309 #ifdef CONFIG_CGROUP_WRITEBACK 5310 INIT_LIST_HEAD(&memcg->cgwb_list); 5311 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5312 memcg->cgwb_frn[i].done = 5313 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5314 #endif 5315 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5316 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5317 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5318 memcg->deferred_split_queue.split_queue_len = 0; 5319 #endif 5320 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5321 return memcg; 5322 fail: 5323 mem_cgroup_id_remove(memcg); 5324 __mem_cgroup_free(memcg); 5325 return ERR_PTR(error); 5326 } 5327 5328 static struct cgroup_subsys_state * __ref 5329 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5330 { 5331 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5332 struct mem_cgroup *memcg, *old_memcg; 5333 long error = -ENOMEM; 5334 5335 old_memcg = set_active_memcg(parent); 5336 memcg = mem_cgroup_alloc(); 5337 set_active_memcg(old_memcg); 5338 if (IS_ERR(memcg)) 5339 return ERR_CAST(memcg); 5340 5341 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5342 memcg->soft_limit = PAGE_COUNTER_MAX; 5343 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5344 if (parent) { 5345 memcg->swappiness = mem_cgroup_swappiness(parent); 5346 memcg->oom_kill_disable = parent->oom_kill_disable; 5347 } 5348 if (!parent) { 5349 page_counter_init(&memcg->memory, NULL); 5350 page_counter_init(&memcg->swap, NULL); 5351 page_counter_init(&memcg->kmem, NULL); 5352 page_counter_init(&memcg->tcpmem, NULL); 5353 } else if (parent->use_hierarchy) { 5354 memcg->use_hierarchy = true; 5355 page_counter_init(&memcg->memory, &parent->memory); 5356 page_counter_init(&memcg->swap, &parent->swap); 5357 page_counter_init(&memcg->kmem, &parent->kmem); 5358 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5359 } else { 5360 
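/*
 * use_hierarchy is disabled under a non-root parent: charges bypass
 * the real parent's counters and are anchored directly at the root
 * cgroup instead.
 */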
page_counter_init(&memcg->memory, &root_mem_cgroup->memory);
5361 page_counter_init(&memcg->swap, &root_mem_cgroup->swap);
5362 page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
5363 page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem);
5364 /*
5365 * Deeper hierarchy with use_hierarchy == false doesn't make
5366 * much sense, so let the cgroup subsystem know about this
5367 * unfortunate state in our controller.
5368 */
5369 if (parent != root_mem_cgroup)
5370 memory_cgrp_subsys.broken_hierarchy = true;
5371 }
5372
5373 /* The following stuff does not apply to the root */
5374 if (!parent) {
5375 root_mem_cgroup = memcg;
5376 return &memcg->css;
5377 }
5378
5379 error = memcg_online_kmem(memcg);
5380 if (error)
5381 goto fail;
5382
5383 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5384 static_branch_inc(&memcg_sockets_enabled_key);
5385
5386 return &memcg->css;
5387 fail:
5388 mem_cgroup_id_remove(memcg);
5389 mem_cgroup_free(memcg);
5390 return ERR_PTR(error);
5391 }
5392
5393 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5394 {
5395 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5396
5397 /*
5398 * A memcg must be visible for memcg_expand_shrinker_maps()
5399 * by the time the maps are allocated. So, we allocate maps
5400 * here, when for_each_mem_cgroup() can't skip it.
5401 */
5402 if (memcg_alloc_shrinker_maps(memcg)) {
5403 mem_cgroup_id_remove(memcg);
5404 return -ENOMEM;
5405 }
5406
5407 /* Online state pins memcg ID, memcg ID pins CSS */
5408 refcount_set(&memcg->id.ref, 1);
5409 css_get(css);
5410 return 0;
5411 }
5412
5413 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5414 {
5415 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5416 struct mem_cgroup_event *event, *tmp;
5417
5418 /*
5419 * Unregister events and notify userspace.
5420 * Notify userspace about cgroup removal only after rmdir of the cgroup
5421 * directory to avoid a race between userspace and kernelspace.
5422 */ 5423 spin_lock(&memcg->event_list_lock); 5424 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5425 list_del_init(&event->list); 5426 schedule_work(&event->remove); 5427 } 5428 spin_unlock(&memcg->event_list_lock); 5429 5430 page_counter_set_min(&memcg->memory, 0); 5431 page_counter_set_low(&memcg->memory, 0); 5432 5433 memcg_offline_kmem(memcg); 5434 wb_memcg_offline(memcg); 5435 5436 drain_all_stock(memcg); 5437 5438 mem_cgroup_id_put(memcg); 5439 } 5440 5441 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5442 { 5443 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5444 5445 invalidate_reclaim_iterators(memcg); 5446 } 5447 5448 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5449 { 5450 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5451 int __maybe_unused i; 5452 5453 #ifdef CONFIG_CGROUP_WRITEBACK 5454 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5455 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5456 #endif 5457 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5458 static_branch_dec(&memcg_sockets_enabled_key); 5459 5460 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5461 static_branch_dec(&memcg_sockets_enabled_key); 5462 5463 vmpressure_cleanup(&memcg->vmpressure); 5464 cancel_work_sync(&memcg->high_work); 5465 mem_cgroup_remove_from_trees(memcg); 5466 memcg_free_shrinker_maps(memcg); 5467 memcg_free_kmem(memcg); 5468 mem_cgroup_free(memcg); 5469 } 5470 5471 /** 5472 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5473 * @css: the target css 5474 * 5475 * Reset the states of the mem_cgroup associated with @css. This is 5476 * invoked when the userland requests disabling on the default hierarchy 5477 * but the memcg is pinned through dependency. The memcg should stop 5478 * applying policies and should revert to the vanilla state as it may be 5479 * made visible again. 5480 * 5481 * The current implementation only resets the essential configurations. 5482 * This needs to be expanded to cover all the visible parts. 5483 */ 5484 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5485 { 5486 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5487 5488 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5489 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5490 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5491 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5492 page_counter_set_min(&memcg->memory, 0); 5493 page_counter_set_low(&memcg->memory, 0); 5494 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5495 memcg->soft_limit = PAGE_COUNTER_MAX; 5496 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5497 memcg_wb_domain_size_changed(memcg); 5498 } 5499 5500 #ifdef CONFIG_MMU 5501 /* Handlers for move charge at task migration. 
*/ 5502 static int mem_cgroup_do_precharge(unsigned long count) 5503 { 5504 int ret; 5505 5506 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5507 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5508 if (!ret) { 5509 mc.precharge += count; 5510 return ret; 5511 } 5512 5513 /* Try charges one by one with reclaim, but do not retry */ 5514 while (count--) { 5515 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5516 if (ret) 5517 return ret; 5518 mc.precharge++; 5519 cond_resched(); 5520 } 5521 return 0; 5522 } 5523 5524 union mc_target { 5525 struct page *page; 5526 swp_entry_t ent; 5527 }; 5528 5529 enum mc_target_type { 5530 MC_TARGET_NONE = 0, 5531 MC_TARGET_PAGE, 5532 MC_TARGET_SWAP, 5533 MC_TARGET_DEVICE, 5534 }; 5535 5536 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5537 unsigned long addr, pte_t ptent) 5538 { 5539 struct page *page = vm_normal_page(vma, addr, ptent); 5540 5541 if (!page || !page_mapped(page)) 5542 return NULL; 5543 if (PageAnon(page)) { 5544 if (!(mc.flags & MOVE_ANON)) 5545 return NULL; 5546 } else { 5547 if (!(mc.flags & MOVE_FILE)) 5548 return NULL; 5549 } 5550 if (!get_page_unless_zero(page)) 5551 return NULL; 5552 5553 return page; 5554 } 5555 5556 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5557 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5558 pte_t ptent, swp_entry_t *entry) 5559 { 5560 struct page *page = NULL; 5561 swp_entry_t ent = pte_to_swp_entry(ptent); 5562 5563 if (!(mc.flags & MOVE_ANON)) 5564 return NULL; 5565 5566 /* 5567 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to 5568 * a device and because they are not accessible by CPU they are store 5569 * as special swap entry in the CPU page table. 5570 */ 5571 if (is_device_private_entry(ent)) { 5572 page = device_private_entry_to_page(ent); 5573 /* 5574 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have 5575 * a refcount of 1 when free (unlike normal page) 5576 */ 5577 if (!page_ref_add_unless(page, 1, 1)) 5578 return NULL; 5579 return page; 5580 } 5581 5582 if (non_swap_entry(ent)) 5583 return NULL; 5584 5585 /* 5586 * Because lookup_swap_cache() updates some statistics counter, 5587 * we call find_get_page() with swapper_space directly. 5588 */ 5589 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5590 entry->val = ent.val; 5591 5592 return page; 5593 } 5594 #else 5595 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5596 pte_t ptent, swp_entry_t *entry) 5597 { 5598 return NULL; 5599 } 5600 #endif 5601 5602 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5603 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5604 { 5605 if (!vma->vm_file) /* anonymous vma */ 5606 return NULL; 5607 if (!(mc.flags & MOVE_FILE)) 5608 return NULL; 5609 5610 /* page is moved even if it's not RSS of this task(page-faulted). */ 5611 /* shmem/tmpfs may report page out on swap: account for that too. */ 5612 return find_get_incore_page(vma->vm_file->f_mapping, 5613 linear_page_index(vma, addr)); 5614 } 5615 5616 /** 5617 * mem_cgroup_move_account - move account of the page 5618 * @page: the page 5619 * @compound: charge the page as compound or small page 5620 * @from: mem_cgroup which the page is moved from. 5621 * @to: mem_cgroup which the page is moved to. @from != @to. 5622 * 5623 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 
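*
* Return: 0 on success, -EBUSY if the page could not be locked, or
* -EINVAL if the page is no longer charged to @from.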
5624 * 5625 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5626 * from old cgroup. 5627 */ 5628 static int mem_cgroup_move_account(struct page *page, 5629 bool compound, 5630 struct mem_cgroup *from, 5631 struct mem_cgroup *to) 5632 { 5633 struct lruvec *from_vec, *to_vec; 5634 struct pglist_data *pgdat; 5635 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5636 int ret; 5637 5638 VM_BUG_ON(from == to); 5639 VM_BUG_ON_PAGE(PageLRU(page), page); 5640 VM_BUG_ON(compound && !PageTransHuge(page)); 5641 5642 /* 5643 * Prevent mem_cgroup_migrate() from looking at 5644 * page->mem_cgroup of its source page while we change it. 5645 */ 5646 ret = -EBUSY; 5647 if (!trylock_page(page)) 5648 goto out; 5649 5650 ret = -EINVAL; 5651 if (page->mem_cgroup != from) 5652 goto out_unlock; 5653 5654 pgdat = page_pgdat(page); 5655 from_vec = mem_cgroup_lruvec(from, pgdat); 5656 to_vec = mem_cgroup_lruvec(to, pgdat); 5657 5658 lock_page_memcg(page); 5659 5660 if (PageAnon(page)) { 5661 if (page_mapped(page)) { 5662 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5663 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5664 if (PageTransHuge(page)) { 5665 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5666 -nr_pages); 5667 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5668 nr_pages); 5669 } 5670 5671 } 5672 } else { 5673 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5674 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5675 5676 if (PageSwapBacked(page)) { 5677 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5678 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5679 } 5680 5681 if (page_mapped(page)) { 5682 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5683 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5684 } 5685 5686 if (PageDirty(page)) { 5687 struct address_space *mapping = page_mapping(page); 5688 5689 if (mapping_can_writeback(mapping)) { 5690 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5691 -nr_pages); 5692 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5693 nr_pages); 5694 } 5695 } 5696 } 5697 5698 if (PageWriteback(page)) { 5699 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5700 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5701 } 5702 5703 /* 5704 * All state has been migrated, let's switch to the new memcg. 5705 * 5706 * It is safe to change page->mem_cgroup here because the page 5707 * is referenced, charged, isolated, and locked: we can't race 5708 * with (un)charging, migration, LRU putback, or anything else 5709 * that would rely on a stable page->mem_cgroup. 5710 * 5711 * Note that lock_page_memcg is a memcg lock, not a page lock, 5712 * to save space. As soon as we switch page->mem_cgroup to a 5713 * new memcg that isn't locked, the above state can change 5714 * concurrently again. Make sure we're truly done with it. 
5715 */
5716 smp_mb();
5717
5718 css_get(&to->css);
5719 css_put(&from->css);
5720
5721 page->mem_cgroup = to;
5722
5723 __unlock_page_memcg(from);
5724
5725 ret = 0;
5726
5727 local_irq_disable();
5728 mem_cgroup_charge_statistics(to, page, nr_pages);
5729 memcg_check_events(to, page);
5730 mem_cgroup_charge_statistics(from, page, -nr_pages);
5731 memcg_check_events(from, page);
5732 local_irq_enable();
5733 out_unlock:
5734 unlock_page(page);
5735 out:
5736 return ret;
5737 }
5738
5739 /**
5740 * get_mctgt_type - get target type of moving charge
5741 * @vma: the vma to which the pte to be checked belongs
5742 * @addr: the address corresponding to the pte to be checked
5743 * @ptent: the pte to be checked
5744 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5745 *
5746 * Returns
5747 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5748 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5749 * move charge. If @target is not NULL, the page is stored in target->page
5750 * with an extra refcount taken (callers should handle it).
5751 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5752 * target for charge migration. If @target is not NULL, the entry is stored
5753 * in target->ent.
5754 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
5755 * (so ZONE_DEVICE page and thus not on the lru).
5756 * For now such a page is charged like a regular page would be, as for all
5757 * intents and purposes it is just special memory taking the place of a
5758 * regular page.
5759 *
5760 * See Documentation/vm/hmm.rst and include/linux/hmm.h
5761 *
5762 * Called with pte lock held.
5763 */
5764
5765 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5766 unsigned long addr, pte_t ptent, union mc_target *target)
5767 {
5768 struct page *page = NULL;
5769 enum mc_target_type ret = MC_TARGET_NONE;
5770 swp_entry_t ent = { .val = 0 };
5771
5772 if (pte_present(ptent))
5773 page = mc_handle_present_pte(vma, addr, ptent);
5774 else if (is_swap_pte(ptent))
5775 page = mc_handle_swap_pte(vma, ptent, &ent);
5776 else if (pte_none(ptent))
5777 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5778
5779 if (!page && !ent.val)
5780 return ret;
5781 if (page) {
5782 /*
5783 * Do only loose check w/o serialization.
5784 * mem_cgroup_move_account() checks the page is valid or
5785 * not under LRU exclusion.
5786 */
5787 if (page->mem_cgroup == mc.from) {
5788 ret = MC_TARGET_PAGE;
5789 if (is_device_private_page(page))
5790 ret = MC_TARGET_DEVICE;
5791 if (target)
5792 target->page = page;
5793 }
5794 if (!ret || !target)
5795 put_page(page);
5796 }
5797 /*
5798 * There is a swap entry and a page doesn't exist or isn't charged.
5799 * But we cannot move a tail-page in a THP.
5800 */
5801 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5802 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5803 ret = MC_TARGET_SWAP;
5804 if (target)
5805 target->ent = ent;
5806 }
5807 return ret;
5808 }
5809
5810 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5811 /*
5812 * We don't consider PMD mapped swapping or file mapped pages because THP does
5813 * not support them for now.
5814 * Caller should make sure that pmd_trans_huge(pmd) is true.
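*
* Returns MC_TARGET_PAGE if anon charge moving is enabled and the huge
* page is charged to mc.from (an extra page reference is taken when
* @target is supplied); MC_TARGET_NONE otherwise.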
5815 */ 5816 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5817 unsigned long addr, pmd_t pmd, union mc_target *target) 5818 { 5819 struct page *page = NULL; 5820 enum mc_target_type ret = MC_TARGET_NONE; 5821 5822 if (unlikely(is_swap_pmd(pmd))) { 5823 VM_BUG_ON(thp_migration_supported() && 5824 !is_pmd_migration_entry(pmd)); 5825 return ret; 5826 } 5827 page = pmd_page(pmd); 5828 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5829 if (!(mc.flags & MOVE_ANON)) 5830 return ret; 5831 if (page->mem_cgroup == mc.from) { 5832 ret = MC_TARGET_PAGE; 5833 if (target) { 5834 get_page(page); 5835 target->page = page; 5836 } 5837 } 5838 return ret; 5839 } 5840 #else 5841 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5842 unsigned long addr, pmd_t pmd, union mc_target *target) 5843 { 5844 return MC_TARGET_NONE; 5845 } 5846 #endif 5847 5848 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5849 unsigned long addr, unsigned long end, 5850 struct mm_walk *walk) 5851 { 5852 struct vm_area_struct *vma = walk->vma; 5853 pte_t *pte; 5854 spinlock_t *ptl; 5855 5856 ptl = pmd_trans_huge_lock(pmd, vma); 5857 if (ptl) { 5858 /* 5859 * Note their can not be MC_TARGET_DEVICE for now as we do not 5860 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5861 * this might change. 5862 */ 5863 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5864 mc.precharge += HPAGE_PMD_NR; 5865 spin_unlock(ptl); 5866 return 0; 5867 } 5868 5869 if (pmd_trans_unstable(pmd)) 5870 return 0; 5871 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5872 for (; addr != end; pte++, addr += PAGE_SIZE) 5873 if (get_mctgt_type(vma, addr, *pte, NULL)) 5874 mc.precharge++; /* increment precharge temporarily */ 5875 pte_unmap_unlock(pte - 1, ptl); 5876 cond_resched(); 5877 5878 return 0; 5879 } 5880 5881 static const struct mm_walk_ops precharge_walk_ops = { 5882 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5883 }; 5884 5885 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5886 { 5887 unsigned long precharge; 5888 5889 mmap_read_lock(mm); 5890 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5891 mmap_read_unlock(mm); 5892 5893 precharge = mc.precharge; 5894 mc.precharge = 0; 5895 5896 return precharge; 5897 } 5898 5899 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5900 { 5901 unsigned long precharge = mem_cgroup_count_precharge(mm); 5902 5903 VM_BUG_ON(mc.moving_task); 5904 mc.moving_task = current; 5905 return mem_cgroup_do_precharge(precharge); 5906 } 5907 5908 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 5909 static void __mem_cgroup_clear_mc(void) 5910 { 5911 struct mem_cgroup *from = mc.from; 5912 struct mem_cgroup *to = mc.to; 5913 5914 /* we must uncharge all the leftover precharges from mc.to */ 5915 if (mc.precharge) { 5916 cancel_charge(mc.to, mc.precharge); 5917 mc.precharge = 0; 5918 } 5919 /* 5920 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5921 * we must uncharge here. 
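 * (Each page that was successfully moved consumed one precharge that
 * can_attach() took on mc.to and left its original page_counter charge
 * on mc.from; mc.moved_charge counts exactly those pages, so they are
 * cancelled on mc.from below.)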
5922 */ 5923 if (mc.moved_charge) { 5924 cancel_charge(mc.from, mc.moved_charge); 5925 mc.moved_charge = 0; 5926 } 5927 /* we must fixup refcnts and charges */ 5928 if (mc.moved_swap) { 5929 /* uncharge swap account from the old cgroup */ 5930 if (!mem_cgroup_is_root(mc.from)) 5931 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5932 5933 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5934 5935 /* 5936 * we charged both to->memory and to->memsw, so we 5937 * should uncharge to->memory. 5938 */ 5939 if (!mem_cgroup_is_root(mc.to)) 5940 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5941 5942 mc.moved_swap = 0; 5943 } 5944 memcg_oom_recover(from); 5945 memcg_oom_recover(to); 5946 wake_up_all(&mc.waitq); 5947 } 5948 5949 static void mem_cgroup_clear_mc(void) 5950 { 5951 struct mm_struct *mm = mc.mm; 5952 5953 /* 5954 * we must clear moving_task before waking up waiters at the end of 5955 * task migration. 5956 */ 5957 mc.moving_task = NULL; 5958 __mem_cgroup_clear_mc(); 5959 spin_lock(&mc.lock); 5960 mc.from = NULL; 5961 mc.to = NULL; 5962 mc.mm = NULL; 5963 spin_unlock(&mc.lock); 5964 5965 mmput(mm); 5966 } 5967 5968 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5969 { 5970 struct cgroup_subsys_state *css; 5971 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5972 struct mem_cgroup *from; 5973 struct task_struct *leader, *p; 5974 struct mm_struct *mm; 5975 unsigned long move_flags; 5976 int ret = 0; 5977 5978 /* charge immigration isn't supported on the default hierarchy */ 5979 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5980 return 0; 5981 5982 /* 5983 * Multi-process migrations only happen on the default hierarchy 5984 * where charge immigration is not used. Perform charge 5985 * immigration if @tset contains a leader and whine if there are 5986 * multiple. 5987 */ 5988 p = NULL; 5989 cgroup_taskset_for_each_leader(leader, css, tset) { 5990 WARN_ON_ONCE(p); 5991 p = leader; 5992 memcg = mem_cgroup_from_css(css); 5993 } 5994 if (!p) 5995 return 0; 5996 5997 /* 5998 * We are now committed to this value, whatever it is. Changes in this 5999 * tunable will only affect upcoming migrations, not the current one, 6000 * so we need to save it and keep using it.
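 *
 * Illustrative sketch (not kernel code; the v1 mount point and the
 * group name "grp" are assumptions): userspace opts a cgroup into
 * charge moving before migrating a task into it, e.g.
 *
 *	int fd = open("/sys/fs/cgroup/memory/grp/memory.move_charge_at_immigrate",
 *		      O_WRONLY);
 *	write(fd, "3", 1);	(3 == MOVE_ANON | MOVE_FILE)
 *	close(fd);
 *
 * Whatever value is visible here when the migration starts is the one
 * used for the entire operation.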
6001 */ 6002 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 6003 if (!move_flags) 6004 return 0; 6005 6006 from = mem_cgroup_from_task(p); 6007 6008 VM_BUG_ON(from == memcg); 6009 6010 mm = get_task_mm(p); 6011 if (!mm) 6012 return 0; 6013 /* We move charges only when we move a owner of the mm */ 6014 if (mm->owner == p) { 6015 VM_BUG_ON(mc.from); 6016 VM_BUG_ON(mc.to); 6017 VM_BUG_ON(mc.precharge); 6018 VM_BUG_ON(mc.moved_charge); 6019 VM_BUG_ON(mc.moved_swap); 6020 6021 spin_lock(&mc.lock); 6022 mc.mm = mm; 6023 mc.from = from; 6024 mc.to = memcg; 6025 mc.flags = move_flags; 6026 spin_unlock(&mc.lock); 6027 /* We set mc.moving_task later */ 6028 6029 ret = mem_cgroup_precharge_mc(mm); 6030 if (ret) 6031 mem_cgroup_clear_mc(); 6032 } else { 6033 mmput(mm); 6034 } 6035 return ret; 6036 } 6037 6038 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6039 { 6040 if (mc.to) 6041 mem_cgroup_clear_mc(); 6042 } 6043 6044 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6045 unsigned long addr, unsigned long end, 6046 struct mm_walk *walk) 6047 { 6048 int ret = 0; 6049 struct vm_area_struct *vma = walk->vma; 6050 pte_t *pte; 6051 spinlock_t *ptl; 6052 enum mc_target_type target_type; 6053 union mc_target target; 6054 struct page *page; 6055 6056 ptl = pmd_trans_huge_lock(pmd, vma); 6057 if (ptl) { 6058 if (mc.precharge < HPAGE_PMD_NR) { 6059 spin_unlock(ptl); 6060 return 0; 6061 } 6062 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6063 if (target_type == MC_TARGET_PAGE) { 6064 page = target.page; 6065 if (!isolate_lru_page(page)) { 6066 if (!mem_cgroup_move_account(page, true, 6067 mc.from, mc.to)) { 6068 mc.precharge -= HPAGE_PMD_NR; 6069 mc.moved_charge += HPAGE_PMD_NR; 6070 } 6071 putback_lru_page(page); 6072 } 6073 put_page(page); 6074 } else if (target_type == MC_TARGET_DEVICE) { 6075 page = target.page; 6076 if (!mem_cgroup_move_account(page, true, 6077 mc.from, mc.to)) { 6078 mc.precharge -= HPAGE_PMD_NR; 6079 mc.moved_charge += HPAGE_PMD_NR; 6080 } 6081 put_page(page); 6082 } 6083 spin_unlock(ptl); 6084 return 0; 6085 } 6086 6087 if (pmd_trans_unstable(pmd)) 6088 return 0; 6089 retry: 6090 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6091 for (; addr != end; addr += PAGE_SIZE) { 6092 pte_t ptent = *(pte++); 6093 bool device = false; 6094 swp_entry_t ent; 6095 6096 if (!mc.precharge) 6097 break; 6098 6099 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6100 case MC_TARGET_DEVICE: 6101 device = true; 6102 fallthrough; 6103 case MC_TARGET_PAGE: 6104 page = target.page; 6105 /* 6106 * We can have a part of the split pmd here. Moving it 6107 * can be done but it would be too convoluted so simply 6108 * ignore such a partial THP and keep it in original 6109 * memcg. There should be somebody mapping the head. 6110 */ 6111 if (PageTransCompound(page)) 6112 goto put; 6113 if (!device && isolate_lru_page(page)) 6114 goto put; 6115 if (!mem_cgroup_move_account(page, false, 6116 mc.from, mc.to)) { 6117 mc.precharge--; 6118 /* we uncharge from mc.from later. */ 6119 mc.moved_charge++; 6120 } 6121 if (!device) 6122 putback_lru_page(page); 6123 put: /* get_mctgt_type() gets the page */ 6124 put_page(page); 6125 break; 6126 case MC_TARGET_SWAP: 6127 ent = target.ent; 6128 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6129 mc.precharge--; 6130 mem_cgroup_id_get_many(mc.to, 1); 6131 /* we fixup other refcnts and charges later. 
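 * (The extra mc.to id reference taken just above and the memsw/memory
 * counter adjustments on mc.from and mc.to are settled when
 * mc.moved_swap is consumed in __mem_cgroup_clear_mc().)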
*/ 6132 mc.moved_swap++; 6133 } 6134 break; 6135 default: 6136 break; 6137 } 6138 } 6139 pte_unmap_unlock(pte - 1, ptl); 6140 cond_resched(); 6141 6142 if (addr != end) { 6143 /* 6144 * We have consumed all precharges we got in can_attach(). 6145 * We try charge one by one, but don't do any additional 6146 * charges to mc.to if we have failed in charge once in attach() 6147 * phase. 6148 */ 6149 ret = mem_cgroup_do_precharge(1); 6150 if (!ret) 6151 goto retry; 6152 } 6153 6154 return ret; 6155 } 6156 6157 static const struct mm_walk_ops charge_walk_ops = { 6158 .pmd_entry = mem_cgroup_move_charge_pte_range, 6159 }; 6160 6161 static void mem_cgroup_move_charge(void) 6162 { 6163 lru_add_drain_all(); 6164 /* 6165 * Signal lock_page_memcg() to take the memcg's move_lock 6166 * while we're moving its pages to another memcg. Then wait 6167 * for already started RCU-only updates to finish. 6168 */ 6169 atomic_inc(&mc.from->moving_account); 6170 synchronize_rcu(); 6171 retry: 6172 if (unlikely(!mmap_read_trylock(mc.mm))) { 6173 /* 6174 * Someone holding the mmap_lock might be waiting in the 6175 * waitq, so we cancel all extra charges, wake up all waiters, 6176 * and retry. Because we cancel precharges, we might not be able 6177 * to move enough charges, but moving charge is a best-effort 6178 * feature anyway, so it wouldn't be a big problem. 6179 */ 6180 __mem_cgroup_clear_mc(); 6181 cond_resched(); 6182 goto retry; 6183 } 6184 /* 6185 * When we have consumed all precharges and failed to do an 6186 * additional charge, the page walk just aborts. 6187 */ 6188 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6189 NULL); 6190 6191 mmap_read_unlock(mc.mm); 6192 atomic_dec(&mc.from->moving_account); 6193 } 6194 6195 static void mem_cgroup_move_task(void) 6196 { 6197 if (mc.to) { 6198 mem_cgroup_move_charge(); 6199 mem_cgroup_clear_mc(); 6200 } 6201 } 6202 #else /* !CONFIG_MMU */ 6203 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6204 { 6205 return 0; 6206 } 6207 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6208 { 6209 } 6210 static void mem_cgroup_move_task(void) 6211 { 6212 } 6213 #endif 6214 6215 /* 6216 * Cgroup retains root cgroups across [un]mount cycles, making it necessary 6217 * to verify whether we're attached to the default hierarchy on each mount 6218 * attempt. 6219 */ 6220 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) 6221 { 6222 /* 6223 * use_hierarchy is forced on the default hierarchy. cgroup core 6224 * guarantees that @root doesn't have any children, so turning it 6225 * on for the root memcg is enough.
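 * (The default hierarchy has no use_hierarchy knob at all; hierarchical
 * accounting is always in effect there, and this simply records that
 * fact on the root memcg.)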
6226 */ 6227 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 6228 root_mem_cgroup->use_hierarchy = true; 6229 else 6230 root_mem_cgroup->use_hierarchy = false; 6231 } 6232 6233 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6234 { 6235 if (value == PAGE_COUNTER_MAX) 6236 seq_puts(m, "max\n"); 6237 else 6238 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6239 6240 return 0; 6241 } 6242 6243 static u64 memory_current_read(struct cgroup_subsys_state *css, 6244 struct cftype *cft) 6245 { 6246 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6247 6248 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6249 } 6250 6251 static int memory_min_show(struct seq_file *m, void *v) 6252 { 6253 return seq_puts_memcg_tunable(m, 6254 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6255 } 6256 6257 static ssize_t memory_min_write(struct kernfs_open_file *of, 6258 char *buf, size_t nbytes, loff_t off) 6259 { 6260 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6261 unsigned long min; 6262 int err; 6263 6264 buf = strstrip(buf); 6265 err = page_counter_memparse(buf, "max", &min); 6266 if (err) 6267 return err; 6268 6269 page_counter_set_min(&memcg->memory, min); 6270 6271 return nbytes; 6272 } 6273 6274 static int memory_low_show(struct seq_file *m, void *v) 6275 { 6276 return seq_puts_memcg_tunable(m, 6277 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6278 } 6279 6280 static ssize_t memory_low_write(struct kernfs_open_file *of, 6281 char *buf, size_t nbytes, loff_t off) 6282 { 6283 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6284 unsigned long low; 6285 int err; 6286 6287 buf = strstrip(buf); 6288 err = page_counter_memparse(buf, "max", &low); 6289 if (err) 6290 return err; 6291 6292 page_counter_set_low(&memcg->memory, low); 6293 6294 return nbytes; 6295 } 6296 6297 static int memory_high_show(struct seq_file *m, void *v) 6298 { 6299 return seq_puts_memcg_tunable(m, 6300 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6301 } 6302 6303 static ssize_t memory_high_write(struct kernfs_open_file *of, 6304 char *buf, size_t nbytes, loff_t off) 6305 { 6306 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6307 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6308 bool drained = false; 6309 unsigned long high; 6310 int err; 6311 6312 buf = strstrip(buf); 6313 err = page_counter_memparse(buf, "max", &high); 6314 if (err) 6315 return err; 6316 6317 for (;;) { 6318 unsigned long nr_pages = page_counter_read(&memcg->memory); 6319 unsigned long reclaimed; 6320 6321 if (nr_pages <= high) 6322 break; 6323 6324 if (signal_pending(current)) 6325 break; 6326 6327 if (!drained) { 6328 drain_all_stock(memcg); 6329 drained = true; 6330 continue; 6331 } 6332 6333 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6334 GFP_KERNEL, true); 6335 6336 if (!reclaimed && !nr_retries--) 6337 break; 6338 } 6339 6340 page_counter_set_high(&memcg->memory, high); 6341 6342 memcg_wb_domain_size_changed(memcg); 6343 6344 return nbytes; 6345 } 6346 6347 static int memory_max_show(struct seq_file *m, void *v) 6348 { 6349 return seq_puts_memcg_tunable(m, 6350 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6351 } 6352 6353 static ssize_t memory_max_write(struct kernfs_open_file *of, 6354 char *buf, size_t nbytes, loff_t off) 6355 { 6356 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6357 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6358 bool drained = false; 6359 unsigned long max; 6360 int err; 6361 6362 buf = strstrip(buf); 6363 err = 
page_counter_memparse(buf, "max", &max); 6364 if (err) 6365 return err; 6366 6367 xchg(&memcg->memory.max, max); 6368 6369 for (;;) { 6370 unsigned long nr_pages = page_counter_read(&memcg->memory); 6371 6372 if (nr_pages <= max) 6373 break; 6374 6375 if (signal_pending(current)) 6376 break; 6377 6378 if (!drained) { 6379 drain_all_stock(memcg); 6380 drained = true; 6381 continue; 6382 } 6383 6384 if (nr_reclaims) { 6385 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6386 GFP_KERNEL, true)) 6387 nr_reclaims--; 6388 continue; 6389 } 6390 6391 memcg_memory_event(memcg, MEMCG_OOM); 6392 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6393 break; 6394 } 6395 6396 memcg_wb_domain_size_changed(memcg); 6397 return nbytes; 6398 } 6399 6400 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6401 { 6402 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6403 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6404 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6405 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6406 seq_printf(m, "oom_kill %lu\n", 6407 atomic_long_read(&events[MEMCG_OOM_KILL])); 6408 } 6409 6410 static int memory_events_show(struct seq_file *m, void *v) 6411 { 6412 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6413 6414 __memory_events_show(m, memcg->memory_events); 6415 return 0; 6416 } 6417 6418 static int memory_events_local_show(struct seq_file *m, void *v) 6419 { 6420 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6421 6422 __memory_events_show(m, memcg->memory_events_local); 6423 return 0; 6424 } 6425 6426 static int memory_stat_show(struct seq_file *m, void *v) 6427 { 6428 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6429 char *buf; 6430 6431 buf = memory_stat_format(memcg); 6432 if (!buf) 6433 return -ENOMEM; 6434 seq_puts(m, buf); 6435 kfree(buf); 6436 return 0; 6437 } 6438 6439 #ifdef CONFIG_NUMA 6440 static int memory_numa_stat_show(struct seq_file *m, void *v) 6441 { 6442 int i; 6443 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6444 6445 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6446 int nid; 6447 6448 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6449 continue; 6450 6451 seq_printf(m, "%s", memory_stats[i].name); 6452 for_each_node_state(nid, N_MEMORY) { 6453 u64 size; 6454 struct lruvec *lruvec; 6455 6456 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6457 size = lruvec_page_state(lruvec, memory_stats[i].idx); 6458 size *= memory_stats[i].ratio; 6459 seq_printf(m, " N%d=%llu", nid, size); 6460 } 6461 seq_putc(m, '\n'); 6462 } 6463 6464 return 0; 6465 } 6466 #endif 6467 6468 static int memory_oom_group_show(struct seq_file *m, void *v) 6469 { 6470 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6471 6472 seq_printf(m, "%d\n", memcg->oom_group); 6473 6474 return 0; 6475 } 6476 6477 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6478 char *buf, size_t nbytes, loff_t off) 6479 { 6480 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6481 int ret, oom_group; 6482 6483 buf = strstrip(buf); 6484 if (!buf) 6485 return -EINVAL; 6486 6487 ret = kstrtoint(buf, 0, &oom_group); 6488 if (ret) 6489 return ret; 6490 6491 if (oom_group != 0 && oom_group != 1) 6492 return -EINVAL; 6493 6494 memcg->oom_group = oom_group; 6495 6496 return nbytes; 6497 } 6498 6499 static struct cftype memory_files[] = { 6500 { 6501 .name = "current", 6502 .flags = CFTYPE_NOT_ON_ROOT, 6503 .read_u64 = memory_current_read, 6504 }, 
6505 { 6506 .name = "min", 6507 .flags = CFTYPE_NOT_ON_ROOT, 6508 .seq_show = memory_min_show, 6509 .write = memory_min_write, 6510 }, 6511 { 6512 .name = "low", 6513 .flags = CFTYPE_NOT_ON_ROOT, 6514 .seq_show = memory_low_show, 6515 .write = memory_low_write, 6516 }, 6517 { 6518 .name = "high", 6519 .flags = CFTYPE_NOT_ON_ROOT, 6520 .seq_show = memory_high_show, 6521 .write = memory_high_write, 6522 }, 6523 { 6524 .name = "max", 6525 .flags = CFTYPE_NOT_ON_ROOT, 6526 .seq_show = memory_max_show, 6527 .write = memory_max_write, 6528 }, 6529 { 6530 .name = "events", 6531 .flags = CFTYPE_NOT_ON_ROOT, 6532 .file_offset = offsetof(struct mem_cgroup, events_file), 6533 .seq_show = memory_events_show, 6534 }, 6535 { 6536 .name = "events.local", 6537 .flags = CFTYPE_NOT_ON_ROOT, 6538 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6539 .seq_show = memory_events_local_show, 6540 }, 6541 { 6542 .name = "stat", 6543 .seq_show = memory_stat_show, 6544 }, 6545 #ifdef CONFIG_NUMA 6546 { 6547 .name = "numa_stat", 6548 .seq_show = memory_numa_stat_show, 6549 }, 6550 #endif 6551 { 6552 .name = "oom.group", 6553 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6554 .seq_show = memory_oom_group_show, 6555 .write = memory_oom_group_write, 6556 }, 6557 { } /* terminate */ 6558 }; 6559 6560 struct cgroup_subsys memory_cgrp_subsys = { 6561 .css_alloc = mem_cgroup_css_alloc, 6562 .css_online = mem_cgroup_css_online, 6563 .css_offline = mem_cgroup_css_offline, 6564 .css_released = mem_cgroup_css_released, 6565 .css_free = mem_cgroup_css_free, 6566 .css_reset = mem_cgroup_css_reset, 6567 .can_attach = mem_cgroup_can_attach, 6568 .cancel_attach = mem_cgroup_cancel_attach, 6569 .post_attach = mem_cgroup_move_task, 6570 .bind = mem_cgroup_bind, 6571 .dfl_cftypes = memory_files, 6572 .legacy_cftypes = mem_cgroup_legacy_files, 6573 .early_init = 0, 6574 }; 6575 6576 /* 6577 * This function calculates an individual cgroup's effective 6578 * protection which is derived from its own memory.min/low, its 6579 * parent's and siblings' settings, as well as the actual memory 6580 * distribution in the tree. 6581 * 6582 * The following rules apply to the effective protection values: 6583 * 6584 * 1. At the first level of reclaim, effective protection is equal to 6585 * the declared protection in memory.min and memory.low. 6586 * 6587 * 2. To enable safe delegation of the protection configuration, at 6588 * subsequent levels the effective protection is capped to the 6589 * parent's effective protection. 6590 * 6591 * 3. To make complex and dynamic subtrees easier to configure, the 6592 * user is allowed to overcommit the declared protection at a given 6593 * level. If that is the case, the parent's effective protection is 6594 * distributed to the children in proportion to how much protection 6595 * they have declared and how much of it they are utilizing. 6596 * 6597 * This makes distribution proportional, but also work-conserving: 6598 * if one cgroup claims much more protection than it uses memory, 6599 * the unused remainder is available to its siblings. 6600 * 6601 * 4. Conversely, when the declared protection is undercommitted at a 6602 * given level, the distribution of the larger parental protection 6603 * budget is NOT proportional. A cgroup's protection from a sibling 6604 * is capped to its own memory.min/low setting. 6605 * 6606 * 5. 
However, to allow protecting recursive subtrees from each other 6607 * without having to declare each individual cgroup's fixed share 6608 * of the ancestor's claim to protection, any unutilized - 6609 * "floating" - protection from up the tree is distributed in 6610 * proportion to each cgroup's *usage*. This makes the protection 6611 * neutral wrt sibling cgroups and lets them compete freely over 6612 * the shared parental protection budget, but it protects the 6613 * subtree as a whole from neighboring subtrees. 6614 * 6615 * Note that 4. and 5. are not in conflict: 4. is about protecting 6616 * against immediate siblings whereas 5. is about protecting against 6617 * neighboring subtrees. 6618 */ 6619 static unsigned long effective_protection(unsigned long usage, 6620 unsigned long parent_usage, 6621 unsigned long setting, 6622 unsigned long parent_effective, 6623 unsigned long siblings_protected) 6624 { 6625 unsigned long protected; 6626 unsigned long ep; 6627 6628 protected = min(usage, setting); 6629 /* 6630 * If all cgroups at this level combined claim and use more 6631 * protection than what the parent affords them, distribute 6632 * shares in proportion to utilization. 6633 * 6634 * We are using actual utilization rather than the statically 6635 * claimed protection in order to be work-conserving: claimed 6636 * but unused protection is available to siblings that would 6637 * otherwise get a smaller chunk than what they claimed. 6638 */ 6639 if (siblings_protected > parent_effective) 6640 return protected * parent_effective / siblings_protected; 6641 6642 /* 6643 * Ok, utilized protection of all children is within what the 6644 * parent affords them, so we know whatever this child claims 6645 * and utilizes is effectively protected. 6646 * 6647 * If there is unprotected usage beyond this value, reclaim 6648 * will apply pressure in proportion to that amount. 6649 * 6650 * If there is unutilized protection, the cgroup will be fully 6651 * shielded from reclaim, but we do return a smaller value for 6652 * protection than what the group could enjoy in theory. This 6653 * is okay. With the overcommit distribution above, effective 6654 * protection is always dependent on how memory is actually 6655 * consumed among the siblings anyway. 6656 */ 6657 ep = protected; 6658 6659 /* 6660 * If the children aren't claiming (all of) the protection 6661 * afforded to them by the parent, distribute the remainder in 6662 * proportion to the (unprotected) memory of each cgroup. That 6663 * way, cgroups that aren't explicitly prioritized wrt each 6664 * other compete freely over the allowance, but they are 6665 * collectively protected from neighboring trees. 6666 * 6667 * We're using unprotected memory for the weight so that if 6668 * some cgroups DO claim explicit protection, we don't protect 6669 * the same bytes twice. 6670 * 6671 * Check both usage and parent_usage against the respective 6672 * protected values. One should imply the other, but they 6673 * aren't read atomically - make sure the division is sane.
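 *
 * A worked example with made-up numbers, assuming the
 * memory_recursiveprot mount option (checked right below) is enabled:
 * parent_effective = 100 pages, siblings_protected = 50, parent_usage =
 * 150, and this child has usage = 60 with setting = 20, so protected =
 * ep = 20. The floating remainder handed out below is
 *
 *	(100 - 50) * (60 - 20) / (150 - 50) = 20 pages,
 *
 * lifting this cgroup's effective protection to 40 pages.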
6674 */ 6675 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6676 return ep; 6677 if (parent_effective > siblings_protected && 6678 parent_usage > siblings_protected && 6679 usage > protected) { 6680 unsigned long unclaimed; 6681 6682 unclaimed = parent_effective - siblings_protected; 6683 unclaimed *= usage - protected; 6684 unclaimed /= parent_usage - siblings_protected; 6685 6686 ep += unclaimed; 6687 } 6688 6689 return ep; 6690 } 6691 6692 /** 6693 * mem_cgroup_protected - check if memory consumption is in the normal range 6694 * @root: the top ancestor of the sub-tree being checked 6695 * @memcg: the memory cgroup to check 6696 * 6697 * WARNING: This function is not stateless! It can only be used as part 6698 * of a top-down tree iteration, not for isolated queries. 6699 */ 6700 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6701 struct mem_cgroup *memcg) 6702 { 6703 unsigned long usage, parent_usage; 6704 struct mem_cgroup *parent; 6705 6706 if (mem_cgroup_disabled()) 6707 return; 6708 6709 if (!root) 6710 root = root_mem_cgroup; 6711 6712 /* 6713 * Effective values of the reclaim targets are ignored so they 6714 * can be stale. Have a look at mem_cgroup_protection for more 6715 * details. 6716 * TODO: calculation should be more robust so that we do not need 6717 * that special casing. 6718 */ 6719 if (memcg == root) 6720 return; 6721 6722 usage = page_counter_read(&memcg->memory); 6723 if (!usage) 6724 return; 6725 6726 parent = parent_mem_cgroup(memcg); 6727 /* No parent means a non-hierarchical mode on v1 memcg */ 6728 if (!parent) 6729 return; 6730 6731 if (parent == root) { 6732 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6733 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6734 return; 6735 } 6736 6737 parent_usage = page_counter_read(&parent->memory); 6738 6739 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6740 READ_ONCE(memcg->memory.min), 6741 READ_ONCE(parent->memory.emin), 6742 atomic_long_read(&parent->memory.children_min_usage))); 6743 6744 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6745 READ_ONCE(memcg->memory.low), 6746 READ_ONCE(parent->memory.elow), 6747 atomic_long_read(&parent->memory.children_low_usage))); 6748 } 6749 6750 /** 6751 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6752 * @page: page to charge 6753 * @mm: mm context of the victim 6754 * @gfp_mask: reclaim mode 6755 * 6756 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6757 * pages according to @gfp_mask if necessary. 6758 * 6759 * Returns 0 on success. Otherwise, an error code is returned. 6760 */ 6761 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6762 { 6763 unsigned int nr_pages = thp_nr_pages(page); 6764 struct mem_cgroup *memcg = NULL; 6765 int ret = 0; 6766 6767 if (mem_cgroup_disabled()) 6768 goto out; 6769 6770 if (PageSwapCache(page)) { 6771 swp_entry_t ent = { .val = page_private(page), }; 6772 unsigned short id; 6773 6774 /* 6775 * Every swap fault against a single page tries to charge the 6776 * page, bail as early as possible. shmem_unuse() encounters 6777 * already charged pages, too. page->mem_cgroup is protected 6778 * by the page lock, which serializes swap cache removal, which 6779 * in turn serializes uncharging. 
6780 */ 6781 VM_BUG_ON_PAGE(!PageLocked(page), page); 6782 if (compound_head(page)->mem_cgroup) 6783 goto out; 6784 6785 id = lookup_swap_cgroup_id(ent); 6786 rcu_read_lock(); 6787 memcg = mem_cgroup_from_id(id); 6788 if (memcg && !css_tryget_online(&memcg->css)) 6789 memcg = NULL; 6790 rcu_read_unlock(); 6791 } 6792 6793 if (!memcg) 6794 memcg = get_mem_cgroup_from_mm(mm); 6795 6796 ret = try_charge(memcg, gfp_mask, nr_pages); 6797 if (ret) 6798 goto out_put; 6799 6800 css_get(&memcg->css); 6801 commit_charge(page, memcg); 6802 6803 local_irq_disable(); 6804 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6805 memcg_check_events(memcg, page); 6806 local_irq_enable(); 6807 6808 if (PageSwapCache(page)) { 6809 swp_entry_t entry = { .val = page_private(page) }; 6810 /* 6811 * The swap entry might not get freed for a long time, 6812 * let's not wait for it. The page already received a 6813 * memory+swap charge, drop the swap entry duplicate. 6814 */ 6815 mem_cgroup_uncharge_swap(entry, nr_pages); 6816 } 6817 6818 out_put: 6819 css_put(&memcg->css); 6820 out: 6821 return ret; 6822 } 6823 6824 struct uncharge_gather { 6825 struct mem_cgroup *memcg; 6826 unsigned long nr_pages; 6827 unsigned long pgpgout; 6828 unsigned long nr_kmem; 6829 struct page *dummy_page; 6830 }; 6831 6832 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6833 { 6834 memset(ug, 0, sizeof(*ug)); 6835 } 6836 6837 static void uncharge_batch(const struct uncharge_gather *ug) 6838 { 6839 unsigned long flags; 6840 6841 if (!mem_cgroup_is_root(ug->memcg)) { 6842 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); 6843 if (do_memsw_account()) 6844 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); 6845 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6846 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6847 memcg_oom_recover(ug->memcg); 6848 } 6849 6850 local_irq_save(flags); 6851 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6852 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); 6853 memcg_check_events(ug->memcg, ug->dummy_page); 6854 local_irq_restore(flags); 6855 6856 /* drop reference from uncharge_page */ 6857 css_put(&ug->memcg->css); 6858 } 6859 6860 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6861 { 6862 unsigned long nr_pages; 6863 6864 VM_BUG_ON_PAGE(PageLRU(page), page); 6865 6866 if (!page->mem_cgroup) 6867 return; 6868 6869 /* 6870 * Nobody should be changing or seriously looking at 6871 * page->mem_cgroup at this point, we have fully 6872 * exclusive access to the page. 6873 */ 6874 6875 if (ug->memcg != page->mem_cgroup) { 6876 if (ug->memcg) { 6877 uncharge_batch(ug); 6878 uncharge_gather_clear(ug); 6879 } 6880 ug->memcg = page->mem_cgroup; 6881 6882 /* pairs with css_put in uncharge_batch */ 6883 css_get(&ug->memcg->css); 6884 } 6885 6886 nr_pages = compound_nr(page); 6887 ug->nr_pages += nr_pages; 6888 6889 if (!PageKmemcg(page)) { 6890 ug->pgpgout++; 6891 } else { 6892 ug->nr_kmem += nr_pages; 6893 __ClearPageKmemcg(page); 6894 } 6895 6896 ug->dummy_page = page; 6897 page->mem_cgroup = NULL; 6898 css_put(&ug->memcg->css); 6899 } 6900 6901 static void uncharge_list(struct list_head *page_list) 6902 { 6903 struct uncharge_gather ug; 6904 struct list_head *next; 6905 6906 uncharge_gather_clear(&ug); 6907 6908 /* 6909 * Note that the list can be a single page->lru; hence the 6910 * do-while loop instead of a simple list_for_each_entry(). 
6911 */ 6912 next = page_list->next; 6913 do { 6914 struct page *page; 6915 6916 page = list_entry(next, struct page, lru); 6917 next = page->lru.next; 6918 6919 uncharge_page(page, &ug); 6920 } while (next != page_list); 6921 6922 if (ug.memcg) 6923 uncharge_batch(&ug); 6924 } 6925 6926 /** 6927 * mem_cgroup_uncharge - uncharge a page 6928 * @page: page to uncharge 6929 * 6930 * Uncharge a page previously charged with mem_cgroup_charge(). 6931 */ 6932 void mem_cgroup_uncharge(struct page *page) 6933 { 6934 struct uncharge_gather ug; 6935 6936 if (mem_cgroup_disabled()) 6937 return; 6938 6939 /* Don't touch page->lru of any random page, pre-check: */ 6940 if (!page->mem_cgroup) 6941 return; 6942 6943 uncharge_gather_clear(&ug); 6944 uncharge_page(page, &ug); 6945 uncharge_batch(&ug); 6946 } 6947 6948 /** 6949 * mem_cgroup_uncharge_list - uncharge a list of page 6950 * @page_list: list of pages to uncharge 6951 * 6952 * Uncharge a list of pages previously charged with 6953 * mem_cgroup_charge(). 6954 */ 6955 void mem_cgroup_uncharge_list(struct list_head *page_list) 6956 { 6957 if (mem_cgroup_disabled()) 6958 return; 6959 6960 if (!list_empty(page_list)) 6961 uncharge_list(page_list); 6962 } 6963 6964 /** 6965 * mem_cgroup_migrate - charge a page's replacement 6966 * @oldpage: currently circulating page 6967 * @newpage: replacement page 6968 * 6969 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6970 * be uncharged upon free. 6971 * 6972 * Both pages must be locked, @newpage->mapping must be set up. 6973 */ 6974 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6975 { 6976 struct mem_cgroup *memcg; 6977 unsigned int nr_pages; 6978 unsigned long flags; 6979 6980 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6981 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6982 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6983 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6984 newpage); 6985 6986 if (mem_cgroup_disabled()) 6987 return; 6988 6989 /* Page cache replacement: new page already charged? */ 6990 if (newpage->mem_cgroup) 6991 return; 6992 6993 /* Swapcache readahead pages can get replaced before being charged */ 6994 memcg = oldpage->mem_cgroup; 6995 if (!memcg) 6996 return; 6997 6998 /* Force-charge the new page. The old one will be freed soon */ 6999 nr_pages = thp_nr_pages(newpage); 7000 7001 page_counter_charge(&memcg->memory, nr_pages); 7002 if (do_memsw_account()) 7003 page_counter_charge(&memcg->memsw, nr_pages); 7004 7005 css_get(&memcg->css); 7006 commit_charge(newpage, memcg); 7007 7008 local_irq_save(flags); 7009 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 7010 memcg_check_events(memcg, newpage); 7011 local_irq_restore(flags); 7012 } 7013 7014 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7015 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7016 7017 void mem_cgroup_sk_alloc(struct sock *sk) 7018 { 7019 struct mem_cgroup *memcg; 7020 7021 if (!mem_cgroup_sockets_enabled) 7022 return; 7023 7024 /* Do not associate the sock with unrelated interrupted task's memcg. 
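 * (For example, a socket allocated from the receive softirq runs in the
 * context of whichever task happened to be interrupted on this CPU;
 * charging that task's memcg would be arbitrary.)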
*/ 7025 if (in_interrupt()) 7026 return; 7027 7028 rcu_read_lock(); 7029 memcg = mem_cgroup_from_task(current); 7030 if (memcg == root_mem_cgroup) 7031 goto out; 7032 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7033 goto out; 7034 if (css_tryget(&memcg->css)) 7035 sk->sk_memcg = memcg; 7036 out: 7037 rcu_read_unlock(); 7038 } 7039 7040 void mem_cgroup_sk_free(struct sock *sk) 7041 { 7042 if (sk->sk_memcg) 7043 css_put(&sk->sk_memcg->css); 7044 } 7045 7046 /** 7047 * mem_cgroup_charge_skmem - charge socket memory 7048 * @memcg: memcg to charge 7049 * @nr_pages: number of pages to charge 7050 * 7051 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7052 * @memcg's configured limit, %false if the charge had to be forced. 7053 */ 7054 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7055 { 7056 gfp_t gfp_mask = GFP_KERNEL; 7057 7058 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7059 struct page_counter *fail; 7060 7061 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7062 memcg->tcpmem_pressure = 0; 7063 return true; 7064 } 7065 page_counter_charge(&memcg->tcpmem, nr_pages); 7066 memcg->tcpmem_pressure = 1; 7067 return false; 7068 } 7069 7070 /* Don't block in the packet receive path */ 7071 if (in_softirq()) 7072 gfp_mask = GFP_NOWAIT; 7073 7074 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7075 7076 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 7077 return true; 7078 7079 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 7080 return false; 7081 } 7082 7083 /** 7084 * mem_cgroup_uncharge_skmem - uncharge socket memory 7085 * @memcg: memcg to uncharge 7086 * @nr_pages: number of pages to uncharge 7087 */ 7088 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7089 { 7090 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7091 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7092 return; 7093 } 7094 7095 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7096 7097 refill_stock(memcg, nr_pages); 7098 } 7099 7100 static int __init cgroup_memory(char *s) 7101 { 7102 char *token; 7103 7104 while ((token = strsep(&s, ",")) != NULL) { 7105 if (!*token) 7106 continue; 7107 if (!strcmp(token, "nosocket")) 7108 cgroup_memory_nosocket = true; 7109 if (!strcmp(token, "nokmem")) 7110 cgroup_memory_nokmem = true; 7111 } 7112 return 0; 7113 } 7114 __setup("cgroup.memory=", cgroup_memory); 7115 7116 /* 7117 * subsys_initcall() for memory controller. 7118 * 7119 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7120 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7121 * basically everything that doesn't depend on a specific mem_cgroup structure 7122 * should be initialized from here. 7123 */ 7124 static int __init mem_cgroup_init(void) 7125 { 7126 int cpu, node; 7127 7128 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7129 memcg_hotplug_cpu_dead); 7130 7131 for_each_possible_cpu(cpu) 7132 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7133 drain_local_stock); 7134 7135 for_each_node(node) { 7136 struct mem_cgroup_tree_per_node *rtpn; 7137 7138 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7139 node_online(node) ? 
node : NUMA_NO_NODE); 7140 7141 rtpn->rb_root = RB_ROOT; 7142 rtpn->rb_rightmost = NULL; 7143 spin_lock_init(&rtpn->lock); 7144 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7145 } 7146 7147 return 0; 7148 } 7149 subsys_initcall(mem_cgroup_init); 7150 7151 #ifdef CONFIG_MEMCG_SWAP 7152 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7153 { 7154 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7155 /* 7156 * The root cgroup cannot be destroyed, so it's refcount must 7157 * always be >= 1. 7158 */ 7159 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7160 VM_BUG_ON(1); 7161 break; 7162 } 7163 memcg = parent_mem_cgroup(memcg); 7164 if (!memcg) 7165 memcg = root_mem_cgroup; 7166 } 7167 return memcg; 7168 } 7169 7170 /** 7171 * mem_cgroup_swapout - transfer a memsw charge to swap 7172 * @page: page whose memsw charge to transfer 7173 * @entry: swap entry to move the charge to 7174 * 7175 * Transfer the memsw charge of @page to @entry. 7176 */ 7177 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7178 { 7179 struct mem_cgroup *memcg, *swap_memcg; 7180 unsigned int nr_entries; 7181 unsigned short oldid; 7182 7183 VM_BUG_ON_PAGE(PageLRU(page), page); 7184 VM_BUG_ON_PAGE(page_count(page), page); 7185 7186 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7187 return; 7188 7189 memcg = page->mem_cgroup; 7190 7191 /* Readahead page, never charged */ 7192 if (!memcg) 7193 return; 7194 7195 /* 7196 * In case the memcg owning these pages has been offlined and doesn't 7197 * have an ID allocated to it anymore, charge the closest online 7198 * ancestor for the swap instead and transfer the memory+swap charge. 7199 */ 7200 swap_memcg = mem_cgroup_id_get_online(memcg); 7201 nr_entries = thp_nr_pages(page); 7202 /* Get references for the tail pages, too */ 7203 if (nr_entries > 1) 7204 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7205 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7206 nr_entries); 7207 VM_BUG_ON_PAGE(oldid, page); 7208 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7209 7210 page->mem_cgroup = NULL; 7211 7212 if (!mem_cgroup_is_root(memcg)) 7213 page_counter_uncharge(&memcg->memory, nr_entries); 7214 7215 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7216 if (!mem_cgroup_is_root(swap_memcg)) 7217 page_counter_charge(&swap_memcg->memsw, nr_entries); 7218 page_counter_uncharge(&memcg->memsw, nr_entries); 7219 } 7220 7221 /* 7222 * Interrupts should be disabled here because the caller holds the 7223 * i_pages lock which is taken with interrupts-off. It is 7224 * important here to have the interrupts disabled because it is the 7225 * only synchronisation we have for updating the per-CPU variables. 7226 */ 7227 VM_BUG_ON(!irqs_disabled()); 7228 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7229 memcg_check_events(memcg, page); 7230 7231 css_put(&memcg->css); 7232 } 7233 7234 /** 7235 * mem_cgroup_try_charge_swap - try charging swap space for a page 7236 * @page: page being added to swap 7237 * @entry: swap entry to charge 7238 * 7239 * Try to charge @page's memcg for the swap space at @entry. 7240 * 7241 * Returns 0 on success, -ENOMEM on failure. 
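 * (Typically called from the swap-out path once a swap slot has been
 * allocated for the page, e.g. get_swap_page(); on failure the caller
 * is expected to give the swap entry back instead of proceeding
 * uncharged.)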
7242 */ 7243 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7244 { 7245 unsigned int nr_pages = thp_nr_pages(page); 7246 struct page_counter *counter; 7247 struct mem_cgroup *memcg; 7248 unsigned short oldid; 7249 7250 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7251 return 0; 7252 7253 memcg = page->mem_cgroup; 7254 7255 /* Readahead page, never charged */ 7256 if (!memcg) 7257 return 0; 7258 7259 if (!entry.val) { 7260 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7261 return 0; 7262 } 7263 7264 memcg = mem_cgroup_id_get_online(memcg); 7265 7266 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7267 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7268 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7269 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7270 mem_cgroup_id_put(memcg); 7271 return -ENOMEM; 7272 } 7273 7274 /* Get references for the tail pages, too */ 7275 if (nr_pages > 1) 7276 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7277 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7278 VM_BUG_ON_PAGE(oldid, page); 7279 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7280 7281 return 0; 7282 } 7283 7284 /** 7285 * mem_cgroup_uncharge_swap - uncharge swap space 7286 * @entry: swap entry to uncharge 7287 * @nr_pages: the amount of swap space to uncharge 7288 */ 7289 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7290 { 7291 struct mem_cgroup *memcg; 7292 unsigned short id; 7293 7294 id = swap_cgroup_record(entry, 0, nr_pages); 7295 rcu_read_lock(); 7296 memcg = mem_cgroup_from_id(id); 7297 if (memcg) { 7298 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7299 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7300 page_counter_uncharge(&memcg->swap, nr_pages); 7301 else 7302 page_counter_uncharge(&memcg->memsw, nr_pages); 7303 } 7304 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7305 mem_cgroup_id_put_many(memcg, nr_pages); 7306 } 7307 rcu_read_unlock(); 7308 } 7309 7310 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7311 { 7312 long nr_swap_pages = get_nr_swap_pages(); 7313 7314 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7315 return nr_swap_pages; 7316 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7317 nr_swap_pages = min_t(long, nr_swap_pages, 7318 READ_ONCE(memcg->swap.max) - 7319 page_counter_read(&memcg->swap)); 7320 return nr_swap_pages; 7321 } 7322 7323 bool mem_cgroup_swap_full(struct page *page) 7324 { 7325 struct mem_cgroup *memcg; 7326 7327 VM_BUG_ON_PAGE(!PageLocked(page), page); 7328 7329 if (vm_swap_full()) 7330 return true; 7331 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7332 return false; 7333 7334 memcg = page->mem_cgroup; 7335 if (!memcg) 7336 return false; 7337 7338 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7339 unsigned long usage = page_counter_read(&memcg->swap); 7340 7341 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7342 usage * 2 >= READ_ONCE(memcg->swap.max)) 7343 return true; 7344 } 7345 7346 return false; 7347 } 7348 7349 static int __init setup_swap_account(char *s) 7350 { 7351 if (!strcmp(s, "1")) 7352 cgroup_memory_noswap = 0; 7353 else if (!strcmp(s, "0")) 7354 cgroup_memory_noswap = 1; 7355 return 1; 7356 } 7357 __setup("swapaccount=", setup_swap_account); 7358 7359 static u64 swap_current_read(struct cgroup_subsys_state *css, 7360 struct cftype *cft) 7361 { 7362 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7363 7364 return 
(u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7365 } 7366 7367 static int swap_high_show(struct seq_file *m, void *v) 7368 { 7369 return seq_puts_memcg_tunable(m, 7370 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7371 } 7372 7373 static ssize_t swap_high_write(struct kernfs_open_file *of, 7374 char *buf, size_t nbytes, loff_t off) 7375 { 7376 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7377 unsigned long high; 7378 int err; 7379 7380 buf = strstrip(buf); 7381 err = page_counter_memparse(buf, "max", &high); 7382 if (err) 7383 return err; 7384 7385 page_counter_set_high(&memcg->swap, high); 7386 7387 return nbytes; 7388 } 7389 7390 static int swap_max_show(struct seq_file *m, void *v) 7391 { 7392 return seq_puts_memcg_tunable(m, 7393 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7394 } 7395 7396 static ssize_t swap_max_write(struct kernfs_open_file *of, 7397 char *buf, size_t nbytes, loff_t off) 7398 { 7399 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7400 unsigned long max; 7401 int err; 7402 7403 buf = strstrip(buf); 7404 err = page_counter_memparse(buf, "max", &max); 7405 if (err) 7406 return err; 7407 7408 xchg(&memcg->swap.max, max); 7409 7410 return nbytes; 7411 } 7412 7413 static int swap_events_show(struct seq_file *m, void *v) 7414 { 7415 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7416 7417 seq_printf(m, "high %lu\n", 7418 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7419 seq_printf(m, "max %lu\n", 7420 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7421 seq_printf(m, "fail %lu\n", 7422 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7423 7424 return 0; 7425 } 7426 7427 static struct cftype swap_files[] = { 7428 { 7429 .name = "swap.current", 7430 .flags = CFTYPE_NOT_ON_ROOT, 7431 .read_u64 = swap_current_read, 7432 }, 7433 { 7434 .name = "swap.high", 7435 .flags = CFTYPE_NOT_ON_ROOT, 7436 .seq_show = swap_high_show, 7437 .write = swap_high_write, 7438 }, 7439 { 7440 .name = "swap.max", 7441 .flags = CFTYPE_NOT_ON_ROOT, 7442 .seq_show = swap_max_show, 7443 .write = swap_max_write, 7444 }, 7445 { 7446 .name = "swap.events", 7447 .flags = CFTYPE_NOT_ON_ROOT, 7448 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7449 .seq_show = swap_events_show, 7450 }, 7451 { } /* terminate */ 7452 }; 7453 7454 static struct cftype memsw_files[] = { 7455 { 7456 .name = "memsw.usage_in_bytes", 7457 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7458 .read_u64 = mem_cgroup_read_u64, 7459 }, 7460 { 7461 .name = "memsw.max_usage_in_bytes", 7462 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7463 .write = mem_cgroup_reset, 7464 .read_u64 = mem_cgroup_read_u64, 7465 }, 7466 { 7467 .name = "memsw.limit_in_bytes", 7468 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7469 .write = mem_cgroup_write, 7470 .read_u64 = mem_cgroup_read_u64, 7471 }, 7472 { 7473 .name = "memsw.failcnt", 7474 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7475 .write = mem_cgroup_reset, 7476 .read_u64 = mem_cgroup_read_u64, 7477 }, 7478 { }, /* terminate */ 7479 }; 7480 7481 /* 7482 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7483 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7484 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7485 * boot parameter. This may result in premature OOPS inside 7486 * mem_cgroup_get_nr_swap_pages() function in corner cases. 
7487 */ 7488 static int __init mem_cgroup_swap_init(void) 7489 { 7490 /* No memory control -> no swap control */ 7491 if (mem_cgroup_disabled()) 7492 cgroup_memory_noswap = true; 7493 7494 if (cgroup_memory_noswap) 7495 return 0; 7496 7497 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7498 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7499 7500 return 0; 7501 } 7502 core_initcall(mem_cgroup_swap_init); 7503 7504 #endif /* CONFIG_MEMCG_SWAP */ 7505
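/*
 * Illustrative userspace sketch (not part of the kernel build, hence the
 * #if 0 guard): it exercises the cgroup v2 interface files implemented
 * above - memory.max, memory.current and memory.events - the same way an
 * administrator would with echo/cat. The mount point /sys/fs/cgroup and
 * the child group name "example" are assumptions; memory.max accepts
 * either a byte count or the literal "max", mirroring
 * page_counter_memparse() and seq_puts_memcg_tunable() above.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

static void dump_file(const char *path)
{
	char buf[4096];
	ssize_t len;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return;
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		printf("%s:\n%s", path, buf);
	}
	close(fd);
}

int main(void)
{
	/* Cap the group at 256MiB; writing "max" would remove the limit. */
	write_file("/sys/fs/cgroup/example/memory.max", "268435456");

	/* Current usage in bytes, as produced by memory_current_read(). */
	dump_file("/sys/fs/cgroup/example/memory.current");

	/* low/high/max/oom/oom_kill counters from memory_events_show(). */
	dump_file("/sys/fs/cgroup/example/memory.events");

	return 0;
}
#endif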