1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> 9 * 10 * Memory thresholds 11 * Copyright (C) 2009 Nokia Corporation 12 * Author: Kirill A. Shutemov 13 * 14 * Kernel Memory Controller 15 * Copyright (C) 2012 Parallels Inc. and Google Inc. 16 * Authors: Glauber Costa and Suleiman Souhlal 17 * 18 * Native page reclaim 19 * Charge lifetime sanitation 20 * Lockless page tracking & accounting 21 * Unified hierarchy configuration model 22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner 23 * 24 * Per memcg lru locking 25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi 26 */ 27 28 #include <linux/page_counter.h> 29 #include <linux/memcontrol.h> 30 #include <linux/cgroup.h> 31 #include <linux/pagewalk.h> 32 #include <linux/sched/mm.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/hugetlb.h> 35 #include <linux/pagemap.h> 36 #include <linux/vm_event_item.h> 37 #include <linux/smp.h> 38 #include <linux/page-flags.h> 39 #include <linux/backing-dev.h> 40 #include <linux/bit_spinlock.h> 41 #include <linux/rcupdate.h> 42 #include <linux/limits.h> 43 #include <linux/export.h> 44 #include <linux/mutex.h> 45 #include <linux/rbtree.h> 46 #include <linux/slab.h> 47 #include <linux/swap.h> 48 #include <linux/swapops.h> 49 #include <linux/spinlock.h> 50 #include <linux/eventfd.h> 51 #include <linux/poll.h> 52 #include <linux/sort.h> 53 #include <linux/fs.h> 54 #include <linux/seq_file.h> 55 #include <linux/vmpressure.h> 56 #include <linux/mm_inline.h> 57 #include <linux/swap_cgroup.h> 58 #include <linux/cpu.h> 59 #include <linux/oom.h> 60 #include <linux/lockdep.h> 61 #include <linux/file.h> 62 #include <linux/tracehook.h> 63 #include <linux/psi.h> 64 #include <linux/seq_buf.h> 65 #include "internal.h" 66 #include <net/sock.h> 67 #include <net/ip.h> 68 #include "slab.h" 69 70 #include <linux/uaccess.h> 71 72 #include <trace/events/vmscan.h> 73 74 struct cgroup_subsys memory_cgrp_subsys __read_mostly; 75 EXPORT_SYMBOL(memory_cgrp_subsys); 76 77 struct mem_cgroup *root_mem_cgroup __read_mostly; 78 79 /* Active memory cgroup to use from an interrupt context */ 80 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); 81 82 /* Socket memory accounting disabled? */ 83 static bool cgroup_memory_nosocket; 84 85 /* Kernel memory accounting disabled? 
 */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap 1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON 0x1U
#define MOVE_FILE 0x2U
#define MOVE_MASK (MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task; /* a task moving charges */
	wait_queue_head_t waitq; /* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL (0)
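/*
 * Editor's illustration (not part of the original source): MEMFILE_PRIVATE()
 * packs the res_type into the upper 16 bits of cft->private and the attribute
 * into the lower 16 bits, so a hypothetical entry such as
 * MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) can later be split back apart with
 * MEMFILE_TYPE() and MEMFILE_ATTR() in the read/write handlers (RES_LIMIT
 * standing in for one of the RES_* attribute values used by the cgroup1
 * file handlers elsewhere in this file).
 */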
/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	struct mem_cgroup *memcg;
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *    PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *    the stock is flushed,
	 *    objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *    92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *    92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
285 */ 286 nr_bytes = atomic_read(&objcg->nr_charged_bytes); 287 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); 288 nr_pages = nr_bytes >> PAGE_SHIFT; 289 290 spin_lock_irqsave(&css_set_lock, flags); 291 memcg = obj_cgroup_memcg(objcg); 292 if (nr_pages) 293 __memcg_kmem_uncharge(memcg, nr_pages); 294 list_del(&objcg->list); 295 mem_cgroup_put(memcg); 296 spin_unlock_irqrestore(&css_set_lock, flags); 297 298 percpu_ref_exit(ref); 299 kfree_rcu(objcg, rcu); 300 } 301 302 static struct obj_cgroup *obj_cgroup_alloc(void) 303 { 304 struct obj_cgroup *objcg; 305 int ret; 306 307 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); 308 if (!objcg) 309 return NULL; 310 311 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, 312 GFP_KERNEL); 313 if (ret) { 314 kfree(objcg); 315 return NULL; 316 } 317 INIT_LIST_HEAD(&objcg->list); 318 return objcg; 319 } 320 321 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, 322 struct mem_cgroup *parent) 323 { 324 struct obj_cgroup *objcg, *iter; 325 326 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); 327 328 spin_lock_irq(&css_set_lock); 329 330 /* Move active objcg to the parent's list */ 331 xchg(&objcg->memcg, parent); 332 css_get(&parent->css); 333 list_add(&objcg->list, &parent->objcg_list); 334 335 /* Move already reparented objcgs to the parent's list */ 336 list_for_each_entry(iter, &memcg->objcg_list, list) { 337 css_get(&parent->css); 338 xchg(&iter->memcg, parent); 339 css_put(&memcg->css); 340 } 341 list_splice(&memcg->objcg_list, &parent->objcg_list); 342 343 spin_unlock_irq(&css_set_lock); 344 345 percpu_ref_kill(&objcg->refcnt); 346 } 347 348 /* 349 * This will be used as a shrinker list's index. 350 * The main reason for not using cgroup id for this: 351 * this works better in sparse environments, where we have a lot of memcgs, 352 * but only a few kmem-limited. Or also, if we have, for instance, 200 353 * memcgs, and none but the 200th is kmem-limited, we'd have to have a 354 * 200 entry array for that. 355 * 356 * The current size of the caches array is stored in memcg_nr_cache_ids. It 357 * will double each time we have to increase it. 358 */ 359 static DEFINE_IDA(memcg_cache_ida); 360 int memcg_nr_cache_ids; 361 362 /* Protects memcg_nr_cache_ids */ 363 static DECLARE_RWSEM(memcg_cache_ids_sem); 364 365 void memcg_get_cache_ids(void) 366 { 367 down_read(&memcg_cache_ids_sem); 368 } 369 370 void memcg_put_cache_ids(void) 371 { 372 up_read(&memcg_cache_ids_sem); 373 } 374 375 /* 376 * MIN_SIZE is different than 1, because we would like to avoid going through 377 * the alloc/free process all the time. In a small machine, 4 kmem-limited 378 * cgroups is a reasonable guess. In the future, it could be a parameter or 379 * tunable, but that is strictly not necessary. 380 * 381 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get 382 * this constant directly from cgroup, but it is understandable that this is 383 * better kept as an internal representation in cgroup.c. In any case, the 384 * cgrp_id space is not getting any smaller, and we don't have to necessarily 385 * increase ours as well if it increases. 386 */ 387 #define MEMCG_CACHES_MIN_SIZE 4 388 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX 389 390 /* 391 * A lot of the calls to the cache allocation functions are expected to be 392 * inlined by the compiler. 
Since the calls to memcg_slab_pre_alloc_hook() are 393 * conditional to this static branch, we'll have to allow modules that does 394 * kmem_cache_alloc and the such to see this symbol as well 395 */ 396 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); 397 EXPORT_SYMBOL(memcg_kmem_enabled_key); 398 #endif 399 400 static int memcg_shrinker_map_size; 401 static DEFINE_MUTEX(memcg_shrinker_map_mutex); 402 403 static void memcg_free_shrinker_map_rcu(struct rcu_head *head) 404 { 405 kvfree(container_of(head, struct memcg_shrinker_map, rcu)); 406 } 407 408 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, 409 int size, int old_size) 410 { 411 struct memcg_shrinker_map *new, *old; 412 int nid; 413 414 lockdep_assert_held(&memcg_shrinker_map_mutex); 415 416 for_each_node(nid) { 417 old = rcu_dereference_protected( 418 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); 419 /* Not yet online memcg */ 420 if (!old) 421 return 0; 422 423 new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid); 424 if (!new) 425 return -ENOMEM; 426 427 /* Set all old bits, clear all new bits */ 428 memset(new->map, (int)0xff, old_size); 429 memset((void *)new->map + old_size, 0, size - old_size); 430 431 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); 432 call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); 433 } 434 435 return 0; 436 } 437 438 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) 439 { 440 struct mem_cgroup_per_node *pn; 441 struct memcg_shrinker_map *map; 442 int nid; 443 444 if (mem_cgroup_is_root(memcg)) 445 return; 446 447 for_each_node(nid) { 448 pn = mem_cgroup_nodeinfo(memcg, nid); 449 map = rcu_dereference_protected(pn->shrinker_map, true); 450 if (map) 451 kvfree(map); 452 rcu_assign_pointer(pn->shrinker_map, NULL); 453 } 454 } 455 456 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) 457 { 458 struct memcg_shrinker_map *map; 459 int nid, size, ret = 0; 460 461 if (mem_cgroup_is_root(memcg)) 462 return 0; 463 464 mutex_lock(&memcg_shrinker_map_mutex); 465 size = memcg_shrinker_map_size; 466 for_each_node(nid) { 467 map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid); 468 if (!map) { 469 memcg_free_shrinker_maps(memcg); 470 ret = -ENOMEM; 471 break; 472 } 473 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); 474 } 475 mutex_unlock(&memcg_shrinker_map_mutex); 476 477 return ret; 478 } 479 480 int memcg_expand_shrinker_maps(int new_id) 481 { 482 int size, old_size, ret = 0; 483 struct mem_cgroup *memcg; 484 485 size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long); 486 old_size = memcg_shrinker_map_size; 487 if (size <= old_size) 488 return 0; 489 490 mutex_lock(&memcg_shrinker_map_mutex); 491 if (!root_mem_cgroup) 492 goto unlock; 493 494 for_each_mem_cgroup(memcg) { 495 if (mem_cgroup_is_root(memcg)) 496 continue; 497 ret = memcg_expand_one_shrinker_map(memcg, size, old_size); 498 if (ret) { 499 mem_cgroup_iter_break(NULL, memcg); 500 goto unlock; 501 } 502 } 503 unlock: 504 if (!ret) 505 memcg_shrinker_map_size = size; 506 mutex_unlock(&memcg_shrinker_map_mutex); 507 return ret; 508 } 509 510 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) 511 { 512 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { 513 struct memcg_shrinker_map *map; 514 515 rcu_read_lock(); 516 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); 517 /* Pairs with smp mb in shrink_slab() */ 518 smp_mb__before_atomic(); 519 set_bit(shrinker_id, map->map); 520 rcu_read_unlock(); 521 } 522 } 
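/*
 * Editor's note (not part of the original source): memcg_expand_shrinker_maps()
 * above rounds the per-node bitmap up to whole longs, so on a 64-bit kernel a
 * new shrinker id of, say, 100 gives size = DIV_ROUND_UP(101, 64) *
 * sizeof(unsigned long) = 16 bytes. memcg_set_shrinker_bit() then pairs its
 * smp_mb__before_atomic() with the barrier in shrink_slab() so a freshly set
 * bit is not missed by a concurrent walker.
 */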
523 524 /** 525 * mem_cgroup_css_from_page - css of the memcg associated with a page 526 * @page: page of interest 527 * 528 * If memcg is bound to the default hierarchy, css of the memcg associated 529 * with @page is returned. The returned css remains associated with @page 530 * until it is released. 531 * 532 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup 533 * is returned. 534 */ 535 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page) 536 { 537 struct mem_cgroup *memcg; 538 539 memcg = page_memcg(page); 540 541 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 542 memcg = root_mem_cgroup; 543 544 return &memcg->css; 545 } 546 547 /** 548 * page_cgroup_ino - return inode number of the memcg a page is charged to 549 * @page: the page 550 * 551 * Look up the closest online ancestor of the memory cgroup @page is charged to 552 * and return its inode number or 0 if @page is not charged to any cgroup. It 553 * is safe to call this function without holding a reference to @page. 554 * 555 * Note, this function is inherently racy, because there is nothing to prevent 556 * the cgroup inode from getting torn down and potentially reallocated a moment 557 * after page_cgroup_ino() returns, so it only should be used by callers that 558 * do not care (such as procfs interfaces). 559 */ 560 ino_t page_cgroup_ino(struct page *page) 561 { 562 struct mem_cgroup *memcg; 563 unsigned long ino = 0; 564 565 rcu_read_lock(); 566 memcg = page_memcg_check(page); 567 568 while (memcg && !(memcg->css.flags & CSS_ONLINE)) 569 memcg = parent_mem_cgroup(memcg); 570 if (memcg) 571 ino = cgroup_ino(memcg->css.cgroup); 572 rcu_read_unlock(); 573 return ino; 574 } 575 576 static struct mem_cgroup_per_node * 577 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) 578 { 579 int nid = page_to_nid(page); 580 581 return memcg->nodeinfo[nid]; 582 } 583 584 static struct mem_cgroup_tree_per_node * 585 soft_limit_tree_node(int nid) 586 { 587 return soft_limit_tree.rb_tree_per_node[nid]; 588 } 589 590 static struct mem_cgroup_tree_per_node * 591 soft_limit_tree_from_page(struct page *page) 592 { 593 int nid = page_to_nid(page); 594 595 return soft_limit_tree.rb_tree_per_node[nid]; 596 } 597 598 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz, 599 struct mem_cgroup_tree_per_node *mctz, 600 unsigned long new_usage_in_excess) 601 { 602 struct rb_node **p = &mctz->rb_root.rb_node; 603 struct rb_node *parent = NULL; 604 struct mem_cgroup_per_node *mz_node; 605 bool rightmost = true; 606 607 if (mz->on_tree) 608 return; 609 610 mz->usage_in_excess = new_usage_in_excess; 611 if (!mz->usage_in_excess) 612 return; 613 while (*p) { 614 parent = *p; 615 mz_node = rb_entry(parent, struct mem_cgroup_per_node, 616 tree_node); 617 if (mz->usage_in_excess < mz_node->usage_in_excess) { 618 p = &(*p)->rb_left; 619 rightmost = false; 620 } else { 621 p = &(*p)->rb_right; 622 } 623 } 624 625 if (rightmost) 626 mctz->rb_rightmost = &mz->tree_node; 627 628 rb_link_node(&mz->tree_node, parent, p); 629 rb_insert_color(&mz->tree_node, &mctz->rb_root); 630 mz->on_tree = true; 631 } 632 633 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 634 struct mem_cgroup_tree_per_node *mctz) 635 { 636 if (!mz->on_tree) 637 return; 638 639 if (&mz->tree_node == mctz->rb_rightmost) 640 mctz->rb_rightmost = rb_prev(&mz->tree_node); 641 642 rb_erase(&mz->tree_node, &mctz->rb_root); 643 mz->on_tree = false; 644 } 645 646 static void 
mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz, 647 struct mem_cgroup_tree_per_node *mctz) 648 { 649 unsigned long flags; 650 651 spin_lock_irqsave(&mctz->lock, flags); 652 __mem_cgroup_remove_exceeded(mz, mctz); 653 spin_unlock_irqrestore(&mctz->lock, flags); 654 } 655 656 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) 657 { 658 unsigned long nr_pages = page_counter_read(&memcg->memory); 659 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); 660 unsigned long excess = 0; 661 662 if (nr_pages > soft_limit) 663 excess = nr_pages - soft_limit; 664 665 return excess; 666 } 667 668 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) 669 { 670 unsigned long excess; 671 struct mem_cgroup_per_node *mz; 672 struct mem_cgroup_tree_per_node *mctz; 673 674 mctz = soft_limit_tree_from_page(page); 675 if (!mctz) 676 return; 677 /* 678 * Necessary to update all ancestors when hierarchy is used. 679 * because their event counter is not touched. 680 */ 681 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 682 mz = mem_cgroup_page_nodeinfo(memcg, page); 683 excess = soft_limit_excess(memcg); 684 /* 685 * We have to update the tree if mz is on RB-tree or 686 * mem is over its softlimit. 687 */ 688 if (excess || mz->on_tree) { 689 unsigned long flags; 690 691 spin_lock_irqsave(&mctz->lock, flags); 692 /* if on-tree, remove it */ 693 if (mz->on_tree) 694 __mem_cgroup_remove_exceeded(mz, mctz); 695 /* 696 * Insert again. mz->usage_in_excess will be updated. 697 * If excess is 0, no tree ops. 698 */ 699 __mem_cgroup_insert_exceeded(mz, mctz, excess); 700 spin_unlock_irqrestore(&mctz->lock, flags); 701 } 702 } 703 } 704 705 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) 706 { 707 struct mem_cgroup_tree_per_node *mctz; 708 struct mem_cgroup_per_node *mz; 709 int nid; 710 711 for_each_node(nid) { 712 mz = mem_cgroup_nodeinfo(memcg, nid); 713 mctz = soft_limit_tree_node(nid); 714 if (mctz) 715 mem_cgroup_remove_exceeded(mz, mctz); 716 } 717 } 718 719 static struct mem_cgroup_per_node * 720 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 721 { 722 struct mem_cgroup_per_node *mz; 723 724 retry: 725 mz = NULL; 726 if (!mctz->rb_rightmost) 727 goto done; /* Nothing to reclaim from */ 728 729 mz = rb_entry(mctz->rb_rightmost, 730 struct mem_cgroup_per_node, tree_node); 731 /* 732 * Remove the node now but someone else can add it back, 733 * we will to add it back at the end of reclaim to its correct 734 * position in the tree. 
735 */ 736 __mem_cgroup_remove_exceeded(mz, mctz); 737 if (!soft_limit_excess(mz->memcg) || 738 !css_tryget(&mz->memcg->css)) 739 goto retry; 740 done: 741 return mz; 742 } 743 744 static struct mem_cgroup_per_node * 745 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) 746 { 747 struct mem_cgroup_per_node *mz; 748 749 spin_lock_irq(&mctz->lock); 750 mz = __mem_cgroup_largest_soft_limit_node(mctz); 751 spin_unlock_irq(&mctz->lock); 752 return mz; 753 } 754 755 /** 756 * __mod_memcg_state - update cgroup memory statistics 757 * @memcg: the memory cgroup 758 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item 759 * @val: delta to add to the counter, can be negative 760 */ 761 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) 762 { 763 long x, threshold = MEMCG_CHARGE_BATCH; 764 765 if (mem_cgroup_disabled()) 766 return; 767 768 if (memcg_stat_item_in_bytes(idx)) 769 threshold <<= PAGE_SHIFT; 770 771 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); 772 if (unlikely(abs(x) > threshold)) { 773 struct mem_cgroup *mi; 774 775 /* 776 * Batch local counters to keep them in sync with 777 * the hierarchical ones. 778 */ 779 __this_cpu_add(memcg->vmstats_local->stat[idx], x); 780 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 781 atomic_long_add(x, &mi->vmstats[idx]); 782 x = 0; 783 } 784 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); 785 } 786 787 static struct mem_cgroup_per_node * 788 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) 789 { 790 struct mem_cgroup *parent; 791 792 parent = parent_mem_cgroup(pn->memcg); 793 if (!parent) 794 return NULL; 795 return mem_cgroup_nodeinfo(parent, nid); 796 } 797 798 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 799 int val) 800 { 801 struct mem_cgroup_per_node *pn; 802 struct mem_cgroup *memcg; 803 long x, threshold = MEMCG_CHARGE_BATCH; 804 805 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 806 memcg = pn->memcg; 807 808 /* Update memcg */ 809 __mod_memcg_state(memcg, idx, val); 810 811 /* Update lruvec */ 812 __this_cpu_add(pn->lruvec_stat_local->count[idx], val); 813 814 if (vmstat_item_in_bytes(idx)) 815 threshold <<= PAGE_SHIFT; 816 817 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 818 if (unlikely(abs(x) > threshold)) { 819 pg_data_t *pgdat = lruvec_pgdat(lruvec); 820 struct mem_cgroup_per_node *pi; 821 822 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 823 atomic_long_add(x, &pi->lruvec_stat[idx]); 824 x = 0; 825 } 826 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 827 } 828 829 /** 830 * __mod_lruvec_state - update lruvec memory statistics 831 * @lruvec: the lruvec 832 * @idx: the stat item 833 * @val: delta to add to the counter, can be negative 834 * 835 * The lruvec is the intersection of the NUMA node and a cgroup. This 836 * function updates the all three counters that are affected by a 837 * change of state at this level: per-node, per-cgroup, per-lruvec. 
838 */ 839 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 840 int val) 841 { 842 /* Update node */ 843 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 844 845 /* Update memcg and lruvec */ 846 if (!mem_cgroup_disabled()) 847 __mod_memcg_lruvec_state(lruvec, idx, val); 848 } 849 850 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, 851 int val) 852 { 853 struct page *head = compound_head(page); /* rmap on tail pages */ 854 struct mem_cgroup *memcg = page_memcg(head); 855 pg_data_t *pgdat = page_pgdat(page); 856 struct lruvec *lruvec; 857 858 /* Untracked pages have no memcg, no lruvec. Update only the node */ 859 if (!memcg) { 860 __mod_node_page_state(pgdat, idx, val); 861 return; 862 } 863 864 lruvec = mem_cgroup_lruvec(memcg, pgdat); 865 __mod_lruvec_state(lruvec, idx, val); 866 } 867 EXPORT_SYMBOL(__mod_lruvec_page_state); 868 869 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) 870 { 871 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); 872 struct mem_cgroup *memcg; 873 struct lruvec *lruvec; 874 875 rcu_read_lock(); 876 memcg = mem_cgroup_from_obj(p); 877 878 /* 879 * Untracked pages have no memcg, no lruvec. Update only the 880 * node. If we reparent the slab objects to the root memcg, 881 * when we free the slab object, we need to update the per-memcg 882 * vmstats to keep it correct for the root memcg. 883 */ 884 if (!memcg) { 885 __mod_node_page_state(pgdat, idx, val); 886 } else { 887 lruvec = mem_cgroup_lruvec(memcg, pgdat); 888 __mod_lruvec_state(lruvec, idx, val); 889 } 890 rcu_read_unlock(); 891 } 892 893 /** 894 * __count_memcg_events - account VM events in a cgroup 895 * @memcg: the memory cgroup 896 * @idx: the event item 897 * @count: the number of events that occured 898 */ 899 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, 900 unsigned long count) 901 { 902 unsigned long x; 903 904 if (mem_cgroup_disabled()) 905 return; 906 907 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); 908 if (unlikely(x > MEMCG_CHARGE_BATCH)) { 909 struct mem_cgroup *mi; 910 911 /* 912 * Batch local counters to keep them in sync with 913 * the hierarchical ones. 914 */ 915 __this_cpu_add(memcg->vmstats_local->events[idx], x); 916 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 917 atomic_long_add(x, &mi->vmevents[idx]); 918 x = 0; 919 } 920 __this_cpu_write(memcg->vmstats_percpu->events[idx], x); 921 } 922 923 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) 924 { 925 return atomic_long_read(&memcg->vmevents[event]); 926 } 927 928 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) 929 { 930 long x = 0; 931 int cpu; 932 933 for_each_possible_cpu(cpu) 934 x += per_cpu(memcg->vmstats_local->events[event], cpu); 935 return x; 936 } 937 938 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, 939 struct page *page, 940 int nr_pages) 941 { 942 /* pagein of a big page is an event. 
So, ignore page size */ 943 if (nr_pages > 0) 944 __count_memcg_events(memcg, PGPGIN, 1); 945 else { 946 __count_memcg_events(memcg, PGPGOUT, 1); 947 nr_pages = -nr_pages; /* for event */ 948 } 949 950 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 951 } 952 953 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, 954 enum mem_cgroup_events_target target) 955 { 956 unsigned long val, next; 957 958 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 959 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 960 /* from time_after() in jiffies.h */ 961 if ((long)(next - val) < 0) { 962 switch (target) { 963 case MEM_CGROUP_TARGET_THRESH: 964 next = val + THRESHOLDS_EVENTS_TARGET; 965 break; 966 case MEM_CGROUP_TARGET_SOFTLIMIT: 967 next = val + SOFTLIMIT_EVENTS_TARGET; 968 break; 969 default: 970 break; 971 } 972 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 973 return true; 974 } 975 return false; 976 } 977 978 /* 979 * Check events in order. 980 * 981 */ 982 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) 983 { 984 /* threshold event is triggered in finer grain than soft limit */ 985 if (unlikely(mem_cgroup_event_ratelimit(memcg, 986 MEM_CGROUP_TARGET_THRESH))) { 987 bool do_softlimit; 988 989 do_softlimit = mem_cgroup_event_ratelimit(memcg, 990 MEM_CGROUP_TARGET_SOFTLIMIT); 991 mem_cgroup_threshold(memcg); 992 if (unlikely(do_softlimit)) 993 mem_cgroup_update_tree(memcg, page); 994 } 995 } 996 997 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 998 { 999 /* 1000 * mm_update_next_owner() may clear mm->owner to NULL 1001 * if it races with swapoff, page migration, etc. 1002 * So this can be called with p == NULL. 1003 */ 1004 if (unlikely(!p)) 1005 return NULL; 1006 1007 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 1008 } 1009 EXPORT_SYMBOL(mem_cgroup_from_task); 1010 1011 /** 1012 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. 1013 * @mm: mm from which memcg should be extracted. It can be NULL. 1014 * 1015 * Obtain a reference on mm->memcg and returns it if successful. Otherwise 1016 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is 1017 * returned. 1018 */ 1019 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) 1020 { 1021 struct mem_cgroup *memcg; 1022 1023 if (mem_cgroup_disabled()) 1024 return NULL; 1025 1026 rcu_read_lock(); 1027 do { 1028 /* 1029 * Page cache insertions can happen withou an 1030 * actual mm context, e.g. during disk probing 1031 * on boot, loopback IO, acct() writes etc. 1032 */ 1033 if (unlikely(!mm)) 1034 memcg = root_mem_cgroup; 1035 else { 1036 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1037 if (unlikely(!memcg)) 1038 memcg = root_mem_cgroup; 1039 } 1040 } while (!css_tryget(&memcg->css)); 1041 rcu_read_unlock(); 1042 return memcg; 1043 } 1044 EXPORT_SYMBOL(get_mem_cgroup_from_mm); 1045 1046 /** 1047 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg. 1048 * @page: page from which memcg should be extracted. 1049 * 1050 * Obtain a reference on page->memcg and returns it if successful. Otherwise 1051 * root_mem_cgroup is returned. 1052 */ 1053 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page) 1054 { 1055 struct mem_cgroup *memcg = page_memcg(page); 1056 1057 if (mem_cgroup_disabled()) 1058 return NULL; 1059 1060 rcu_read_lock(); 1061 /* Page should not get uncharged and freed memcg under us. 
*/ 1062 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) 1063 memcg = root_mem_cgroup; 1064 rcu_read_unlock(); 1065 return memcg; 1066 } 1067 EXPORT_SYMBOL(get_mem_cgroup_from_page); 1068 1069 static __always_inline struct mem_cgroup *active_memcg(void) 1070 { 1071 if (in_interrupt()) 1072 return this_cpu_read(int_active_memcg); 1073 else 1074 return current->active_memcg; 1075 } 1076 1077 static __always_inline struct mem_cgroup *get_active_memcg(void) 1078 { 1079 struct mem_cgroup *memcg; 1080 1081 rcu_read_lock(); 1082 memcg = active_memcg(); 1083 if (memcg) { 1084 /* current->active_memcg must hold a ref. */ 1085 if (WARN_ON_ONCE(!css_tryget(&memcg->css))) 1086 memcg = root_mem_cgroup; 1087 else 1088 memcg = current->active_memcg; 1089 } 1090 rcu_read_unlock(); 1091 1092 return memcg; 1093 } 1094 1095 static __always_inline bool memcg_kmem_bypass(void) 1096 { 1097 /* Allow remote memcg charging from any context. */ 1098 if (unlikely(active_memcg())) 1099 return false; 1100 1101 /* Memcg to charge can't be determined. */ 1102 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) 1103 return true; 1104 1105 return false; 1106 } 1107 1108 /** 1109 * If active memcg is set, do not fallback to current->mm->memcg. 1110 */ 1111 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void) 1112 { 1113 if (memcg_kmem_bypass()) 1114 return NULL; 1115 1116 if (unlikely(active_memcg())) 1117 return get_active_memcg(); 1118 1119 return get_mem_cgroup_from_mm(current->mm); 1120 } 1121 1122 /** 1123 * mem_cgroup_iter - iterate over memory cgroup hierarchy 1124 * @root: hierarchy root 1125 * @prev: previously returned memcg, NULL on first invocation 1126 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1127 * 1128 * Returns references to children of the hierarchy below @root, or 1129 * @root itself, or %NULL after a full round-trip. 1130 * 1131 * Caller must pass the return value in @prev on subsequent 1132 * invocations for reference counting, or use mem_cgroup_iter_break() 1133 * to cancel a hierarchy walk before the round-trip is complete. 1134 * 1135 * Reclaimers can specify a node in @reclaim to divide up the memcgs 1136 * in the hierarchy among all concurrent reclaimers operating on the 1137 * same node. 1138 */ 1139 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, 1140 struct mem_cgroup *prev, 1141 struct mem_cgroup_reclaim_cookie *reclaim) 1142 { 1143 struct mem_cgroup_reclaim_iter *iter; 1144 struct cgroup_subsys_state *css = NULL; 1145 struct mem_cgroup *memcg = NULL; 1146 struct mem_cgroup *pos = NULL; 1147 1148 if (mem_cgroup_disabled()) 1149 return NULL; 1150 1151 if (!root) 1152 root = root_mem_cgroup; 1153 1154 if (prev && !reclaim) 1155 pos = prev; 1156 1157 rcu_read_lock(); 1158 1159 if (reclaim) { 1160 struct mem_cgroup_per_node *mz; 1161 1162 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id); 1163 iter = &mz->iter; 1164 1165 if (prev && reclaim->generation != iter->generation) 1166 goto out_unlock; 1167 1168 while (1) { 1169 pos = READ_ONCE(iter->position); 1170 if (!pos || css_tryget(&pos->css)) 1171 break; 1172 /* 1173 * css reference reached zero, so iter->position will 1174 * be cleared by ->css_released. However, we should not 1175 * rely on this happening soon, because ->css_released 1176 * is called from a work queue, and by busy-waiting we 1177 * might block it. So we clear iter->position right 1178 * away. 
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}
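/*
 * Editor's illustration (not part of the original source): a typical walk
 * pairs the iterator with mem_cgroup_iter_break() when bailing out early;
 * some_condition() is a hypothetical predicate:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */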
static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from the cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = page_memcg(page);

	if (!memcg)
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
	else
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
}
#endif

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page's memcg being stable - see the
 * access rules in commit_charge().
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	memcg = page_memcg(page);
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * lock_page_lruvec - lock and return lruvec for a given page.
 * @page: the page
 *
 * These functions should only be used when one of the following holds:
 * - PageLRU is cleared or unset
 * - page->_refcount is zero
 * - the page is locked.
1393 */ 1394 struct lruvec *lock_page_lruvec(struct page *page) 1395 { 1396 struct lruvec *lruvec; 1397 struct pglist_data *pgdat = page_pgdat(page); 1398 1399 rcu_read_lock(); 1400 lruvec = mem_cgroup_page_lruvec(page, pgdat); 1401 spin_lock(&lruvec->lru_lock); 1402 rcu_read_unlock(); 1403 1404 lruvec_memcg_debug(lruvec, page); 1405 1406 return lruvec; 1407 } 1408 1409 struct lruvec *lock_page_lruvec_irq(struct page *page) 1410 { 1411 struct lruvec *lruvec; 1412 struct pglist_data *pgdat = page_pgdat(page); 1413 1414 rcu_read_lock(); 1415 lruvec = mem_cgroup_page_lruvec(page, pgdat); 1416 spin_lock_irq(&lruvec->lru_lock); 1417 rcu_read_unlock(); 1418 1419 lruvec_memcg_debug(lruvec, page); 1420 1421 return lruvec; 1422 } 1423 1424 struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags) 1425 { 1426 struct lruvec *lruvec; 1427 struct pglist_data *pgdat = page_pgdat(page); 1428 1429 rcu_read_lock(); 1430 lruvec = mem_cgroup_page_lruvec(page, pgdat); 1431 spin_lock_irqsave(&lruvec->lru_lock, *flags); 1432 rcu_read_unlock(); 1433 1434 lruvec_memcg_debug(lruvec, page); 1435 1436 return lruvec; 1437 } 1438 1439 /** 1440 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1441 * @lruvec: mem_cgroup per zone lru vector 1442 * @lru: index of lru list the page is sitting on 1443 * @zid: zone id of the accounted pages 1444 * @nr_pages: positive when adding or negative when removing 1445 * 1446 * This function must be called under lru_lock, just before a page is added 1447 * to or just after a page is removed from an lru list (that ordering being 1448 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1449 */ 1450 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1451 int zid, int nr_pages) 1452 { 1453 struct mem_cgroup_per_node *mz; 1454 unsigned long *lru_size; 1455 long size; 1456 1457 if (mem_cgroup_disabled()) 1458 return; 1459 1460 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1461 lru_size = &mz->lru_zone_size[zid][lru]; 1462 1463 if (nr_pages < 0) 1464 *lru_size += nr_pages; 1465 1466 size = *lru_size; 1467 if (WARN_ONCE(size < 0, 1468 "%s(%p, %d, %d): lru_size %ld\n", 1469 __func__, lruvec, lru, nr_pages, size)) { 1470 VM_BUG_ON(1); 1471 *lru_size = 0; 1472 } 1473 1474 if (nr_pages > 0) 1475 *lru_size += nr_pages; 1476 } 1477 1478 /** 1479 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1480 * @memcg: the memory cgroup 1481 * 1482 * Returns the maximum amount of memory @mem can be charged with, in 1483 * pages. 1484 */ 1485 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) 1486 { 1487 unsigned long margin = 0; 1488 unsigned long count; 1489 unsigned long limit; 1490 1491 count = page_counter_read(&memcg->memory); 1492 limit = READ_ONCE(memcg->memory.max); 1493 if (count < limit) 1494 margin = limit - count; 1495 1496 if (do_memsw_account()) { 1497 count = page_counter_read(&memcg->memsw); 1498 limit = READ_ONCE(memcg->memsw.max); 1499 if (count < limit) 1500 margin = min(margin, limit - count); 1501 else 1502 margin = 0; 1503 } 1504 1505 return margin; 1506 } 1507 1508 /* 1509 * A routine for checking "mem" is under move_account() or not. 1510 * 1511 * Checking a cgroup is mc.from or mc.to or under hierarchy of 1512 * moving cgroups. This is for waiting at high-memory pressure 1513 * caused by "move". 
1514 */ 1515 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) 1516 { 1517 struct mem_cgroup *from; 1518 struct mem_cgroup *to; 1519 bool ret = false; 1520 /* 1521 * Unlike task_move routines, we access mc.to, mc.from not under 1522 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead. 1523 */ 1524 spin_lock(&mc.lock); 1525 from = mc.from; 1526 to = mc.to; 1527 if (!from) 1528 goto unlock; 1529 1530 ret = mem_cgroup_is_descendant(from, memcg) || 1531 mem_cgroup_is_descendant(to, memcg); 1532 unlock: 1533 spin_unlock(&mc.lock); 1534 return ret; 1535 } 1536 1537 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) 1538 { 1539 if (mc.moving_task && current != mc.moving_task) { 1540 if (mem_cgroup_under_move(memcg)) { 1541 DEFINE_WAIT(wait); 1542 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); 1543 /* moving charge context might have finished. */ 1544 if (mc.moving_task) 1545 schedule(); 1546 finish_wait(&mc.waitq, &wait); 1547 return true; 1548 } 1549 } 1550 return false; 1551 } 1552 1553 struct memory_stat { 1554 const char *name; 1555 unsigned int ratio; 1556 unsigned int idx; 1557 }; 1558 1559 static struct memory_stat memory_stats[] = { 1560 { "anon", PAGE_SIZE, NR_ANON_MAPPED }, 1561 { "file", PAGE_SIZE, NR_FILE_PAGES }, 1562 { "kernel_stack", 1024, NR_KERNEL_STACK_KB }, 1563 { "pagetables", PAGE_SIZE, NR_PAGETABLE }, 1564 { "percpu", 1, MEMCG_PERCPU_B }, 1565 { "sock", PAGE_SIZE, MEMCG_SOCK }, 1566 { "shmem", PAGE_SIZE, NR_SHMEM }, 1567 { "file_mapped", PAGE_SIZE, NR_FILE_MAPPED }, 1568 { "file_dirty", PAGE_SIZE, NR_FILE_DIRTY }, 1569 { "file_writeback", PAGE_SIZE, NR_WRITEBACK }, 1570 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1571 /* 1572 * The ratio will be initialized in memory_stats_init(). Because 1573 * on some architectures, the macro of HPAGE_PMD_SIZE is not 1574 * constant(e.g. powerpc). 1575 */ 1576 { "anon_thp", 0, NR_ANON_THPS }, 1577 { "file_thp", 0, NR_FILE_THPS }, 1578 { "shmem_thp", 0, NR_SHMEM_THPS }, 1579 #endif 1580 { "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON }, 1581 { "active_anon", PAGE_SIZE, NR_ACTIVE_ANON }, 1582 { "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE }, 1583 { "active_file", PAGE_SIZE, NR_ACTIVE_FILE }, 1584 { "unevictable", PAGE_SIZE, NR_UNEVICTABLE }, 1585 1586 /* 1587 * Note: The slab_reclaimable and slab_unreclaimable must be 1588 * together and slab_reclaimable must be in front. 
1589 */ 1590 { "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B }, 1591 { "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B }, 1592 1593 /* The memory events */ 1594 { "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON }, 1595 { "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE }, 1596 { "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON }, 1597 { "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE }, 1598 { "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON }, 1599 { "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE }, 1600 { "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM }, 1601 }; 1602 1603 static int __init memory_stats_init(void) 1604 { 1605 int i; 1606 1607 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1608 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1609 if (memory_stats[i].idx == NR_ANON_THPS || 1610 memory_stats[i].idx == NR_FILE_THPS || 1611 memory_stats[i].idx == NR_SHMEM_THPS) 1612 memory_stats[i].ratio = HPAGE_PMD_SIZE; 1613 #endif 1614 VM_BUG_ON(!memory_stats[i].ratio); 1615 VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT); 1616 } 1617 1618 return 0; 1619 } 1620 pure_initcall(memory_stats_init); 1621 1622 static char *memory_stat_format(struct mem_cgroup *memcg) 1623 { 1624 struct seq_buf s; 1625 int i; 1626 1627 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE); 1628 if (!s.buffer) 1629 return NULL; 1630 1631 /* 1632 * Provide statistics on the state of the memory subsystem as 1633 * well as cumulative event counters that show past behavior. 1634 * 1635 * This list is ordered following a combination of these gradients: 1636 * 1) generic big picture -> specifics and details 1637 * 2) reflecting userspace activity -> reflecting kernel heuristics 1638 * 1639 * Current memory state: 1640 */ 1641 1642 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 1643 u64 size; 1644 1645 size = memcg_page_state(memcg, memory_stats[i].idx); 1646 size *= memory_stats[i].ratio; 1647 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); 1648 1649 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { 1650 size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + 1651 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B); 1652 seq_buf_printf(&s, "slab %llu\n", size); 1653 } 1654 } 1655 1656 /* Accumulated memory events */ 1657 1658 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT), 1659 memcg_events(memcg, PGFAULT)); 1660 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), 1661 memcg_events(memcg, PGMAJFAULT)); 1662 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL), 1663 memcg_events(memcg, PGREFILL)); 1664 seq_buf_printf(&s, "pgscan %lu\n", 1665 memcg_events(memcg, PGSCAN_KSWAPD) + 1666 memcg_events(memcg, PGSCAN_DIRECT)); 1667 seq_buf_printf(&s, "pgsteal %lu\n", 1668 memcg_events(memcg, PGSTEAL_KSWAPD) + 1669 memcg_events(memcg, PGSTEAL_DIRECT)); 1670 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE), 1671 memcg_events(memcg, PGACTIVATE)); 1672 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE), 1673 memcg_events(memcg, PGDEACTIVATE)); 1674 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE), 1675 memcg_events(memcg, PGLAZYFREE)); 1676 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED), 1677 memcg_events(memcg, PGLAZYFREED)); 1678 1679 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1680 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC), 1681 memcg_events(memcg, THP_FAULT_ALLOC)); 1682 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC), 1683 memcg_events(memcg, THP_COLLAPSE_ALLOC)); 1684 #endif /* 
CONFIG_TRANSPARENT_HUGEPAGE */ 1685 1686 /* The above should easily fit into one page */ 1687 WARN_ON_ONCE(seq_buf_has_overflowed(&s)); 1688 1689 return s.buffer; 1690 } 1691 1692 #define K(x) ((x) << (PAGE_SHIFT-10)) 1693 /** 1694 * mem_cgroup_print_oom_context: Print OOM information relevant to 1695 * memory controller. 1696 * @memcg: The memory cgroup that went over limit 1697 * @p: Task that is going to be killed 1698 * 1699 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is 1700 * enabled 1701 */ 1702 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) 1703 { 1704 rcu_read_lock(); 1705 1706 if (memcg) { 1707 pr_cont(",oom_memcg="); 1708 pr_cont_cgroup_path(memcg->css.cgroup); 1709 } else 1710 pr_cont(",global_oom"); 1711 if (p) { 1712 pr_cont(",task_memcg="); 1713 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); 1714 } 1715 rcu_read_unlock(); 1716 } 1717 1718 /** 1719 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to 1720 * memory controller. 1721 * @memcg: The memory cgroup that went over limit 1722 */ 1723 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) 1724 { 1725 char *buf; 1726 1727 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", 1728 K((u64)page_counter_read(&memcg->memory)), 1729 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); 1730 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 1731 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", 1732 K((u64)page_counter_read(&memcg->swap)), 1733 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); 1734 else { 1735 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", 1736 K((u64)page_counter_read(&memcg->memsw)), 1737 K((u64)memcg->memsw.max), memcg->memsw.failcnt); 1738 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", 1739 K((u64)page_counter_read(&memcg->kmem)), 1740 K((u64)memcg->kmem.max), memcg->kmem.failcnt); 1741 } 1742 1743 pr_info("Memory cgroup stats for "); 1744 pr_cont_cgroup_path(memcg->css.cgroup); 1745 pr_cont(":"); 1746 buf = memory_stat_format(memcg); 1747 if (!buf) 1748 return; 1749 pr_info("%s", buf); 1750 kfree(buf); 1751 } 1752 1753 /* 1754 * Return the memory (and swap, if configured) limit for a memcg. 1755 */ 1756 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) 1757 { 1758 unsigned long max = READ_ONCE(memcg->memory.max); 1759 1760 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 1761 if (mem_cgroup_swappiness(memcg)) 1762 max += min(READ_ONCE(memcg->swap.max), 1763 (unsigned long)total_swap_pages); 1764 } else { /* v1 */ 1765 if (mem_cgroup_swappiness(memcg)) { 1766 /* Calculate swap excess capacity from memsw limit */ 1767 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; 1768 1769 max += min(swap, (unsigned long)total_swap_pages); 1770 } 1771 } 1772 return max; 1773 } 1774 1775 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) 1776 { 1777 return page_counter_read(&memcg->memory); 1778 } 1779 1780 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 1781 int order) 1782 { 1783 struct oom_control oc = { 1784 .zonelist = NULL, 1785 .nodemask = NULL, 1786 .memcg = memcg, 1787 .gfp_mask = gfp_mask, 1788 .order = order, 1789 }; 1790 bool ret = true; 1791 1792 if (mutex_lock_killable(&oom_lock)) 1793 return true; 1794 1795 if (mem_cgroup_margin(memcg) >= (1 << order)) 1796 goto unlock; 1797 1798 /* 1799 * A few threads which were not waiting at mutex_lock_killable() can 1800 * fail to bail out. 
Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
				    (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
						pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}
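/*
 * Editor's note (not part of the original source): the "excess >> 2" cut-off
 * above means the loop targets roughly a quarter of the pages by which the
 * cgroup exceeds its soft limit. For example, a memcg that is 400 pages over
 * its soft limit stops being shrunk once about 100 pages have been reclaimed,
 * or once the hierarchy walk has restarted more than
 * MEM_CGROUP_MAX_RECLAIM_LOOPS times, whichever comes first.
 */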
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot grant the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree, so we have
		 * to clean up what we already set up, up to the failing
		 * subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
				   unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};
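/*
 * Editor's note (not part of the original source): as described in the
 * comments below, OOM_SUCCESS and OOM_FAILED report the outcome of a
 * synchronous mem_cgroup_out_of_memory() attempt, OOM_ASYNC defers handling
 * to mem_cgroup_oom_synchronize() at the end of the page fault (used when
 * the cgroup1 OOM killer is disabled by userspace), and OOM_SKIPPED means
 * the OOM killer was not invoked at all.
 */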
2004 * 2005 * cgroup1 allows disabling the OOM killer and waiting for outside 2006 * handling until the charge can succeed; remember the context and put 2007 * the task to sleep at the end of the page fault when all locks are 2008 * released. 2009 * 2010 * On the other hand, in-kernel OOM killer allows for an async victim 2011 * memory reclaim (oom_reaper) and that means that we are not solely 2012 * relying on the oom victim to make a forward progress and we can 2013 * invoke the oom killer here. 2014 * 2015 * Please note that mem_cgroup_out_of_memory might fail to find a 2016 * victim and then we have to bail out from the charge path. 2017 */ 2018 if (memcg->oom_kill_disable) { 2019 if (!current->in_user_fault) 2020 return OOM_SKIPPED; 2021 css_get(&memcg->css); 2022 current->memcg_in_oom = memcg; 2023 current->memcg_oom_gfp_mask = mask; 2024 current->memcg_oom_order = order; 2025 2026 return OOM_ASYNC; 2027 } 2028 2029 mem_cgroup_mark_under_oom(memcg); 2030 2031 locked = mem_cgroup_oom_trylock(memcg); 2032 2033 if (locked) 2034 mem_cgroup_oom_notify(memcg); 2035 2036 mem_cgroup_unmark_under_oom(memcg); 2037 if (mem_cgroup_out_of_memory(memcg, mask, order)) 2038 ret = OOM_SUCCESS; 2039 else 2040 ret = OOM_FAILED; 2041 2042 if (locked) 2043 mem_cgroup_oom_unlock(memcg); 2044 2045 return ret; 2046 } 2047 2048 /** 2049 * mem_cgroup_oom_synchronize - complete memcg OOM handling 2050 * @handle: actually kill/wait or just clean up the OOM state 2051 * 2052 * This has to be called at the end of a page fault if the memcg OOM 2053 * handler was enabled. 2054 * 2055 * Memcg supports userspace OOM handling where failed allocations must 2056 * sleep on a waitqueue until the userspace task resolves the 2057 * situation. Sleeping directly in the charge context with all kinds 2058 * of locks held is not a good idea, instead we remember an OOM state 2059 * in the task and mem_cgroup_oom_synchronize() has to be called at 2060 * the end of the page fault to complete the OOM handling. 2061 * 2062 * Returns %true if an ongoing memcg OOM situation was detected and 2063 * completed, %false otherwise. 2064 */ 2065 bool mem_cgroup_oom_synchronize(bool handle) 2066 { 2067 struct mem_cgroup *memcg = current->memcg_in_oom; 2068 struct oom_wait_info owait; 2069 bool locked; 2070 2071 /* OOM is global, do not handle */ 2072 if (!memcg) 2073 return false; 2074 2075 if (!handle) 2076 goto cleanup; 2077 2078 owait.memcg = memcg; 2079 owait.wait.flags = 0; 2080 owait.wait.func = memcg_oom_wake_function; 2081 owait.wait.private = current; 2082 INIT_LIST_HEAD(&owait.wait.entry); 2083 2084 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2085 mem_cgroup_mark_under_oom(memcg); 2086 2087 locked = mem_cgroup_oom_trylock(memcg); 2088 2089 if (locked) 2090 mem_cgroup_oom_notify(memcg); 2091 2092 if (locked && !memcg->oom_kill_disable) { 2093 mem_cgroup_unmark_under_oom(memcg); 2094 finish_wait(&memcg_oom_waitq, &owait.wait); 2095 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, 2096 current->memcg_oom_order); 2097 } else { 2098 schedule(); 2099 mem_cgroup_unmark_under_oom(memcg); 2100 finish_wait(&memcg_oom_waitq, &owait.wait); 2101 } 2102 2103 if (locked) { 2104 mem_cgroup_oom_unlock(memcg); 2105 /* 2106 * There is no guarantee that an OOM-lock contender 2107 * sees the wakeups triggered by the OOM kill 2108 * uncharges. Wake any sleepers explicitely. 
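 *
 * Tasks only get here when the kernel OOM killer was disabled at charge
 * time, i.e. when a userspace handler is expected to resolve the OOM.
 * On cgroup1 such a handler is registered roughly like this (sketch,
 * hypothetical group directory, error handling omitted):
 *
 *	int efd = eventfd(0, 0);
 *	int ofd = open(".../grp/memory.oom_control", O_RDONLY);
 *	int cfd = open(".../grp/cgroup.event_control", O_WRONLY);
 *	dprintf(cfd, "%d %d", efd, ofd);
 *	read(efd, &counter, sizeof(counter));
 *
 * after writing "1" to memory.oom_control to disable the kernel killer;
 * the read() returns whenever the group hits OOM and the handler is
 * then expected to make room or enlarge the limit.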
2109 */ 2110 memcg_oom_recover(memcg); 2111 } 2112 cleanup: 2113 current->memcg_in_oom = NULL; 2114 css_put(&memcg->css); 2115 return true; 2116 } 2117 2118 /** 2119 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM 2120 * @victim: task to be killed by the OOM killer 2121 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM 2122 * 2123 * Returns a pointer to a memory cgroup, which has to be cleaned up 2124 * by killing all belonging OOM-killable tasks. 2125 * 2126 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg. 2127 */ 2128 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, 2129 struct mem_cgroup *oom_domain) 2130 { 2131 struct mem_cgroup *oom_group = NULL; 2132 struct mem_cgroup *memcg; 2133 2134 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 2135 return NULL; 2136 2137 if (!oom_domain) 2138 oom_domain = root_mem_cgroup; 2139 2140 rcu_read_lock(); 2141 2142 memcg = mem_cgroup_from_task(victim); 2143 if (memcg == root_mem_cgroup) 2144 goto out; 2145 2146 /* 2147 * If the victim task has been asynchronously moved to a different 2148 * memory cgroup, we might end up killing tasks outside oom_domain. 2149 * In this case it's better to ignore memory.group.oom. 2150 */ 2151 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) 2152 goto out; 2153 2154 /* 2155 * Traverse the memory cgroup hierarchy from the victim task's 2156 * cgroup up to the OOMing cgroup (or root) to find the 2157 * highest-level memory cgroup with oom.group set. 2158 */ 2159 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 2160 if (memcg->oom_group) 2161 oom_group = memcg; 2162 2163 if (memcg == oom_domain) 2164 break; 2165 } 2166 2167 if (oom_group) 2168 css_get(&oom_group->css); 2169 out: 2170 rcu_read_unlock(); 2171 2172 return oom_group; 2173 } 2174 2175 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) 2176 { 2177 pr_info("Tasks in "); 2178 pr_cont_cgroup_path(memcg->css.cgroup); 2179 pr_cont(" are going to be killed due to memory.oom.group set\n"); 2180 } 2181 2182 /** 2183 * lock_page_memcg - lock a page and memcg binding 2184 * @page: the page 2185 * 2186 * This function protects unlocked LRU pages from being moved to 2187 * another cgroup. 2188 * 2189 * It ensures lifetime of the returned memcg. Caller is responsible 2190 * for the lifetime of the page; __unlock_page_memcg() is available 2191 * when @page might get freed inside the locked section. 2192 */ 2193 struct mem_cgroup *lock_page_memcg(struct page *page) 2194 { 2195 struct page *head = compound_head(page); /* rmap on tail pages */ 2196 struct mem_cgroup *memcg; 2197 unsigned long flags; 2198 2199 /* 2200 * The RCU lock is held throughout the transaction. The fast 2201 * path can get away without acquiring the memcg->move_lock 2202 * because page moving starts with an RCU grace period. 2203 * 2204 * The RCU lock also protects the memcg from being freed when 2205 * the page state that is going to change is the only thing 2206 * preventing the page itself from being freed. E.g. writeback 2207 * doesn't hold a page reference and relies on PG_writeback to 2208 * keep off truncation, migration and so forth. 
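 *
 * A typical caller pairs it like this (sketch):
 *
 *	memcg = lock_page_memcg(page);
 *	(update page state that must stay coherent with page_memcg(page),
 *	 e.g. dirty or writeback accounting)
 *	unlock_page_memcg(page);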
2209 */ 2210 rcu_read_lock(); 2211 2212 if (mem_cgroup_disabled()) 2213 return NULL; 2214 again: 2215 memcg = page_memcg(head); 2216 if (unlikely(!memcg)) 2217 return NULL; 2218 2219 #ifdef CONFIG_PROVE_LOCKING 2220 local_irq_save(flags); 2221 might_lock(&memcg->move_lock); 2222 local_irq_restore(flags); 2223 #endif 2224 2225 if (atomic_read(&memcg->moving_account) <= 0) 2226 return memcg; 2227 2228 spin_lock_irqsave(&memcg->move_lock, flags); 2229 if (memcg != page_memcg(head)) { 2230 spin_unlock_irqrestore(&memcg->move_lock, flags); 2231 goto again; 2232 } 2233 2234 /* 2235 * When charge migration first begins, we can have locked and 2236 * unlocked page stat updates happening concurrently. Track 2237 * the task who has the lock for unlock_page_memcg(). 2238 */ 2239 memcg->move_lock_task = current; 2240 memcg->move_lock_flags = flags; 2241 2242 return memcg; 2243 } 2244 EXPORT_SYMBOL(lock_page_memcg); 2245 2246 /** 2247 * __unlock_page_memcg - unlock and unpin a memcg 2248 * @memcg: the memcg 2249 * 2250 * Unlock and unpin a memcg returned by lock_page_memcg(). 2251 */ 2252 void __unlock_page_memcg(struct mem_cgroup *memcg) 2253 { 2254 if (memcg && memcg->move_lock_task == current) { 2255 unsigned long flags = memcg->move_lock_flags; 2256 2257 memcg->move_lock_task = NULL; 2258 memcg->move_lock_flags = 0; 2259 2260 spin_unlock_irqrestore(&memcg->move_lock, flags); 2261 } 2262 2263 rcu_read_unlock(); 2264 } 2265 2266 /** 2267 * unlock_page_memcg - unlock a page and memcg binding 2268 * @page: the page 2269 */ 2270 void unlock_page_memcg(struct page *page) 2271 { 2272 struct page *head = compound_head(page); 2273 2274 __unlock_page_memcg(page_memcg(head)); 2275 } 2276 EXPORT_SYMBOL(unlock_page_memcg); 2277 2278 struct memcg_stock_pcp { 2279 struct mem_cgroup *cached; /* this never be root cgroup */ 2280 unsigned int nr_pages; 2281 2282 #ifdef CONFIG_MEMCG_KMEM 2283 struct obj_cgroup *cached_objcg; 2284 unsigned int nr_bytes; 2285 #endif 2286 2287 struct work_struct work; 2288 unsigned long flags; 2289 #define FLUSHING_CACHED_CHARGE 0 2290 }; 2291 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 2292 static DEFINE_MUTEX(percpu_charge_mutex); 2293 2294 #ifdef CONFIG_MEMCG_KMEM 2295 static void drain_obj_stock(struct memcg_stock_pcp *stock); 2296 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2297 struct mem_cgroup *root_memcg); 2298 2299 #else 2300 static inline void drain_obj_stock(struct memcg_stock_pcp *stock) 2301 { 2302 } 2303 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 2304 struct mem_cgroup *root_memcg) 2305 { 2306 return false; 2307 } 2308 #endif 2309 2310 /** 2311 * consume_stock: Try to consume stocked charge on this cpu. 2312 * @memcg: memcg to consume from. 2313 * @nr_pages: how many pages to charge. 2314 * 2315 * The charges will only happen if @memcg matches the current cpu's memcg 2316 * stock, and at least @nr_pages are available in that stock. Failure to 2317 * service an allocation will refill the stock. 2318 * 2319 * returns true if successful, false otherwise. 
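 *
 * Example: try_charge() uses this as its fast path,
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;
 *
 * and only falls back to the page_counter atomics on a miss, after which
 * refill_stock() puts the unused part of the charged batch back into the
 * per-cpu cache.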
2320 */ 2321 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2322 { 2323 struct memcg_stock_pcp *stock; 2324 unsigned long flags; 2325 bool ret = false; 2326 2327 if (nr_pages > MEMCG_CHARGE_BATCH) 2328 return ret; 2329 2330 local_irq_save(flags); 2331 2332 stock = this_cpu_ptr(&memcg_stock); 2333 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { 2334 stock->nr_pages -= nr_pages; 2335 ret = true; 2336 } 2337 2338 local_irq_restore(flags); 2339 2340 return ret; 2341 } 2342 2343 /* 2344 * Returns stocks cached in percpu and reset cached information. 2345 */ 2346 static void drain_stock(struct memcg_stock_pcp *stock) 2347 { 2348 struct mem_cgroup *old = stock->cached; 2349 2350 if (!old) 2351 return; 2352 2353 if (stock->nr_pages) { 2354 page_counter_uncharge(&old->memory, stock->nr_pages); 2355 if (do_memsw_account()) 2356 page_counter_uncharge(&old->memsw, stock->nr_pages); 2357 stock->nr_pages = 0; 2358 } 2359 2360 css_put(&old->css); 2361 stock->cached = NULL; 2362 } 2363 2364 static void drain_local_stock(struct work_struct *dummy) 2365 { 2366 struct memcg_stock_pcp *stock; 2367 unsigned long flags; 2368 2369 /* 2370 * The only protection from memory hotplug vs. drain_stock races is 2371 * that we always operate on local CPU stock here with IRQ disabled 2372 */ 2373 local_irq_save(flags); 2374 2375 stock = this_cpu_ptr(&memcg_stock); 2376 drain_obj_stock(stock); 2377 drain_stock(stock); 2378 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2379 2380 local_irq_restore(flags); 2381 } 2382 2383 /* 2384 * Cache charges(val) to local per_cpu area. 2385 * This will be consumed by consume_stock() function, later. 2386 */ 2387 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) 2388 { 2389 struct memcg_stock_pcp *stock; 2390 unsigned long flags; 2391 2392 local_irq_save(flags); 2393 2394 stock = this_cpu_ptr(&memcg_stock); 2395 if (stock->cached != memcg) { /* reset if necessary */ 2396 drain_stock(stock); 2397 css_get(&memcg->css); 2398 stock->cached = memcg; 2399 } 2400 stock->nr_pages += nr_pages; 2401 2402 if (stock->nr_pages > MEMCG_CHARGE_BATCH) 2403 drain_stock(stock); 2404 2405 local_irq_restore(flags); 2406 } 2407 2408 /* 2409 * Drains all per-CPU charge caches for given root_memcg resp. subtree 2410 * of the hierarchy under it. 2411 */ 2412 static void drain_all_stock(struct mem_cgroup *root_memcg) 2413 { 2414 int cpu, curcpu; 2415 2416 /* If someone's already draining, avoid adding running more workers. */ 2417 if (!mutex_trylock(&percpu_charge_mutex)) 2418 return; 2419 /* 2420 * Notify other cpus that system-wide "drain" is running 2421 * We do not care about races with the cpu hotplug because cpu down 2422 * as well as workers from this path always operate on the local 2423 * per-cpu data. CPU up doesn't touch memcg_stock at all. 
2424 */ 2425 curcpu = get_cpu(); 2426 for_each_online_cpu(cpu) { 2427 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2428 struct mem_cgroup *memcg; 2429 bool flush = false; 2430 2431 rcu_read_lock(); 2432 memcg = stock->cached; 2433 if (memcg && stock->nr_pages && 2434 mem_cgroup_is_descendant(memcg, root_memcg)) 2435 flush = true; 2436 if (obj_stock_flush_required(stock, root_memcg)) 2437 flush = true; 2438 rcu_read_unlock(); 2439 2440 if (flush && 2441 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 2442 if (cpu == curcpu) 2443 drain_local_stock(&stock->work); 2444 else 2445 schedule_work_on(cpu, &stock->work); 2446 } 2447 } 2448 put_cpu(); 2449 mutex_unlock(&percpu_charge_mutex); 2450 } 2451 2452 static int memcg_hotplug_cpu_dead(unsigned int cpu) 2453 { 2454 struct memcg_stock_pcp *stock; 2455 struct mem_cgroup *memcg, *mi; 2456 2457 stock = &per_cpu(memcg_stock, cpu); 2458 drain_stock(stock); 2459 2460 for_each_mem_cgroup(memcg) { 2461 int i; 2462 2463 for (i = 0; i < MEMCG_NR_STAT; i++) { 2464 int nid; 2465 long x; 2466 2467 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2468 if (x) 2469 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2470 atomic_long_add(x, &memcg->vmstats[i]); 2471 2472 if (i >= NR_VM_NODE_STAT_ITEMS) 2473 continue; 2474 2475 for_each_node(nid) { 2476 struct mem_cgroup_per_node *pn; 2477 2478 pn = mem_cgroup_nodeinfo(memcg, nid); 2479 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0); 2480 if (x) 2481 do { 2482 atomic_long_add(x, &pn->lruvec_stat[i]); 2483 } while ((pn = parent_nodeinfo(pn, nid))); 2484 } 2485 } 2486 2487 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2488 long x; 2489 2490 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2491 if (x) 2492 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 2493 atomic_long_add(x, &memcg->vmevents[i]); 2494 } 2495 } 2496 2497 return 0; 2498 } 2499 2500 static unsigned long reclaim_high(struct mem_cgroup *memcg, 2501 unsigned int nr_pages, 2502 gfp_t gfp_mask) 2503 { 2504 unsigned long nr_reclaimed = 0; 2505 2506 do { 2507 unsigned long pflags; 2508 2509 if (page_counter_read(&memcg->memory) <= 2510 READ_ONCE(memcg->memory.high)) 2511 continue; 2512 2513 memcg_memory_event(memcg, MEMCG_HIGH); 2514 2515 psi_memstall_enter(&pflags); 2516 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, 2517 gfp_mask, true); 2518 psi_memstall_leave(&pflags); 2519 } while ((memcg = parent_mem_cgroup(memcg)) && 2520 !mem_cgroup_is_root(memcg)); 2521 2522 return nr_reclaimed; 2523 } 2524 2525 static void high_work_func(struct work_struct *work) 2526 { 2527 struct mem_cgroup *memcg; 2528 2529 memcg = container_of(work, struct mem_cgroup, high_work); 2530 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); 2531 } 2532 2533 /* 2534 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is 2535 * enough to still cause a significant slowdown in most cases, while still 2536 * allowing diagnostics and tracing to proceed without becoming stuck. 2537 */ 2538 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ) 2539 2540 /* 2541 * When calculating the delay, we use these either side of the exponentiation to 2542 * maintain precision and scale to a reasonable number of jiffies (see the table 2543 * below. 2544 * 2545 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the 2546 * overage ratio to a delay. 
2547 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the 2548 * proposed penalty in order to reduce to a reasonable number of jiffies, and 2549 * to produce a reasonable delay curve. 2550 * 2551 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a 2552 * reasonable delay curve compared to precision-adjusted overage, not 2553 * penalising heavily at first, but still making sure that growth beyond the 2554 * limit penalises misbehaviour cgroups by slowing them down exponentially. For 2555 * example, with a high of 100 megabytes: 2556 * 2557 * +-------+------------------------+ 2558 * | usage | time to allocate in ms | 2559 * +-------+------------------------+ 2560 * | 100M | 0 | 2561 * | 101M | 6 | 2562 * | 102M | 25 | 2563 * | 103M | 57 | 2564 * | 104M | 102 | 2565 * | 105M | 159 | 2566 * | 106M | 230 | 2567 * | 107M | 313 | 2568 * | 108M | 409 | 2569 * | 109M | 518 | 2570 * | 110M | 639 | 2571 * | 111M | 774 | 2572 * | 112M | 921 | 2573 * | 113M | 1081 | 2574 * | 114M | 1254 | 2575 * | 115M | 1439 | 2576 * | 116M | 1638 | 2577 * | 117M | 1849 | 2578 * | 118M | 2000 | 2579 * | 119M | 2000 | 2580 * | 120M | 2000 | 2581 * +-------+------------------------+ 2582 */ 2583 #define MEMCG_DELAY_PRECISION_SHIFT 20 2584 #define MEMCG_DELAY_SCALING_SHIFT 14 2585 2586 static u64 calculate_overage(unsigned long usage, unsigned long high) 2587 { 2588 u64 overage; 2589 2590 if (usage <= high) 2591 return 0; 2592 2593 /* 2594 * Prevent division by 0 in overage calculation by acting as if 2595 * it was a threshold of 1 page 2596 */ 2597 high = max(high, 1UL); 2598 2599 overage = usage - high; 2600 overage <<= MEMCG_DELAY_PRECISION_SHIFT; 2601 return div64_u64(overage, high); 2602 } 2603 2604 static u64 mem_find_max_overage(struct mem_cgroup *memcg) 2605 { 2606 u64 overage, max_overage = 0; 2607 2608 do { 2609 overage = calculate_overage(page_counter_read(&memcg->memory), 2610 READ_ONCE(memcg->memory.high)); 2611 max_overage = max(overage, max_overage); 2612 } while ((memcg = parent_mem_cgroup(memcg)) && 2613 !mem_cgroup_is_root(memcg)); 2614 2615 return max_overage; 2616 } 2617 2618 static u64 swap_find_max_overage(struct mem_cgroup *memcg) 2619 { 2620 u64 overage, max_overage = 0; 2621 2622 do { 2623 overage = calculate_overage(page_counter_read(&memcg->swap), 2624 READ_ONCE(memcg->swap.high)); 2625 if (overage) 2626 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); 2627 max_overage = max(overage, max_overage); 2628 } while ((memcg = parent_mem_cgroup(memcg)) && 2629 !mem_cgroup_is_root(memcg)); 2630 2631 return max_overage; 2632 } 2633 2634 /* 2635 * Get the number of jiffies that we should penalise a mischievous cgroup which 2636 * is exceeding its memory.high by checking both it and its ancestors. 2637 */ 2638 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, 2639 unsigned int nr_pages, 2640 u64 max_overage) 2641 { 2642 unsigned long penalty_jiffies; 2643 2644 if (!max_overage) 2645 return 0; 2646 2647 /* 2648 * We use overage compared to memory.high to calculate the number of 2649 * jiffies to sleep (penalty_jiffies). Ideally this value should be 2650 * fairly lenient on small overages, and increasingly harsh when the 2651 * memcg in question makes it clear that it has no intention of stopping 2652 * its crazy behaviour, so we exponentially increase the delay based on 2653 * overage amount. 
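 *
 * Worked example (assuming 4 KiB pages and HZ == 1000): with high at
 * 100M (25600 pages) and usage at 101M (25856 pages),
 *
 *	overage = ((25856 - 25600) << 20) / 25600       (about 10485)
 *	penalty = (10485 * 10485 * 1000) >> 20 >> 14    (about 6 jiffies)
 *
 * which is the ~6ms shown for 101M in the table above, before the
 * nr_pages / MEMCG_CHARGE_BATCH scaling applied at the end below.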
2654 */ 2655 penalty_jiffies = max_overage * max_overage * HZ; 2656 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT; 2657 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT; 2658 2659 /* 2660 * Factor in the task's own contribution to the overage, such that four 2661 * N-sized allocations are throttled approximately the same as one 2662 * 4N-sized allocation. 2663 * 2664 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or 2665 * larger the current charge patch is than that. 2666 */ 2667 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; 2668 } 2669 2670 /* 2671 * Scheduled by try_charge() to be executed from the userland return path 2672 * and reclaims memory over the high limit. 2673 */ 2674 void mem_cgroup_handle_over_high(void) 2675 { 2676 unsigned long penalty_jiffies; 2677 unsigned long pflags; 2678 unsigned long nr_reclaimed; 2679 unsigned int nr_pages = current->memcg_nr_pages_over_high; 2680 int nr_retries = MAX_RECLAIM_RETRIES; 2681 struct mem_cgroup *memcg; 2682 bool in_retry = false; 2683 2684 if (likely(!nr_pages)) 2685 return; 2686 2687 memcg = get_mem_cgroup_from_mm(current->mm); 2688 current->memcg_nr_pages_over_high = 0; 2689 2690 retry_reclaim: 2691 /* 2692 * The allocating task should reclaim at least the batch size, but for 2693 * subsequent retries we only want to do what's necessary to prevent oom 2694 * or breaching resource isolation. 2695 * 2696 * This is distinct from memory.max or page allocator behaviour because 2697 * memory.high is currently batched, whereas memory.max and the page 2698 * allocator run every time an allocation is made. 2699 */ 2700 nr_reclaimed = reclaim_high(memcg, 2701 in_retry ? SWAP_CLUSTER_MAX : nr_pages, 2702 GFP_KERNEL); 2703 2704 /* 2705 * memory.high is breached and reclaim is unable to keep up. Throttle 2706 * allocators proactively to slow down excessive growth. 2707 */ 2708 penalty_jiffies = calculate_high_delay(memcg, nr_pages, 2709 mem_find_max_overage(memcg)); 2710 2711 penalty_jiffies += calculate_high_delay(memcg, nr_pages, 2712 swap_find_max_overage(memcg)); 2713 2714 /* 2715 * Clamp the max delay per usermode return so as to still keep the 2716 * application moving forwards and also permit diagnostics, albeit 2717 * extremely slowly. 2718 */ 2719 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES); 2720 2721 /* 2722 * Don't sleep if the amount of jiffies this memcg owes us is so low 2723 * that it's not even worth doing, in an attempt to be nice to those who 2724 * go only a small amount over their memory.high value and maybe haven't 2725 * been aggressively reclaimed enough yet. 2726 */ 2727 if (penalty_jiffies <= HZ / 100) 2728 goto out; 2729 2730 /* 2731 * If reclaim is making forward progress but we're still over 2732 * memory.high, we want to encourage that rather than doing allocator 2733 * throttling. 2734 */ 2735 if (nr_reclaimed || nr_retries--) { 2736 in_retry = true; 2737 goto retry_reclaim; 2738 } 2739 2740 /* 2741 * If we exit early, we're guaranteed to die (since 2742 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't 2743 * need to account for any ill-begotten jiffies to pay them off later. 
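 *
 * The forced sleep below is accounted as a memory stall, so with
 * CONFIG_PSI enabled, sustained memory.high breaches show up in the
 * cgroup's memory.pressure, next to the MEMCG_HIGH events counted in
 * reclaim_high() above.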
2744 */ 2745 psi_memstall_enter(&pflags); 2746 schedule_timeout_killable(penalty_jiffies); 2747 psi_memstall_leave(&pflags); 2748 2749 out: 2750 css_put(&memcg->css); 2751 } 2752 2753 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, 2754 unsigned int nr_pages) 2755 { 2756 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); 2757 int nr_retries = MAX_RECLAIM_RETRIES; 2758 struct mem_cgroup *mem_over_limit; 2759 struct page_counter *counter; 2760 enum oom_status oom_status; 2761 unsigned long nr_reclaimed; 2762 bool may_swap = true; 2763 bool drained = false; 2764 unsigned long pflags; 2765 2766 if (mem_cgroup_is_root(memcg)) 2767 return 0; 2768 retry: 2769 if (consume_stock(memcg, nr_pages)) 2770 return 0; 2771 2772 if (!do_memsw_account() || 2773 page_counter_try_charge(&memcg->memsw, batch, &counter)) { 2774 if (page_counter_try_charge(&memcg->memory, batch, &counter)) 2775 goto done_restock; 2776 if (do_memsw_account()) 2777 page_counter_uncharge(&memcg->memsw, batch); 2778 mem_over_limit = mem_cgroup_from_counter(counter, memory); 2779 } else { 2780 mem_over_limit = mem_cgroup_from_counter(counter, memsw); 2781 may_swap = false; 2782 } 2783 2784 if (batch > nr_pages) { 2785 batch = nr_pages; 2786 goto retry; 2787 } 2788 2789 /* 2790 * Memcg doesn't have a dedicated reserve for atomic 2791 * allocations. But like the global atomic pool, we need to 2792 * put the burden of reclaim on regular allocation requests 2793 * and let these go through as privileged allocations. 2794 */ 2795 if (gfp_mask & __GFP_ATOMIC) 2796 goto force; 2797 2798 /* 2799 * Unlike in global OOM situations, memcg is not in a physical 2800 * memory shortage. Allow dying and OOM-killed tasks to 2801 * bypass the last charges so that they can exit quickly and 2802 * free their memory. 2803 */ 2804 if (unlikely(should_force_charge())) 2805 goto force; 2806 2807 /* 2808 * Prevent unbounded recursion when reclaim operations need to 2809 * allocate memory. This might exceed the limits temporarily, 2810 * but we prefer facilitating memory reclaim and getting back 2811 * under the limit over triggering OOM kills in these cases. 2812 */ 2813 if (unlikely(current->flags & PF_MEMALLOC)) 2814 goto force; 2815 2816 if (unlikely(task_in_memcg_oom(current))) 2817 goto nomem; 2818 2819 if (!gfpflags_allow_blocking(gfp_mask)) 2820 goto nomem; 2821 2822 memcg_memory_event(mem_over_limit, MEMCG_MAX); 2823 2824 psi_memstall_enter(&pflags); 2825 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, 2826 gfp_mask, may_swap); 2827 psi_memstall_leave(&pflags); 2828 2829 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2830 goto retry; 2831 2832 if (!drained) { 2833 drain_all_stock(mem_over_limit); 2834 drained = true; 2835 goto retry; 2836 } 2837 2838 if (gfp_mask & __GFP_NORETRY) 2839 goto nomem; 2840 /* 2841 * Even though the limit is exceeded at this point, reclaim 2842 * may have been able to free some pages. Retry the charge 2843 * before killing the task. 2844 * 2845 * Only for regular pages, though: huge pages are rather 2846 * unlikely to succeed so close to the limit, and we fall back 2847 * to regular pages anyway in case of failure. 2848 */ 2849 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) 2850 goto retry; 2851 /* 2852 * At task move, charge accounts can be doubly counted. So, it's 2853 * better to wait until the end of task_move if something is going on. 
2854 */ 2855 if (mem_cgroup_wait_acct_move(mem_over_limit)) 2856 goto retry; 2857 2858 if (nr_retries--) 2859 goto retry; 2860 2861 if (gfp_mask & __GFP_RETRY_MAYFAIL) 2862 goto nomem; 2863 2864 if (gfp_mask & __GFP_NOFAIL) 2865 goto force; 2866 2867 if (fatal_signal_pending(current)) 2868 goto force; 2869 2870 /* 2871 * keep retrying as long as the memcg oom killer is able to make 2872 * a forward progress or bypass the charge if the oom killer 2873 * couldn't make any progress. 2874 */ 2875 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask, 2876 get_order(nr_pages * PAGE_SIZE)); 2877 switch (oom_status) { 2878 case OOM_SUCCESS: 2879 nr_retries = MAX_RECLAIM_RETRIES; 2880 goto retry; 2881 case OOM_FAILED: 2882 goto force; 2883 default: 2884 goto nomem; 2885 } 2886 nomem: 2887 if (!(gfp_mask & __GFP_NOFAIL)) 2888 return -ENOMEM; 2889 force: 2890 /* 2891 * The allocation either can't fail or will lead to more memory 2892 * being freed very soon. Allow memory usage go over the limit 2893 * temporarily by force charging it. 2894 */ 2895 page_counter_charge(&memcg->memory, nr_pages); 2896 if (do_memsw_account()) 2897 page_counter_charge(&memcg->memsw, nr_pages); 2898 2899 return 0; 2900 2901 done_restock: 2902 if (batch > nr_pages) 2903 refill_stock(memcg, batch - nr_pages); 2904 2905 /* 2906 * If the hierarchy is above the normal consumption range, schedule 2907 * reclaim on returning to userland. We can perform reclaim here 2908 * if __GFP_RECLAIM but let's always punt for simplicity and so that 2909 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2910 * not recorded as it most likely matches current's and won't 2911 * change in the meantime. As high limit is checked again before 2912 * reclaim, the cost of mismatch is negligible. 2913 */ 2914 do { 2915 bool mem_high, swap_high; 2916 2917 mem_high = page_counter_read(&memcg->memory) > 2918 READ_ONCE(memcg->memory.high); 2919 swap_high = page_counter_read(&memcg->swap) > 2920 READ_ONCE(memcg->swap.high); 2921 2922 /* Don't bother a random interrupted task */ 2923 if (in_interrupt()) { 2924 if (mem_high) { 2925 schedule_work(&memcg->high_work); 2926 break; 2927 } 2928 continue; 2929 } 2930 2931 if (mem_high || swap_high) { 2932 /* 2933 * The allocating tasks in this cgroup will need to do 2934 * reclaim or be throttled to prevent further growth 2935 * of the memory or swap footprints. 2936 * 2937 * Target some best-effort fairness between the tasks, 2938 * and distribute reclaim work and delay penalties 2939 * based on how much each task is actually allocating. 
2940 */ 2941 current->memcg_nr_pages_over_high += batch; 2942 set_notify_resume(current); 2943 break; 2944 } 2945 } while ((memcg = parent_mem_cgroup(memcg))); 2946 2947 return 0; 2948 } 2949 2950 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU) 2951 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) 2952 { 2953 if (mem_cgroup_is_root(memcg)) 2954 return; 2955 2956 page_counter_uncharge(&memcg->memory, nr_pages); 2957 if (do_memsw_account()) 2958 page_counter_uncharge(&memcg->memsw, nr_pages); 2959 } 2960 #endif 2961 2962 static void commit_charge(struct page *page, struct mem_cgroup *memcg) 2963 { 2964 VM_BUG_ON_PAGE(page_memcg(page), page); 2965 /* 2966 * Any of the following ensures page's memcg stability: 2967 * 2968 * - the page lock 2969 * - LRU isolation 2970 * - lock_page_memcg() 2971 * - exclusive reference 2972 */ 2973 page->memcg_data = (unsigned long)memcg; 2974 } 2975 2976 #ifdef CONFIG_MEMCG_KMEM 2977 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2978 gfp_t gfp) 2979 { 2980 unsigned int objects = objs_per_slab_page(s, page); 2981 void *vec; 2982 2983 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, 2984 page_to_nid(page)); 2985 if (!vec) 2986 return -ENOMEM; 2987 2988 if (!set_page_objcgs(page, vec)) 2989 kfree(vec); 2990 else 2991 kmemleak_not_leak(vec); 2992 2993 return 0; 2994 } 2995 2996 /* 2997 * Returns a pointer to the memory cgroup to which the kernel object is charged. 2998 * 2999 * A passed kernel object can be a slab object or a generic kernel page, so 3000 * different mechanisms for getting the memory cgroup pointer should be used. 3001 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller 3002 * can not know for sure how the kernel object is implemented. 3003 * mem_cgroup_from_obj() can be safely used in such cases. 3004 * 3005 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(), 3006 * cgroup_mutex, etc. 3007 */ 3008 struct mem_cgroup *mem_cgroup_from_obj(void *p) 3009 { 3010 struct page *page; 3011 3012 if (mem_cgroup_disabled()) 3013 return NULL; 3014 3015 page = virt_to_head_page(p); 3016 3017 /* 3018 * Slab objects are accounted individually, not per-page. 3019 * Memcg membership data for each individual object is saved in 3020 * the page->obj_cgroups. 3021 */ 3022 if (page_objcgs_check(page)) { 3023 struct obj_cgroup *objcg; 3024 unsigned int off; 3025 3026 off = obj_to_index(page->slab_cache, page, p); 3027 objcg = page_objcgs(page)[off]; 3028 if (objcg) 3029 return obj_cgroup_memcg(objcg); 3030 3031 return NULL; 3032 } 3033 3034 /* 3035 * page_memcg_check() is used here, because page_has_obj_cgroups() 3036 * check above could fail because the object cgroups vector wasn't set 3037 * at that moment, but it can be set concurrently. 3038 * page_memcg_check(page) will guarantee that a proper memory 3039 * cgroup pointer or NULL will be returned. 
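 *
 * Typical usage (sketch):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(p);
 *	if (memcg)
 *		(inspect or account against memcg)
 *	rcu_read_unlock();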
3040 */ 3041 return page_memcg_check(page); 3042 } 3043 3044 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) 3045 { 3046 struct obj_cgroup *objcg = NULL; 3047 struct mem_cgroup *memcg; 3048 3049 if (memcg_kmem_bypass()) 3050 return NULL; 3051 3052 rcu_read_lock(); 3053 if (unlikely(active_memcg())) 3054 memcg = active_memcg(); 3055 else 3056 memcg = mem_cgroup_from_task(current); 3057 3058 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 3059 objcg = rcu_dereference(memcg->objcg); 3060 if (objcg && obj_cgroup_tryget(objcg)) 3061 break; 3062 objcg = NULL; 3063 } 3064 rcu_read_unlock(); 3065 3066 return objcg; 3067 } 3068 3069 static int memcg_alloc_cache_id(void) 3070 { 3071 int id, size; 3072 int err; 3073 3074 id = ida_simple_get(&memcg_cache_ida, 3075 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); 3076 if (id < 0) 3077 return id; 3078 3079 if (id < memcg_nr_cache_ids) 3080 return id; 3081 3082 /* 3083 * There's no space for the new id in memcg_caches arrays, 3084 * so we have to grow them. 3085 */ 3086 down_write(&memcg_cache_ids_sem); 3087 3088 size = 2 * (id + 1); 3089 if (size < MEMCG_CACHES_MIN_SIZE) 3090 size = MEMCG_CACHES_MIN_SIZE; 3091 else if (size > MEMCG_CACHES_MAX_SIZE) 3092 size = MEMCG_CACHES_MAX_SIZE; 3093 3094 err = memcg_update_all_list_lrus(size); 3095 if (!err) 3096 memcg_nr_cache_ids = size; 3097 3098 up_write(&memcg_cache_ids_sem); 3099 3100 if (err) { 3101 ida_simple_remove(&memcg_cache_ida, id); 3102 return err; 3103 } 3104 return id; 3105 } 3106 3107 static void memcg_free_cache_id(int id) 3108 { 3109 ida_simple_remove(&memcg_cache_ida, id); 3110 } 3111 3112 /** 3113 * __memcg_kmem_charge: charge a number of kernel pages to a memcg 3114 * @memcg: memory cgroup to charge 3115 * @gfp: reclaim mode 3116 * @nr_pages: number of pages to charge 3117 * 3118 * Returns 0 on success, an error code on failure. 3119 */ 3120 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, 3121 unsigned int nr_pages) 3122 { 3123 struct page_counter *counter; 3124 int ret; 3125 3126 ret = try_charge(memcg, gfp, nr_pages); 3127 if (ret) 3128 return ret; 3129 3130 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && 3131 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { 3132 3133 /* 3134 * Enforce __GFP_NOFAIL allocation because callers are not 3135 * prepared to see failures and likely do not have any failure 3136 * handling code. 3137 */ 3138 if (gfp & __GFP_NOFAIL) { 3139 page_counter_charge(&memcg->kmem, nr_pages); 3140 return 0; 3141 } 3142 cancel_charge(memcg, nr_pages); 3143 return -ENOMEM; 3144 } 3145 return 0; 3146 } 3147 3148 /** 3149 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg 3150 * @memcg: memcg to uncharge 3151 * @nr_pages: number of pages to uncharge 3152 */ 3153 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) 3154 { 3155 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 3156 page_counter_uncharge(&memcg->kmem, nr_pages); 3157 3158 page_counter_uncharge(&memcg->memory, nr_pages); 3159 if (do_memsw_account()) 3160 page_counter_uncharge(&memcg->memsw, nr_pages); 3161 } 3162 3163 /** 3164 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup 3165 * @page: page to charge 3166 * @gfp: reclaim mode 3167 * @order: allocation order 3168 * 3169 * Returns 0 on success, an error code on failure. 
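 *
 * Example: an allocation like
 *
 *	p = (void *)__get_free_pages(GFP_KERNEL_ACCOUNT, 0);
 *
 * reaches this function through the __GFP_ACCOUNT path, and the charge
 * is undone by __memcg_kmem_uncharge_page() when the page is freed.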
3170 */ 3171 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) 3172 { 3173 struct mem_cgroup *memcg; 3174 int ret = 0; 3175 3176 memcg = get_mem_cgroup_from_current(); 3177 if (memcg && !mem_cgroup_is_root(memcg)) { 3178 ret = __memcg_kmem_charge(memcg, gfp, 1 << order); 3179 if (!ret) { 3180 page->memcg_data = (unsigned long)memcg | 3181 MEMCG_DATA_KMEM; 3182 return 0; 3183 } 3184 css_put(&memcg->css); 3185 } 3186 return ret; 3187 } 3188 3189 /** 3190 * __memcg_kmem_uncharge_page: uncharge a kmem page 3191 * @page: page to uncharge 3192 * @order: allocation order 3193 */ 3194 void __memcg_kmem_uncharge_page(struct page *page, int order) 3195 { 3196 struct mem_cgroup *memcg = page_memcg(page); 3197 unsigned int nr_pages = 1 << order; 3198 3199 if (!memcg) 3200 return; 3201 3202 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); 3203 __memcg_kmem_uncharge(memcg, nr_pages); 3204 page->memcg_data = 0; 3205 css_put(&memcg->css); 3206 } 3207 3208 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3209 { 3210 struct memcg_stock_pcp *stock; 3211 unsigned long flags; 3212 bool ret = false; 3213 3214 local_irq_save(flags); 3215 3216 stock = this_cpu_ptr(&memcg_stock); 3217 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { 3218 stock->nr_bytes -= nr_bytes; 3219 ret = true; 3220 } 3221 3222 local_irq_restore(flags); 3223 3224 return ret; 3225 } 3226 3227 static void drain_obj_stock(struct memcg_stock_pcp *stock) 3228 { 3229 struct obj_cgroup *old = stock->cached_objcg; 3230 3231 if (!old) 3232 return; 3233 3234 if (stock->nr_bytes) { 3235 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; 3236 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); 3237 3238 if (nr_pages) { 3239 rcu_read_lock(); 3240 __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages); 3241 rcu_read_unlock(); 3242 } 3243 3244 /* 3245 * The leftover is flushed to the centralized per-memcg value. 3246 * On the next attempt to refill obj stock it will be moved 3247 * to a per-cpu stock (probably, on an other CPU), see 3248 * refill_obj_stock(). 3249 * 3250 * How often it's flushed is a trade-off between the memory 3251 * limit enforcement accuracy and potential CPU contention, 3252 * so it might be changed in the future. 
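 *
 * For example (4 KiB pages): a cached stock->nr_bytes of 4296 is split
 * into nr_pages == 1, uncharged from the memcg page counters above, and
 * a 200-byte leftover that is parked in old->nr_charged_bytes here for a
 * later refill_obj_stock() to pick up.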
3253 */ 3254 atomic_add(nr_bytes, &old->nr_charged_bytes); 3255 stock->nr_bytes = 0; 3256 } 3257 3258 obj_cgroup_put(old); 3259 stock->cached_objcg = NULL; 3260 } 3261 3262 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, 3263 struct mem_cgroup *root_memcg) 3264 { 3265 struct mem_cgroup *memcg; 3266 3267 if (stock->cached_objcg) { 3268 memcg = obj_cgroup_memcg(stock->cached_objcg); 3269 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) 3270 return true; 3271 } 3272 3273 return false; 3274 } 3275 3276 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) 3277 { 3278 struct memcg_stock_pcp *stock; 3279 unsigned long flags; 3280 3281 local_irq_save(flags); 3282 3283 stock = this_cpu_ptr(&memcg_stock); 3284 if (stock->cached_objcg != objcg) { /* reset if necessary */ 3285 drain_obj_stock(stock); 3286 obj_cgroup_get(objcg); 3287 stock->cached_objcg = objcg; 3288 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0); 3289 } 3290 stock->nr_bytes += nr_bytes; 3291 3292 if (stock->nr_bytes > PAGE_SIZE) 3293 drain_obj_stock(stock); 3294 3295 local_irq_restore(flags); 3296 } 3297 3298 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) 3299 { 3300 struct mem_cgroup *memcg; 3301 unsigned int nr_pages, nr_bytes; 3302 int ret; 3303 3304 if (consume_obj_stock(objcg, size)) 3305 return 0; 3306 3307 /* 3308 * In theory, memcg->nr_charged_bytes can have enough 3309 * pre-charged bytes to satisfy the allocation. However, 3310 * flushing memcg->nr_charged_bytes requires two atomic 3311 * operations, and memcg->nr_charged_bytes can't be big, 3312 * so it's better to ignore it and try grab some new pages. 3313 * memcg->nr_charged_bytes will be flushed in 3314 * refill_obj_stock(), called from this function or 3315 * independently later. 3316 */ 3317 rcu_read_lock(); 3318 retry: 3319 memcg = obj_cgroup_memcg(objcg); 3320 if (unlikely(!css_tryget(&memcg->css))) 3321 goto retry; 3322 rcu_read_unlock(); 3323 3324 nr_pages = size >> PAGE_SHIFT; 3325 nr_bytes = size & (PAGE_SIZE - 1); 3326 3327 if (nr_bytes) 3328 nr_pages += 1; 3329 3330 ret = __memcg_kmem_charge(memcg, gfp, nr_pages); 3331 if (!ret && nr_bytes) 3332 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes); 3333 3334 css_put(&memcg->css); 3335 return ret; 3336 } 3337 3338 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) 3339 { 3340 refill_obj_stock(objcg, size); 3341 } 3342 3343 #endif /* CONFIG_MEMCG_KMEM */ 3344 3345 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3346 /* 3347 * Because page_memcg(head) is not set on compound tails, set it now. 3348 */ 3349 void mem_cgroup_split_huge_fixup(struct page *head) 3350 { 3351 struct mem_cgroup *memcg = page_memcg(head); 3352 int i; 3353 3354 if (mem_cgroup_disabled()) 3355 return; 3356 3357 for (i = 1; i < HPAGE_PMD_NR; i++) { 3358 css_get(&memcg->css); 3359 head[i].memcg_data = (unsigned long)memcg; 3360 } 3361 } 3362 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 3363 3364 #ifdef CONFIG_MEMCG_SWAP 3365 /** 3366 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. 3367 * @entry: swap entry to be moved 3368 * @from: mem_cgroup which the entry is moved from 3369 * @to: mem_cgroup which the entry is moved to 3370 * 3371 * It succeeds only when the swap_cgroup's record for this entry is the same 3372 * as the mem_cgroup's id of @from. 3373 * 3374 * Returns 0 on success, -EINVAL on failure. 
3375 * 3376 * The caller must have charged to @to, IOW, called page_counter_charge() about 3377 * both res and memsw, and called css_get(). 3378 */ 3379 static int mem_cgroup_move_swap_account(swp_entry_t entry, 3380 struct mem_cgroup *from, struct mem_cgroup *to) 3381 { 3382 unsigned short old_id, new_id; 3383 3384 old_id = mem_cgroup_id(from); 3385 new_id = mem_cgroup_id(to); 3386 3387 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3388 mod_memcg_state(from, MEMCG_SWAP, -1); 3389 mod_memcg_state(to, MEMCG_SWAP, 1); 3390 return 0; 3391 } 3392 return -EINVAL; 3393 } 3394 #else 3395 static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3396 struct mem_cgroup *from, struct mem_cgroup *to) 3397 { 3398 return -EINVAL; 3399 } 3400 #endif 3401 3402 static DEFINE_MUTEX(memcg_max_mutex); 3403 3404 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, 3405 unsigned long max, bool memsw) 3406 { 3407 bool enlarge = false; 3408 bool drained = false; 3409 int ret; 3410 bool limits_invariant; 3411 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; 3412 3413 do { 3414 if (signal_pending(current)) { 3415 ret = -EINTR; 3416 break; 3417 } 3418 3419 mutex_lock(&memcg_max_mutex); 3420 /* 3421 * Make sure that the new limit (memsw or memory limit) doesn't 3422 * break our basic invariant rule memory.max <= memsw.max. 3423 */ 3424 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : 3425 max <= memcg->memsw.max; 3426 if (!limits_invariant) { 3427 mutex_unlock(&memcg_max_mutex); 3428 ret = -EINVAL; 3429 break; 3430 } 3431 if (max > counter->max) 3432 enlarge = true; 3433 ret = page_counter_set_max(counter, max); 3434 mutex_unlock(&memcg_max_mutex); 3435 3436 if (!ret) 3437 break; 3438 3439 if (!drained) { 3440 drain_all_stock(memcg); 3441 drained = true; 3442 continue; 3443 } 3444 3445 if (!try_to_free_mem_cgroup_pages(memcg, 1, 3446 GFP_KERNEL, !memsw)) { 3447 ret = -EBUSY; 3448 break; 3449 } 3450 } while (true); 3451 3452 if (!ret && enlarge) 3453 memcg_oom_recover(memcg); 3454 3455 return ret; 3456 } 3457 3458 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 3459 gfp_t gfp_mask, 3460 unsigned long *total_scanned) 3461 { 3462 unsigned long nr_reclaimed = 0; 3463 struct mem_cgroup_per_node *mz, *next_mz = NULL; 3464 unsigned long reclaimed; 3465 int loop = 0; 3466 struct mem_cgroup_tree_per_node *mctz; 3467 unsigned long excess; 3468 unsigned long nr_scanned; 3469 3470 if (order > 0) 3471 return 0; 3472 3473 mctz = soft_limit_tree_node(pgdat->node_id); 3474 3475 /* 3476 * Do not even bother to check the largest node if the root 3477 * is empty. Do it lockless to prevent lock bouncing. Races 3478 * are acceptable as soft limit is best effort anyway. 
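 *
 * The tree is only populated by cgroup1 groups whose usage exceeds
 * their memory.soft_limit_in_bytes, and the group with the largest
 * excess is picked first below, so such groups are reclaimed from
 * preferentially when the node comes under global memory pressure.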
3479 */ 3480 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) 3481 return 0; 3482 3483 /* 3484 * This loop can run a while, specially if mem_cgroup's continuously 3485 * keep exceeding their soft limit and putting the system under 3486 * pressure 3487 */ 3488 do { 3489 if (next_mz) 3490 mz = next_mz; 3491 else 3492 mz = mem_cgroup_largest_soft_limit_node(mctz); 3493 if (!mz) 3494 break; 3495 3496 nr_scanned = 0; 3497 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, 3498 gfp_mask, &nr_scanned); 3499 nr_reclaimed += reclaimed; 3500 *total_scanned += nr_scanned; 3501 spin_lock_irq(&mctz->lock); 3502 __mem_cgroup_remove_exceeded(mz, mctz); 3503 3504 /* 3505 * If we failed to reclaim anything from this memory cgroup 3506 * it is time to move on to the next cgroup 3507 */ 3508 next_mz = NULL; 3509 if (!reclaimed) 3510 next_mz = __mem_cgroup_largest_soft_limit_node(mctz); 3511 3512 excess = soft_limit_excess(mz->memcg); 3513 /* 3514 * One school of thought says that we should not add 3515 * back the node to the tree if reclaim returns 0. 3516 * But our reclaim could return 0, simply because due 3517 * to priority we are exposing a smaller subset of 3518 * memory to reclaim from. Consider this as a longer 3519 * term TODO. 3520 */ 3521 /* If excess == 0, no tree ops */ 3522 __mem_cgroup_insert_exceeded(mz, mctz, excess); 3523 spin_unlock_irq(&mctz->lock); 3524 css_put(&mz->memcg->css); 3525 loop++; 3526 /* 3527 * Could not reclaim anything and there are no more 3528 * mem cgroups to try or we seem to be looping without 3529 * reclaiming anything. 3530 */ 3531 if (!nr_reclaimed && 3532 (next_mz == NULL || 3533 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) 3534 break; 3535 } while (!nr_reclaimed); 3536 if (next_mz) 3537 css_put(&next_mz->memcg->css); 3538 return nr_reclaimed; 3539 } 3540 3541 /* 3542 * Reclaims as many pages from the given memcg as possible. 3543 * 3544 * Caller is responsible for holding css reference for memcg. 3545 */ 3546 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) 3547 { 3548 int nr_retries = MAX_RECLAIM_RETRIES; 3549 3550 /* we call try-to-free pages for make this cgroup empty */ 3551 lru_add_drain_all(); 3552 3553 drain_all_stock(memcg); 3554 3555 /* try to free all pages in this cgroup */ 3556 while (nr_retries && page_counter_read(&memcg->memory)) { 3557 int progress; 3558 3559 if (signal_pending(current)) 3560 return -EINTR; 3561 3562 progress = try_to_free_mem_cgroup_pages(memcg, 1, 3563 GFP_KERNEL, true); 3564 if (!progress) { 3565 nr_retries--; 3566 /* maybe some writeback is necessary */ 3567 congestion_wait(BLK_RW_ASYNC, HZ/10); 3568 } 3569 3570 } 3571 3572 return 0; 3573 } 3574 3575 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of, 3576 char *buf, size_t nbytes, 3577 loff_t off) 3578 { 3579 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3580 3581 if (mem_cgroup_is_root(memcg)) 3582 return -EINVAL; 3583 return mem_cgroup_force_empty(memcg) ?: nbytes; 3584 } 3585 3586 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css, 3587 struct cftype *cft) 3588 { 3589 return 1; 3590 } 3591 3592 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, 3593 struct cftype *cft, u64 val) 3594 { 3595 if (val == 1) 3596 return 0; 3597 3598 pr_warn_once("Non-hierarchical mode is deprecated. 
" 3599 "Please report your usecase to linux-mm@kvack.org if you " 3600 "depend on this functionality.\n"); 3601 3602 return -EINVAL; 3603 } 3604 3605 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) 3606 { 3607 unsigned long val; 3608 3609 if (mem_cgroup_is_root(memcg)) { 3610 val = memcg_page_state(memcg, NR_FILE_PAGES) + 3611 memcg_page_state(memcg, NR_ANON_MAPPED); 3612 if (swap) 3613 val += memcg_page_state(memcg, MEMCG_SWAP); 3614 } else { 3615 if (!swap) 3616 val = page_counter_read(&memcg->memory); 3617 else 3618 val = page_counter_read(&memcg->memsw); 3619 } 3620 return val; 3621 } 3622 3623 enum { 3624 RES_USAGE, 3625 RES_LIMIT, 3626 RES_MAX_USAGE, 3627 RES_FAILCNT, 3628 RES_SOFT_LIMIT, 3629 }; 3630 3631 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, 3632 struct cftype *cft) 3633 { 3634 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3635 struct page_counter *counter; 3636 3637 switch (MEMFILE_TYPE(cft->private)) { 3638 case _MEM: 3639 counter = &memcg->memory; 3640 break; 3641 case _MEMSWAP: 3642 counter = &memcg->memsw; 3643 break; 3644 case _KMEM: 3645 counter = &memcg->kmem; 3646 break; 3647 case _TCP: 3648 counter = &memcg->tcpmem; 3649 break; 3650 default: 3651 BUG(); 3652 } 3653 3654 switch (MEMFILE_ATTR(cft->private)) { 3655 case RES_USAGE: 3656 if (counter == &memcg->memory) 3657 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; 3658 if (counter == &memcg->memsw) 3659 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; 3660 return (u64)page_counter_read(counter) * PAGE_SIZE; 3661 case RES_LIMIT: 3662 return (u64)counter->max * PAGE_SIZE; 3663 case RES_MAX_USAGE: 3664 return (u64)counter->watermark * PAGE_SIZE; 3665 case RES_FAILCNT: 3666 return counter->failcnt; 3667 case RES_SOFT_LIMIT: 3668 return (u64)memcg->soft_limit * PAGE_SIZE; 3669 default: 3670 BUG(); 3671 } 3672 } 3673 3674 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) 3675 { 3676 unsigned long stat[MEMCG_NR_STAT] = {0}; 3677 struct mem_cgroup *mi; 3678 int node, cpu, i; 3679 3680 for_each_online_cpu(cpu) 3681 for (i = 0; i < MEMCG_NR_STAT; i++) 3682 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); 3683 3684 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3685 for (i = 0; i < MEMCG_NR_STAT; i++) 3686 atomic_long_add(stat[i], &mi->vmstats[i]); 3687 3688 for_each_node(node) { 3689 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 3690 struct mem_cgroup_per_node *pi; 3691 3692 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3693 stat[i] = 0; 3694 3695 for_each_online_cpu(cpu) 3696 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3697 stat[i] += per_cpu( 3698 pn->lruvec_stat_cpu->count[i], cpu); 3699 3700 for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) 3701 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) 3702 atomic_long_add(stat[i], &pi->lruvec_stat[i]); 3703 } 3704 } 3705 3706 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) 3707 { 3708 unsigned long events[NR_VM_EVENT_ITEMS]; 3709 struct mem_cgroup *mi; 3710 int cpu, i; 3711 3712 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3713 events[i] = 0; 3714 3715 for_each_online_cpu(cpu) 3716 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3717 events[i] += per_cpu(memcg->vmstats_percpu->events[i], 3718 cpu); 3719 3720 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) 3721 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 3722 atomic_long_add(events[i], &mi->vmevents[i]); 3723 } 3724 3725 #ifdef CONFIG_MEMCG_KMEM 3726 static int memcg_online_kmem(struct mem_cgroup *memcg) 3727 { 3728 struct obj_cgroup 
*objcg; 3729 int memcg_id; 3730 3731 if (cgroup_memory_nokmem) 3732 return 0; 3733 3734 BUG_ON(memcg->kmemcg_id >= 0); 3735 BUG_ON(memcg->kmem_state); 3736 3737 memcg_id = memcg_alloc_cache_id(); 3738 if (memcg_id < 0) 3739 return memcg_id; 3740 3741 objcg = obj_cgroup_alloc(); 3742 if (!objcg) { 3743 memcg_free_cache_id(memcg_id); 3744 return -ENOMEM; 3745 } 3746 objcg->memcg = memcg; 3747 rcu_assign_pointer(memcg->objcg, objcg); 3748 3749 static_branch_enable(&memcg_kmem_enabled_key); 3750 3751 memcg->kmemcg_id = memcg_id; 3752 memcg->kmem_state = KMEM_ONLINE; 3753 3754 return 0; 3755 } 3756 3757 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3758 { 3759 struct cgroup_subsys_state *css; 3760 struct mem_cgroup *parent, *child; 3761 int kmemcg_id; 3762 3763 if (memcg->kmem_state != KMEM_ONLINE) 3764 return; 3765 3766 memcg->kmem_state = KMEM_ALLOCATED; 3767 3768 parent = parent_mem_cgroup(memcg); 3769 if (!parent) 3770 parent = root_mem_cgroup; 3771 3772 memcg_reparent_objcgs(memcg, parent); 3773 3774 kmemcg_id = memcg->kmemcg_id; 3775 BUG_ON(kmemcg_id < 0); 3776 3777 /* 3778 * Change kmemcg_id of this cgroup and all its descendants to the 3779 * parent's id, and then move all entries from this cgroup's list_lrus 3780 * to ones of the parent. After we have finished, all list_lrus 3781 * corresponding to this cgroup are guaranteed to remain empty. The 3782 * ordering is imposed by list_lru_node->lock taken by 3783 * memcg_drain_all_list_lrus(). 3784 */ 3785 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ 3786 css_for_each_descendant_pre(css, &memcg->css) { 3787 child = mem_cgroup_from_css(css); 3788 BUG_ON(child->kmemcg_id != kmemcg_id); 3789 child->kmemcg_id = parent->kmemcg_id; 3790 } 3791 rcu_read_unlock(); 3792 3793 memcg_drain_all_list_lrus(kmemcg_id, parent); 3794 3795 memcg_free_cache_id(kmemcg_id); 3796 } 3797 3798 static void memcg_free_kmem(struct mem_cgroup *memcg) 3799 { 3800 /* css_alloc() failed, offlining didn't happen */ 3801 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) 3802 memcg_offline_kmem(memcg); 3803 } 3804 #else 3805 static int memcg_online_kmem(struct mem_cgroup *memcg) 3806 { 3807 return 0; 3808 } 3809 static void memcg_offline_kmem(struct mem_cgroup *memcg) 3810 { 3811 } 3812 static void memcg_free_kmem(struct mem_cgroup *memcg) 3813 { 3814 } 3815 #endif /* CONFIG_MEMCG_KMEM */ 3816 3817 static int memcg_update_kmem_max(struct mem_cgroup *memcg, 3818 unsigned long max) 3819 { 3820 int ret; 3821 3822 mutex_lock(&memcg_max_mutex); 3823 ret = page_counter_set_max(&memcg->kmem, max); 3824 mutex_unlock(&memcg_max_mutex); 3825 return ret; 3826 } 3827 3828 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) 3829 { 3830 int ret; 3831 3832 mutex_lock(&memcg_max_mutex); 3833 3834 ret = page_counter_set_max(&memcg->tcpmem, max); 3835 if (ret) 3836 goto out; 3837 3838 if (!memcg->tcpmem_active) { 3839 /* 3840 * The active flag needs to be written after the static_key 3841 * update. This is what guarantees that the socket activation 3842 * function is the last one to run. See mem_cgroup_sk_alloc() 3843 * for details, and note that we don't mark any socket as 3844 * belonging to this memcg until that flag is up. 3845 * 3846 * We need to do this, because static_keys will span multiple 3847 * sites, but we can't control their order. If we mark a socket 3848 * as accounted, but the accounting functions are not patched in 3849 * yet, we'll lose accounting. 
3850 * 3851 * We never race with the readers in mem_cgroup_sk_alloc(), 3852 * because when this value change, the code to process it is not 3853 * patched in yet. 3854 */ 3855 static_branch_inc(&memcg_sockets_enabled_key); 3856 memcg->tcpmem_active = true; 3857 } 3858 out: 3859 mutex_unlock(&memcg_max_mutex); 3860 return ret; 3861 } 3862 3863 /* 3864 * The user of this function is... 3865 * RES_LIMIT. 3866 */ 3867 static ssize_t mem_cgroup_write(struct kernfs_open_file *of, 3868 char *buf, size_t nbytes, loff_t off) 3869 { 3870 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3871 unsigned long nr_pages; 3872 int ret; 3873 3874 buf = strstrip(buf); 3875 ret = page_counter_memparse(buf, "-1", &nr_pages); 3876 if (ret) 3877 return ret; 3878 3879 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3880 case RES_LIMIT: 3881 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ 3882 ret = -EINVAL; 3883 break; 3884 } 3885 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3886 case _MEM: 3887 ret = mem_cgroup_resize_max(memcg, nr_pages, false); 3888 break; 3889 case _MEMSWAP: 3890 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3891 break; 3892 case _KMEM: 3893 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. " 3894 "Please report your usecase to linux-mm@kvack.org if you " 3895 "depend on this functionality.\n"); 3896 ret = memcg_update_kmem_max(memcg, nr_pages); 3897 break; 3898 case _TCP: 3899 ret = memcg_update_tcp_max(memcg, nr_pages); 3900 break; 3901 } 3902 break; 3903 case RES_SOFT_LIMIT: 3904 memcg->soft_limit = nr_pages; 3905 ret = 0; 3906 break; 3907 } 3908 return ret ?: nbytes; 3909 } 3910 3911 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, 3912 size_t nbytes, loff_t off) 3913 { 3914 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 3915 struct page_counter *counter; 3916 3917 switch (MEMFILE_TYPE(of_cft(of)->private)) { 3918 case _MEM: 3919 counter = &memcg->memory; 3920 break; 3921 case _MEMSWAP: 3922 counter = &memcg->memsw; 3923 break; 3924 case _KMEM: 3925 counter = &memcg->kmem; 3926 break; 3927 case _TCP: 3928 counter = &memcg->tcpmem; 3929 break; 3930 default: 3931 BUG(); 3932 } 3933 3934 switch (MEMFILE_ATTR(of_cft(of)->private)) { 3935 case RES_MAX_USAGE: 3936 page_counter_reset_watermark(counter); 3937 break; 3938 case RES_FAILCNT: 3939 counter->failcnt = 0; 3940 break; 3941 default: 3942 BUG(); 3943 } 3944 3945 return nbytes; 3946 } 3947 3948 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css, 3949 struct cftype *cft) 3950 { 3951 return mem_cgroup_from_css(css)->move_charge_at_immigrate; 3952 } 3953 3954 #ifdef CONFIG_MMU 3955 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3956 struct cftype *cft, u64 val) 3957 { 3958 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3959 3960 if (val & ~MOVE_MASK) 3961 return -EINVAL; 3962 3963 /* 3964 * No kind of locking is needed in here, because ->can_attach() will 3965 * check this value once in the beginning of the process, and then carry 3966 * on with stale data. This means that changes to this value will only 3967 * affect task migrations starting after the change. 
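 *
 * For example, writing 3 (MOVE_ANON | MOVE_FILE) to cgroup1's
 * memory.move_charge_at_immigrate makes later migrations into this
 * memcg move both anonymous and file charges along with the task,
 * while a migration already past ->can_attach() keeps the old setting.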
3968 */ 3969 memcg->move_charge_at_immigrate = val; 3970 return 0; 3971 } 3972 #else 3973 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, 3974 struct cftype *cft, u64 val) 3975 { 3976 return -ENOSYS; 3977 } 3978 #endif 3979 3980 #ifdef CONFIG_NUMA 3981 3982 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE)) 3983 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) 3984 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 3985 3986 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 3987 int nid, unsigned int lru_mask, bool tree) 3988 { 3989 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 3990 unsigned long nr = 0; 3991 enum lru_list lru; 3992 3993 VM_BUG_ON((unsigned)nid >= nr_node_ids); 3994 3995 for_each_lru(lru) { 3996 if (!(BIT(lru) & lru_mask)) 3997 continue; 3998 if (tree) 3999 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); 4000 else 4001 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); 4002 } 4003 return nr; 4004 } 4005 4006 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, 4007 unsigned int lru_mask, 4008 bool tree) 4009 { 4010 unsigned long nr = 0; 4011 enum lru_list lru; 4012 4013 for_each_lru(lru) { 4014 if (!(BIT(lru) & lru_mask)) 4015 continue; 4016 if (tree) 4017 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); 4018 else 4019 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); 4020 } 4021 return nr; 4022 } 4023 4024 static int memcg_numa_stat_show(struct seq_file *m, void *v) 4025 { 4026 struct numa_stat { 4027 const char *name; 4028 unsigned int lru_mask; 4029 }; 4030 4031 static const struct numa_stat stats[] = { 4032 { "total", LRU_ALL }, 4033 { "file", LRU_ALL_FILE }, 4034 { "anon", LRU_ALL_ANON }, 4035 { "unevictable", BIT(LRU_UNEVICTABLE) }, 4036 }; 4037 const struct numa_stat *stat; 4038 int nid; 4039 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 4040 4041 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4042 seq_printf(m, "%s=%lu", stat->name, 4043 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4044 false)); 4045 for_each_node_state(nid, N_MEMORY) 4046 seq_printf(m, " N%d=%lu", nid, 4047 mem_cgroup_node_nr_lru_pages(memcg, nid, 4048 stat->lru_mask, false)); 4049 seq_putc(m, '\n'); 4050 } 4051 4052 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { 4053 4054 seq_printf(m, "hierarchical_%s=%lu", stat->name, 4055 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, 4056 true)); 4057 for_each_node_state(nid, N_MEMORY) 4058 seq_printf(m, " N%d=%lu", nid, 4059 mem_cgroup_node_nr_lru_pages(memcg, nid, 4060 stat->lru_mask, true)); 4061 seq_putc(m, '\n'); 4062 } 4063 4064 return 0; 4065 } 4066 #endif /* CONFIG_NUMA */ 4067 4068 static const unsigned int memcg1_stats[] = { 4069 NR_FILE_PAGES, 4070 NR_ANON_MAPPED, 4071 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4072 NR_ANON_THPS, 4073 #endif 4074 NR_SHMEM, 4075 NR_FILE_MAPPED, 4076 NR_FILE_DIRTY, 4077 NR_WRITEBACK, 4078 MEMCG_SWAP, 4079 }; 4080 4081 static const char *const memcg1_stat_names[] = { 4082 "cache", 4083 "rss", 4084 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4085 "rss_huge", 4086 #endif 4087 "shmem", 4088 "mapped_file", 4089 "dirty", 4090 "writeback", 4091 "swap", 4092 }; 4093 4094 /* Universal VM events cgroup1 shows, original sort order */ 4095 static const unsigned int memcg1_events[] = { 4096 PGPGIN, 4097 PGPGOUT, 4098 PGFAULT, 4099 PGMAJFAULT, 4100 }; 4101 4102 static int memcg_stat_show(struct seq_file *m, void *v) 4103 { 4104 struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); 4105 unsigned long memory, memsw; 4106 struct mem_cgroup *mi; 4107 unsigned int i; 4108 4109 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 4110 4111 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4112 unsigned long nr; 4113 4114 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4115 continue; 4116 nr = memcg_page_state_local(memcg, memcg1_stats[i]); 4117 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4118 if (memcg1_stats[i] == NR_ANON_THPS) 4119 nr *= HPAGE_PMD_NR; 4120 #endif 4121 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); 4122 } 4123 4124 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4125 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4126 memcg_events_local(memcg, memcg1_events[i])); 4127 4128 for (i = 0; i < NR_LRU_LISTS; i++) 4129 seq_printf(m, "%s %lu\n", lru_list_name(i), 4130 memcg_page_state_local(memcg, NR_LRU_BASE + i) * 4131 PAGE_SIZE); 4132 4133 /* Hierarchical information */ 4134 memory = memsw = PAGE_COUNTER_MAX; 4135 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { 4136 memory = min(memory, READ_ONCE(mi->memory.max)); 4137 memsw = min(memsw, READ_ONCE(mi->memsw.max)); 4138 } 4139 seq_printf(m, "hierarchical_memory_limit %llu\n", 4140 (u64)memory * PAGE_SIZE); 4141 if (do_memsw_account()) 4142 seq_printf(m, "hierarchical_memsw_limit %llu\n", 4143 (u64)memsw * PAGE_SIZE); 4144 4145 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4146 unsigned long nr; 4147 4148 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4149 continue; 4150 nr = memcg_page_state(memcg, memcg1_stats[i]); 4151 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4152 if (memcg1_stats[i] == NR_ANON_THPS) 4153 nr *= HPAGE_PMD_NR; 4154 #endif 4155 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 4156 (u64)nr * PAGE_SIZE); 4157 } 4158 4159 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4160 seq_printf(m, "total_%s %llu\n", 4161 vm_event_name(memcg1_events[i]), 4162 (u64)memcg_events(memcg, memcg1_events[i])); 4163 4164 for (i = 0; i < NR_LRU_LISTS; i++) 4165 seq_printf(m, "total_%s %llu\n", lru_list_name(i), 4166 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * 4167 PAGE_SIZE); 4168 4169 #ifdef CONFIG_DEBUG_VM 4170 { 4171 pg_data_t *pgdat; 4172 struct mem_cgroup_per_node *mz; 4173 unsigned long anon_cost = 0; 4174 unsigned long file_cost = 0; 4175 4176 for_each_online_pgdat(pgdat) { 4177 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); 4178 4179 anon_cost += mz->lruvec.anon_cost; 4180 file_cost += mz->lruvec.file_cost; 4181 } 4182 seq_printf(m, "anon_cost %lu\n", anon_cost); 4183 seq_printf(m, "file_cost %lu\n", file_cost); 4184 } 4185 #endif 4186 4187 return 0; 4188 } 4189 4190 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, 4191 struct cftype *cft) 4192 { 4193 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4194 4195 return mem_cgroup_swappiness(memcg); 4196 } 4197 4198 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, 4199 struct cftype *cft, u64 val) 4200 { 4201 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4202 4203 if (val > 100) 4204 return -EINVAL; 4205 4206 if (css->parent) 4207 memcg->swappiness = val; 4208 else 4209 vm_swappiness = val; 4210 4211 return 0; 4212 } 4213 4214 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) 4215 { 4216 struct mem_cgroup_threshold_ary *t; 4217 unsigned long usage; 4218 int i; 4219 4220 rcu_read_lock(); 4221 if (!swap) 4222 t = rcu_dereference(memcg->thresholds.primary); 4223 else 4224 t = 
rcu_dereference(memcg->memsw_thresholds.primary); 4225 4226 if (!t) 4227 goto unlock; 4228 4229 usage = mem_cgroup_usage(memcg, swap); 4230 4231 /* 4232 * current_threshold points to threshold just below or equal to usage. 4233 * If it's not true, a threshold was crossed after last 4234 * call of __mem_cgroup_threshold(). 4235 */ 4236 i = t->current_threshold; 4237 4238 /* 4239 * Iterate backward over array of thresholds starting from 4240 * current_threshold and check if a threshold is crossed. 4241 * If none of thresholds below usage is crossed, we read 4242 * only one element of the array here. 4243 */ 4244 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) 4245 eventfd_signal(t->entries[i].eventfd, 1); 4246 4247 /* i = current_threshold + 1 */ 4248 i++; 4249 4250 /* 4251 * Iterate forward over array of thresholds starting from 4252 * current_threshold+1 and check if a threshold is crossed. 4253 * If none of thresholds above usage is crossed, we read 4254 * only one element of the array here. 4255 */ 4256 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) 4257 eventfd_signal(t->entries[i].eventfd, 1); 4258 4259 /* Update current_threshold */ 4260 t->current_threshold = i - 1; 4261 unlock: 4262 rcu_read_unlock(); 4263 } 4264 4265 static void mem_cgroup_threshold(struct mem_cgroup *memcg) 4266 { 4267 while (memcg) { 4268 __mem_cgroup_threshold(memcg, false); 4269 if (do_memsw_account()) 4270 __mem_cgroup_threshold(memcg, true); 4271 4272 memcg = parent_mem_cgroup(memcg); 4273 } 4274 } 4275 4276 static int compare_thresholds(const void *a, const void *b) 4277 { 4278 const struct mem_cgroup_threshold *_a = a; 4279 const struct mem_cgroup_threshold *_b = b; 4280 4281 if (_a->threshold > _b->threshold) 4282 return 1; 4283 4284 if (_a->threshold < _b->threshold) 4285 return -1; 4286 4287 return 0; 4288 } 4289 4290 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) 4291 { 4292 struct mem_cgroup_eventfd_list *ev; 4293 4294 spin_lock(&memcg_oom_lock); 4295 4296 list_for_each_entry(ev, &memcg->oom_notify, list) 4297 eventfd_signal(ev->eventfd, 1); 4298 4299 spin_unlock(&memcg_oom_lock); 4300 return 0; 4301 } 4302 4303 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) 4304 { 4305 struct mem_cgroup *iter; 4306 4307 for_each_mem_cgroup_tree(iter, memcg) 4308 mem_cgroup_oom_notify_cb(iter); 4309 } 4310 4311 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4312 struct eventfd_ctx *eventfd, const char *args, enum res_type type) 4313 { 4314 struct mem_cgroup_thresholds *thresholds; 4315 struct mem_cgroup_threshold_ary *new; 4316 unsigned long threshold; 4317 unsigned long usage; 4318 int i, size, ret; 4319 4320 ret = page_counter_memparse(args, "-1", &threshold); 4321 if (ret) 4322 return ret; 4323 4324 mutex_lock(&memcg->thresholds_lock); 4325 4326 if (type == _MEM) { 4327 thresholds = &memcg->thresholds; 4328 usage = mem_cgroup_usage(memcg, false); 4329 } else if (type == _MEMSWAP) { 4330 thresholds = &memcg->memsw_thresholds; 4331 usage = mem_cgroup_usage(memcg, true); 4332 } else 4333 BUG(); 4334 4335 /* Check if a threshold crossed before adding a new one */ 4336 if (thresholds->primary) 4337 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4338 4339 size = thresholds->primary ? 
thresholds->primary->size + 1 : 1; 4340 4341 /* Allocate memory for new array of thresholds */ 4342 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); 4343 if (!new) { 4344 ret = -ENOMEM; 4345 goto unlock; 4346 } 4347 new->size = size; 4348 4349 /* Copy thresholds (if any) to new array */ 4350 if (thresholds->primary) 4351 memcpy(new->entries, thresholds->primary->entries, 4352 flex_array_size(new, entries, size - 1)); 4353 4354 /* Add new threshold */ 4355 new->entries[size - 1].eventfd = eventfd; 4356 new->entries[size - 1].threshold = threshold; 4357 4358 /* Sort thresholds. Registering of new threshold isn't time-critical */ 4359 sort(new->entries, size, sizeof(*new->entries), 4360 compare_thresholds, NULL); 4361 4362 /* Find current threshold */ 4363 new->current_threshold = -1; 4364 for (i = 0; i < size; i++) { 4365 if (new->entries[i].threshold <= usage) { 4366 /* 4367 * new->current_threshold will not be used until 4368 * rcu_assign_pointer(), so it's safe to increment 4369 * it here. 4370 */ 4371 ++new->current_threshold; 4372 } else 4373 break; 4374 } 4375 4376 /* Free old spare buffer and save old primary buffer as spare */ 4377 kfree(thresholds->spare); 4378 thresholds->spare = thresholds->primary; 4379 4380 rcu_assign_pointer(thresholds->primary, new); 4381 4382 /* To be sure that nobody uses thresholds */ 4383 synchronize_rcu(); 4384 4385 unlock: 4386 mutex_unlock(&memcg->thresholds_lock); 4387 4388 return ret; 4389 } 4390 4391 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, 4392 struct eventfd_ctx *eventfd, const char *args) 4393 { 4394 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); 4395 } 4396 4397 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, 4398 struct eventfd_ctx *eventfd, const char *args) 4399 { 4400 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); 4401 } 4402 4403 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4404 struct eventfd_ctx *eventfd, enum res_type type) 4405 { 4406 struct mem_cgroup_thresholds *thresholds; 4407 struct mem_cgroup_threshold_ary *new; 4408 unsigned long usage; 4409 int i, j, size, entries; 4410 4411 mutex_lock(&memcg->thresholds_lock); 4412 4413 if (type == _MEM) { 4414 thresholds = &memcg->thresholds; 4415 usage = mem_cgroup_usage(memcg, false); 4416 } else if (type == _MEMSWAP) { 4417 thresholds = &memcg->memsw_thresholds; 4418 usage = mem_cgroup_usage(memcg, true); 4419 } else 4420 BUG(); 4421 4422 if (!thresholds->primary) 4423 goto unlock; 4424 4425 /* Check if a threshold crossed before removing */ 4426 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 4427 4428 /* Calculate new number of threshold */ 4429 size = entries = 0; 4430 for (i = 0; i < thresholds->primary->size; i++) { 4431 if (thresholds->primary->entries[i].eventfd != eventfd) 4432 size++; 4433 else 4434 entries++; 4435 } 4436 4437 new = thresholds->spare; 4438 4439 /* If no items related to eventfd have been cleared, nothing to do */ 4440 if (!entries) 4441 goto unlock; 4442 4443 /* Set thresholds array to NULL if we don't have thresholds */ 4444 if (!size) { 4445 kfree(new); 4446 new = NULL; 4447 goto swap_buffers; 4448 } 4449 4450 new->size = size; 4451 4452 /* Copy thresholds and find current threshold */ 4453 new->current_threshold = -1; 4454 for (i = 0, j = 0; i < thresholds->primary->size; i++) { 4455 if (thresholds->primary->entries[i].eventfd == eventfd) 4456 continue; 4457 4458 new->entries[j] = thresholds->primary->entries[i]; 4459 if 
(new->entries[j].threshold <= usage) { 4460 /* 4461 * new->current_threshold will not be used 4462 * until rcu_assign_pointer(), so it's safe to increment 4463 * it here. 4464 */ 4465 ++new->current_threshold; 4466 } 4467 j++; 4468 } 4469 4470 swap_buffers: 4471 /* Swap primary and spare array */ 4472 thresholds->spare = thresholds->primary; 4473 4474 rcu_assign_pointer(thresholds->primary, new); 4475 4476 /* To be sure that nobody uses thresholds */ 4477 synchronize_rcu(); 4478 4479 /* If all events are unregistered, free the spare array */ 4480 if (!new) { 4481 kfree(thresholds->spare); 4482 thresholds->spare = NULL; 4483 } 4484 unlock: 4485 mutex_unlock(&memcg->thresholds_lock); 4486 } 4487 4488 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4489 struct eventfd_ctx *eventfd) 4490 { 4491 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); 4492 } 4493 4494 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, 4495 struct eventfd_ctx *eventfd) 4496 { 4497 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); 4498 } 4499 4500 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, 4501 struct eventfd_ctx *eventfd, const char *args) 4502 { 4503 struct mem_cgroup_eventfd_list *event; 4504 4505 event = kmalloc(sizeof(*event), GFP_KERNEL); 4506 if (!event) 4507 return -ENOMEM; 4508 4509 spin_lock(&memcg_oom_lock); 4510 4511 event->eventfd = eventfd; 4512 list_add(&event->list, &memcg->oom_notify); 4513 4514 /* already in OOM ? */ 4515 if (memcg->under_oom) 4516 eventfd_signal(eventfd, 1); 4517 spin_unlock(&memcg_oom_lock); 4518 4519 return 0; 4520 } 4521 4522 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, 4523 struct eventfd_ctx *eventfd) 4524 { 4525 struct mem_cgroup_eventfd_list *ev, *tmp; 4526 4527 spin_lock(&memcg_oom_lock); 4528 4529 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { 4530 if (ev->eventfd == eventfd) { 4531 list_del(&ev->list); 4532 kfree(ev); 4533 } 4534 } 4535 4536 spin_unlock(&memcg_oom_lock); 4537 } 4538 4539 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) 4540 { 4541 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); 4542 4543 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 4544 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 4545 seq_printf(sf, "oom_kill %lu\n", 4546 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 4547 return 0; 4548 } 4549 4550 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, 4551 struct cftype *cft, u64 val) 4552 { 4553 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4554 4555 /* cannot set to root cgroup and only 0 and 1 are allowed */ 4556 if (!css->parent || !((val == 0) || (val == 1))) 4557 return -EINVAL; 4558 4559 memcg->oom_kill_disable = val; 4560 if (!val) 4561 memcg_oom_recover(memcg); 4562 4563 return 0; 4564 } 4565 4566 #ifdef CONFIG_CGROUP_WRITEBACK 4567 4568 #include <trace/events/writeback.h> 4569 4570 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4571 { 4572 return wb_domain_init(&memcg->cgwb_domain, gfp); 4573 } 4574 4575 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4576 { 4577 wb_domain_exit(&memcg->cgwb_domain); 4578 } 4579 4580 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4581 { 4582 wb_domain_size_changed(&memcg->cgwb_domain); 4583 } 4584 4585 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) 4586 { 4587 struct mem_cgroup *memcg = 
mem_cgroup_from_css(wb->memcg_css); 4588 4589 if (!memcg->css.parent) 4590 return NULL; 4591 4592 return &memcg->cgwb_domain; 4593 } 4594 4595 /* 4596 * idx can be of type enum memcg_stat_item or node_stat_item. 4597 * Keep in sync with memcg_exact_page(). 4598 */ 4599 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 4600 { 4601 long x = atomic_long_read(&memcg->vmstats[idx]); 4602 int cpu; 4603 4604 for_each_online_cpu(cpu) 4605 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 4606 if (x < 0) 4607 x = 0; 4608 return x; 4609 } 4610 4611 /** 4612 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 4613 * @wb: bdi_writeback in question 4614 * @pfilepages: out parameter for number of file pages 4615 * @pheadroom: out parameter for number of allocatable pages according to memcg 4616 * @pdirty: out parameter for number of dirty pages 4617 * @pwriteback: out parameter for number of pages under writeback 4618 * 4619 * Determine the numbers of file, headroom, dirty, and writeback pages in 4620 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 4621 * is a bit more involved. 4622 * 4623 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 4624 * headroom is calculated as the lowest headroom of itself and the 4625 * ancestors. Note that this doesn't consider the actual amount of 4626 * available memory in the system. The caller should further cap 4627 * *@pheadroom accordingly. 4628 */ 4629 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 4630 unsigned long *pheadroom, unsigned long *pdirty, 4631 unsigned long *pwriteback) 4632 { 4633 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4634 struct mem_cgroup *parent; 4635 4636 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); 4637 4638 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); 4639 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + 4640 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); 4641 *pheadroom = PAGE_COUNTER_MAX; 4642 4643 while ((parent = parent_mem_cgroup(memcg))) { 4644 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4645 READ_ONCE(memcg->memory.high)); 4646 unsigned long used = page_counter_read(&memcg->memory); 4647 4648 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 4649 memcg = parent; 4650 } 4651 } 4652 4653 /* 4654 * Foreign dirty flushing 4655 * 4656 * There's an inherent mismatch between memcg and writeback. The former 4657 * trackes ownership per-page while the latter per-inode. This was a 4658 * deliberate design decision because honoring per-page ownership in the 4659 * writeback path is complicated, may lead to higher CPU and IO overheads 4660 * and deemed unnecessary given that write-sharing an inode across 4661 * different cgroups isn't a common use-case. 4662 * 4663 * Combined with inode majority-writer ownership switching, this works well 4664 * enough in most cases but there are some pathological cases. For 4665 * example, let's say there are two cgroups A and B which keep writing to 4666 * different but confined parts of the same inode. B owns the inode and 4667 * A's memory is limited far below B's. A's dirty ratio can rise enough to 4668 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid 4669 * triggering background writeback. A will be slowed down without a way to 4670 * make writeback of the dirty pages happen. 
 *
 * Conditions like the above can lead to a cgroup getting repeatedly and
 * severely throttled after making some progress after each
 * dirty_expire_interval while the underlying IO device is almost
 * completely idle.
 *
 * Solving this problem completely requires matching the ownership tracking
 * granularities between memcg and writeback in either direction.  However,
 * the more egregious behaviors can be avoided by simply remembering the
 * most recent foreign dirtying events and initiating remote flushes on
 * them when local writeback isn't enough to keep the memory clean enough.
 *
 * The following two functions implement such a mechanism.  When a foreign
 * page - a page whose memcg and writeback ownerships don't match - is
 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
 * decides that the memcg needs to sleep due to high dirty ratio, it calls
 * mem_cgroup_flush_foreign() which queues writeback on the recorded
 * foreign bdi_writebacks which haven't expired.  Both the numbers of
 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
 * limited to MEMCG_CGWB_FRN_CNT.
 *
 * The mechanism only remembers IDs and doesn't hold any object references.
 * As being wrong occasionally doesn't matter, updates and accesses to the
 * records are lockless and racy.
 */
void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = page_memcg(page);
	struct memcg_cgwb_frn *frn;
	u64 now = get_jiffies_64();
	u64 oldest_at = now;
	int oldest = -1;
	int i;

	trace_track_foreign_dirty(page, wb);

	/*
	 * Pick the slot to use.  If there is already a slot for @wb, keep
	 * using it.  If not, replace the oldest one which isn't being
	 * written out.
	 */
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		frn = &memcg->cgwb_frn[i];
		if (frn->bdi_id == wb->bdi->id &&
		    frn->memcg_id == wb->memcg_css->id)
			break;
		if (time_before64(frn->at, oldest_at) &&
		    atomic_read(&frn->done.cnt) == 1) {
			oldest = i;
			oldest_at = frn->at;
		}
	}

	if (i < MEMCG_CGWB_FRN_CNT) {
		/*
		 * Re-using an existing one.  Update timestamp lazily to
		 * avoid making the cacheline hot.  We want them to be
		 * reasonably up-to-date and significantly shorter than
		 * dirty_expire_interval as that's what expires the record.
		 * Use the shorter of 1s and dirty_expire_interval / 8.
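		 * (dirty_expire_interval is expressed in centiseconds, hence
		 * the "* 10" below to convert it to milliseconds; with the
		 * default of 3000 that works out to min(1s, 3.75s) = 1s.)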
4733 */ 4734 unsigned long update_intv = 4735 min_t(unsigned long, HZ, 4736 msecs_to_jiffies(dirty_expire_interval * 10) / 8); 4737 4738 if (time_before64(frn->at, now - update_intv)) 4739 frn->at = now; 4740 } else if (oldest >= 0) { 4741 /* replace the oldest free one */ 4742 frn = &memcg->cgwb_frn[oldest]; 4743 frn->bdi_id = wb->bdi->id; 4744 frn->memcg_id = wb->memcg_css->id; 4745 frn->at = now; 4746 } 4747 } 4748 4749 /* issue foreign writeback flushes for recorded foreign dirtying events */ 4750 void mem_cgroup_flush_foreign(struct bdi_writeback *wb) 4751 { 4752 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 4753 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10); 4754 u64 now = jiffies_64; 4755 int i; 4756 4757 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) { 4758 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; 4759 4760 /* 4761 * If the record is older than dirty_expire_interval, 4762 * writeback on it has already started. No need to kick it 4763 * off again. Also, don't start a new one if there's 4764 * already one in flight. 4765 */ 4766 if (time_after64(frn->at, now - intv) && 4767 atomic_read(&frn->done.cnt) == 1) { 4768 frn->at = 0; 4769 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); 4770 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0, 4771 WB_REASON_FOREIGN_FLUSH, 4772 &frn->done); 4773 } 4774 } 4775 } 4776 4777 #else /* CONFIG_CGROUP_WRITEBACK */ 4778 4779 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) 4780 { 4781 return 0; 4782 } 4783 4784 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) 4785 { 4786 } 4787 4788 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) 4789 { 4790 } 4791 4792 #endif /* CONFIG_CGROUP_WRITEBACK */ 4793 4794 /* 4795 * DO NOT USE IN NEW FILES. 4796 * 4797 * "cgroup.event_control" implementation. 4798 * 4799 * This is way over-engineered. It tries to support fully configurable 4800 * events for each user. Such level of flexibility is completely 4801 * unnecessary especially in the light of the planned unified hierarchy. 4802 * 4803 * Please deprecate this and replace with something simpler if at all 4804 * possible. 4805 */ 4806 4807 /* 4808 * Unregister event and free resources. 4809 * 4810 * Gets called from workqueue. 4811 */ 4812 static void memcg_event_remove(struct work_struct *work) 4813 { 4814 struct mem_cgroup_event *event = 4815 container_of(work, struct mem_cgroup_event, remove); 4816 struct mem_cgroup *memcg = event->memcg; 4817 4818 remove_wait_queue(event->wqh, &event->wait); 4819 4820 event->unregister_event(memcg, event->eventfd); 4821 4822 /* Notify userspace the event is going away. */ 4823 eventfd_signal(event->eventfd, 1); 4824 4825 eventfd_ctx_put(event->eventfd); 4826 kfree(event); 4827 css_put(&memcg->css); 4828 } 4829 4830 /* 4831 * Gets called on EPOLLHUP on eventfd when user closes it. 4832 * 4833 * Called with wqh->lock held and interrupts disabled. 4834 */ 4835 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 4836 int sync, void *key) 4837 { 4838 struct mem_cgroup_event *event = 4839 container_of(wait, struct mem_cgroup_event, wait); 4840 struct mem_cgroup *memcg = event->memcg; 4841 __poll_t flags = key_to_poll(key); 4842 4843 if (flags & EPOLLHUP) { 4844 /* 4845 * If the event has been detached at cgroup removal, we 4846 * can simply return knowing the other side will cleanup 4847 * for us. 
4848 * 4849 * We can't race against event freeing since the other 4850 * side will require wqh->lock via remove_wait_queue(), 4851 * which we hold. 4852 */ 4853 spin_lock(&memcg->event_list_lock); 4854 if (!list_empty(&event->list)) { 4855 list_del_init(&event->list); 4856 /* 4857 * We are in atomic context, but cgroup_event_remove() 4858 * may sleep, so we have to call it in workqueue. 4859 */ 4860 schedule_work(&event->remove); 4861 } 4862 spin_unlock(&memcg->event_list_lock); 4863 } 4864 4865 return 0; 4866 } 4867 4868 static void memcg_event_ptable_queue_proc(struct file *file, 4869 wait_queue_head_t *wqh, poll_table *pt) 4870 { 4871 struct mem_cgroup_event *event = 4872 container_of(pt, struct mem_cgroup_event, pt); 4873 4874 event->wqh = wqh; 4875 add_wait_queue(wqh, &event->wait); 4876 } 4877 4878 /* 4879 * DO NOT USE IN NEW FILES. 4880 * 4881 * Parse input and register new cgroup event handler. 4882 * 4883 * Input must be in format '<event_fd> <control_fd> <args>'. 4884 * Interpretation of args is defined by control file implementation. 4885 */ 4886 static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 4887 char *buf, size_t nbytes, loff_t off) 4888 { 4889 struct cgroup_subsys_state *css = of_css(of); 4890 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4891 struct mem_cgroup_event *event; 4892 struct cgroup_subsys_state *cfile_css; 4893 unsigned int efd, cfd; 4894 struct fd efile; 4895 struct fd cfile; 4896 const char *name; 4897 char *endp; 4898 int ret; 4899 4900 buf = strstrip(buf); 4901 4902 efd = simple_strtoul(buf, &endp, 10); 4903 if (*endp != ' ') 4904 return -EINVAL; 4905 buf = endp + 1; 4906 4907 cfd = simple_strtoul(buf, &endp, 10); 4908 if ((*endp != ' ') && (*endp != '\0')) 4909 return -EINVAL; 4910 buf = endp + 1; 4911 4912 event = kzalloc(sizeof(*event), GFP_KERNEL); 4913 if (!event) 4914 return -ENOMEM; 4915 4916 event->memcg = memcg; 4917 INIT_LIST_HEAD(&event->list); 4918 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 4919 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 4920 INIT_WORK(&event->remove, memcg_event_remove); 4921 4922 efile = fdget(efd); 4923 if (!efile.file) { 4924 ret = -EBADF; 4925 goto out_kfree; 4926 } 4927 4928 event->eventfd = eventfd_ctx_fileget(efile.file); 4929 if (IS_ERR(event->eventfd)) { 4930 ret = PTR_ERR(event->eventfd); 4931 goto out_put_efile; 4932 } 4933 4934 cfile = fdget(cfd); 4935 if (!cfile.file) { 4936 ret = -EBADF; 4937 goto out_put_eventfd; 4938 } 4939 4940 /* the process need read permission on control file */ 4941 /* AV: shouldn't we check that it's been opened for read instead? */ 4942 ret = inode_permission(file_inode(cfile.file), MAY_READ); 4943 if (ret < 0) 4944 goto out_put_cfile; 4945 4946 /* 4947 * Determine the event callbacks and set them in @event. This used 4948 * to be done via struct cftype but cgroup core no longer knows 4949 * about these events. The following is crude but the whole thing 4950 * is for compatibility anyway. 4951 * 4952 * DO NOT ADD NEW FILES. 
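 *
 * As a rough usage sketch (the descriptor numbers and the "50M" threshold
 * below are made up): a process creates an eventfd, opens one of the
 * control files recognized here, e.g. memory.usage_in_bytes, and writes
 *
 *	"<eventfd-fd> <usage_in_bytes-fd> 50M"
 *
 * to cgroup.event_control.  The trailing "50M" is the <args> string that
 * ends up in the register_event() callback chosen below.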
4953 */ 4954 name = cfile.file->f_path.dentry->d_name.name; 4955 4956 if (!strcmp(name, "memory.usage_in_bytes")) { 4957 event->register_event = mem_cgroup_usage_register_event; 4958 event->unregister_event = mem_cgroup_usage_unregister_event; 4959 } else if (!strcmp(name, "memory.oom_control")) { 4960 event->register_event = mem_cgroup_oom_register_event; 4961 event->unregister_event = mem_cgroup_oom_unregister_event; 4962 } else if (!strcmp(name, "memory.pressure_level")) { 4963 event->register_event = vmpressure_register_event; 4964 event->unregister_event = vmpressure_unregister_event; 4965 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 4966 event->register_event = memsw_cgroup_usage_register_event; 4967 event->unregister_event = memsw_cgroup_usage_unregister_event; 4968 } else { 4969 ret = -EINVAL; 4970 goto out_put_cfile; 4971 } 4972 4973 /* 4974 * Verify @cfile should belong to @css. Also, remaining events are 4975 * automatically removed on cgroup destruction but the removal is 4976 * asynchronous, so take an extra ref on @css. 4977 */ 4978 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 4979 &memory_cgrp_subsys); 4980 ret = -EINVAL; 4981 if (IS_ERR(cfile_css)) 4982 goto out_put_cfile; 4983 if (cfile_css != css) { 4984 css_put(cfile_css); 4985 goto out_put_cfile; 4986 } 4987 4988 ret = event->register_event(memcg, event->eventfd, buf); 4989 if (ret) 4990 goto out_put_css; 4991 4992 vfs_poll(efile.file, &event->pt); 4993 4994 spin_lock(&memcg->event_list_lock); 4995 list_add(&event->list, &memcg->event_list); 4996 spin_unlock(&memcg->event_list_lock); 4997 4998 fdput(cfile); 4999 fdput(efile); 5000 5001 return nbytes; 5002 5003 out_put_css: 5004 css_put(css); 5005 out_put_cfile: 5006 fdput(cfile); 5007 out_put_eventfd: 5008 eventfd_ctx_put(event->eventfd); 5009 out_put_efile: 5010 fdput(efile); 5011 out_kfree: 5012 kfree(event); 5013 5014 return ret; 5015 } 5016 5017 static struct cftype mem_cgroup_legacy_files[] = { 5018 { 5019 .name = "usage_in_bytes", 5020 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 5021 .read_u64 = mem_cgroup_read_u64, 5022 }, 5023 { 5024 .name = "max_usage_in_bytes", 5025 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 5026 .write = mem_cgroup_reset, 5027 .read_u64 = mem_cgroup_read_u64, 5028 }, 5029 { 5030 .name = "limit_in_bytes", 5031 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 5032 .write = mem_cgroup_write, 5033 .read_u64 = mem_cgroup_read_u64, 5034 }, 5035 { 5036 .name = "soft_limit_in_bytes", 5037 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 5038 .write = mem_cgroup_write, 5039 .read_u64 = mem_cgroup_read_u64, 5040 }, 5041 { 5042 .name = "failcnt", 5043 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 5044 .write = mem_cgroup_reset, 5045 .read_u64 = mem_cgroup_read_u64, 5046 }, 5047 { 5048 .name = "stat", 5049 .seq_show = memcg_stat_show, 5050 }, 5051 { 5052 .name = "force_empty", 5053 .write = mem_cgroup_force_empty_write, 5054 }, 5055 { 5056 .name = "use_hierarchy", 5057 .write_u64 = mem_cgroup_hierarchy_write, 5058 .read_u64 = mem_cgroup_hierarchy_read, 5059 }, 5060 { 5061 .name = "cgroup.event_control", /* XXX: for compat */ 5062 .write = memcg_write_event_control, 5063 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 5064 }, 5065 { 5066 .name = "swappiness", 5067 .read_u64 = mem_cgroup_swappiness_read, 5068 .write_u64 = mem_cgroup_swappiness_write, 5069 }, 5070 { 5071 .name = "move_charge_at_immigrate", 5072 .read_u64 = mem_cgroup_move_charge_read, 5073 .write_u64 = mem_cgroup_move_charge_write, 5074 
}, 5075 { 5076 .name = "oom_control", 5077 .seq_show = mem_cgroup_oom_control_read, 5078 .write_u64 = mem_cgroup_oom_control_write, 5079 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 5080 }, 5081 { 5082 .name = "pressure_level", 5083 }, 5084 #ifdef CONFIG_NUMA 5085 { 5086 .name = "numa_stat", 5087 .seq_show = memcg_numa_stat_show, 5088 }, 5089 #endif 5090 { 5091 .name = "kmem.limit_in_bytes", 5092 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 5093 .write = mem_cgroup_write, 5094 .read_u64 = mem_cgroup_read_u64, 5095 }, 5096 { 5097 .name = "kmem.usage_in_bytes", 5098 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 5099 .read_u64 = mem_cgroup_read_u64, 5100 }, 5101 { 5102 .name = "kmem.failcnt", 5103 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 5104 .write = mem_cgroup_reset, 5105 .read_u64 = mem_cgroup_read_u64, 5106 }, 5107 { 5108 .name = "kmem.max_usage_in_bytes", 5109 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 5110 .write = mem_cgroup_reset, 5111 .read_u64 = mem_cgroup_read_u64, 5112 }, 5113 #if defined(CONFIG_MEMCG_KMEM) && \ 5114 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) 5115 { 5116 .name = "kmem.slabinfo", 5117 .seq_show = memcg_slab_show, 5118 }, 5119 #endif 5120 { 5121 .name = "kmem.tcp.limit_in_bytes", 5122 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT), 5123 .write = mem_cgroup_write, 5124 .read_u64 = mem_cgroup_read_u64, 5125 }, 5126 { 5127 .name = "kmem.tcp.usage_in_bytes", 5128 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE), 5129 .read_u64 = mem_cgroup_read_u64, 5130 }, 5131 { 5132 .name = "kmem.tcp.failcnt", 5133 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT), 5134 .write = mem_cgroup_reset, 5135 .read_u64 = mem_cgroup_read_u64, 5136 }, 5137 { 5138 .name = "kmem.tcp.max_usage_in_bytes", 5139 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE), 5140 .write = mem_cgroup_reset, 5141 .read_u64 = mem_cgroup_read_u64, 5142 }, 5143 { }, /* terminate */ 5144 }; 5145 5146 /* 5147 * Private memory cgroup IDR 5148 * 5149 * Swap-out records and page cache shadow entries need to store memcg 5150 * references in constrained space, so we maintain an ID space that is 5151 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of 5152 * memory-controlled cgroups to 64k. 5153 * 5154 * However, there usually are many references to the offline CSS after 5155 * the cgroup has been destroyed, such as page cache or reclaimable 5156 * slab objects, that don't need to hang on to the ID. We want to keep 5157 * those dead CSS from occupying IDs, or we might quickly exhaust the 5158 * relatively small ID space and prevent the creation of new cgroups 5159 * even when there are much fewer than 64k cgroups - possibly none. 5160 * 5161 * Maintain a private 16-bit ID space for memcg, and allow the ID to 5162 * be freed and recycled when it's no longer needed, which is usually 5163 * when the CSS is offlined. 5164 * 5165 * The only exception to that are records of swapped out tmpfs/shmem 5166 * pages that need to be attributed to live ancestors on swapin. But 5167 * those references are manageable from userspace. 
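 *
 * (mem_cgroup_id_get_many() and mem_cgroup_id_put_many() below are how
 * such extra ID references are taken and dropped; see the mc.moved_swap
 * handling in the charge-moving code for one user.)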
5168 */ 5169 5170 static DEFINE_IDR(mem_cgroup_idr); 5171 5172 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) 5173 { 5174 if (memcg->id.id > 0) { 5175 idr_remove(&mem_cgroup_idr, memcg->id.id); 5176 memcg->id.id = 0; 5177 } 5178 } 5179 5180 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, 5181 unsigned int n) 5182 { 5183 refcount_add(n, &memcg->id.ref); 5184 } 5185 5186 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 5187 { 5188 if (refcount_sub_and_test(n, &memcg->id.ref)) { 5189 mem_cgroup_id_remove(memcg); 5190 5191 /* Memcg ID pins CSS */ 5192 css_put(&memcg->css); 5193 } 5194 } 5195 5196 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 5197 { 5198 mem_cgroup_id_put_many(memcg, 1); 5199 } 5200 5201 /** 5202 * mem_cgroup_from_id - look up a memcg from a memcg id 5203 * @id: the memcg id to look up 5204 * 5205 * Caller must hold rcu_read_lock(). 5206 */ 5207 struct mem_cgroup *mem_cgroup_from_id(unsigned short id) 5208 { 5209 WARN_ON_ONCE(!rcu_read_lock_held()); 5210 return idr_find(&mem_cgroup_idr, id); 5211 } 5212 5213 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5214 { 5215 struct mem_cgroup_per_node *pn; 5216 int tmp = node; 5217 /* 5218 * This routine is called against possible nodes. 5219 * But it's BUG to call kmalloc() against offline node. 5220 * 5221 * TODO: this routine can waste much memory for nodes which will 5222 * never be onlined. It's better to use memory hotplug callback 5223 * function. 5224 */ 5225 if (!node_state(node, N_NORMAL_MEMORY)) 5226 tmp = -1; 5227 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 5228 if (!pn) 5229 return 1; 5230 5231 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat, 5232 GFP_KERNEL_ACCOUNT); 5233 if (!pn->lruvec_stat_local) { 5234 kfree(pn); 5235 return 1; 5236 } 5237 5238 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat, 5239 GFP_KERNEL_ACCOUNT); 5240 if (!pn->lruvec_stat_cpu) { 5241 free_percpu(pn->lruvec_stat_local); 5242 kfree(pn); 5243 return 1; 5244 } 5245 5246 lruvec_init(&pn->lruvec); 5247 pn->usage_in_excess = 0; 5248 pn->on_tree = false; 5249 pn->memcg = memcg; 5250 5251 memcg->nodeinfo[node] = pn; 5252 return 0; 5253 } 5254 5255 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) 5256 { 5257 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; 5258 5259 if (!pn) 5260 return; 5261 5262 free_percpu(pn->lruvec_stat_cpu); 5263 free_percpu(pn->lruvec_stat_local); 5264 kfree(pn); 5265 } 5266 5267 static void __mem_cgroup_free(struct mem_cgroup *memcg) 5268 { 5269 int node; 5270 5271 for_each_node(node) 5272 free_mem_cgroup_per_node_info(memcg, node); 5273 free_percpu(memcg->vmstats_percpu); 5274 free_percpu(memcg->vmstats_local); 5275 kfree(memcg); 5276 } 5277 5278 static void mem_cgroup_free(struct mem_cgroup *memcg) 5279 { 5280 memcg_wb_domain_exit(memcg); 5281 /* 5282 * Flush percpu vmstats and vmevents to guarantee the value correctness 5283 * on parent's and all ancestor levels. 
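	 * Otherwise any per-cpu deltas that have not been propagated yet
	 * would be lost when the per-cpu areas are freed below, leaving the
	 * ancestors' counters out of sync.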
5284 */ 5285 memcg_flush_percpu_vmstats(memcg); 5286 memcg_flush_percpu_vmevents(memcg); 5287 __mem_cgroup_free(memcg); 5288 } 5289 5290 static struct mem_cgroup *mem_cgroup_alloc(void) 5291 { 5292 struct mem_cgroup *memcg; 5293 unsigned int size; 5294 int node; 5295 int __maybe_unused i; 5296 long error = -ENOMEM; 5297 5298 size = sizeof(struct mem_cgroup); 5299 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 5300 5301 memcg = kzalloc(size, GFP_KERNEL); 5302 if (!memcg) 5303 return ERR_PTR(error); 5304 5305 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, 5306 1, MEM_CGROUP_ID_MAX, 5307 GFP_KERNEL); 5308 if (memcg->id.id < 0) { 5309 error = memcg->id.id; 5310 goto fail; 5311 } 5312 5313 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5314 GFP_KERNEL_ACCOUNT); 5315 if (!memcg->vmstats_local) 5316 goto fail; 5317 5318 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, 5319 GFP_KERNEL_ACCOUNT); 5320 if (!memcg->vmstats_percpu) 5321 goto fail; 5322 5323 for_each_node(node) 5324 if (alloc_mem_cgroup_per_node_info(memcg, node)) 5325 goto fail; 5326 5327 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 5328 goto fail; 5329 5330 INIT_WORK(&memcg->high_work, high_work_func); 5331 INIT_LIST_HEAD(&memcg->oom_notify); 5332 mutex_init(&memcg->thresholds_lock); 5333 spin_lock_init(&memcg->move_lock); 5334 vmpressure_init(&memcg->vmpressure); 5335 INIT_LIST_HEAD(&memcg->event_list); 5336 spin_lock_init(&memcg->event_list_lock); 5337 memcg->socket_pressure = jiffies; 5338 #ifdef CONFIG_MEMCG_KMEM 5339 memcg->kmemcg_id = -1; 5340 INIT_LIST_HEAD(&memcg->objcg_list); 5341 #endif 5342 #ifdef CONFIG_CGROUP_WRITEBACK 5343 INIT_LIST_HEAD(&memcg->cgwb_list); 5344 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5345 memcg->cgwb_frn[i].done = 5346 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5347 #endif 5348 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5349 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5350 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5351 memcg->deferred_split_queue.split_queue_len = 0; 5352 #endif 5353 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 5354 return memcg; 5355 fail: 5356 mem_cgroup_id_remove(memcg); 5357 __mem_cgroup_free(memcg); 5358 return ERR_PTR(error); 5359 } 5360 5361 static struct cgroup_subsys_state * __ref 5362 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 5363 { 5364 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); 5365 struct mem_cgroup *memcg, *old_memcg; 5366 long error = -ENOMEM; 5367 5368 old_memcg = set_active_memcg(parent); 5369 memcg = mem_cgroup_alloc(); 5370 set_active_memcg(old_memcg); 5371 if (IS_ERR(memcg)) 5372 return ERR_CAST(memcg); 5373 5374 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5375 memcg->soft_limit = PAGE_COUNTER_MAX; 5376 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5377 if (parent) { 5378 memcg->swappiness = mem_cgroup_swappiness(parent); 5379 memcg->oom_kill_disable = parent->oom_kill_disable; 5380 5381 page_counter_init(&memcg->memory, &parent->memory); 5382 page_counter_init(&memcg->swap, &parent->swap); 5383 page_counter_init(&memcg->kmem, &parent->kmem); 5384 page_counter_init(&memcg->tcpmem, &parent->tcpmem); 5385 } else { 5386 page_counter_init(&memcg->memory, NULL); 5387 page_counter_init(&memcg->swap, NULL); 5388 page_counter_init(&memcg->kmem, NULL); 5389 page_counter_init(&memcg->tcpmem, NULL); 5390 5391 root_mem_cgroup = memcg; 5392 return &memcg->css; 5393 } 5394 5395 /* The following stuff does not apply to the root */ 
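	/*
	 * Bring up kernel memory accounting and, on the default hierarchy,
	 * socket memory accounting for the new memcg.  The
	 * static_branch_inc() below is paired with the static_branch_dec()
	 * in mem_cgroup_css_free().
	 */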
5396 error = memcg_online_kmem(memcg); 5397 if (error) 5398 goto fail; 5399 5400 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5401 static_branch_inc(&memcg_sockets_enabled_key); 5402 5403 return &memcg->css; 5404 fail: 5405 mem_cgroup_id_remove(memcg); 5406 mem_cgroup_free(memcg); 5407 return ERR_PTR(error); 5408 } 5409 5410 static int mem_cgroup_css_online(struct cgroup_subsys_state *css) 5411 { 5412 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5413 5414 /* 5415 * A memcg must be visible for memcg_expand_shrinker_maps() 5416 * by the time the maps are allocated. So, we allocate maps 5417 * here, when for_each_mem_cgroup() can't skip it. 5418 */ 5419 if (memcg_alloc_shrinker_maps(memcg)) { 5420 mem_cgroup_id_remove(memcg); 5421 return -ENOMEM; 5422 } 5423 5424 /* Online state pins memcg ID, memcg ID pins CSS */ 5425 refcount_set(&memcg->id.ref, 1); 5426 css_get(css); 5427 return 0; 5428 } 5429 5430 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 5431 { 5432 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5433 struct mem_cgroup_event *event, *tmp; 5434 5435 /* 5436 * Unregister events and notify userspace. 5437 * Notify userspace about cgroup removing only after rmdir of cgroup 5438 * directory to avoid race between userspace and kernelspace. 5439 */ 5440 spin_lock(&memcg->event_list_lock); 5441 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 5442 list_del_init(&event->list); 5443 schedule_work(&event->remove); 5444 } 5445 spin_unlock(&memcg->event_list_lock); 5446 5447 page_counter_set_min(&memcg->memory, 0); 5448 page_counter_set_low(&memcg->memory, 0); 5449 5450 memcg_offline_kmem(memcg); 5451 wb_memcg_offline(memcg); 5452 5453 drain_all_stock(memcg); 5454 5455 mem_cgroup_id_put(memcg); 5456 } 5457 5458 static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 5459 { 5460 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5461 5462 invalidate_reclaim_iterators(memcg); 5463 } 5464 5465 static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 5466 { 5467 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5468 int __maybe_unused i; 5469 5470 #ifdef CONFIG_CGROUP_WRITEBACK 5471 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) 5472 wb_wait_for_completion(&memcg->cgwb_frn[i].done); 5473 #endif 5474 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) 5475 static_branch_dec(&memcg_sockets_enabled_key); 5476 5477 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) 5478 static_branch_dec(&memcg_sockets_enabled_key); 5479 5480 vmpressure_cleanup(&memcg->vmpressure); 5481 cancel_work_sync(&memcg->high_work); 5482 mem_cgroup_remove_from_trees(memcg); 5483 memcg_free_shrinker_maps(memcg); 5484 memcg_free_kmem(memcg); 5485 mem_cgroup_free(memcg); 5486 } 5487 5488 /** 5489 * mem_cgroup_css_reset - reset the states of a mem_cgroup 5490 * @css: the target css 5491 * 5492 * Reset the states of the mem_cgroup associated with @css. This is 5493 * invoked when the userland requests disabling on the default hierarchy 5494 * but the memcg is pinned through dependency. The memcg should stop 5495 * applying policies and should revert to the vanilla state as it may be 5496 * made visible again. 5497 * 5498 * The current implementation only resets the essential configurations. 5499 * This needs to be expanded to cover all the visible parts. 
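 * Today that means lifting the memory, swap, kmem and tcpmem limits back
 * to PAGE_COUNTER_MAX and clearing the protection and soft limit settings,
 * as done below.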
5500 */ 5501 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 5502 { 5503 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5504 5505 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); 5506 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); 5507 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); 5508 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5509 page_counter_set_min(&memcg->memory, 0); 5510 page_counter_set_low(&memcg->memory, 0); 5511 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5512 memcg->soft_limit = PAGE_COUNTER_MAX; 5513 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); 5514 memcg_wb_domain_size_changed(memcg); 5515 } 5516 5517 #ifdef CONFIG_MMU 5518 /* Handlers for move charge at task migration. */ 5519 static int mem_cgroup_do_precharge(unsigned long count) 5520 { 5521 int ret; 5522 5523 /* Try a single bulk charge without reclaim first, kswapd may wake */ 5524 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count); 5525 if (!ret) { 5526 mc.precharge += count; 5527 return ret; 5528 } 5529 5530 /* Try charges one by one with reclaim, but do not retry */ 5531 while (count--) { 5532 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1); 5533 if (ret) 5534 return ret; 5535 mc.precharge++; 5536 cond_resched(); 5537 } 5538 return 0; 5539 } 5540 5541 union mc_target { 5542 struct page *page; 5543 swp_entry_t ent; 5544 }; 5545 5546 enum mc_target_type { 5547 MC_TARGET_NONE = 0, 5548 MC_TARGET_PAGE, 5549 MC_TARGET_SWAP, 5550 MC_TARGET_DEVICE, 5551 }; 5552 5553 static struct page *mc_handle_present_pte(struct vm_area_struct *vma, 5554 unsigned long addr, pte_t ptent) 5555 { 5556 struct page *page = vm_normal_page(vma, addr, ptent); 5557 5558 if (!page || !page_mapped(page)) 5559 return NULL; 5560 if (PageAnon(page)) { 5561 if (!(mc.flags & MOVE_ANON)) 5562 return NULL; 5563 } else { 5564 if (!(mc.flags & MOVE_FILE)) 5565 return NULL; 5566 } 5567 if (!get_page_unless_zero(page)) 5568 return NULL; 5569 5570 return page; 5571 } 5572 5573 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE) 5574 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5575 pte_t ptent, swp_entry_t *entry) 5576 { 5577 struct page *page = NULL; 5578 swp_entry_t ent = pte_to_swp_entry(ptent); 5579 5580 if (!(mc.flags & MOVE_ANON)) 5581 return NULL; 5582 5583 /* 5584 * Handle MEMORY_DEVICE_PRIVATE which are ZONE_DEVICE page belonging to 5585 * a device and because they are not accessible by CPU they are store 5586 * as special swap entry in the CPU page table. 5587 */ 5588 if (is_device_private_entry(ent)) { 5589 page = device_private_entry_to_page(ent); 5590 /* 5591 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have 5592 * a refcount of 1 when free (unlike normal page) 5593 */ 5594 if (!page_ref_add_unless(page, 1, 1)) 5595 return NULL; 5596 return page; 5597 } 5598 5599 if (non_swap_entry(ent)) 5600 return NULL; 5601 5602 /* 5603 * Because lookup_swap_cache() updates some statistics counter, 5604 * we call find_get_page() with swapper_space directly. 
5605 */ 5606 page = find_get_page(swap_address_space(ent), swp_offset(ent)); 5607 entry->val = ent.val; 5608 5609 return page; 5610 } 5611 #else 5612 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5613 pte_t ptent, swp_entry_t *entry) 5614 { 5615 return NULL; 5616 } 5617 #endif 5618 5619 static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5620 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5621 { 5622 if (!vma->vm_file) /* anonymous vma */ 5623 return NULL; 5624 if (!(mc.flags & MOVE_FILE)) 5625 return NULL; 5626 5627 /* page is moved even if it's not RSS of this task(page-faulted). */ 5628 /* shmem/tmpfs may report page out on swap: account for that too. */ 5629 return find_get_incore_page(vma->vm_file->f_mapping, 5630 linear_page_index(vma, addr)); 5631 } 5632 5633 /** 5634 * mem_cgroup_move_account - move account of the page 5635 * @page: the page 5636 * @compound: charge the page as compound or small page 5637 * @from: mem_cgroup which the page is moved from. 5638 * @to: mem_cgroup which the page is moved to. @from != @to. 5639 * 5640 * The caller must make sure the page is not on LRU (isolate_page() is useful.) 5641 * 5642 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 5643 * from old cgroup. 5644 */ 5645 static int mem_cgroup_move_account(struct page *page, 5646 bool compound, 5647 struct mem_cgroup *from, 5648 struct mem_cgroup *to) 5649 { 5650 struct lruvec *from_vec, *to_vec; 5651 struct pglist_data *pgdat; 5652 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; 5653 int ret; 5654 5655 VM_BUG_ON(from == to); 5656 VM_BUG_ON_PAGE(PageLRU(page), page); 5657 VM_BUG_ON(compound && !PageTransHuge(page)); 5658 5659 /* 5660 * Prevent mem_cgroup_migrate() from looking at 5661 * page's memory cgroup of its source page while we change it. 5662 */ 5663 ret = -EBUSY; 5664 if (!trylock_page(page)) 5665 goto out; 5666 5667 ret = -EINVAL; 5668 if (page_memcg(page) != from) 5669 goto out_unlock; 5670 5671 pgdat = page_pgdat(page); 5672 from_vec = mem_cgroup_lruvec(from, pgdat); 5673 to_vec = mem_cgroup_lruvec(to, pgdat); 5674 5675 lock_page_memcg(page); 5676 5677 if (PageAnon(page)) { 5678 if (page_mapped(page)) { 5679 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); 5680 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages); 5681 if (PageTransHuge(page)) { 5682 __mod_lruvec_state(from_vec, NR_ANON_THPS, 5683 -nr_pages); 5684 __mod_lruvec_state(to_vec, NR_ANON_THPS, 5685 nr_pages); 5686 } 5687 5688 } 5689 } else { 5690 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); 5691 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages); 5692 5693 if (PageSwapBacked(page)) { 5694 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); 5695 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages); 5696 } 5697 5698 if (page_mapped(page)) { 5699 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); 5700 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); 5701 } 5702 5703 if (PageDirty(page)) { 5704 struct address_space *mapping = page_mapping(page); 5705 5706 if (mapping_can_writeback(mapping)) { 5707 __mod_lruvec_state(from_vec, NR_FILE_DIRTY, 5708 -nr_pages); 5709 __mod_lruvec_state(to_vec, NR_FILE_DIRTY, 5710 nr_pages); 5711 } 5712 } 5713 } 5714 5715 if (PageWriteback(page)) { 5716 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); 5717 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages); 5718 } 5719 5720 /* 5721 * All state has been migrated, let's switch to the new memcg. 
 *
	 * It is safe to change page's memcg here because the page
	 * is referenced, charged, isolated, and locked: we can't race
	 * with (un)charging, migration, LRU putback, or anything else
	 * that would rely on a stable page's memory cgroup.
	 *
	 * Note that lock_page_memcg is a memcg lock, not a page lock,
	 * to save space. As soon as we switch page's memory cgroup to a
	 * new memcg that isn't locked, the above state can change
	 * concurrently again. Make sure we're truly done with it.
	 */
	smp_mb();

	css_get(&to->css);
	css_put(&from->css);

	page->memcg_data = (unsigned long)to;

	__unlock_page_memcg(from);

	ret = 0;

	local_irq_disable();
	mem_cgroup_charge_statistics(to, page, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma to which the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored
 *          (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in
 *     target->page with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is
 *     MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page and thus not on the lru).
 *     For now such a page is charged like a regular page would be, as for
 *     all intents and purposes it is just special memory taking the place
 *     of a regular page.
 *
 * See Documentation/vm/hmm.txt and include/linux/hmm.h
 *
 * Called with pte lock held.
 */

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, ptent, &ent);
	else if (pte_none(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only a loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
		if (page_memcg(page) == mc.from) {
			ret = MC_TARGET_PAGE;
			if (is_device_private_page(page))
				ret = MC_TARGET_DEVICE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/*
	 * There is a swap entry and a page doesn't exist or isn't charged.
	 * But we cannot move a tail-page in a THP.
5817 */ 5818 if (ent.val && !ret && (!page || !PageTransCompound(page)) && 5819 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 5820 ret = MC_TARGET_SWAP; 5821 if (target) 5822 target->ent = ent; 5823 } 5824 return ret; 5825 } 5826 5827 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5828 /* 5829 * We don't consider PMD mapped swapping or file mapped pages because THP does 5830 * not support them for now. 5831 * Caller should make sure that pmd_trans_huge(pmd) is true. 5832 */ 5833 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5834 unsigned long addr, pmd_t pmd, union mc_target *target) 5835 { 5836 struct page *page = NULL; 5837 enum mc_target_type ret = MC_TARGET_NONE; 5838 5839 if (unlikely(is_swap_pmd(pmd))) { 5840 VM_BUG_ON(thp_migration_supported() && 5841 !is_pmd_migration_entry(pmd)); 5842 return ret; 5843 } 5844 page = pmd_page(pmd); 5845 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 5846 if (!(mc.flags & MOVE_ANON)) 5847 return ret; 5848 if (page_memcg(page) == mc.from) { 5849 ret = MC_TARGET_PAGE; 5850 if (target) { 5851 get_page(page); 5852 target->page = page; 5853 } 5854 } 5855 return ret; 5856 } 5857 #else 5858 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 5859 unsigned long addr, pmd_t pmd, union mc_target *target) 5860 { 5861 return MC_TARGET_NONE; 5862 } 5863 #endif 5864 5865 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 5866 unsigned long addr, unsigned long end, 5867 struct mm_walk *walk) 5868 { 5869 struct vm_area_struct *vma = walk->vma; 5870 pte_t *pte; 5871 spinlock_t *ptl; 5872 5873 ptl = pmd_trans_huge_lock(pmd, vma); 5874 if (ptl) { 5875 /* 5876 * Note their can not be MC_TARGET_DEVICE for now as we do not 5877 * support transparent huge page with MEMORY_DEVICE_PRIVATE but 5878 * this might change. 5879 */ 5880 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 5881 mc.precharge += HPAGE_PMD_NR; 5882 spin_unlock(ptl); 5883 return 0; 5884 } 5885 5886 if (pmd_trans_unstable(pmd)) 5887 return 0; 5888 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 5889 for (; addr != end; pte++, addr += PAGE_SIZE) 5890 if (get_mctgt_type(vma, addr, *pte, NULL)) 5891 mc.precharge++; /* increment precharge temporarily */ 5892 pte_unmap_unlock(pte - 1, ptl); 5893 cond_resched(); 5894 5895 return 0; 5896 } 5897 5898 static const struct mm_walk_ops precharge_walk_ops = { 5899 .pmd_entry = mem_cgroup_count_precharge_pte_range, 5900 }; 5901 5902 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 5903 { 5904 unsigned long precharge; 5905 5906 mmap_read_lock(mm); 5907 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); 5908 mmap_read_unlock(mm); 5909 5910 precharge = mc.precharge; 5911 mc.precharge = 0; 5912 5913 return precharge; 5914 } 5915 5916 static int mem_cgroup_precharge_mc(struct mm_struct *mm) 5917 { 5918 unsigned long precharge = mem_cgroup_count_precharge(mm); 5919 5920 VM_BUG_ON(mc.moving_task); 5921 mc.moving_task = current; 5922 return mem_cgroup_do_precharge(precharge); 5923 } 5924 5925 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. 
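 * Called from mem_cgroup_clear_mc() and, when mmap_read_trylock() fails,
 * from mem_cgroup_move_charge() before the attempt is retried.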
*/ 5926 static void __mem_cgroup_clear_mc(void) 5927 { 5928 struct mem_cgroup *from = mc.from; 5929 struct mem_cgroup *to = mc.to; 5930 5931 /* we must uncharge all the leftover precharges from mc.to */ 5932 if (mc.precharge) { 5933 cancel_charge(mc.to, mc.precharge); 5934 mc.precharge = 0; 5935 } 5936 /* 5937 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 5938 * we must uncharge here. 5939 */ 5940 if (mc.moved_charge) { 5941 cancel_charge(mc.from, mc.moved_charge); 5942 mc.moved_charge = 0; 5943 } 5944 /* we must fixup refcnts and charges */ 5945 if (mc.moved_swap) { 5946 /* uncharge swap account from the old cgroup */ 5947 if (!mem_cgroup_is_root(mc.from)) 5948 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 5949 5950 mem_cgroup_id_put_many(mc.from, mc.moved_swap); 5951 5952 /* 5953 * we charged both to->memory and to->memsw, so we 5954 * should uncharge to->memory. 5955 */ 5956 if (!mem_cgroup_is_root(mc.to)) 5957 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 5958 5959 mc.moved_swap = 0; 5960 } 5961 memcg_oom_recover(from); 5962 memcg_oom_recover(to); 5963 wake_up_all(&mc.waitq); 5964 } 5965 5966 static void mem_cgroup_clear_mc(void) 5967 { 5968 struct mm_struct *mm = mc.mm; 5969 5970 /* 5971 * we must clear moving_task before waking up waiters at the end of 5972 * task migration. 5973 */ 5974 mc.moving_task = NULL; 5975 __mem_cgroup_clear_mc(); 5976 spin_lock(&mc.lock); 5977 mc.from = NULL; 5978 mc.to = NULL; 5979 mc.mm = NULL; 5980 spin_unlock(&mc.lock); 5981 5982 mmput(mm); 5983 } 5984 5985 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 5986 { 5987 struct cgroup_subsys_state *css; 5988 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ 5989 struct mem_cgroup *from; 5990 struct task_struct *leader, *p; 5991 struct mm_struct *mm; 5992 unsigned long move_flags; 5993 int ret = 0; 5994 5995 /* charge immigration isn't supported on the default hierarchy */ 5996 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5997 return 0; 5998 5999 /* 6000 * Multi-process migrations only happen on the default hierarchy 6001 * where charge immigration is not used. Perform charge 6002 * immigration if @tset contains a leader and whine if there are 6003 * multiple. 6004 */ 6005 p = NULL; 6006 cgroup_taskset_for_each_leader(leader, css, tset) { 6007 WARN_ON_ONCE(p); 6008 p = leader; 6009 memcg = mem_cgroup_from_css(css); 6010 } 6011 if (!p) 6012 return 0; 6013 6014 /* 6015 * We are now commited to this value whatever it is. Changes in this 6016 * tunable will only affect upcoming migrations, not the current one. 6017 * So we need to save it, and keep it going. 
6018 */ 6019 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); 6020 if (!move_flags) 6021 return 0; 6022 6023 from = mem_cgroup_from_task(p); 6024 6025 VM_BUG_ON(from == memcg); 6026 6027 mm = get_task_mm(p); 6028 if (!mm) 6029 return 0; 6030 /* We move charges only when we move an owner of the mm */ 6031 if (mm->owner == p) { 6032 VM_BUG_ON(mc.from); 6033 VM_BUG_ON(mc.to); 6034 VM_BUG_ON(mc.precharge); 6035 VM_BUG_ON(mc.moved_charge); 6036 VM_BUG_ON(mc.moved_swap); 6037 6038 spin_lock(&mc.lock); 6039 mc.mm = mm; 6040 mc.from = from; 6041 mc.to = memcg; 6042 mc.flags = move_flags; 6043 spin_unlock(&mc.lock); 6044 /* We set mc.moving_task later */ 6045 6046 ret = mem_cgroup_precharge_mc(mm); 6047 if (ret) 6048 mem_cgroup_clear_mc(); 6049 } else { 6050 mmput(mm); 6051 } 6052 return ret; 6053 } 6054 6055 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6056 { 6057 if (mc.to) 6058 mem_cgroup_clear_mc(); 6059 } 6060 6061 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, 6062 unsigned long addr, unsigned long end, 6063 struct mm_walk *walk) 6064 { 6065 int ret = 0; 6066 struct vm_area_struct *vma = walk->vma; 6067 pte_t *pte; 6068 spinlock_t *ptl; 6069 enum mc_target_type target_type; 6070 union mc_target target; 6071 struct page *page; 6072 6073 ptl = pmd_trans_huge_lock(pmd, vma); 6074 if (ptl) { 6075 if (mc.precharge < HPAGE_PMD_NR) { 6076 spin_unlock(ptl); 6077 return 0; 6078 } 6079 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); 6080 if (target_type == MC_TARGET_PAGE) { 6081 page = target.page; 6082 if (!isolate_lru_page(page)) { 6083 if (!mem_cgroup_move_account(page, true, 6084 mc.from, mc.to)) { 6085 mc.precharge -= HPAGE_PMD_NR; 6086 mc.moved_charge += HPAGE_PMD_NR; 6087 } 6088 putback_lru_page(page); 6089 } 6090 put_page(page); 6091 } else if (target_type == MC_TARGET_DEVICE) { 6092 page = target.page; 6093 if (!mem_cgroup_move_account(page, true, 6094 mc.from, mc.to)) { 6095 mc.precharge -= HPAGE_PMD_NR; 6096 mc.moved_charge += HPAGE_PMD_NR; 6097 } 6098 put_page(page); 6099 } 6100 spin_unlock(ptl); 6101 return 0; 6102 } 6103 6104 if (pmd_trans_unstable(pmd)) 6105 return 0; 6106 retry: 6107 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 6108 for (; addr != end; addr += PAGE_SIZE) { 6109 pte_t ptent = *(pte++); 6110 bool device = false; 6111 swp_entry_t ent; 6112 6113 if (!mc.precharge) 6114 break; 6115 6116 switch (get_mctgt_type(vma, addr, ptent, &target)) { 6117 case MC_TARGET_DEVICE: 6118 device = true; 6119 fallthrough; 6120 case MC_TARGET_PAGE: 6121 page = target.page; 6122 /* 6123 * We can have a part of the split pmd here. Moving it 6124 * can be done but it would be too convoluted so simply 6125 * ignore such a partial THP and keep it in the original 6126 * memcg. There should be somebody mapping the head. 6127 */ 6128 if (PageTransCompound(page)) 6129 goto put; 6130 if (!device && isolate_lru_page(page)) 6131 goto put; 6132 if (!mem_cgroup_move_account(page, false, 6133 mc.from, mc.to)) { 6134 mc.precharge--; 6135 /* we uncharge from mc.from later. */ 6136 mc.moved_charge++; 6137 } 6138 if (!device) 6139 putback_lru_page(page); 6140 put: /* get_mctgt_type() gets the page */ 6141 put_page(page); 6142 break; 6143 case MC_TARGET_SWAP: 6144 ent = target.ent; 6145 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { 6146 mc.precharge--; 6147 mem_cgroup_id_get_many(mc.to, 1); 6148 /* we fix up other refcnts and charges later.
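 * (that fixup is done by __mem_cgroup_clear_mc(), which uncharges
 * mc.from's memsw and mc.to's memory for every moved swap entry)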
*/ 6149 mc.moved_swap++; 6150 } 6151 break; 6152 default: 6153 break; 6154 } 6155 } 6156 pte_unmap_unlock(pte - 1, ptl); 6157 cond_resched(); 6158 6159 if (addr != end) { 6160 /* 6161 * We have consumed all precharges we got in can_attach(). 6162 * We try to charge one page at a time from here on, but stop 6163 * doing additional charges to mc.to once a charge has failed 6164 * during this attach() phase. 6165 */ 6166 ret = mem_cgroup_do_precharge(1); 6167 if (!ret) 6168 goto retry; 6169 } 6170 6171 return ret; 6172 } 6173 6174 static const struct mm_walk_ops charge_walk_ops = { 6175 .pmd_entry = mem_cgroup_move_charge_pte_range, 6176 }; 6177 6178 static void mem_cgroup_move_charge(void) 6179 { 6180 lru_add_drain_all(); 6181 /* 6182 * Signal lock_page_memcg() to take the memcg's move_lock 6183 * while we're moving its pages to another memcg. Then wait 6184 * for already started RCU-only updates to finish. 6185 */ 6186 atomic_inc(&mc.from->moving_account); 6187 synchronize_rcu(); 6188 retry: 6189 if (unlikely(!mmap_read_trylock(mc.mm))) { 6190 /* 6191 * Someone holding the mmap_lock might be waiting on our 6192 * waitq. So we cancel all extra charges, wake up all waiters, 6193 * and retry. Because we cancel precharges, we might not be able 6194 * to move enough charges, but moving charge is a best-effort 6195 * feature anyway, so it wouldn't be a big problem. 6196 */ 6197 __mem_cgroup_clear_mc(); 6198 cond_resched(); 6199 goto retry; 6200 } 6201 /* 6202 * When we have consumed all precharges and fail to do an 6203 * additional charge, the page walk just aborts. 6204 */ 6205 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, 6206 NULL); 6207 6208 mmap_read_unlock(mc.mm); 6209 atomic_dec(&mc.from->moving_account); 6210 } 6211 6212 static void mem_cgroup_move_task(void) 6213 { 6214 if (mc.to) { 6215 mem_cgroup_move_charge(); 6216 mem_cgroup_clear_mc(); 6217 } 6218 } 6219 #else /* !CONFIG_MMU */ 6220 static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 6221 { 6222 return 0; 6223 } 6224 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 6225 { 6226 } 6227 static void mem_cgroup_move_task(void) 6228 { 6229 } 6230 #endif 6231 6232 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) 6233 { 6234 if (value == PAGE_COUNTER_MAX) 6235 seq_puts(m, "max\n"); 6236 else 6237 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE); 6238 6239 return 0; 6240 } 6241 6242 static u64 memory_current_read(struct cgroup_subsys_state *css, 6243 struct cftype *cft) 6244 { 6245 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6246 6247 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 6248 } 6249 6250 static int memory_min_show(struct seq_file *m, void *v) 6251 { 6252 return seq_puts_memcg_tunable(m, 6253 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); 6254 } 6255 6256 static ssize_t memory_min_write(struct kernfs_open_file *of, 6257 char *buf, size_t nbytes, loff_t off) 6258 { 6259 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6260 unsigned long min; 6261 int err; 6262 6263 buf = strstrip(buf); 6264 err = page_counter_memparse(buf, "max", &min); 6265 if (err) 6266 return err; 6267 6268 page_counter_set_min(&memcg->memory, min); 6269 6270 return nbytes; 6271 } 6272 6273 static int memory_low_show(struct seq_file *m, void *v) 6274 { 6275 return seq_puts_memcg_tunable(m, 6276 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); 6277 } 6278 6279 static ssize_t memory_low_write(struct kernfs_open_file *of, 6280 char *buf, size_t nbytes, loff_t off) 6281 { 6282
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6283 unsigned long low; 6284 int err; 6285 6286 buf = strstrip(buf); 6287 err = page_counter_memparse(buf, "max", &low); 6288 if (err) 6289 return err; 6290 6291 page_counter_set_low(&memcg->memory, low); 6292 6293 return nbytes; 6294 } 6295 6296 static int memory_high_show(struct seq_file *m, void *v) 6297 { 6298 return seq_puts_memcg_tunable(m, 6299 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6300 } 6301 6302 static ssize_t memory_high_write(struct kernfs_open_file *of, 6303 char *buf, size_t nbytes, loff_t off) 6304 { 6305 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6306 unsigned int nr_retries = MAX_RECLAIM_RETRIES; 6307 bool drained = false; 6308 unsigned long high; 6309 int err; 6310 6311 buf = strstrip(buf); 6312 err = page_counter_memparse(buf, "max", &high); 6313 if (err) 6314 return err; 6315 6316 for (;;) { 6317 unsigned long nr_pages = page_counter_read(&memcg->memory); 6318 unsigned long reclaimed; 6319 6320 if (nr_pages <= high) 6321 break; 6322 6323 if (signal_pending(current)) 6324 break; 6325 6326 if (!drained) { 6327 drain_all_stock(memcg); 6328 drained = true; 6329 continue; 6330 } 6331 6332 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 6333 GFP_KERNEL, true); 6334 6335 if (!reclaimed && !nr_retries--) 6336 break; 6337 } 6338 6339 page_counter_set_high(&memcg->memory, high); 6340 6341 memcg_wb_domain_size_changed(memcg); 6342 6343 return nbytes; 6344 } 6345 6346 static int memory_max_show(struct seq_file *m, void *v) 6347 { 6348 return seq_puts_memcg_tunable(m, 6349 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); 6350 } 6351 6352 static ssize_t memory_max_write(struct kernfs_open_file *of, 6353 char *buf, size_t nbytes, loff_t off) 6354 { 6355 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6356 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES; 6357 bool drained = false; 6358 unsigned long max; 6359 int err; 6360 6361 buf = strstrip(buf); 6362 err = page_counter_memparse(buf, "max", &max); 6363 if (err) 6364 return err; 6365 6366 xchg(&memcg->memory.max, max); 6367 6368 for (;;) { 6369 unsigned long nr_pages = page_counter_read(&memcg->memory); 6370 6371 if (nr_pages <= max) 6372 break; 6373 6374 if (signal_pending(current)) 6375 break; 6376 6377 if (!drained) { 6378 drain_all_stock(memcg); 6379 drained = true; 6380 continue; 6381 } 6382 6383 if (nr_reclaims) { 6384 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 6385 GFP_KERNEL, true)) 6386 nr_reclaims--; 6387 continue; 6388 } 6389 6390 memcg_memory_event(memcg, MEMCG_OOM); 6391 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 6392 break; 6393 } 6394 6395 memcg_wb_domain_size_changed(memcg); 6396 return nbytes; 6397 } 6398 6399 static void __memory_events_show(struct seq_file *m, atomic_long_t *events) 6400 { 6401 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); 6402 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); 6403 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX])); 6404 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); 6405 seq_printf(m, "oom_kill %lu\n", 6406 atomic_long_read(&events[MEMCG_OOM_KILL])); 6407 } 6408 6409 static int memory_events_show(struct seq_file *m, void *v) 6410 { 6411 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6412 6413 __memory_events_show(m, memcg->memory_events); 6414 return 0; 6415 } 6416 6417 static int memory_events_local_show(struct seq_file *m, void *v) 6418 { 6419 struct mem_cgroup *memcg = 
mem_cgroup_from_seq(m); 6420 6421 __memory_events_show(m, memcg->memory_events_local); 6422 return 0; 6423 } 6424 6425 static int memory_stat_show(struct seq_file *m, void *v) 6426 { 6427 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6428 char *buf; 6429 6430 buf = memory_stat_format(memcg); 6431 if (!buf) 6432 return -ENOMEM; 6433 seq_puts(m, buf); 6434 kfree(buf); 6435 return 0; 6436 } 6437 6438 #ifdef CONFIG_NUMA 6439 static int memory_numa_stat_show(struct seq_file *m, void *v) 6440 { 6441 int i; 6442 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6443 6444 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { 6445 int nid; 6446 6447 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS) 6448 continue; 6449 6450 seq_printf(m, "%s", memory_stats[i].name); 6451 for_each_node_state(nid, N_MEMORY) { 6452 u64 size; 6453 struct lruvec *lruvec; 6454 6455 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); 6456 size = lruvec_page_state(lruvec, memory_stats[i].idx); 6457 size *= memory_stats[i].ratio; 6458 seq_printf(m, " N%d=%llu", nid, size); 6459 } 6460 seq_putc(m, '\n'); 6461 } 6462 6463 return 0; 6464 } 6465 #endif 6466 6467 static int memory_oom_group_show(struct seq_file *m, void *v) 6468 { 6469 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 6470 6471 seq_printf(m, "%d\n", memcg->oom_group); 6472 6473 return 0; 6474 } 6475 6476 static ssize_t memory_oom_group_write(struct kernfs_open_file *of, 6477 char *buf, size_t nbytes, loff_t off) 6478 { 6479 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 6480 int ret, oom_group; 6481 6482 buf = strstrip(buf); 6483 if (!buf) 6484 return -EINVAL; 6485 6486 ret = kstrtoint(buf, 0, &oom_group); 6487 if (ret) 6488 return ret; 6489 6490 if (oom_group != 0 && oom_group != 1) 6491 return -EINVAL; 6492 6493 memcg->oom_group = oom_group; 6494 6495 return nbytes; 6496 } 6497 6498 static struct cftype memory_files[] = { 6499 { 6500 .name = "current", 6501 .flags = CFTYPE_NOT_ON_ROOT, 6502 .read_u64 = memory_current_read, 6503 }, 6504 { 6505 .name = "min", 6506 .flags = CFTYPE_NOT_ON_ROOT, 6507 .seq_show = memory_min_show, 6508 .write = memory_min_write, 6509 }, 6510 { 6511 .name = "low", 6512 .flags = CFTYPE_NOT_ON_ROOT, 6513 .seq_show = memory_low_show, 6514 .write = memory_low_write, 6515 }, 6516 { 6517 .name = "high", 6518 .flags = CFTYPE_NOT_ON_ROOT, 6519 .seq_show = memory_high_show, 6520 .write = memory_high_write, 6521 }, 6522 { 6523 .name = "max", 6524 .flags = CFTYPE_NOT_ON_ROOT, 6525 .seq_show = memory_max_show, 6526 .write = memory_max_write, 6527 }, 6528 { 6529 .name = "events", 6530 .flags = CFTYPE_NOT_ON_ROOT, 6531 .file_offset = offsetof(struct mem_cgroup, events_file), 6532 .seq_show = memory_events_show, 6533 }, 6534 { 6535 .name = "events.local", 6536 .flags = CFTYPE_NOT_ON_ROOT, 6537 .file_offset = offsetof(struct mem_cgroup, events_local_file), 6538 .seq_show = memory_events_local_show, 6539 }, 6540 { 6541 .name = "stat", 6542 .seq_show = memory_stat_show, 6543 }, 6544 #ifdef CONFIG_NUMA 6545 { 6546 .name = "numa_stat", 6547 .seq_show = memory_numa_stat_show, 6548 }, 6549 #endif 6550 { 6551 .name = "oom.group", 6552 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, 6553 .seq_show = memory_oom_group_show, 6554 .write = memory_oom_group_write, 6555 }, 6556 { } /* terminate */ 6557 }; 6558 6559 struct cgroup_subsys memory_cgrp_subsys = { 6560 .css_alloc = mem_cgroup_css_alloc, 6561 .css_online = mem_cgroup_css_online, 6562 .css_offline = mem_cgroup_css_offline, 6563 .css_released = mem_cgroup_css_released, 6564 .css_free = 
mem_cgroup_css_free, 6565 .css_reset = mem_cgroup_css_reset, 6566 .can_attach = mem_cgroup_can_attach, 6567 .cancel_attach = mem_cgroup_cancel_attach, 6568 .post_attach = mem_cgroup_move_task, 6569 .dfl_cftypes = memory_files, 6570 .legacy_cftypes = mem_cgroup_legacy_files, 6571 .early_init = 0, 6572 }; 6573 6574 /* 6575 * This function calculates an individual cgroup's effective 6576 * protection which is derived from its own memory.min/low, its 6577 * parent's and siblings' settings, as well as the actual memory 6578 * distribution in the tree. 6579 * 6580 * The following rules apply to the effective protection values: 6581 * 6582 * 1. At the first level of reclaim, effective protection is equal to 6583 * the declared protection in memory.min and memory.low. 6584 * 6585 * 2. To enable safe delegation of the protection configuration, at 6586 * subsequent levels the effective protection is capped to the 6587 * parent's effective protection. 6588 * 6589 * 3. To make complex and dynamic subtrees easier to configure, the 6590 * user is allowed to overcommit the declared protection at a given 6591 * level. If that is the case, the parent's effective protection is 6592 * distributed to the children in proportion to how much protection 6593 * they have declared and how much of it they are utilizing. 6594 * 6595 * This makes distribution proportional, but also work-conserving: 6596 * if one cgroup claims much more protection than the memory it 6597 * actually uses, the unused remainder is available to its siblings. 6598 * 6599 * 4. Conversely, when the declared protection is undercommitted at a 6600 * given level, the distribution of the larger parental protection 6601 * budget is NOT proportional. A cgroup's protection from a sibling 6602 * is capped to its own memory.min/low setting. 6603 * 6604 * 5. However, to allow protecting recursive subtrees from each other 6605 * without having to declare each individual cgroup's fixed share 6606 * of the ancestor's claim to protection, any unutilized - 6607 * "floating" - protection from up the tree is distributed in 6608 * proportion to each cgroup's *usage*. This makes the protection 6609 * neutral wrt sibling cgroups and lets them compete freely over 6610 * the shared parental protection budget, but it protects the 6611 * subtree as a whole from neighboring subtrees. 6612 * 6613 * Note that 4. and 5. are not in conflict: 4. is about protecting 6614 * against immediate siblings whereas 5. is about protecting against 6615 * neighboring subtrees. 6616 */ 6617 static unsigned long effective_protection(unsigned long usage, 6618 unsigned long parent_usage, 6619 unsigned long setting, 6620 unsigned long parent_effective, 6621 unsigned long siblings_protected) 6622 { 6623 unsigned long protected; 6624 unsigned long ep; 6625 6626 protected = min(usage, setting); 6627 /* 6628 * If all cgroups at this level combined claim and use more 6629 * protection than what the parent affords them, distribute 6630 * shares in proportion to utilization. 6631 * 6632 * We are using actual utilization rather than the statically 6633 * claimed protection in order to be work-conserving: claimed 6634 * but unused protection is available to siblings that would 6635 * otherwise get a smaller chunk than what they claimed.
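 * As a worked example (made-up numbers): if the parent's effective
 * protection is 8G, the children's combined protected usage
 * (siblings_protected) is 16G, and this child's own protected usage is
 * 4G, the child ends up with 4G * 8G / 16G = 2G of effective protection.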
6636 */ 6637 if (siblings_protected > parent_effective) 6638 return protected * parent_effective / siblings_protected; 6639 6640 /* 6641 * Ok, utilized protection of all children is within what the 6642 * parent affords them, so we know whatever this child claims 6643 * and utilizes is effectively protected. 6644 * 6645 * If there is unprotected usage beyond this value, reclaim 6646 * will apply pressure in proportion to that amount. 6647 * 6648 * If there is unutilized protection, the cgroup will be fully 6649 * shielded from reclaim, but we do return a smaller value for 6650 * protection than what the group could enjoy in theory. This 6651 * is okay. With the overcommit distribution above, effective 6652 * protection is always dependent on how memory is actually 6653 * consumed among the siblings anyway. 6654 */ 6655 ep = protected; 6656 6657 /* 6658 * If the children aren't claiming (all of) the protection 6659 * afforded to them by the parent, distribute the remainder in 6660 * proportion to the (unprotected) memory of each cgroup. That 6661 * way, cgroups that aren't explicitly prioritized wrt each 6662 * other compete freely over the allowance, but they are 6663 * collectively protected from neighboring trees. 6664 * 6665 * We're using unprotected memory for the weight so that if 6666 * some cgroups DO claim explicit protection, we don't protect 6667 * the same bytes twice. 6668 * 6669 * Check both usage and parent_usage against the respective 6670 * protected values. One should imply the other, but they 6671 * aren't read atomically - make sure the division is sane. 6672 */ 6673 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) 6674 return ep; 6675 if (parent_effective > siblings_protected && 6676 parent_usage > siblings_protected && 6677 usage > protected) { 6678 unsigned long unclaimed; 6679 6680 unclaimed = parent_effective - siblings_protected; 6681 unclaimed *= usage - protected; 6682 unclaimed /= parent_usage - siblings_protected; 6683 6684 ep += unclaimed; 6685 } 6686 6687 return ep; 6688 } 6689 6690 /** 6691 * mem_cgroup_protected - check if memory consumption is in the normal range 6692 * @root: the top ancestor of the sub-tree being checked 6693 * @memcg: the memory cgroup to check 6694 * 6695 * WARNING: This function is not stateless! It can only be used as part 6696 * of a top-down tree iteration, not for isolated queries. 6697 */ 6698 void mem_cgroup_calculate_protection(struct mem_cgroup *root, 6699 struct mem_cgroup *memcg) 6700 { 6701 unsigned long usage, parent_usage; 6702 struct mem_cgroup *parent; 6703 6704 if (mem_cgroup_disabled()) 6705 return; 6706 6707 if (!root) 6708 root = root_mem_cgroup; 6709 6710 /* 6711 * Effective values of the reclaim targets are ignored so they 6712 * can be stale. Have a look at mem_cgroup_protection for more 6713 * details. 6714 * TODO: calculation should be more robust so that we do not need 6715 * that special casing. 
6716 */ 6717 if (memcg == root) 6718 return; 6719 6720 usage = page_counter_read(&memcg->memory); 6721 if (!usage) 6722 return; 6723 6724 parent = parent_mem_cgroup(memcg); 6725 /* No parent means a non-hierarchical mode on v1 memcg */ 6726 if (!parent) 6727 return; 6728 6729 if (parent == root) { 6730 memcg->memory.emin = READ_ONCE(memcg->memory.min); 6731 memcg->memory.elow = READ_ONCE(memcg->memory.low); 6732 return; 6733 } 6734 6735 parent_usage = page_counter_read(&parent->memory); 6736 6737 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, 6738 READ_ONCE(memcg->memory.min), 6739 READ_ONCE(parent->memory.emin), 6740 atomic_long_read(&parent->memory.children_min_usage))); 6741 6742 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, 6743 READ_ONCE(memcg->memory.low), 6744 READ_ONCE(parent->memory.elow), 6745 atomic_long_read(&parent->memory.children_low_usage))); 6746 } 6747 6748 /** 6749 * mem_cgroup_charge - charge a newly allocated page to a cgroup 6750 * @page: page to charge 6751 * @mm: mm context of the victim 6752 * @gfp_mask: reclaim mode 6753 * 6754 * Try to charge @page to the memcg that @mm belongs to, reclaiming 6755 * pages according to @gfp_mask if necessary. 6756 * 6757 * Returns 0 on success. Otherwise, an error code is returned. 6758 */ 6759 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) 6760 { 6761 unsigned int nr_pages = thp_nr_pages(page); 6762 struct mem_cgroup *memcg = NULL; 6763 int ret = 0; 6764 6765 if (mem_cgroup_disabled()) 6766 goto out; 6767 6768 if (PageSwapCache(page)) { 6769 swp_entry_t ent = { .val = page_private(page), }; 6770 unsigned short id; 6771 6772 /* 6773 * Every swap fault against a single page tries to charge the 6774 * page, bail as early as possible. shmem_unuse() encounters 6775 * already charged pages, too. page and memcg binding is 6776 * protected by the page lock, which serializes swap cache 6777 * removal, which in turn serializes uncharging. 6778 */ 6779 VM_BUG_ON_PAGE(!PageLocked(page), page); 6780 if (page_memcg(compound_head(page))) 6781 goto out; 6782 6783 id = lookup_swap_cgroup_id(ent); 6784 rcu_read_lock(); 6785 memcg = mem_cgroup_from_id(id); 6786 if (memcg && !css_tryget_online(&memcg->css)) 6787 memcg = NULL; 6788 rcu_read_unlock(); 6789 } 6790 6791 if (!memcg) 6792 memcg = get_mem_cgroup_from_mm(mm); 6793 6794 ret = try_charge(memcg, gfp_mask, nr_pages); 6795 if (ret) 6796 goto out_put; 6797 6798 css_get(&memcg->css); 6799 commit_charge(page, memcg); 6800 6801 local_irq_disable(); 6802 mem_cgroup_charge_statistics(memcg, page, nr_pages); 6803 memcg_check_events(memcg, page); 6804 local_irq_enable(); 6805 6806 if (PageSwapCache(page)) { 6807 swp_entry_t entry = { .val = page_private(page) }; 6808 /* 6809 * The swap entry might not get freed for a long time, 6810 * let's not wait for it. The page already received a 6811 * memory+swap charge, drop the swap entry duplicate. 
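 * (mem_cgroup_uncharge_swap() below releases the swap counter charge and
 * the memcg id references that were taken when the swap entry was set up)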
6812 */ 6813 mem_cgroup_uncharge_swap(entry, nr_pages); 6814 } 6815 6816 out_put: 6817 css_put(&memcg->css); 6818 out: 6819 return ret; 6820 } 6821 6822 struct uncharge_gather { 6823 struct mem_cgroup *memcg; 6824 unsigned long nr_pages; 6825 unsigned long pgpgout; 6826 unsigned long nr_kmem; 6827 struct page *dummy_page; 6828 }; 6829 6830 static inline void uncharge_gather_clear(struct uncharge_gather *ug) 6831 { 6832 memset(ug, 0, sizeof(*ug)); 6833 } 6834 6835 static void uncharge_batch(const struct uncharge_gather *ug) 6836 { 6837 unsigned long flags; 6838 6839 if (!mem_cgroup_is_root(ug->memcg)) { 6840 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); 6841 if (do_memsw_account()) 6842 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); 6843 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) 6844 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); 6845 memcg_oom_recover(ug->memcg); 6846 } 6847 6848 local_irq_save(flags); 6849 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6850 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); 6851 memcg_check_events(ug->memcg, ug->dummy_page); 6852 local_irq_restore(flags); 6853 6854 /* drop reference from uncharge_page */ 6855 css_put(&ug->memcg->css); 6856 } 6857 6858 static void uncharge_page(struct page *page, struct uncharge_gather *ug) 6859 { 6860 unsigned long nr_pages; 6861 6862 VM_BUG_ON_PAGE(PageLRU(page), page); 6863 6864 if (!page_memcg(page)) 6865 return; 6866 6867 /* 6868 * Nobody should be changing or seriously looking at 6869 * page_memcg(page) at this point, we have fully 6870 * exclusive access to the page. 6871 */ 6872 6873 if (ug->memcg != page_memcg(page)) { 6874 if (ug->memcg) { 6875 uncharge_batch(ug); 6876 uncharge_gather_clear(ug); 6877 } 6878 ug->memcg = page_memcg(page); 6879 6880 /* pairs with css_put in uncharge_batch */ 6881 css_get(&ug->memcg->css); 6882 } 6883 6884 nr_pages = compound_nr(page); 6885 ug->nr_pages += nr_pages; 6886 6887 if (PageMemcgKmem(page)) 6888 ug->nr_kmem += nr_pages; 6889 else 6890 ug->pgpgout++; 6891 6892 ug->dummy_page = page; 6893 page->memcg_data = 0; 6894 css_put(&ug->memcg->css); 6895 } 6896 6897 static void uncharge_list(struct list_head *page_list) 6898 { 6899 struct uncharge_gather ug; 6900 struct list_head *next; 6901 6902 uncharge_gather_clear(&ug); 6903 6904 /* 6905 * Note that the list can be a single page->lru; hence the 6906 * do-while loop instead of a simple list_for_each_entry(). 6907 */ 6908 next = page_list->next; 6909 do { 6910 struct page *page; 6911 6912 page = list_entry(next, struct page, lru); 6913 next = page->lru.next; 6914 6915 uncharge_page(page, &ug); 6916 } while (next != page_list); 6917 6918 if (ug.memcg) 6919 uncharge_batch(&ug); 6920 } 6921 6922 /** 6923 * mem_cgroup_uncharge - uncharge a page 6924 * @page: page to uncharge 6925 * 6926 * Uncharge a page previously charged with mem_cgroup_charge(). 6927 */ 6928 void mem_cgroup_uncharge(struct page *page) 6929 { 6930 struct uncharge_gather ug; 6931 6932 if (mem_cgroup_disabled()) 6933 return; 6934 6935 /* Don't touch page->lru of any random page, pre-check: */ 6936 if (!page_memcg(page)) 6937 return; 6938 6939 uncharge_gather_clear(&ug); 6940 uncharge_page(page, &ug); 6941 uncharge_batch(&ug); 6942 } 6943 6944 /** 6945 * mem_cgroup_uncharge_list - uncharge a list of page 6946 * @page_list: list of pages to uncharge 6947 * 6948 * Uncharge a list of pages previously charged with 6949 * mem_cgroup_charge(). 
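 *
 * A sketch of the expected calling pattern (illustrative, not a caller in
 * this file): pages are linked through page->lru and uncharged in one
 * batch, e.g.
 *
 *	LIST_HEAD(pages_to_free);
 *	...
 *	list_add(&page->lru, &pages_to_free);
 *	...
 *	mem_cgroup_uncharge_list(&pages_to_free);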
6950 */ 6951 void mem_cgroup_uncharge_list(struct list_head *page_list) 6952 { 6953 if (mem_cgroup_disabled()) 6954 return; 6955 6956 if (!list_empty(page_list)) 6957 uncharge_list(page_list); 6958 } 6959 6960 /** 6961 * mem_cgroup_migrate - charge a page's replacement 6962 * @oldpage: currently circulating page 6963 * @newpage: replacement page 6964 * 6965 * Charge @newpage as a replacement page for @oldpage. @oldpage will 6966 * be uncharged upon free. 6967 * 6968 * Both pages must be locked, @newpage->mapping must be set up. 6969 */ 6970 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) 6971 { 6972 struct mem_cgroup *memcg; 6973 unsigned int nr_pages; 6974 unsigned long flags; 6975 6976 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 6977 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 6978 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6979 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6980 newpage); 6981 6982 if (mem_cgroup_disabled()) 6983 return; 6984 6985 /* Page cache replacement: new page already charged? */ 6986 if (page_memcg(newpage)) 6987 return; 6988 6989 memcg = page_memcg(oldpage); 6990 if (!memcg) 6991 return; 6992 6993 /* Force-charge the new page. The old one will be freed soon */ 6994 nr_pages = thp_nr_pages(newpage); 6995 6996 page_counter_charge(&memcg->memory, nr_pages); 6997 if (do_memsw_account()) 6998 page_counter_charge(&memcg->memsw, nr_pages); 6999 7000 css_get(&memcg->css); 7001 commit_charge(newpage, memcg); 7002 7003 local_irq_save(flags); 7004 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); 7005 memcg_check_events(memcg, newpage); 7006 local_irq_restore(flags); 7007 } 7008 7009 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 7010 EXPORT_SYMBOL(memcg_sockets_enabled_key); 7011 7012 void mem_cgroup_sk_alloc(struct sock *sk) 7013 { 7014 struct mem_cgroup *memcg; 7015 7016 if (!mem_cgroup_sockets_enabled) 7017 return; 7018 7019 /* Do not associate the sock with unrelated interrupted task's memcg. */ 7020 if (in_interrupt()) 7021 return; 7022 7023 rcu_read_lock(); 7024 memcg = mem_cgroup_from_task(current); 7025 if (memcg == root_mem_cgroup) 7026 goto out; 7027 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) 7028 goto out; 7029 if (css_tryget(&memcg->css)) 7030 sk->sk_memcg = memcg; 7031 out: 7032 rcu_read_unlock(); 7033 } 7034 7035 void mem_cgroup_sk_free(struct sock *sk) 7036 { 7037 if (sk->sk_memcg) 7038 css_put(&sk->sk_memcg->css); 7039 } 7040 7041 /** 7042 * mem_cgroup_charge_skmem - charge socket memory 7043 * @memcg: memcg to charge 7044 * @nr_pages: number of pages to charge 7045 * 7046 * Charges @nr_pages to @memcg. Returns %true if the charge fit within 7047 * @memcg's configured limit, %false if the charge had to be forced. 
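 *
 * The charge itself always succeeds - on the legacy hierarchy it is forced
 * past the tcpmem limit, on the default hierarchy it is retried with
 * __GFP_NOFAIL - so the return value is only a pressure signal for callers
 * such as the networking code.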
7048 */ 7049 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7050 { 7051 gfp_t gfp_mask = GFP_KERNEL; 7052 7053 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7054 struct page_counter *fail; 7055 7056 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { 7057 memcg->tcpmem_pressure = 0; 7058 return true; 7059 } 7060 page_counter_charge(&memcg->tcpmem, nr_pages); 7061 memcg->tcpmem_pressure = 1; 7062 return false; 7063 } 7064 7065 /* Don't block in the packet receive path */ 7066 if (in_softirq()) 7067 gfp_mask = GFP_NOWAIT; 7068 7069 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); 7070 7071 if (try_charge(memcg, gfp_mask, nr_pages) == 0) 7072 return true; 7073 7074 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); 7075 return false; 7076 } 7077 7078 /** 7079 * mem_cgroup_uncharge_skmem - uncharge socket memory 7080 * @memcg: memcg to uncharge 7081 * @nr_pages: number of pages to uncharge 7082 */ 7083 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) 7084 { 7085 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { 7086 page_counter_uncharge(&memcg->tcpmem, nr_pages); 7087 return; 7088 } 7089 7090 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); 7091 7092 refill_stock(memcg, nr_pages); 7093 } 7094 7095 static int __init cgroup_memory(char *s) 7096 { 7097 char *token; 7098 7099 while ((token = strsep(&s, ",")) != NULL) { 7100 if (!*token) 7101 continue; 7102 if (!strcmp(token, "nosocket")) 7103 cgroup_memory_nosocket = true; 7104 if (!strcmp(token, "nokmem")) 7105 cgroup_memory_nokmem = true; 7106 } 7107 return 0; 7108 } 7109 __setup("cgroup.memory=", cgroup_memory); 7110 7111 /* 7112 * subsys_initcall() for memory controller. 7113 * 7114 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this 7115 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but 7116 * basically everything that doesn't depend on a specific mem_cgroup structure 7117 * should be initialized from here. 7118 */ 7119 static int __init mem_cgroup_init(void) 7120 { 7121 int cpu, node; 7122 7123 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, 7124 memcg_hotplug_cpu_dead); 7125 7126 for_each_possible_cpu(cpu) 7127 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 7128 drain_local_stock); 7129 7130 for_each_node(node) { 7131 struct mem_cgroup_tree_per_node *rtpn; 7132 7133 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 7134 node_online(node) ? node : NUMA_NO_NODE); 7135 7136 rtpn->rb_root = RB_ROOT; 7137 rtpn->rb_rightmost = NULL; 7138 spin_lock_init(&rtpn->lock); 7139 soft_limit_tree.rb_tree_per_node[node] = rtpn; 7140 } 7141 7142 return 0; 7143 } 7144 subsys_initcall(mem_cgroup_init); 7145 7146 #ifdef CONFIG_MEMCG_SWAP 7147 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 7148 { 7149 while (!refcount_inc_not_zero(&memcg->id.ref)) { 7150 /* 7151 * The root cgroup cannot be destroyed, so its refcount must 7152 * always be >= 1. 7153 */ 7154 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 7155 VM_BUG_ON(1); 7156 break; 7157 } 7158 memcg = parent_mem_cgroup(memcg); 7159 if (!memcg) 7160 memcg = root_mem_cgroup; 7161 } 7162 return memcg; 7163 } 7164 7165 /** 7166 * mem_cgroup_swapout - transfer a memsw charge to swap 7167 * @page: page whose memsw charge to transfer 7168 * @entry: swap entry to move the charge to 7169 * 7170 * Transfer the memsw charge of @page to @entry.
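 *
 * The caller must hold the i_pages lock with interrupts disabled (see the
 * check below), typically from the reclaim path that is removing @page
 * from the swap cache.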
7171 */ 7172 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 7173 { 7174 struct mem_cgroup *memcg, *swap_memcg; 7175 unsigned int nr_entries; 7176 unsigned short oldid; 7177 7178 VM_BUG_ON_PAGE(PageLRU(page), page); 7179 VM_BUG_ON_PAGE(page_count(page), page); 7180 7181 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7182 return; 7183 7184 memcg = page_memcg(page); 7185 7186 /* Readahead page, never charged */ 7187 if (!memcg) 7188 return; 7189 7190 /* 7191 * In case the memcg owning these pages has been offlined and doesn't 7192 * have an ID allocated to it anymore, charge the closest online 7193 * ancestor for the swap instead and transfer the memory+swap charge. 7194 */ 7195 swap_memcg = mem_cgroup_id_get_online(memcg); 7196 nr_entries = thp_nr_pages(page); 7197 /* Get references for the tail pages, too */ 7198 if (nr_entries > 1) 7199 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); 7200 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 7201 nr_entries); 7202 VM_BUG_ON_PAGE(oldid, page); 7203 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); 7204 7205 page->memcg_data = 0; 7206 7207 if (!mem_cgroup_is_root(memcg)) 7208 page_counter_uncharge(&memcg->memory, nr_entries); 7209 7210 if (!cgroup_memory_noswap && memcg != swap_memcg) { 7211 if (!mem_cgroup_is_root(swap_memcg)) 7212 page_counter_charge(&swap_memcg->memsw, nr_entries); 7213 page_counter_uncharge(&memcg->memsw, nr_entries); 7214 } 7215 7216 /* 7217 * Interrupts should be disabled here because the caller holds the 7218 * i_pages lock which is taken with interrupts-off. It is 7219 * important here to have the interrupts disabled because it is the 7220 * only synchronisation we have for updating the per-CPU variables. 7221 */ 7222 VM_BUG_ON(!irqs_disabled()); 7223 mem_cgroup_charge_statistics(memcg, page, -nr_entries); 7224 memcg_check_events(memcg, page); 7225 7226 css_put(&memcg->css); 7227 } 7228 7229 /** 7230 * mem_cgroup_try_charge_swap - try charging swap space for a page 7231 * @page: page being added to swap 7232 * @entry: swap entry to charge 7233 * 7234 * Try to charge @page's memcg for the swap space at @entry. 7235 * 7236 * Returns 0 on success, -ENOMEM on failure. 
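 *
 * A failed charge is accounted as MEMCG_SWAP_MAX and MEMCG_SWAP_FAIL
 * events (reported through memory.swap.events below); callers typically
 * react by leaving the page in memory rather than swapping it out.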
7237 */ 7238 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) 7239 { 7240 unsigned int nr_pages = thp_nr_pages(page); 7241 struct page_counter *counter; 7242 struct mem_cgroup *memcg; 7243 unsigned short oldid; 7244 7245 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7246 return 0; 7247 7248 memcg = page_memcg(page); 7249 7250 /* Readahead page, never charged */ 7251 if (!memcg) 7252 return 0; 7253 7254 if (!entry.val) { 7255 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7256 return 0; 7257 } 7258 7259 memcg = mem_cgroup_id_get_online(memcg); 7260 7261 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && 7262 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { 7263 memcg_memory_event(memcg, MEMCG_SWAP_MAX); 7264 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); 7265 mem_cgroup_id_put(memcg); 7266 return -ENOMEM; 7267 } 7268 7269 /* Get references for the tail pages, too */ 7270 if (nr_pages > 1) 7271 mem_cgroup_id_get_many(memcg, nr_pages - 1); 7272 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); 7273 VM_BUG_ON_PAGE(oldid, page); 7274 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); 7275 7276 return 0; 7277 } 7278 7279 /** 7280 * mem_cgroup_uncharge_swap - uncharge swap space 7281 * @entry: swap entry to uncharge 7282 * @nr_pages: the amount of swap space to uncharge 7283 */ 7284 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) 7285 { 7286 struct mem_cgroup *memcg; 7287 unsigned short id; 7288 7289 id = swap_cgroup_record(entry, 0, nr_pages); 7290 rcu_read_lock(); 7291 memcg = mem_cgroup_from_id(id); 7292 if (memcg) { 7293 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { 7294 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7295 page_counter_uncharge(&memcg->swap, nr_pages); 7296 else 7297 page_counter_uncharge(&memcg->memsw, nr_pages); 7298 } 7299 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); 7300 mem_cgroup_id_put_many(memcg, nr_pages); 7301 } 7302 rcu_read_unlock(); 7303 } 7304 7305 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) 7306 { 7307 long nr_swap_pages = get_nr_swap_pages(); 7308 7309 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7310 return nr_swap_pages; 7311 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) 7312 nr_swap_pages = min_t(long, nr_swap_pages, 7313 READ_ONCE(memcg->swap.max) - 7314 page_counter_read(&memcg->swap)); 7315 return nr_swap_pages; 7316 } 7317 7318 bool mem_cgroup_swap_full(struct page *page) 7319 { 7320 struct mem_cgroup *memcg; 7321 7322 VM_BUG_ON_PAGE(!PageLocked(page), page); 7323 7324 if (vm_swap_full()) 7325 return true; 7326 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) 7327 return false; 7328 7329 memcg = page_memcg(page); 7330 if (!memcg) 7331 return false; 7332 7333 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { 7334 unsigned long usage = page_counter_read(&memcg->swap); 7335 7336 if (usage * 2 >= READ_ONCE(memcg->swap.high) || 7337 usage * 2 >= READ_ONCE(memcg->swap.max)) 7338 return true; 7339 } 7340 7341 return false; 7342 } 7343 7344 static int __init setup_swap_account(char *s) 7345 { 7346 if (!strcmp(s, "1")) 7347 cgroup_memory_noswap = false; 7348 else if (!strcmp(s, "0")) 7349 cgroup_memory_noswap = true; 7350 return 1; 7351 } 7352 __setup("swapaccount=", setup_swap_account); 7353 7354 static u64 swap_current_read(struct cgroup_subsys_state *css, 7355 struct cftype *cft) 7356 { 7357 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 7358 7359 return 
(u64)page_counter_read(&memcg->swap) * PAGE_SIZE; 7360 } 7361 7362 static int swap_high_show(struct seq_file *m, void *v) 7363 { 7364 return seq_puts_memcg_tunable(m, 7365 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); 7366 } 7367 7368 static ssize_t swap_high_write(struct kernfs_open_file *of, 7369 char *buf, size_t nbytes, loff_t off) 7370 { 7371 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7372 unsigned long high; 7373 int err; 7374 7375 buf = strstrip(buf); 7376 err = page_counter_memparse(buf, "max", &high); 7377 if (err) 7378 return err; 7379 7380 page_counter_set_high(&memcg->swap, high); 7381 7382 return nbytes; 7383 } 7384 7385 static int swap_max_show(struct seq_file *m, void *v) 7386 { 7387 return seq_puts_memcg_tunable(m, 7388 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); 7389 } 7390 7391 static ssize_t swap_max_write(struct kernfs_open_file *of, 7392 char *buf, size_t nbytes, loff_t off) 7393 { 7394 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 7395 unsigned long max; 7396 int err; 7397 7398 buf = strstrip(buf); 7399 err = page_counter_memparse(buf, "max", &max); 7400 if (err) 7401 return err; 7402 7403 xchg(&memcg->swap.max, max); 7404 7405 return nbytes; 7406 } 7407 7408 static int swap_events_show(struct seq_file *m, void *v) 7409 { 7410 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 7411 7412 seq_printf(m, "high %lu\n", 7413 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); 7414 seq_printf(m, "max %lu\n", 7415 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); 7416 seq_printf(m, "fail %lu\n", 7417 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); 7418 7419 return 0; 7420 } 7421 7422 static struct cftype swap_files[] = { 7423 { 7424 .name = "swap.current", 7425 .flags = CFTYPE_NOT_ON_ROOT, 7426 .read_u64 = swap_current_read, 7427 }, 7428 { 7429 .name = "swap.high", 7430 .flags = CFTYPE_NOT_ON_ROOT, 7431 .seq_show = swap_high_show, 7432 .write = swap_high_write, 7433 }, 7434 { 7435 .name = "swap.max", 7436 .flags = CFTYPE_NOT_ON_ROOT, 7437 .seq_show = swap_max_show, 7438 .write = swap_max_write, 7439 }, 7440 { 7441 .name = "swap.events", 7442 .flags = CFTYPE_NOT_ON_ROOT, 7443 .file_offset = offsetof(struct mem_cgroup, swap_events_file), 7444 .seq_show = swap_events_show, 7445 }, 7446 { } /* terminate */ 7447 }; 7448 7449 static struct cftype memsw_files[] = { 7450 { 7451 .name = "memsw.usage_in_bytes", 7452 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 7453 .read_u64 = mem_cgroup_read_u64, 7454 }, 7455 { 7456 .name = "memsw.max_usage_in_bytes", 7457 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), 7458 .write = mem_cgroup_reset, 7459 .read_u64 = mem_cgroup_read_u64, 7460 }, 7461 { 7462 .name = "memsw.limit_in_bytes", 7463 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), 7464 .write = mem_cgroup_write, 7465 .read_u64 = mem_cgroup_read_u64, 7466 }, 7467 { 7468 .name = "memsw.failcnt", 7469 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), 7470 .write = mem_cgroup_reset, 7471 .read_u64 = mem_cgroup_read_u64, 7472 }, 7473 { }, /* terminate */ 7474 }; 7475 7476 /* 7477 * If mem_cgroup_swap_init() is implemented as a subsys_initcall() 7478 * instead of a core_initcall(), this could mean cgroup_memory_noswap still 7479 * remains set to false even when memcg is disabled via "cgroup_disable=memory" 7480 * boot parameter. This may result in premature OOPS inside 7481 * mem_cgroup_get_nr_swap_pages() function in corner cases. 
7482 */ 7483 static int __init mem_cgroup_swap_init(void) 7484 { 7485 /* No memory control -> no swap control */ 7486 if (mem_cgroup_disabled()) 7487 cgroup_memory_noswap = true; 7488 7489 if (cgroup_memory_noswap) 7490 return 0; 7491 7492 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); 7493 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); 7494 7495 return 0; 7496 } 7497 core_initcall(mem_cgroup_swap_init); 7498 7499 #endif /* CONFIG_MEMCG_SWAP */ 7500
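/*
 * Illustrative only: the snippet below is not kernel code and is never
 * built (it is guarded by #if 0).  It sketches how a userspace program
 * might drive the cgroup v2 interface files implemented above
 * (memory.max and memory.high from memory_files[], memory.swap.max from
 * swap_files[]), assuming a cgroup2 mount at /sys/fs/cgroup and an
 * existing cgroup directory supplied by the caller.
 */
#if 0
#include <stdio.h>

/* Write a byte limit (or the string "max") into a memcg interface file. */
static int memcg_write_limit(const char *cgroup_dir, const char *file,
			     const char *value)
{
	char path[4096];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", cgroup_dir, file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", value);
	return fclose(f);
}

/* e.g. memcg_write_limit("/sys/fs/cgroup/foo", "memory.max", "1073741824"); */
/* e.g. memcg_write_limit("/sys/fs/cgroup/foo", "memory.high", "max"); */
#endif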